FreeBSD/Linux Kernel Cross Reference
sys/nfs/nfs_vnops.c
1 /*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
37 * $FreeBSD$
38 */
39
40
41 /*
42 * vnode op calls for Sun NFS version 2 and 3
43 */
44
45 #include "opt_inet.h"
46
47 #include <sys/param.h>
48 #include <sys/kernel.h>
49 #include <sys/systm.h>
50 #include <sys/resourcevar.h>
51 #include <sys/proc.h>
52 #include <sys/mount.h>
53 #include <sys/buf.h>
54 #include <sys/malloc.h>
55 #include <sys/mbuf.h>
56 #include <sys/namei.h>
57 #include <sys/socket.h>
58 #include <sys/vnode.h>
59 #include <sys/dirent.h>
60 #include <sys/fcntl.h>
61 #include <sys/lockf.h>
62 #include <sys/stat.h>
63 #include <sys/sysctl.h>
64
65 #include <vm/vm.h>
66 #include <vm/vm_extern.h>
67 #include <vm/vm_zone.h>
68
69 #include <miscfs/fifofs/fifo.h>
70 #include <miscfs/specfs/specdev.h>
71
72 #include <nfs/rpcv2.h>
73 #include <nfs/nfsproto.h>
74 #include <nfs/nfs.h>
75 #include <nfs/nfsnode.h>
76 #include <nfs/nfsmount.h>
77 #include <nfs/xdr_subs.h>
78 #include <nfs/nfsm_subs.h>
79 #include <nfs/nqnfs.h>
80
81 #include <net/if.h>
82 #include <netinet/in.h>
83 #include <netinet/in_var.h>
84
85 /* Defs */
86 #define TRUE 1
87 #define FALSE 0
88
89 /*
90 * Ifdef for FreeBSD-current merged buffer cache. It is unfortunate that these
91 * calls are not in getblk() and brelse() so that they would not be necessary
92 * here.
93 */
94 #ifndef B_VMIO
95 #define vfs_busy_pages(bp, f)
96 #endif
97
98 static int nfsspec_read __P((struct vop_read_args *));
99 static int nfsspec_write __P((struct vop_write_args *));
100 static int nfsfifo_read __P((struct vop_read_args *));
101 static int nfsfifo_write __P((struct vop_write_args *));
102 static int nfsspec_close __P((struct vop_close_args *));
103 static int nfsfifo_close __P((struct vop_close_args *));
104 #define nfs_poll vop_nopoll
105 static int nfs_flush __P((struct vnode *,struct ucred *,int,struct proc *,int));
106 static int nfs_setattrrpc __P((struct vnode *,struct vattr *,struct ucred *,struct proc *));
107 static int nfs_lookup __P((struct vop_lookup_args *));
108 static int nfs_create __P((struct vop_create_args *));
109 static int nfs_mknod __P((struct vop_mknod_args *));
110 static int nfs_open __P((struct vop_open_args *));
111 static int nfs_close __P((struct vop_close_args *));
112 static int nfs_access __P((struct vop_access_args *));
113 static int nfs_getattr __P((struct vop_getattr_args *));
114 static int nfs_setattr __P((struct vop_setattr_args *));
115 static int nfs_read __P((struct vop_read_args *));
116 static int nfs_mmap __P((struct vop_mmap_args *));
117 static int nfs_fsync __P((struct vop_fsync_args *));
118 static int nfs_remove __P((struct vop_remove_args *));
119 static int nfs_link __P((struct vop_link_args *));
120 static int nfs_rename __P((struct vop_rename_args *));
121 static int nfs_mkdir __P((struct vop_mkdir_args *));
122 static int nfs_rmdir __P((struct vop_rmdir_args *));
123 static int nfs_symlink __P((struct vop_symlink_args *));
124 static int nfs_readdir __P((struct vop_readdir_args *));
125 static int nfs_bmap __P((struct vop_bmap_args *));
126 static int nfs_strategy __P((struct vop_strategy_args *));
127 static int nfs_lookitup __P((struct vnode *, const char *, int,
128 struct ucred *, struct proc *, struct nfsnode **));
129 static int nfs_sillyrename __P((struct vnode *,struct vnode *,struct componentname *));
130 static int nfsspec_access __P((struct vop_access_args *));
131 static int nfs_readlink __P((struct vop_readlink_args *));
132 static int nfs_print __P((struct vop_print_args *));
133 static int nfs_advlock __P((struct vop_advlock_args *));
134 static int nfs_bwrite __P((struct vop_bwrite_args *));
135 /*
136 * Global vfs data structures for nfs
137 */
138 vop_t **nfsv2_vnodeop_p;
139 static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
140 { &vop_default_desc, (vop_t *) vop_defaultop },
141 { &vop_abortop_desc, (vop_t *) nfs_abortop },
142 { &vop_access_desc, (vop_t *) nfs_access },
143 { &vop_advlock_desc, (vop_t *) nfs_advlock },
144 { &vop_bmap_desc, (vop_t *) nfs_bmap },
145 { &vop_bwrite_desc, (vop_t *) nfs_bwrite },
146 { &vop_close_desc, (vop_t *) nfs_close },
147 { &vop_create_desc, (vop_t *) nfs_create },
148 { &vop_fsync_desc, (vop_t *) nfs_fsync },
149 { &vop_getattr_desc, (vop_t *) nfs_getattr },
150 { &vop_getpages_desc, (vop_t *) nfs_getpages },
151 { &vop_putpages_desc, (vop_t *) nfs_putpages },
152 { &vop_inactive_desc, (vop_t *) nfs_inactive },
153 { &vop_lease_desc, (vop_t *) vop_null },
154 { &vop_link_desc, (vop_t *) nfs_link },
155 { &vop_lock_desc, (vop_t *) vop_sharedlock },
156 { &vop_lookup_desc, (vop_t *) nfs_lookup },
157 { &vop_mkdir_desc, (vop_t *) nfs_mkdir },
158 { &vop_mknod_desc, (vop_t *) nfs_mknod },
159 { &vop_mmap_desc, (vop_t *) nfs_mmap },
160 { &vop_open_desc, (vop_t *) nfs_open },
161 { &vop_poll_desc, (vop_t *) nfs_poll },
162 { &vop_print_desc, (vop_t *) nfs_print },
163 { &vop_read_desc, (vop_t *) nfs_read },
164 { &vop_readdir_desc, (vop_t *) nfs_readdir },
165 { &vop_readlink_desc, (vop_t *) nfs_readlink },
166 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
167 { &vop_remove_desc, (vop_t *) nfs_remove },
168 { &vop_rename_desc, (vop_t *) nfs_rename },
169 { &vop_rmdir_desc, (vop_t *) nfs_rmdir },
170 { &vop_setattr_desc, (vop_t *) nfs_setattr },
171 { &vop_strategy_desc, (vop_t *) nfs_strategy },
172 { &vop_symlink_desc, (vop_t *) nfs_symlink },
173 { &vop_write_desc, (vop_t *) nfs_write },
174 { NULL, NULL }
175 };
176 static struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
177 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
178 VNODEOP_SET(nfsv2_vnodeop_opv_desc);
179
180 /*
181 * Special device vnode ops
182 */
183 vop_t **spec_nfsv2nodeop_p;
184 static struct vnodeopv_entry_desc nfsv2_specop_entries[] = {
185 { &vop_default_desc, (vop_t *) spec_vnoperate },
186 { &vop_access_desc, (vop_t *) nfsspec_access },
187 { &vop_close_desc, (vop_t *) nfsspec_close },
188 { &vop_fsync_desc, (vop_t *) nfs_fsync },
189 { &vop_getattr_desc, (vop_t *) nfs_getattr },
190 { &vop_inactive_desc, (vop_t *) nfs_inactive },
191 { &vop_lock_desc, (vop_t *) vop_sharedlock },
192 { &vop_print_desc, (vop_t *) nfs_print },
193 { &vop_read_desc, (vop_t *) nfsspec_read },
194 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
195 { &vop_setattr_desc, (vop_t *) nfs_setattr },
196 { &vop_write_desc, (vop_t *) nfsspec_write },
197 { NULL, NULL }
198 };
199 static struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
200 { &spec_nfsv2nodeop_p, nfsv2_specop_entries };
201 VNODEOP_SET(spec_nfsv2nodeop_opv_desc);
202
203 vop_t **fifo_nfsv2nodeop_p;
204 static struct vnodeopv_entry_desc nfsv2_fifoop_entries[] = {
205 { &vop_default_desc, (vop_t *) fifo_vnoperate },
206 { &vop_access_desc, (vop_t *) nfsspec_access },
207 { &vop_close_desc, (vop_t *) nfsfifo_close },
208 { &vop_fsync_desc, (vop_t *) nfs_fsync },
209 { &vop_getattr_desc, (vop_t *) nfs_getattr },
210 { &vop_inactive_desc, (vop_t *) nfs_inactive },
211 { &vop_lock_desc, (vop_t *) vop_sharedlock },
212 { &vop_print_desc, (vop_t *) nfs_print },
213 { &vop_read_desc, (vop_t *) nfsfifo_read },
214 { &vop_reclaim_desc, (vop_t *) nfs_reclaim },
215 { &vop_setattr_desc, (vop_t *) nfs_setattr },
216 { &vop_write_desc, (vop_t *) nfsfifo_write },
217 { NULL, NULL }
218 };
219 static struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
220 { &fifo_nfsv2nodeop_p, nfsv2_fifoop_entries };
221 VNODEOP_SET(fifo_nfsv2nodeop_opv_desc);
222
223 static int nfs_commit __P((struct vnode *vp, u_quad_t offset, int cnt,
224 struct ucred *cred, struct proc *procp));
225 static int nfs_mknodrpc __P((struct vnode *dvp, struct vnode **vpp,
226 struct componentname *cnp,
227 struct vattr *vap));
228 static int nfs_removerpc __P((struct vnode *dvp, const char *name,
229 int namelen,
230 struct ucred *cred, struct proc *proc));
231 static int nfs_renamerpc __P((struct vnode *fdvp, const char *fnameptr,
232 int fnamelen, struct vnode *tdvp,
233 const char *tnameptr, int tnamelen,
234 struct ucred *cred, struct proc *proc));
235 static int nfs_renameit __P((struct vnode *sdvp,
236 struct componentname *scnp,
237 struct sillyrename *sp));
238
239 /*
240 * Global variables
241 */
242 extern u_int32_t nfs_true, nfs_false;
243 extern u_int32_t nfs_xdrneg1;
244 extern struct nfsstats nfsstats;
245 extern nfstype nfsv3_type[9];
246 struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
247 struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON];
248 int nfs_numasync = 0;
249 #define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
250
251 static int nfsaccess_cache_timeout = 2;
252 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
253 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
254
255 static int nfsaccess_cache_hits;
256 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
257 &nfsaccess_cache_hits, 0, "NFS ACCESS cache hit count");
258
259 static int nfsaccess_cache_fills;
260 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_fills, CTLFLAG_RD,
261 &nfsaccess_cache_fills, 0, "NFS ACCESS cache fill count");
262
263 /*
264 * nfs access vnode op.
265 * For nfs version 2, just return ok. File accesses may fail later.
266 * For nfs version 3, use the access rpc to check accessibility. If file modes
267 * are changed on the server, accesses might still fail later.
268 */
269 static int
270 nfs_access(ap)
271 struct vop_access_args /* {
272 struct vnode *a_vp;
273 int a_mode;
274 struct ucred *a_cred;
275 struct proc *a_p;
276 } */ *ap;
277 {
278 register struct vnode *vp = ap->a_vp;
279 register u_int32_t *tl;
280 register caddr_t cp;
281 register int32_t t1, t2;
282 caddr_t bpos, dpos, cp2;
283 int error = 0, attrflag;
284 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
285 u_int32_t mode, rmode, wmode;
286 int v3 = NFS_ISV3(vp);
287 struct nfsnode *np = VTONFS(vp);
288
289 /*
290 * Disallow write attempts on filesystems mounted read-only;
291 * unless the file is a socket, fifo, or a block or character
292 * device resident on the filesystem.
293 */
294 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
295 switch (vp->v_type) {
296 case VREG:
297 case VDIR:
298 case VLNK:
299 return (EROFS);
300 default:
301 break;
302 }
303 }
304 /*
305 * For nfs v3, check to see if we have done this recently, and if
306 * so return our cached result instead of making an ACCESS call.
 307  * If not, do an access rpc. For nfs version 2 you are stuck emulating
308 * ufs_access() locally using the vattr. This may not be correct,
309 * since the server may apply other access criteria such as
310 * client uid-->server uid mapping that we do not know about.
311 */
312 if (v3) {
313 if (ap->a_mode & VREAD)
314 mode = NFSV3ACCESS_READ;
315 else
316 mode = 0;
317 if (vp->v_type != VDIR) {
318 if (ap->a_mode & VWRITE)
319 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
320 if (ap->a_mode & VEXEC)
321 mode |= NFSV3ACCESS_EXECUTE;
322 } else {
323 if (ap->a_mode & VWRITE)
324 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
325 NFSV3ACCESS_DELETE);
326 if (ap->a_mode & VEXEC)
327 mode |= NFSV3ACCESS_LOOKUP;
328 }
329 /* XXX safety belt, only make blanket request if caching */
330 if (nfsaccess_cache_timeout > 0) {
331 wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY |
332 NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE |
333 NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP;
334 } else {
335 wmode = mode;
336 }
337
338 /*
339 * Does our cached result allow us to give a definite yes to
340 * this request?
341 */
342 if ((time_second < (np->n_modestamp + nfsaccess_cache_timeout)) &&
343 (ap->a_cred->cr_uid == np->n_modeuid) &&
344 ((np->n_mode & mode) == mode)) {
345 nfsaccess_cache_hits++;
346 } else {
347 /*
348 * Either a no, or a don't know. Go to the wire.
349 */
350 nfsstats.rpccnt[NFSPROC_ACCESS]++;
351 nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
352 nfsm_fhtom(vp, v3);
353 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
354 *tl = txdr_unsigned(wmode);
355 nfsm_request(vp, NFSPROC_ACCESS, ap->a_p, ap->a_cred);
356 nfsm_postop_attr(vp, attrflag);
357 if (!error) {
358 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
359 rmode = fxdr_unsigned(u_int32_t, *tl);
360 /*
361 * The NFS V3 spec does not clarify whether or not
362 * the returned access bits can be a superset of
363 * the ones requested, so...
364 */
365 if ((rmode & mode) != mode) {
366 error = EACCES;
367 } else if (nfsaccess_cache_timeout > 0) {
368 /* cache the result */
369 nfsaccess_cache_fills++;
370 np->n_mode = rmode;
371 np->n_modeuid = ap->a_cred->cr_uid;
372 np->n_modestamp = time_second;
373 }
374 }
375 nfsm_reqdone;
376 }
377 return (error);
378 } else {
379 if (error = nfsspec_access(ap))
380 return (error);
381
382 /*
383 * Attempt to prevent a mapped root from accessing a file
384 * which it shouldn't. We try to read a byte from the file
385 * if the user is root and the file is not zero length.
386 * After calling nfsspec_access, we should have the correct
387 * file size cached.
388 */
389 if (ap->a_cred->cr_uid == 0 && (ap->a_mode & VREAD)
390 && VTONFS(vp)->n_size > 0) {
391 struct iovec aiov;
392 struct uio auio;
393 char buf[1];
394
395 aiov.iov_base = buf;
396 aiov.iov_len = 1;
397 auio.uio_iov = &aiov;
398 auio.uio_iovcnt = 1;
399 auio.uio_offset = 0;
400 auio.uio_resid = 1;
401 auio.uio_segflg = UIO_SYSSPACE;
402 auio.uio_rw = UIO_READ;
403 auio.uio_procp = ap->a_p;
404
405 if (vp->v_type == VREG)
406 error = nfs_readrpc(vp, &auio, ap->a_cred);
407 else if (vp->v_type == VDIR) {
408 char* bp;
409 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
410 aiov.iov_base = bp;
411 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
412 error = nfs_readdirrpc(vp, &auio, ap->a_cred);
413 free(bp, M_TEMP);
414 } else if (vp->v_type == VLNK)
415 error = nfs_readlinkrpc(vp, &auio, ap->a_cred);
416 else
417 error = EACCES;
418 }
419 return (error);
420 }
421 }
422
423 /*
424 * nfs open vnode op
425 * Check to see if the type is ok
426 * and that deletion is not in progress.
427 * For paged in text files, you will need to flush the page cache
428 * if consistency is lost.
429 */
430 /* ARGSUSED */
431 static int
432 nfs_open(ap)
433 struct vop_open_args /* {
434 struct vnode *a_vp;
435 int a_mode;
436 struct ucred *a_cred;
437 struct proc *a_p;
438 } */ *ap;
439 {
440 register struct vnode *vp = ap->a_vp;
441 struct nfsnode *np = VTONFS(vp);
442 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
443 struct vattr vattr;
444 int error;
445
446 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
447 #ifdef DIAGNOSTIC
448 printf("open eacces vtyp=%d\n",vp->v_type);
449 #endif
450 return (EACCES);
451 }
452 /*
453 * Get a valid lease. If cached data is stale, flush it.
454 */
455 if (nmp->nm_flag & NFSMNT_NQNFS) {
456 if (NQNFS_CKINVALID(vp, np, ND_READ)) {
457 do {
458 error = nqnfs_getlease(vp, ND_READ, ap->a_cred,
459 ap->a_p);
460 } while (error == NQNFS_EXPIRED);
461 if (error)
462 return (error);
463 if (np->n_lrev != np->n_brev ||
464 (np->n_flag & NQNFSNONCACHE)) {
465 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
466 ap->a_p, 1)) == EINTR)
467 return (error);
468 np->n_brev = np->n_lrev;
469 }
470 }
471 } else {
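		/*
		 * No leases here: if the file has been modified locally, flush
		 * the dirty buffers and refetch the attributes; otherwise
		 * compare the server's mtime with the cached one and invalidate
		 * the buffer cache if the file changed on the server.
		 */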
472 if (np->n_flag & NMODIFIED) {
473 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
474 ap->a_p, 1)) == EINTR)
475 return (error);
476 np->n_attrstamp = 0;
477 if (vp->v_type == VDIR)
478 np->n_direofoffset = 0;
479 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
480 if (error)
481 return (error);
482 np->n_mtime = vattr.va_mtime.tv_sec;
483 } else {
484 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p);
485 if (error)
486 return (error);
487 if (np->n_mtime != vattr.va_mtime.tv_sec) {
488 if (vp->v_type == VDIR)
489 np->n_direofoffset = 0;
490 if ((error = nfs_vinvalbuf(vp, V_SAVE,
491 ap->a_cred, ap->a_p, 1)) == EINTR)
492 return (error);
493 np->n_mtime = vattr.va_mtime.tv_sec;
494 }
495 }
496 }
497 if ((nmp->nm_flag & NFSMNT_NQNFS) == 0)
498 np->n_attrstamp = 0; /* For Open/Close consistency */
499 return (0);
500 }
501
502 /*
503 * nfs close vnode op
504 * What an NFS client should do upon close after writing is a debatable issue.
505 * Most NFS clients push delayed writes to the server upon close, basically for
506 * two reasons:
507 * 1 - So that any write errors may be reported back to the client process
508 * doing the close system call. By far the two most likely errors are
509 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
510 * 2 - To put a worst case upper bound on cache inconsistency between
511 * multiple clients for the file.
512 * There is also a consistency problem for Version 2 of the protocol w.r.t.
513 * not being able to tell if other clients are writing a file concurrently,
514 * since there is no way of knowing if the changed modify time in the reply
515 * is only due to the write for this client.
516 * (NFS Version 3 provides weak cache consistency data in the reply that
517 * should be sufficient to detect and handle this case.)
518 *
519 * The current code does the following:
520 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
521 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
522 * or commit them (this satisfies 1 and 2 except for the
523 * case where the server crashes after this close but
524 * before the commit RPC, which is felt to be "good
 525  *                        enough"). Changing the last argument to nfs_flush() to
526 * a 1 would force a commit operation, if it is felt a
527 * commit is necessary now.
528 * for NQNFS - do nothing now, since 2 is dealt with via leases and
529 * 1 should be dealt with via an fsync() system call for
530 * cases where write errors are important.
531 */
532 /* ARGSUSED */
533 static int
534 nfs_close(ap)
535 struct vop_close_args /* {
536 struct vnodeop_desc *a_desc;
537 struct vnode *a_vp;
538 int a_fflag;
539 struct ucred *a_cred;
540 struct proc *a_p;
541 } */ *ap;
542 {
543 register struct vnode *vp = ap->a_vp;
544 register struct nfsnode *np = VTONFS(vp);
545 int error = 0;
546
547 if (vp->v_type == VREG) {
548 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 &&
549 (np->n_flag & NMODIFIED)) {
550 if (NFS_ISV3(vp)) {
551 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0);
552 np->n_flag &= ~NMODIFIED;
553 } else
554 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1);
555 np->n_attrstamp = 0;
556 }
557 if (np->n_flag & NWRITEERR) {
558 np->n_flag &= ~NWRITEERR;
559 error = np->n_error;
560 }
561 }
562 return (error);
563 }
564
565 /*
566 * nfs getattr call from vfs.
567 */
568 static int
569 nfs_getattr(ap)
570 struct vop_getattr_args /* {
571 struct vnode *a_vp;
572 struct vattr *a_vap;
573 struct ucred *a_cred;
574 struct proc *a_p;
575 } */ *ap;
576 {
577 register struct vnode *vp = ap->a_vp;
578 register struct nfsnode *np = VTONFS(vp);
579 register caddr_t cp;
580 register u_int32_t *tl;
581 register int32_t t1, t2;
582 caddr_t bpos, dpos;
583 int error = 0;
584 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
585 int v3 = NFS_ISV3(vp);
586
587 /*
588 * Update local times for special files.
589 */
590 if (np->n_flag & (NACC | NUPD))
591 np->n_flag |= NCHG;
592 /*
593 * First look in the cache.
594 */
595 if (nfs_getattrcache(vp, ap->a_vap) == 0)
596 return (0);
597 nfsstats.rpccnt[NFSPROC_GETATTR]++;
598 nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3));
599 nfsm_fhtom(vp, v3);
600 nfsm_request(vp, NFSPROC_GETATTR, ap->a_p, ap->a_cred);
601 if (!error) {
602 nfsm_loadattr(vp, ap->a_vap);
603 }
604 nfsm_reqdone;
605 return (error);
606 }
607
608 /*
609 * nfs setattr call.
610 */
611 static int
612 nfs_setattr(ap)
613 struct vop_setattr_args /* {
614 struct vnodeop_desc *a_desc;
615 struct vnode *a_vp;
616 struct vattr *a_vap;
617 struct ucred *a_cred;
618 struct proc *a_p;
619 } */ *ap;
620 {
621 register struct vnode *vp = ap->a_vp;
622 register struct nfsnode *np = VTONFS(vp);
623 register struct vattr *vap = ap->a_vap;
624 int error = 0;
625 u_quad_t tsize;
626
627 #ifndef nolint
628 tsize = (u_quad_t)0;
629 #endif
630
631 /*
632 * Setting of flags is not supported.
633 */
634 if (vap->va_flags != VNOVAL)
635 return (EOPNOTSUPP);
636
637 /*
638 * Disallow write attempts if the filesystem is mounted read-only.
639 */
640 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
641 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
642 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
643 (vp->v_mount->mnt_flag & MNT_RDONLY))
644 return (EROFS);
645 if (vap->va_size != VNOVAL) {
646 switch (vp->v_type) {
647 case VDIR:
648 return (EISDIR);
649 case VCHR:
650 case VBLK:
651 case VSOCK:
652 case VFIFO:
653 if (vap->va_mtime.tv_sec == VNOVAL &&
654 vap->va_atime.tv_sec == VNOVAL &&
655 vap->va_mode == (mode_t)VNOVAL &&
656 vap->va_uid == (uid_t)VNOVAL &&
657 vap->va_gid == (gid_t)VNOVAL)
658 return (0);
659 vap->va_size = VNOVAL;
660 break;
661 default:
662 /*
663 * Disallow write attempts if the filesystem is
664 * mounted read-only.
665 */
666 if (vp->v_mount->mnt_flag & MNT_RDONLY)
667 return (EROFS);
668 vnode_pager_setsize(vp, vap->va_size);
669 if (np->n_flag & NMODIFIED) {
670 if (vap->va_size == 0)
671 error = nfs_vinvalbuf(vp, 0,
672 ap->a_cred, ap->a_p, 1);
673 else
674 error = nfs_vinvalbuf(vp, V_SAVE,
675 ap->a_cred, ap->a_p, 1);
676 if (error) {
677 vnode_pager_setsize(vp, np->n_size);
678 return (error);
679 }
680 }
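			/*
			 * Remember the old size so it can be restored if the
			 * SETATTR RPC below fails.
			 */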
681 tsize = np->n_size;
682 np->n_size = np->n_vattr.va_size = vap->va_size;
683 };
684 } else if ((vap->va_mtime.tv_sec != VNOVAL ||
685 vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NMODIFIED) &&
686 vp->v_type == VREG &&
687 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
688 ap->a_p, 1)) == EINTR)
689 return (error);
690 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p);
691 if (error && vap->va_size != VNOVAL) {
692 np->n_size = np->n_vattr.va_size = tsize;
693 vnode_pager_setsize(vp, np->n_size);
694 }
695 return (error);
696 }
697
698 /*
699 * Do an nfs setattr rpc.
700 */
701 static int
702 nfs_setattrrpc(vp, vap, cred, procp)
703 register struct vnode *vp;
704 register struct vattr *vap;
705 struct ucred *cred;
706 struct proc *procp;
707 {
708 register struct nfsv2_sattr *sp;
709 register caddr_t cp;
710 register int32_t t1, t2;
711 caddr_t bpos, dpos, cp2;
712 u_int32_t *tl;
713 int error = 0, wccflag = NFSV3_WCCRATTR;
714 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
715 int v3 = NFS_ISV3(vp);
716
717 nfsstats.rpccnt[NFSPROC_SETATTR]++;
718 nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
719 nfsm_fhtom(vp, v3);
720 if (v3) {
721 nfsm_v3attrbuild(vap, TRUE);
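		/*
		 * The trailing boolean is the v3 sattrguard3 "check" flag;
		 * send FALSE so the server applies the attributes without
		 * comparing guard ctimes.
		 */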
722 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
723 *tl = nfs_false;
724 } else {
725 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
726 if (vap->va_mode == (mode_t)VNOVAL)
727 sp->sa_mode = nfs_xdrneg1;
728 else
729 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
730 if (vap->va_uid == (uid_t)VNOVAL)
731 sp->sa_uid = nfs_xdrneg1;
732 else
733 sp->sa_uid = txdr_unsigned(vap->va_uid);
734 if (vap->va_gid == (gid_t)VNOVAL)
735 sp->sa_gid = nfs_xdrneg1;
736 else
737 sp->sa_gid = txdr_unsigned(vap->va_gid);
738 sp->sa_size = txdr_unsigned(vap->va_size);
739 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
740 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
741 }
742 nfsm_request(vp, NFSPROC_SETATTR, procp, cred);
743 if (v3) {
744 nfsm_wcc_data(vp, wccflag);
745 } else
746 nfsm_loadattr(vp, (struct vattr *)0);
747 nfsm_reqdone;
748 return (error);
749 }
750
751 /*
752 * nfs lookup call, one step at a time...
753 * First look in cache
754 * If not found, unlock the directory nfsnode and do the rpc
755 */
756 static int
757 nfs_lookup(ap)
758 struct vop_lookup_args /* {
759 struct vnodeop_desc *a_desc;
760 struct vnode *a_dvp;
761 struct vnode **a_vpp;
762 struct componentname *a_cnp;
763 } */ *ap;
764 {
765 struct componentname *cnp = ap->a_cnp;
766 struct vnode *dvp = ap->a_dvp;
767 struct vnode **vpp = ap->a_vpp;
768 int flags = cnp->cn_flags;
769 struct vnode *newvp;
770 u_int32_t *tl;
771 caddr_t cp;
772 int32_t t1, t2;
773 struct nfsmount *nmp;
774 caddr_t bpos, dpos, cp2;
775 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
776 long len;
777 nfsfh_t *fhp;
778 struct nfsnode *np;
779 int lockparent, wantparent, error = 0, attrflag, fhsize;
780 int v3 = NFS_ISV3(dvp);
781 struct proc *p = cnp->cn_proc;
782
783 *vpp = NULLVP;
784 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
785 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
786 return (EROFS);
787 if (dvp->v_type != VDIR)
788 return (ENOTDIR);
789 lockparent = flags & LOCKPARENT;
790 wantparent = flags & (LOCKPARENT|WANTPARENT);
791 nmp = VFSTONFS(dvp->v_mount);
792 np = VTONFS(dvp);
793 if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) {
794 struct vattr vattr;
795 int vpid;
796
797 if (error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, p)) {
798 *vpp = NULLVP;
799 return (error);
800 }
801
802 newvp = *vpp;
803 vpid = newvp->v_id;
804 /*
805 * See the comment starting `Step through' in ufs/ufs_lookup.c
806 * for an explanation of the locking protocol
807 */
808 if (dvp == newvp) {
809 VREF(newvp);
810 error = 0;
811 } else if (flags & ISDOTDOT) {
812 VOP_UNLOCK(dvp, 0, p);
813 error = vget(newvp, LK_EXCLUSIVE, p);
814 if (!error && lockparent && (flags & ISLASTCN))
815 error = vn_lock(dvp, LK_EXCLUSIVE, p);
816 } else {
817 error = vget(newvp, LK_EXCLUSIVE, p);
818 if (!lockparent || error || !(flags & ISLASTCN))
819 VOP_UNLOCK(dvp, 0, p);
820 }
821 if (!error) {
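			/*
			 * Revalidate the name cache hit: the vget()/vn_lock()
			 * calls above may have blocked, and if v_id changed the
			 * vnode was recycled for another file, so the cached
			 * entry must be purged and a real lookup done.
			 */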
822 if (vpid == newvp->v_id) {
823 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, p)
824 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
825 nfsstats.lookupcache_hits++;
826 if (cnp->cn_nameiop != LOOKUP &&
827 (flags & ISLASTCN))
828 cnp->cn_flags |= SAVENAME;
829 return (0);
830 }
831 cache_purge(newvp);
832 }
833 vput(newvp);
834 if (lockparent && dvp != newvp && (flags & ISLASTCN))
835 VOP_UNLOCK(dvp, 0, p);
836 }
837 error = vn_lock(dvp, LK_EXCLUSIVE, p);
838 *vpp = NULLVP;
839 if (error)
840 return (error);
841 }
842 error = 0;
843 newvp = NULLVP;
844 nfsstats.lookupcache_misses++;
845 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
846 len = cnp->cn_namelen;
847 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
848 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
849 nfsm_fhtom(dvp, v3);
850 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
851 nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_proc, cnp->cn_cred);
852 if (error) {
853 nfsm_postop_attr(dvp, attrflag);
854 m_freem(mrep);
855 goto nfsmout;
856 }
857 nfsm_getfh(fhp, fhsize, v3);
858
859 /*
860 * Handle RENAME case...
861 */
862 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) {
863 if (NFS_CMPFH(np, fhp, fhsize)) {
864 m_freem(mrep);
865 return (EISDIR);
866 }
867 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
868 if (error) {
869 m_freem(mrep);
870 return (error);
871 }
872 newvp = NFSTOV(np);
873 if (v3) {
874 nfsm_postop_attr(newvp, attrflag);
875 nfsm_postop_attr(dvp, attrflag);
876 } else
877 nfsm_loadattr(newvp, (struct vattr *)0);
878 *vpp = newvp;
879 m_freem(mrep);
880 cnp->cn_flags |= SAVENAME;
881 if (!lockparent)
882 VOP_UNLOCK(dvp, 0, p);
883 return (0);
884 }
885
886 if (flags & ISDOTDOT) {
887 VOP_UNLOCK(dvp, 0, p);
888 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
889 if (error) {
890 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
891 return (error);
892 }
893 newvp = NFSTOV(np);
894 if (lockparent && (flags & ISLASTCN) &&
895 (error = vn_lock(dvp, LK_EXCLUSIVE, p))) {
896 vput(newvp);
897 return (error);
898 }
899 } else if (NFS_CMPFH(np, fhp, fhsize)) {
900 VREF(dvp);
901 newvp = dvp;
902 } else {
903 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
904 if (error) {
905 m_freem(mrep);
906 return (error);
907 }
908 if (!lockparent || !(flags & ISLASTCN))
909 VOP_UNLOCK(dvp, 0, p);
910 newvp = NFSTOV(np);
911 }
912 if (v3) {
913 nfsm_postop_attr(newvp, attrflag);
914 nfsm_postop_attr(dvp, attrflag);
915 } else
916 nfsm_loadattr(newvp, (struct vattr *)0);
917 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
918 cnp->cn_flags |= SAVENAME;
919 if ((cnp->cn_flags & MAKEENTRY) &&
920 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
921 np->n_ctime = np->n_vattr.va_ctime.tv_sec;
922 cache_enter(dvp, newvp, cnp);
923 }
924 *vpp = newvp;
925 nfsm_reqdone;
926 if (error) {
927 if (newvp != NULLVP) {
928 vrele(newvp);
929 *vpp = NULLVP;
930 }
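		/*
		 * ENOENT on the last component of a CREATE or RENAME is the
		 * expected case; map it to EJUSTRETURN so the caller goes on
		 * to create the entry (or to EROFS on a read-only mount).
		 */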
931 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
932 (flags & ISLASTCN) && error == ENOENT) {
933 if (!lockparent)
934 VOP_UNLOCK(dvp, 0, p);
935 if (dvp->v_mount->mnt_flag & MNT_RDONLY)
936 error = EROFS;
937 else
938 error = EJUSTRETURN;
939 }
940 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
941 cnp->cn_flags |= SAVENAME;
942 }
943 return (error);
944 }
945
946 /*
947 * nfs read call.
948 * Just call nfs_bioread() to do the work.
949 */
950 static int
951 nfs_read(ap)
952 struct vop_read_args /* {
953 struct vnode *a_vp;
954 struct uio *a_uio;
955 int a_ioflag;
956 struct ucred *a_cred;
957 } */ *ap;
958 {
959 register struct vnode *vp = ap->a_vp;
960
961 if (vp->v_type != VREG)
962 return (EPERM);
963 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
964 }
965
966 /*
967 * nfs readlink call
968 */
969 static int
970 nfs_readlink(ap)
971 struct vop_readlink_args /* {
972 struct vnode *a_vp;
973 struct uio *a_uio;
974 struct ucred *a_cred;
975 } */ *ap;
976 {
977 register struct vnode *vp = ap->a_vp;
978
979 if (vp->v_type != VLNK)
980 return (EINVAL);
981 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
982 }
983
984 /*
985 * Do a readlink rpc.
986 * Called by nfs_doio() from below the buffer cache.
987 */
988 int
989 nfs_readlinkrpc(vp, uiop, cred)
990 register struct vnode *vp;
991 struct uio *uiop;
992 struct ucred *cred;
993 {
994 register u_int32_t *tl;
995 register caddr_t cp;
996 register int32_t t1, t2;
997 caddr_t bpos, dpos, cp2;
998 int error = 0, len, attrflag;
999 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1000 int v3 = NFS_ISV3(vp);
1001
1002 nfsstats.rpccnt[NFSPROC_READLINK]++;
1003 nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
1004 nfsm_fhtom(vp, v3);
1005 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_procp, cred);
1006 if (v3)
1007 nfsm_postop_attr(vp, attrflag);
1008 if (!error) {
1009 nfsm_strsiz(len, NFS_MAXPATHLEN);
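		/*
		 * A reply length of exactly NFS_MAXPATHLEN is suspect (some
		 * servers pad the link target); if a smaller non-zero size is
		 * cached for this node, use that instead.
		 */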
1010 if (len == NFS_MAXPATHLEN) {
1011 struct nfsnode *np = VTONFS(vp);
1012 if (np->n_size && np->n_size < NFS_MAXPATHLEN)
1013 len = np->n_size;
1014 }
1015 nfsm_mtouio(uiop, len);
1016 }
1017 nfsm_reqdone;
1018 return (error);
1019 }
1020
1021 /*
1022 * nfs read rpc call
1023 * Ditto above
1024 */
1025 int
1026 nfs_readrpc(vp, uiop, cred)
1027 register struct vnode *vp;
1028 struct uio *uiop;
1029 struct ucred *cred;
1030 {
1031 register u_int32_t *tl;
1032 register caddr_t cp;
1033 register int32_t t1, t2;
1034 caddr_t bpos, dpos, cp2;
1035 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1036 struct nfsmount *nmp;
1037 int error = 0, len, retlen, tsiz, eof, attrflag;
1038 int v3 = NFS_ISV3(vp);
1039
1040 #ifndef nolint
1041 eof = 0;
1042 #endif
1043 nmp = VFSTONFS(vp->v_mount);
1044 tsiz = uiop->uio_resid;
1045 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1046 return (EFBIG);
1047 while (tsiz > 0) {
1048 nfsstats.rpccnt[NFSPROC_READ]++;
1049 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1050 nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1051 nfsm_fhtom(vp, v3);
1052 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
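		/*
		 * V3 sends a 64-bit offset and a count; V2 sends a 32-bit
		 * offset, a count, and an unused "totalcount" word.
		 */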
1053 if (v3) {
1054 txdr_hyper(&uiop->uio_offset, tl);
1055 *(tl + 2) = txdr_unsigned(len);
1056 } else {
1057 *tl++ = txdr_unsigned(uiop->uio_offset);
1058 *tl++ = txdr_unsigned(len);
1059 *tl = 0;
1060 }
1061 nfsm_request(vp, NFSPROC_READ, uiop->uio_procp, cred);
1062 if (v3) {
1063 nfsm_postop_attr(vp, attrflag);
1064 if (error) {
1065 m_freem(mrep);
1066 goto nfsmout;
1067 }
1068 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1069 eof = fxdr_unsigned(int, *(tl + 1));
1070 } else
1071 nfsm_loadattr(vp, (struct vattr *)0);
1072 nfsm_strsiz(retlen, nmp->nm_rsize);
1073 nfsm_mtouio(uiop, retlen);
1074 m_freem(mrep);
1075 tsiz -= retlen;
1076 if (v3) {
1077 if (eof || retlen == 0)
1078 tsiz = 0;
1079 } else if (retlen < len)
1080 tsiz = 0;
1081 }
1082 nfsmout:
1083 return (error);
1084 }
1085
1086 /*
1087 * nfs write call
1088 */
1089 int
1090 nfs_writerpc(vp, uiop, cred, iomode, must_commit)
1091 register struct vnode *vp;
1092 register struct uio *uiop;
1093 struct ucred *cred;
1094 int *iomode, *must_commit;
1095 {
1096 register u_int32_t *tl;
1097 register caddr_t cp;
1098 register int32_t t1, t2, backup;
1099 caddr_t bpos, dpos, cp2;
1100 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1101 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1102 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
1103 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;
1104
1105 #ifndef DIAGNOSTIC
1106 if (uiop->uio_iovcnt != 1)
1107 panic("nfs: writerpc iovcnt > 1");
1108 #endif
1109 *must_commit = 0;
1110 tsiz = uiop->uio_resid;
1111 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1112 return (EFBIG);
1113 while (tsiz > 0) {
1114 nfsstats.rpccnt[NFSPROC_WRITE]++;
1115 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
1116 nfsm_reqhead(vp, NFSPROC_WRITE,
1117 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
1118 nfsm_fhtom(vp, v3);
1119 if (v3) {
1120 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1121 txdr_hyper(&uiop->uio_offset, tl);
1122 tl += 2;
1123 *tl++ = txdr_unsigned(len);
1124 *tl++ = txdr_unsigned(*iomode);
1125 *tl = txdr_unsigned(len);
1126 } else {
1127 register u_int32_t x;
1128
1129 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1130 /* Set both "begin" and "current" to non-garbage. */
1131 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1132 *tl++ = x; /* "begin offset" */
1133 *tl++ = x; /* "current offset" */
1134 x = txdr_unsigned(len);
1135 *tl++ = x; /* total to this offset */
1136 *tl = x; /* size of this write */
1137 }
1138 nfsm_uiotom(uiop, len);
1139 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_procp, cred);
1140 if (v3) {
1141 wccflag = NFSV3_WCCCHK;
1142 nfsm_wcc_data(vp, wccflag);
1143 if (!error) {
1144 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1145 + NFSX_V3WRITEVERF);
1146 rlen = fxdr_unsigned(int, *tl++);
1147 if (rlen == 0) {
1148 error = NFSERR_IO;
1149 m_freem(mrep);
1150 break;
1151 } else if (rlen < len) {
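					/*
					 * Short write: back the uio up over
					 * the unwritten bytes so they are
					 * sent again on the next pass.
					 */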
1152 backup = len - rlen;
1153 uiop->uio_iov->iov_base -= backup;
1154 uiop->uio_iov->iov_len += backup;
1155 uiop->uio_offset -= backup;
1156 uiop->uio_resid += backup;
1157 len = rlen;
1158 }
1159 commit = fxdr_unsigned(int, *tl++);
1160
1161 /*
1162  * Return the lowest commitment level
1163 * obtained by any of the RPCs.
1164 */
1165 if (committed == NFSV3WRITE_FILESYNC)
1166 committed = commit;
1167 else if (committed == NFSV3WRITE_DATASYNC &&
1168 commit == NFSV3WRITE_UNSTABLE)
1169 committed = commit;
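				/*
				 * Save the server's write verifier the first
				 * time it is seen; if it later changes
				 * (typically because the server rebooted),
				 * set *must_commit so the caller redoes
				 * uncommitted writes instead of just
				 * committing them.
				 */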
1170 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0){
1171 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1172 NFSX_V3WRITEVERF);
1173 nmp->nm_state |= NFSSTA_HASWRITEVERF;
1174 } else if (bcmp((caddr_t)tl,
1175 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
1176 *must_commit = 1;
1177 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
1178 NFSX_V3WRITEVERF);
1179 }
1180 }
1181 } else
1182 nfsm_loadattr(vp, (struct vattr *)0);
1183 if (wccflag)
1184 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
1185 m_freem(mrep);
1186 if (error)
1187 break;
1188 tsiz -= len;
1189 }
1190 nfsmout:
1191 if (vp->v_mount->mnt_flag & MNT_ASYNC)
1192 committed = NFSV3WRITE_FILESYNC;
1193 *iomode = committed;
1194 if (error)
1195 uiop->uio_resid = tsiz;
1196 return (error);
1197 }
1198
1199 /*
1200 * nfs mknod rpc
1201 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1202 * mode set to specify the file type and the size field for rdev.
1203 */
1204 static int
1205 nfs_mknodrpc(dvp, vpp, cnp, vap)
1206 register struct vnode *dvp;
1207 register struct vnode **vpp;
1208 register struct componentname *cnp;
1209 register struct vattr *vap;
1210 {
1211 register struct nfsv2_sattr *sp;
1212 register u_int32_t *tl;
1213 register caddr_t cp;
1214 register int32_t t1, t2;
1215 struct vnode *newvp = (struct vnode *)0;
1216 struct nfsnode *np = (struct nfsnode *)0;
1217 struct vattr vattr;
1218 char *cp2;
1219 caddr_t bpos, dpos;
1220 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1221 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1222 u_int32_t rdev;
1223 int v3 = NFS_ISV3(dvp);
1224
1225 if (vap->va_type == VCHR || vap->va_type == VBLK)
1226 rdev = txdr_unsigned(vap->va_rdev);
1227 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1228 rdev = nfs_xdrneg1;
1229 else {
1230 VOP_ABORTOP(dvp, cnp);
1231 return (EOPNOTSUPP);
1232 }
1233 if (error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) {
1234 VOP_ABORTOP(dvp, cnp);
1235 return (error);
1236 }
1237 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1238 nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1239  	    nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1240 nfsm_fhtom(dvp, v3);
1241 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1242 if (v3) {
1243 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1244 *tl++ = vtonfsv3_type(vap->va_type);
1245 nfsm_v3attrbuild(vap, FALSE);
1246 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1247 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1248 *tl++ = txdr_unsigned(major(vap->va_rdev));
1249 *tl = txdr_unsigned(minor(vap->va_rdev));
1250 }
1251 } else {
1252 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1253 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1254 sp->sa_uid = nfs_xdrneg1;
1255 sp->sa_gid = nfs_xdrneg1;
1256 sp->sa_size = rdev;
1257 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1258 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1259 }
1260 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_proc, cnp->cn_cred);
1261 if (!error) {
1262 nfsm_mtofh(dvp, newvp, v3, gotvp);
1263 if (!gotvp) {
1264 if (newvp) {
1265 vput(newvp);
1266 newvp = (struct vnode *)0;
1267 }
1268 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1269 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1270 if (!error)
1271 newvp = NFSTOV(np);
1272 }
1273 }
1274 if (v3)
1275 nfsm_wcc_data(dvp, wccflag);
1276 nfsm_reqdone;
1277 if (error) {
1278 if (newvp)
1279 vput(newvp);
1280 } else {
1281 if (cnp->cn_flags & MAKEENTRY)
1282 cache_enter(dvp, newvp, cnp);
1283 *vpp = newvp;
1284 }
1285 zfree(namei_zone, cnp->cn_pnbuf);
1286 VTONFS(dvp)->n_flag |= NMODIFIED;
1287 if (!wccflag)
1288 VTONFS(dvp)->n_attrstamp = 0;
1289 return (error);
1290 }
1291
1292 /*
1293 * nfs mknod vop
1294 * just call nfs_mknodrpc() to do the work.
1295 */
1296 /* ARGSUSED */
1297 static int
1298 nfs_mknod(ap)
1299 struct vop_mknod_args /* {
1300 struct vnode *a_dvp;
1301 struct vnode **a_vpp;
1302 struct componentname *a_cnp;
1303 struct vattr *a_vap;
1304 } */ *ap;
1305 {
1306 struct vnode *newvp;
1307 int error;
1308
1309 error = nfs_mknodrpc(ap->a_dvp, &newvp, ap->a_cnp, ap->a_vap);
1310 if (!error)
1311 vput(newvp);
1312 return (error);
1313 }
1314
1315 static u_long create_verf;
1316 /*
1317 * nfs file create call
1318 */
1319 static int
1320 nfs_create(ap)
1321 struct vop_create_args /* {
1322 struct vnode *a_dvp;
1323 struct vnode **a_vpp;
1324 struct componentname *a_cnp;
1325 struct vattr *a_vap;
1326 } */ *ap;
1327 {
1328 register struct vnode *dvp = ap->a_dvp;
1329 register struct vattr *vap = ap->a_vap;
1330 register struct componentname *cnp = ap->a_cnp;
1331 register struct nfsv2_sattr *sp;
1332 register u_int32_t *tl;
1333 register caddr_t cp;
1334 register int32_t t1, t2;
1335 struct nfsnode *np = (struct nfsnode *)0;
1336 struct vnode *newvp = (struct vnode *)0;
1337 caddr_t bpos, dpos, cp2;
1338 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1339 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1340 struct vattr vattr;
1341 int v3 = NFS_ISV3(dvp);
1342
1343 /*
1344 * Oops, not for me..
1345 */
1346 if (vap->va_type == VSOCK)
1347 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1348
1349 if (error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) {
1350 VOP_ABORTOP(dvp, cnp);
1351 return (error);
1352 }
1353 if (vap->va_vaflags & VA_EXCLUSIVE)
1354 fmode |= O_EXCL;
1355 again:
1356 nfsstats.rpccnt[NFSPROC_CREATE]++;
1357 nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1358 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1359 nfsm_fhtom(dvp, v3);
1360 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1361 if (v3) {
1362 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1363 if (fmode & O_EXCL) {
1364 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
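			/*
			 * Build the exclusive-create verifier from the host's
			 * primary IP address (when one is configured) plus a
			 * counter, so the server can recognize a retransmitted
			 * create as the same request.
			 */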
1365 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1366 #ifdef INET
1367 if (!TAILQ_EMPTY(&in_ifaddrhead))
1368 *tl++ = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr;
1369 else
1370 #endif
1371 *tl++ = create_verf;
1372 *tl = ++create_verf;
1373 } else {
1374 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1375 nfsm_v3attrbuild(vap, FALSE);
1376 }
1377 } else {
1378 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1379 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1380 sp->sa_uid = nfs_xdrneg1;
1381 sp->sa_gid = nfs_xdrneg1;
1382 sp->sa_size = 0;
1383 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1384 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1385 }
1386 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_proc, cnp->cn_cred);
1387 if (!error) {
1388 nfsm_mtofh(dvp, newvp, v3, gotvp);
1389 if (!gotvp) {
1390 if (newvp) {
1391 vput(newvp);
1392 newvp = (struct vnode *)0;
1393 }
1394 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1395 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
1396 if (!error)
1397 newvp = NFSTOV(np);
1398 }
1399 }
1400 if (v3)
1401 nfsm_wcc_data(dvp, wccflag);
1402 nfsm_reqdone;
1403 if (error) {
1404 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
1405 fmode &= ~O_EXCL;
1406 goto again;
1407 }
1408 if (newvp)
1409 vput(newvp);
1410 } else if (v3 && (fmode & O_EXCL)) {
1411 /*
1412 * We are normally called with only a partially initialized
1413  * VAP. Since the NFSv3 spec says that the server may use the
1414 * file attributes to store the verifier, the spec requires
1415 * us to do a SETATTR RPC. FreeBSD servers store the verifier
1416 * in atime, but we can't really assume that all servers will
1417 * so we ensure that our SETATTR sets both atime and mtime.
1418 */
1419 if (vap->va_mtime.tv_sec == VNOVAL)
1420 getnanotime(&vap->va_mtime);
1421 if (vap->va_atime.tv_sec == VNOVAL)
1422 vap->va_atime = vap->va_mtime;
1423 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc);
1424 }
1425 if (!error) {
1426 if (cnp->cn_flags & MAKEENTRY)
1427 cache_enter(dvp, newvp, cnp);
1428 *ap->a_vpp = newvp;
1429 }
1430 zfree(namei_zone, cnp->cn_pnbuf);
1431 VTONFS(dvp)->n_flag |= NMODIFIED;
1432 if (!wccflag)
1433 VTONFS(dvp)->n_attrstamp = 0;
1434 return (error);
1435 }
1436
1437 /*
1438 * nfs file remove call
1439  * To try to make nfs semantics closer to ufs semantics, a file that has
1440 * other processes using the vnode is renamed instead of removed and then
1441 * removed later on the last close.
1442 * - If v_usecount > 1
1443 * If a rename is not already in the works
1444 * call nfs_sillyrename() to set it up
1445 * else
1446 * do the remove rpc
1447 */
1448 static int
1449 nfs_remove(ap)
1450 struct vop_remove_args /* {
1451 struct vnodeop_desc *a_desc;
1452 struct vnode * a_dvp;
1453 struct vnode * a_vp;
1454 struct componentname * a_cnp;
1455 } */ *ap;
1456 {
1457 register struct vnode *vp = ap->a_vp;
1458 register struct vnode *dvp = ap->a_dvp;
1459 register struct componentname *cnp = ap->a_cnp;
1460 register struct nfsnode *np = VTONFS(vp);
1461 int error = 0;
1462 struct vattr vattr;
1463
1464 #ifndef DIAGNOSTIC
1465 if ((cnp->cn_flags & HASBUF) == 0)
1466 panic("nfs_remove: no name");
1467 if (vp->v_usecount < 1)
1468 panic("nfs_remove: bad v_usecount");
1469 #endif
1470 if (vp->v_type == VDIR)
1471 error = EPERM;
1472 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1473 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 &&
1474 vattr.va_nlink > 1)) {
1475 /*
1476 * Purge the name cache so that the chance of a lookup for
1477 * the name succeeding while the remove is in progress is
1478 * minimized. Without node locking it can still happen, such
1479 * that an I/O op returns ESTALE, but since you get this if
1480  * that an I/O op returns ESTALE, but that can happen anyway if
1481  * another host removes the file, so it is tolerated.
1482 cache_purge(vp);
1483 /*
1484 * throw away biocache buffers, mainly to avoid
1485 * unnecessary delayed writes later.
1486 */
1487 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc, 1);
1488 /* Do the rpc */
1489 if (error != EINTR)
1490 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1491 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc);
1492 /*
1493 * Kludge City: If the first reply to the remove rpc is lost..
1494 * the reply to the retransmitted request will be ENOENT
1495  * since the file was in fact removed.
1496 * Therefore, we cheat and return success.
1497 */
1498 if (error == ENOENT)
1499 error = 0;
1500 } else if (!np->n_sillyrename)
1501 error = nfs_sillyrename(dvp, vp, cnp);
1502 zfree(namei_zone, cnp->cn_pnbuf);
1503 np->n_attrstamp = 0;
1504 return (error);
1505 }
1506
1507 /*
1508 * nfs file remove rpc called from nfs_inactive
1509 */
1510 int
1511 nfs_removeit(sp)
1512 register struct sillyrename *sp;
1513 {
1514
1515 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1516 (struct proc *)0));
1517 }
1518
1519 /*
1520 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1521 */
1522 static int
1523 nfs_removerpc(dvp, name, namelen, cred, proc)
1524 register struct vnode *dvp;
1525 const char *name;
1526 int namelen;
1527 struct ucred *cred;
1528 struct proc *proc;
1529 {
1530 register u_int32_t *tl;
1531 register caddr_t cp;
1532 register int32_t t1, t2;
1533 caddr_t bpos, dpos, cp2;
1534 int error = 0, wccflag = NFSV3_WCCRATTR;
1535 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1536 int v3 = NFS_ISV3(dvp);
1537
1538 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1539 nfsm_reqhead(dvp, NFSPROC_REMOVE,
1540 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1541 nfsm_fhtom(dvp, v3);
1542 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1543 nfsm_request(dvp, NFSPROC_REMOVE, proc, cred);
1544 if (v3)
1545 nfsm_wcc_data(dvp, wccflag);
1546 nfsm_reqdone;
1547 VTONFS(dvp)->n_flag |= NMODIFIED;
1548 if (!wccflag)
1549 VTONFS(dvp)->n_attrstamp = 0;
1550 return (error);
1551 }
1552
1553 /*
1554 * nfs file rename call
1555 */
1556 static int
1557 nfs_rename(ap)
1558 struct vop_rename_args /* {
1559 struct vnode *a_fdvp;
1560 struct vnode *a_fvp;
1561 struct componentname *a_fcnp;
1562 struct vnode *a_tdvp;
1563 struct vnode *a_tvp;
1564 struct componentname *a_tcnp;
1565 } */ *ap;
1566 {
1567 register struct vnode *fvp = ap->a_fvp;
1568 register struct vnode *tvp = ap->a_tvp;
1569 register struct vnode *fdvp = ap->a_fdvp;
1570 register struct vnode *tdvp = ap->a_tdvp;
1571 register struct componentname *tcnp = ap->a_tcnp;
1572 register struct componentname *fcnp = ap->a_fcnp;
1573 int error;
1574
1575 #ifndef DIAGNOSTIC
1576 if ((tcnp->cn_flags & HASBUF) == 0 ||
1577 (fcnp->cn_flags & HASBUF) == 0)
1578 panic("nfs_rename: no name");
1579 #endif
1580 /* Check for cross-device rename */
1581 if ((fvp->v_mount != tdvp->v_mount) ||
1582 (tvp && (fvp->v_mount != tvp->v_mount))) {
1583 error = EXDEV;
1584 goto out;
1585 }
1586
1587 /*
1588 * We have to flush B_DELWRI data prior to renaming
1589 * the file. If we don't, the delayed-write buffers
1590 * can be flushed out later after the file has gone stale
1591 * under NFSV3. NFSV2 does not have this problem because
1592 * ( as far as I can tell ) it flushes dirty buffers more
1593 * often.
1594 */
1595
1596 VOP_FSYNC(fvp, fcnp->cn_cred, MNT_WAIT, fcnp->cn_proc);
1597 if (tvp)
1598 VOP_FSYNC(tvp, tcnp->cn_cred, MNT_WAIT, tcnp->cn_proc);
1599
1600 /*
1601 * If the tvp exists and is in use, sillyrename it before doing the
1602 * rename of the new file over it.
1603 * XXX Can't sillyrename a directory.
1604 */
1605 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1606 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1607 vput(tvp);
1608 tvp = NULL;
1609 }
1610
1611 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1612 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1613 tcnp->cn_proc);
1614
1615 if (fvp->v_type == VDIR) {
1616 if (tvp != NULL && tvp->v_type == VDIR)
1617 cache_purge(tdvp);
1618 cache_purge(fdvp);
1619 }
1620
1621 out:
1622 if (tdvp == tvp)
1623 vrele(tdvp);
1624 else
1625 vput(tdvp);
1626 if (tvp)
1627 vput(tvp);
1628 vrele(fdvp);
1629 vrele(fvp);
1630 /*
1631 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1632 */
1633 if (error == ENOENT)
1634 error = 0;
1635 return (error);
1636 }
1637
1638 /*
1639 * nfs file rename rpc called from nfs_remove() above
1640 */
1641 static int
1642 nfs_renameit(sdvp, scnp, sp)
1643 struct vnode *sdvp;
1644 struct componentname *scnp;
1645 register struct sillyrename *sp;
1646 {
1647 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1648 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_proc));
1649 }
1650
1651 /*
1652 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1653 */
1654 static int
1655 nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, proc)
1656 register struct vnode *fdvp;
1657 const char *fnameptr;
1658 int fnamelen;
1659 register struct vnode *tdvp;
1660 const char *tnameptr;
1661 int tnamelen;
1662 struct ucred *cred;
1663 struct proc *proc;
1664 {
1665 register u_int32_t *tl;
1666 register caddr_t cp;
1667 register int32_t t1, t2;
1668 caddr_t bpos, dpos, cp2;
1669 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1670 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1671 int v3 = NFS_ISV3(fdvp);
1672
1673 nfsstats.rpccnt[NFSPROC_RENAME]++;
1674 nfsm_reqhead(fdvp, NFSPROC_RENAME,
1675 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1676 nfsm_rndup(tnamelen));
1677 nfsm_fhtom(fdvp, v3);
1678 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1679 nfsm_fhtom(tdvp, v3);
1680 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1681 nfsm_request(fdvp, NFSPROC_RENAME, proc, cred);
1682 if (v3) {
1683 nfsm_wcc_data(fdvp, fwccflag);
1684 nfsm_wcc_data(tdvp, twccflag);
1685 }
1686 nfsm_reqdone;
1687 VTONFS(fdvp)->n_flag |= NMODIFIED;
1688 VTONFS(tdvp)->n_flag |= NMODIFIED;
1689 if (!fwccflag)
1690 VTONFS(fdvp)->n_attrstamp = 0;
1691 if (!twccflag)
1692 VTONFS(tdvp)->n_attrstamp = 0;
1693 return (error);
1694 }
1695
1696 /*
1697 * nfs hard link create call
1698 */
1699 static int
1700 nfs_link(ap)
1701 struct vop_link_args /* {
1702 struct vnode *a_tdvp;
1703 struct vnode *a_vp;
1704 struct componentname *a_cnp;
1705 } */ *ap;
1706 {
1707 register struct vnode *vp = ap->a_vp;
1708 register struct vnode *tdvp = ap->a_tdvp;
1709 register struct componentname *cnp = ap->a_cnp;
1710 register u_int32_t *tl;
1711 register caddr_t cp;
1712 register int32_t t1, t2;
1713 caddr_t bpos, dpos, cp2;
1714 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1715 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1716 int v3;
1717
1718 if (vp->v_mount != tdvp->v_mount) {
1719 VOP_ABORTOP(tdvp, cnp);
1720 return (EXDEV);
1721 }
1722
1723 /*
1724 * Push all writes to the server, so that the attribute cache
1725 * doesn't get "out of sync" with the server.
1726 * XXX There should be a better way!
1727 */
1728 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc);
1729
1730 v3 = NFS_ISV3(vp);
1731 nfsstats.rpccnt[NFSPROC_LINK]++;
1732 nfsm_reqhead(vp, NFSPROC_LINK,
1733 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1734 nfsm_fhtom(vp, v3);
1735 nfsm_fhtom(tdvp, v3);
1736 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1737 nfsm_request(vp, NFSPROC_LINK, cnp->cn_proc, cnp->cn_cred);
1738 if (v3) {
1739 nfsm_postop_attr(vp, attrflag);
1740 nfsm_wcc_data(tdvp, wccflag);
1741 }
1742 nfsm_reqdone;
1743 zfree(namei_zone, cnp->cn_pnbuf);
1744 VTONFS(tdvp)->n_flag |= NMODIFIED;
1745 if (!attrflag)
1746 VTONFS(vp)->n_attrstamp = 0;
1747 if (!wccflag)
1748 VTONFS(tdvp)->n_attrstamp = 0;
1749 /*
1750 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1751 */
1752 if (error == EEXIST)
1753 error = 0;
1754 return (error);
1755 }
1756
1757 /*
1758 * nfs symbolic link create call
1759 */
1760 static int
1761 nfs_symlink(ap)
1762 struct vop_symlink_args /* {
1763 struct vnode *a_dvp;
1764 struct vnode **a_vpp;
1765 struct componentname *a_cnp;
1766 struct vattr *a_vap;
1767 char *a_target;
1768 } */ *ap;
1769 {
1770 register struct vnode *dvp = ap->a_dvp;
1771 register struct vattr *vap = ap->a_vap;
1772 register struct componentname *cnp = ap->a_cnp;
1773 register struct nfsv2_sattr *sp;
1774 register u_int32_t *tl;
1775 register caddr_t cp;
1776 register int32_t t1, t2;
1777 caddr_t bpos, dpos, cp2;
1778 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
1779 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1780 struct vnode *newvp = (struct vnode *)0;
1781 int v3 = NFS_ISV3(dvp);
1782
1783 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
1784 slen = strlen(ap->a_target);
1785 nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
1786 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
1787 nfsm_fhtom(dvp, v3);
1788 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1789 if (v3) {
1790 nfsm_v3attrbuild(vap, FALSE);
1791 }
1792 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
1793 if (!v3) {
1794 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1795 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
1796 sp->sa_uid = nfs_xdrneg1;
1797 sp->sa_gid = nfs_xdrneg1;
1798 sp->sa_size = nfs_xdrneg1;
1799 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1800 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1801 }
1802 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_proc, cnp->cn_cred);
1803 if (v3) {
1804 if (!error)
1805 nfsm_mtofh(dvp, newvp, v3, gotvp);
1806 nfsm_wcc_data(dvp, wccflag);
1807 }
1808 nfsm_reqdone;
1809 if (newvp)
1810 vput(newvp);
1811 zfree(namei_zone, cnp->cn_pnbuf);
1812 VTONFS(dvp)->n_flag |= NMODIFIED;
1813 if (!wccflag)
1814 VTONFS(dvp)->n_attrstamp = 0;
1815 /*
1816 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1817 */
1818 if (error == EEXIST)
1819 error = 0;
1820 return (error);
1821 }
1822
1823 /*
1824 * nfs make dir call
1825 */
1826 static int
1827 nfs_mkdir(ap)
1828 struct vop_mkdir_args /* {
1829 struct vnode *a_dvp;
1830 struct vnode **a_vpp;
1831 struct componentname *a_cnp;
1832 struct vattr *a_vap;
1833 } */ *ap;
1834 {
1835 register struct vnode *dvp = ap->a_dvp;
1836 register struct vattr *vap = ap->a_vap;
1837 register struct componentname *cnp = ap->a_cnp;
1838 register struct nfsv2_sattr *sp;
1839 register u_int32_t *tl;
1840 register caddr_t cp;
1841 register int32_t t1, t2;
1842 register int len;
1843 struct nfsnode *np = (struct nfsnode *)0;
1844 struct vnode *newvp = (struct vnode *)0;
1845 caddr_t bpos, dpos, cp2;
1846 int error = 0, wccflag = NFSV3_WCCRATTR;
1847 int gotvp = 0;
1848 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1849 struct vattr vattr;
1850 int v3 = NFS_ISV3(dvp);
1851
1852 if (error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) {
1853 VOP_ABORTOP(dvp, cnp);
1854 return (error);
1855 }
1856 len = cnp->cn_namelen;
1857 nfsstats.rpccnt[NFSPROC_MKDIR]++;
1858 nfsm_reqhead(dvp, NFSPROC_MKDIR,
1859 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
1860 nfsm_fhtom(dvp, v3);
1861 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
1862 if (v3) {
1863 nfsm_v3attrbuild(vap, FALSE);
1864 } else {
1865 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1866 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
1867 sp->sa_uid = nfs_xdrneg1;
1868 sp->sa_gid = nfs_xdrneg1;
1869 sp->sa_size = nfs_xdrneg1;
1870 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1871 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1872 }
1873 nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_proc, cnp->cn_cred);
1874 if (!error)
1875 nfsm_mtofh(dvp, newvp, v3, gotvp);
1876 if (v3)
1877 nfsm_wcc_data(dvp, wccflag);
1878 nfsm_reqdone;
1879 VTONFS(dvp)->n_flag |= NMODIFIED;
1880 if (!wccflag)
1881 VTONFS(dvp)->n_attrstamp = 0;
1882 /*
1883 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry
1884 * if we can succeed in looking up the directory.
1885 */
1886 if (error == EEXIST || (!error && !gotvp)) {
1887 if (newvp) {
1888 vrele(newvp);
1889 newvp = (struct vnode *)0;
1890 }
1891 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
1892 cnp->cn_proc, &np);
1893 if (!error) {
1894 newvp = NFSTOV(np);
1895 if (newvp->v_type != VDIR)
1896 error = EEXIST;
1897 }
1898 }
1899 if (error) {
1900 if (newvp)
1901 vrele(newvp);
1902 } else
1903 *ap->a_vpp = newvp;
1904 zfree(namei_zone, cnp->cn_pnbuf);
1905 return (error);
1906 }
1907
1908 /*
1909 * nfs remove directory call
1910 */
1911 static int
1912 nfs_rmdir(ap)
1913 struct vop_rmdir_args /* {
1914 struct vnode *a_dvp;
1915 struct vnode *a_vp;
1916 struct componentname *a_cnp;
1917 } */ *ap;
1918 {
1919 register struct vnode *vp = ap->a_vp;
1920 register struct vnode *dvp = ap->a_dvp;
1921 register struct componentname *cnp = ap->a_cnp;
1922 register u_int32_t *tl;
1923 register caddr_t cp;
1924 register int32_t t1, t2;
1925 caddr_t bpos, dpos, cp2;
1926 int error = 0, wccflag = NFSV3_WCCRATTR;
1927 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
1928 int v3 = NFS_ISV3(dvp);
1929
1930 if (dvp == vp)
1931 return (EINVAL);
1932 nfsstats.rpccnt[NFSPROC_RMDIR]++;
1933 nfsm_reqhead(dvp, NFSPROC_RMDIR,
1934 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
1935 nfsm_fhtom(dvp, v3);
1936 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1937 nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_proc, cnp->cn_cred);
1938 if (v3)
1939 nfsm_wcc_data(dvp, wccflag);
1940 nfsm_reqdone;
1941 zfree(namei_zone, cnp->cn_pnbuf);
1942 VTONFS(dvp)->n_flag |= NMODIFIED;
1943 if (!wccflag)
1944 VTONFS(dvp)->n_attrstamp = 0;
1945 cache_purge(dvp);
1946 cache_purge(vp);
1947 /*
1948 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1949 */
1950 if (error == ENOENT)
1951 error = 0;
1952 return (error);
1953 }
1954
1955 /*
1956 * nfs readdir call
1957 */
1958 static int
1959 nfs_readdir(ap)
1960 struct vop_readdir_args /* {
1961 struct vnode *a_vp;
1962 struct uio *a_uio;
1963 struct ucred *a_cred;
1964 } */ *ap;
1965 {
1966 register struct vnode *vp = ap->a_vp;
1967 register struct nfsnode *np = VTONFS(vp);
1968 register struct uio *uio = ap->a_uio;
1969 int tresid, error;
1970 struct vattr vattr;
1971
1972 if (vp->v_type != VDIR)
1973 return (EPERM);
1974 /*
1975 * First, check for hit on the EOF offset cache
1976 */
1977 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
1978 (np->n_flag & NMODIFIED) == 0) {
1979 if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) {
1980 if (NQNFS_CKCACHABLE(vp, ND_READ)) {
1981 nfsstats.direofcache_hits++;
1982 return (0);
1983 }
1984 } else if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 &&
1985 np->n_mtime == vattr.va_mtime.tv_sec) {
1986 nfsstats.direofcache_hits++;
1987 return (0);
1988 }
1989 }
1990
1991 /*
1992 * Call nfs_bioread() to do the real work.
1993 */
1994 tresid = uio->uio_resid;
1995 error = nfs_bioread(vp, uio, 0, ap->a_cred, 0);
1996
1997 if (!error && uio->uio_resid == tresid)
1998 nfsstats.direofcache_misses++;
1999 return (error);
2000 }
2001
2002 /*
2003 * Readdir rpc call.
2004 * Called from below the buffer cache by nfs_doio().
2005 */
2006 int
2007 nfs_readdirrpc(vp, uiop, cred)
2008 struct vnode *vp;
2009 register struct uio *uiop;
2010 struct ucred *cred;
2011
2012 {
2013 register int len, left;
2014 register struct dirent *dp;
2015 register u_int32_t *tl;
2016 register caddr_t cp;
2017 register int32_t t1, t2;
2018 register nfsuint64 *cookiep;
2019 caddr_t bpos, dpos, cp2;
2020 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2021 nfsuint64 cookie;
2022 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2023 struct nfsnode *dnp = VTONFS(vp);
2024 u_quad_t fileno;
2025 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
2026 int attrflag;
2027 int v3 = NFS_ISV3(vp);
2028
2029 #ifndef nolint
2030 dp = (struct dirent *)0;
2031 #endif
2032 #ifndef DIAGNOSTIC
2033 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (NFS_DIRBLKSIZ - 1)) ||
2034 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1)))
2035 panic("nfs readdirrpc bad uio");
2036 #endif
2037
2038 /*
2039 * If there is no cookie, assume directory was stale.
2040 */
2041 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2042 if (cookiep)
2043 cookie = *cookiep;
2044 else
2045 return (NFSERR_BAD_COOKIE);
2046 /*
2047 * Loop around doing readdir rpc's of size nm_readdirsize
2048 * truncated to a multiple of DIRBLKSIZ.
2049 * The stopping criteria is EOF or buffer full.
2050 */
2051 while (more_dirs && bigenough) {
2052 nfsstats.rpccnt[NFSPROC_READDIR]++;
2053 nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
2054 NFSX_READDIR(v3));
2055 nfsm_fhtom(vp, v3);
2056 if (v3) {
2057 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2058 *tl++ = cookie.nfsuquad[0];
2059 *tl++ = cookie.nfsuquad[1];
2060 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2061 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2062 } else {
2063 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2064 *tl++ = cookie.nfsuquad[0];
2065 }
2066 *tl = txdr_unsigned(nmp->nm_readdirsize);
2067 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_procp, cred);
2068 if (v3) {
2069 nfsm_postop_attr(vp, attrflag);
2070 if (!error) {
2071 nfsm_dissect(tl, u_int32_t *,
2072 2 * NFSX_UNSIGNED);
2073 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2074 dnp->n_cookieverf.nfsuquad[1] = *tl;
2075 } else {
2076 m_freem(mrep);
2077 goto nfsmout;
2078 }
2079 }
2080 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2081 more_dirs = fxdr_unsigned(int, *tl);
2082
2083 /* loop thru the dir entries, doctoring them to 4bsd form */
2084 while (more_dirs && bigenough) {
2085 if (v3) {
2086 nfsm_dissect(tl, u_int32_t *,
2087 3 * NFSX_UNSIGNED);
2088 fxdr_hyper(tl, &fileno);
2089 len = fxdr_unsigned(int, *(tl + 2));
2090 } else {
2091 nfsm_dissect(tl, u_int32_t *,
2092 2 * NFSX_UNSIGNED);
2093 fileno = fxdr_unsigned(u_quad_t, *tl++);
2094 len = fxdr_unsigned(int, *tl);
2095 }
2096 if (len <= 0 || len > NFS_MAXNAMLEN) {
2097 error = EBADRPC;
2098 m_freem(mrep);
2099 goto nfsmout;
2100 }
2101 tlen = nfsm_rndup(len);
2102 if (tlen == len)
2103 tlen += 4; /* To ensure null termination */
2104 left = DIRBLKSIZ - blksiz;
2105 if ((tlen + DIRHDSIZ) > left) {
2106 dp->d_reclen += left;
2107 uiop->uio_iov->iov_base += left;
2108 uiop->uio_iov->iov_len -= left;
2109 uiop->uio_offset += left;
2110 uiop->uio_resid -= left;
2111 blksiz = 0;
2112 }
2113 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2114 bigenough = 0;
2115 if (bigenough) {
2116 dp = (struct dirent *)uiop->uio_iov->iov_base;
2117 dp->d_fileno = (int)fileno;
2118 dp->d_namlen = len;
2119 dp->d_reclen = tlen + DIRHDSIZ;
2120 dp->d_type = DT_UNKNOWN;
2121 blksiz += dp->d_reclen;
2122 if (blksiz == DIRBLKSIZ)
2123 blksiz = 0;
2124 uiop->uio_offset += DIRHDSIZ;
2125 uiop->uio_resid -= DIRHDSIZ;
2126 uiop->uio_iov->iov_base += DIRHDSIZ;
2127 uiop->uio_iov->iov_len -= DIRHDSIZ;
2128 nfsm_mtouio(uiop, len);
2129 cp = uiop->uio_iov->iov_base;
2130 tlen -= len;
2131 *cp = '\0'; /* null terminate */
2132 uiop->uio_iov->iov_base += tlen;
2133 uiop->uio_iov->iov_len -= tlen;
2134 uiop->uio_offset += tlen;
2135 uiop->uio_resid -= tlen;
2136 } else
2137 nfsm_adv(nfsm_rndup(len));
2138 if (v3) {
2139 nfsm_dissect(tl, u_int32_t *,
2140 3 * NFSX_UNSIGNED);
2141 } else {
2142 nfsm_dissect(tl, u_int32_t *,
2143 2 * NFSX_UNSIGNED);
2144 }
2145 if (bigenough) {
2146 cookie.nfsuquad[0] = *tl++;
2147 if (v3)
2148 cookie.nfsuquad[1] = *tl++;
2149 } else if (v3)
2150 tl += 2;
2151 else
2152 tl++;
2153 more_dirs = fxdr_unsigned(int, *tl);
2154 }
2155 /*
2156 * If at end of rpc data, get the eof boolean
2157 */
2158 if (!more_dirs) {
2159 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2160 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2161 }
2162 m_freem(mrep);
2163 }
2164 /*
2165 * Fill last record, if any, out to a multiple of DIRBLKSIZ
2166 * by increasing d_reclen for the last record.
2167 */
2168 if (blksiz > 0) {
2169 left = DIRBLKSIZ - blksiz;
2170 dp->d_reclen += left;
2171 uiop->uio_iov->iov_base += left;
2172 uiop->uio_iov->iov_len -= left;
2173 uiop->uio_offset += left;
2174 uiop->uio_resid -= left;
2175 }
2176
2177 /*
2178 * We are now either at the end of the directory or have filled the
2179 * block.
2180 */
2181 if (bigenough)
2182 dnp->n_direofoffset = uiop->uio_offset;
2183 else {
2184 if (uiop->uio_resid > 0)
2185 printf("EEK! readdirrpc resid > 0\n");
2186 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2187 *cookiep = cookie;
2188 }
2189 nfsmout:
2190 return (error);
2191 }
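/*
 * Illustrative sketch (not from the original source): how the readdir code
 * above sizes each 4.4BSD dirent record.  nfsm_rndup() rounds the name
 * length up to a multiple of 4, and when the length is already a multiple
 * of 4 an extra 4 bytes are reserved so the terminating '\0' always fits;
 * the fixed header size is then added.  SKETCH_DIRHDSIZ below is only an
 * assumed stand-in for the kernel's DIRHDSIZ, and the whole block is a
 * small user-space program, not kernel code.
 */
#include <stdio.h>

#define SKETCH_RNDUP(n)		(((n) + 3) & ~3)	/* same rounding as nfsm_rndup() */
#define SKETCH_DIRHDSIZ		8			/* assumed dirent header size */

static int
sketch_dirent_reclen(int namelen)
{
	int tlen = SKETCH_RNDUP(namelen);

	if (tlen == namelen)
		tlen += 4;		/* ensure room for null termination */
	return (tlen + SKETCH_DIRHDSIZ);
}

int
main(void)
{
	/* A 3-char name pads to 4+8=12 bytes; a 4-char name needs 8+8=16. */
	printf("reclen(3) = %d, reclen(4) = %d\n",
	    sketch_dirent_reclen(3), sketch_dirent_reclen(4));
	return (0);
}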
2192
2193 /*
2194 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2195 */
2196 int
2197 nfs_readdirplusrpc(vp, uiop, cred)
2198 struct vnode *vp;
2199 register struct uio *uiop;
2200 struct ucred *cred;
2201 {
2202 register int len, left;
2203 register struct dirent *dp;
2204 register u_int32_t *tl;
2205 register caddr_t cp;
2206 register int32_t t1, t2;
2207 register struct vnode *newvp;
2208 register nfsuint64 *cookiep;
2209 caddr_t bpos, dpos, cp2, dpossav1, dpossav2;
2210 struct mbuf *mreq, *mrep, *md, *mb, *mb2, *mdsav1, *mdsav2;
2211 struct nameidata nami, *ndp = &nami;
2212 struct componentname *cnp = &ndp->ni_cnd;
2213 nfsuint64 cookie;
2214 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2215 struct nfsnode *dnp = VTONFS(vp), *np;
2216 nfsfh_t *fhp;
2217 u_quad_t fileno;
2218 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2219 int attrflag, fhsize;
2220
2221 #ifndef nolint
2222 dp = (struct dirent *)0;
2223 #endif
2224 #ifndef DIAGNOSTIC
2225 if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
2226 (uiop->uio_resid & (DIRBLKSIZ - 1)))
2227 panic("nfs readdirplusrpc bad uio");
2228 #endif
2229 ndp->ni_dvp = vp;
2230 newvp = NULLVP;
2231
2232 /*
2233 * If there is no cookie, assume directory was stale.
2234 */
2235 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
2236 if (cookiep)
2237 cookie = *cookiep;
2238 else
2239 return (NFSERR_BAD_COOKIE);
2240 /*
2241 * Loop around doing readdir rpc's of size nm_readdirsize
2242 * truncated to a multiple of DIRBLKSIZ.
2243 * The stopping criteria is EOF or buffer full.
2244 */
2245 while (more_dirs && bigenough) {
2246 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2247 nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
2248 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2249 nfsm_fhtom(vp, 1);
2250 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2251 *tl++ = cookie.nfsuquad[0];
2252 *tl++ = cookie.nfsuquad[1];
2253 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2254 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2255 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2256 *tl = txdr_unsigned(nmp->nm_rsize);
2257 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_procp, cred);
2258 nfsm_postop_attr(vp, attrflag);
2259 if (error) {
2260 m_freem(mrep);
2261 goto nfsmout;
2262 }
2263 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2264 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2265 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2266 more_dirs = fxdr_unsigned(int, *tl);
2267
2268 /* loop thru the dir entries, doctoring them to 4bsd form */
2269 while (more_dirs && bigenough) {
2270 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2271 fxdr_hyper(tl, &fileno);
2272 len = fxdr_unsigned(int, *(tl + 2));
2273 if (len <= 0 || len > NFS_MAXNAMLEN) {
2274 error = EBADRPC;
2275 m_freem(mrep);
2276 goto nfsmout;
2277 }
2278 tlen = nfsm_rndup(len);
2279 if (tlen == len)
2280 tlen += 4; /* To ensure null termination*/
2281 left = DIRBLKSIZ - blksiz;
2282 if ((tlen + DIRHDSIZ) > left) {
2283 dp->d_reclen += left;
2284 uiop->uio_iov->iov_base += left;
2285 uiop->uio_iov->iov_len -= left;
2286 uiop->uio_offset += left;
2287 uiop->uio_resid -= left;
2288 blksiz = 0;
2289 }
2290 if ((tlen + DIRHDSIZ) > uiop->uio_resid)
2291 bigenough = 0;
2292 if (bigenough) {
2293 dp = (struct dirent *)uiop->uio_iov->iov_base;
2294 dp->d_fileno = (int)fileno;
2295 dp->d_namlen = len;
2296 dp->d_reclen = tlen + DIRHDSIZ;
2297 dp->d_type = DT_UNKNOWN;
2298 blksiz += dp->d_reclen;
2299 if (blksiz == DIRBLKSIZ)
2300 blksiz = 0;
2301 uiop->uio_offset += DIRHDSIZ;
2302 uiop->uio_resid -= DIRHDSIZ;
2303 uiop->uio_iov->iov_base += DIRHDSIZ;
2304 uiop->uio_iov->iov_len -= DIRHDSIZ;
2305 cnp->cn_nameptr = uiop->uio_iov->iov_base;
2306 cnp->cn_namelen = len;
2307 nfsm_mtouio(uiop, len);
2308 cp = uiop->uio_iov->iov_base;
2309 tlen -= len;
2310 *cp = '\0';
2311 uiop->uio_iov->iov_base += tlen;
2312 uiop->uio_iov->iov_len -= tlen;
2313 uiop->uio_offset += tlen;
2314 uiop->uio_resid -= tlen;
2315 } else
2316 nfsm_adv(nfsm_rndup(len));
2317 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2318 if (bigenough) {
2319 cookie.nfsuquad[0] = *tl++;
2320 cookie.nfsuquad[1] = *tl++;
2321 } else
2322 tl += 2;
2323
2324 /*
2325 * Since the attributes are before the file handle
2326 * (sigh), we must skip over the attributes and then
2327 * come back and get them.
2328 */
2329 attrflag = fxdr_unsigned(int, *tl);
2330 if (attrflag) {
2331 dpossav1 = dpos;
2332 mdsav1 = md;
2333 nfsm_adv(NFSX_V3FATTR);
2334 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2335 doit = fxdr_unsigned(int, *tl);
2336 if (doit) {
2337 nfsm_getfh(fhp, fhsize, 1);
2338 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2339 VREF(vp);
2340 newvp = vp;
2341 np = dnp;
2342 } else {
2343 error = nfs_nget(vp->v_mount, fhp,
2344 fhsize, &np);
2345 if (error)
2346 doit = 0;
2347 else
2348 newvp = NFSTOV(np);
2349 }
2350 }
2351 if (doit && bigenough) {
2352 dpossav2 = dpos;
2353 dpos = dpossav1;
2354 mdsav2 = md;
2355 md = mdsav1;
2356 nfsm_loadattr(newvp, (struct vattr *)0);
2357 dpos = dpossav2;
2358 md = mdsav2;
2359 dp->d_type =
2360 IFTODT(VTTOIF(np->n_vattr.va_type));
2361 ndp->ni_vp = newvp;
2362 cnp->cn_hash = 0;
2363 for (cp = cnp->cn_nameptr, i = 1; i <= len;
2364 i++, cp++)
2365 cnp->cn_hash += (unsigned char)*cp;
2366 cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
2367 }
2368 } else {
2369 /* Just skip over the file handle */
2370 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2371 i = fxdr_unsigned(int, *tl);
2372 nfsm_adv(nfsm_rndup(i));
2373 }
2374 if (newvp != NULLVP) {
2375 if (newvp == vp)
2376 vrele(newvp);
2377 else
2378 vput(newvp);
2379 newvp = NULLVP;
2380 }
2381 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2382 more_dirs = fxdr_unsigned(int, *tl);
2383 }
2384 /*
2385 * If at end of rpc data, get the eof boolean
2386 */
2387 if (!more_dirs) {
2388 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2389 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2390 }
2391 m_freem(mrep);
2392 }
2393 /*
2394 * Fill last record, if any, out to a multiple of NFS_DIRBLKSIZ
2395 * by increasing d_reclen for the last record.
2396 */
2397 if (blksiz > 0) {
2398 left = DIRBLKSIZ - blksiz;
2399 dp->d_reclen += left;
2400 uiop->uio_iov->iov_base += left;
2401 uiop->uio_iov->iov_len -= left;
2402 uiop->uio_offset += left;
2403 uiop->uio_resid -= left;
2404 }
2405
2406 /*
2407 * We are now either at the end of the directory or have filled the
2408 * block.
2409 */
2410 if (bigenough)
2411 dnp->n_direofoffset = uiop->uio_offset;
2412 else {
2413 if (uiop->uio_resid > 0)
2414 printf("EEK! readdirplusrpc resid > 0\n");
2415 cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
2416 *cookiep = cookie;
2417 }
2418 nfsmout:
2419 if (newvp != NULLVP) {
2420 if (newvp == vp)
2421 vrele(newvp);
2422 else
2423 vput(newvp);
2424 newvp = NULLVP;
2425 }
2426 return (error);
2427 }
2428
2429 /*
2430 * Silly rename. To make the stateless NFS filesystem look a little
2431 * more like "ufs", a remove of an active vnode is translated to a rename
2432 * to a funny looking filename that is removed by nfs_inactive on the
2433 * nfsnode. There is the potential for another process on a different client
2434 * to create the same funny name between the time the nfs_lookitup() fails
2435 * and the nfs_rename() completes, but...
2436 */
2437 static int
2438 nfs_sillyrename(dvp, vp, cnp)
2439 struct vnode *dvp, *vp;
2440 struct componentname *cnp;
2441 {
2442 register struct sillyrename *sp;
2443 struct nfsnode *np;
2444 int error;
2445 short pid;
2446
2447 cache_purge(dvp);
2448 np = VTONFS(vp);
2449 #ifndef DIAGNOSTIC
2450 if (vp->v_type == VDIR)
2451 panic("nfs: sillyrename dir");
2452 #endif
2453 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2454 M_NFSREQ, M_WAITOK);
2455 sp->s_cred = crdup(cnp->cn_cred);
2456 sp->s_dvp = dvp;
2457 VREF(dvp);
2458
2459 /* Fudge together a funny name */
2460 pid = cnp->cn_proc->p_pid;
2461 sp->s_namlen = sprintf(sp->s_name, ".nfsA%04x4.4", pid);
2462
2463 /* Try lookitups until we get one that isn't there */
2464 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2465 cnp->cn_proc, (struct nfsnode **)0) == 0) {
2466 sp->s_name[4]++;
2467 if (sp->s_name[4] > 'z') {
2468 error = EINVAL;
2469 goto bad;
2470 }
2471 }
2472 error = nfs_renameit(dvp, cnp, sp);
2473 if (error)
2474 goto bad;
2475 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2476 cnp->cn_proc, &np);
2477 np->n_sillyrename = sp;
2478 return (0);
2479 bad:
2480 vrele(sp->s_dvp);
2481 crfree(sp->s_cred);
2482 free((caddr_t)sp, M_NFSREQ);
2483 return (error);
2484 }
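/*
 * Illustrative sketch (not from the original source): the silly-rename name
 * generation used above, as a small user-space program.  The first candidate
 * is ".nfsA<pid>4.4"; while a candidate already exists on the server the
 * fifth character (the 'A') is bumped one letter at a time until a free name
 * is found or 'z' is passed.  sketch_name_exists() is an assumed stand-in
 * for the nfs_lookitup() probe.
 */
#include <stdio.h>
#include <string.h>

static int
sketch_name_exists(const char *name)
{
	/* Pretend the first two candidates are already taken. */
	return (strcmp(name, ".nfsA00ff4.4") == 0 ||
	    strcmp(name, ".nfsB00ff4.4") == 0);
}

int
main(void)
{
	char name[32];
	short pid = 0x00ff;
	int namlen;

	namlen = sprintf(name, ".nfsA%04x4.4", pid);
	while (sketch_name_exists(name)) {
		name[4]++;			/* same probe-and-bump loop as above */
		if (name[4] > 'z') {
			fprintf(stderr, "out of candidate names\n");
			return (1);
		}
	}
	printf("using %s (namlen %d)\n", name, namlen);
	return (0);
}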
2485
2486 /*
2487 * Look up a file name and optionally either update the file handle or
2488 * allocate an nfsnode, depending on the value of npp.
2489 * npp == NULL --> just do the lookup
2490 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2491 * handled too
2492 * *npp != NULL --> update the file handle in the vnode
2493 */
2494 static int
2495 nfs_lookitup(dvp, name, len, cred, procp, npp)
2496 register struct vnode *dvp;
2497 const char *name;
2498 int len;
2499 struct ucred *cred;
2500 struct proc *procp;
2501 struct nfsnode **npp;
2502 {
2503 register u_int32_t *tl;
2504 register caddr_t cp;
2505 register int32_t t1, t2;
2506 struct vnode *newvp = (struct vnode *)0;
2507 struct nfsnode *np, *dnp = VTONFS(dvp);
2508 caddr_t bpos, dpos, cp2;
2509 int error = 0, fhlen, attrflag;
2510 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2511 nfsfh_t *nfhp;
2512 int v3 = NFS_ISV3(dvp);
2513
2514 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2515 nfsm_reqhead(dvp, NFSPROC_LOOKUP,
2516 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2517 nfsm_fhtom(dvp, v3);
2518 nfsm_strtom(name, len, NFS_MAXNAMLEN);
2519 nfsm_request(dvp, NFSPROC_LOOKUP, procp, cred);
2520 if (npp && !error) {
2521 nfsm_getfh(nfhp, fhlen, v3);
2522 if (*npp) {
2523 np = *npp;
2524 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2525 free((caddr_t)np->n_fhp, M_NFSBIGFH);
2526 np->n_fhp = &np->n_fh;
2527 } else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
2528 np->n_fhp =(nfsfh_t *)malloc(fhlen,M_NFSBIGFH,M_WAITOK);
2529 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
2530 np->n_fhsize = fhlen;
2531 newvp = NFSTOV(np);
2532 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2533 VREF(dvp);
2534 newvp = dvp;
2535 } else {
2536 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2537 if (error) {
2538 m_freem(mrep);
2539 return (error);
2540 }
2541 newvp = NFSTOV(np);
2542 }
2543 if (v3) {
2544 nfsm_postop_attr(newvp, attrflag);
2545 if (!attrflag && *npp == NULL) {
2546 m_freem(mrep);
2547 if (newvp == dvp)
2548 vrele(newvp);
2549 else
2550 vput(newvp);
2551 return (ENOENT);
2552 }
2553 } else
2554 nfsm_loadattr(newvp, (struct vattr *)0);
2555 }
2556 nfsm_reqdone;
2557 if (npp && *npp == NULL) {
2558 if (error) {
2559 if (newvp)
2560 if (newvp == dvp)
2561 vrele(newvp);
2562 else
2563 vput(newvp);
2564 } else
2565 *npp = np;
2566 }
2567 return (error);
2568 }
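/*
 * Illustrative sketch (not from the original source): the three npp calling
 * conventions of nfs_lookitup() described in the comment above, shown with a
 * stand-in lookup function.  struct sketch_node and sketch_lookitup() are
 * assumptions for the sketch, not kernel definitions.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sketch_node {
	char fh[32];			/* stand-in for the NFS file handle */
};

static int
sketch_lookitup(const char *name, struct sketch_node **npp)
{
	char fh[32];

	snprintf(fh, sizeof(fh), "fh-of-%s", name);	/* pretend LOOKUP reply */
	if (npp == NULL)
		return (0);			/* npp == NULL: just do the lookup */
	if (*npp == NULL) {
		*npp = malloc(sizeof(**npp));	/* *npp == NULL: allocate a node */
		if (*npp == NULL)
			return (-1);
	}
	memcpy((*npp)->fh, fh, sizeof(fh));	/* *npp != NULL: update the handle */
	return (0);
}

int
main(void)
{
	struct sketch_node *np = NULL;

	sketch_lookitup("foo", NULL);		/* existence probe only */
	sketch_lookitup("foo", &np);		/* allocate and fill a new node */
	sketch_lookitup("foo", &np);		/* refresh the handle in place */
	if (np != NULL) {
		printf("%s\n", np->fh);
		free(np);
	}
	return (0);
}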
2569
2570 /*
2571 * Nfs Version 3 commit rpc
2572 */
2573 static int
2574 nfs_commit(vp, offset, cnt, cred, procp)
2575 register struct vnode *vp;
2576 u_quad_t offset;
2577 int cnt;
2578 struct ucred *cred;
2579 struct proc *procp;
2580 {
2581 register caddr_t cp;
2582 register u_int32_t *tl;
2583 register int32_t t1, t2;
2584 register struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2585 caddr_t bpos, dpos, cp2;
2586 int error = 0, wccflag = NFSV3_WCCRATTR;
2587 struct mbuf *mreq, *mrep, *md, *mb, *mb2;
2588
2589 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0)
2590 return (0);
2591 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2592 nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
2593 nfsm_fhtom(vp, 1);
2594 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2595 txdr_hyper(&offset, tl);
2596 tl += 2;
2597 *tl = txdr_unsigned(cnt);
2598 nfsm_request(vp, NFSPROC_COMMIT, procp, cred);
2599 nfsm_wcc_data(vp, wccflag);
2600 if (!error) {
2601 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2602 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2603 NFSX_V3WRITEVERF)) {
2604 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2605 NFSX_V3WRITEVERF);
2606 error = NFSERR_STALEWRITEVERF;
2607 }
2608 }
2609 nfsm_reqdone;
2610 return (error);
2611 }
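/*
 * Illustrative sketch (not from the original source): the write-verifier
 * check done by the commit RPC above, as a small user-space program.  The
 * server returns an opaque verifier with every COMMIT reply; if it differs
 * from the copy remembered on the mount, the server has lost uncommitted
 * data (e.g. it rebooted), so the saved copy is replaced and a stale-verifier
 * condition is reported so dirty buffers get written again.  The size and
 * return value below are assumptions for the sketch.
 */
#include <stdio.h>
#include <string.h>

#define SKETCH_VERFSIZE		8
#define SKETCH_STALEWRITEVERF	1

static int
sketch_check_verf(unsigned char *saved, const unsigned char *reply)
{
	if (memcmp(saved, reply, SKETCH_VERFSIZE) != 0) {
		memcpy(saved, reply, SKETCH_VERFSIZE);	/* remember the new verifier */
		return (SKETCH_STALEWRITEVERF);
	}
	return (0);
}

int
main(void)
{
	unsigned char saved[SKETCH_VERFSIZE] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned char reply[SKETCH_VERFSIZE] = { 9, 9, 9, 9, 9, 9, 9, 9 };
	int first, second;

	first = sketch_check_verf(saved, reply);	/* 1: verifier changed */
	second = sketch_check_verf(saved, reply);	/* 0: now matches */
	printf("first reply: %d, second reply: %d\n", first, second);
	return (0);
}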
2612
2613 /*
2614 * Kludge City..
2615 * - make nfs_bmap() essentially a no-op that does no translation
2616 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2617 * (Maybe I could use the process's page mapping, but I was concerned that
2618 * Kernel Write might not be enabled, and also figured copyout() would do
2619 * a lot more work than bcopy(); it also currently happens in the
2620 * context of the swapper process (2).)
2621 */
2622 static int
2623 nfs_bmap(ap)
2624 struct vop_bmap_args /* {
2625 struct vnode *a_vp;
2626 daddr_t a_bn;
2627 struct vnode **a_vpp;
2628 daddr_t *a_bnp;
2629 int *a_runp;
2630 int *a_runb;
2631 } */ *ap;
2632 {
2633 register struct vnode *vp = ap->a_vp;
2634
2635 if (ap->a_vpp != NULL)
2636 *ap->a_vpp = vp;
2637 if (ap->a_bnp != NULL)
2638 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
2639 if (ap->a_runp != NULL)
2640 *ap->a_runp = 0;
2641 if (ap->a_runb != NULL)
2642 *ap->a_runb = 0;
2643 return (0);
2644 }
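/*
 * Illustrative sketch (not from the original source): the block-number
 * translation nfs_bmap() performs above, as a small user-space program.
 * btodb() converts a byte count into DEV_BSIZE (512-byte) units, so a
 * logical block number is simply scaled by the mount's I/O size expressed
 * in 512-byte sectors.  The 8192-byte I/O size is only an assumed example.
 */
#include <stdio.h>

#define SKETCH_DEV_BSIZE	512
#define sketch_btodb(bytes)	((bytes) / SKETCH_DEV_BSIZE)

int
main(void)
{
	long iosize = 8192;		/* assumed f_iosize for the sketch */
	long logical_bn = 3;
	long device_bn;

	/* Same arithmetic as *ap->a_bnp = ap->a_bn * btodb(...f_iosize) above. */
	device_bn = logical_bn * sketch_btodb(iosize);
	printf("logical block %ld -> device block %ld\n", logical_bn, device_bn);
	return (0);
}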
2645
2646 /*
2647 * Strategy routine.
2648 * For async requests when nfsiod(s) are running, queue the request by
2649 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
2650 * request.
2651 */
2652 static int
2653 nfs_strategy(ap)
2654 struct vop_strategy_args *ap;
2655 {
2656 register struct buf *bp = ap->a_bp;
2657 struct ucred *cr;
2658 struct proc *p;
2659 int error = 0;
2660
2661 if (bp->b_flags & B_PHYS)
2662 panic("nfs physio");
2663 if (bp->b_flags & B_ASYNC)
2664 p = (struct proc *)0;
2665 else
2666 p = curproc; /* XXX */
2667 if (bp->b_flags & B_READ)
2668 cr = bp->b_rcred;
2669 else
2670 cr = bp->b_wcred;
2671 /*
2672 * If the op is asynchronous and an i/o daemon is waiting,
2673 * queue the request, wake it up and wait for completion;
2674 * otherwise just do it ourselves.
2675 */
2676 if ((bp->b_flags & B_ASYNC) == 0 ||
2677 nfs_asyncio(bp, NOCRED))
2678 error = nfs_doio(bp, cr, p);
2679 return (error);
2680 }
2681
2682 /*
2683 * Mmap a file
2684 *
2685 * NB Currently unsupported.
2686 */
2687 /* ARGSUSED */
2688 static int
2689 nfs_mmap(ap)
2690 struct vop_mmap_args /* {
2691 struct vnode *a_vp;
2692 int a_fflags;
2693 struct ucred *a_cred;
2694 struct proc *a_p;
2695 } */ *ap;
2696 {
2697
2698 return (EINVAL);
2699 }
2700
2701 /*
2702 * fsync vnode op. Just call nfs_flush() with commit == 1.
2703 */
2704 /* ARGSUSED */
2705 static int
2706 nfs_fsync(ap)
2707 struct vop_fsync_args /* {
2708 struct vnodeop_desc *a_desc;
2709 struct vnode * a_vp;
2710 struct ucred * a_cred;
2711 int a_waitfor;
2712 struct proc * a_p;
2713 } */ *ap;
2714 {
2715
2716 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1));
2717 }
2718
2719 /*
2720 * Flush all the blocks associated with a vnode.
2721 * Walk through the buffer pool and push any dirty pages
2722 * associated with the vnode.
2723 */
2724 static int
2725 nfs_flush(vp, cred, waitfor, p, commit)
2726 register struct vnode *vp;
2727 struct ucred *cred;
2728 int waitfor;
2729 struct proc *p;
2730 int commit;
2731 {
2732 register struct nfsnode *np = VTONFS(vp);
2733 register struct buf *bp;
2734 register int i;
2735 struct buf *nbp;
2736 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2737 int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2738 int passone = 1;
2739 u_quad_t off, endoff, toff;
2740 struct ucred* wcred = NULL;
2741 struct buf **bvec = NULL;
2742 #ifndef NFS_COMMITBVECSIZ
2743 #define NFS_COMMITBVECSIZ 20
2744 #endif
2745 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2746 int bvecsize = 0, bveccount;
2747
2748 if (nmp->nm_flag & NFSMNT_INT)
2749 slpflag = PCATCH;
2750 if (!commit)
2751 passone = 0;
2752 /*
2753 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2754 * server, but has not been committed to stable storage on the server
2755 * yet. On the first pass, the byte range is worked out and the commit
2756 * rpc is done. On the second pass, nfs_writebp() is called to do the
2757 * job.
2758 */
2759 again:
2760 off = (u_quad_t)-1;
2761 endoff = 0;
2762 bvecpos = 0;
2763 if (NFS_ISV3(vp) && commit) {
2764 s = splbio();
2765 /*
2766 * Count up how many buffers waiting for a commit.
2767 */
2768 bveccount = 0;
2769 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2770 nbp = TAILQ_NEXT(bp, b_vnbufs);
2771 if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
2772 == (B_DELWRI | B_NEEDCOMMIT))
2773 bveccount++;
2774 }
2775 /*
2776 * Allocate space to remember the list of bufs to commit. It is
2777 * important to use M_NOWAIT here to avoid a race with nfs_write.
2778 * If we can't get memory (for whatever reason), we will end up
2779 * committing the buffers one-by-one in the loop below.
2780 */
2781 if (bveccount > NFS_COMMITBVECSIZ) {
2782 if (bvec != NULL && bvec != bvec_on_stack)
2783 free(bvec, M_TEMP);
2784 bvec = (struct buf **)
2785 malloc(bveccount * sizeof(struct buf *),
2786 M_TEMP, M_NOWAIT);
2787 if (bvec == NULL) {
2788 bvec = bvec_on_stack;
2789 bvecsize = NFS_COMMITBVECSIZ;
2790 } else
2791 bvecsize = bveccount;
2792 } else {
2793 bvec = bvec_on_stack;
2794 bvecsize = NFS_COMMITBVECSIZ;
2795 }
2796 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2797 nbp = TAILQ_NEXT(bp, b_vnbufs);
2798 if (bvecpos >= bvecsize)
2799 break;
2800 if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
2801 != (B_DELWRI | B_NEEDCOMMIT))
2802 continue;
2803 bremfree(bp);
2804 /*
2805 * Work out if all buffers are using the same cred
2806 * so we can deal with them all with one commit.
2807 */
2808 if (wcred == NULL)
2809 wcred = bp->b_wcred;
2810 else if (wcred != bp->b_wcred)
2811 wcred = NOCRED;
2812 bp->b_flags |= (B_BUSY | B_WRITEINPROG);
2813 vfs_busy_pages(bp, 1);
2814 /*
2815 * A list of these buffers is kept so that the
2816 * second loop knows which buffers have actually
2817 * been committed. This is necessary, since there
2818 * may be a race between the commit rpc and new
2819 * uncommitted writes on the file.
2820 */
2821 bvec[bvecpos++] = bp;
2822 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2823 bp->b_dirtyoff;
2824 if (toff < off)
2825 off = toff;
2826 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2827 if (toff > endoff)
2828 endoff = toff;
2829 }
2830 splx(s);
2831 }
2832 if (bvecpos > 0) {
2833 /*
2834 * Commit data on the server, as required.
2835 * If all bufs are using the same wcred, then use that with
2836 * one call for all of them, otherwise commit each one
2837 * separately.
2838 */
2839 if (wcred != NOCRED)
2840 retv = nfs_commit(vp, off, (int)(endoff - off),
2841 wcred, p);
2842 else {
2843 retv = 0;
2844 for (i = 0; i < bvecpos; i++) {
2845 off_t off, size;
2846 bp = bvec[i];
2847 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2848 bp->b_dirtyoff;
2849 size = (u_quad_t)(bp->b_dirtyend
2850 - bp->b_dirtyoff);
2851 retv = nfs_commit(vp, off, (int)size,
2852 bp->b_wcred, p);
2853 if (retv) break;
2854 }
2855 }
2856
2857 if (retv == NFSERR_STALEWRITEVERF)
2858 nfs_clearcommit(vp->v_mount);
2859 /*
2860 * Now, either mark the blocks I/O done or mark the
2861 * blocks dirty, depending on whether the commit
2862 * succeeded.
2863 */
2864 for (i = 0; i < bvecpos; i++) {
2865 bp = bvec[i];
2866 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG | B_CLUSTEROK);
2867 if (retv) {
2868 vfs_unbusy_pages(bp);
2869 brelse(bp);
2870 } else {
2871 s = splbio(); /* XXX check this positioning */
2872 vp->v_numoutput++;
2873 bp->b_flags |= B_ASYNC;
2874 if (bp->b_flags & B_DELWRI) {
2875 --numdirtybuffers;
2876 if (needsbuffer) {
2877 vfs_bio_need_satisfy();
2878 }
2879 }
2880 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_DELWRI);
2881 bp->b_dirtyoff = bp->b_dirtyend = 0;
2882 reassignbuf(bp, vp);
2883 splx(s);
2884 biodone(bp);
2885 }
2886 }
2887 }
2888
2889 /*
2890 * Start/do any write(s) that are required.
2891 */
2892 loop:
2893 s = splbio();
2894 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2895 nbp = TAILQ_NEXT(bp, b_vnbufs);
2896 if (bp->b_flags & B_BUSY) {
2897 if (waitfor != MNT_WAIT || passone)
2898 continue;
2899 bp->b_flags |= B_WANTED;
2900 error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
2901 "nfsfsync", slptimeo);
2902 splx(s);
2903 if (error) {
2904 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
2905 error = EINTR;
2906 goto done;
2907 }
2908 if (slpflag == PCATCH) {
2909 slpflag = 0;
2910 slptimeo = 2 * hz;
2911 }
2912 }
2913 goto loop;
2914 }
2915 if ((bp->b_flags & B_DELWRI) == 0)
2916 panic("nfs_fsync: not dirty");
2917 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT))
2918 continue;
2919 bremfree(bp);
2920 if (passone || !commit)
2921 bp->b_flags |= (B_BUSY|B_ASYNC);
2922 else
2923 bp->b_flags |= (B_BUSY|B_ASYNC|B_WRITEINPROG);
2924 splx(s);
2925 VOP_BWRITE(bp);
2926 goto loop;
2927 }
2928 splx(s);
2929 if (passone) {
2930 passone = 0;
2931 goto again;
2932 }
2933 if (waitfor == MNT_WAIT) {
2934 while (vp->v_numoutput) {
2935 vp->v_flag |= VBWAIT;
2936 error = tsleep((caddr_t)&vp->v_numoutput,
2937 slpflag | (PRIBIO + 1), "nfsfsync", slptimeo);
2938 if (error) {
2939 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
2940 error = EINTR;
2941 goto done;
2942 }
2943 if (slpflag == PCATCH) {
2944 slpflag = 0;
2945 slptimeo = 2 * hz;
2946 }
2947 }
2948 }
2949 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) && commit) {
2950 goto loop;
2951 }
2952 }
2953 if (np->n_flag & NWRITEERR) {
2954 error = np->n_error;
2955 np->n_flag &= ~NWRITEERR;
2956 }
2957 done:
2958 if (bvec != NULL && bvec != bvec_on_stack)
2959 free(bvec, M_TEMP);
2960 return (error);
2961 }
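/*
 * Illustrative sketch (not from the original source): how nfs_flush() above
 * works out the single byte range handed to the commit RPC when all of the
 * B_NEEDCOMMIT buffers share one write credential.  Each buffer contributes
 * [b_blkno * DEV_BSIZE + b_dirtyoff, b_blkno * DEV_BSIZE + b_dirtyend) and
 * the commit covers the union.  The buffer values are made up for the sketch.
 */
#include <stdio.h>

#define SKETCH_DEV_BSIZE	512

struct sketch_buf {
	long	blkno;			/* logical block number on the device */
	int	dirtyoff;		/* first dirty byte within the buffer */
	int	dirtyend;		/* byte past the last dirty byte */
};

int
main(void)
{
	struct sketch_buf bufs[] = {
		{ 16, 0, 8192 },	/* covers bytes 8192..16384 */
		{ 48, 512, 4096 },	/* covers bytes 25088..28672 */
	};
	unsigned long long off = ~0ULL, endoff = 0, toff;
	int i;

	for (i = 0; i < 2; i++) {
		toff = (unsigned long long)bufs[i].blkno * SKETCH_DEV_BSIZE +
		    bufs[i].dirtyoff;
		if (toff < off)
			off = toff;
		toff += bufs[i].dirtyend - bufs[i].dirtyoff;
		if (toff > endoff)
			endoff = toff;
	}
	printf("commit offset %llu, count %llu\n", off, endoff - off);
	return (0);
}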
2962
2963 /*
2964 * NFS advisory byte-level locks.
2965 * Currently unsupported.
2966 */
2967 static int
2968 nfs_advlock(ap)
2969 struct vop_advlock_args /* {
2970 struct vnode *a_vp;
2971 caddr_t a_id;
2972 int a_op;
2973 struct flock *a_fl;
2974 int a_flags;
2975 } */ *ap;
2976 {
2977 register struct nfsnode *np = VTONFS(ap->a_vp);
2978
2979 /*
2980 * The following kludge is to allow diskless support to work
2981 * until a real NFS lockd is implemented. Basically, just pretend
2982 * that this is a local lock.
2983 */
2984 return (lf_advlock(ap, &(np->n_lockf), np->n_size));
2985 }
2986
2987 /*
2988 * Print out the contents of an nfsnode.
2989 */
2990 static int
2991 nfs_print(ap)
2992 struct vop_print_args /* {
2993 struct vnode *a_vp;
2994 } */ *ap;
2995 {
2996 register struct vnode *vp = ap->a_vp;
2997 register struct nfsnode *np = VTONFS(vp);
2998
2999 printf("tag VT_NFS, fileid %ld fsid 0x%lx",
3000 np->n_vattr.va_fileid, np->n_vattr.va_fsid);
3001 if (vp->v_type == VFIFO)
3002 fifo_printinfo(vp);
3003 printf("\n");
3004 return (0);
3005 }
3006
3007 /*
3008 * Just call nfs_writebp() with the force argument set to 1.
3009 */
3010 static int
3011 nfs_bwrite(ap)
3012 struct vop_bwrite_args /* {
3013 struct vnode *a_bp;
3014 } */ *ap;
3015 {
3016
3017 return (nfs_writebp(ap->a_bp, 1));
3018 }
3019
3020 /*
3021 * This is a clone of vn_bwrite(), except that B_WRITEINPROG isn't set unless
3022 * the force flag is one and it also handles the B_NEEDCOMMIT flag.
3023 */
3024 int
3025 nfs_writebp(bp, force)
3026 register struct buf *bp;
3027 int force;
3028 {
3029 int s;
3030 register int oldflags = bp->b_flags, retv = 1;
3031 off_t off;
3032
3033 if(!(bp->b_flags & B_BUSY))
3034 panic("bwrite: buffer is not busy???");
3035
3036 if (bp->b_flags & B_INVAL)
3037 bp->b_flags |= B_INVAL | B_NOCACHE;
3038
3039 if (bp->b_flags & B_DELWRI) {
3040 --numdirtybuffers;
3041 if (needsbuffer)
3042 vfs_bio_need_satisfy();
3043 }
3044 s = splbio(); /* XXX check if needed */
3045 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR|B_DELWRI);
3046
3047 if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
3048 reassignbuf(bp, bp->b_vp);
3049 }
3050
3051 bp->b_vp->v_numoutput++;
3052 curproc->p_stats->p_ru.ru_oublock++;
3053 splx(s);
3054
3055 /*
3056 * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not,
3057 * an actual write will have to be scheduled via VOP_STRATEGY().
3058 * If B_WRITEINPROG is already set, then push it with a write anyhow.
3059 */
3060 vfs_busy_pages(bp, 1);
3061 if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) {
3062 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
3063 bp->b_flags |= B_WRITEINPROG;
3064 retv = nfs_commit(bp->b_vp, off, bp->b_dirtyend-bp->b_dirtyoff,
3065 bp->b_wcred, bp->b_proc);
3066 bp->b_flags &= ~B_WRITEINPROG;
3067 if (!retv) {
3068 bp->b_dirtyoff = bp->b_dirtyend = 0;
3069 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
3070 biodone(bp);
3071 } else if (retv == NFSERR_STALEWRITEVERF)
3072 nfs_clearcommit(bp->b_vp->v_mount);
3073 }
3074 if (retv) {
3075 if (force)
3076 bp->b_flags |= B_WRITEINPROG;
3077 VOP_STRATEGY(bp->b_vp, bp);
3078 }
3079
3080 if( (oldflags & B_ASYNC) == 0) {
3081 int rtval = biowait(bp);
3082
3083 if (oldflags & B_DELWRI) {
3084 s = splbio();
3085 reassignbuf(bp, bp->b_vp);
3086 splx(s);
3087 }
3088
3089 brelse(bp);
3090 return (rtval);
3091 }
3092
3093 return (0);
3094 }
3095
3096 /*
3097 * nfs special file access vnode op.
3098 * Essentially just get vattr and then imitate iaccess() since the device is
3099 * local to the client.
3100 */
3101 static int
3102 nfsspec_access(ap)
3103 struct vop_access_args /* {
3104 struct vnode *a_vp;
3105 int a_mode;
3106 struct ucred *a_cred;
3107 struct proc *a_p;
3108 } */ *ap;
3109 {
3110 register struct vattr *vap;
3111 register gid_t *gp;
3112 register struct ucred *cred = ap->a_cred;
3113 struct vnode *vp = ap->a_vp;
3114 mode_t mode = ap->a_mode;
3115 struct vattr vattr;
3116 register int i;
3117 int error;
3118
3119 /*
3120 * Disallow write attempts on filesystems mounted read-only;
3121 * unless the file is a socket, fifo, or a block or character
3122 * device resident on the filesystem.
3123 */
3124 if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3125 switch (vp->v_type) {
3126 case VREG:
3127 case VDIR:
3128 case VLNK:
3129 return (EROFS);
3130 default:
3131 break;
3132 }
3133 }
3134 /*
3135 * If you're the super-user,
3136 * you always get access.
3137 */
3138 if (cred->cr_uid == 0)
3139 return (0);
3140 vap = &vattr;
3141 error = VOP_GETATTR(vp, vap, cred, ap->a_p);
3142 if (error)
3143 return (error);
3144 /*
3145 * Access check is based on only one of owner, group, public.
3146 * If not owner, then check group. If not a member of the
3147 * group, then check public access.
3148 */
3149 if (cred->cr_uid != vap->va_uid) {
3150 mode >>= 3;
3151 gp = cred->cr_groups;
3152 for (i = 0; i < cred->cr_ngroups; i++, gp++)
3153 if (vap->va_gid == *gp)
3154 goto found;
3155 mode >>= 3;
3156 found:
3157 ;
3158 }
3159 error = (vap->va_mode & mode) == mode ? 0 : EACCES;
3160 return (error);
3161 }
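/*
 * Illustrative sketch (not from the original source): the owner/group/other
 * check nfsspec_access() performs above, as a small user-space function.
 * The requested permission bits start in the owner position; if the caller
 * is not the owner they are shifted into the group position, and if the
 * caller is in none of the file's groups they are shifted again into the
 * "other" position before being compared against the file mode.  The -1
 * failure value stands in for EACCES and is an assumption for the sketch.
 */
#include <stdio.h>
#include <sys/types.h>

static int
sketch_access(mode_t file_mode, uid_t file_uid, gid_t file_gid,
    uid_t uid, const gid_t *groups, int ngroups, mode_t req)
{
	int i;

	if (uid == 0)
		return (0);			/* super-user always gets access */
	if (uid != file_uid) {
		req >>= 3;			/* not owner: try the group bits */
		for (i = 0; i < ngroups; i++)
			if (groups[i] == file_gid)
				goto found;
		req >>= 3;			/* not in the group: try "other" */
	}
found:
	return ((file_mode & req) == req ? 0 : -1);
}

int
main(void)
{
	gid_t groups[] = { 20 };

	/* A 0640 file owned by uid 100/gid 20: a group member may read it... */
	printf("group read: %d\n",
	    sketch_access(0640, 100, 20, 200, groups, 1, 0400));
	/* ...but a caller outside the group may not. */
	printf("other read: %d\n",
	    sketch_access(0640, 100, 20, 300, NULL, 0, 0400));
	return (0);
}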
3162
3163 /*
3164 * Read wrapper for special devices.
3165 */
3166 static int
3167 nfsspec_read(ap)
3168 struct vop_read_args /* {
3169 struct vnode *a_vp;
3170 struct uio *a_uio;
3171 int a_ioflag;
3172 struct ucred *a_cred;
3173 } */ *ap;
3174 {
3175 register struct nfsnode *np = VTONFS(ap->a_vp);
3176
3177 /*
3178 * Set access flag.
3179 */
3180 np->n_flag |= NACC;
3181 getnanotime(&np->n_atim);
3182 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3183 }
3184
3185 /*
3186 * Write wrapper for special devices.
3187 */
3188 static int
3189 nfsspec_write(ap)
3190 struct vop_write_args /* {
3191 struct vnode *a_vp;
3192 struct uio *a_uio;
3193 int a_ioflag;
3194 struct ucred *a_cred;
3195 } */ *ap;
3196 {
3197 register struct nfsnode *np = VTONFS(ap->a_vp);
3198
3199 /*
3200 * Set update flag.
3201 */
3202 np->n_flag |= NUPD;
3203 getnanotime(&np->n_mtim);
3204 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3205 }
3206
3207 /*
3208 * Close wrapper for special devices.
3209 *
3210 * Update the times on the nfsnode then do device close.
3211 */
3212 static int
3213 nfsspec_close(ap)
3214 struct vop_close_args /* {
3215 struct vnode *a_vp;
3216 int a_fflag;
3217 struct ucred *a_cred;
3218 struct proc *a_p;
3219 } */ *ap;
3220 {
3221 register struct vnode *vp = ap->a_vp;
3222 register struct nfsnode *np = VTONFS(vp);
3223 struct vattr vattr;
3224
3225 if (np->n_flag & (NACC | NUPD)) {
3226 np->n_flag |= NCHG;
3227 if (vp->v_usecount == 1 &&
3228 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3229 VATTR_NULL(&vattr);
3230 if (np->n_flag & NACC)
3231 vattr.va_atime = np->n_atim;
3232 if (np->n_flag & NUPD)
3233 vattr.va_mtime = np->n_mtim;
3234 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3235 }
3236 }
3237 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3238 }
3239
3240 /*
3241 * Read wrapper for fifos.
3242 */
3243 static int
3244 nfsfifo_read(ap)
3245 struct vop_read_args /* {
3246 struct vnode *a_vp;
3247 struct uio *a_uio;
3248 int a_ioflag;
3249 struct ucred *a_cred;
3250 } */ *ap;
3251 {
3252 register struct nfsnode *np = VTONFS(ap->a_vp);
3253
3254 /*
3255 * Set access flag.
3256 */
3257 np->n_flag |= NACC;
3258 getnanotime(&np->n_atim);
3259 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3260 }
3261
3262 /*
3263 * Write wrapper for fifos.
3264 */
3265 static int
3266 nfsfifo_write(ap)
3267 struct vop_write_args /* {
3268 struct vnode *a_vp;
3269 struct uio *a_uio;
3270 int a_ioflag;
3271 struct ucred *a_cred;
3272 } */ *ap;
3273 {
3274 register struct nfsnode *np = VTONFS(ap->a_vp);
3275
3276 /*
3277 * Set update flag.
3278 */
3279 np->n_flag |= NUPD;
3280 getnanotime(&np->n_mtim);
3281 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3282 }
3283
3284 /*
3285 * Close wrapper for fifos.
3286 *
3287 * Update the times on the nfsnode then do fifo close.
3288 */
3289 static int
3290 nfsfifo_close(ap)
3291 struct vop_close_args /* {
3292 struct vnode *a_vp;
3293 int a_fflag;
3294 struct ucred *a_cred;
3295 struct proc *a_p;
3296 } */ *ap;
3297 {
3298 register struct vnode *vp = ap->a_vp;
3299 register struct nfsnode *np = VTONFS(vp);
3300 struct vattr vattr;
3301 struct timespec ts;
3302
3303 if (np->n_flag & (NACC | NUPD)) {
3304 getnanotime(&ts);
3305 if (np->n_flag & NACC)
3306 np->n_atim = ts;
3307 if (np->n_flag & NUPD)
3308 np->n_mtim = ts;
3309 np->n_flag |= NCHG;
3310 if (vp->v_usecount == 1 &&
3311 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3312 VATTR_NULL(&vattr);
3313 if (np->n_flag & NACC)
3314 vattr.va_atime = np->n_atim;
3315 if (np->n_flag & NUPD)
3316 vattr.va_mtime = np->n_mtim;
3317 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3318 }
3319 }
3320 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3321 }