1 /*-
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * from nfs_vnops.c 8.16 (Berkeley) 5/27/95
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD: releng/11.0/sys/fs/nfsclient/nfs_clvnops.c 303909 2016-08-10 12:53:30Z kib $");
37
38 /*
39 * vnode op calls for Sun NFS version 2, 3 and 4
40 */
41
42 #include "opt_inet.h"
43
44 #include <sys/param.h>
45 #include <sys/kernel.h>
46 #include <sys/systm.h>
47 #include <sys/resourcevar.h>
48 #include <sys/proc.h>
49 #include <sys/mount.h>
50 #include <sys/bio.h>
51 #include <sys/buf.h>
52 #include <sys/jail.h>
53 #include <sys/malloc.h>
54 #include <sys/mbuf.h>
55 #include <sys/namei.h>
56 #include <sys/socket.h>
57 #include <sys/vnode.h>
58 #include <sys/dirent.h>
59 #include <sys/fcntl.h>
60 #include <sys/lockf.h>
61 #include <sys/stat.h>
62 #include <sys/sysctl.h>
63 #include <sys/signalvar.h>
64
65 #include <vm/vm.h>
66 #include <vm/vm_extern.h>
67 #include <vm/vm_object.h>
68
69 #include <fs/nfs/nfsport.h>
70 #include <fs/nfsclient/nfsnode.h>
71 #include <fs/nfsclient/nfsmount.h>
72 #include <fs/nfsclient/nfs.h>
73 #include <fs/nfsclient/nfs_kdtrace.h>
74
75 #include <net/if.h>
76 #include <netinet/in.h>
77 #include <netinet/in_var.h>
78
79 #include <nfs/nfs_lock.h>
80
81 #ifdef KDTRACE_HOOKS
82 #include <sys/dtrace_bsd.h>
83
84 dtrace_nfsclient_accesscache_flush_probe_func_t
85 dtrace_nfscl_accesscache_flush_done_probe;
86 uint32_t nfscl_accesscache_flush_done_id;
87
88 dtrace_nfsclient_accesscache_get_probe_func_t
89 dtrace_nfscl_accesscache_get_hit_probe,
90 dtrace_nfscl_accesscache_get_miss_probe;
91 uint32_t nfscl_accesscache_get_hit_id;
92 uint32_t nfscl_accesscache_get_miss_id;
93
94 dtrace_nfsclient_accesscache_load_probe_func_t
95 dtrace_nfscl_accesscache_load_done_probe;
96 uint32_t nfscl_accesscache_load_done_id;
#endif /* KDTRACE_HOOKS */
98
99 /* Defs */
100 #define TRUE 1
101 #define FALSE 0
102
103 extern struct nfsstats newnfsstats;
104 extern int nfsrv_useacl;
105 extern int nfscl_debuglevel;
106 MALLOC_DECLARE(M_NEWNFSREQ);
107
/*
 * Ifdef for the FreeBSD-current merged buffer cache.  It is unfortunate that
 * these calls are not in getblk() and brelse(); if they were, they would not
 * be necessary here.
 */
113 #ifndef B_VMIO
114 #define vfs_busy_pages(bp, f)
115 #endif
116
117 static vop_read_t nfsfifo_read;
118 static vop_write_t nfsfifo_write;
119 static vop_close_t nfsfifo_close;
120 static int nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *,
121 struct thread *);
122 static vop_lookup_t nfs_lookup;
123 static vop_create_t nfs_create;
124 static vop_mknod_t nfs_mknod;
125 static vop_open_t nfs_open;
126 static vop_pathconf_t nfs_pathconf;
127 static vop_close_t nfs_close;
128 static vop_access_t nfs_access;
129 static vop_getattr_t nfs_getattr;
130 static vop_setattr_t nfs_setattr;
131 static vop_read_t nfs_read;
132 static vop_fsync_t nfs_fsync;
133 static vop_remove_t nfs_remove;
134 static vop_link_t nfs_link;
135 static vop_rename_t nfs_rename;
136 static vop_mkdir_t nfs_mkdir;
137 static vop_rmdir_t nfs_rmdir;
138 static vop_symlink_t nfs_symlink;
139 static vop_readdir_t nfs_readdir;
140 static vop_strategy_t nfs_strategy;
141 static int nfs_lookitup(struct vnode *, char *, int,
142 struct ucred *, struct thread *, struct nfsnode **);
143 static int nfs_sillyrename(struct vnode *, struct vnode *,
144 struct componentname *);
145 static vop_access_t nfsspec_access;
146 static vop_readlink_t nfs_readlink;
147 static vop_print_t nfs_print;
148 static vop_advlock_t nfs_advlock;
149 static vop_advlockasync_t nfs_advlockasync;
150 static vop_getacl_t nfs_getacl;
151 static vop_setacl_t nfs_setacl;
152
153 /*
154 * Global vfs data structures for nfs
155 */
156 struct vop_vector newnfs_vnodeops = {
157 .vop_default = &default_vnodeops,
158 .vop_access = nfs_access,
159 .vop_advlock = nfs_advlock,
160 .vop_advlockasync = nfs_advlockasync,
161 .vop_close = nfs_close,
162 .vop_create = nfs_create,
163 .vop_fsync = nfs_fsync,
164 .vop_getattr = nfs_getattr,
165 .vop_getpages = ncl_getpages,
166 .vop_putpages = ncl_putpages,
167 .vop_inactive = ncl_inactive,
168 .vop_link = nfs_link,
169 .vop_lookup = nfs_lookup,
170 .vop_mkdir = nfs_mkdir,
171 .vop_mknod = nfs_mknod,
172 .vop_open = nfs_open,
173 .vop_pathconf = nfs_pathconf,
174 .vop_print = nfs_print,
175 .vop_read = nfs_read,
176 .vop_readdir = nfs_readdir,
177 .vop_readlink = nfs_readlink,
178 .vop_reclaim = ncl_reclaim,
179 .vop_remove = nfs_remove,
180 .vop_rename = nfs_rename,
181 .vop_rmdir = nfs_rmdir,
182 .vop_setattr = nfs_setattr,
183 .vop_strategy = nfs_strategy,
184 .vop_symlink = nfs_symlink,
185 .vop_write = ncl_write,
186 .vop_getacl = nfs_getacl,
187 .vop_setacl = nfs_setacl,
188 };
189
190 struct vop_vector newnfs_fifoops = {
191 .vop_default = &fifo_specops,
192 .vop_access = nfsspec_access,
193 .vop_close = nfsfifo_close,
194 .vop_fsync = nfs_fsync,
195 .vop_getattr = nfs_getattr,
196 .vop_inactive = ncl_inactive,
197 .vop_print = nfs_print,
198 .vop_read = nfsfifo_read,
199 .vop_reclaim = ncl_reclaim,
200 .vop_setattr = nfs_setattr,
201 .vop_write = nfsfifo_write,
202 };
203
204 static int nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp,
205 struct componentname *cnp, struct vattr *vap);
206 static int nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
207 int namelen, struct ucred *cred, struct thread *td);
208 static int nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp,
209 char *fnameptr, int fnamelen, struct vnode *tdvp, struct vnode *tvp,
210 char *tnameptr, int tnamelen, struct ucred *cred, struct thread *td);
211 static int nfs_renameit(struct vnode *sdvp, struct vnode *svp,
212 struct componentname *scnp, struct sillyrename *sp);
213
214 /*
215 * Global variables
216 */
217 #define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
218
219 SYSCTL_DECL(_vfs_nfs);
220
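/*
 * The knobs below live under the vfs.nfs sysctl tree declared above and can
 * be tuned at run time, e.g. something like
 * "sysctl vfs.nfs.access_cache_timeout=10" would cache ACCESS replies for
 * ten seconds.
 */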
221 static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO;
222 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW,
223 &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout");
224
225 static int nfs_prime_access_cache = 0;
226 SYSCTL_INT(_vfs_nfs, OID_AUTO, prime_access_cache, CTLFLAG_RW,
227 &nfs_prime_access_cache, 0,
228 "Prime NFS ACCESS cache when fetching attributes");
229
230 static int newnfs_commit_on_close = 0;
231 SYSCTL_INT(_vfs_nfs, OID_AUTO, commit_on_close, CTLFLAG_RW,
232 &newnfs_commit_on_close, 0, "write+commit on close, else only write");
233
234 static int nfs_clean_pages_on_close = 1;
235 SYSCTL_INT(_vfs_nfs, OID_AUTO, clean_pages_on_close, CTLFLAG_RW,
236 &nfs_clean_pages_on_close, 0, "NFS clean dirty pages on close");
237
238 int newnfs_directio_enable = 0;
239 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_enable, CTLFLAG_RW,
240 &newnfs_directio_enable, 0, "Enable NFS directio");
241
242 int nfs_keep_dirty_on_error;
243 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_keep_dirty_on_error, CTLFLAG_RW,
244 &nfs_keep_dirty_on_error, 0, "Retry pageout if error returned");
245
/*
 * This sysctl allows other processes to mmap a file that has been opened
 * O_DIRECT by a process.  In general, having processes mmap the file while
 * Direct IO is in progress can lead to data inconsistencies.  However, we
 * allow it by default to prevent a denial of service: a malicious user could
 * otherwise open files O_DIRECT and thereby prevent other users from
 * mmap'ing them.  "Protected" environments where stricter consistency
 * guarantees are required can disable this knob.  The process that opened
 * the file O_DIRECT cannot mmap() the file, because mmap'ed IO on an
 * O_DIRECT open() is not meaningful.
 */
257 int newnfs_directio_allow_mmap = 1;
258 SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
259 &newnfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens");
260
261 #if 0
262 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
263 &newnfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
264
265 SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
266 &newnfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
267 #endif
268
269 #define NFSACCESS_ALL (NFSACCESS_READ | NFSACCESS_MODIFY \
270 | NFSACCESS_EXTEND | NFSACCESS_EXECUTE \
271 | NFSACCESS_DELETE | NFSACCESS_LOOKUP)
272
/*
 * SMP Locking Note:
 * The list of locks after the description of the lock is the ordering
 * of other locks acquired with the lock held.
 * np->n_mtx : Protects the fields in the nfsnode.
 *	VM Object Lock
 *	VI_MTX (acquired indirectly)
 * nmp->nm_mtx : Protects the fields in the nfsmount.
 *	rep->r_mtx
 * ncl_iod_mutex : Global lock, protects shared nfsiod state.
 * nfs_reqq_mtx : Global lock, protects the nfs_reqq list.
 *	nmp->nm_mtx
 *	rep->r_mtx
 * rep->r_mtx : Protects the fields in an nfsreq.
 */
288
289 static int
290 nfs34_access_otw(struct vnode *vp, int wmode, struct thread *td,
291 struct ucred *cred, u_int32_t *retmode)
292 {
293 int error = 0, attrflag, i, lrupos;
294 u_int32_t rmode;
295 struct nfsnode *np = VTONFS(vp);
296 struct nfsvattr nfsva;
297
298 error = nfsrpc_accessrpc(vp, wmode, cred, td, &nfsva, &attrflag,
299 &rmode, NULL);
300 if (attrflag)
301 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
302 if (!error) {
303 lrupos = 0;
304 mtx_lock(&np->n_mtx);
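		/*
		 * Record the result in the per-node access cache: update the
		 * entry for this credential if one already exists, otherwise
		 * remember the least recently updated slot (lrupos) so it can
		 * be recycled below.
		 */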
305 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
306 if (np->n_accesscache[i].uid == cred->cr_uid) {
307 np->n_accesscache[i].mode = rmode;
308 np->n_accesscache[i].stamp = time_second;
309 break;
310 }
311 if (i > 0 && np->n_accesscache[i].stamp <
312 np->n_accesscache[lrupos].stamp)
313 lrupos = i;
314 }
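		/* No entry matched this credential; recycle the LRU slot. */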
315 if (i == NFS_ACCESSCACHESIZE) {
316 np->n_accesscache[lrupos].uid = cred->cr_uid;
317 np->n_accesscache[lrupos].mode = rmode;
318 np->n_accesscache[lrupos].stamp = time_second;
319 }
320 mtx_unlock(&np->n_mtx);
321 if (retmode != NULL)
322 *retmode = rmode;
323 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, rmode, 0);
324 } else if (NFS_ISV4(vp)) {
325 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
326 }
327 #ifdef KDTRACE_HOOKS
328 if (error != 0)
329 KDTRACE_NFS_ACCESSCACHE_LOAD_DONE(vp, cred->cr_uid, 0,
330 error);
331 #endif
332 return (error);
333 }
334
/*
 * nfs access vnode op.
 * For nfs version 2, just return ok. File accesses may fail later.
 * For nfs versions 3 and 4, use the ACCESS rpc to check accessibility. If
 * file modes are changed on the server, accesses might still fail later.
 */
341 static int
342 nfs_access(struct vop_access_args *ap)
343 {
344 struct vnode *vp = ap->a_vp;
345 int error = 0, i, gotahit;
346 u_int32_t mode, wmode, rmode;
347 int v34 = NFS_ISV34(vp);
348 struct nfsnode *np = VTONFS(vp);
349
350 /*
351 * Disallow write attempts on filesystems mounted read-only;
352 * unless the file is a socket, fifo, or a block or character
353 * device resident on the filesystem.
354 */
355 if ((ap->a_accmode & (VWRITE | VAPPEND | VWRITE_NAMED_ATTRS |
356 VDELETE_CHILD | VWRITE_ATTRIBUTES | VDELETE | VWRITE_ACL |
357 VWRITE_OWNER)) != 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
358 switch (vp->v_type) {
359 case VREG:
360 case VDIR:
361 case VLNK:
362 return (EROFS);
363 default:
364 break;
365 }
366 }
367 /*
368 * For nfs v3 or v4, check to see if we have done this recently, and if
369 * so return our cached result instead of making an ACCESS call.
370 * If not, do an access rpc, otherwise you are stuck emulating
371 * ufs_access() locally using the vattr. This may not be correct,
372 * since the server may apply other access criteria such as
373 * client uid-->server uid mapping that we do not know about.
374 */
375 if (v34) {
376 if (ap->a_accmode & VREAD)
377 mode = NFSACCESS_READ;
378 else
379 mode = 0;
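		/*
		 * Translate the remaining VOP access bits into NFSv3/v4
		 * ACCESS bits.  For directories, VEXEC corresponds to LOOKUP
		 * rather than EXECUTE, and VDELETE_CHILD to MODIFY of the
		 * directory itself.
		 */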
380 if (vp->v_type != VDIR) {
381 if (ap->a_accmode & VWRITE)
382 mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
383 if (ap->a_accmode & VAPPEND)
384 mode |= NFSACCESS_EXTEND;
385 if (ap->a_accmode & VEXEC)
386 mode |= NFSACCESS_EXECUTE;
387 if (ap->a_accmode & VDELETE)
388 mode |= NFSACCESS_DELETE;
389 } else {
390 if (ap->a_accmode & VWRITE)
391 mode |= (NFSACCESS_MODIFY | NFSACCESS_EXTEND);
392 if (ap->a_accmode & VAPPEND)
393 mode |= NFSACCESS_EXTEND;
394 if (ap->a_accmode & VEXEC)
395 mode |= NFSACCESS_LOOKUP;
396 if (ap->a_accmode & VDELETE)
397 mode |= NFSACCESS_DELETE;
398 if (ap->a_accmode & VDELETE_CHILD)
399 mode |= NFSACCESS_MODIFY;
400 }
401 /* XXX safety belt, only make blanket request if caching */
402 if (nfsaccess_cache_timeout > 0) {
403 wmode = NFSACCESS_READ | NFSACCESS_MODIFY |
404 NFSACCESS_EXTEND | NFSACCESS_EXECUTE |
405 NFSACCESS_DELETE | NFSACCESS_LOOKUP;
406 } else {
407 wmode = mode;
408 }
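		/*
		 * With caching enabled, the blanket request above lets a
		 * single ACCESS reply satisfy later checks for other modes
		 * by the same credential.
		 */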
409
410 /*
411 * Does our cached result allow us to give a definite yes to
412 * this request?
413 */
414 gotahit = 0;
415 mtx_lock(&np->n_mtx);
416 for (i = 0; i < NFS_ACCESSCACHESIZE; i++) {
417 if (ap->a_cred->cr_uid == np->n_accesscache[i].uid) {
418 if (time_second < (np->n_accesscache[i].stamp
419 + nfsaccess_cache_timeout) &&
420 (np->n_accesscache[i].mode & mode) == mode) {
421 NFSINCRGLOBAL(newnfsstats.accesscache_hits);
422 gotahit = 1;
423 }
424 break;
425 }
426 }
427 mtx_unlock(&np->n_mtx);
428 #ifdef KDTRACE_HOOKS
429 if (gotahit != 0)
430 KDTRACE_NFS_ACCESSCACHE_GET_HIT(vp,
431 ap->a_cred->cr_uid, mode);
432 else
433 KDTRACE_NFS_ACCESSCACHE_GET_MISS(vp,
434 ap->a_cred->cr_uid, mode);
435 #endif
436 if (gotahit == 0) {
437 /*
438 * Either a no, or a don't know. Go to the wire.
439 */
440 NFSINCRGLOBAL(newnfsstats.accesscache_misses);
441 error = nfs34_access_otw(vp, wmode, ap->a_td,
442 ap->a_cred, &rmode);
443 if (!error &&
444 (rmode & mode) != mode)
445 error = EACCES;
446 }
447 return (error);
448 } else {
449 if ((error = nfsspec_access(ap)) != 0) {
450 return (error);
451 }
452 /*
453 * Attempt to prevent a mapped root from accessing a file
454 * which it shouldn't. We try to read a byte from the file
455 * if the user is root and the file is not zero length.
456 * After calling nfsspec_access, we should have the correct
457 * file size cached.
458 */
459 mtx_lock(&np->n_mtx);
460 if (ap->a_cred->cr_uid == 0 && (ap->a_accmode & VREAD)
461 && VTONFS(vp)->n_size > 0) {
462 struct iovec aiov;
463 struct uio auio;
464 char buf[1];
465
466 mtx_unlock(&np->n_mtx);
467 aiov.iov_base = buf;
468 aiov.iov_len = 1;
469 auio.uio_iov = &aiov;
470 auio.uio_iovcnt = 1;
471 auio.uio_offset = 0;
472 auio.uio_resid = 1;
473 auio.uio_segflg = UIO_SYSSPACE;
474 auio.uio_rw = UIO_READ;
475 auio.uio_td = ap->a_td;
476
477 if (vp->v_type == VREG)
478 error = ncl_readrpc(vp, &auio, ap->a_cred);
479 else if (vp->v_type == VDIR) {
480 char* bp;
481 bp = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK);
482 aiov.iov_base = bp;
483 aiov.iov_len = auio.uio_resid = NFS_DIRBLKSIZ;
484 error = ncl_readdirrpc(vp, &auio, ap->a_cred,
485 ap->a_td);
486 free(bp, M_TEMP);
487 } else if (vp->v_type == VLNK)
488 error = ncl_readlinkrpc(vp, &auio, ap->a_cred);
489 else
490 error = EACCES;
491 } else
492 mtx_unlock(&np->n_mtx);
493 return (error);
494 }
495 }
496
497
498 /*
499 * nfs open vnode op
500 * Check to see if the type is ok
501 * and that deletion is not in progress.
502 * For paged in text files, you will need to flush the page cache
503 * if consistency is lost.
504 */
505 /* ARGSUSED */
506 static int
507 nfs_open(struct vop_open_args *ap)
508 {
509 struct vnode *vp = ap->a_vp;
510 struct nfsnode *np = VTONFS(vp);
511 struct vattr vattr;
512 int error;
513 int fmode = ap->a_mode;
514 struct ucred *cred;
515
516 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK)
517 return (EOPNOTSUPP);
518
519 /*
520 * For NFSv4, we need to do the Open Op before cache validation,
521 * so that we conform to RFC3530 Sec. 9.3.1.
522 */
523 if (NFS_ISV4(vp)) {
524 error = nfsrpc_open(vp, fmode, ap->a_cred, ap->a_td);
525 if (error) {
526 error = nfscl_maperr(ap->a_td, error, (uid_t)0,
527 (gid_t)0);
528 return (error);
529 }
530 }
531
532 /*
533 * Now, if this Open will be doing reading, re-validate/flush the
534 * cache, so that Close/Open coherency is maintained.
535 */
536 mtx_lock(&np->n_mtx);
537 if (np->n_flag & NMODIFIED) {
538 mtx_unlock(&np->n_mtx);
539 error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
540 if (error == EINTR || error == EIO) {
541 if (NFS_ISV4(vp))
542 (void) nfsrpc_close(vp, 0, ap->a_td);
543 return (error);
544 }
545 mtx_lock(&np->n_mtx);
546 np->n_attrstamp = 0;
547 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
548 if (vp->v_type == VDIR)
549 np->n_direofoffset = 0;
550 mtx_unlock(&np->n_mtx);
551 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
552 if (error) {
553 if (NFS_ISV4(vp))
554 (void) nfsrpc_close(vp, 0, ap->a_td);
555 return (error);
556 }
557 mtx_lock(&np->n_mtx);
558 np->n_mtime = vattr.va_mtime;
559 if (NFS_ISV4(vp))
560 np->n_change = vattr.va_filerev;
561 } else {
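		/*
		 * Not locally modified: fetch fresh attributes and, if the
		 * file has changed on the server (change attribute for NFSv4,
		 * mtime otherwise), invalidate the cached buffers.
		 */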
562 mtx_unlock(&np->n_mtx);
563 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
564 if (error) {
565 if (NFS_ISV4(vp))
566 (void) nfsrpc_close(vp, 0, ap->a_td);
567 return (error);
568 }
569 mtx_lock(&np->n_mtx);
570 if ((NFS_ISV4(vp) && np->n_change != vattr.va_filerev) ||
571 NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
572 if (vp->v_type == VDIR)
573 np->n_direofoffset = 0;
574 mtx_unlock(&np->n_mtx);
575 error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
576 if (error == EINTR || error == EIO) {
577 if (NFS_ISV4(vp))
578 (void) nfsrpc_close(vp, 0, ap->a_td);
579 return (error);
580 }
581 mtx_lock(&np->n_mtx);
582 np->n_mtime = vattr.va_mtime;
583 if (NFS_ISV4(vp))
584 np->n_change = vattr.va_filerev;
585 }
586 }
587
588 /*
589 * If the object has >= 1 O_DIRECT active opens, we disable caching.
590 */
591 if (newnfs_directio_enable && (fmode & O_DIRECT) &&
592 (vp->v_type == VREG)) {
593 if (np->n_directio_opens == 0) {
594 mtx_unlock(&np->n_mtx);
595 error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
596 if (error) {
597 if (NFS_ISV4(vp))
598 (void) nfsrpc_close(vp, 0, ap->a_td);
599 return (error);
600 }
601 mtx_lock(&np->n_mtx);
602 np->n_flag |= NNONCACHE;
603 }
604 np->n_directio_opens++;
605 }
606
607 /* If opened for writing via NFSv4.1 or later, mark that for pNFS. */
608 if (NFSHASPNFS(VFSTONFS(vp->v_mount)) && (fmode & FWRITE) != 0)
609 np->n_flag |= NWRITEOPENED;
610
611 /*
612 * If this is an open for writing, capture a reference to the
613 * credentials, so they can be used by ncl_putpages(). Using
614 * these write credentials is preferable to the credentials of
615 * whatever thread happens to be doing the VOP_PUTPAGES() since
616 * the write RPCs are less likely to fail with EACCES.
617 */
618 if ((fmode & FWRITE) != 0) {
619 cred = np->n_writecred;
620 np->n_writecred = crhold(ap->a_cred);
621 } else
622 cred = NULL;
623 mtx_unlock(&np->n_mtx);
624
625 if (cred != NULL)
626 crfree(cred);
627 vnode_create_vobject(vp, vattr.va_size, ap->a_td);
628 return (0);
629 }
630
631 /*
632 * nfs close vnode op
633 * What an NFS client should do upon close after writing is a debatable issue.
634 * Most NFS clients push delayed writes to the server upon close, basically for
635 * two reasons:
636 * 1 - So that any write errors may be reported back to the client process
637 * doing the close system call. By far the two most likely errors are
638 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
639 * 2 - To put a worst case upper bound on cache inconsistency between
640 * multiple clients for the file.
641 * There is also a consistency problem for Version 2 of the protocol w.r.t.
642 * not being able to tell if other clients are writing a file concurrently,
643 * since there is no way of knowing if the changed modify time in the reply
644 * is only due to the write for this client.
645 * (NFS Version 3 provides weak cache consistency data in the reply that
646 * should be sufficient to detect and handle this case.)
647 *
648 * The current code does the following:
649 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
650 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
651 * or commit them (this satisfies 1 and 2 except for the
652 * case where the server crashes after this close but
653 * before the commit RPC, which is felt to be "good
 *                     enough"). Changing the last argument to ncl_flush() to
655 * a 1 would force a commit operation, if it is felt a
656 * commit is necessary now.
657 * for NFS Version 4 - flush the dirty buffers and commit them, if
658 * nfscl_mustflush() says this is necessary.
659 * It is necessary if there is no write delegation held,
660 * in order to satisfy open/close coherency.
661 * If the file isn't cached on local stable storage,
662 * it may be necessary in order to detect "out of space"
663 * errors from the server, if the write delegation
664 * issued by the server doesn't allow the file to grow.
665 */
666 /* ARGSUSED */
667 static int
668 nfs_close(struct vop_close_args *ap)
669 {
670 struct vnode *vp = ap->a_vp;
671 struct nfsnode *np = VTONFS(vp);
672 struct nfsvattr nfsva;
673 struct ucred *cred;
674 int error = 0, ret, localcred = 0;
675 int fmode = ap->a_fflag;
676
677 if ((vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF))
678 return (0);
679 /*
680 * During shutdown, a_cred isn't valid, so just use root.
681 */
682 if (ap->a_cred == NOCRED) {
683 cred = newnfs_getcred();
684 localcred = 1;
685 } else {
686 cred = ap->a_cred;
687 }
688 if (vp->v_type == VREG) {
689 /*
690 * Examine and clean dirty pages, regardless of NMODIFIED.
691 * This closes a major hole in close-to-open consistency.
692 * We want to push out all dirty pages (and buffers) on
693 * close, regardless of whether they were dirtied by
694 * mmap'ed writes or via write().
695 */
696 if (nfs_clean_pages_on_close && vp->v_object) {
697 VM_OBJECT_WLOCK(vp->v_object);
698 vm_object_page_clean(vp->v_object, 0, 0, 0);
699 VM_OBJECT_WUNLOCK(vp->v_object);
700 }
701 mtx_lock(&np->n_mtx);
702 if (np->n_flag & NMODIFIED) {
703 mtx_unlock(&np->n_mtx);
704 if (NFS_ISV3(vp)) {
705 /*
706 * Under NFSv3 we have dirty buffers to dispose of. We
707 * must flush them to the NFS server. We have the option
708 * of waiting all the way through the commit rpc or just
709 * waiting for the initial write. The default is to only
710 * wait through the initial write so the data is in the
711 * server's cache, which is roughly similar to the state
712 * a standard disk subsystem leaves the file in on close().
713 *
714 * We cannot clear the NMODIFIED bit in np->n_flag due to
715 * potential races with other processes, and certainly
716 * cannot clear it if we don't commit.
717 * These races occur when there is no longer the old
718 * traditional vnode locking implemented for Vnode Ops.
719 */
720 int cm = newnfs_commit_on_close ? 1 : 0;
721 error = ncl_flush(vp, MNT_WAIT, cred, ap->a_td, cm, 0);
722 /* np->n_flag &= ~NMODIFIED; */
723 } else if (NFS_ISV4(vp)) {
724 if (nfscl_mustflush(vp) != 0) {
725 int cm = newnfs_commit_on_close ? 1 : 0;
726 error = ncl_flush(vp, MNT_WAIT, cred, ap->a_td,
727 cm, 0);
728 /*
729 * as above w.r.t races when clearing
730 * NMODIFIED.
731 * np->n_flag &= ~NMODIFIED;
732 */
733 }
734 } else
735 error = ncl_vinvalbuf(vp, V_SAVE, ap->a_td, 1);
736 mtx_lock(&np->n_mtx);
737 }
738 /*
739 * Invalidate the attribute cache in all cases.
		 * An open is going to fetch fresh attrs anyway; other procs
741 * on this node that have file open will be forced to do an
742 * otw attr fetch, but this is safe.
743 * --> A user found that their RPC count dropped by 20% when
744 * this was commented out and I can't see any requirement
745 * for it, so I've disabled it when negative lookups are
746 * enabled. (What does this have to do with negative lookup
747 * caching? Well nothing, except it was reported by the
748 * same user that needed negative lookup caching and I wanted
749 * there to be a way to disable it to see if it
750 * is the cause of some caching/coherency issue that might
751 * crop up.)
752 */
753 if (VFSTONFS(vp->v_mount)->nm_negnametimeo == 0) {
754 np->n_attrstamp = 0;
755 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
756 }
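		/*
		 * Report any write error recorded by an earlier asynchronous
		 * write back to this close().
		 */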
757 if (np->n_flag & NWRITEERR) {
758 np->n_flag &= ~NWRITEERR;
759 error = np->n_error;
760 }
761 mtx_unlock(&np->n_mtx);
762 }
763
764 if (NFS_ISV4(vp)) {
765 /*
766 * Get attributes so "change" is up to date.
767 */
768 if (error == 0 && nfscl_mustflush(vp) != 0 &&
769 vp->v_type == VREG &&
770 (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOCTO) == 0) {
771 ret = nfsrpc_getattr(vp, cred, ap->a_td, &nfsva,
772 NULL);
773 if (!ret) {
774 np->n_change = nfsva.na_filerev;
775 (void) nfscl_loadattrcache(&vp, &nfsva, NULL,
776 NULL, 0, 0);
777 }
778 }
779
780 /*
781 * and do the close.
782 */
783 ret = nfsrpc_close(vp, 0, ap->a_td);
784 if (!error && ret)
785 error = ret;
786 if (error)
787 error = nfscl_maperr(ap->a_td, error, (uid_t)0,
788 (gid_t)0);
789 }
790 if (newnfs_directio_enable)
791 KASSERT((np->n_directio_asyncwr == 0),
792 ("nfs_close: dirty unflushed (%d) directio buffers\n",
793 np->n_directio_asyncwr));
794 if (newnfs_directio_enable && (fmode & O_DIRECT) && (vp->v_type == VREG)) {
795 mtx_lock(&np->n_mtx);
796 KASSERT((np->n_directio_opens > 0),
797 ("nfs_close: unexpectedly value (0) of n_directio_opens\n"));
798 np->n_directio_opens--;
799 if (np->n_directio_opens == 0)
800 np->n_flag &= ~NNONCACHE;
801 mtx_unlock(&np->n_mtx);
802 }
803 if (localcred)
804 NFSFREECRED(cred);
805 return (error);
806 }
807
808 /*
809 * nfs getattr call from vfs.
810 */
811 static int
812 nfs_getattr(struct vop_getattr_args *ap)
813 {
814 struct vnode *vp = ap->a_vp;
815 struct thread *td = curthread; /* XXX */
816 struct nfsnode *np = VTONFS(vp);
817 int error = 0;
818 struct nfsvattr nfsva;
819 struct vattr *vap = ap->a_vap;
820 struct vattr vattr;
821
822 /*
823 * Update local times for special files.
824 */
825 mtx_lock(&np->n_mtx);
826 if (np->n_flag & (NACC | NUPD))
827 np->n_flag |= NCHG;
828 mtx_unlock(&np->n_mtx);
829 /*
830 * First look in the cache.
831 */
832 if (ncl_getattrcache(vp, &vattr) == 0) {
833 vap->va_type = vattr.va_type;
834 vap->va_mode = vattr.va_mode;
835 vap->va_nlink = vattr.va_nlink;
836 vap->va_uid = vattr.va_uid;
837 vap->va_gid = vattr.va_gid;
838 vap->va_fsid = vattr.va_fsid;
839 vap->va_fileid = vattr.va_fileid;
840 vap->va_size = vattr.va_size;
841 vap->va_blocksize = vattr.va_blocksize;
842 vap->va_atime = vattr.va_atime;
843 vap->va_mtime = vattr.va_mtime;
844 vap->va_ctime = vattr.va_ctime;
845 vap->va_gen = vattr.va_gen;
846 vap->va_flags = vattr.va_flags;
847 vap->va_rdev = vattr.va_rdev;
848 vap->va_bytes = vattr.va_bytes;
849 vap->va_filerev = vattr.va_filerev;
850 /*
851 * Get the local modify time for the case of a write
852 * delegation.
853 */
854 nfscl_deleggetmodtime(vp, &vap->va_mtime);
855 return (0);
856 }
857
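	/*
	 * When access cache priming is enabled, issue an ACCESS RPC first;
	 * its post-op attributes refresh the attribute cache as a side
	 * effect, so retry the cache before falling back to a GETATTR RPC.
	 */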
858 if (NFS_ISV34(vp) && nfs_prime_access_cache &&
859 nfsaccess_cache_timeout > 0) {
860 NFSINCRGLOBAL(newnfsstats.accesscache_misses);
861 nfs34_access_otw(vp, NFSACCESS_ALL, td, ap->a_cred, NULL);
862 if (ncl_getattrcache(vp, ap->a_vap) == 0) {
863 nfscl_deleggetmodtime(vp, &ap->a_vap->va_mtime);
864 return (0);
865 }
866 }
867 error = nfsrpc_getattr(vp, ap->a_cred, td, &nfsva, NULL);
868 if (!error)
869 error = nfscl_loadattrcache(&vp, &nfsva, vap, NULL, 0, 0);
870 if (!error) {
871 /*
872 * Get the local modify time for the case of a write
873 * delegation.
874 */
875 nfscl_deleggetmodtime(vp, &vap->va_mtime);
876 } else if (NFS_ISV4(vp)) {
877 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
878 }
879 return (error);
880 }
881
882 /*
883 * nfs setattr call.
884 */
885 static int
886 nfs_setattr(struct vop_setattr_args *ap)
887 {
888 struct vnode *vp = ap->a_vp;
889 struct nfsnode *np = VTONFS(vp);
890 struct thread *td = curthread; /* XXX */
891 struct vattr *vap = ap->a_vap;
892 int error = 0;
893 u_quad_t tsize;
894
895 #ifndef nolint
896 tsize = (u_quad_t)0;
897 #endif
898
899 /*
900 * Setting of flags and marking of atimes are not supported.
901 */
902 if (vap->va_flags != VNOVAL)
903 return (EOPNOTSUPP);
904
905 /*
906 * Disallow write attempts if the filesystem is mounted read-only.
907 */
908 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
909 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
910 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
911 (vp->v_mount->mnt_flag & MNT_RDONLY))
912 return (EROFS);
913 if (vap->va_size != VNOVAL) {
914 switch (vp->v_type) {
915 case VDIR:
916 return (EISDIR);
917 case VCHR:
918 case VBLK:
919 case VSOCK:
920 case VFIFO:
921 if (vap->va_mtime.tv_sec == VNOVAL &&
922 vap->va_atime.tv_sec == VNOVAL &&
923 vap->va_mode == (mode_t)VNOVAL &&
924 vap->va_uid == (uid_t)VNOVAL &&
925 vap->va_gid == (gid_t)VNOVAL)
926 return (0);
927 vap->va_size = VNOVAL;
928 break;
929 default:
930 /*
931 * Disallow write attempts if the filesystem is
932 * mounted read-only.
933 */
934 if (vp->v_mount->mnt_flag & MNT_RDONLY)
935 return (EROFS);
936 /*
			 * We run vnode_pager_setsize() early (why?);
938 * we must set np->n_size now to avoid vinvalbuf
939 * V_SAVE races that might setsize a lower
940 * value.
941 */
942 mtx_lock(&np->n_mtx);
943 tsize = np->n_size;
944 mtx_unlock(&np->n_mtx);
945 error = ncl_meta_setsize(vp, ap->a_cred, td,
946 vap->va_size);
947 mtx_lock(&np->n_mtx);
948 if (np->n_flag & NMODIFIED) {
949 tsize = np->n_size;
950 mtx_unlock(&np->n_mtx);
951 if (vap->va_size == 0)
952 error = ncl_vinvalbuf(vp, 0, td, 1);
953 else
954 error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
955 if (error) {
956 vnode_pager_setsize(vp, tsize);
957 return (error);
958 }
959 /*
960 * Call nfscl_delegmodtime() to set the modify time
961 * locally, as required.
962 */
963 nfscl_delegmodtime(vp);
964 } else
965 mtx_unlock(&np->n_mtx);
966 /*
967 * np->n_size has already been set to vap->va_size
968 * in ncl_meta_setsize(). We must set it again since
969 * nfs_loadattrcache() could be called through
970 * ncl_meta_setsize() and could modify np->n_size.
971 */
972 mtx_lock(&np->n_mtx);
973 np->n_vattr.na_size = np->n_size = vap->va_size;
974 mtx_unlock(&np->n_mtx);
975 }
976 } else {
977 mtx_lock(&np->n_mtx);
978 if ((vap->va_mtime.tv_sec != VNOVAL || vap->va_atime.tv_sec != VNOVAL) &&
979 (np->n_flag & NMODIFIED) && vp->v_type == VREG) {
980 mtx_unlock(&np->n_mtx);
981 if ((error = ncl_vinvalbuf(vp, V_SAVE, td, 1)) != 0 &&
982 (error == EINTR || error == EIO))
983 return (error);
984 } else
985 mtx_unlock(&np->n_mtx);
986 }
987 error = nfs_setattrrpc(vp, vap, ap->a_cred, td);
988 if (error && vap->va_size != VNOVAL) {
989 mtx_lock(&np->n_mtx);
990 np->n_size = np->n_vattr.na_size = tsize;
991 vnode_pager_setsize(vp, tsize);
992 mtx_unlock(&np->n_mtx);
993 }
994 return (error);
995 }
996
997 /*
998 * Do an nfs setattr rpc.
999 */
1000 static int
1001 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred,
1002 struct thread *td)
1003 {
1004 struct nfsnode *np = VTONFS(vp);
1005 int error, ret, attrflag, i;
1006 struct nfsvattr nfsva;
1007
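	/*
	 * Changing attributes can change the outcome of ACCESS checks, so
	 * throw away all cached ACCESS results (and note the local
	 * modification for the delegation code) before doing the RPC.
	 */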
1008 if (NFS_ISV34(vp)) {
1009 mtx_lock(&np->n_mtx);
1010 for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
1011 np->n_accesscache[i].stamp = 0;
1012 np->n_flag |= NDELEGMOD;
1013 mtx_unlock(&np->n_mtx);
1014 KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
1015 }
1016 error = nfsrpc_setattr(vp, vap, NULL, cred, td, &nfsva, &attrflag,
1017 NULL);
1018 if (attrflag) {
1019 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
1020 if (ret && !error)
1021 error = ret;
1022 }
1023 if (error && NFS_ISV4(vp))
1024 error = nfscl_maperr(td, error, vap->va_uid, vap->va_gid);
1025 return (error);
1026 }
1027
1028 /*
1029 * nfs lookup call, one step at a time...
1030 * First look in cache
1031 * If not found, unlock the directory nfsnode and do the rpc
1032 */
1033 static int
1034 nfs_lookup(struct vop_lookup_args *ap)
1035 {
1036 struct componentname *cnp = ap->a_cnp;
1037 struct vnode *dvp = ap->a_dvp;
1038 struct vnode **vpp = ap->a_vpp;
1039 struct mount *mp = dvp->v_mount;
1040 int flags = cnp->cn_flags;
1041 struct vnode *newvp;
1042 struct nfsmount *nmp;
1043 struct nfsnode *np, *newnp;
1044 int error = 0, attrflag, dattrflag, ltype, ncticks;
1045 struct thread *td = cnp->cn_thread;
1046 struct nfsfh *nfhp;
1047 struct nfsvattr dnfsva, nfsva;
1048 struct vattr vattr;
1049 struct timespec nctime;
1050
1051 *vpp = NULLVP;
1052 if ((flags & ISLASTCN) && (mp->mnt_flag & MNT_RDONLY) &&
1053 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
1054 return (EROFS);
1055 if (dvp->v_type != VDIR)
1056 return (ENOTDIR);
1057 nmp = VFSTONFS(mp);
1058 np = VTONFS(dvp);
1059
1060 /* For NFSv4, wait until any remove is done. */
1061 mtx_lock(&np->n_mtx);
1062 while (NFSHASNFSV4(nmp) && (np->n_flag & NREMOVEINPROG)) {
1063 np->n_flag |= NREMOVEWANT;
1064 (void) msleep((caddr_t)np, &np->n_mtx, PZERO, "nfslkup", 0);
1065 }
1066 mtx_unlock(&np->n_mtx);
1067
1068 if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td)) != 0)
1069 return (error);
1070 error = cache_lookup(dvp, vpp, cnp, &nctime, &ncticks);
1071 if (error > 0 && error != ENOENT)
1072 return (error);
1073 if (error == -1) {
1074 /*
1075 * Lookups of "." are special and always return the
1076 * current directory. cache_lookup() already handles
1077 * associated locking bookkeeping, etc.
1078 */
1079 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
1080 /* XXX: Is this really correct? */
1081 if (cnp->cn_nameiop != LOOKUP &&
1082 (flags & ISLASTCN))
1083 cnp->cn_flags |= SAVENAME;
1084 return (0);
1085 }
1086
1087 /*
1088 * We only accept a positive hit in the cache if the
1089 * change time of the file matches our cached copy.
1090 * Otherwise, we discard the cache entry and fallback
1091 * to doing a lookup RPC. We also only trust cache
1092 * entries for less than nm_nametimeo seconds.
1093 *
1094 * To better handle stale file handles and attributes,
1095 * clear the attribute cache of this node if it is a
1096 * leaf component, part of an open() call, and not
1097 * locally modified before fetching the attributes.
1098 * This should allow stale file handles to be detected
1099 * here where we can fall back to a LOOKUP RPC to
1100 * recover rather than having nfs_open() detect the
1101 * stale file handle and failing open(2) with ESTALE.
1102 */
1103 newvp = *vpp;
1104 newnp = VTONFS(newvp);
1105 if (!(nmp->nm_flag & NFSMNT_NOCTO) &&
1106 (flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
1107 !(newnp->n_flag & NMODIFIED)) {
1108 mtx_lock(&newnp->n_mtx);
1109 newnp->n_attrstamp = 0;
1110 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
1111 mtx_unlock(&newnp->n_mtx);
1112 }
1113 if (nfscl_nodeleg(newvp, 0) == 0 ||
1114 ((u_int)(ticks - ncticks) < (nmp->nm_nametimeo * hz) &&
1115 VOP_GETATTR(newvp, &vattr, cnp->cn_cred) == 0 &&
1116 timespeccmp(&vattr.va_ctime, &nctime, ==))) {
1117 NFSINCRGLOBAL(newnfsstats.lookupcache_hits);
1118 if (cnp->cn_nameiop != LOOKUP &&
1119 (flags & ISLASTCN))
1120 cnp->cn_flags |= SAVENAME;
1121 return (0);
1122 }
1123 cache_purge(newvp);
1124 if (dvp != newvp)
1125 vput(newvp);
1126 else
1127 vrele(newvp);
1128 *vpp = NULLVP;
1129 } else if (error == ENOENT) {
1130 if (dvp->v_iflag & VI_DOOMED)
1131 return (ENOENT);
1132 /*
1133 * We only accept a negative hit in the cache if the
1134 * modification time of the parent directory matches
1135 * the cached copy in the name cache entry.
1136 * Otherwise, we discard all of the negative cache
1137 * entries for this directory. We also only trust
1138 * negative cache entries for up to nm_negnametimeo
1139 * seconds.
1140 */
1141 if ((u_int)(ticks - ncticks) < (nmp->nm_negnametimeo * hz) &&
1142 VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 &&
1143 timespeccmp(&vattr.va_mtime, &nctime, ==)) {
1144 NFSINCRGLOBAL(newnfsstats.lookupcache_hits);
1145 return (ENOENT);
1146 }
1147 cache_purge_negative(dvp);
1148 }
1149
1150 error = 0;
1151 newvp = NULLVP;
1152 NFSINCRGLOBAL(newnfsstats.lookupcache_misses);
1153 error = nfsrpc_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
1154 cnp->cn_cred, td, &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
1155 NULL);
1156 if (dattrflag)
1157 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
1158 if (error) {
1159 if (newvp != NULLVP) {
1160 vput(newvp);
1161 *vpp = NULLVP;
1162 }
1163
1164 if (error != ENOENT) {
1165 if (NFS_ISV4(dvp))
1166 error = nfscl_maperr(td, error, (uid_t)0,
1167 (gid_t)0);
1168 return (error);
1169 }
1170
1171 /* The requested file was not found. */
1172 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
1173 (flags & ISLASTCN)) {
1174 /*
1175 * XXX: UFS does a full VOP_ACCESS(dvp,
1176 * VWRITE) here instead of just checking
1177 * MNT_RDONLY.
1178 */
1179 if (mp->mnt_flag & MNT_RDONLY)
1180 return (EROFS);
1181 cnp->cn_flags |= SAVENAME;
1182 return (EJUSTRETURN);
1183 }
1184
1185 if ((cnp->cn_flags & MAKEENTRY) != 0 && dattrflag) {
1186 /*
1187 * Cache the modification time of the parent
1188 * directory from the post-op attributes in
1189 * the name cache entry. The negative cache
1190 * entry will be ignored once the directory
1191 * has changed. Don't bother adding the entry
1192 * if the directory has already changed.
1193 */
1194 mtx_lock(&np->n_mtx);
1195 if (timespeccmp(&np->n_vattr.na_mtime,
1196 &dnfsva.na_mtime, ==)) {
1197 mtx_unlock(&np->n_mtx);
1198 cache_enter_time(dvp, NULL, cnp,
1199 &dnfsva.na_mtime, NULL);
1200 } else
1201 mtx_unlock(&np->n_mtx);
1202 }
1203 return (ENOENT);
1204 }
1205
1206 /*
1207 * Handle RENAME case...
1208 */
1209 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
1210 if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
1211 FREE((caddr_t)nfhp, M_NFSFH);
1212 return (EISDIR);
1213 }
1214 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
1215 LK_EXCLUSIVE);
1216 if (error)
1217 return (error);
1218 newvp = NFSTOV(np);
1219 if (attrflag)
1220 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
1221 0, 1);
1222 *vpp = newvp;
1223 cnp->cn_flags |= SAVENAME;
1224 return (0);
1225 }
1226
1227 if (flags & ISDOTDOT) {
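		/*
		 * For ".." the directory must be unlocked before the parent
		 * vnode is acquired, to preserve the vnode lock order.
		 * vfs_busy() keeps the mount from being unmounted while dvp
		 * is unlocked, and dvp is re-checked for VI_DOOMED afterwards
		 * in case it was recycled in the meantime.
		 */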
1228 ltype = NFSVOPISLOCKED(dvp);
1229 error = vfs_busy(mp, MBF_NOWAIT);
1230 if (error != 0) {
1231 vfs_ref(mp);
1232 NFSVOPUNLOCK(dvp, 0);
1233 error = vfs_busy(mp, 0);
1234 NFSVOPLOCK(dvp, ltype | LK_RETRY);
1235 vfs_rel(mp);
1236 if (error == 0 && (dvp->v_iflag & VI_DOOMED)) {
1237 vfs_unbusy(mp);
1238 error = ENOENT;
1239 }
1240 if (error != 0)
1241 return (error);
1242 }
1243 NFSVOPUNLOCK(dvp, 0);
1244 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
1245 cnp->cn_lkflags);
1246 if (error == 0)
1247 newvp = NFSTOV(np);
1248 vfs_unbusy(mp);
1249 if (newvp != dvp)
1250 NFSVOPLOCK(dvp, ltype | LK_RETRY);
1251 if (dvp->v_iflag & VI_DOOMED) {
1252 if (error == 0) {
1253 if (newvp == dvp)
1254 vrele(newvp);
1255 else
1256 vput(newvp);
1257 }
1258 error = ENOENT;
1259 }
1260 if (error != 0)
1261 return (error);
1262 if (attrflag)
1263 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
1264 0, 1);
1265 } else if (NFS_CMPFH(np, nfhp->nfh_fh, nfhp->nfh_len)) {
1266 FREE((caddr_t)nfhp, M_NFSFH);
1267 VREF(dvp);
1268 newvp = dvp;
1269 if (attrflag)
1270 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
1271 0, 1);
1272 } else {
1273 error = nfscl_nget(mp, dvp, nfhp, cnp, td, &np, NULL,
1274 cnp->cn_lkflags);
1275 if (error)
1276 return (error);
1277 newvp = NFSTOV(np);
1278 if (attrflag)
1279 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
1280 0, 1);
1281 else if ((flags & (ISLASTCN | ISOPEN)) == (ISLASTCN | ISOPEN) &&
1282 !(np->n_flag & NMODIFIED)) {
1283 /*
1284 * Flush the attribute cache when opening a
1285 * leaf node to ensure that fresh attributes
1286 * are fetched in nfs_open() since we did not
1287 * fetch attributes from the LOOKUP reply.
1288 */
1289 mtx_lock(&np->n_mtx);
1290 np->n_attrstamp = 0;
1291 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(newvp);
1292 mtx_unlock(&np->n_mtx);
1293 }
1294 }
1295 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
1296 cnp->cn_flags |= SAVENAME;
1297 if ((cnp->cn_flags & MAKEENTRY) &&
1298 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) &&
1299 attrflag != 0 && (newvp->v_type != VDIR || dattrflag != 0))
1300 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
1301 newvp->v_type != VDIR ? NULL : &dnfsva.na_ctime);
1302 *vpp = newvp;
1303 return (0);
1304 }
1305
1306 /*
1307 * nfs read call.
1308 * Just call ncl_bioread() to do the work.
1309 */
1310 static int
1311 nfs_read(struct vop_read_args *ap)
1312 {
1313 struct vnode *vp = ap->a_vp;
1314
1315 switch (vp->v_type) {
1316 case VREG:
1317 return (ncl_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred));
1318 case VDIR:
1319 return (EISDIR);
1320 default:
1321 return (EOPNOTSUPP);
1322 }
1323 }
1324
1325 /*
1326 * nfs readlink call
1327 */
1328 static int
1329 nfs_readlink(struct vop_readlink_args *ap)
1330 {
1331 struct vnode *vp = ap->a_vp;
1332
1333 if (vp->v_type != VLNK)
1334 return (EINVAL);
1335 return (ncl_bioread(vp, ap->a_uio, 0, ap->a_cred));
1336 }
1337
1338 /*
1339 * Do a readlink rpc.
1340 * Called by ncl_doio() from below the buffer cache.
1341 */
1342 int
1343 ncl_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
1344 {
1345 int error, ret, attrflag;
1346 struct nfsvattr nfsva;
1347
1348 error = nfsrpc_readlink(vp, uiop, cred, uiop->uio_td, &nfsva,
1349 &attrflag, NULL);
1350 if (attrflag) {
1351 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
1352 if (ret && !error)
1353 error = ret;
1354 }
1355 if (error && NFS_ISV4(vp))
1356 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
1357 return (error);
1358 }
1359
1360 /*
1361 * nfs read rpc call
1362 * Ditto above
1363 */
1364 int
1365 ncl_readrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred)
1366 {
1367 int error, ret, attrflag;
1368 struct nfsvattr nfsva;
1369 struct nfsmount *nmp;
1370
1371 nmp = VFSTONFS(vnode_mount(vp));
1372 error = EIO;
1373 attrflag = 0;
1374 if (NFSHASPNFS(nmp))
1375 error = nfscl_doiods(vp, uiop, NULL, NULL,
1376 NFSV4OPEN_ACCESSREAD, cred, uiop->uio_td);
1377 NFSCL_DEBUG(4, "readrpc: aft doiods=%d\n", error);
1378 if (error != 0)
1379 error = nfsrpc_read(vp, uiop, cred, uiop->uio_td, &nfsva,
1380 &attrflag, NULL);
1381 if (attrflag) {
1382 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
1383 if (ret && !error)
1384 error = ret;
1385 }
1386 if (error && NFS_ISV4(vp))
1387 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
1388 return (error);
1389 }
1390
1391 /*
1392 * nfs write call
1393 */
1394 int
1395 ncl_writerpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
1396 int *iomode, int *must_commit, int called_from_strategy)
1397 {
1398 struct nfsvattr nfsva;
1399 int error, attrflag, ret;
1400 struct nfsmount *nmp;
1401
1402 nmp = VFSTONFS(vnode_mount(vp));
1403 error = EIO;
1404 attrflag = 0;
1405 if (NFSHASPNFS(nmp))
1406 error = nfscl_doiods(vp, uiop, iomode, must_commit,
1407 NFSV4OPEN_ACCESSWRITE, cred, uiop->uio_td);
1408 NFSCL_DEBUG(4, "writerpc: aft doiods=%d\n", error);
1409 if (error != 0)
1410 error = nfsrpc_write(vp, uiop, iomode, must_commit, cred,
1411 uiop->uio_td, &nfsva, &attrflag, NULL,
1412 called_from_strategy);
1413 if (attrflag) {
1414 if (VTONFS(vp)->n_flag & ND_NFSV4)
1415 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 1,
1416 1);
1417 else
1418 ret = nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0,
1419 1);
1420 if (ret && !error)
1421 error = ret;
1422 }
1423 if (DOINGASYNC(vp))
1424 *iomode = NFSWRITE_FILESYNC;
1425 if (error && NFS_ISV4(vp))
1426 error = nfscl_maperr(uiop->uio_td, error, (uid_t)0, (gid_t)0);
1427 return (error);
1428 }
1429
1430 /*
1431 * nfs mknod rpc
1432 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1433 * mode set to specify the file type and the size field for rdev.
1434 */
1435 static int
1436 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
1437 struct vattr *vap)
1438 {
1439 struct nfsvattr nfsva, dnfsva;
1440 struct vnode *newvp = NULL;
1441 struct nfsnode *np = NULL, *dnp;
1442 struct nfsfh *nfhp;
1443 struct vattr vattr;
1444 int error = 0, attrflag, dattrflag;
1445 u_int32_t rdev;
1446
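	/*
	 * Pick the device number to send: the real rdev for character and
	 * block devices, an all-ones placeholder for fifos and sockets,
	 * which have none.
	 */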
1447 if (vap->va_type == VCHR || vap->va_type == VBLK)
1448 rdev = vap->va_rdev;
1449 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1450 rdev = 0xffffffff;
1451 else
1452 return (EOPNOTSUPP);
1453 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
1454 return (error);
1455 error = nfsrpc_mknod(dvp, cnp->cn_nameptr, cnp->cn_namelen, vap,
1456 rdev, vap->va_type, cnp->cn_cred, cnp->cn_thread, &dnfsva,
1457 &nfsva, &nfhp, &attrflag, &dattrflag, NULL);
1458 if (!error) {
1459 if (!nfhp)
1460 (void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
1461 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
1462 &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
1463 NULL);
1464 if (nfhp)
1465 error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
1466 cnp->cn_thread, &np, NULL, LK_EXCLUSIVE);
1467 }
1468 if (dattrflag)
1469 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
1470 if (!error) {
1471 newvp = NFSTOV(np);
1472 if (attrflag != 0) {
1473 error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
1474 0, 1);
1475 if (error != 0)
1476 vput(newvp);
1477 }
1478 }
1479 if (!error) {
1480 *vpp = newvp;
1481 } else if (NFS_ISV4(dvp)) {
1482 error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
1483 vap->va_gid);
1484 }
1485 dnp = VTONFS(dvp);
1486 mtx_lock(&dnp->n_mtx);
1487 dnp->n_flag |= NMODIFIED;
1488 if (!dattrflag) {
1489 dnp->n_attrstamp = 0;
1490 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
1491 }
1492 mtx_unlock(&dnp->n_mtx);
1493 return (error);
1494 }
1495
1496 /*
1497 * nfs mknod vop
1498 * just call nfs_mknodrpc() to do the work.
1499 */
1500 /* ARGSUSED */
1501 static int
1502 nfs_mknod(struct vop_mknod_args *ap)
1503 {
1504 return (nfs_mknodrpc(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap));
1505 }
1506
1507 static struct mtx nfs_cverf_mtx;
1508 MTX_SYSINIT(nfs_cverf_mtx, &nfs_cverf_mtx, "NFS create verifier mutex",
1509 MTX_DEF);
1510
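/*
 * Return a unique create verifier for NFSv3/v4 exclusive create.  The
 * verifier is seeded once with random data and then simply incremented, so
 * a retransmission of a CREATE carries the same verifier while distinct
 * creates get distinct values.
 */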
1511 static nfsquad_t
1512 nfs_get_cverf(void)
1513 {
1514 static nfsquad_t cverf;
1515 nfsquad_t ret;
1516 static int cverf_initialized = 0;
1517
1518 mtx_lock(&nfs_cverf_mtx);
1519 if (cverf_initialized == 0) {
1520 cverf.lval[0] = arc4random();
1521 cverf.lval[1] = arc4random();
1522 cverf_initialized = 1;
1523 } else
1524 cverf.qval++;
1525 ret = cverf;
1526 mtx_unlock(&nfs_cverf_mtx);
1527
1528 return (ret);
1529 }
1530
1531 /*
1532 * nfs file create call
1533 */
1534 static int
1535 nfs_create(struct vop_create_args *ap)
1536 {
1537 struct vnode *dvp = ap->a_dvp;
1538 struct vattr *vap = ap->a_vap;
1539 struct componentname *cnp = ap->a_cnp;
1540 struct nfsnode *np = NULL, *dnp;
1541 struct vnode *newvp = NULL;
1542 struct nfsmount *nmp;
1543 struct nfsvattr dnfsva, nfsva;
1544 struct nfsfh *nfhp;
1545 nfsquad_t cverf;
1546 int error = 0, attrflag, dattrflag, fmode = 0;
1547 struct vattr vattr;
1548
1549 /*
1550 * Oops, not for me..
1551 */
1552 if (vap->va_type == VSOCK)
1553 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1554
1555 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)))
1556 return (error);
1557 if (vap->va_vaflags & VA_EXCLUSIVE)
1558 fmode |= O_EXCL;
1559 dnp = VTONFS(dvp);
1560 nmp = VFSTONFS(vnode_mount(dvp));
1561 again:
1562 /* For NFSv4, wait until any remove is done. */
1563 mtx_lock(&dnp->n_mtx);
1564 while (NFSHASNFSV4(nmp) && (dnp->n_flag & NREMOVEINPROG)) {
1565 dnp->n_flag |= NREMOVEWANT;
1566 (void) msleep((caddr_t)dnp, &dnp->n_mtx, PZERO, "nfscrt", 0);
1567 }
1568 mtx_unlock(&dnp->n_mtx);
1569
1570 cverf = nfs_get_cverf();
1571 error = nfsrpc_create(dvp, cnp->cn_nameptr, cnp->cn_namelen,
1572 vap, cverf, fmode, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva,
1573 &nfhp, &attrflag, &dattrflag, NULL);
1574 if (!error) {
1575 if (nfhp == NULL)
1576 (void) nfsrpc_lookup(dvp, cnp->cn_nameptr,
1577 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread,
1578 &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
1579 NULL);
1580 if (nfhp != NULL)
1581 error = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp,
1582 cnp->cn_thread, &np, NULL, LK_EXCLUSIVE);
1583 }
1584 if (dattrflag)
1585 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
1586 if (!error) {
1587 newvp = NFSTOV(np);
1588 if (attrflag == 0)
1589 error = nfsrpc_getattr(newvp, cnp->cn_cred,
1590 cnp->cn_thread, &nfsva, NULL);
1591 if (error == 0)
1592 error = nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
1593 0, 1);
1594 }
1595 if (error) {
1596 if (newvp != NULL) {
1597 vput(newvp);
1598 newvp = NULL;
1599 }
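		/*
		 * Servers that do not support exclusive create return
		 * NFSERR_NOTSUPP; retry without O_EXCL in that case.
		 */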
1600 if (NFS_ISV34(dvp) && (fmode & O_EXCL) &&
1601 error == NFSERR_NOTSUPP) {
1602 fmode &= ~O_EXCL;
1603 goto again;
1604 }
1605 } else if (NFS_ISV34(dvp) && (fmode & O_EXCL)) {
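		/*
		 * An exclusive create stores the verifier in place of real
		 * attributes on the server, so the requested attributes are
		 * pushed with a follow-up SETATTR when needed.
		 */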
1606 if (nfscl_checksattr(vap, &nfsva)) {
1607 error = nfsrpc_setattr(newvp, vap, NULL, cnp->cn_cred,
1608 cnp->cn_thread, &nfsva, &attrflag, NULL);
1609 if (error && (vap->va_uid != (uid_t)VNOVAL ||
1610 vap->va_gid != (gid_t)VNOVAL)) {
1611 /* try again without setting uid/gid */
1612 vap->va_uid = (uid_t)VNOVAL;
1613 vap->va_gid = (uid_t)VNOVAL;
1614 error = nfsrpc_setattr(newvp, vap, NULL,
1615 cnp->cn_cred, cnp->cn_thread, &nfsva,
1616 &attrflag, NULL);
1617 }
1618 if (attrflag)
1619 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL,
1620 NULL, 0, 1);
1621 if (error != 0)
1622 vput(newvp);
1623 }
1624 }
1625 if (!error) {
1626 if ((cnp->cn_flags & MAKEENTRY) && attrflag)
1627 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
1628 NULL);
1629 *ap->a_vpp = newvp;
1630 } else if (NFS_ISV4(dvp)) {
1631 error = nfscl_maperr(cnp->cn_thread, error, vap->va_uid,
1632 vap->va_gid);
1633 }
1634 mtx_lock(&dnp->n_mtx);
1635 dnp->n_flag |= NMODIFIED;
1636 if (!dattrflag) {
1637 dnp->n_attrstamp = 0;
1638 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
1639 }
1640 mtx_unlock(&dnp->n_mtx);
1641 return (error);
1642 }
1643
1644 /*
1645 * nfs file remove call
1646 * To try and make nfs semantics closer to ufs semantics, a file that has
1647 * other processes using the vnode is renamed instead of removed and then
1648 * removed later on the last close.
1649 * - If v_usecount > 1
1650 * If a rename is not already in the works
1651 * call nfs_sillyrename() to set it up
1652 * else
1653 * do the remove rpc
1654 */
1655 static int
1656 nfs_remove(struct vop_remove_args *ap)
1657 {
1658 struct vnode *vp = ap->a_vp;
1659 struct vnode *dvp = ap->a_dvp;
1660 struct componentname *cnp = ap->a_cnp;
1661 struct nfsnode *np = VTONFS(vp);
1662 int error = 0;
1663 struct vattr vattr;
1664
1665 KASSERT((cnp->cn_flags & HASBUF) != 0, ("nfs_remove: no name"));
1666 KASSERT(vrefcnt(vp) > 0, ("nfs_remove: bad v_usecount"));
1667 if (vp->v_type == VDIR)
1668 error = EPERM;
1669 else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
1670 VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
1671 vattr.va_nlink > 1)) {
1672 /*
1673 * Purge the name cache so that the chance of a lookup for
1674 * the name succeeding while the remove is in progress is
1675 * minimized. Without node locking it can still happen, such
		 * that an I/O op returns ESTALE, but since that can happen
		 * anyway when another host removes the file, it is tolerated.
1678 */
1679 cache_purge(vp);
1680 /*
1681 * throw away biocache buffers, mainly to avoid
1682 * unnecessary delayed writes later.
1683 */
1684 error = ncl_vinvalbuf(vp, 0, cnp->cn_thread, 1);
1685 /* Do the rpc */
1686 if (error != EINTR && error != EIO)
1687 error = nfs_removerpc(dvp, vp, cnp->cn_nameptr,
1688 cnp->cn_namelen, cnp->cn_cred, cnp->cn_thread);
1689 /*
		 * Kludge City: If the first reply to the remove rpc is lost,
		 *   the reply to the retransmitted request will be ENOENT,
		 *   since the file was in fact removed.
1693 * Therefore, we cheat and return success.
1694 */
1695 if (error == ENOENT)
1696 error = 0;
1697 } else if (!np->n_sillyrename)
1698 error = nfs_sillyrename(dvp, vp, cnp);
1699 mtx_lock(&np->n_mtx);
1700 np->n_attrstamp = 0;
1701 mtx_unlock(&np->n_mtx);
1702 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
1703 return (error);
1704 }
1705
1706 /*
1707 * nfs file remove rpc called from nfs_inactive
1708 */
1709 int
1710 ncl_removeit(struct sillyrename *sp, struct vnode *vp)
1711 {
1712 /*
1713 * Make sure that the directory vnode is still valid.
1714 * XXX we should lock sp->s_dvp here.
1715 */
1716 if (sp->s_dvp->v_type == VBAD)
1717 return (0);
1718 return (nfs_removerpc(sp->s_dvp, vp, sp->s_name, sp->s_namlen,
1719 sp->s_cred, NULL));
1720 }
1721
1722 /*
1723 * Nfs remove rpc, called from nfs_remove() and ncl_removeit().
1724 */
1725 static int
1726 nfs_removerpc(struct vnode *dvp, struct vnode *vp, char *name,
1727 int namelen, struct ucred *cred, struct thread *td)
1728 {
1729 struct nfsvattr dnfsva;
1730 struct nfsnode *dnp = VTONFS(dvp);
1731 int error = 0, dattrflag;
1732
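	/*
	 * Flag the remove as in progress so that NFSv4 lookups and creates
	 * in this directory wait for it (see NREMOVEWANT in nfs_lookup() and
	 * nfs_create()); wake any such waiters once the RPC is done.
	 */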
1733 mtx_lock(&dnp->n_mtx);
1734 dnp->n_flag |= NREMOVEINPROG;
1735 mtx_unlock(&dnp->n_mtx);
1736 error = nfsrpc_remove(dvp, name, namelen, vp, cred, td, &dnfsva,
1737 &dattrflag, NULL);
1738 mtx_lock(&dnp->n_mtx);
1739 if ((dnp->n_flag & NREMOVEWANT)) {
1740 dnp->n_flag &= ~(NREMOVEWANT | NREMOVEINPROG);
1741 mtx_unlock(&dnp->n_mtx);
1742 wakeup((caddr_t)dnp);
1743 } else {
1744 dnp->n_flag &= ~NREMOVEINPROG;
1745 mtx_unlock(&dnp->n_mtx);
1746 }
1747 if (dattrflag)
1748 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
1749 mtx_lock(&dnp->n_mtx);
1750 dnp->n_flag |= NMODIFIED;
1751 if (!dattrflag) {
1752 dnp->n_attrstamp = 0;
1753 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
1754 }
1755 mtx_unlock(&dnp->n_mtx);
1756 if (error && NFS_ISV4(dvp))
1757 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
1758 return (error);
1759 }
1760
1761 /*
1762 * nfs file rename call
1763 */
1764 static int
1765 nfs_rename(struct vop_rename_args *ap)
1766 {
1767 struct vnode *fvp = ap->a_fvp;
1768 struct vnode *tvp = ap->a_tvp;
1769 struct vnode *fdvp = ap->a_fdvp;
1770 struct vnode *tdvp = ap->a_tdvp;
1771 struct componentname *tcnp = ap->a_tcnp;
1772 struct componentname *fcnp = ap->a_fcnp;
1773 struct nfsnode *fnp = VTONFS(ap->a_fvp);
1774 struct nfsnode *tdnp = VTONFS(ap->a_tdvp);
1775 struct nfsv4node *newv4 = NULL;
1776 int error;
1777
1778 KASSERT((tcnp->cn_flags & HASBUF) != 0 &&
1779 (fcnp->cn_flags & HASBUF) != 0, ("nfs_rename: no name"));
1780 /* Check for cross-device rename */
1781 if ((fvp->v_mount != tdvp->v_mount) ||
1782 (tvp && (fvp->v_mount != tvp->v_mount))) {
1783 error = EXDEV;
1784 goto out;
1785 }
1786
1787 if (fvp == tvp) {
1788 printf("nfs_rename: fvp == tvp (can't happen)\n");
1789 error = 0;
1790 goto out;
1791 }
1792 if ((error = NFSVOPLOCK(fvp, LK_EXCLUSIVE)) != 0)
1793 goto out;
1794
1795 /*
1796 * We have to flush B_DELWRI data prior to renaming
1797 * the file. If we don't, the delayed-write buffers
1798 * can be flushed out later after the file has gone stale
1799 * under NFSV3. NFSV2 does not have this problem because
	 * (as far as I can tell) it flushes dirty buffers more
1801 * often.
1802 *
1803 * Skip the rename operation if the fsync fails, this can happen
1804 * due to the server's volume being full, when we pushed out data
1805 * that was written back to our cache earlier. Not checking for
1806 * this condition can result in potential (silent) data loss.
1807 */
1808 error = VOP_FSYNC(fvp, MNT_WAIT, fcnp->cn_thread);
1809 NFSVOPUNLOCK(fvp, 0);
1810 if (!error && tvp)
1811 error = VOP_FSYNC(tvp, MNT_WAIT, tcnp->cn_thread);
1812 if (error)
1813 goto out;
1814
1815 /*
1816 * If the tvp exists and is in use, sillyrename it before doing the
1817 * rename of the new file over it.
1818 * XXX Can't sillyrename a directory.
1819 */
1820 if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
1821 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
1822 vput(tvp);
1823 tvp = NULL;
1824 }
1825
1826 error = nfs_renamerpc(fdvp, fvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1827 tdvp, tvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1828 tcnp->cn_thread);
1829
1830 if (error == 0 && NFS_ISV4(tdvp)) {
1831 /*
1832 * For NFSv4, check to see if it is the same name and
1833 * replace the name, if it is different.
1834 */
1835 MALLOC(newv4, struct nfsv4node *,
1836 sizeof (struct nfsv4node) +
1837 tdnp->n_fhp->nfh_len + tcnp->cn_namelen - 1,
1838 M_NFSV4NODE, M_WAITOK);
1839 mtx_lock(&tdnp->n_mtx);
1840 mtx_lock(&fnp->n_mtx);
1841 if (fnp->n_v4 != NULL && fvp->v_type == VREG &&
1842 (fnp->n_v4->n4_namelen != tcnp->cn_namelen ||
1843 NFSBCMP(tcnp->cn_nameptr, NFS4NODENAME(fnp->n_v4),
1844 tcnp->cn_namelen) ||
1845 tdnp->n_fhp->nfh_len != fnp->n_v4->n4_fhlen ||
1846 NFSBCMP(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
1847 tdnp->n_fhp->nfh_len))) {
1848 #ifdef notdef
1849 { char nnn[100]; int nnnl;
1850 nnnl = (tcnp->cn_namelen < 100) ? tcnp->cn_namelen : 99;
1851 bcopy(tcnp->cn_nameptr, nnn, nnnl);
1852 nnn[nnnl] = '\0';
1853 printf("ren replace=%s\n",nnn);
1854 }
1855 #endif
1856 FREE((caddr_t)fnp->n_v4, M_NFSV4NODE);
1857 fnp->n_v4 = newv4;
1858 newv4 = NULL;
1859 fnp->n_v4->n4_fhlen = tdnp->n_fhp->nfh_len;
1860 fnp->n_v4->n4_namelen = tcnp->cn_namelen;
1861 NFSBCOPY(tdnp->n_fhp->nfh_fh, fnp->n_v4->n4_data,
1862 tdnp->n_fhp->nfh_len);
1863 NFSBCOPY(tcnp->cn_nameptr,
1864 NFS4NODENAME(fnp->n_v4), tcnp->cn_namelen);
1865 }
1866 mtx_unlock(&tdnp->n_mtx);
1867 mtx_unlock(&fnp->n_mtx);
1868 if (newv4 != NULL)
1869 FREE((caddr_t)newv4, M_NFSV4NODE);
1870 }
1871
1872 if (fvp->v_type == VDIR) {
1873 if (tvp != NULL && tvp->v_type == VDIR)
1874 cache_purge(tdvp);
1875 cache_purge(fdvp);
1876 }
1877
1878 out:
1879 if (tdvp == tvp)
1880 vrele(tdvp);
1881 else
1882 vput(tdvp);
1883 if (tvp)
1884 vput(tvp);
1885 vrele(fdvp);
1886 vrele(fvp);
1887 /*
1888 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1889 */
1890 if (error == ENOENT)
1891 error = 0;
1892 return (error);
1893 }
1894
1895 /*
1896 * nfs file rename rpc called from nfs_sillyrename() below
1897 */
1898 static int
1899 nfs_renameit(struct vnode *sdvp, struct vnode *svp, struct componentname *scnp,
1900 struct sillyrename *sp)
1901 {
1902
1903 return (nfs_renamerpc(sdvp, svp, scnp->cn_nameptr, scnp->cn_namelen,
1904 sdvp, NULL, sp->s_name, sp->s_namlen, scnp->cn_cred,
1905 scnp->cn_thread));
1906 }
1907
1908 /*
1909 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1910 */
1911 static int
1912 nfs_renamerpc(struct vnode *fdvp, struct vnode *fvp, char *fnameptr,
1913 int fnamelen, struct vnode *tdvp, struct vnode *tvp, char *tnameptr,
1914 int tnamelen, struct ucred *cred, struct thread *td)
1915 {
1916 struct nfsvattr fnfsva, tnfsva;
1917 struct nfsnode *fdnp = VTONFS(fdvp);
1918 struct nfsnode *tdnp = VTONFS(tdvp);
1919 int error = 0, fattrflag, tattrflag;
1920
1921 error = nfsrpc_rename(fdvp, fvp, fnameptr, fnamelen, tdvp, tvp,
1922 tnameptr, tnamelen, cred, td, &fnfsva, &tnfsva, &fattrflag,
1923 &tattrflag, NULL, NULL);
1924 mtx_lock(&fdnp->n_mtx);
1925 fdnp->n_flag |= NMODIFIED;
1926 if (fattrflag != 0) {
1927 mtx_unlock(&fdnp->n_mtx);
1928 (void) nfscl_loadattrcache(&fdvp, &fnfsva, NULL, NULL, 0, 1);
1929 } else {
1930 fdnp->n_attrstamp = 0;
1931 mtx_unlock(&fdnp->n_mtx);
1932 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(fdvp);
1933 }
1934 mtx_lock(&tdnp->n_mtx);
1935 tdnp->n_flag |= NMODIFIED;
1936 if (tattrflag != 0) {
1937 mtx_unlock(&tdnp->n_mtx);
1938 (void) nfscl_loadattrcache(&tdvp, &tnfsva, NULL, NULL, 0, 1);
1939 } else {
1940 tdnp->n_attrstamp = 0;
1941 mtx_unlock(&tdnp->n_mtx);
1942 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
1943 }
1944 if (error && NFS_ISV4(fdvp))
1945 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
1946 return (error);
1947 }
1948
1949 /*
1950 * nfs hard link create call
1951 */
1952 static int
1953 nfs_link(struct vop_link_args *ap)
1954 {
1955 struct vnode *vp = ap->a_vp;
1956 struct vnode *tdvp = ap->a_tdvp;
1957 struct componentname *cnp = ap->a_cnp;
1958 struct nfsnode *np, *tdnp;
1959 struct nfsvattr nfsva, dnfsva;
1960 int error = 0, attrflag, dattrflag;
1961
1962 /*
1963 * Push all writes to the server, so that the attribute cache
1964 * doesn't get "out of sync" with the server.
1965 * XXX There should be a better way!
1966 */
1967 VOP_FSYNC(vp, MNT_WAIT, cnp->cn_thread);
1968
1969 error = nfsrpc_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
1970 cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &attrflag,
1971 &dattrflag, NULL);
1972 tdnp = VTONFS(tdvp);
1973 mtx_lock(&tdnp->n_mtx);
1974 tdnp->n_flag |= NMODIFIED;
1975 if (dattrflag != 0) {
1976 mtx_unlock(&tdnp->n_mtx);
1977 (void) nfscl_loadattrcache(&tdvp, &dnfsva, NULL, NULL, 0, 1);
1978 } else {
1979 tdnp->n_attrstamp = 0;
1980 mtx_unlock(&tdnp->n_mtx);
1981 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(tdvp);
1982 }
1983 if (attrflag)
1984 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
1985 else {
1986 np = VTONFS(vp);
1987 mtx_lock(&np->n_mtx);
1988 np->n_attrstamp = 0;
1989 mtx_unlock(&np->n_mtx);
1990 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
1991 }
1992 /*
1993 * If negative lookup caching is enabled, I might as well
1994 * add an entry for this node. Not necessary for correctness,
1995 * but if negative caching is enabled, then the system
1996 * must care about lookup caching hit rate, so...
1997 */
1998 if (VFSTONFS(vp->v_mount)->nm_negnametimeo != 0 &&
1999 (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) {
2000 cache_enter_time(tdvp, vp, cnp, &nfsva.na_ctime, NULL);
2001 }
2002 if (error && NFS_ISV4(vp))
2003 error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0,
2004 (gid_t)0);
2005 return (error);
2006 }
2007
2008 /*
2009 * nfs symbolic link create call
2010 */
2011 static int
2012 nfs_symlink(struct vop_symlink_args *ap)
2013 {
2014 struct vnode *dvp = ap->a_dvp;
2015 struct vattr *vap = ap->a_vap;
2016 struct componentname *cnp = ap->a_cnp;
2017 struct nfsvattr nfsva, dnfsva;
2018 struct nfsfh *nfhp;
2019 struct nfsnode *np = NULL, *dnp;
2020 struct vnode *newvp = NULL;
2021 int error = 0, attrflag, dattrflag, ret;
2022
2023 vap->va_type = VLNK;
2024 error = nfsrpc_symlink(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2025 ap->a_target, vap, cnp->cn_cred, cnp->cn_thread, &dnfsva,
2026 &nfsva, &nfhp, &attrflag, &dattrflag, NULL);
2027 if (nfhp) {
2028 ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread,
2029 &np, NULL, LK_EXCLUSIVE);
2030 if (!ret)
2031 newvp = NFSTOV(np);
2032 else if (!error)
2033 error = ret;
2034 }
2035 if (newvp != NULL) {
2036 if (attrflag)
2037 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
2038 0, 1);
2039 } else if (!error) {
2040 /*
2041 * If we do not have an error and we could not extract the
2042 * newvp from the response due to the request being NFSv2, we
2043 * have to do a lookup in order to obtain a newvp to return.
2044 */
2045 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2046 cnp->cn_cred, cnp->cn_thread, &np);
2047 if (!error)
2048 newvp = NFSTOV(np);
2049 }
2050 if (error) {
2051 if (newvp)
2052 vput(newvp);
2053 if (NFS_ISV4(dvp))
2054 error = nfscl_maperr(cnp->cn_thread, error,
2055 vap->va_uid, vap->va_gid);
2056 } else {
2057 *ap->a_vpp = newvp;
2058 }
2059
2060 dnp = VTONFS(dvp);
2061 mtx_lock(&dnp->n_mtx);
2062 dnp->n_flag |= NMODIFIED;
2063 if (dattrflag != 0) {
2064 mtx_unlock(&dnp->n_mtx);
2065 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
2066 } else {
2067 dnp->n_attrstamp = 0;
2068 mtx_unlock(&dnp->n_mtx);
2069 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
2070 }
2071 /*
2072 * If negative lookup caching is enabled, I might as well
2073 * add an entry for this node. Not necessary for correctness,
2074 * but if negative caching is enabled, then the system
2075 * must care about lookup caching hit rate, so...
2076 */
2077 if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
2078 (cnp->cn_flags & MAKEENTRY) && attrflag != 0 && error == 0) {
2079 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime, NULL);
2080 }
2081 return (error);
2082 }
2083
2084 /*
2085 * nfs make dir call
2086 */
2087 static int
2088 nfs_mkdir(struct vop_mkdir_args *ap)
2089 {
2090 struct vnode *dvp = ap->a_dvp;
2091 struct vattr *vap = ap->a_vap;
2092 struct componentname *cnp = ap->a_cnp;
2093 struct nfsnode *np = NULL, *dnp;
2094 struct vnode *newvp = NULL;
2095 struct vattr vattr;
2096 struct nfsfh *nfhp;
2097 struct nfsvattr nfsva, dnfsva;
2098 int error = 0, attrflag, dattrflag, ret;
2099
2100 if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred)) != 0)
2101 return (error);
2102 vap->va_type = VDIR;
2103 error = nfsrpc_mkdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2104 vap, cnp->cn_cred, cnp->cn_thread, &dnfsva, &nfsva, &nfhp,
2105 &attrflag, &dattrflag, NULL);
2106 dnp = VTONFS(dvp);
2107 mtx_lock(&dnp->n_mtx);
2108 dnp->n_flag |= NMODIFIED;
2109 if (dattrflag != 0) {
2110 mtx_unlock(&dnp->n_mtx);
2111 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
2112 } else {
2113 dnp->n_attrstamp = 0;
2114 mtx_unlock(&dnp->n_mtx);
2115 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
2116 }
2117 if (nfhp) {
2118 ret = nfscl_nget(dvp->v_mount, dvp, nfhp, cnp, cnp->cn_thread,
2119 &np, NULL, LK_EXCLUSIVE);
2120 if (!ret) {
2121 newvp = NFSTOV(np);
2122 if (attrflag)
2123 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL,
2124 NULL, 0, 1);
2125 } else if (!error)
2126 error = ret;
2127 }
2128 if (!error && newvp == NULL) {
2129 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2130 cnp->cn_cred, cnp->cn_thread, &np);
2131 if (!error) {
2132 newvp = NFSTOV(np);
2133 if (newvp->v_type != VDIR)
2134 error = EEXIST;
2135 }
2136 }
2137 if (error) {
2138 if (newvp)
2139 vput(newvp);
2140 if (NFS_ISV4(dvp))
2141 error = nfscl_maperr(cnp->cn_thread, error,
2142 vap->va_uid, vap->va_gid);
2143 } else {
2144 /*
2145 * If negative lookup caching is enabled, I might as well
2146 * add an entry for this node. Not necessary for correctness,
2147 * but if negative caching is enabled, then the system
2148 * must care about lookup caching hit rate, so...
2149 */
2150 if (VFSTONFS(dvp->v_mount)->nm_negnametimeo != 0 &&
2151 (cnp->cn_flags & MAKEENTRY) &&
2152 attrflag != 0 && dattrflag != 0)
2153 cache_enter_time(dvp, newvp, cnp, &nfsva.na_ctime,
2154 &dnfsva.na_ctime);
2155 *ap->a_vpp = newvp;
2156 }
2157 return (error);
2158 }
2159
2160 /*
2161 * nfs remove directory call
2162 */
2163 static int
2164 nfs_rmdir(struct vop_rmdir_args *ap)
2165 {
2166 struct vnode *vp = ap->a_vp;
2167 struct vnode *dvp = ap->a_dvp;
2168 struct componentname *cnp = ap->a_cnp;
2169 struct nfsnode *dnp;
2170 struct nfsvattr dnfsva;
2171 int error, dattrflag;
2172
2173 if (dvp == vp)
2174 return (EINVAL);
2175 error = nfsrpc_rmdir(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2176 cnp->cn_cred, cnp->cn_thread, &dnfsva, &dattrflag, NULL);
2177 dnp = VTONFS(dvp);
2178 mtx_lock(&dnp->n_mtx);
2179 dnp->n_flag |= NMODIFIED;
2180 if (dattrflag != 0) {
2181 mtx_unlock(&dnp->n_mtx);
2182 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
2183 } else {
2184 dnp->n_attrstamp = 0;
2185 mtx_unlock(&dnp->n_mtx);
2186 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(dvp);
2187 }
2188
2189 cache_purge(dvp);
2190 cache_purge(vp);
2191 if (error && NFS_ISV4(dvp))
2192 error = nfscl_maperr(cnp->cn_thread, error, (uid_t)0,
2193 (gid_t)0);
2194 /*
2195 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2196 */
2197 if (error == ENOENT)
2198 error = 0;
2199 return (error);
2200 }
2201
2202 /*
2203 * nfs readdir call
2204 */
2205 static int
2206 nfs_readdir(struct vop_readdir_args *ap)
2207 {
2208 struct vnode *vp = ap->a_vp;
2209 struct nfsnode *np = VTONFS(vp);
2210 struct uio *uio = ap->a_uio;
2211 ssize_t tresid, left;
2212 int error = 0;
2213 struct vattr vattr;
2214
2215 if (ap->a_eofflag != NULL)
2216 *ap->a_eofflag = 0;
2217 if (vp->v_type != VDIR)
2218 return(EPERM);
2219
2220 /*
2221 * First, check for hit on the EOF offset cache
2222 */
2223 if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
2224 (np->n_flag & NMODIFIED) == 0) {
2225 if (VOP_GETATTR(vp, &vattr, ap->a_cred) == 0) {
2226 mtx_lock(&np->n_mtx);
2227 if ((NFS_ISV4(vp) && np->n_change == vattr.va_filerev) ||
2228 !NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
2229 mtx_unlock(&np->n_mtx);
2230 NFSINCRGLOBAL(newnfsstats.direofcache_hits);
2231 if (ap->a_eofflag != NULL)
2232 *ap->a_eofflag = 1;
2233 return (0);
2234 } else
2235 mtx_unlock(&np->n_mtx);
2236 }
2237 }
2238
2239 /*
2240 * NFS always guarantees that directory entries don't straddle
2241 * DIRBLKSIZ boundaries. As such, we need to limit the size
2242 * to an exact multiple of DIRBLKSIZ, to avoid copying a partial
2243 * directory entry.
2244 */
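/*
 * For example, if DIRBLKSIZ is 512 and uio_resid is 1300, left is 276;
 * 1024 bytes (two full directory blocks) are passed down to ncl_bioread()
 * and the 276-byte remainder is added back to uio_resid after the read.
 */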
2245 left = uio->uio_resid % DIRBLKSIZ;
2246 if (left == uio->uio_resid)
2247 return (EINVAL);
2248 uio->uio_resid -= left;
2249
2250 /*
2251 * Call ncl_bioread() to do the real work.
2252 */
2253 tresid = uio->uio_resid;
2254 error = ncl_bioread(vp, uio, 0, ap->a_cred);
2255
2256 if (!error && uio->uio_resid == tresid) {
2257 NFSINCRGLOBAL(newnfsstats.direofcache_misses);
2258 if (ap->a_eofflag != NULL)
2259 *ap->a_eofflag = 1;
2260 }
2261
2262 /* Add the partial DIRBLKSIZ (left) back in. */
2263 uio->uio_resid += left;
2264 return (error);
2265 }
2266
2267 /*
2268 * Readdir rpc call.
2269 * Called from below the buffer cache by ncl_doio().
2270 */
2271 int
2272 ncl_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
2273 struct thread *td)
2274 {
2275 struct nfsvattr nfsva;
2276 nfsuint64 *cookiep, cookie;
2277 struct nfsnode *dnp = VTONFS(vp);
2278 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2279 int error = 0, eof, attrflag;
2280
2281 KASSERT(uiop->uio_iovcnt == 1 &&
2282 (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
2283 (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
2284 ("nfs readdirrpc bad uio"));
2285
2286 /*
2287 * If there is no cookie, assume directory was stale.
2288 */
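/*
 * ncl_getcookie() maps a directory offset to the NFS cookie saved when
 * the preceding block was read; with its last argument 0 it only looks
 * the cookie up.  After a successful RPC below, the cookie (presumably
 * updated by nfsrpc_readdir() to the last cookie in the reply) is saved
 * under the new uio_offset so the next block's read can resume there.
 */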
2289 ncl_dircookie_lock(dnp);
2290 cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
2291 if (cookiep) {
2292 cookie = *cookiep;
2293 ncl_dircookie_unlock(dnp);
2294 } else {
2295 ncl_dircookie_unlock(dnp);
2296 return (NFSERR_BAD_COOKIE);
2297 }
2298
2299 if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
2300 (void)ncl_fsinfo(nmp, vp, cred, td);
2301
2302 error = nfsrpc_readdir(vp, uiop, &cookie, cred, td, &nfsva,
2303 &attrflag, &eof, NULL);
2304 if (attrflag)
2305 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
2306
2307 if (!error) {
2308 /*
2309 * We are now either at the end of the directory or have filled
2310 * the block.
2311 */
2312 if (eof)
2313 dnp->n_direofoffset = uiop->uio_offset;
2314 else {
2315 if (uiop->uio_resid > 0)
2316 printf("EEK! readdirrpc resid > 0\n");
2317 ncl_dircookie_lock(dnp);
2318 cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
2319 *cookiep = cookie;
2320 ncl_dircookie_unlock(dnp);
2321 }
2322 } else if (NFS_ISV4(vp)) {
2323 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
2324 }
2325 return (error);
2326 }
2327
2328 /*
2329 * NFS V3 readdir plus RPC. Used in place of ncl_readdirrpc().
2330 */
2331 int
2332 ncl_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred,
2333 struct thread *td)
2334 {
2335 struct nfsvattr nfsva;
2336 nfsuint64 *cookiep, cookie;
2337 struct nfsnode *dnp = VTONFS(vp);
2338 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2339 int error = 0, attrflag, eof;
2340
2341 KASSERT(uiop->uio_iovcnt == 1 &&
2342 (uiop->uio_offset & (DIRBLKSIZ - 1)) == 0 &&
2343 (uiop->uio_resid & (DIRBLKSIZ - 1)) == 0,
2344 ("nfs readdirplusrpc bad uio"));
2345
2346 /*
2347 * If there is no cookie, assume directory was stale.
2348 */
2349 ncl_dircookie_lock(dnp);
2350 cookiep = ncl_getcookie(dnp, uiop->uio_offset, 0);
2351 if (cookiep) {
2352 cookie = *cookiep;
2353 ncl_dircookie_unlock(dnp);
2354 } else {
2355 ncl_dircookie_unlock(dnp);
2356 return (NFSERR_BAD_COOKIE);
2357 }
2358
2359 if (NFSHASNFSV3(nmp) && !NFSHASGOTFSINFO(nmp))
2360 (void)ncl_fsinfo(nmp, vp, cred, td);
2361 error = nfsrpc_readdirplus(vp, uiop, &cookie, cred, td, &nfsva,
2362 &attrflag, &eof, NULL);
2363 if (attrflag)
2364 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0, 1);
2365
2366 if (!error) {
2367 /*
2368 * We are now either at the end of the directory or have filled
2369 * the block.
2370 */
2371 if (eof)
2372 dnp->n_direofoffset = uiop->uio_offset;
2373 else {
2374 if (uiop->uio_resid > 0)
2375 printf("EEK! readdirplusrpc resid > 0\n");
2376 ncl_dircookie_lock(dnp);
2377 cookiep = ncl_getcookie(dnp, uiop->uio_offset, 1);
2378 *cookiep = cookie;
2379 ncl_dircookie_unlock(dnp);
2380 }
2381 } else if (NFS_ISV4(vp)) {
2382 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
2383 }
2384 return (error);
2385 }
2386
2387 /*
2388 * Silly rename. To make the stateless NFS filesystem look a little more
2389 * like "ufs", a remove of an active vnode is translated to a rename to a
2390 * funny looking filename that is removed by nfs_inactive on the nfsnode.
2391 * There is the potential for another process on a different client to
2392 * create the same funny name between the time nfs_lookitup() fails and
2393 * nfs_rename() completes, but...
2394 */
2395 static int
2396 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2397 {
2398 struct sillyrename *sp;
2399 struct nfsnode *np;
2400 int error;
2401 short pid;
2402 unsigned int lticks;
2403
2404 cache_purge(dvp);
2405 np = VTONFS(vp);
2406 KASSERT(vp->v_type != VDIR, ("nfs: sillyrename dir"));
2407 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2408 M_NEWNFSREQ, M_WAITOK);
2409 sp->s_cred = crhold(cnp->cn_cred);
2410 sp->s_dvp = dvp;
2411 VREF(dvp);
2412
2413 /*
2414 * Fudge together a funny name.
2415 * Changing the format of the funny name to accommodate more
2416 * sillynames per directory.
2417 * The name is now changed to .nfs.<ticks>.<pid>.4, where ticks is
2418 * CPU ticks since boot.
2419 */
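/*
 * For example, ticks == 0x0003ad42 and pid == 0x0135 yield the name
 * ".nfs.0003ad42.01354.4" (the format string below appends a literal
 * '4' after the pid).  If nfs_lookitup() finds the candidate name
 * already in use on the server, lticks is bumped and another name is
 * tried.
 */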
2420 pid = cnp->cn_thread->td_proc->p_pid;
2421 lticks = (unsigned int)ticks;
2422 for ( ; ; ) {
2423 sp->s_namlen = sprintf(sp->s_name,
2424 ".nfs.%08x.%04x4.4", lticks,
2425 pid);
2426 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2427 cnp->cn_thread, NULL))
2428 break;
2429 lticks++;
2430 }
2431 error = nfs_renameit(dvp, vp, cnp, sp);
2432 if (error)
2433 goto bad;
2434 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2435 cnp->cn_thread, &np);
2436 np->n_sillyrename = sp;
2437 return (0);
2438 bad:
2439 vrele(sp->s_dvp);
2440 crfree(sp->s_cred);
2441 free((caddr_t)sp, M_NEWNFSREQ);
2442 return (error);
2443 }
2444
2445 /*
2446 * Look up a file name and optionally either update the file handle or
2447 * allocate an nfsnode, depending on the value of npp.
2448 * npp == NULL --> just do the lookup
2449 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2450 * handled too
2451 * *npp != NULL --> update the file handle in the vnode
2452 */
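/*
 * Examples of the three modes in this file: nfs_sillyrename() first
 * calls with npp == NULL just to probe whether a candidate ".nfs.*"
 * name already exists, and later passes the existing nfsnode so that
 * it is rebound to the new name; nfs_symlink() and nfs_mkdir() pass a
 * pointer to a NULL nfsnode to obtain a vnode for the newly created
 * object when the create reply did not include a file handle.
 */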
2453 static int
2454 nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred,
2455 struct thread *td, struct nfsnode **npp)
2456 {
2457 struct vnode *newvp = NULL, *vp;
2458 struct nfsnode *np, *dnp = VTONFS(dvp);
2459 struct nfsfh *nfhp, *onfhp;
2460 struct nfsvattr nfsva, dnfsva;
2461 struct componentname cn;
2462 int error = 0, attrflag, dattrflag;
2463 u_int hash;
2464
2465 error = nfsrpc_lookup(dvp, name, len, cred, td, &dnfsva, &nfsva,
2466 &nfhp, &attrflag, &dattrflag, NULL);
2467 if (dattrflag)
2468 (void) nfscl_loadattrcache(&dvp, &dnfsva, NULL, NULL, 0, 1);
2469 if (npp && !error) {
2470 if (*npp != NULL) {
2471 np = *npp;
2472 vp = NFSTOV(np);
2473 /*
2474 * For NFSv4, check to see if it is the same name and
2475 * replace the name, if it is different.
2476 */
2477 if (np->n_v4 != NULL && nfsva.na_type == VREG &&
2478 (np->n_v4->n4_namelen != len ||
2479 NFSBCMP(name, NFS4NODENAME(np->n_v4), len) ||
2480 dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
2481 NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
2482 dnp->n_fhp->nfh_len))) {
2483 #ifdef notdef
2484 { char nnn[100]; int nnnl;
2485 nnnl = (len < 100) ? len : 99;
2486 bcopy(name, nnn, nnnl);
2487 nnn[nnnl] = '\0';
2488 printf("replace=%s\n",nnn);
2489 }
2490 #endif
2491 FREE((caddr_t)np->n_v4, M_NFSV4NODE);
2492 MALLOC(np->n_v4, struct nfsv4node *,
2493 sizeof (struct nfsv4node) +
2494 dnp->n_fhp->nfh_len + len - 1,
2495 M_NFSV4NODE, M_WAITOK);
2496 np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
2497 np->n_v4->n4_namelen = len;
2498 NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
2499 dnp->n_fhp->nfh_len);
2500 NFSBCOPY(name, NFS4NODENAME(np->n_v4), len);
2501 }
2502 hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len,
2503 FNV1_32_INIT);
2504 onfhp = np->n_fhp;
2505 /*
2506 * Rehash node for new file handle.
2507 */
2508 vfs_hash_rehash(vp, hash);
2509 np->n_fhp = nfhp;
2510 if (onfhp != NULL)
2511 FREE((caddr_t)onfhp, M_NFSFH);
2512 newvp = NFSTOV(np);
2513 } else if (NFS_CMPFH(dnp, nfhp->nfh_fh, nfhp->nfh_len)) {
2514 FREE((caddr_t)nfhp, M_NFSFH);
2515 VREF(dvp);
2516 newvp = dvp;
2517 } else {
2518 cn.cn_nameptr = name;
2519 cn.cn_namelen = len;
2520 error = nfscl_nget(dvp->v_mount, dvp, nfhp, &cn, td,
2521 &np, NULL, LK_EXCLUSIVE);
2522 if (error)
2523 return (error);
2524 newvp = NFSTOV(np);
2525 }
2526 if (!attrflag && *npp == NULL) {
2527 if (newvp == dvp)
2528 vrele(newvp);
2529 else
2530 vput(newvp);
2531 return (ENOENT);
2532 }
2533 if (attrflag)
2534 (void) nfscl_loadattrcache(&newvp, &nfsva, NULL, NULL,
2535 0, 1);
2536 }
2537 if (npp && *npp == NULL) {
2538 if (error) {
2539 if (newvp) {
2540 if (newvp == dvp)
2541 vrele(newvp);
2542 else
2543 vput(newvp);
2544 }
2545 } else
2546 *npp = np;
2547 }
2548 if (error && NFS_ISV4(dvp))
2549 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
2550 return (error);
2551 }
2552
2553 /*
2554 * Nfs Version 3 and 4 commit rpc
2555 */
2556 int
2557 ncl_commit(struct vnode *vp, u_quad_t offset, int cnt, struct ucred *cred,
2558 struct thread *td)
2559 {
2560 struct nfsvattr nfsva;
2561 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2562 int error, attrflag;
2563
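/*
 * If no write verifier has been received from the server yet, no
 * writes (and hence no uncommitted UNSTABLE writes) should have been
 * done on this mount, so the Commit RPC can be skipped.
 */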
2564 mtx_lock(&nmp->nm_mtx);
2565 if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
2566 mtx_unlock(&nmp->nm_mtx);
2567 return (0);
2568 }
2569 mtx_unlock(&nmp->nm_mtx);
2570 error = nfsrpc_commit(vp, offset, cnt, cred, td, &nfsva,
2571 &attrflag, NULL);
2572 if (attrflag != 0)
2573 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL,
2574 0, 1);
2575 if (error != 0 && NFS_ISV4(vp))
2576 error = nfscl_maperr(td, error, (uid_t)0, (gid_t)0);
2577 return (error);
2578 }
2579
2580 /*
2581 * Strategy routine.
2582 * For async requests when nfsiod(s) are running, queue the request by
2583 * calling ncl_asyncio(); otherwise just call ncl_doio() to do the
2584 * request.
2585 */
2586 static int
2587 nfs_strategy(struct vop_strategy_args *ap)
2588 {
2589 struct buf *bp = ap->a_bp;
2590 struct ucred *cr;
2591
2592 KASSERT(!(bp->b_flags & B_DONE),
2593 ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
2594 BUF_ASSERT_HELD(bp);
2595
2596 if (bp->b_iocmd == BIO_READ)
2597 cr = bp->b_rcred;
2598 else
2599 cr = bp->b_wcred;
2600
2601 /*
2602 * If the op is asynchronous and an i/o daemon is waiting,
2603 * queue the request, wake it up and wait for completion;
2604 * otherwise just do it ourselves.
2605 */
2606 if ((bp->b_flags & B_ASYNC) == 0 ||
2607 ncl_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, curthread))
2608 (void) ncl_doio(ap->a_vp, bp, cr, curthread, 1);
2609 return (0);
2610 }
2611
2612 /*
2613 * fsync vnode op. Just call ncl_flush() with commit == 1.
2614 */
2615 /* ARGSUSED */
2616 static int
2617 nfs_fsync(struct vop_fsync_args *ap)
2618 {
2619
2620 if (ap->a_vp->v_type != VREG) {
2621 /*
2622 * For NFS, metadata is changed synchronously on the server,
2623 * so there is nothing to flush. Also, ncl_flush() clears
2624 * the NMODIFIED flag and that shouldn't be done here for
2625 * directories.
2626 */
2627 return (0);
2628 }
2629 return (ncl_flush(ap->a_vp, ap->a_waitfor, NULL, ap->a_td, 1, 0));
2630 }
2631
2632 /*
2633 * Flush all the blocks associated with a vnode.
2634 * Walk through the buffer pool and push any dirty pages
2635 * associated with the vnode.
2636 * If the called_from_renewthread argument is TRUE, it has been called
2637 * from the NFSv4 renew thread and, as such, cannot block indefinitely
2638 * waiting for a buffer write to complete.
2639 */
2640 int
2641 ncl_flush(struct vnode *vp, int waitfor, struct ucred *cred, struct thread *td,
2642 int commit, int called_from_renewthread)
2643 {
2644 struct nfsnode *np = VTONFS(vp);
2645 struct buf *bp;
2646 int i;
2647 struct buf *nbp;
2648 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2649 int error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2650 int passone = 1, trycnt = 0;
2651 u_quad_t off, endoff, toff;
2652 struct ucred* wcred = NULL;
2653 struct buf **bvec = NULL;
2654 struct bufobj *bo;
2655 #ifndef NFS_COMMITBVECSIZ
2656 #define NFS_COMMITBVECSIZ 20
2657 #endif
2658 struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
2659 int bvecsize = 0, bveccount;
2660
2661 if (called_from_renewthread != 0)
2662 slptimeo = hz;
2663 if (nmp->nm_flag & NFSMNT_INT)
2664 slpflag = PCATCH;
2665 if (!commit)
2666 passone = 0;
2667 bo = &vp->v_bufobj;
2668 /*
2669 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2670 * server, but has not been committed to stable storage on the server
2671 * yet. On the first pass, the byte range is worked out and the commit
2672 * rpc is done. On the second pass, ncl_writebp() is called to do the
2673 * job.
2674 */
2675 again:
2676 off = (u_quad_t)-1;
2677 endoff = 0;
2678 bvecpos = 0;
2679 if (NFS_ISV34(vp) && commit) {
2680 if (bvec != NULL && bvec != bvec_on_stack)
2681 free(bvec, M_TEMP);
2682 /*
2683 * Count up how many buffers are waiting for a commit.
2684 */
2685 bveccount = 0;
2686 BO_LOCK(bo);
2687 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2688 if (!BUF_ISLOCKED(bp) &&
2689 (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
2690 == (B_DELWRI | B_NEEDCOMMIT))
2691 bveccount++;
2692 }
2693 /*
2694 * Allocate space to remember the list of bufs to commit. It is
2695 * important to use M_NOWAIT here to avoid a race with nfs_write.
2696 * If we can't get memory (for whatever reason), we will end up
2697 * committing the buffers one-by-one in the loop below.
2698 */
2699 if (bveccount > NFS_COMMITBVECSIZ) {
2700 /*
2701 * Release the bufobj lock to avoid a lock
2702 * order reversal.
2703 */
2704 BO_UNLOCK(bo);
2705 bvec = (struct buf **)
2706 malloc(bveccount * sizeof(struct buf *),
2707 M_TEMP, M_NOWAIT);
2708 BO_LOCK(bo);
2709 if (bvec == NULL) {
2710 bvec = bvec_on_stack;
2711 bvecsize = NFS_COMMITBVECSIZ;
2712 } else
2713 bvecsize = bveccount;
2714 } else {
2715 bvec = bvec_on_stack;
2716 bvecsize = NFS_COMMITBVECSIZ;
2717 }
2718 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2719 if (bvecpos >= bvecsize)
2720 break;
2721 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
2722 nbp = TAILQ_NEXT(bp, b_bobufs);
2723 continue;
2724 }
2725 if ((bp->b_flags & (B_DELWRI | B_NEEDCOMMIT)) !=
2726 (B_DELWRI | B_NEEDCOMMIT)) {
2727 BUF_UNLOCK(bp);
2728 nbp = TAILQ_NEXT(bp, b_bobufs);
2729 continue;
2730 }
2731 BO_UNLOCK(bo);
2732 bremfree(bp);
2733 /*
2734 * Work out if all buffers are using the same cred
2735 * so we can deal with them all with one commit.
2736 *
2737 * NOTE: we are not clearing B_DONE here, so we have
2738 * to do it later on in this routine if we intend to
2739 * initiate I/O on the bp.
2740 *
2741 * Note: to avoid loopback deadlocks, we do not
2742 * assign b_runningbufspace.
2743 */
2744 if (wcred == NULL)
2745 wcred = bp->b_wcred;
2746 else if (wcred != bp->b_wcred)
2747 wcred = NOCRED;
2748 vfs_busy_pages(bp, 1);
2749
2750 BO_LOCK(bo);
2751 /*
2752 * bp is protected by being locked, but nbp is not
2753 * and vfs_busy_pages() may sleep. We have to
2754 * recalculate nbp.
2755 */
2756 nbp = TAILQ_NEXT(bp, b_bobufs);
2757
2758 /*
2759 * A list of these buffers is kept so that the
2760 * second loop knows which buffers have actually
2761 * been committed. This is necessary, since there
2762 * may be a race between the commit rpc and new
2763 * uncommitted writes on the file.
2764 */
2765 bvec[bvecpos++] = bp;
2766 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2767 bp->b_dirtyoff;
2768 if (toff < off)
2769 off = toff;
2770 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2771 if (toff > endoff)
2772 endoff = toff;
2773 }
2774 BO_UNLOCK(bo);
2775 }
2776 if (bvecpos > 0) {
2777 /*
2778 * Commit data on the server, as required.
2779 * If all bufs are using the same wcred, then use that with
2780 * one call for all of them, otherwise commit each one
2781 * separately.
2782 */
2783 if (wcred != NOCRED)
2784 retv = ncl_commit(vp, off, (int)(endoff - off),
2785 wcred, td);
2786 else {
2787 retv = 0;
2788 for (i = 0; i < bvecpos; i++) {
2789 off_t off, size;
2790 bp = bvec[i];
2791 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2792 bp->b_dirtyoff;
2793 size = (u_quad_t)(bp->b_dirtyend
2794 - bp->b_dirtyoff);
2795 retv = ncl_commit(vp, off, (int)size,
2796 bp->b_wcred, td);
2797 if (retv) break;
2798 }
2799 }
2800
2801 if (retv == NFSERR_STALEWRITEVERF)
2802 ncl_clearcommit(vp->v_mount);
2803
2804 /*
2805 * Now, either mark the blocks I/O done or mark the
2806 * blocks dirty, depending on whether the commit
2807 * succeeded.
2808 */
2809 for (i = 0; i < bvecpos; i++) {
2810 bp = bvec[i];
2811 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
2812 if (retv) {
2813 /*
2814 * Error, leave B_DELWRI intact
2815 */
2816 vfs_unbusy_pages(bp);
2817 brelse(bp);
2818 } else {
2819 /*
2820 * Success, remove B_DELWRI ( bundirty() ).
2821 *
2822 * b_dirtyoff/b_dirtyend seem to be NFS
2823 * specific. We should probably move that
2824 * into bundirty(). XXX
2825 */
2826 bufobj_wref(bo);
2827 bp->b_flags |= B_ASYNC;
2828 bundirty(bp);
2829 bp->b_flags &= ~B_DONE;
2830 bp->b_ioflags &= ~BIO_ERROR;
2831 bp->b_dirtyoff = bp->b_dirtyend = 0;
2832 bufdone(bp);
2833 }
2834 }
2835 }
2836
2837 /*
2838 * Start/do any write(s) that are required.
2839 */
2840 loop:
2841 BO_LOCK(bo);
2842 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2843 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
2844 if (waitfor != MNT_WAIT || passone)
2845 continue;
2846
2847 error = BUF_TIMELOCK(bp,
2848 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
2849 BO_LOCKPTR(bo), "nfsfsync", slpflag, slptimeo);
2850 if (error == 0) {
2851 BUF_UNLOCK(bp);
2852 goto loop;
2853 }
2854 if (error == ENOLCK) {
2855 error = 0;
2856 goto loop;
2857 }
2858 if (called_from_renewthread != 0) {
2859 /*
2860 * Return EIO so the flush will be retried
2861 * later.
2862 */
2863 error = EIO;
2864 goto done;
2865 }
2866 if (newnfs_sigintr(nmp, td)) {
2867 error = EINTR;
2868 goto done;
2869 }
2870 if (slpflag == PCATCH) {
2871 slpflag = 0;
2872 slptimeo = 2 * hz;
2873 }
2874 goto loop;
2875 }
2876 if ((bp->b_flags & B_DELWRI) == 0)
2877 panic("nfs_fsync: not dirty");
2878 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) {
2879 BUF_UNLOCK(bp);
2880 continue;
2881 }
2882 BO_UNLOCK(bo);
2883 bremfree(bp);
2884 bp->b_flags |= B_ASYNC;
2888 bwrite(bp);
2889 if (newnfs_sigintr(nmp, td)) {
2890 error = EINTR;
2891 goto done;
2892 }
2893 goto loop;
2894 }
2895 if (passone) {
2896 passone = 0;
2897 BO_UNLOCK(bo);
2898 goto again;
2899 }
2900 if (waitfor == MNT_WAIT) {
2901 while (bo->bo_numoutput) {
2902 error = bufobj_wwait(bo, slpflag, slptimeo);
2903 if (error) {
2904 BO_UNLOCK(bo);
2905 if (called_from_renewthread != 0) {
2906 /*
2907 * Return EIO so that the flush will be
2908 * retried later.
2909 */
2910 error = EIO;
2911 goto done;
2912 }
2913 error = newnfs_sigintr(nmp, td);
2914 if (error)
2915 goto done;
2916 if (slpflag == PCATCH) {
2917 slpflag = 0;
2918 slptimeo = 2 * hz;
2919 }
2920 BO_LOCK(bo);
2921 }
2922 }
2923 if (bo->bo_dirty.bv_cnt != 0 && commit) {
2924 BO_UNLOCK(bo);
2925 goto loop;
2926 }
2927 /*
2928 * Wait for all the async IO requests to drain
2929 */
2930 BO_UNLOCK(bo);
2931 mtx_lock(&np->n_mtx);
2932 while (np->n_directio_asyncwr > 0) {
2933 np->n_flag |= NFSYNCWAIT;
2934 error = newnfs_msleep(td, &np->n_directio_asyncwr,
2935 &np->n_mtx, slpflag | (PRIBIO + 1),
2936 "nfsfsync", 0);
2937 if (error) {
2938 if (newnfs_sigintr(nmp, td)) {
2939 mtx_unlock(&np->n_mtx);
2940 error = EINTR;
2941 goto done;
2942 }
2943 }
2944 }
2945 mtx_unlock(&np->n_mtx);
2946 } else
2947 BO_UNLOCK(bo);
2948 if (NFSHASPNFS(nmp)) {
2949 nfscl_layoutcommit(vp, td);
2950 /*
2951 * Invalidate the attribute cache, since writes to a DS
2952 * won't update the size attribute.
2953 */
2954 mtx_lock(&np->n_mtx);
2955 np->n_attrstamp = 0;
2956 } else
2957 mtx_lock(&np->n_mtx);
2958 if (np->n_flag & NWRITEERR) {
2959 error = np->n_error;
2960 np->n_flag &= ~NWRITEERR;
2961 }
2962 if (commit && bo->bo_dirty.bv_cnt == 0 &&
2963 bo->bo_numoutput == 0 && np->n_directio_asyncwr == 0)
2964 np->n_flag &= ~NMODIFIED;
2965 mtx_unlock(&np->n_mtx);
2966 done:
2967 if (bvec != NULL && bvec != bvec_on_stack)
2968 free(bvec, M_TEMP);
2969 if (error == 0 && commit != 0 && waitfor == MNT_WAIT &&
2970 (bo->bo_dirty.bv_cnt != 0 || bo->bo_numoutput != 0 ||
2971 np->n_directio_asyncwr != 0) && trycnt++ < 5) {
2972 /* try, try again... */
2973 passone = 1;
2974 wcred = NULL;
2975 bvec = NULL;
2976 bvecsize = 0;
2977 printf("try%d\n", trycnt);
2978 goto again;
2979 }
2980 return (error);
2981 }
2982
2983 /*
2984 * NFS advisory byte-level locks.
2985 */
2986 static int
2987 nfs_advlock(struct vop_advlock_args *ap)
2988 {
2989 struct vnode *vp = ap->a_vp;
2990 struct ucred *cred;
2991 struct nfsnode *np = VTONFS(ap->a_vp);
2992 struct proc *p = (struct proc *)ap->a_id;
2993 struct thread *td = curthread; /* XXX */
2994 struct vattr va;
2995 int ret, error = EOPNOTSUPP;
2996 u_quad_t size;
2997
2998 if (NFS_ISV4(vp) && (ap->a_flags & (F_POSIX | F_FLOCK)) != 0) {
2999 if (vp->v_type != VREG)
3000 return (EINVAL);
3001 if ((ap->a_flags & F_POSIX) != 0)
3002 cred = p->p_ucred;
3003 else
3004 cred = td->td_ucred;
3005 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
3006 if (vp->v_iflag & VI_DOOMED) {
3007 NFSVOPUNLOCK(vp, 0);
3008 return (EBADF);
3009 }
3010
3011 /*
3012 * If this is unlocking a write locked region, flush and
3013 * commit the dirty buffers before unlocking. This is required by
3014 * RFC3530 Sec. 9.3.2.
3015 */
3016 if (ap->a_op == F_UNLCK &&
3017 nfscl_checkwritelocked(vp, ap->a_fl, cred, td, ap->a_id,
3018 ap->a_flags))
3019 (void) ncl_flush(vp, MNT_WAIT, cred, td, 1, 0);
3020
3021 /*
3022 * Loop around doing the lock op, since a blocking lock
3023 * must wait for the lock op to succeed.
3024 */
3025 do {
3026 ret = nfsrpc_advlock(vp, np->n_size, ap->a_op,
3027 ap->a_fl, 0, cred, td, ap->a_id, ap->a_flags);
3028 if (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
3029 ap->a_op == F_SETLK) {
3030 NFSVOPUNLOCK(vp, 0);
3031 error = nfs_catnap(PZERO | PCATCH, ret,
3032 "ncladvl");
3033 if (error)
3034 return (EINTR);
3035 NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY);
3036 if (vp->v_iflag & VI_DOOMED) {
3037 NFSVOPUNLOCK(vp, 0);
3038 return (EBADF);
3039 }
3040 }
3041 } while (ret == NFSERR_DENIED && (ap->a_flags & F_WAIT) &&
3042 ap->a_op == F_SETLK);
3043 if (ret == NFSERR_DENIED) {
3044 NFSVOPUNLOCK(vp, 0);
3045 return (EAGAIN);
3046 } else if (ret == EINVAL || ret == EBADF || ret == EINTR) {
3047 NFSVOPUNLOCK(vp, 0);
3048 return (ret);
3049 } else if (ret != 0) {
3050 NFSVOPUNLOCK(vp, 0);
3051 return (EACCES);
3052 }
3053
3054 /*
3055 * Now, if we just got a lock, invalidate data in the buffer
3056 * cache, as required, so that the coherency conforms with
3057 * RFC3530 Sec. 9.3.2.
3058 */
3059 if (ap->a_op == F_SETLK) {
3060 if ((np->n_flag & NMODIFIED) == 0) {
3061 np->n_attrstamp = 0;
3062 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
3063 ret = VOP_GETATTR(vp, &va, cred);
3064 }
3065 if ((np->n_flag & NMODIFIED) || ret ||
3066 np->n_change != va.va_filerev) {
3067 (void) ncl_vinvalbuf(vp, V_SAVE, td, 1);
3068 np->n_attrstamp = 0;
3069 KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
3070 ret = VOP_GETATTR(vp, &va, cred);
3071 if (!ret) {
3072 np->n_mtime = va.va_mtime;
3073 np->n_change = va.va_filerev;
3074 }
3075 }
3076 /* Mark that a file lock has been acquired. */
3077 mtx_lock(&np->n_mtx);
3078 np->n_flag |= NHASBEENLOCKED;
3079 mtx_unlock(&np->n_mtx);
3080 }
3081 NFSVOPUNLOCK(vp, 0);
3082 return (0);
3083 } else if (!NFS_ISV4(vp)) {
3084 error = NFSVOPLOCK(vp, LK_SHARED);
3085 if (error)
3086 return (error);
3087 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
3088 size = VTONFS(vp)->n_size;
3089 NFSVOPUNLOCK(vp, 0);
3090 error = lf_advlock(ap, &(vp->v_lockf), size);
3091 } else {
3092 if (nfs_advlock_p != NULL)
3093 error = nfs_advlock_p(ap);
3094 else {
3095 NFSVOPUNLOCK(vp, 0);
3096 error = ENOLCK;
3097 }
3098 }
3099 if (error == 0 && ap->a_op == F_SETLK) {
3100 error = NFSVOPLOCK(vp, LK_SHARED);
3101 if (error == 0) {
3102 /* Mark that a file lock has been acquired. */
3103 mtx_lock(&np->n_mtx);
3104 np->n_flag |= NHASBEENLOCKED;
3105 mtx_unlock(&np->n_mtx);
3106 NFSVOPUNLOCK(vp, 0);
3107 }
3108 }
3109 }
3110 return (error);
3111 }
3112
3113 /*
3114 * NFS advisory byte-level locks.
3115 */
3116 static int
3117 nfs_advlockasync(struct vop_advlockasync_args *ap)
3118 {
3119 struct vnode *vp = ap->a_vp;
3120 u_quad_t size;
3121 int error;
3122
3123 if (NFS_ISV4(vp))
3124 return (EOPNOTSUPP);
3125 error = NFSVOPLOCK(vp, LK_SHARED);
3126 if (error)
3127 return (error);
3128 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NOLOCKD) != 0) {
3129 size = VTONFS(vp)->n_size;
3130 NFSVOPUNLOCK(vp, 0);
3131 error = lf_advlockasync(ap, &(vp->v_lockf), size);
3132 } else {
3133 NFSVOPUNLOCK(vp, 0);
3134 error = EOPNOTSUPP;
3135 }
3136 return (error);
3137 }
3138
3139 /*
3140 * Print out the contents of an nfsnode.
3141 */
3142 static int
3143 nfs_print(struct vop_print_args *ap)
3144 {
3145 struct vnode *vp = ap->a_vp;
3146 struct nfsnode *np = VTONFS(vp);
3147
3148 printf("\tfileid %ld fsid 0x%x", np->n_vattr.na_fileid,
3149 np->n_vattr.na_fsid);
3150 if (vp->v_type == VFIFO)
3151 fifo_printinfo(vp);
3152 printf("\n");
3153 return (0);
3154 }
3155
3156 /*
3157 * This is the "real" nfs::bwrite(struct buf*).
3158 * We set B_CACHE if this is a VMIO buffer.
3159 */
3160 int
3161 ncl_writebp(struct buf *bp, int force __unused, struct thread *td)
3162 {
3163 int s;
3164 int oldflags = bp->b_flags;
3165 #if 0
3166 int retv = 1;
3167 off_t off;
3168 #endif
3169
3170 BUF_ASSERT_HELD(bp);
3171
3172 if (bp->b_flags & B_INVAL) {
3173 brelse(bp);
3174 return(0);
3175 }
3176
3177 bp->b_flags |= B_CACHE;
3178
3179 /*
3180 * Undirty the bp. We will redirty it later if the I/O fails.
3181 */
3182
3183 s = splbio();
3184 bundirty(bp);
3185 bp->b_flags &= ~B_DONE;
3186 bp->b_ioflags &= ~BIO_ERROR;
3187 bp->b_iocmd = BIO_WRITE;
3188
3189 bufobj_wref(bp->b_bufobj);
3190 curthread->td_ru.ru_oublock++;
3191 splx(s);
3192
3193 /*
3194 * Note: to avoid loopback deadlocks, we do not
3195 * assign b_runningbufspace.
3196 */
3197 vfs_busy_pages(bp, 1);
3198
3199 BUF_KERNPROC(bp);
3200 bp->b_iooffset = dbtob(bp->b_blkno);
3201 bstrategy(bp);
3202
3203 if ((oldflags & B_ASYNC) == 0) {
3204 int rtval = bufwait(bp);
3205
3206 if (oldflags & B_DELWRI) {
3207 s = splbio();
3208 reassignbuf(bp);
3209 splx(s);
3210 }
3211 brelse(bp);
3212 return (rtval);
3213 }
3214
3215 return (0);
3216 }
3217
3218 /*
3219 * nfs special file access vnode op.
3220 * Essentially just get vattr and then imitate iaccess() since the device is
3221 * local to the client.
3222 */
3223 static int
3224 nfsspec_access(struct vop_access_args *ap)
3225 {
3226 struct vattr *vap;
3227 struct ucred *cred = ap->a_cred;
3228 struct vnode *vp = ap->a_vp;
3229 accmode_t accmode = ap->a_accmode;
3230 struct vattr vattr;
3231 int error;
3232
3233 /*
3234 * Disallow write attempts on filesystems mounted read-only,
3235 * unless the file is a socket, fifo, or a block or character
3236 * device resident on the filesystem.
3237 */
3238 if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3239 switch (vp->v_type) {
3240 case VREG:
3241 case VDIR:
3242 case VLNK:
3243 return (EROFS);
3244 default:
3245 break;
3246 }
3247 }
3248 vap = &vattr;
3249 error = VOP_GETATTR(vp, vap, cred);
3250 if (error)
3251 goto out;
3252 error = vaccess(vp->v_type, vap->va_mode, vap->va_uid, vap->va_gid,
3253 accmode, cred, NULL);
3254 out:
3255 return error;
3256 }
3257
3258 /*
3259 * Read wrapper for fifos.
3260 */
3261 static int
3262 nfsfifo_read(struct vop_read_args *ap)
3263 {
3264 struct nfsnode *np = VTONFS(ap->a_vp);
3265 int error;
3266
3267 /*
3268 * Set access flag.
3269 */
3270 mtx_lock(&np->n_mtx);
3271 np->n_flag |= NACC;
3272 vfs_timestamp(&np->n_atim);
3273 mtx_unlock(&np->n_mtx);
3274 error = fifo_specops.vop_read(ap);
3275 return error;
3276 }
3277
3278 /*
3279 * Write wrapper for fifos.
3280 */
3281 static int
3282 nfsfifo_write(struct vop_write_args *ap)
3283 {
3284 struct nfsnode *np = VTONFS(ap->a_vp);
3285
3286 /*
3287 * Set update flag.
3288 */
3289 mtx_lock(&np->n_mtx);
3290 np->n_flag |= NUPD;
3291 vfs_timestamp(&np->n_mtim);
3292 mtx_unlock(&np->n_mtx);
3293 return(fifo_specops.vop_write(ap));
3294 }
3295
3296 /*
3297 * Close wrapper for fifos.
3298 *
3299 * Update the times on the nfsnode then do fifo close.
3300 */
3301 static int
3302 nfsfifo_close(struct vop_close_args *ap)
3303 {
3304 struct vnode *vp = ap->a_vp;
3305 struct nfsnode *np = VTONFS(vp);
3306 struct vattr vattr;
3307 struct timespec ts;
3308
3309 mtx_lock(&np->n_mtx);
3310 if (np->n_flag & (NACC | NUPD)) {
3311 vfs_timestamp(&ts);
3312 if (np->n_flag & NACC)
3313 np->n_atim = ts;
3314 if (np->n_flag & NUPD)
3315 np->n_mtim = ts;
3316 np->n_flag |= NCHG;
3317 if (vrefcnt(vp) == 1 &&
3318 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3319 VATTR_NULL(&vattr);
3320 if (np->n_flag & NACC)
3321 vattr.va_atime = np->n_atim;
3322 if (np->n_flag & NUPD)
3323 vattr.va_mtime = np->n_mtim;
3324 mtx_unlock(&np->n_mtx);
3325 (void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3326 goto out;
3327 }
3328 }
3329 mtx_unlock(&np->n_mtx);
3330 out:
3331 return (fifo_specops.vop_close(ap));
3332 }
3333
3334 /*
3335 * Just call ncl_writebp() with the force argument set to 1.
3336 *
3337 * NOTE: B_DONE may or may not be set in a_bp on call.
3338 */
3339 static int
3340 nfs_bwrite(struct buf *bp)
3341 {
3342
3343 return (ncl_writebp(bp, 1, curthread));
3344 }
3345
3346 struct buf_ops buf_ops_newnfs = {
3347 .bop_name = "buf_ops_nfs",
3348 .bop_write = nfs_bwrite,
3349 .bop_strategy = bufstrategy,
3350 .bop_sync = bufsync,
3351 .bop_bdflush = bufbdflush,
3352 };
3353
3354 static int
3355 nfs_getacl(struct vop_getacl_args *ap)
3356 {
3357 int error;
3358
3359 if (ap->a_type != ACL_TYPE_NFS4)
3360 return (EOPNOTSUPP);
3361 error = nfsrpc_getacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp,
3362 NULL);
3363 if (error > NFSERR_STALE) {
3364 (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
3365 error = EPERM;
3366 }
3367 return (error);
3368 }
3369
3370 static int
3371 nfs_setacl(struct vop_setacl_args *ap)
3372 {
3373 int error;
3374
3375 if (ap->a_type != ACL_TYPE_NFS4)
3376 return (EOPNOTSUPP);
3377 error = nfsrpc_setacl(ap->a_vp, ap->a_cred, ap->a_td, ap->a_aclp,
3378 NULL);
3379 if (error > NFSERR_STALE) {
3380 (void) nfscl_maperr(ap->a_td, error, (uid_t)0, (gid_t)0);
3381 error = EPERM;
3382 }
3383 return (error);
3384 }
3385
3386 /*
3387 * Return POSIX pathconf information applicable to nfs filesystems.
3388 */
3389 static int
3390 nfs_pathconf(struct vop_pathconf_args *ap)
3391 {
3392 struct nfsv3_pathconf pc;
3393 struct nfsvattr nfsva;
3394 struct vnode *vp = ap->a_vp;
3395 struct thread *td = curthread;
3396 int attrflag, error;
3397
3398 if ((NFS_ISV34(vp) && (ap->a_name == _PC_LINK_MAX ||
3399 ap->a_name == _PC_NAME_MAX || ap->a_name == _PC_CHOWN_RESTRICTED ||
3400 ap->a_name == _PC_NO_TRUNC)) ||
3401 (NFS_ISV4(vp) && ap->a_name == _PC_ACL_NFS4)) {
3402 /*
3403 * Since only the above 4 a_names are returned by the NFSv3
3404 * Pathconf RPC, there is no point in doing it for others.
3405 * For NFSv4, the Pathconf RPC (actually a Getattr Op.) can
3406 * be used for _PC_ACL_NFS4 as well.
3407 */
3408 error = nfsrpc_pathconf(vp, &pc, td->td_ucred, td, &nfsva,
3409 &attrflag, NULL);
3410 if (attrflag != 0)
3411 (void) nfscl_loadattrcache(&vp, &nfsva, NULL, NULL, 0,
3412 1);
3413 if (error != 0)
3414 return (error);
3415 } else {
3416 /*
3417 * For NFSv2 (or NFSv3 when not one of the above 4 a_names),
3418 * just fake them.
3419 */
3420 pc.pc_linkmax = LINK_MAX;
3421 pc.pc_namemax = NFS_MAXNAMLEN;
3422 pc.pc_notrunc = 1;
3423 pc.pc_chownrestricted = 1;
3424 pc.pc_caseinsensitive = 0;
3425 pc.pc_casepreserving = 1;
3426 error = 0;
3427 }
3428 switch (ap->a_name) {
3429 case _PC_LINK_MAX:
3430 *ap->a_retval = pc.pc_linkmax;
3431 break;
3432 case _PC_NAME_MAX:
3433 *ap->a_retval = pc.pc_namemax;
3434 break;
3435 case _PC_PATH_MAX:
3436 *ap->a_retval = PATH_MAX;
3437 break;
3438 case _PC_PIPE_BUF:
3439 *ap->a_retval = PIPE_BUF;
3440 break;
3441 case _PC_CHOWN_RESTRICTED:
3442 *ap->a_retval = pc.pc_chownrestricted;
3443 break;
3444 case _PC_NO_TRUNC:
3445 *ap->a_retval = pc.pc_notrunc;
3446 break;
3447 case _PC_ACL_EXTENDED:
3448 *ap->a_retval = 0;
3449 break;
3450 case _PC_ACL_NFS4:
3451 if (NFS_ISV4(vp) && nfsrv_useacl != 0 && attrflag != 0 &&
3452 NFSISSET_ATTRBIT(&nfsva.na_suppattr, NFSATTRBIT_ACL))
3453 *ap->a_retval = 1;
3454 else
3455 *ap->a_retval = 0;
3456 break;
3457 case _PC_ACL_PATH_MAX:
3458 if (NFS_ISV4(vp))
3459 *ap->a_retval = ACL_MAX_ENTRIES;
3460 else
3461 *ap->a_retval = 3;
3462 break;
3463 case _PC_MAC_PRESENT:
3464 *ap->a_retval = 0;
3465 break;
3466 case _PC_ASYNC_IO:
3467 /* _PC_ASYNC_IO should have been handled by upper layers. */
3468 KASSERT(0, ("_PC_ASYNC_IO should not get here"));
3469 error = EINVAL;
3470 break;
3471 case _PC_PRIO_IO:
3472 *ap->a_retval = 0;
3473 break;
3474 case _PC_SYNC_IO:
3475 *ap->a_retval = 0;
3476 break;
3477 case _PC_ALLOC_SIZE_MIN:
3478 *ap->a_retval = vp->v_mount->mnt_stat.f_bsize;
3479 break;
3480 case _PC_FILESIZEBITS:
3481 if (NFS_ISV34(vp))
3482 *ap->a_retval = 64;
3483 else
3484 *ap->a_retval = 32;
3485 break;
3486 case _PC_REC_INCR_XFER_SIZE:
3487 *ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
3488 break;
3489 case _PC_REC_MAX_XFER_SIZE:
3490 *ap->a_retval = -1; /* means ``unlimited'' */
3491 break;
3492 case _PC_REC_MIN_XFER_SIZE:
3493 *ap->a_retval = vp->v_mount->mnt_stat.f_iosize;
3494 break;
3495 case _PC_REC_XFER_ALIGN:
3496 *ap->a_retval = PAGE_SIZE;
3497 break;
3498 case _PC_SYMLINK_MAX:
3499 *ap->a_retval = NFS_MAXPATHLEN;
3500 break;
3501
3502 default:
3503 error = EINVAL;
3504 break;
3505 }
3506 return (error);
3507 }
3508