FreeBSD/Linux Kernel Cross Reference
sys/coda/coda_vnops.c
1 /* $NetBSD: coda_vnops.c,v 1.68.22.1 2010/08/25 04:17:47 snj Exp $ */
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1990 Carnegie-Mellon University
37 * Copyright (c) 1989 Carnegie-Mellon University
38 * All rights reserved. The CMU software License Agreement specifies
39 * the terms and conditions for use and redistribution.
40 */
41
42 /*
43 * This code was written for the Coda file system at Carnegie Mellon
44 * University. Contributers include David Steere, James Kistler, and
45 * M. Satyanarayanan.
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.68.22.1 2010/08/25 04:17:47 snj Exp $");
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/user.h>
64 #include <sys/vnode.h>
65 #include <sys/kauth.h>
66
67 #include <miscfs/genfs/genfs.h>
68
69 #include <coda/coda.h>
70 #include <coda/cnode.h>
71 #include <coda/coda_vnops.h>
72 #include <coda/coda_venus.h>
73 #include <coda/coda_opstats.h>
74 #include <coda/coda_subr.h>
75 #include <coda/coda_namecache.h>
76 #include <coda/coda_pioctl.h>
77
/*
 * These flags select various performance enhancements.
 */
int coda_attr_cache = 1;	/* Set to cache attributes in the kernel */
int coda_symlink_cache = 1;	/* Set to cache symbolic link information */
int coda_access_cache = 1;	/* Set to handle some access checks directly */

/* structure to keep track of vfs calls: one slot per vnode operation */

struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];

/* Counters bumped by the vnops below: total entries, requests satisfied
 * internally (without a venus upcall), internal failures, and internally
 * generated operations. */
#define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
#define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
#define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
#define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)

/* What we are delaying for in printf */
int coda_printf_delay = 0;  /* in microseconds */
int coda_vnop_print_entry = 0;	/* nonzero: trace entry into each vnop */
static int coda_lockdebug = 0;

/* Trace entry into a vnode operation when coda_vnop_print_entry is set. */
#define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
100
/*
 * Definition of the vnode operation vector.  Operations with no Coda
 * implementation are routed to coda_vop_error (fail with EIO),
 * coda_vop_nop (silently succeed), or a genfs_* default.
 */

const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
    { &vop_default_desc, coda_vop_error },
    { &vop_lookup_desc, coda_lookup },		/* lookup */
    { &vop_create_desc, coda_create },		/* create */
    { &vop_mknod_desc, coda_vop_error },	/* mknod */
    { &vop_open_desc, coda_open },		/* open */
    { &vop_close_desc, coda_close },		/* close */
    { &vop_access_desc, coda_access },		/* access */
    { &vop_getattr_desc, coda_getattr },	/* getattr */
    { &vop_setattr_desc, coda_setattr },	/* setattr */
    { &vop_read_desc, coda_read },		/* read */
    { &vop_write_desc, coda_write },		/* write */
    { &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
    { &vop_ioctl_desc, coda_ioctl },		/* ioctl */
    { &vop_mmap_desc, genfs_mmap },		/* mmap */
    { &vop_fsync_desc, coda_fsync },		/* fsync */
    { &vop_remove_desc, coda_remove },		/* remove */
    { &vop_link_desc, coda_link },		/* link */
    { &vop_rename_desc, coda_rename },		/* rename */
    { &vop_mkdir_desc, coda_mkdir },		/* mkdir */
    { &vop_rmdir_desc, coda_rmdir },		/* rmdir */
    { &vop_symlink_desc, coda_symlink },	/* symlink */
    { &vop_readdir_desc, coda_readdir },	/* readdir */
    { &vop_readlink_desc, coda_readlink },	/* readlink */
    { &vop_abortop_desc, coda_abortop },	/* abortop */
    { &vop_inactive_desc, coda_inactive },	/* inactive */
    { &vop_reclaim_desc, coda_reclaim },	/* reclaim */
    { &vop_lock_desc, coda_lock },		/* lock */
    { &vop_unlock_desc, coda_unlock },		/* unlock */
    { &vop_bmap_desc, coda_bmap },		/* bmap */
    { &vop_strategy_desc, coda_strategy },	/* strategy */
    { &vop_print_desc, coda_vop_error },	/* print */
    { &vop_islocked_desc, coda_islocked },	/* islocked */
    { &vop_pathconf_desc, coda_vop_error },	/* pathconf */
    { &vop_advlock_desc, coda_vop_nop },	/* advlock */
    { &vop_bwrite_desc, coda_vop_error },	/* bwrite */
    { &vop_seek_desc, genfs_seek },		/* seek */
    { &vop_poll_desc, genfs_poll },		/* poll */
    { &vop_getpages_desc, coda_getpages },	/* getpages */
    { &vop_putpages_desc, coda_putpages },	/* putpages */
    { NULL, NULL }
};

/* Descriptor pairing the coda vnodeop vector pointer with its entries. */
const struct vnodeopv_desc coda_vnodeop_opv_desc =
        { &coda_vnodeop_p, coda_vnodeop_entries };
148
149 /* Definitions of NetBSD vnodeop interfaces */
150
151 /*
152 * A generic error routine. Return EIO without looking at arguments.
153 */
154 int
155 coda_vop_error(void *anon) {
156 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
157
158 if (codadebug) {
159 myprintf(("coda_vop_error: Vnode operation %s called (error).\n",
160 (*desc)->vdesc_name));
161 }
162
163 return EIO;
164 }
165
166 /* A generic do-nothing. */
167 int
168 coda_vop_nop(void *anon) {
169 struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
170
171 if (codadebug) {
172 myprintf(("Vnode operation %s called, but unsupported\n",
173 (*desc)->vdesc_name));
174 }
175 return (0);
176 }
177
178 int
179 coda_vnodeopstats_init(void)
180 {
181 int i;
182
183 for(i=0;i<CODA_VNODEOPS_SIZE;i++) {
184 coda_vnodeopstats[i].opcode = i;
185 coda_vnodeopstats[i].entries = 0;
186 coda_vnodeopstats[i].sat_intrn = 0;
187 coda_vnodeopstats[i].unsat_intrn = 0;
188 coda_vnodeopstats[i].gen_intrn = 0;
189 }
190
191 return 0;
192 }
193
194 /*
195 * XXX The entire relationship between VOP_OPEN and having a container
196 * file (via venus_open) needs to be reexamined. In particular, it's
197 * valid to open/mmap/close and then reference. Instead of doing
198 * VOP_OPEN when getpages needs a container, we should do the
199 * venus_open part, and record that the vnode has opened the container
200 * for getpages, and do the matching logical close on coda_inactive.
201 * Further, coda_rdwr needs a container file, and sometimes needs to
202 * do the equivalent of open (core dumps).
203 */
204 /*
205 * coda_open calls Venus to return the device and inode of the
206 * container file, and then obtains a vnode for that file. The
207 * container vnode is stored in the coda vnode, and a reference is
208 * added for each open file.
209 */
210 int
211 coda_open(void *v)
212 {
213 /*
214 * NetBSD can pass the O_EXCL flag in mode, even though the check
215 * has already happened. Venus defensively assumes that if open
216 * is passed the EXCL, it must be a bug. We strip the flag here.
217 */
218 /* true args */
219 struct vop_open_args *ap = v;
220 struct vnode *vp = ap->a_vp;
221 struct cnode *cp = VTOC(vp);
222 int flag = ap->a_mode & (~O_EXCL);
223 kauth_cred_t cred = ap->a_cred;
224 /* locals */
225 int error;
226 dev_t dev; /* container file device, inode, vnode */
227 ino_t inode;
228 struct vnode *container_vp;
229
230 MARK_ENTRY(CODA_OPEN_STATS);
231
232 /* Check for open of control file. */
233 if (IS_CTL_VP(vp)) {
234 /* if (WRITABLE(flag)) */
235 if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
236 MARK_INT_FAIL(CODA_OPEN_STATS);
237 return(EACCES);
238 }
239 MARK_INT_SAT(CODA_OPEN_STATS);
240 return(0);
241 }
242
243 error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
244 if (error)
245 return (error);
246 if (!error) {
247 CODADEBUG(CODA_OPEN, myprintf(("open: dev %d inode %llu result %d\n",
248 dev, (unsigned long long)inode, error)); )
249 }
250
251 /*
252 * Obtain locked and referenced container vnode from container
253 * device/inode.
254 */
255 error = coda_grab_vnode(dev, inode, &container_vp);
256 if (error)
257 return (error);
258
259 /* Save the vnode pointer for the container file. */
260 if (cp->c_ovp == NULL) {
261 cp->c_ovp = container_vp;
262 } else {
263 if (cp->c_ovp != container_vp)
264 /*
265 * Perhaps venus returned a different container, or
266 * something else went wrong.
267 */
268 panic("coda_open: cp->c_ovp != container_vp");
269 }
270 cp->c_ocount++;
271
272 /* Flush the attribute cache if writing the file. */
273 if (flag & FWRITE) {
274 cp->c_owrite++;
275 cp->c_flags &= ~C_VATTR;
276 }
277
278 /*
279 * Save the <device, inode> pair for the container file to speed
280 * up subsequent reads while closed (mmap, program execution).
281 * This is perhaps safe because venus will invalidate the node
282 * before changing the container file mapping.
283 */
284 cp->c_device = dev;
285 cp->c_inode = inode;
286
287 /* Open the container file. */
288 error = VOP_OPEN(container_vp, flag, cred);
289 /*
290 * Drop the lock on the container, after we have done VOP_OPEN
291 * (which requires a locked vnode).
292 */
293 VOP_UNLOCK(container_vp, 0);
294 return(error);
295 }
296
/*
 * Close the cache (container) file used for I/O and notify Venus.
 * Decrements the open/write counts taken in coda_open and drops the
 * container reference on the last close.
 */
int
coda_close(void *v)
{
	/* true args */
	struct vop_close_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	int flag = ap->a_fflag;
	kauth_cred_t cred = ap->a_cred;
	/* locals */
	int error;

	MARK_ENTRY(CODA_CLOSE_STATS);

	/* Check for close of control file: nothing to do. */
	if (IS_CTL_VP(vp)) {
		MARK_INT_SAT(CODA_CLOSE_STATS);
		return(0);
	}

	/*
	 * XXX The IS_UNMOUNTING part of this is very suspect.
	 * If the filesystem is being unmounted, close and drop the
	 * container (if any) and refuse the operation with ENODEV.
	 */
	if (IS_UNMOUNTING(cp)) {
		if (cp->c_ovp) {
#ifdef CODA_VERBOSE
			printf("coda_close: destroying container ref %d, ufs vp %p of vp %p/cp %p\n",
			    vp->v_usecount, cp->c_ovp, vp, cp);
#endif
#ifdef hmm
			vgone(cp->c_ovp);
#else
			vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
			VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
			vput(cp->c_ovp);
#endif
		} else {
#ifdef CODA_VERBOSE
			printf("coda_close: NO container vp %p/cp %p\n", vp, cp);
#endif
		}
		return ENODEV;
	}

	/*
	 * Lock the container node, and VOP_CLOSE it.
	 * NOTE(review): cp->c_ovp is dereferenced here without a NULL
	 * check -- presumably every close is preceded by a successful
	 * coda_open that set c_ovp, but a close without a container
	 * would fault.  TODO confirm; later revisions add a guard.
	 */
	vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
	/*
	 * Drop the lock we just obtained, and vrele the container vnode.
	 * Decrement reference counts, and clear container vnode pointer on
	 * last close.
	 */
	vput(cp->c_ovp);
	if (flag & FWRITE)
		--cp->c_owrite;
	if (--cp->c_ocount == 0)
		cp->c_ovp = NULL;

	/* Tell venus the file has been closed. */
	error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);

	CODADEBUG(CODA_CLOSE, myprintf(("close: result %d\n",error)); )
	return(error);
}
363
364 int
365 coda_read(void *v)
366 {
367 struct vop_read_args *ap = v;
368
369 ENTRY;
370 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
371 ap->a_ioflag, ap->a_cred, curlwp));
372 }
373
374 int
375 coda_write(void *v)
376 {
377 struct vop_write_args *ap = v;
378
379 ENTRY;
380 return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
381 ap->a_ioflag, ap->a_cred, curlwp));
382 }
383
/*
 * Common read/write path: redirect the I/O to the container file,
 * acquiring one first (via coda_grab_vnode, or an internal VOP_OPEN)
 * if the file is not already open, and let the underlying file
 * system do the actual work.
 */
int
coda_rdwr(struct vnode *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
	kauth_cred_t cred, struct lwp *l)
{
	/* upcall decl */
	/* NOTE: container file operation!!! */
	/* locals */
	struct cnode *cp = VTOC(vp);
	struct vnode *cfvp = cp->c_ovp;
	struct proc *p = l->l_proc;
	int opened_internally = 0;
	int error = 0;

	MARK_ENTRY(CODA_RDWR_STATS);

	CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
	    uiop->uio_iov->iov_base,
	    (unsigned long) uiop->uio_resid,
	    (long long) uiop->uio_offset)); )

	/* Check for rdwr of control object: not supported. */
	if (IS_CTL_VP(vp)) {
		MARK_INT_FAIL(CODA_RDWR_STATS);
		return(EINVAL);
	}

	/* Redirect the request to UFS. */

	/*
	 * If file is not already open this must be a page
	 * {read,write} request.  Iget the cache file's inode
	 * pointer if we still have its <device, inode> pair.
	 * Otherwise, we must do an internal open to derive the
	 * pair.
	 * XXX Integrate this into a coherent strategy for container
	 * file acquisition.
	 */
	if (cfvp == NULL) {
		/*
		 * If we're dumping core, do the internal open.  Otherwise
		 * venus won't have the correct size of the core when
		 * it's completely written.
		 */
		if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
			printf("coda_rdwr: grabbing container vnode, losing reference\n");
			/* Get locked and refed vnode. */
			error = coda_grab_vnode(cp->c_device, cp->c_inode, &cfvp);
			if (error) {
				MARK_INT_FAIL(CODA_RDWR_STATS);
				return(error);
			}
			/*
			 * Drop lock.
			 * XXX Where is reference released.
			 */
			VOP_UNLOCK(cfvp, 0);
		}
		else {
			printf("coda_rdwr: internal VOP_OPEN\n");
			opened_internally = 1;
			MARK_INT_GEN(CODA_OPEN_STATS);
			error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
#ifdef CODA_VERBOSE
			printf("coda_rdwr: Internally Opening %p\n", vp);
#endif
			if (error) {
				MARK_INT_FAIL(CODA_RDWR_STATS);
				return(error);
			}
			/* The internal open stored the container in c_ovp. */
			cfvp = cp->c_ovp;
		}
	}

	/* Have UFS handle the call. */
	CODADEBUG(CODA_RDWR, myprintf(("indirect rdwr: fid = %s, refcnt = %d\n",
	    coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount)); )

	if (rw == UIO_READ) {
		error = VOP_READ(cfvp, uiop, ioflag, cred);
	} else {
		error = VOP_WRITE(cfvp, uiop, ioflag, cred);
	}

	if (error)
		MARK_INT_FAIL(CODA_RDWR_STATS);
	else
		MARK_INT_SAT(CODA_RDWR_STATS);

	/* Do an internal close if necessary, to balance the internal open. */
	if (opened_internally) {
		MARK_INT_GEN(CODA_CLOSE_STATS);
		(void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
	}

	/* Invalidate cached attributes if writing (size/mtime changed). */
	if (rw == UIO_WRITE)
		cp->c_flags &= ~C_VATTR;
	return(error);
}
483
/*
 * Handle the Coda pioctl.  Only valid on the control vnode: the
 * PioctlData argument carries a user-space path, which is resolved
 * here, checked to be a coda object, and forwarded to venus.
 */
int
coda_ioctl(void *v)
{
	/* true args */
	struct vop_ioctl_args *ap = v;
	struct vnode *vp = ap->a_vp;
	int com = ap->a_command;
	void *data = ap->a_data;
	int flag = ap->a_fflag;
	kauth_cred_t cred = ap->a_cred;
	/* locals */
	int error;
	struct vnode *tvp;
	struct nameidata ndp;
	struct PioctlData *iap = (struct PioctlData *)data;

	MARK_ENTRY(CODA_IOCTL_STATS);

	CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)

	/* Don't check for operation on a dying object, for ctlvp it
	   shouldn't matter */

	/* Must be control object to succeed. */
	if (!IS_CTL_VP(vp)) {
		MARK_INT_FAIL(CODA_IOCTL_STATS);
		CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: vp != ctlvp"));)
		return (EOPNOTSUPP);
	}
	/* Look up the pathname. */

	/* Should we use the name cache here? It would get it from
	   lookupname sooner or later anyway, right? */

	NDINIT(&ndp, LOOKUP, (iap->follow ? FOLLOW : NOFOLLOW), UIO_USERSPACE,
	    iap->path);
	error = namei(&ndp);
	/* On success, namei returns tvp referenced; released via vrele below. */
	tvp = ndp.ni_vp;

	if (error) {
		MARK_INT_FAIL(CODA_IOCTL_STATS);
		CODADEBUG(CODA_IOCTL, myprintf(("coda_ioctl error: lookup returns %d\n",
		    error));)
		return(error);
	}

	/*
	 * Make sure this is a coda style cnode, but it may be a
	 * different vfsp
	 */
	/* XXX: this totally violates the comment about vtagtype in vnode.h */
	if (tvp->v_tag != VT_CODA) {
		vrele(tvp);
		MARK_INT_FAIL(CODA_IOCTL_STATS);
		CODADEBUG(CODA_IOCTL,
		    myprintf(("coda_ioctl error: %s not a coda object\n",
		    iap->path));)
		return(EINVAL);
	}

	/* Reject user-supplied buffer sizes larger than venus can handle. */
	if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) {
		vrele(tvp);
		return(EINVAL);
	}
	error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
	    cred, curlwp);

	if (error)
		MARK_INT_FAIL(CODA_IOCTL_STATS);
	else
		CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )

	/* Drop the reference namei took on the target vnode. */
	vrele(tvp);
	return(error);
}
559
560 /*
561 * To reduce the cost of a user-level venus;we cache attributes in
562 * the kernel. Each cnode has storage allocated for an attribute. If
563 * c_vattr is valid, return a reference to it. Otherwise, get the
564 * attributes from venus and store them in the cnode. There is some
565 * question if this method is a security leak. But I think that in
566 * order to make this call, the user must have done a lookup and
567 * opened the file, and therefore should already have access.
568 */
569 int
570 coda_getattr(void *v)
571 {
572 /* true args */
573 struct vop_getattr_args *ap = v;
574 struct vnode *vp = ap->a_vp;
575 struct cnode *cp = VTOC(vp);
576 struct vattr *vap = ap->a_vap;
577 kauth_cred_t cred = ap->a_cred;
578 /* locals */
579 int error;
580
581 MARK_ENTRY(CODA_GETATTR_STATS);
582
583 /* Check for getattr of control object. */
584 if (IS_CTL_VP(vp)) {
585 MARK_INT_FAIL(CODA_GETATTR_STATS);
586 return(ENOENT);
587 }
588
589 /* Check to see if the attributes have already been cached */
590 if (VALID_VATTR(cp)) {
591 CODADEBUG(CODA_GETATTR, { myprintf(("attr cache hit: %s\n",
592 coda_f2s(&cp->c_fid)));});
593 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
594 print_vattr(&cp->c_vattr); );
595
596 *vap = cp->c_vattr;
597 MARK_INT_SAT(CODA_GETATTR_STATS);
598 return(0);
599 }
600
601 error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
602
603 if (!error) {
604 CODADEBUG(CODA_GETATTR, myprintf(("getattr miss %s: result %d\n",
605 coda_f2s(&cp->c_fid), error)); )
606
607 CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
608 print_vattr(vap); );
609
610 /* If not open for write, store attributes in cnode */
611 if ((cp->c_owrite == 0) && (coda_attr_cache)) {
612 cp->c_vattr = *vap;
613 cp->c_flags |= C_VATTR;
614 }
615
616 }
617 return(error);
618 }
619
620 int
621 coda_setattr(void *v)
622 {
623 /* true args */
624 struct vop_setattr_args *ap = v;
625 struct vnode *vp = ap->a_vp;
626 struct cnode *cp = VTOC(vp);
627 struct vattr *vap = ap->a_vap;
628 kauth_cred_t cred = ap->a_cred;
629 /* locals */
630 int error;
631
632 MARK_ENTRY(CODA_SETATTR_STATS);
633
634 /* Check for setattr of control object. */
635 if (IS_CTL_VP(vp)) {
636 MARK_INT_FAIL(CODA_SETATTR_STATS);
637 return(ENOENT);
638 }
639
640 if (codadebug & CODADBGMSK(CODA_SETATTR)) {
641 print_vattr(vap);
642 }
643 error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
644
645 if (!error)
646 cp->c_flags &= ~C_VATTR;
647
648 CODADEBUG(CODA_SETATTR, myprintf(("setattr %d\n", error)); )
649 return(error);
650 }
651
652 int
653 coda_access(void *v)
654 {
655 /* true args */
656 struct vop_access_args *ap = v;
657 struct vnode *vp = ap->a_vp;
658 struct cnode *cp = VTOC(vp);
659 int mode = ap->a_mode;
660 kauth_cred_t cred = ap->a_cred;
661 /* locals */
662 int error;
663
664 MARK_ENTRY(CODA_ACCESS_STATS);
665
666 /* Check for access of control object. Only read access is
667 allowed on it. */
668 if (IS_CTL_VP(vp)) {
669 /* bogus hack - all will be marked as successes */
670 MARK_INT_SAT(CODA_ACCESS_STATS);
671 return(((mode & VREAD) && !(mode & (VWRITE | VEXEC)))
672 ? 0 : EACCES);
673 }
674
675 /*
676 * if the file is a directory, and we are checking exec (eg lookup)
677 * access, and the file is in the namecache, then the user must have
678 * lookup access to it.
679 */
680 if (coda_access_cache) {
681 if ((vp->v_type == VDIR) && (mode & VEXEC)) {
682 if (coda_nc_lookup(cp, ".", 1, cred)) {
683 MARK_INT_SAT(CODA_ACCESS_STATS);
684 return(0); /* it was in the cache */
685 }
686 }
687 }
688
689 error = venus_access(vtomi(vp), &cp->c_fid, mode, cred, curlwp);
690
691 return(error);
692 }
693
694 /*
695 * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
696 * done. If a buffer has been saved in anticipation of a coda_create or
697 * a coda_remove, delete it.
698 */
699 /* ARGSUSED */
700 int
701 coda_abortop(void *v)
702 {
703 /* true args */
704 struct vop_abortop_args /* {
705 struct vnode *a_dvp;
706 struct componentname *a_cnp;
707 } */ *ap = v;
708 /* upcall decl */
709 /* locals */
710
711 if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
712 PNBUF_PUT(ap->a_cnp->cn_pnbuf);
713 return (0);
714 }
715
/*
 * VOP_READLINK: return the target of a symbolic link, serving from
 * the cnode's symlink cache when possible and populating it after a
 * venus upcall when coda_symlink_cache is enabled.
 */
int
coda_readlink(void *v)
{
	/* true args */
	struct vop_readlink_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct uio *uiop = ap->a_uio;
	kauth_cred_t cred = ap->a_cred;
	/* locals */
	struct lwp *l = curlwp;
	int error;
	char *str;
	int len;

	MARK_ENTRY(CODA_READLINK_STATS);

	/* Check for readlink of control object: not supported. */
	if (IS_CTL_VP(vp)) {
		MARK_INT_FAIL(CODA_READLINK_STATS);
		return(ENOENT);
	}

	if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
		uiop->uio_rw = UIO_READ;
		error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
		if (error)
			MARK_INT_FAIL(CODA_READLINK_STATS);
		else
			MARK_INT_SAT(CODA_READLINK_STATS);
		return(error);
	}

	/* Cache miss: ask venus for the link text. */
	error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);

	if (!error) {
		uiop->uio_rw = UIO_READ;
		error = uiomove(str, len, uiop);

		/*
		 * Either transfer ownership of the buffer to the cnode's
		 * symlink cache (it is freed in coda_inactive) or release
		 * it immediately.
		 */
		if (coda_symlink_cache) {
			cp->c_symlink = str;
			cp->c_symlen = len;
			cp->c_flags |= C_SYMLINK;
		} else
			CODA_FREE(str, len);
	}

	CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
	return(error);
}
766
767 int
768 coda_fsync(void *v)
769 {
770 /* true args */
771 struct vop_fsync_args *ap = v;
772 struct vnode *vp = ap->a_vp;
773 struct cnode *cp = VTOC(vp);
774 kauth_cred_t cred = ap->a_cred;
775 /* locals */
776 struct vnode *convp = cp->c_ovp;
777 int error;
778
779 MARK_ENTRY(CODA_FSYNC_STATS);
780
781 /* Check for fsync on an unmounting object */
782 /* The NetBSD kernel, in it's infinite wisdom, can try to fsync
783 * after an unmount has been initiated. This is a Bad Thing,
784 * which we have to avoid. Not a legitimate failure for stats.
785 */
786 if (IS_UNMOUNTING(cp)) {
787 return(ENODEV);
788 }
789
790 /* Check for fsync of control object. */
791 if (IS_CTL_VP(vp)) {
792 MARK_INT_SAT(CODA_FSYNC_STATS);
793 return(0);
794 }
795
796 if (convp)
797 VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);
798
799 /*
800 * We can expect fsync on any vnode at all if venus is pruging it.
801 * Venus can't very well answer the fsync request, now can it?
802 * Hopefully, it won't have to, because hopefully, venus preserves
803 * the (possibly untrue) invariant that it never purges an open
804 * vnode. Hopefully.
805 */
806 if (cp->c_flags & C_PURGING) {
807 return(0);
808 }
809
810 error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);
811
812 CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); );
813 return(error);
814 }
815
/*
 * vp is locked on entry, and we must unlock it.
 * Frees the cached symlink text, removes the cnode from the private
 * table, and (when not unmounting) asks the kernel to recycle the
 * vnode.
 * XXX This routine is suspect and probably needs rewriting.
 */
int
coda_inactive(void *v)
{
	/* true args */
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	kauth_cred_t cred __unused = NULL;

	/* We don't need to send inactive to venus - DCS */
	MARK_ENTRY(CODA_INACTIVE_STATS);

	/* The control object is left alone. */
	if (IS_CTL_VP(vp)) {
		MARK_INT_SAT(CODA_INACTIVE_STATS);
		return 0;
	}

	CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
	    coda_f2s(&cp->c_fid), vp->v_mount));)

	/* If an array has been allocated to hold the symlink, deallocate it */
	if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
		/* C_SYMLINK set with a NULL buffer indicates corruption. */
		if (cp->c_symlink == NULL)
			panic("coda_inactive: null symlink pointer in cnode");

		CODA_FREE(cp->c_symlink, cp->c_symlen);
		cp->c_flags &= ~C_SYMLINK;
		cp->c_symlen = 0;
	}

	/* Remove it from the table so it can't be found. */
	coda_unsave(cp);
	if (vp->v_mount->mnt_data == NULL) {
		myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
		panic("badness in coda_inactive");
	}

	if (IS_UNMOUNTING(cp)) {
		/* XXX Do we need to VOP_CLOSE container vnodes? */
		if (vp->v_usecount > 0)
			printf("coda_inactive: IS_UNMOUNTING %p usecount %d\n",
			    vp, vp->v_usecount);
		if (cp->c_ovp != NULL)
			printf("coda_inactive: %p ovp != NULL\n", vp);
		VOP_UNLOCK(vp, 0);
	} else {
		/* Sanity checks that perhaps should be panic. */
		if (vp->v_usecount) {
			printf("coda_inactive: %p usecount %d\n", vp, vp->v_usecount);
		}
		if (cp->c_ovp != NULL) {
			printf("coda_inactive: %p ovp != NULL\n", vp);
		}
		VOP_UNLOCK(vp, 0);
		/* Tell the caller to recycle (reclaim) this vnode. */
		*ap->a_recycle = true;
	}

	MARK_INT_SAT(CODA_INACTIVE_STATS);
	return(0);
}
880
/*
 * Coda does not use the normal namecache, but a private version.
 * Consider how to use the standard facility instead.
 *
 * VOP_LOOKUP: resolve one component in directory dvp, first against
 * the coda name cache and then via a venus upcall, returning a
 * referenced vnode in *vpp.  The lock/unlock protocol at exit:
 * depends on whether the result is ., .., or a normal child.
 */
int
coda_lookup(void *v)
{
	/* true args */
	struct vop_lookup_args *ap = v;
	/* (locked) vnode of dir in which to do lookup */
	struct vnode *dvp = ap->a_dvp;
	struct cnode *dcp = VTOC(dvp);
	/* output variable for result */
	struct vnode **vpp = ap->a_vpp;
	/* name to lookup */
	struct componentname *cnp = ap->a_cnp;
	kauth_cred_t cred = cnp->cn_cred;
	struct lwp *l = curlwp;
	/* locals */
	struct cnode *cp;
	const char *nm = cnp->cn_nameptr;
	int len = cnp->cn_namelen;
	int flags = cnp->cn_flags;
	int isdot;
	CodaFid VFid;
	int vtype;
	int error = 0;

	MARK_ENTRY(CODA_LOOKUP_STATS);

	CODADEBUG(CODA_LOOKUP, myprintf(("lookup: %s in %s\n",
	    nm, coda_f2s(&dcp->c_fid))););

	/*
	 * XXX componentname flags in MODMASK are not handled at all
	 */

	/*
	 * The overall strategy is to switch on the lookup type and get a
	 * result vnode that is vref'd but not locked.  Then, the code at
	 * exit: switches on ., .., and regular lookups and does the right
	 * locking.
	 */

	/* Check for lookup of control object. */
	if (IS_CTL_NAME(dvp, nm, len)) {
		*vpp = coda_ctlvp;
		vref(*vpp);
		MARK_INT_SAT(CODA_LOOKUP_STATS);
		goto exit;
	}

	/* Avoid trying to hand venus an unreasonably long name. */
	if (len+1 > CODA_MAXNAMLEN) {
		MARK_INT_FAIL(CODA_LOOKUP_STATS);
		CODADEBUG(CODA_LOOKUP, myprintf(("name too long: lookup, %s (%s)\n",
		    coda_f2s(&dcp->c_fid), nm)););
		*vpp = (struct vnode *)0;
		error = EINVAL;
		goto exit;
	}

	/*
	 * XXX Check for DOT lookups, and short circuit all the caches,
	 * just doing an extra vref.  (venus guarantees that lookup of
	 * . returns self.)
	 * NOTE(review): isdot is computed here but never consulted in
	 * the code below -- the exit path keys on *ap->a_vpp != dvp
	 * instead.  TODO confirm whether this is vestigial.
	 */
	isdot = (len == 1 && nm[0] == '.');

	/*
	 * Try to resolve the lookup in the minicache.  If that fails, ask
	 * venus to do the lookup.  XXX The interaction between vnode
	 * locking and any locking that coda does is not clear.
	 */
	cp = coda_nc_lookup(dcp, nm, len, cred);
	if (cp) {
		/* Cache hit: return a fresh reference to the cached vnode. */
		*vpp = CTOV(cp);
		vref(*vpp);
		CODADEBUG(CODA_LOOKUP,
		    myprintf(("lookup result %d vpp %p\n",error,*vpp));)
	} else {
		/* The name wasn't cached, so ask Venus. */
		error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid, &vtype);

		if (error) {
			MARK_INT_FAIL(CODA_LOOKUP_STATS);
			CODADEBUG(CODA_LOOKUP, myprintf(("lookup error on %s (%s)%d\n",
			    coda_f2s(&dcp->c_fid), nm, error));)
			*vpp = (struct vnode *)0;
		} else {
			MARK_INT_SAT(CODA_LOOKUP_STATS);
			CODADEBUG(CODA_LOOKUP,
			    myprintf(("lookup: %s type %o result %d\n",
			    coda_f2s(&VFid), vtype, error)); )

			cp = make_coda_node(&VFid, dvp->v_mount, vtype);
			*vpp = CTOV(cp);
			/* vpp is now vrefed. */

			/*
			 * Unless this vnode is marked CODA_NOCACHE, enter it into
			 * the coda name cache to avoid a future venus round-trip.
			 * XXX Interaction with componentname NOCACHE is unclear.
			 */
			if (!(vtype & CODA_NOCACHE))
				coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
		}
	}

 exit:
	/*
	 * If we are creating, and this was the last name to be looked up,
	 * and the error was ENOENT, then make the leaf NULL and return
	 * success.
	 * XXX Check against new lookup rules.
	 */
	if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
	    && (cnp->cn_flags & ISLASTCN)
	    && (error == ENOENT))
	{
		error = EJUSTRETURN;
		cnp->cn_flags |= SAVENAME;
		*ap->a_vpp = NULL;
	}

	/*
	 * If we are removing, and we are at the last element, and we
	 * found it, then we need to keep the name around so that the
	 * removal will go ahead as planned.
	 * XXX Check against new lookup rules.
	 */
	if ((cnp->cn_nameiop == DELETE)
	    && (cnp->cn_flags & ISLASTCN)
	    && !error)
	{
		cnp->cn_flags |= SAVENAME;
	}

	/*
	 * If the lookup succeeded, we must generally lock the returned
	 * vnode.  This could be a ., .., or normal lookup.  See
	 * vnodeops(9) for the details.
	 */
	/*
	 * XXX LK_RETRY is likely incorrect.  Handle vn_lock failure
	 * somehow, and remove LK_RETRY.
	 */
	if (!error || (error == EJUSTRETURN)) {
		/* Lookup has a value and it isn't "."? */
		if (*ap->a_vpp && (*ap->a_vpp != dvp)) {
			if (flags & ISDOTDOT)
				/* ..: unlock parent */
				VOP_UNLOCK(dvp, 0);
			/* all but .: lock child */
			vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY);
			if (flags & ISDOTDOT)
				/* ..: relock parent */
				vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
		/* else .: leave dvp locked */
	} else {
		/* The lookup failed, so return NULL.  Leave dvp locked. */
		*ap->a_vpp = NULL;
	}
	return(error);
}
1047
/*
 * VOP_CREATE: ask venus to create the file, build a cnode for the
 * result, prime the attribute and name caches, and honor the
 * vnodeops(9) lock/name-buffer protocol on exit.
 */
/*ARGSUSED*/
int
coda_create(void *v)
{
	/* true args */
	struct vop_create_args *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct cnode *dcp = VTOC(dvp);
	struct vattr *va = ap->a_vap;
	int exclusive = 1;	/* All creates are exclusive XXX */
	int mode = ap->a_vap->va_mode;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	kauth_cred_t cred = cnp->cn_cred;
	struct lwp *l = curlwp;
	/* locals */
	int error;
	struct cnode *cp;
	const char *nm = cnp->cn_nameptr;
	int len = cnp->cn_namelen;
	CodaFid VFid;
	struct vattr attr;

	MARK_ENTRY(CODA_CREATE_STATS);

	/* All creates are exclusive XXX */
	/* I'm assuming the 'mode' argument is the file mode bits XXX */

	/* Check for create of control object: never allowed. */
	if (IS_CTL_NAME(dvp, nm, len)) {
		*vpp = (struct vnode *)0;
		MARK_INT_FAIL(CODA_CREATE_STATS);
		return(EACCES);
	}

	error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);

	if (!error) {

		/*
		 * XXX Violation of venus/kernel invariants is a difficult case,
		 * but venus should not be able to cause a panic.
		 */
		/* If this is an exclusive create, panic if the file already exists. */
		/* Venus should have detected the file and reported EEXIST. */

		if ((exclusive == 1) &&
		    (coda_find(&VFid) != NULL))
			panic("cnode existed for newly created file!");

		cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
		*vpp = CTOV(cp);

		/* XXX vnodeops doesn't say this argument can be changed. */
		/* Update va to reflect the new attributes. */
		(*va) = attr;

		/* Update the attribute cache and mark it as valid */
		if (coda_attr_cache) {
			VTOC(*vpp)->c_vattr = attr;
			VTOC(*vpp)->c_flags |= C_VATTR;
		}

		/* Invalidate parent's attr cache (modification time has changed). */
		VTOC(dvp)->c_flags &= ~C_VATTR;

		/* enter the new vnode in the Name Cache */
		coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));

		CODADEBUG(CODA_CREATE,
		    myprintf(("create: %s, result %d\n",
		    coda_f2s(&VFid), error)); )
	} else {
		*vpp = (struct vnode *)0;
		CODADEBUG(CODA_CREATE, myprintf(("create error %d\n", error));)
	}

	/*
	 * vnodeops(9) says that we must unlock the parent and lock the child.
	 * XXX Should we lock the child first?
	 */
	vput(dvp);
	if (!error) {
		if ((cnp->cn_flags & LOCKLEAF) == 0) {
			/* This should not happen; flags are for lookup only. */
			printf("coda_create: LOCKLEAF not set!\n");
		}

		if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
			/* XXX Perhaps avoid this panic. */
			panic("coda_create: couldn't lock child");
		}
	}

	/* Per vnodeops(9), free name except on success and SAVESTART. */
	if (error || (cnp->cn_flags & SAVESTART) == 0) {
		PNBUF_PUT(cnp->cn_pnbuf);
	}
	return(error);
}
1148
/*
 * coda_remove: VOP_REMOVE(9).  Remove file nm from directory dvp via a
 * venus upcall, keeping the Coda name and attribute caches consistent.
 *
 * Locking: dvp and vp arrive locked and referenced; the normal exit path
 * releases both (single vput when dvp == vp, i.e. ".").
 */
int
coda_remove(void *v)
{
/* true args */
	struct vop_remove_args *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct cnode *cp = VTOC(dvp);	/* cnode of the *directory*, despite the name */
	struct vnode *vp = ap->a_vp;
	struct componentname *cnp = ap->a_cnp;
	kauth_cred_t cred = cnp->cn_cred;
	struct lwp *l = curlwp;
/* locals */
	int error;
	const char *nm = cnp->cn_nameptr;
	int len = cnp->cn_namelen;
	struct cnode *tp;

	MARK_ENTRY(CODA_REMOVE_STATS);

	CODADEBUG(CODA_REMOVE, myprintf(("remove: %s in %s\n",
					 nm, coda_f2s(&cp->c_fid))););

	/* Remove the file's entry from the CODA Name Cache */
	/* We're being conservative here, it might be that this person
	 * doesn't really have sufficient access to delete the file
	 * but we feel zapping the entry won't really hurt anyone -- dcs
	 */
	/* I'm gonna go out on a limb here. If a file and a hardlink to it
	 * exist, and one is removed, the link count on the other will be
	 * off by 1. We could either invalidate the attrs if cached, or
	 * fix them. I'll try to fix them. DCS 11/8/94
	 */
	tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
	if (tp) {
		if (VALID_VATTR(tp)) {	/* If attrs are cached */
			if (tp->c_vattr.va_nlink > 1) {	/* If it's a hard link */
				tp->c_vattr.va_nlink--;
			}
		}

		coda_nc_zapfile(VTOC(dvp), nm, len);
		/* No need to flush it if it doesn't exist! */
	}
	/* Invalidate the parent's attr cache, the modification time has changed */
	VTOC(dvp)->c_flags &= ~C_VATTR;

	/* Check for remove of control object. */
	if (IS_CTL_NAME(dvp, nm, len)) {
		MARK_INT_FAIL(CODA_REMOVE_STATS);
		/*
		 * NOTE(review): this early return skips the vput() calls
		 * below, apparently leaving dvp and vp locked and
		 * referenced -- confirm against vnodeops(9).
		 */
		return(ENOENT);
	}

	error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);

	CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )

	/*
	 * Unlock parent and child (avoiding double if ".").
	 */
	if (dvp == vp) {
		vrele(vp);
	} else {
		vput(vp);
	}
	vput(dvp);

	return(error);
}
1217
1218 /*
1219 * dvp is the directory where the link is to go, and is locked.
1220 * vp is the object to be linked to, and is unlocked.
1221 * At exit, we must unlock dvp, and vput dvp.
1222 */
1223 int
1224 coda_link(void *v)
1225 {
1226 /* true args */
1227 struct vop_link_args *ap = v;
1228 struct vnode *vp = ap->a_vp;
1229 struct cnode *cp = VTOC(vp);
1230 struct vnode *dvp = ap->a_dvp;
1231 struct cnode *dcp = VTOC(dvp);
1232 struct componentname *cnp = ap->a_cnp;
1233 kauth_cred_t cred = cnp->cn_cred;
1234 struct lwp *l = curlwp;
1235 /* locals */
1236 int error;
1237 const char *nm = cnp->cn_nameptr;
1238 int len = cnp->cn_namelen;
1239
1240 MARK_ENTRY(CODA_LINK_STATS);
1241
1242 if (codadebug & CODADBGMSK(CODA_LINK)) {
1243
1244 myprintf(("nb_link: vp fid: %s\n",
1245 coda_f2s(&cp->c_fid)));
1246 myprintf(("nb_link: dvp fid: %s)\n",
1247 coda_f2s(&dcp->c_fid)));
1248
1249 }
1250 if (codadebug & CODADBGMSK(CODA_LINK)) {
1251 myprintf(("link: vp fid: %s\n",
1252 coda_f2s(&cp->c_fid)));
1253 myprintf(("link: dvp fid: %s\n",
1254 coda_f2s(&dcp->c_fid)));
1255
1256 }
1257
1258 /* Check for link to/from control object. */
1259 if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1260 MARK_INT_FAIL(CODA_LINK_STATS);
1261 return(EACCES);
1262 }
1263
1264 /* If linking . to a name, error out earlier. */
1265 if (vp == dvp) {
1266 printf("coda_link vp==dvp\n");
1267 error = EISDIR;
1268 goto exit;
1269 }
1270
1271 /* XXX Why does venus_link need the vnode to be locked?*/
1272 if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1273 printf("coda_link: couldn't lock vnode %p\n", vp);
1274 error = EFAULT; /* XXX better value */
1275 goto exit;
1276 }
1277 error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1278 VOP_UNLOCK(vp, 0);
1279
1280 /* Invalidate parent's attr cache (the modification time has changed). */
1281 VTOC(dvp)->c_flags &= ~C_VATTR;
1282 /* Invalidate child's attr cache (XXX why). */
1283 VTOC(vp)->c_flags &= ~C_VATTR;
1284
1285 CODADEBUG(CODA_LINK, myprintf(("in link result %d\n",error)); )
1286
1287 exit:
1288 vput(dvp);
1289 return(error);
1290 }
1291
/*
 * coda_rename: VOP_RENAME(9).  Rename odvp/fnm to ndvp/tnm via a venus
 * upcall, after purging the affected name-cache entries and invalidating
 * cached attributes of both directories.
 *
 * Locking (per the comment at "exit"): from-side vnodes (fdvp/fvp) are
 * only referenced and get vrele(); to-side vnodes (tdvp/tvp) are locked
 * and get vput() (vrele() when tvp == tdvp, to avoid a double unlock).
 */
int
coda_rename(void *v)
{
/* true args */
	struct vop_rename_args *ap = v;
	struct vnode *odvp = ap->a_fdvp;
	struct cnode *odcp = VTOC(odvp);
	struct componentname *fcnp = ap->a_fcnp;
	struct vnode *ndvp = ap->a_tdvp;
	struct cnode *ndcp = VTOC(ndvp);
	struct componentname *tcnp = ap->a_tcnp;
	kauth_cred_t cred = fcnp->cn_cred;
	struct lwp *l = curlwp;
/* locals */
	int error;
	const char *fnm = fcnp->cn_nameptr;
	int flen = fcnp->cn_namelen;
	const char *tnm = tcnp->cn_nameptr;
	int tlen = tcnp->cn_namelen;

	MARK_ENTRY(CODA_RENAME_STATS);

	/* Hmmm.  The vnodes are already looked up.  Perhaps they are locked?
	   This could be Bad. XXX */
#ifdef OLD_DIAGNOSTIC
	if ((fcnp->cn_cred != tcnp->cn_cred)
	    || (fcnp->cn_lwp != tcnp->cn_lwp))
	{
		panic("coda_rename: component names don't agree");
	}
#endif

	/* Check for rename involving control object. */
	if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
		MARK_INT_FAIL(CODA_RENAME_STATS);
		/*
		 * NOTE(review): this early return skips the vrele/vput
		 * cleanup at "exit" -- looks like a reference leak;
		 * confirm against vnodeops(9).
		 */
		return(EACCES);
	}

	/* Problem with moving directories -- need to flush entry for .. */
	if (odvp != ndvp) {
		struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
		if (ovcp) {
			struct vnode *ovp = CTOV(ovcp);
			if ((ovp) &&
			    (ovp->v_type == VDIR)) /* If it's a directory */
				coda_nc_zapfile(VTOC(ovp),"..", 2);
		}
	}

	/* Remove the entries for both source and target files */
	coda_nc_zapfile(VTOC(odvp), fnm, flen);
	coda_nc_zapfile(VTOC(ndvp), tnm, tlen);

	/* Invalidate the parent's attr cache, the modification time has changed */
	VTOC(odvp)->c_flags &= ~C_VATTR;
	VTOC(ndvp)->c_flags &= ~C_VATTR;

	/* Reject over-long source name before upcalling. */
	if (flen+1 > CODA_MAXNAMLEN) {
		MARK_INT_FAIL(CODA_RENAME_STATS);
		error = EINVAL;
		goto exit;
	}

	/* Reject over-long target name before upcalling. */
	if (tlen+1 > CODA_MAXNAMLEN) {
		MARK_INT_FAIL(CODA_RENAME_STATS);
		error = EINVAL;
		goto exit;
	}

	error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);

 exit:
	CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
	/* XXX - do we need to call cache purge on the moved vnode? */
	cache_purge(ap->a_fvp);

	/* It seems to be incumbent on us to drop locks on all four vnodes */
	/* From-vnodes are not locked, only ref'd.  To-vnodes are locked. */

	vrele(ap->a_fvp);
	vrele(odvp);

	if (ap->a_tvp) {
		if (ap->a_tvp == ndvp) {
			vrele(ap->a_tvp);
		} else {
			vput(ap->a_tvp);
		}
	}

	vput(ndvp);
	return(error);
}
1385
/*
 * coda_mkdir: VOP_MKDIR(9).  Create directory nm in dvp via a venus
 * upcall; on success return the new, locked directory vnode in *vpp and
 * prime the name cache (including "." and "..") and the attribute cache.
 *
 * Locking: dvp arrives locked/referenced and is vput() before return.
 */
int
coda_mkdir(void *v)
{
/* true args */
	struct vop_mkdir_args *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct cnode *dcp = VTOC(dvp);
	struct componentname *cnp = ap->a_cnp;
	struct vattr *va = ap->a_vap;
	struct vnode **vpp = ap->a_vpp;
	kauth_cred_t cred = cnp->cn_cred;
	struct lwp *l = curlwp;
/* locals */
	int error;
	const char *nm = cnp->cn_nameptr;
	int len = cnp->cn_namelen;
	struct cnode *cp;
	CodaFid VFid;		/* fid of the new directory, from venus */
	struct vattr ova;	/* attributes of the new directory, from venus */

	MARK_ENTRY(CODA_MKDIR_STATS);

	/* Check for mkdir of target object. */
	if (IS_CTL_NAME(dvp, nm, len)) {
		*vpp = (struct vnode *)0;
		MARK_INT_FAIL(CODA_MKDIR_STATS);
		/*
		 * NOTE(review): early returns here skip the vput(dvp)
		 * done on the normal path -- confirm against vnodeops(9).
		 */
		return(EACCES);
	}

	/* XXX returns EACCES rather than ENAMETOOLONG for an over-long name. */
	if (len+1 > CODA_MAXNAMLEN) {
		*vpp = (struct vnode *)0;
		MARK_INT_FAIL(CODA_MKDIR_STATS);
		return(EACCES);
	}

	error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);

	if (!error) {
		/* Venus must not hand back a fid we already have a cnode for. */
		if (coda_find(&VFid) != NULL)
			panic("cnode existed for newly created directory!");


		cp =  make_coda_node(&VFid, dvp->v_mount, va->va_type);
		*vpp = CTOV(cp);

		/* enter the new vnode in the Name Cache */
		coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));

		/* as a side effect, enter "." and ".." for the directory */
		coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
		coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));

		if (coda_attr_cache) {
			VTOC(*vpp)->c_vattr = ova;		/* update the attr cache */
			VTOC(*vpp)->c_flags |= C_VATTR;		/* Valid attributes in cnode */
		}

		/* Invalidate the parent's attr cache, the modification time has changed */
		VTOC(dvp)->c_flags &= ~C_VATTR;

		CODADEBUG( CODA_MKDIR, myprintf(("mkdir: %s result %d\n",
						 coda_f2s(&VFid), error)); )
	} else {
		*vpp = (struct vnode *)0;
		CODADEBUG(CODA_MKDIR, myprintf(("mkdir error %d\n",error));)
	}

	/*
	 * Currently, all mkdirs explicitly vput their dvp's.
	 * It also appears that we *must* lock the vpp, since
	 * lockleaf isn't set, but someone down the road is going
	 * to try to unlock the new directory.
	 */
	vput(dvp);
	if (!error) {
		if ((error = vn_lock(*ap->a_vpp, LK_EXCLUSIVE))) {
			panic("coda_mkdir: couldn't lock child");
		}
	}

	/* Per vnodeops(9), free name except on success and SAVESTART. */
	if (error || (cnp->cn_flags & SAVESTART) == 0) {
		PNBUF_PUT(cnp->cn_pnbuf);
	}
	return(error);
}
1472
/*
 * coda_rmdir: VOP_RMDIR(9).  Remove directory nm from dvp via a venus
 * upcall, after purging the child's name-cache entries.
 *
 * Locking: dvp and vp arrive locked/referenced; the "exit" path releases
 * both (vrele for vp when dvp == vp, since vput(dvp) already unlocked it).
 */
int
coda_rmdir(void *v)
{
/* true args */
	struct vop_rmdir_args *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct cnode *dcp = VTOC(dvp);
	struct vnode *vp = ap->a_vp;
	struct componentname *cnp = ap->a_cnp;
	kauth_cred_t cred = cnp->cn_cred;
	struct lwp *l = curlwp;
/* locals */
	int error;
	const char *nm = cnp->cn_nameptr;
	int len = cnp->cn_namelen;
	struct cnode *cp;

	MARK_ENTRY(CODA_RMDIR_STATS);

	/* Check for rmdir of control object. */
	if (IS_CTL_NAME(dvp, nm, len)) {
		MARK_INT_FAIL(CODA_RMDIR_STATS);
		/*
		 * NOTE(review): early return skips the vput() cleanup at
		 * "exit", apparently leaving dvp and vp locked -- confirm
		 * against vnodeops(9).
		 */
		return(ENOENT);
	}

	/* Can't remove . in self. */
	if (dvp == vp) {
		printf("coda_rmdir: dvp == vp\n");
		error = EINVAL;
		goto exit;
	}

	/*
	 * The caller may not have adequate permissions, and the venus
	 * operation may fail, but it doesn't hurt from a correctness
	 * viewpoint to invalidate cache entries.
	 * XXX Why isn't this done after the venus_rmdir call?
	 */
	/* Look up child in name cache (by name, from parent). */
	cp = coda_nc_lookup(dcp, nm, len, cred);
	/* If found, remove all children of the child (., ..). */
	if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);

	/* Remove child's own entry. */
	coda_nc_zapfile(dcp, nm, len);

	/* Invalidate parent's attr cache (the modification time has changed). */
	dcp->c_flags &= ~C_VATTR;

	error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);

	CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )

 exit:
	/* vput both vnodes */
	vput(dvp);
	if (dvp == vp) {
		vrele(vp);
	} else {
		vput(vp);
	}

	return(error);
}
1537
/*
 * coda_symlink: VOP_SYMLINK(9).  Create symlink nm -> path in directory
 * dvp via a venus upcall, then VOP_LOOKUP the new name to return its
 * vnode in *ap->a_vpp (see the strategy comment below).
 *
 * Locking: dvp arrives locked/referenced and is vput() at "exit"; on
 * success *ap->a_vpp is returned locked by the lookup.
 */
int
coda_symlink(void *v)
{
/* true args */
	struct vop_symlink_args *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct cnode *dcp = VTOC(dvp);
	/* a_vpp is used in place below */
	struct componentname *cnp = ap->a_cnp;
	struct vattr *tva = ap->a_vap;
	char *path = ap->a_target;
	kauth_cred_t cred = cnp->cn_cred;
	struct lwp *l = curlwp;
/* locals */
	int error;
	u_long saved_cn_flags;		/* so cnp can be restored after the lookup */
	const char *nm = cnp->cn_nameptr;
	int len = cnp->cn_namelen;
	int plen = strlen(path);	/* length of the link target */

	/*
	 * Here's the strategy for the moment: perform the symlink, then
	 * do a lookup to grab the resulting vnode.  I know this requires
	 * two communications with Venus for a new symbolic link, but
	 * that's the way the ball bounces.  I don't yet want to change
	 * the way the Mach symlink works.  When Mach support is
	 * deprecated, we should change symlink so that the common case
	 * returns the resultant vnode in a vpp argument.
	 */

	MARK_ENTRY(CODA_SYMLINK_STATS);

	/* Check for symlink of control object. */
	if (IS_CTL_NAME(dvp, nm, len)) {
		MARK_INT_FAIL(CODA_SYMLINK_STATS);
		error = EACCES;
		goto exit;
	}

	/* Reject over-long link target. */
	if (plen+1 > CODA_MAXPATHLEN) {
		MARK_INT_FAIL(CODA_SYMLINK_STATS);
		error = EINVAL;
		goto exit;
	}

	/* Reject over-long link name. */
	if (len+1 > CODA_MAXNAMLEN) {
		MARK_INT_FAIL(CODA_SYMLINK_STATS);
		error = EINVAL;
		goto exit;
	}

	error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);

	/* Invalidate the parent's attr cache (modification time has changed). */
	dcp->c_flags &= ~C_VATTR;

	if (!error) {
		/*
		 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
		 * these are defined only for VOP_LOOKUP.   We desire to reuse
		 * cnp for a VOP_LOOKUP operation, and must be sure to not pass
		 * stray flags passed to us.  Such stray flags can occur because
		 * sys_symlink makes a namei call and then reuses the
		 * componentname structure.
		 */
		/*
		 * XXX Arguably we should create our own componentname structure
		 * and not reuse the one that was passed in.
		 */
		saved_cn_flags = cnp->cn_flags;
		cnp->cn_flags &= ~(MODMASK | OPMASK);
		cnp->cn_flags |= LOOKUP;
		error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
		cnp->cn_flags = saved_cn_flags;
		/* Either an error occurs, or ap->a_vpp is locked. */
	}

 exit:
	/* unlock and deference parent */
	vput(dvp);

	/* Per vnodeops(9), free name except on success and SAVESTART. */
	if (error || (cnp->cn_flags & SAVESTART) == 0) {
		PNBUF_PUT(cnp->cn_pnbuf);
	}

	CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
	return(error);
}
1627
1628 /*
1629 * Read directory entries.
1630 */
/*
 * coda_readdir: VOP_READDIR(9).  Redirect the read to the container file
 * (cp->c_ovp) maintained by the underlying file system, opening the
 * container internally if the directory is not already open.
 */
int
coda_readdir(void *v)
{
/* true args */
	struct vop_readdir_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct uio *uiop = ap->a_uio;
	kauth_cred_t cred = ap->a_cred;
	int *eofflag = ap->a_eofflag;
	off_t **cookies = ap->a_cookies;
	int *ncookies = ap->a_ncookies;
/* upcall decl */
/* locals */
	int error = 0;

	MARK_ENTRY(CODA_READDIR_STATS);

	CODADEBUG(CODA_READDIR, myprintf(("coda_readdir(%p, %lu, %lld)\n", uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid, (long long) uiop->uio_offset)); )

	/* Check for readdir of control object. */
	if (IS_CTL_VP(vp)) {
		MARK_INT_FAIL(CODA_READDIR_STATS);
		return(ENOENT);
	}

	{
		/* Redirect the request to UFS. */

		/* If directory is not already open do an "internal open" on it. */
		int opened_internally = 0;
		if (cp->c_ovp == NULL) {
			opened_internally = 1;
			MARK_INT_GEN(CODA_OPEN_STATS);
			error = VOP_OPEN(vp, FREAD, cred);
#ifdef	CODA_VERBOSE
			printf("coda_readdir: Internally Opening %p\n", vp);
#endif
			if (error) return(error);
		} else
			vp = cp->c_ovp;

		/*
		 * NOTE(review): in the opened_internally case vp still
		 * refers to the Coda vnode, so the VOP_READDIR below
		 * presumably re-enters this function once and then takes
		 * the c_ovp branch above -- confirm.
		 */
		/* Have UFS handle the call. */
		CODADEBUG(CODA_READDIR, myprintf((
			"indirect readdir: fid = %s, refcnt = %d\n",
			coda_f2s(&cp->c_fid), vp->v_usecount)); )
		error = VOP_READDIR(vp, uiop, cred, eofflag, cookies, ncookies);
		if (error)
			MARK_INT_FAIL(CODA_READDIR_STATS);
		else
			MARK_INT_SAT(CODA_READDIR_STATS);

		/* Do an "internal close" if necessary. */
		if (opened_internally) {
			MARK_INT_GEN(CODA_CLOSE_STATS);
			(void)VOP_CLOSE(vp, FREAD, cred);
		}
	}

	return(error);
}
1692
1693 /*
1694 * Convert from file system blocks to device blocks
1695 */
1696 int
1697 coda_bmap(void *v)
1698 {
1699 /* XXX on the global proc */
1700 /* true args */
1701 struct vop_bmap_args *ap = v;
1702 struct vnode *vp __unused = ap->a_vp; /* file's vnode */
1703 daddr_t bn __unused = ap->a_bn; /* fs block number */
1704 struct vnode **vpp = ap->a_vpp; /* RETURN vp of device */
1705 daddr_t *bnp __unused = ap->a_bnp; /* RETURN device block number */
1706 struct lwp *l __unused = curlwp;
1707 /* upcall decl */
1708 /* locals */
1709
1710 *vpp = (struct vnode *)0;
1711 myprintf(("coda_bmap called!\n"));
1712 return(EINVAL);
1713 }
1714
1715 /*
1716 * I don't think the following two things are used anywhere, so I've
1717 * commented them out
1718 *
1719 * struct buf *async_bufhead;
1720 * int async_daemon_count;
1721 */
1722 int
1723 coda_strategy(void *v)
1724 {
1725 /* true args */
1726 struct vop_strategy_args *ap = v;
1727 struct buf *bp __unused = ap->a_bp;
1728 struct lwp *l __unused = curlwp;
1729 /* upcall decl */
1730 /* locals */
1731
1732 myprintf(("coda_strategy called! "));
1733 return(EINVAL);
1734 }
1735
1736 int
1737 coda_reclaim(void *v)
1738 {
1739 /* true args */
1740 struct vop_reclaim_args *ap = v;
1741 struct vnode *vp = ap->a_vp;
1742 struct cnode *cp = VTOC(vp);
1743 /* upcall decl */
1744 /* locals */
1745
1746 /*
1747 * Forced unmount/flush will let vnodes with non zero use be destroyed!
1748 */
1749 ENTRY;
1750
1751 if (IS_UNMOUNTING(cp)) {
1752 #ifdef DEBUG
1753 if (VTOC(vp)->c_ovp) {
1754 if (IS_UNMOUNTING(cp))
1755 printf("coda_reclaim: c_ovp not void: vp %p, cp %p\n", vp, cp);
1756 }
1757 #endif
1758 } else {
1759 #ifdef OLD_DIAGNOSTIC
1760 if (vp->v_usecount != 0)
1761 print("coda_reclaim: pushing active %p\n", vp);
1762 if (VTOC(vp)->c_ovp) {
1763 panic("coda_reclaim: c_ovp not void");
1764 }
1765 #endif
1766 }
1767 cache_purge(vp);
1768 coda_free(VTOC(vp));
1769 SET_VTOC(vp) = NULL;
1770 return (0);
1771 }
1772
/*
 * coda_lock: VOP_LOCK(9).  Acquire the vnode's generic lock via vlockmgr.
 */
int
coda_lock(void *v)
{
/* true args */
	struct vop_lock_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	int flags = ap->a_flags;
/* upcall decl */
/* locals */

	ENTRY;

	if (coda_lockdebug) {
		myprintf(("Attempting lock on %s\n",
			  coda_f2s(&cp->c_fid)));
	}

	/*
	 * LK_INTERLOCK means the caller holds v_interlock; drop it and
	 * strip the flag before handing the rest to vlockmgr.
	 */
	if ((flags & LK_INTERLOCK) != 0) {
		mutex_exit(&vp->v_interlock);
		flags &= ~LK_INTERLOCK;
	}

	return (vlockmgr(&vp->v_lock, flags));
}
1798
/*
 * coda_unlock: VOP_UNLOCK(9).  Release the vnode's generic lock by
 * passing LK_RELEASE (plus any caller flags) to vlockmgr.
 */
int
coda_unlock(void *v)
{
/* true args */
	struct vop_unlock_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
/* upcall decl */
/* locals */

	ENTRY;
	if (coda_lockdebug) {
		myprintf(("Attempting unlock on %s\n",
			  coda_f2s(&cp->c_fid)));
	}

	return (vlockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE));
}
1817
1818 int
1819 coda_islocked(void *v)
1820 {
1821 /* true args */
1822 struct vop_islocked_args *ap = v;
1823 ENTRY;
1824
1825 return (vlockstatus(&ap->a_vp->v_lock));
1826 }
1827
1828 /*
1829 * Given a device and inode, obtain a locked vnode. One reference is
1830 * obtained and passed back to the caller.
1831 */
1832 int
1833 coda_grab_vnode(dev_t dev, ino_t ino, struct vnode **vpp)
1834 {
1835 int error;
1836 struct mount *mp;
1837
1838 /* Obtain mount point structure from device. */
1839 if (!(mp = devtomp(dev))) {
1840 myprintf(("coda_grab_vnode: devtomp(%d) returns NULL\n", dev));
1841 return(ENXIO);
1842 }
1843
1844 /*
1845 * Obtain vnode from mount point and inode.
1846 * XXX VFS_VGET does not clearly define locked/referenced state of
1847 * returned vnode.
1848 */
1849 error = VFS_VGET(mp, ino, vpp);
1850 if (error) {
1851 myprintf(("coda_grab_vnode: iget/vget(%d, %llu) returns %p, err %d\n",
1852 dev, (unsigned long long)ino, *vpp, error));
1853 return(ENOENT);
1854 }
1855 return(0);
1856 }
1857
1858 void
1859 print_vattr(struct vattr *attr)
1860 {
1861 const char *typestr;
1862
1863 switch (attr->va_type) {
1864 case VNON:
1865 typestr = "VNON";
1866 break;
1867 case VREG:
1868 typestr = "VREG";
1869 break;
1870 case VDIR:
1871 typestr = "VDIR";
1872 break;
1873 case VBLK:
1874 typestr = "VBLK";
1875 break;
1876 case VCHR:
1877 typestr = "VCHR";
1878 break;
1879 case VLNK:
1880 typestr = "VLNK";
1881 break;
1882 case VSOCK:
1883 typestr = "VSCK";
1884 break;
1885 case VFIFO:
1886 typestr = "VFFO";
1887 break;
1888 case VBAD:
1889 typestr = "VBAD";
1890 break;
1891 default:
1892 typestr = "????";
1893 break;
1894 }
1895
1896
1897 myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1898 typestr, (int)attr->va_mode, (int)attr->va_uid,
1899 (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1900
1901 myprintf((" fileid %d nlink %d size %d blocksize %d bytes %d\n",
1902 (int)attr->va_fileid, (int)attr->va_nlink,
1903 (int)attr->va_size,
1904 (int)attr->va_blocksize,(int)attr->va_bytes));
1905 myprintf((" gen %ld flags %ld vaflags %d\n",
1906 attr->va_gen, attr->va_flags, attr->va_vaflags));
1907 myprintf((" atime sec %d nsec %d\n",
1908 (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1909 myprintf((" mtime sec %d nsec %d\n",
1910 (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1911 myprintf((" ctime sec %d nsec %d\n",
1912 (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1913 }
1914
1915 /* How to print a ucred */
1916 void
1917 print_cred(kauth_cred_t cred)
1918 {
1919
1920 uint16_t ngroups;
1921 int i;
1922
1923 myprintf(("ref %d\tuid %d\n", kauth_cred_getrefcnt(cred),
1924 kauth_cred_geteuid(cred)));
1925
1926 ngroups = kauth_cred_ngroups(cred);
1927 for (i=0; i < ngroups; i++)
1928 myprintf(("\tgroup %d: (%d)\n", i, kauth_cred_group(cred, i)));
1929 myprintf(("\n"));
1930
1931 }
1932
1933 /*
1934 * Return a vnode for the given fid.
1935 * If no cnode exists for this fid create one and put it
1936 * in a table hashed by coda_f2i(). If the cnode for
1937 * this fid is already in the table return it (ref count is
1938 * incremented by coda_find. The cnode will be flushed from the
1939 * table when coda_inactive calls coda_unsave.
1940 */
struct cnode *
make_coda_node(CodaFid *fid, struct mount *vfsp, short type)
{
	struct cnode *cp;
	int err;

	/* Existing cnode for this fid?  If not, build one; else just vref. */
	if ((cp = coda_find(fid)) == NULL) {
		struct vnode *vp;

		cp = coda_alloc();
		cp->c_fid = *fid;

		/* Allocate a fresh vnode; allocation failure is fatal here. */
		err = getnewvnode(VT_CODA, vfsp, coda_vnodeop_p, &vp);
		if (err) {
			panic("coda: getnewvnode returned error %d", err);
		}
		/* Cross-link vnode and cnode, set type, zero the uvm size. */
		vp->v_data = cp;
		vp->v_type = type;
		cp->c_vnode = vp;
		uvm_vnp_setsize(vp, 0);
		/* Enter the cnode in the fid hash table (see coda_find). */
		coda_save(cp);

	} else {
		/* Found: take an additional reference for the caller. */
		vref(CTOV(cp));
	}

	return cp;
}
1969
1970 /*
1971 * coda_getpages may be called on a vnode which has not been opened,
1972 * e.g. to fault in pages to execute a program. In that case, we must
1973 * open the file to get the container. The vnode may or may not be
1974 * locked, and we must leave it in the same state.
1975 * XXX The protocol requires v_uobj.vmobjlock to be
1976 * held by caller, but this isn't documented in vnodeops(9) or vnode_if.src.
1977 */
/*
 * coda_getpages: VOP_GETPAGES(9).  Forward the request to the container
 * vnode (cp->c_ovp), opening the file via an internal VOP_OPEN first if
 * no container is present (see the block comment above this function).
 */
int
coda_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct lwp *l = curlwp;
	kauth_cred_t cred = l->l_cred;
	int error, cerror;
	int waslocked;	       /* 1 if vnode lock was held on entry */
	int didopen = 0;	/* 1 if we opened container file */

	/*
	 * Handle a case that uvm_fault doesn't quite use yet.
	 * See layer_vnops.c. for inspiration.
	 */
	if (ap->a_flags & PGO_LOCKED) {
		return EBUSY;
	}

	/* Check for control object. */
	if (IS_CTL_VP(vp)) {
		printf("coda_getpages: control object %p\n", vp);
		mutex_exit(&vp->v_uobj.vmobjlock);
		return(EINVAL);
	}

	/*
	 * XXX It's really not ok to be releasing the lock we get,
	 * because we could be overlapping with another call to
	 * getpages and drop a lock they are relying on.  We need to
	 * figure out whether getpages ever is called holding the
	 * lock, and if we should serialize getpages calls by some
	 * mechanism.
	 */
	waslocked = VOP_ISLOCKED(vp);

	/* Drop the vmobject lock. */
	mutex_exit(&vp->v_uobj.vmobjlock);

	/* Get container file if not already present. */
	if (cp->c_ovp == NULL) {
		/*
		 * VOP_OPEN requires a locked vnode.  We must avoid
		 * locking the vnode if it is already locked, and
		 * leave it in the same state on exit.
		 */
		if (waslocked == 0) {
			cerror = vn_lock(vp, LK_EXCLUSIVE);
			if (cerror) {
				printf("coda_getpages: can't lock vnode %p\n",
				       vp);
				return cerror;
			}
#if 0
			printf("coda_getpages: locked vnode %p\n", vp);
#endif
		}

		/*
		 * Open file (causes upcall to venus).
		 * XXX Perhaps we should not fully open the file, but
		 * simply obtain a container file.
		 */
		/* XXX Is it ok to do this while holding the simplelock? */
		cerror = VOP_OPEN(vp, FREAD, cred);

		if (cerror) {
			printf("coda_getpages: cannot open vnode %p => %d\n",
			       vp, cerror);
			if (waslocked == 0)
				VOP_UNLOCK(vp, 0);
			return cerror;
		}

#if 0
		printf("coda_getpages: opened vnode %p\n", vp);
#endif
		didopen = 1;
	}
	KASSERT(cp->c_ovp != NULL);

	/* Munge the arg structure to refer to the container vnode. */
	ap->a_vp = cp->c_ovp;

	/* Get the lock on the container vnode, and call getpages on it. */
	mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);

	/* If we opened the vnode, we must close it. */
	if (didopen) {
		/*
		 * VOP_CLOSE requires a locked vnode, but we are still
		 * holding the lock (or riding a caller's lock).
		 */
		cerror = VOP_CLOSE(vp, FREAD, cred);
		if (cerror != 0)
			/* XXX How should we handle this? */
			printf("coda_getpages: closed vnode %p -> %d\n",
			       vp, cerror);

		/* If we obtained a lock, drop it. */
		if (waslocked == 0)
			VOP_UNLOCK(vp, 0);
	}

	return error;
}
2095
2096 /*
2097 * The protocol requires v_uobj.vmobjlock to be held by the caller, as
2098 * documented in vnodeops(9). XXX vnode_if.src doesn't say this.
2099 */
/*
 * coda_putpages: VOP_PUTPAGES(9).  Forward the request to the container
 * vnode; if no container file exists there is nothing dirty to flush.
 */
int
coda_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	int error;

	/* Drop the vmobject lock. */
	mutex_exit(&vp->v_uobj.vmobjlock);

	/* Check for control object. */
	if (IS_CTL_VP(vp)) {
		printf("coda_putpages: control object %p\n", vp);
		return(EINVAL);
	}

	/*
	 * If container object is not present, then there are no pages
	 * to put; just return without error.  This happens all the
	 * time, apparently during discard of a closed vnode (which
	 * trivially can't have dirty pages).
	 */
	if (cp->c_ovp == NULL)
		return 0;

	/* Munge the arg structure to refer to the container vnode. */
	ap->a_vp = cp->c_ovp;

	/* Get the lock on the container vnode, and call putpages on it. */
	mutex_enter(&ap->a_vp->v_uobj.vmobjlock);
	error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);

	return error;
}
Cache object: a25acca1af5d72a087a568d11c3bc0c8
|