1 /*
2 * Copyright (c) 2007-2009 Google Inc. and Amit Singh
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met:
8 *
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * * Redistributions in binary form must reproduce the above
12 * copyright notice, this list of conditions and the following disclaimer
13 * in the documentation and/or other materials provided with the
14 * distribution.
15 * * Neither the name of Google Inc. nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Copyright (C) 2005 Csaba Henk.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 *
43 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 */
55
56 #include <sys/cdefs.h>
57 __FBSDID("$FreeBSD: releng/10.4/sys/fs/fuse/fuse_vnops.c 301164 2016-06-01 20:30:31Z rmacklem $");
58
59 #include <sys/types.h>
60 #include <sys/module.h>
61 #include <sys/systm.h>
62 #include <sys/errno.h>
63 #include <sys/param.h>
64 #include <sys/kernel.h>
65 #include <sys/conf.h>
66 #include <sys/uio.h>
67 #include <sys/malloc.h>
68 #include <sys/queue.h>
69 #include <sys/lock.h>
70 #include <sys/rwlock.h>
71 #include <sys/sx.h>
72 #include <sys/proc.h>
73 #include <sys/mount.h>
74 #include <sys/vnode.h>
75 #include <sys/namei.h>
76 #include <sys/stat.h>
77 #include <sys/unistd.h>
78 #include <sys/filedesc.h>
79 #include <sys/file.h>
80 #include <sys/fcntl.h>
81 #include <sys/dirent.h>
82 #include <sys/bio.h>
83 #include <sys/buf.h>
84 #include <sys/sysctl.h>
85
86 #include <vm/vm.h>
87 #include <vm/vm_extern.h>
88 #include <vm/pmap.h>
89 #include <vm/vm_map.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_param.h>
92 #include <vm/vm_object.h>
93 #include <vm/vm_pager.h>
94 #include <vm/vnode_pager.h>
95 #include <vm/vm_object.h>
96
97 #include "fuse.h"
98 #include "fuse_file.h"
99 #include "fuse_internal.h"
100 #include "fuse_ipc.h"
101 #include "fuse_node.h"
102 #include "fuse_param.h"
103 #include "fuse_io.h"
104
105 #include <sys/priv.h>
106
107 #define FUSE_DEBUG_MODULE VNOPS
108 #include "fuse_debug.h"
109
/* vnode ops */
static vop_access_t fuse_vnop_access;
static vop_close_t fuse_vnop_close;
static vop_create_t fuse_vnop_create;
static vop_fsync_t fuse_vnop_fsync;
static vop_getattr_t fuse_vnop_getattr;
static vop_inactive_t fuse_vnop_inactive;
static vop_link_t fuse_vnop_link;
static vop_lookup_t fuse_vnop_lookup;
static vop_mkdir_t fuse_vnop_mkdir;
static vop_mknod_t fuse_vnop_mknod;
static vop_open_t fuse_vnop_open;
static vop_read_t fuse_vnop_read;
static vop_readdir_t fuse_vnop_readdir;
static vop_readlink_t fuse_vnop_readlink;
static vop_reclaim_t fuse_vnop_reclaim;
static vop_remove_t fuse_vnop_remove;
static vop_rename_t fuse_vnop_rename;
static vop_rmdir_t fuse_vnop_rmdir;
static vop_setattr_t fuse_vnop_setattr;
static vop_strategy_t fuse_vnop_strategy;
static vop_symlink_t fuse_vnop_symlink;
static vop_write_t fuse_vnop_write;
static vop_getpages_t fuse_vnop_getpages;
static vop_putpages_t fuse_vnop_putpages;
static vop_print_t fuse_vnop_print;

/*
 * Vnode operations vector for FUSE vnodes.  Operations not filled in
 * here fall through to default_vnodeops; pathconf is served by the
 * generic vop_stdpathconf.
 */
struct vop_vector fuse_vnops = {
	.vop_default = &default_vnodeops,
	.vop_access = fuse_vnop_access,
	.vop_close = fuse_vnop_close,
	.vop_create = fuse_vnop_create,
	.vop_fsync = fuse_vnop_fsync,
	.vop_getattr = fuse_vnop_getattr,
	.vop_inactive = fuse_vnop_inactive,
	.vop_link = fuse_vnop_link,
	.vop_lookup = fuse_vnop_lookup,
	.vop_mkdir = fuse_vnop_mkdir,
	.vop_mknod = fuse_vnop_mknod,
	.vop_open = fuse_vnop_open,
	.vop_pathconf = vop_stdpathconf,
	.vop_read = fuse_vnop_read,
	.vop_readdir = fuse_vnop_readdir,
	.vop_readlink = fuse_vnop_readlink,
	.vop_reclaim = fuse_vnop_reclaim,
	.vop_remove = fuse_vnop_remove,
	.vop_rename = fuse_vnop_rename,
	.vop_rmdir = fuse_vnop_rmdir,
	.vop_setattr = fuse_vnop_setattr,
	.vop_strategy = fuse_vnop_strategy,
	.vop_symlink = fuse_vnop_symlink,
	.vop_write = fuse_vnop_write,
	.vop_getpages = fuse_vnop_getpages,
	.vop_putpages = fuse_vnop_putpages,
	.vop_print = fuse_vnop_print,
};
166
167 static u_long fuse_lookup_cache_hits = 0;
168
169 SYSCTL_ULONG(_vfs_fuse, OID_AUTO, lookup_cache_hits, CTLFLAG_RD,
170 &fuse_lookup_cache_hits, 0, "");
171
172 static u_long fuse_lookup_cache_misses = 0;
173
174 SYSCTL_ULONG(_vfs_fuse, OID_AUTO, lookup_cache_misses, CTLFLAG_RD,
175 &fuse_lookup_cache_misses, 0, "");
176
177 int fuse_lookup_cache_enable = 1;
178
179 SYSCTL_INT(_vfs_fuse, OID_AUTO, lookup_cache_enable, CTLFLAG_RW,
180 &fuse_lookup_cache_enable, 0, "");
181
182 /*
183 * XXX: This feature is highly experimental and can bring to instabilities,
184 * needs revisiting before to be enabled by default.
185 */
186 static int fuse_reclaim_revoked = 0;
187
188 SYSCTL_INT(_vfs_fuse, OID_AUTO, reclaim_revoked, CTLFLAG_RW,
189 &fuse_reclaim_revoked, 0, "");
190
191 int fuse_pbuf_freecnt = -1;
192
193 #define fuse_vm_page_lock(m) vm_page_lock((m));
194 #define fuse_vm_page_unlock(m) vm_page_unlock((m));
195 #define fuse_vm_page_lock_queues() ((void)0)
196 #define fuse_vm_page_unlock_queues() ((void)0)
197
198 /*
199 struct vnop_access_args {
200 struct vnode *a_vp;
201 #if VOP_ACCESS_TAKES_ACCMODE_T
202 accmode_t a_accmode;
203 #else
204 int a_mode;
205 #endif
206 struct ucred *a_cred;
207 struct thread *a_td;
208 };
209 */
210 static int
211 fuse_vnop_access(struct vop_access_args *ap)
212 {
213 struct vnode *vp = ap->a_vp;
214 int accmode = ap->a_accmode;
215 struct ucred *cred = ap->a_cred;
216
217 struct fuse_access_param facp;
218 struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp));
219
220 int err;
221
222 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
223
224 if (fuse_isdeadfs(vp)) {
225 if (vnode_isvroot(vp)) {
226 return 0;
227 }
228 return ENXIO;
229 }
230 if (!(data->dataflags & FSESS_INITED)) {
231 if (vnode_isvroot(vp)) {
232 if (priv_check_cred(cred, PRIV_VFS_ADMIN, 0) ||
233 (fuse_match_cred(data->daemoncred, cred) == 0)) {
234 return 0;
235 }
236 }
237 return EBADF;
238 }
239 if (vnode_islnk(vp)) {
240 return 0;
241 }
242 bzero(&facp, sizeof(facp));
243
244 err = fuse_internal_access(vp, accmode, &facp, ap->a_td, ap->a_cred);
245 FS_DEBUG2G("err=%d accmode=0x%x\n", err, accmode);
246 return err;
247 }
248
249 /*
250 struct vnop_close_args {
251 struct vnode *a_vp;
252 int a_fflag;
253 struct ucred *a_cred;
254 struct thread *a_td;
255 };
256 */
257 static int
258 fuse_vnop_close(struct vop_close_args *ap)
259 {
260 struct vnode *vp = ap->a_vp;
261 struct ucred *cred = ap->a_cred;
262 int fflag = ap->a_fflag;
263 fufh_type_t fufh_type;
264
265 fuse_trace_printf_vnop();
266
267 if (fuse_isdeadfs(vp)) {
268 return 0;
269 }
270 if (vnode_isdir(vp)) {
271 if (fuse_filehandle_valid(vp, FUFH_RDONLY)) {
272 fuse_filehandle_close(vp, FUFH_RDONLY, NULL, cred);
273 }
274 return 0;
275 }
276 if (fflag & IO_NDELAY) {
277 return 0;
278 }
279 fufh_type = fuse_filehandle_xlate_from_fflags(fflag);
280
281 if (!fuse_filehandle_valid(vp, fufh_type)) {
282 int i;
283
284 for (i = 0; i < FUFH_MAXTYPE; i++)
285 if (fuse_filehandle_valid(vp, i))
286 break;
287 if (i == FUFH_MAXTYPE)
288 panic("FUSE: fufh type %d found to be invalid in close"
289 " (fflag=0x%x)\n",
290 fufh_type, fflag);
291 }
292 if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) {
293 fuse_vnode_savesize(vp, cred);
294 }
295 return 0;
296 }
297
298 /*
299 struct vnop_create_args {
300 struct vnode *a_dvp;
301 struct vnode **a_vpp;
302 struct componentname *a_cnp;
303 struct vattr *a_vap;
304 };
305 */
306 static int
307 fuse_vnop_create(struct vop_create_args *ap)
308 {
309 struct vnode *dvp = ap->a_dvp;
310 struct vnode **vpp = ap->a_vpp;
311 struct componentname *cnp = ap->a_cnp;
312 struct vattr *vap = ap->a_vap;
313 struct thread *td = cnp->cn_thread;
314 struct ucred *cred = cnp->cn_cred;
315
316 struct fuse_open_in *foi;
317 struct fuse_entry_out *feo;
318 struct fuse_dispatcher fdi;
319 struct fuse_dispatcher *fdip = &fdi;
320
321 int err;
322
323 struct mount *mp = vnode_mount(dvp);
324 uint64_t parentnid = VTOFUD(dvp)->nid;
325 mode_t mode = MAKEIMODE(vap->va_type, vap->va_mode);
326 uint64_t x_fh_id;
327 uint32_t x_open_flags;
328
329 fuse_trace_printf_vnop();
330
331 if (fuse_isdeadfs(dvp)) {
332 return ENXIO;
333 }
334 bzero(&fdi, sizeof(fdi));
335
336 /* XXX: Will we ever want devices ? */
337 if ((vap->va_type != VREG)) {
338 printf("fuse_vnop_create: unsupported va_type %d\n",
339 vap->va_type);
340 return (EINVAL);
341 }
342 debug_printf("parent nid = %ju, mode = %x\n", (uintmax_t)parentnid,
343 mode);
344
345 fdisp_init(fdip, sizeof(*foi) + cnp->cn_namelen + 1);
346 if (!fsess_isimpl(mp, FUSE_CREATE)) {
347 debug_printf("eh, daemon doesn't implement create?\n");
348 return (EINVAL);
349 }
350 fdisp_make(fdip, FUSE_CREATE, vnode_mount(dvp), parentnid, td, cred);
351
352 foi = fdip->indata;
353 foi->mode = mode;
354 foi->flags = O_CREAT | O_RDWR;
355
356 memcpy((char *)fdip->indata + sizeof(*foi), cnp->cn_nameptr,
357 cnp->cn_namelen);
358 ((char *)fdip->indata)[sizeof(*foi) + cnp->cn_namelen] = '\0';
359
360 err = fdisp_wait_answ(fdip);
361
362 if (err) {
363 if (err == ENOSYS)
364 fsess_set_notimpl(mp, FUSE_CREATE);
365 debug_printf("create: got err=%d from daemon\n", err);
366 goto out;
367 }
368
369 feo = fdip->answ;
370
371 if ((err = fuse_internal_checkentry(feo, VREG))) {
372 goto out;
373 }
374 err = fuse_vnode_get(mp, feo->nodeid, dvp, vpp, cnp, VREG);
375 if (err) {
376 struct fuse_release_in *fri;
377 uint64_t nodeid = feo->nodeid;
378 uint64_t fh_id = ((struct fuse_open_out *)(feo + 1))->fh;
379
380 fdisp_init(fdip, sizeof(*fri));
381 fdisp_make(fdip, FUSE_RELEASE, mp, nodeid, td, cred);
382 fri = fdip->indata;
383 fri->fh = fh_id;
384 fri->flags = OFLAGS(mode);
385 fuse_insert_callback(fdip->tick, fuse_internal_forget_callback);
386 fuse_insert_message(fdip->tick);
387 return err;
388 }
389 ASSERT_VOP_ELOCKED(*vpp, "fuse_vnop_create");
390
391 fdip->answ = feo + 1;
392
393 x_fh_id = ((struct fuse_open_out *)(feo + 1))->fh;
394 x_open_flags = ((struct fuse_open_out *)(feo + 1))->open_flags;
395 fuse_filehandle_init(*vpp, FUFH_RDWR, NULL, x_fh_id);
396 fuse_vnode_open(*vpp, x_open_flags, td);
397 cache_purge_negative(dvp);
398
399 out:
400 fdisp_destroy(fdip);
401 return err;
402 }
403
404 /*
405 * Our vnop_fsync roughly corresponds to the FUSE_FSYNC method. The Linux
406 * version of FUSE also has a FUSE_FLUSH method.
407 *
408 * On Linux, fsync() synchronizes a file's complete in-core state with that
409 * on disk. The call is not supposed to return until the system has completed
410 * that action or until an error is detected.
411 *
412 * Linux also has an fdatasync() call that is similar to fsync() but is not
413 * required to update the metadata such as access time and modification time.
414 */
415
416 /*
417 struct vnop_fsync_args {
418 struct vnodeop_desc *a_desc;
419 struct vnode * a_vp;
420 struct ucred * a_cred;
421 int a_waitfor;
422 struct thread * a_td;
423 };
424 */
425 static int
426 fuse_vnop_fsync(struct vop_fsync_args *ap)
427 {
428 struct vnode *vp = ap->a_vp;
429 struct thread *td = ap->a_td;
430
431 struct fuse_filehandle *fufh;
432 struct fuse_vnode_data *fvdat = VTOFUD(vp);
433
434 int type, err = 0;
435
436 fuse_trace_printf_vnop();
437
438 if (fuse_isdeadfs(vp)) {
439 return 0;
440 }
441 if ((err = vop_stdfsync(ap)))
442 return err;
443
444 if (!fsess_isimpl(vnode_mount(vp),
445 (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) {
446 goto out;
447 }
448 for (type = 0; type < FUFH_MAXTYPE; type++) {
449 fufh = &(fvdat->fufh[type]);
450 if (FUFH_IS_VALID(fufh)) {
451 fuse_internal_fsync(vp, td, NULL, fufh);
452 }
453 }
454
455 out:
456 return 0;
457 }
458
459 /*
460 struct vnop_getattr_args {
461 struct vnode *a_vp;
462 struct vattr *a_vap;
463 struct ucred *a_cred;
464 struct thread *a_td;
465 };
466 */
467 static int
468 fuse_vnop_getattr(struct vop_getattr_args *ap)
469 {
470 struct vnode *vp = ap->a_vp;
471 struct vattr *vap = ap->a_vap;
472 struct ucred *cred = ap->a_cred;
473 struct thread *td = curthread;
474 struct fuse_vnode_data *fvdat = VTOFUD(vp);
475
476 int err = 0;
477 int dataflags;
478 struct fuse_dispatcher fdi;
479
480 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
481
482 dataflags = fuse_get_mpdata(vnode_mount(vp))->dataflags;
483
484 /* Note that we are not bailing out on a dead file system just yet. */
485
486 if (!(dataflags & FSESS_INITED)) {
487 if (!vnode_isvroot(vp)) {
488 fdata_set_dead(fuse_get_mpdata(vnode_mount(vp)));
489 err = ENOTCONN;
490 debug_printf("fuse_getattr b: returning ENOTCONN\n");
491 return err;
492 } else {
493 goto fake;
494 }
495 }
496 fdisp_init(&fdi, 0);
497 if ((err = fdisp_simple_putget_vp(&fdi, FUSE_GETATTR, vp, td, cred))) {
498 if ((err == ENOTCONN) && vnode_isvroot(vp)) {
499 /* see comment at similar place in fuse_statfs() */
500 fdisp_destroy(&fdi);
501 goto fake;
502 }
503 if (err == ENOENT) {
504 fuse_internal_vnode_disappear(vp);
505 }
506 goto out;
507 }
508 cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
509 if (vap != VTOVA(vp)) {
510 memcpy(vap, VTOVA(vp), sizeof(*vap));
511 }
512 if (vap->va_type != vnode_vtype(vp)) {
513 fuse_internal_vnode_disappear(vp);
514 err = ENOENT;
515 goto out;
516 }
517 if ((fvdat->flag & FN_SIZECHANGE) != 0)
518 vap->va_size = fvdat->filesize;
519
520 if (vnode_isreg(vp) && (fvdat->flag & FN_SIZECHANGE) == 0) {
521 /*
522 * This is for those cases when the file size changed without us
523 * knowing, and we want to catch up.
524 */
525 off_t new_filesize = ((struct fuse_attr_out *)
526 fdi.answ)->attr.size;
527
528 if (fvdat->filesize != new_filesize) {
529 fuse_vnode_setsize(vp, cred, new_filesize);
530 }
531 }
532 debug_printf("fuse_getattr e: returning 0\n");
533
534 out:
535 fdisp_destroy(&fdi);
536 return err;
537
538 fake:
539 bzero(vap, sizeof(*vap));
540 vap->va_type = vnode_vtype(vp);
541
542 return 0;
543 }
544
545 /*
546 struct vnop_inactive_args {
547 struct vnode *a_vp;
548 struct thread *a_td;
549 };
550 */
551 static int
552 fuse_vnop_inactive(struct vop_inactive_args *ap)
553 {
554 struct vnode *vp = ap->a_vp;
555 struct thread *td = ap->a_td;
556
557 struct fuse_vnode_data *fvdat = VTOFUD(vp);
558 struct fuse_filehandle *fufh = NULL;
559
560 int type, need_flush = 1;
561
562 FS_DEBUG("inode=%ju\n", (uintmax_t)VTOI(vp));
563
564 for (type = 0; type < FUFH_MAXTYPE; type++) {
565 fufh = &(fvdat->fufh[type]);
566 if (FUFH_IS_VALID(fufh)) {
567 if (need_flush && vp->v_type == VREG) {
568 if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) {
569 fuse_vnode_savesize(vp, NULL);
570 }
571 if (fuse_data_cache_invalidate ||
572 (fvdat->flag & FN_REVOKED) != 0)
573 fuse_io_invalbuf(vp, td);
574 else
575 fuse_io_flushbuf(vp, MNT_WAIT, td);
576 need_flush = 0;
577 }
578 fuse_filehandle_close(vp, type, td, NULL);
579 }
580 }
581
582 if ((fvdat->flag & FN_REVOKED) != 0 && fuse_reclaim_revoked) {
583 vrecycle(vp);
584 }
585 return 0;
586 }
587
588 /*
589 struct vnop_link_args {
590 struct vnode *a_tdvp;
591 struct vnode *a_vp;
592 struct componentname *a_cnp;
593 };
594 */
595 static int
596 fuse_vnop_link(struct vop_link_args *ap)
597 {
598 struct vnode *vp = ap->a_vp;
599 struct vnode *tdvp = ap->a_tdvp;
600 struct componentname *cnp = ap->a_cnp;
601
602 struct vattr *vap = VTOVA(vp);
603
604 struct fuse_dispatcher fdi;
605 struct fuse_entry_out *feo;
606 struct fuse_link_in fli;
607
608 int err;
609
610 fuse_trace_printf_vnop();
611
612 if (fuse_isdeadfs(vp)) {
613 return ENXIO;
614 }
615 if (vnode_mount(tdvp) != vnode_mount(vp)) {
616 return EXDEV;
617 }
618 if (vap->va_nlink >= FUSE_LINK_MAX) {
619 return EMLINK;
620 }
621 fli.oldnodeid = VTOI(vp);
622
623 fdisp_init(&fdi, 0);
624 fuse_internal_newentry_makerequest(vnode_mount(tdvp), VTOI(tdvp), cnp,
625 FUSE_LINK, &fli, sizeof(fli), &fdi);
626 if ((err = fdisp_wait_answ(&fdi))) {
627 goto out;
628 }
629 feo = fdi.answ;
630
631 err = fuse_internal_checkentry(feo, vnode_vtype(vp));
632 out:
633 fdisp_destroy(&fdi);
634 return err;
635 }
636
637 /*
638 struct vnop_lookup_args {
639 struct vnodeop_desc *a_desc;
640 struct vnode *a_dvp;
641 struct vnode **a_vpp;
642 struct componentname *a_cnp;
643 };
644 */
645 int
646 fuse_vnop_lookup(struct vop_lookup_args *ap)
647 {
648 struct vnode *dvp = ap->a_dvp;
649 struct vnode **vpp = ap->a_vpp;
650 struct componentname *cnp = ap->a_cnp;
651 struct thread *td = cnp->cn_thread;
652 struct ucred *cred = cnp->cn_cred;
653
654 int nameiop = cnp->cn_nameiop;
655 int flags = cnp->cn_flags;
656 int wantparent = flags & (LOCKPARENT | WANTPARENT);
657 int islastcn = flags & ISLASTCN;
658 struct mount *mp = vnode_mount(dvp);
659
660 int err = 0;
661 int lookup_err = 0;
662 struct vnode *vp = NULL;
663
664 struct fuse_dispatcher fdi;
665 enum fuse_opcode op;
666
667 uint64_t nid;
668 struct fuse_access_param facp;
669
670 FS_DEBUG2G("parent_inode=%ju - %*s\n",
671 (uintmax_t)VTOI(dvp), (int)cnp->cn_namelen, cnp->cn_nameptr);
672
673 if (fuse_isdeadfs(dvp)) {
674 *vpp = NULL;
675 return ENXIO;
676 }
677 if (!vnode_isdir(dvp)) {
678 return ENOTDIR;
679 }
680 if (islastcn && vfs_isrdonly(mp) && (nameiop != LOOKUP)) {
681 return EROFS;
682 }
683 /*
684 * We do access check prior to doing anything else only in the case
685 * when we are at fs root (we'd like to say, "we are at the first
686 * component", but that's not exactly the same... nevermind).
687 * See further comments at further access checks.
688 */
689
690 bzero(&facp, sizeof(facp));
691 if (vnode_isvroot(dvp)) { /* early permission check hack */
692 if ((err = fuse_internal_access(dvp, VEXEC, &facp, td, cred))) {
693 return err;
694 }
695 }
696 if (flags & ISDOTDOT) {
697 nid = VTOFUD(dvp)->parent_nid;
698 if (nid == 0) {
699 return ENOENT;
700 }
701 fdisp_init(&fdi, 0);
702 op = FUSE_GETATTR;
703 goto calldaemon;
704 } else if (cnp->cn_namelen == 1 && *(cnp->cn_nameptr) == '.') {
705 nid = VTOI(dvp);
706 fdisp_init(&fdi, 0);
707 op = FUSE_GETATTR;
708 goto calldaemon;
709 } else if (fuse_lookup_cache_enable) {
710 err = cache_lookup(dvp, vpp, cnp, NULL, NULL);
711 switch (err) {
712
713 case -1: /* positive match */
714 atomic_add_acq_long(&fuse_lookup_cache_hits, 1);
715 return 0;
716
717 case 0: /* no match in cache */
718 atomic_add_acq_long(&fuse_lookup_cache_misses, 1);
719 break;
720
721 case ENOENT: /* negative match */
722 /* fall through */
723 default:
724 return err;
725 }
726 }
727 nid = VTOI(dvp);
728 fdisp_init(&fdi, cnp->cn_namelen + 1);
729 op = FUSE_LOOKUP;
730
731 calldaemon:
732 fdisp_make(&fdi, op, mp, nid, td, cred);
733
734 if (op == FUSE_LOOKUP) {
735 memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
736 ((char *)fdi.indata)[cnp->cn_namelen] = '\0';
737 }
738 lookup_err = fdisp_wait_answ(&fdi);
739
740 if ((op == FUSE_LOOKUP) && !lookup_err) { /* lookup call succeeded */
741 nid = ((struct fuse_entry_out *)fdi.answ)->nodeid;
742 if (!nid) {
743 /*
744 * zero nodeid is the same as "not found",
745 * but it's also cacheable (which we keep
746 * keep on doing not as of writing this)
747 */
748 lookup_err = ENOENT;
749 } else if (nid == FUSE_ROOT_ID) {
750 lookup_err = EINVAL;
751 }
752 }
753 if (lookup_err &&
754 (!fdi.answ_stat || lookup_err != ENOENT || op != FUSE_LOOKUP)) {
755 fdisp_destroy(&fdi);
756 return lookup_err;
757 }
758 /* lookup_err, if non-zero, must be ENOENT at this point */
759
760 if (lookup_err) {
761
762 if ((nameiop == CREATE || nameiop == RENAME) && islastcn
763 /* && directory dvp has not been removed */ ) {
764
765 if (vfs_isrdonly(mp)) {
766 err = EROFS;
767 goto out;
768 }
769 #if 0 /* THINK_ABOUT_THIS */
770 if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
771 goto out;
772 }
773 #endif
774
775 /*
776 * Possibly record the position of a slot in the
777 * directory large enough for the new component name.
778 * This can be recorded in the vnode private data for
779 * dvp. Set the SAVENAME flag to hold onto the
780 * pathname for use later in VOP_CREATE or VOP_RENAME.
781 */
782 cnp->cn_flags |= SAVENAME;
783
784 err = EJUSTRETURN;
785 goto out;
786 }
787 /* Consider inserting name into cache. */
788
789 /*
790 * No we can't use negative caching, as the fs
791 * changes are out of our control.
792 * False positives' falseness turns out just as things
793 * go by, but false negatives' falseness doesn't.
794 * (and aiding the caching mechanism with extra control
795 * mechanisms comes quite close to beating the whole purpose
796 * caching...)
797 */
798 #if 0
799 if ((cnp->cn_flags & MAKEENTRY) != 0) {
800 FS_DEBUG("inserting NULL into cache\n");
801 cache_enter(dvp, NULL, cnp);
802 }
803 #endif
804 err = ENOENT;
805 goto out;
806
807 } else {
808
809 /* !lookup_err */
810
811 struct fuse_entry_out *feo = NULL;
812 struct fuse_attr *fattr = NULL;
813
814 if (op == FUSE_GETATTR) {
815 fattr = &((struct fuse_attr_out *)fdi.answ)->attr;
816 } else {
817 feo = (struct fuse_entry_out *)fdi.answ;
818 fattr = &(feo->attr);
819 }
820
821 /*
822 * If deleting, and at end of pathname, return parameters
823 * which can be used to remove file. If the wantparent flag
824 * isn't set, we return only the directory, otherwise we go on
825 * and lock the inode, being careful with ".".
826 */
827 if (nameiop == DELETE && islastcn) {
828 /*
829 * Check for write access on directory.
830 */
831 facp.xuid = fattr->uid;
832 facp.facc_flags |= FACCESS_STICKY;
833 err = fuse_internal_access(dvp, VWRITE, &facp, td, cred);
834 facp.facc_flags &= ~FACCESS_XQUERIES;
835
836 if (err) {
837 goto out;
838 }
839 if (nid == VTOI(dvp)) {
840 vref(dvp);
841 *vpp = dvp;
842 } else {
843 err = fuse_vnode_get(dvp->v_mount, nid, dvp,
844 &vp, cnp, IFTOVT(fattr->mode));
845 if (err)
846 goto out;
847 *vpp = vp;
848 }
849
850 /*
851 * Save the name for use in VOP_RMDIR and VOP_REMOVE
852 * later.
853 */
854 cnp->cn_flags |= SAVENAME;
855 goto out;
856
857 }
858 /*
859 * If rewriting (RENAME), return the inode and the
860 * information required to rewrite the present directory
861 * Must get inode of directory entry to verify it's a
862 * regular file, or empty directory.
863 */
864 if (nameiop == RENAME && wantparent && islastcn) {
865
866 #if 0 /* THINK_ABOUT_THIS */
867 if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
868 goto out;
869 }
870 #endif
871
872 /*
873 * Check for "."
874 */
875 if (nid == VTOI(dvp)) {
876 err = EISDIR;
877 goto out;
878 }
879 err = fuse_vnode_get(vnode_mount(dvp),
880 nid,
881 dvp,
882 &vp,
883 cnp,
884 IFTOVT(fattr->mode));
885 if (err) {
886 goto out;
887 }
888 *vpp = vp;
889 /*
890 * Save the name for use in VOP_RENAME later.
891 */
892 cnp->cn_flags |= SAVENAME;
893
894 goto out;
895 }
896 if (flags & ISDOTDOT) {
897 struct mount *mp;
898 int ltype;
899
900 /*
901 * Expanded copy of vn_vget_ino() so that
902 * fuse_vnode_get() can be used.
903 */
904 mp = dvp->v_mount;
905 ltype = VOP_ISLOCKED(dvp);
906 err = vfs_busy(mp, MBF_NOWAIT);
907 if (err != 0) {
908 vfs_ref(mp);
909 VOP_UNLOCK(dvp, 0);
910 err = vfs_busy(mp, 0);
911 vn_lock(dvp, ltype | LK_RETRY);
912 vfs_rel(mp);
913 if (err)
914 goto out;
915 if ((dvp->v_iflag & VI_DOOMED) != 0) {
916 err = ENOENT;
917 vfs_unbusy(mp);
918 goto out;
919 }
920 }
921 VOP_UNLOCK(dvp, 0);
922 err = fuse_vnode_get(vnode_mount(dvp),
923 nid,
924 NULL,
925 &vp,
926 cnp,
927 IFTOVT(fattr->mode));
928 vfs_unbusy(mp);
929 vn_lock(dvp, ltype | LK_RETRY);
930 if ((dvp->v_iflag & VI_DOOMED) != 0) {
931 if (err == 0)
932 vput(vp);
933 err = ENOENT;
934 }
935 if (err)
936 goto out;
937 *vpp = vp;
938 } else if (nid == VTOI(dvp)) {
939 vref(dvp);
940 *vpp = dvp;
941 } else {
942 err = fuse_vnode_get(vnode_mount(dvp),
943 nid,
944 dvp,
945 &vp,
946 cnp,
947 IFTOVT(fattr->mode));
948 if (err) {
949 goto out;
950 }
951 fuse_vnode_setparent(vp, dvp);
952 *vpp = vp;
953 }
954
955 if (op == FUSE_GETATTR) {
956 cache_attrs(*vpp, (struct fuse_attr_out *)fdi.answ);
957 } else {
958 cache_attrs(*vpp, (struct fuse_entry_out *)fdi.answ);
959 }
960
961 /* Insert name into cache if appropriate. */
962
963 /*
964 * Nooo, caching is evil. With caching, we can't avoid stale
965 * information taking over the playground (cached info is not
966 * just positive/negative, it does have qualitative aspects,
967 * too). And a (VOP/FUSE)_GETATTR is always thrown anyway, when
968 * walking down along cached path components, and that's not
969 * any cheaper than FUSE_LOOKUP. This might change with
970 * implementing kernel side attr caching, but... In Linux,
971 * lookup results are not cached, and the daemon is bombarded
972 * with FUSE_LOOKUPS on and on. This shows that by design, the
973 * daemon is expected to handle frequent lookup queries
974 * efficiently, do its caching in userspace, and so on.
975 *
976 * So just leave the name cache alone.
977 */
978
979 /*
980 * Well, now I know, Linux caches lookups, but with a
981 * timeout... So it's the same thing as attribute caching:
982 * we can deal with it when implement timeouts.
983 */
984 #if 0
985 if (cnp->cn_flags & MAKEENTRY) {
986 cache_enter(dvp, *vpp, cnp);
987 }
988 #endif
989 }
990 out:
991 if (!lookup_err) {
992
993 /* No lookup error; need to clean up. */
994
995 if (err) { /* Found inode; exit with no vnode. */
996 if (op == FUSE_LOOKUP) {
997 fuse_internal_forget_send(vnode_mount(dvp), td, cred,
998 nid, 1);
999 }
1000 fdisp_destroy(&fdi);
1001 return err;
1002 } else {
1003 #ifndef NO_EARLY_PERM_CHECK_HACK
1004 if (!islastcn) {
1005 /*
1006 * We have the attributes of the next item
1007 * *now*, and it's a fact, and we do not
1008 * have to do extra work for it (ie, beg the
1009 * daemon), and it neither depends on such
1010 * accidental things like attr caching. So
1011 * the big idea: check credentials *now*,
1012 * not at the beginning of the next call to
1013 * lookup.
1014 *
1015 * The first item of the lookup chain (fs root)
1016 * won't be checked then here, of course, as
1017 * its never "the next". But go and see that
1018 * the root is taken care about at the very
1019 * beginning of this function.
1020 *
1021 * Now, given we want to do the access check
1022 * this way, one might ask: so then why not
1023 * do the access check just after fetching
1024 * the inode and its attributes from the
1025 * daemon? Why bother with producing the
1026 * corresponding vnode at all if something
1027 * is not OK? We know what's the deal as
1028 * soon as we get those attrs... There is
1029 * one bit of info though not given us by
1030 * the daemon: whether his response is
1031 * authorative or not... His response should
1032 * be ignored if something is mounted over
1033 * the dir in question. But that can be
1034 * known only by having the vnode...
1035 */
1036 int tmpvtype = vnode_vtype(*vpp);
1037
1038 bzero(&facp, sizeof(facp));
1039 /*the early perm check hack */
1040 facp.facc_flags |= FACCESS_VA_VALID;
1041
1042 if ((tmpvtype != VDIR) && (tmpvtype != VLNK)) {
1043 err = ENOTDIR;
1044 }
1045 if (!err && !vnode_mountedhere(*vpp)) {
1046 err = fuse_internal_access(*vpp, VEXEC, &facp, td, cred);
1047 }
1048 if (err) {
1049 if (tmpvtype == VLNK)
1050 FS_DEBUG("weird, permission error with a symlink?\n");
1051 vput(*vpp);
1052 *vpp = NULL;
1053 }
1054 }
1055 #endif
1056 }
1057 }
1058 fdisp_destroy(&fdi);
1059
1060 return err;
1061 }
1062
1063 /*
1064 struct vnop_mkdir_args {
1065 struct vnode *a_dvp;
1066 struct vnode **a_vpp;
1067 struct componentname *a_cnp;
1068 struct vattr *a_vap;
1069 };
1070 */
1071 static int
1072 fuse_vnop_mkdir(struct vop_mkdir_args *ap)
1073 {
1074 struct vnode *dvp = ap->a_dvp;
1075 struct vnode **vpp = ap->a_vpp;
1076 struct componentname *cnp = ap->a_cnp;
1077 struct vattr *vap = ap->a_vap;
1078
1079 struct fuse_mkdir_in fmdi;
1080
1081 fuse_trace_printf_vnop();
1082
1083 if (fuse_isdeadfs(dvp)) {
1084 return ENXIO;
1085 }
1086 fmdi.mode = MAKEIMODE(vap->va_type, vap->va_mode);
1087
1088 return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKDIR, &fmdi,
1089 sizeof(fmdi), VDIR));
1090 }
1091
1092 /*
1093 struct vnop_mknod_args {
1094 struct vnode *a_dvp;
1095 struct vnode **a_vpp;
1096 struct componentname *a_cnp;
1097 struct vattr *a_vap;
1098 };
1099 */
1100 static int
1101 fuse_vnop_mknod(struct vop_mknod_args *ap)
1102 {
1103
1104 return (EINVAL);
1105 }
1106
1107
1108 /*
1109 struct vnop_open_args {
1110 struct vnode *a_vp;
1111 int a_mode;
1112 struct ucred *a_cred;
1113 struct thread *a_td;
1114 int a_fdidx; / struct file *a_fp;
1115 };
1116 */
1117 static int
1118 fuse_vnop_open(struct vop_open_args *ap)
1119 {
1120 struct vnode *vp = ap->a_vp;
1121 int mode = ap->a_mode;
1122 struct thread *td = ap->a_td;
1123 struct ucred *cred = ap->a_cred;
1124
1125 fufh_type_t fufh_type;
1126 struct fuse_vnode_data *fvdat;
1127
1128 int error, isdir = 0;
1129 int32_t fuse_open_flags;
1130
1131 FS_DEBUG2G("inode=%ju mode=0x%x\n", (uintmax_t)VTOI(vp), mode);
1132
1133 if (fuse_isdeadfs(vp)) {
1134 return ENXIO;
1135 }
1136 fvdat = VTOFUD(vp);
1137
1138 if (vnode_isdir(vp)) {
1139 isdir = 1;
1140 }
1141 fuse_open_flags = 0;
1142 if (isdir) {
1143 fufh_type = FUFH_RDONLY;
1144 } else {
1145 fufh_type = fuse_filehandle_xlate_from_fflags(mode);
1146 /*
1147 * For WRONLY opens, force DIRECT_IO. This is necessary
1148 * since writing a partial block through the buffer cache
1149 * will result in a read of the block and that read won't
1150 * be allowed by the WRONLY open.
1151 */
1152 if (fufh_type == FUFH_WRONLY ||
1153 (fvdat->flag & FN_DIRECTIO) != 0)
1154 fuse_open_flags = FOPEN_DIRECT_IO;
1155 }
1156
1157 if (fuse_filehandle_validrw(vp, fufh_type) != FUFH_INVALID) {
1158 fuse_vnode_open(vp, fuse_open_flags, td);
1159 return 0;
1160 }
1161 error = fuse_filehandle_open(vp, fufh_type, NULL, td, cred);
1162
1163 return error;
1164 }
1165
1166 /*
1167 struct vnop_read_args {
1168 struct vnode *a_vp;
1169 struct uio *a_uio;
1170 int a_ioflag;
1171 struct ucred *a_cred;
1172 };
1173 */
1174 static int
1175 fuse_vnop_read(struct vop_read_args *ap)
1176 {
1177 struct vnode *vp = ap->a_vp;
1178 struct uio *uio = ap->a_uio;
1179 int ioflag = ap->a_ioflag;
1180 struct ucred *cred = ap->a_cred;
1181
1182 FS_DEBUG2G("inode=%ju offset=%jd resid=%zd\n",
1183 (uintmax_t)VTOI(vp), uio->uio_offset, uio->uio_resid);
1184
1185 if (fuse_isdeadfs(vp)) {
1186 return ENXIO;
1187 }
1188
1189 if (VTOFUD(vp)->flag & FN_DIRECTIO) {
1190 ioflag |= IO_DIRECT;
1191 }
1192
1193 return fuse_io_dispatch(vp, uio, ioflag, cred);
1194 }
1195
1196 /*
1197 struct vnop_readdir_args {
1198 struct vnode *a_vp;
1199 struct uio *a_uio;
1200 struct ucred *a_cred;
1201 int *a_eofflag;
1202 int *ncookies;
1203 u_long **a_cookies;
1204 };
1205 */
/*
 * Read directory entries, cooking raw FUSE dirents into struct dirent
 * records via a scratch buffer.
 */
static int
fuse_vnop_readdir(struct vop_readdir_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct ucred *cred = ap->a_cred;

	struct fuse_filehandle *fufh = NULL;
	struct fuse_vnode_data *fvdat;
	struct fuse_iov cookediov;

	int err = 0;
	int freefufh = 0;	/* set iff we opened a handle just for this call */

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	/* The caller must provide room for at least one dirent. */
	if ( /* XXXIP ((uio_iovcnt(uio) > 1)) || */
	    (uio_resid(uio) < sizeof(struct dirent))) {
		return EINVAL;
	}
	fvdat = VTOFUD(vp);

	if (!fuse_filehandle_valid(vp, FUFH_RDONLY)) {
		FS_DEBUG("calling readdir() before open()");
		err = fuse_filehandle_open(vp, FUFH_RDONLY, &fufh, NULL, cred);
		freefufh = 1;
	} else {
		err = fuse_filehandle_get(vp, FUFH_RDONLY, &fufh);
	}
	if (err) {
		return (err);
	}
	/* Scratch buffer big enough for one aligned, max-name dirent. */
#define DIRCOOKEDSIZE FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + MAXNAMLEN + 1)
	fiov_init(&cookediov, DIRCOOKEDSIZE);

	err = fuse_internal_readdir(vp, uio, fufh, &cookediov);

	fiov_teardown(&cookediov);
	if (freefufh) {
		/* Close the temporary handle opened above. */
		fuse_filehandle_close(vp, FUFH_RDONLY, NULL, cred);
	}
	return err;
}
1252
1253 /*
1254 struct vnop_readlink_args {
1255 struct vnode *a_vp;
1256 struct uio *a_uio;
1257 struct ucred *a_cred;
1258 };
1259 */
/*
 * Fetch a symlink's target from the daemon and copy it to the caller.
 */
static int
fuse_vnop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct ucred *cred = ap->a_cred;

	struct fuse_dispatcher fdi;
	int err;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	if (!vnode_islnk(vp)) {
		return EINVAL;
	}
	fdisp_init(&fdi, 0);
	err = fdisp_simple_putget_vp(&fdi, FUSE_READLINK, vp, curthread, cred);
	if (err) {
		goto out;
	}
	/*
	 * If the daemon returned an absolute path and the mount requested
	 * it (FSESS_PUSH_SYMLINKS_IN), prefix the target with the mount
	 * point so the link resolves inside this mount.
	 */
	if (((char *)fdi.answ)[0] == '/' &&
	    fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_PUSH_SYMLINKS_IN) {
		char *mpth = vnode_mount(vp)->mnt_stat.f_mntonname;

		err = uiomove(mpth, strlen(mpth), uio);
	}
	if (!err) {
		/*
		 * NOTE(review): fdi.iosize is the raw reply length; whether
		 * it includes a terminating NUL depends on the daemon —
		 * confirm consumers tolerate either form.
		 */
		err = uiomove(fdi.answ, fdi.iosize, uio);
	}
out:
	fdisp_destroy(&fdi);
	return err;
}
1296
1297 /*
1298 struct vnop_reclaim_args {
1299 struct vnode *a_vp;
1300 struct thread *a_td;
1301 };
1302 */
/*
 * Reclaim a vnode: close any still-open FUSE handles, send a FORGET to
 * balance the daemon's lookup count, then tear down per-vnode state.
 */
static int
fuse_vnop_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_filehandle *fufh = NULL;

	int type;

	if (!fvdat) {
		panic("FUSE: no vnode data during recycling");
	}
	FS_DEBUG("inode=%ju\n", (uintmax_t)VTOI(vp));

	/* Any handle still valid here should have been closed already. */
	for (type = 0; type < FUFH_MAXTYPE; type++) {
		fufh = &(fvdat->fufh[type]);
		if (FUFH_IS_VALID(fufh)) {
			printf("FUSE: vnode being reclaimed but fufh (type=%d) is valid",
			    type);
			fuse_filehandle_close(vp, type, td, NULL);
		}
	}

	/* Skip the FORGET when the filesystem is already dead. */
	if ((!fuse_isdeadfs(vp)) && (fvdat->nlookup)) {
		fuse_internal_forget_send(vnode_mount(vp), td, NULL, VTOI(vp),
		    fvdat->nlookup);
	}
	fuse_vnode_setparent(vp, NULL);
	cache_purge(vp);
	vfs_hash_remove(vp);
	vnode_destroy_vobject(vp);
	fuse_vnode_destroy(vp);

	return 0;
}
1340
1341 /*
1342 struct vnop_remove_args {
1343 struct vnode *a_dvp;
1344 struct vnode *a_vp;
1345 struct componentname *a_cnp;
1346 };
1347 */
1348 static int
1349 fuse_vnop_remove(struct vop_remove_args *ap)
1350 {
1351 struct vnode *dvp = ap->a_dvp;
1352 struct vnode *vp = ap->a_vp;
1353 struct componentname *cnp = ap->a_cnp;
1354
1355 int err;
1356
1357 FS_DEBUG2G("inode=%ju name=%*s\n",
1358 (uintmax_t)VTOI(vp), (int)cnp->cn_namelen, cnp->cn_nameptr);
1359
1360 if (fuse_isdeadfs(vp)) {
1361 return ENXIO;
1362 }
1363 if (vnode_isdir(vp)) {
1364 return EPERM;
1365 }
1366 cache_purge(vp);
1367
1368 err = fuse_internal_remove(dvp, vp, cnp, FUSE_UNLINK);
1369
1370 if (err == 0)
1371 fuse_internal_vnode_disappear(vp);
1372 return err;
1373 }
1374
1375 /*
1376 struct vnop_rename_args {
1377 struct vnode *a_fdvp;
1378 struct vnode *a_fvp;
1379 struct componentname *a_fcnp;
1380 struct vnode *a_tdvp;
1381 struct vnode *a_tvp;
1382 struct componentname *a_tcnp;
1383 };
1384 */
/*
 * Rename fvp from fdvp/fcnp to tdvp/tcnp via FUSE_RENAME.  On exit the
 * standard VOP_RENAME contract applies: tdvp and tvp are released
 * (vput/vrele), and fdvp/fvp are vrele'd, on every path.
 */
static int
fuse_vnop_rename(struct vop_rename_args *ap)
{
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct componentname *fcnp = ap->a_fcnp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;
	struct componentname *tcnp = ap->a_tcnp;
	struct fuse_data *data;

	int err = 0;

	FS_DEBUG2G("from: inode=%ju name=%*s -> to: inode=%ju name=%*s\n",
	    (uintmax_t)VTOI(fvp), (int)fcnp->cn_namelen, fcnp->cn_nameptr,
	    (uintmax_t)(tvp == NULL ? -1 : VTOI(tvp)),
	    (int)tcnp->cn_namelen, tcnp->cn_nameptr);

	if (fuse_isdeadfs(fdvp)) {
		return ENXIO;
	}
	/* Renames must not cross mount boundaries. */
	if (fvp->v_mount != tdvp->v_mount ||
	    (tvp && fvp->v_mount != tvp->v_mount)) {
		FS_DEBUG("cross-device rename: %s -> %s\n",
		    fcnp->cn_nameptr, (tcnp != NULL ? tcnp->cn_nameptr : "(NULL)"));
		err = EXDEV;
		goto out;
	}
	cache_purge(fvp);

	/*
	 * FUSE library is expected to check if target directory is not
	 * under the source directory in the file system tree.
	 * Linux performs this check at VFS level.
	 */
	data = fuse_get_mpdata(vnode_mount(tdvp));
	sx_xlock(&data->rename_lock);
	err = fuse_internal_rename(fdvp, fcnp, tdvp, tcnp);
	if (err == 0) {
		/* Keep the cached parent pointers consistent. */
		if (tdvp != fdvp)
			fuse_vnode_setparent(fvp, tdvp);
		if (tvp != NULL)
			fuse_vnode_setparent(tvp, NULL);
	}
	sx_unlock(&data->rename_lock);

	if (tvp != NULL && tvp != fvp) {
		cache_purge(tvp);
	}
	/* Directory moves invalidate cached names in both parents. */
	if (vnode_isdir(fvp)) {
		if ((tvp != NULL) && vnode_isdir(tvp)) {
			cache_purge(tdvp);
		}
		cache_purge(fdvp);
	}
out:
	/* tdvp may equal tvp; in that case it holds only one reference. */
	if (tdvp == tvp) {
		vrele(tdvp);
	} else {
		vput(tdvp);
	}
	if (tvp != NULL) {
		vput(tvp);
	}
	vrele(fdvp);
	vrele(fvp);

	return err;
}
1454
1455 /*
1456 struct vnop_rmdir_args {
1457 struct vnode *a_dvp;
1458 struct vnode *a_vp;
1459 struct componentname *a_cnp;
1460 } *ap;
1461 */
1462 static int
1463 fuse_vnop_rmdir(struct vop_rmdir_args *ap)
1464 {
1465 struct vnode *dvp = ap->a_dvp;
1466 struct vnode *vp = ap->a_vp;
1467
1468 int err;
1469
1470 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
1471
1472 if (fuse_isdeadfs(vp)) {
1473 return ENXIO;
1474 }
1475 if (VTOFUD(vp) == VTOFUD(dvp)) {
1476 return EINVAL;
1477 }
1478 err = fuse_internal_remove(dvp, vp, ap->a_cnp, FUSE_RMDIR);
1479
1480 if (err == 0)
1481 fuse_internal_vnode_disappear(vp);
1482 return err;
1483 }
1484
1485 /*
1486 struct vnop_setattr_args {
1487 struct vnode *a_vp;
1488 struct vattr *a_vap;
1489 struct ucred *a_cred;
1490 struct thread *a_td;
1491 };
1492 */
/*
 * Push attribute changes (owner, group, size, times, mode) to the
 * daemon in a single FUSE_SETATTR message, then refresh the cached
 * attributes from the reply.
 */
static int
fuse_vnop_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	struct thread *td = curthread;

	struct fuse_dispatcher fdi;
	struct fuse_setattr_in *fsai;
	struct fuse_access_param facp;

	int err = 0;
	enum vtype vtyp;
	int sizechanged = 0;
	uint64_t newsize = 0;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	fdisp_init(&fdi, sizeof(*fsai));
	fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
	fsai = fdi.indata;
	fsai->valid = 0;

	bzero(&facp, sizeof(facp));

	facp.xuid = vap->va_uid;
	facp.xgid = vap->va_gid;

	/*
	 * Translate each vattr field that the caller actually set (i.e.
	 * is not VNOVAL) into the corresponding FATTR_* bit and payload.
	 */
	if (vap->va_uid != (uid_t)VNOVAL) {
		facp.facc_flags |= FACCESS_CHOWN;
		fsai->uid = vap->va_uid;
		fsai->valid |= FATTR_UID;
	}
	if (vap->va_gid != (gid_t)VNOVAL) {
		facp.facc_flags |= FACCESS_CHOWN;
		fsai->gid = vap->va_gid;
		fsai->valid |= FATTR_GID;
	}
	if (vap->va_size != VNOVAL) {

		struct fuse_filehandle *fufh = NULL;

		/*Truncate to a new value. */
		fsai->size = vap->va_size;
		sizechanged = 1;
		newsize = vap->va_size;
		fsai->valid |= FATTR_SIZE;

		/* If a writable handle is cached, pass it to the daemon. */
		fuse_filehandle_getrw(vp, FUFH_WRONLY, &fufh);
		if (fufh) {
			fsai->fh = fufh->fh_id;
			fsai->valid |= FATTR_FH;
		}
	}
	if (vap->va_atime.tv_sec != VNOVAL) {
		fsai->atime = vap->va_atime.tv_sec;
		fsai->atimensec = vap->va_atime.tv_nsec;
		fsai->valid |= FATTR_ATIME;
	}
	if (vap->va_mtime.tv_sec != VNOVAL) {
		fsai->mtime = vap->va_mtime.tv_sec;
		fsai->mtimensec = vap->va_mtime.tv_nsec;
		fsai->valid |= FATTR_MTIME;
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		fsai->mode = vap->va_mode & ALLPERMS;
		fsai->valid |= FATTR_MODE;
	}
	/* Nothing to change: success without contacting the daemon. */
	if (!fsai->valid) {
		goto out;
	}
	vtyp = vnode_vtype(vp);

	if (fsai->valid & FATTR_SIZE && vtyp == VDIR) {
		err = EISDIR;
		goto out;
	}
	if (vfs_isrdonly(vnode_mount(vp)) && (fsai->valid & ~FATTR_SIZE || vtyp == VREG)) {
		err = EROFS;
		goto out;
	}
	if (fsai->valid & ~FATTR_SIZE) {
		/*err = fuse_internal_access(vp, VADMIN, context, &facp); */
		/*XXX */
		err = 0;
	}
	facp.facc_flags &= ~FACCESS_XQUERIES;

	/*
	 * utimes(path, NULL) only needs write access, not ownership;
	 * retry the access check accordingly.
	 */
	if (err && !(fsai->valid & ~(FATTR_ATIME | FATTR_MTIME)) &&
	    vap->va_vaflags & VA_UTIMES_NULL) {
		err = fuse_internal_access(vp, VWRITE, &facp, td, cred);
	}
	if (err)
		goto out;
	if ((err = fdisp_wait_answ(&fdi)))
		goto out;
	vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode);

	if (vnode_vtype(vp) != vtyp) {
		if (vnode_vtype(vp) == VNON && vtyp != VNON) {
			debug_printf("FUSE: Dang! vnode_vtype is VNON and vtype isn't.\n");
		} else {
			/*
			 * STALE vnode, ditch
			 *
			 * The vnode has changed its type "behind our back". There's
			 * nothing really we can do, so let us just force an internal
			 * revocation and tell the caller to try again, if interested.
			 */
			fuse_internal_vnode_disappear(vp);
			err = EAGAIN;
		}
	}
	/* On a pure attribute change, cache what the daemon reported. */
	if (!err && !sizechanged) {
		cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
	}
out:
	fdisp_destroy(&fdi);
	/* Apply the new size to the vnode/VM object after the RPC. */
	if (!err && sizechanged) {
		fuse_vnode_setsize(vp, cred, newsize);
		VTOFUD(vp)->flag &= ~FN_SIZECHANGE;
	}
	return err;
}
1621
1622 /*
1623 struct vnop_strategy_args {
1624 struct vnode *a_vp;
1625 struct buf *a_bp;
1626 };
1627 */
/*
 * Buffer-cache strategy routine: hand the buffer to fuse_io_strategy.
 * Errors are reported through the buffer itself, never the return value.
 */
static int
fuse_vnop_strategy(struct vop_strategy_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp = ap->a_bp;

	fuse_trace_printf_vnop();

	if (!vp || fuse_isdeadfs(vp)) {
		/* Fail the buffer instead of touching a dead filesystem. */
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = ENXIO;
		bufdone(bp);
		return ENXIO;
	}
	if (bp->b_iocmd == BIO_WRITE)
		fuse_vnode_refreshsize(vp, NOCRED);

	(void)fuse_io_strategy(vp, bp);

	/*
	 * This is a dangerous function. If returns error, that might mean a
	 * panic. We prefer pretty much anything over being forced to panic
	 * by a malicious daemon (a demon?). So we just return 0 anyway. You
	 * should never mind this: this function has its own error
	 * propagation mechanism via the argument buffer, so
	 * not-that-melodramatic residents of the call chain still will be
	 * able to know what to do.
	 */
	return 0;
}
1658
1659
1660 /*
1661 struct vnop_symlink_args {
1662 struct vnode *a_dvp;
1663 struct vnode **a_vpp;
1664 struct componentname *a_cnp;
1665 struct vattr *a_vap;
1666 char *a_target;
1667 };
1668 */
/*
 * Create a symbolic link.  The FUSE_SYMLINK message carries the new
 * entry's name first, then the link target, so the request is built by
 * hand rather than with fuse_internal_newentry().
 */
static int
fuse_vnop_symlink(struct vop_symlink_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	char *target = ap->a_target;

	struct fuse_dispatcher fdi;

	int err;
	size_t len;

	FS_DEBUG2G("inode=%ju name=%*s\n",
	    (uintmax_t)VTOI(dvp), (int)cnp->cn_namelen, cnp->cn_nameptr);

	if (fuse_isdeadfs(dvp)) {
		return ENXIO;
	}
	/*
	 * Unlike the other creator type calls, here we have to create a message
	 * where the name of the new entry comes first, and the data describing
	 * the entry comes second.
	 * Hence we can't rely on our handy fuse_internal_newentry() routine,
	 * but put together the message manually and just call the core part.
	 */

	/* len includes the NUL terminator of the target string. */
	len = strlen(target) + 1;
	fdisp_init(&fdi, len + cnp->cn_namelen + 1);
	fdisp_make_vp(&fdi, FUSE_SYMLINK, dvp, curthread, NULL);

	/* Layout: "<name>\0<target>\0" in the request payload. */
	memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
	((char *)fdi.indata)[cnp->cn_namelen] = '\0';
	memcpy((char *)fdi.indata + cnp->cn_namelen + 1, target, len);

	err = fuse_internal_newentry_core(dvp, vpp, cnp, VLNK, &fdi);
	fdisp_destroy(&fdi);
	return err;
}
1708
1709 /*
1710 struct vnop_write_args {
1711 struct vnode *a_vp;
1712 struct uio *a_uio;
1713 int a_ioflag;
1714 struct ucred *a_cred;
1715 };
1716 */
1717 static int
1718 fuse_vnop_write(struct vop_write_args *ap)
1719 {
1720 struct vnode *vp = ap->a_vp;
1721 struct uio *uio = ap->a_uio;
1722 int ioflag = ap->a_ioflag;
1723 struct ucred *cred = ap->a_cred;
1724
1725 fuse_trace_printf_vnop();
1726
1727 if (fuse_isdeadfs(vp)) {
1728 return ENXIO;
1729 }
1730 fuse_vnode_refreshsize(vp, cred);
1731
1732 if (VTOFUD(vp)->flag & FN_DIRECTIO) {
1733 ioflag |= IO_DIRECT;
1734 }
1735
1736 return fuse_io_dispatch(vp, uio, ioflag, cred);
1737 }
1738
1739 /*
1740 struct vnop_getpages_args {
1741 struct vnode *a_vp;
1742 vm_page_t *a_m;
1743 int a_count;
1744 int a_reqpage;
1745 vm_ooffset_t a_offset;
1746 };
1747 */
/*
 * VM pager input: fill the supplied pages by issuing one direct read
 * through a pbuf, then mark each page valid according to how much data
 * actually arrived.
 */
static int
fuse_vnop_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	vm_page_t *pages;

	FS_DEBUG2G("heh\n");

	vp = ap->a_vp;
	KASSERT(vp->v_object, ("objectless vp passed to getpages"));
	td = curthread;			/* XXX */
	cred = curthread->td_ucred;	/* XXX */
	pages = ap->a_m;
	count = ap->a_count;

	if (!fsess_opt_mmap(vnode_mount(vp))) {
		FS_DEBUG("called on non-cacheable vnode??\n");
		return (VM_PAGER_ERROR);
	}
	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks. Partially valid pages
	 * can only occur at the file EOF.
	 */

	VM_OBJECT_WLOCK(vp->v_object);
	fuse_vm_page_lock_queues();
	if (pages[ap->a_reqpage]->valid != 0) {
		/* Free every page except the requested one. */
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				fuse_vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				fuse_vm_page_unlock(pages[i]);
			}
		}
		fuse_vm_page_unlock_queues();
		VM_OBJECT_WUNLOCK(vp->v_object);
		return 0;
	}
	fuse_vm_page_unlock_queues();
	VM_OBJECT_WUNLOCK(vp->v_object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convienient and fast.
	 */
	bp = getpbuf(&fuse_pbuf_freecnt);

	kva = (vm_offset_t)bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	/* Single contiguous read covering all requested pages. */
	iov.iov_base = (caddr_t)kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = fuse_io_dispatch(vp, &uio, IO_DIRECT, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &fuse_pbuf_freecnt);

	/* Total failure (nothing read at all): free the extra pages. */
	if (error && (uio.uio_resid == count)) {
		FS_DEBUG("error %d\n", error);
		VM_OBJECT_WLOCK(vp->v_object);
		fuse_vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				fuse_vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				fuse_vm_page_unlock(pages[i]);
			}
		}
		fuse_vm_page_unlock_queues();
		VM_OBJECT_WUNLOCK(vp->v_object);
		return VM_PAGER_ERROR;
	}
	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes. Note that due to pending writes, size may be 0. This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_WLOCK(vp->v_object);
	fuse_vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;

		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("fuse_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("fuse_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short. If no error occured
			 * we may have hit a zero-fill section. We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage)
			vm_page_readahead_finish(m);
	}
	fuse_vm_page_unlock_queues();
	VM_OBJECT_WUNLOCK(vp->v_object);
	return 0;
}
1885
1886 /*
1887 struct vnop_putpages_args {
1888 struct vnode *a_vp;
1889 vm_page_t *a_m;
1890 int a_count;
1891 int a_sync;
1892 int *a_rtvals;
1893 vm_ooffset_t a_offset;
1894 };
1895 */
/*
 * VM pager output: flush dirty pages with one direct write through a
 * pbuf, clamped so the file is not extended past EOF, and report
 * per-page status in rtvals[].
 */
static int
fuse_vnop_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	vm_page_t *pages;
	vm_ooffset_t fsize;

	FS_DEBUG2G("heh\n");

	vp = ap->a_vp;
	KASSERT(vp->v_object, ("objectless vp passed to putpages"));
	fsize = vp->v_object->un_pager.vnp.vnp_size;
	td = curthread;			/* XXX */
	cred = curthread->td_ucred;	/* XXX */
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	if (!fsess_opt_mmap(vnode_mount(vp))) {
		FS_DEBUG("called on non-cacheable vnode??\n");
	}
	/* Default every page to "try again" until proven written. */
	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	/*
	 * When putting pages, do not extend file past EOF.
	 */

	if (offset + count > fsize) {
		count = fsize - offset;
		if (count < 0)
			count = 0;
	}
	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convienient and fast.
	 */
	bp = getpbuf(&fuse_pbuf_freecnt);

	kva = (vm_offset_t)bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	/* Single contiguous write covering the (clamped) byte range. */
	iov.iov_base = (caddr_t)kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	error = fuse_io_dispatch(vp, &uio, IO_DIRECT, cred);

	pmap_qremove(kva, npages);
	relpbuf(bp, &fuse_pbuf_freecnt);

	if (!error) {
		/* Mark fully-written pages clean and OK. */
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;

		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			VM_OBJECT_WLOCK(pages[i]->object);
			vm_page_undirty(pages[i]);
			VM_OBJECT_WUNLOCK(pages[i]->object);
		}
	}
	return rtvals[0];
}
1978
1979 /*
1980 struct vnop_print_args {
1981 struct vnode *a_vp;
1982 };
1983 */
1984 static int
1985 fuse_vnop_print(struct vop_print_args *ap)
1986 {
1987 struct fuse_vnode_data *fvdat = VTOFUD(ap->a_vp);
1988
1989 printf("nodeid: %ju, parent nodeid: %ju, nlookup: %ju, flag: %#x\n",
1990 (uintmax_t)VTOILLU(ap->a_vp), (uintmax_t)fvdat->parent_nid,
1991 (uintmax_t)fvdat->nlookup,
1992 fvdat->flag);
1993
1994 return 0;
1995 }
Cache object: 31baba1de4e3aadce2dfabc2ce5ed640
|