1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2007-2009 Google Inc. and Amit Singh
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
9 * met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following disclaimer
15 * in the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Google Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Copyright (C) 2005 Csaba Henk.
34 * All rights reserved.
35 *
36 * Copyright (c) 2019 The FreeBSD Foundation
37 *
38 * Portions of this software were developed by BFF Storage Systems, LLC under
39 * sponsorship from the FreeBSD Foundation.
40 *
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
43 * are met:
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 *
50 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * SUCH DAMAGE.
61 */
62
63 #include <sys/cdefs.h>
64 __FBSDID("$FreeBSD$");
65
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/counter.h>
69 #include <sys/module.h>
70 #include <sys/errno.h>
71 #include <sys/kernel.h>
72 #include <sys/conf.h>
73 #include <sys/uio.h>
74 #include <sys/malloc.h>
75 #include <sys/queue.h>
76 #include <sys/lock.h>
77 #include <sys/mutex.h>
78 #include <sys/sdt.h>
79 #include <sys/sx.h>
80 #include <sys/proc.h>
81 #include <sys/mount.h>
82 #include <sys/vnode.h>
83 #include <sys/namei.h>
84 #include <sys/stat.h>
85 #include <sys/unistd.h>
86 #include <sys/filedesc.h>
87 #include <sys/file.h>
88 #include <sys/fcntl.h>
89 #include <sys/dirent.h>
90 #include <sys/bio.h>
91 #include <sys/buf.h>
92 #include <sys/sysctl.h>
93 #include <sys/priv.h>
94
95 #include "fuse.h"
96 #include "fuse_file.h"
97 #include "fuse_internal.h"
98 #include "fuse_io.h"
99 #include "fuse_ipc.h"
100 #include "fuse_node.h"
102
103 SDT_PROVIDER_DECLARE(fusefs);
104 /*
105 * Fuse trace probe:
106 * arg0: verbosity. Higher numbers give more verbose messages
107 * arg1: Textual message
108 */
109 SDT_PROBE_DEFINE2(fusefs, , internal, trace, "int", "char*");
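/*
 * Usage sketch (not part of the driver logic): this probe can be watched from
 * userland with dtrace(1).  The exact probe tuple can be listed with
 * "dtrace -l -P fusefs"; something like the following should work:
 *   dtrace -n 'fusefs::internal:trace { printf("%s\n", stringof(arg1)); }'
 */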
110
111 #ifdef ZERO_PAD_INCOMPLETE_BUFS
112 static int isbzero(void *buf, size_t len);
113
114 #endif
115
116 counter_u64_t fuse_lookup_cache_hits;
117 counter_u64_t fuse_lookup_cache_misses;
118
119 SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_hits, CTLFLAG_RD,
120 &fuse_lookup_cache_hits, "number of positive cache hits in lookup");
121
122 SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_misses, CTLFLAG_RD,
123 &fuse_lookup_cache_misses, "number of cache misses in lookup");
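/*
 * These counters appear as read-only sysctls; assuming the stats node is
 * rooted at vfs.fusefs.stats, they can be read from userland with e.g.
 * "sysctl vfs.fusefs.stats.lookup_cache_hits".
 */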
124
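/*
 * Look up a vnode by inode number in the vfs hash.  Returns 0 with *vpp set
 * if the vnode is cached and its lookup entry has not timed out, 0 with
 * *vpp == NULL if it is absent or stale, or an error from vfs_hash_get.
 */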
125 int
126 fuse_internal_get_cached_vnode(struct mount* mp, ino_t ino, int flags,
127 struct vnode **vpp)
128 {
129 struct bintime now;
130 struct thread *td = curthread;
131 uint64_t nodeid = ino;
132 int error;
133
134 *vpp = NULL;
135
136 error = vfs_hash_get(mp, fuse_vnode_hash(nodeid), flags, td, vpp,
137 fuse_vnode_cmp, &nodeid);
138 if (error)
139 return error;
140 /*
141 * Check the entry cache timeout. We have to do this within fusefs
142 * instead of by using cache_enter_time/cache_lookup because those
143 * routines are only intended to work with pathnames, not inodes
144 */
145 if (*vpp != NULL) {
146 getbinuptime(&now);
147 if (bintime_cmp(&(VTOFUD(*vpp)->entry_cache_timeout), &now, >)){
148 counter_u64_add(fuse_lookup_cache_hits, 1);
149 return 0;
150 } else {
151 /* Entry cache timeout */
152 counter_u64_add(fuse_lookup_cache_misses, 1);
153 cache_purge(*vpp);
154 vput(*vpp);
155 *vpp = NULL;
156 }
157 }
158 return 0;
159 }
160
161 SDT_PROBE_DEFINE0(fusefs, , internal, access_vadmin);
162 /* Synchronously send a FUSE_ACCESS operation */
163 int
164 fuse_internal_access(struct vnode *vp,
165 accmode_t mode,
166 struct thread *td,
167 struct ucred *cred)
168 {
169 int err = 0;
170 uint32_t mask = F_OK;
171 int dataflags;
172 struct mount *mp;
173 struct fuse_dispatcher fdi;
174 struct fuse_access_in *fai;
175 struct fuse_data *data;
176
177 mp = vnode_mount(vp);
178
179 data = fuse_get_mpdata(mp);
180 dataflags = data->dataflags;
181
182 if (mode == 0)
183 return 0;
184
185 if (mode & VMODIFY_PERMS && vfs_isrdonly(mp)) {
186 switch (vp->v_type) {
187 case VDIR:
188 /* FALLTHROUGH */
189 case VLNK:
190 /* FALLTHROUGH */
191 case VREG:
192 return EROFS;
193 default:
194 break;
195 }
196 }
197
198 /* Unless explicitly permitted, deny everyone except the fs owner. */
199 if (!(dataflags & FSESS_DAEMON_CAN_SPY)) {
200 if (fuse_match_cred(data->daemoncred, cred))
201 return EPERM;
202 }
203
204 if (dataflags & FSESS_DEFAULT_PERMISSIONS) {
205 struct vattr va;
206
207 fuse_internal_getattr(vp, &va, cred, td);
208 return vaccess(vp->v_type, va.va_mode, va.va_uid,
209 va.va_gid, mode, cred);
210 }
211
212 if (mode & VADMIN) {
213 /*
214 * The FUSE protocol doesn't have an equivalent of VADMIN, so
215 * it's a bug if we ever reach this point with that bit set.
216 */
217 SDT_PROBE0(fusefs, , internal, access_vadmin);
218 }
219
220 if (fsess_not_impl(mp, FUSE_ACCESS))
221 return 0;
222
223 if ((mode & (VWRITE | VAPPEND)) != 0)
224 mask |= W_OK;
225 if ((mode & VREAD) != 0)
226 mask |= R_OK;
227 if ((mode & VEXEC) != 0)
228 mask |= X_OK;
229
230 fdisp_init(&fdi, sizeof(*fai));
231 fdisp_make_vp(&fdi, FUSE_ACCESS, vp, td, cred);
232
233 fai = fdi.indata;
234 fai->mask = mask;
235
236 err = fdisp_wait_answ(&fdi);
237 fdisp_destroy(&fdi);
238
239 if (err == ENOSYS) {
240 fsess_set_notimpl(mp, FUSE_ACCESS);
241 err = 0;
242 }
243 return err;
244 }
245
246 /*
247 * Cache FUSE attributes from attr, in attribute cache associated with vnode
248 * 'vp'. Optionally, if argument 'vap' is not NULL, store a copy of the
249 * converted attributes there as well.
250 *
251 * If the nominal attribute cache TTL is zero, do not cache on the 'vp' (but do
252 * return the result to the caller).
253 */
254 void
255 fuse_internal_cache_attrs(struct vnode *vp, struct fuse_attr *attr,
256 uint64_t attr_valid, uint32_t attr_valid_nsec, struct vattr *vap,
257 bool from_server)
258 {
259 struct mount *mp;
260 struct fuse_vnode_data *fvdat;
261 struct fuse_data *data;
262 struct vattr *vp_cache_at;
263
264 mp = vnode_mount(vp);
265 fvdat = VTOFUD(vp);
266 data = fuse_get_mpdata(mp);
267
268 ASSERT_VOP_ELOCKED(vp, "fuse_internal_cache_attrs");
269
270 fuse_validity_2_bintime(attr_valid, attr_valid_nsec,
271 &fvdat->attr_cache_timeout);
272
273 if (vnode_isreg(vp) &&
274 fvdat->cached_attrs.va_size != VNOVAL &&
275 attr->size != fvdat->cached_attrs.va_size)
276 {
277 if ( data->cache_mode == FUSE_CACHE_WB &&
278 fvdat->flag & FN_SIZECHANGE)
279 {
280 const char *msg;
281
282 /*
283 * The server changed the file's size even though we're
284 * using writeback caching and we have outstanding
285 * dirty writes! That's a server bug.
286 */
287 if (fuse_libabi_geq(data, 7, 23)) {
288 msg = "writeback cache incoherent!."
289 "To prevent data corruption, disable "
290 "the writeback cache according to your "
291 "FUSE server's documentation.";
292 } else {
293 msg = "writeback cache incoherent!."
294 "To prevent data corruption, disable "
295 "the writeback cache by setting "
296 "vfs.fusefs.data_cache_mode to 0 or 1.";
297 }
298 fuse_warn(data, FSESS_WARN_WB_CACHE_INCOHERENT, msg);
299 }
300 if (fuse_vnode_attr_cache_valid(vp) &&
301 data->cache_mode != FUSE_CACHE_UC)
302 {
303 /*
304 * The server changed the file's size even though we
305 * have it cached and our cache has not yet expired.
306 * That's a bug.
307 */
308 fuse_warn(data, FSESS_WARN_CACHE_INCOHERENT,
309 "cache incoherent! "
310 "To prevent "
311 "data corruption, disable the data cache "
312 "by mounting with -o direct_io, or as "
313 "directed otherwise by your FUSE server's "
314 "documentation.");
315 }
316 }
317
318 /* Fix our buffers if the filesize changed without us knowing */
319 if (vnode_isreg(vp) && attr->size != fvdat->cached_attrs.va_size) {
320 (void)fuse_vnode_setsize(vp, attr->size, from_server);
321 fvdat->cached_attrs.va_size = attr->size;
322 }
323
324 if (attr_valid > 0 || attr_valid_nsec > 0)
325 vp_cache_at = &(fvdat->cached_attrs);
326 else if (vap != NULL)
327 vp_cache_at = vap;
328 else
329 return;
330
331 vattr_null(vp_cache_at);
332 vp_cache_at->va_fsid = mp->mnt_stat.f_fsid.val[0];
333 vp_cache_at->va_fileid = attr->ino;
334 vp_cache_at->va_mode = attr->mode & ~S_IFMT;
335 vp_cache_at->va_nlink = attr->nlink;
336 vp_cache_at->va_uid = attr->uid;
337 vp_cache_at->va_gid = attr->gid;
338 vp_cache_at->va_rdev = attr->rdev;
339 vp_cache_at->va_size = attr->size;
340 /* XXX on i386, seconds are truncated to 32 bits */
341 vp_cache_at->va_atime.tv_sec = attr->atime;
342 vp_cache_at->va_atime.tv_nsec = attr->atimensec;
343 vp_cache_at->va_mtime.tv_sec = attr->mtime;
344 vp_cache_at->va_mtime.tv_nsec = attr->mtimensec;
345 vp_cache_at->va_ctime.tv_sec = attr->ctime;
346 vp_cache_at->va_ctime.tv_nsec = attr->ctimensec;
347 if (fuse_libabi_geq(data, 7, 9) && attr->blksize > 0)
348 vp_cache_at->va_blocksize = attr->blksize;
349 else
350 vp_cache_at->va_blocksize = PAGE_SIZE;
351 vp_cache_at->va_type = IFTOVT(attr->mode);
352 vp_cache_at->va_bytes = attr->blocks * S_BLKSIZE;
353 vp_cache_at->va_flags = 0;
354
355 if (vap != vp_cache_at && vap != NULL)
356 memcpy(vap, vp_cache_at, sizeof(*vap));
357 }
358
359 /* fsync */
360
361 int
362 fuse_internal_fsync_callback(struct fuse_ticket *tick, struct uio *uio)
363 {
364 if (tick->tk_aw_ohead.error == ENOSYS) {
365 fsess_set_notimpl(tick->tk_data->mp, fticket_opcode(tick));
366 }
367 return 0;
368 }
369
370 int
371 fuse_internal_fsync(struct vnode *vp,
372 struct thread *td,
373 int waitfor,
374 bool datasync)
375 {
376 struct fuse_fsync_in *ffsi = NULL;
377 struct fuse_dispatcher fdi;
378 struct fuse_filehandle *fufh;
379 struct fuse_vnode_data *fvdat = VTOFUD(vp);
380 struct mount *mp = vnode_mount(vp);
381 int op = FUSE_FSYNC;
382 int err = 0;
383
388 if (vnode_isdir(vp))
389 op = FUSE_FSYNCDIR;
390
391 if (fsess_not_impl(mp, op))
392 return 0;
393
394 fdisp_init(&fdi, sizeof(*ffsi));
395 /*
396 * fsync every open file handle for this file, because we can't be sure
397 * which file handle the caller is really referring to.
398 */
399 LIST_FOREACH(fufh, &fvdat->handles, next) {
400 fdi.iosize = sizeof(*ffsi);
401 if (ffsi == NULL)
402 fdisp_make_vp(&fdi, op, vp, td, NULL);
403 else
404 fdisp_refresh_vp(&fdi, op, vp, td, NULL);
405 ffsi = fdi.indata;
406 ffsi->fh = fufh->fh_id;
407 ffsi->fsync_flags = 0;
408
409 if (datasync)
410 ffsi->fsync_flags = FUSE_FSYNC_FDATASYNC;
411
412 if (waitfor == MNT_WAIT) {
413 err = fdisp_wait_answ(&fdi);
414 } else {
415 fuse_insert_callback(fdi.tick,
416 fuse_internal_fsync_callback);
417 fuse_insert_message(fdi.tick, false);
418 }
419 if (err == ENOSYS) {
420 /* ENOSYS means "success, and don't call again" */
421 fsess_set_notimpl(mp, op);
422 err = 0;
423 break;
424 }
425 }
426 fdisp_destroy(&fdi);
427
428 return err;
429 }
430
431 /* Asynchronous invalidation */
432 SDT_PROBE_DEFINE3(fusefs, , internal, invalidate_entry,
433 "struct vnode*", "struct fuse_notify_inval_entry_out*", "char*");
434 int
435 fuse_internal_invalidate_entry(struct mount *mp, struct uio *uio)
436 {
437 struct fuse_notify_inval_entry_out fnieo;
438 struct componentname cn;
439 struct vnode *dvp, *vp;
440 char name[PATH_MAX];
441 int err;
442
443 if ((err = uiomove(&fnieo, sizeof(fnieo), uio)) != 0)
444 return (err);
445
446 if (fnieo.namelen >= sizeof(name))
447 return (EINVAL);
448
449 if ((err = uiomove(name, fnieo.namelen, uio)) != 0)
450 return (err);
451 name[fnieo.namelen] = '\0';
452 /* fusefs does not cache "." or ".." entries */
453 if (strncmp(name, ".", sizeof(".")) == 0 ||
454 strncmp(name, "..", sizeof("..")) == 0)
455 return (0);
456
457 if (fnieo.parent == FUSE_ROOT_ID)
458 err = VFS_ROOT(mp, LK_SHARED, &dvp);
459 else
460 err = fuse_internal_get_cached_vnode( mp, fnieo.parent,
461 LK_SHARED, &dvp);
462 SDT_PROBE3(fusefs, , internal, invalidate_entry, dvp, &fnieo, name);
463 /*
464 * If dvp is not in the cache, then it must've been reclaimed. And
465 * since fuse_vnop_reclaim does a cache_purge, name's entry must've
466 * been invalidated already. So we can safely return if dvp == NULL
467 */
468 if (err != 0 || dvp == NULL)
469 return (err);
470 /*
471 * XXX we can't check dvp's generation because the FUSE invalidate
472 * entry message doesn't include it. Worst case is that we invalidate
473 * an entry that didn't need to be invalidated.
474 */
475
476 cn.cn_nameiop = LOOKUP;
477 cn.cn_flags = 0; /* !MAKEENTRY means free cached entry */
478 cn.cn_cred = curthread->td_ucred;
479 cn.cn_lkflags = LK_SHARED;
480 cn.cn_pnbuf = NULL;
481 cn.cn_nameptr = name;
482 cn.cn_namelen = fnieo.namelen;
483 err = cache_lookup(dvp, &vp, &cn, NULL, NULL);
484 MPASS(err == 0);
485 fuse_vnode_clear_attr_cache(dvp);
486 vput(dvp);
487 return (0);
488 }
489
490 SDT_PROBE_DEFINE2(fusefs, , internal, invalidate_inode,
491 "struct vnode*", "struct fuse_notify_inval_inode_out *");
492 int
493 fuse_internal_invalidate_inode(struct mount *mp, struct uio *uio)
494 {
495 struct fuse_notify_inval_inode_out fniio;
496 struct vnode *vp;
497 int err;
498
499 if ((err = uiomove(&fniio, sizeof(fniio), uio)) != 0)
500 return (err);
501
502 if (fniio.ino == FUSE_ROOT_ID)
503 err = VFS_ROOT(mp, LK_EXCLUSIVE, &vp);
504 else
505 err = fuse_internal_get_cached_vnode(mp, fniio.ino, LK_SHARED,
506 &vp);
507 SDT_PROBE2(fusefs, , internal, invalidate_inode, vp, &fniio);
508 if (err != 0 || vp == NULL)
509 return (err);
510 /*
511 * XXX we can't check vp's generation because the FUSE invalidate
512 * inode message doesn't include it. Worst case is that we invalidate
513 * an inode that didn't need to be invalidated.
514 */
515
516 /*
517 * Flush and invalidate buffers if off >= 0. Technically we only need
518 * to flush and invalidate the range of offsets [off, off + len), but
519 * for simplicity's sake we do everything.
520 */
521 if (fniio.off >= 0)
522 fuse_io_invalbuf(vp, curthread);
523 fuse_vnode_clear_attr_cache(vp);
524 vput(vp);
525 return (0);
526 }
527
528 /* mknod */
529 int
530 fuse_internal_mknod(struct vnode *dvp, struct vnode **vpp,
531 struct componentname *cnp, struct vattr *vap)
532 {
533 struct fuse_data *data;
534 struct fuse_mknod_in fmni;
535 size_t insize;
536
537 data = fuse_get_mpdata(dvp->v_mount);
538
539 fmni.mode = MAKEIMODE(vap->va_type, vap->va_mode);
540 fmni.rdev = vap->va_rdev;
541 if (fuse_libabi_geq(data, 7, 12)) {
542 insize = sizeof(fmni);
543 fmni.umask = curthread->td_proc->p_pd->pd_cmask;
544 fmni.padding = 0;
545 } else {
546 insize = FUSE_COMPAT_MKNOD_IN_SIZE;
547 }
548 return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKNOD, &fmni,
549 insize, vap->va_type));
550 }
551
552 /* readdir */
553
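/*
 * Read directory entries from the daemon with FUSE_READDIR and copy them to
 * the caller's uio as struct dirents.  If the caller supplied cookies,
 * *ncookies bounds how many directory offsets may be returned through them.
 */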
554 int
555 fuse_internal_readdir(struct vnode *vp,
556 struct uio *uio,
557 struct fuse_filehandle *fufh,
558 struct fuse_iov *cookediov,
559 int *ncookies,
560 uint64_t *cookies)
561 {
562 int err = 0;
563 struct fuse_dispatcher fdi;
564 struct fuse_read_in *fri = NULL;
565
566 if (uio_resid(uio) == 0)
567 return 0;
568 fdisp_init(&fdi, 0);
569
570 /*
571 * Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p
572 * I/O).
573 */
574 while (uio_resid(uio) > 0) {
575 fdi.iosize = sizeof(*fri);
576 fdisp_make_vp(&fdi, FUSE_READDIR, vp, NULL, NULL);
577 fri = fdi.indata;
578 fri->fh = fufh->fh_id;
579 fri->offset = uio_offset(uio);
580 fri->size = MIN(uio->uio_resid,
581 fuse_get_mpdata(vp->v_mount)->max_read);
582
583 if ((err = fdisp_wait_answ(&fdi)))
584 break;
585 if ((err = fuse_internal_readdir_processdata(uio, fri->size,
586 fdi.answ, fdi.iosize, cookediov, ncookies, &cookies)))
587 break;
588 }
589
590 fdisp_destroy(&fdi);
591 return ((err == -1) ? 0 : err);
592 }
593
594 /*
595 * Return -1 to indicate that this readdir is finished, 0 if it copied
596 * all of the directory data that was read and it may be possible to
597 * read more, or a value greater than 0 (an errno) on failure.
598 */
599 int
600 fuse_internal_readdir_processdata(struct uio *uio,
601 size_t reqsize,
602 void *buf,
603 size_t bufsize,
604 struct fuse_iov *cookediov,
605 int *ncookies,
606 uint64_t **cookiesp)
607 {
608 int err = 0;
609 int oreclen;
610 size_t freclen;
611
612 struct dirent *de;
613 struct fuse_dirent *fudge;
614 uint64_t *cookies;
615
616 cookies = *cookiesp;
617 if (bufsize < FUSE_NAME_OFFSET)
618 return -1;
619 for (;;) {
620 if (bufsize < FUSE_NAME_OFFSET) {
621 err = -1;
622 break;
623 }
624 fudge = (struct fuse_dirent *)buf;
625 freclen = FUSE_DIRENT_SIZE(fudge);
626
627 if (bufsize < freclen) {
628 /*
629 * This indicates a partial directory entry at the
630 * end of the directory data.
631 */
632 err = -1;
633 break;
634 }
635 #ifdef ZERO_PAD_INCOMPLETE_BUFS
636 if (isbzero(buf, FUSE_NAME_OFFSET)) {
637 err = -1;
638 break;
639 }
640 #endif
641
642 if (!fudge->namelen || fudge->namelen > MAXNAMLEN) {
643 err = EINVAL;
644 break;
645 }
646 oreclen = GENERIC_DIRSIZ((struct pseudo_dirent *)
647 &fudge->namelen);
648
649 if (oreclen > uio_resid(uio)) {
650 /* Out of space for the dir so we are done. */
651 err = -1;
652 break;
653 }
654 fiov_adjust(cookediov, oreclen);
655 bzero(cookediov->base, oreclen);
656
657 de = (struct dirent *)cookediov->base;
658 de->d_fileno = fudge->ino;
659 de->d_off = fudge->off;
660 de->d_reclen = oreclen;
661 de->d_type = fudge->type;
662 de->d_namlen = fudge->namelen;
663 memcpy((char *)cookediov->base + sizeof(struct dirent) -
664 MAXNAMLEN - 1,
665 (char *)buf + FUSE_NAME_OFFSET, fudge->namelen);
666 dirent_terminate(de);
667
668 err = uiomove(cookediov->base, cookediov->len, uio);
669 if (err)
670 break;
671 if (cookies != NULL) {
672 if (*ncookies == 0) {
673 err = -1;
674 break;
675 }
676 *cookies = fudge->off;
677 cookies++;
678 (*ncookies)--;
679 }
680 buf = (char *)buf + freclen;
681 bufsize -= freclen;
682 uio_setoffset(uio, fudge->off);
683 }
684 *cookiesp = cookies;
685
686 return err;
687 }
688
689 /* remove */
690
691 int
692 fuse_internal_remove(struct vnode *dvp,
693 struct vnode *vp,
694 struct componentname *cnp,
695 enum fuse_opcode op)
696 {
697 struct fuse_dispatcher fdi;
698 nlink_t nlink;
699 int err = 0;
700
701 fdisp_init(&fdi, cnp->cn_namelen + 1);
702 fdisp_make_vp(&fdi, op, dvp, curthread, cnp->cn_cred);
703
704 memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
705 ((char *)fdi.indata)[cnp->cn_namelen] = '\0';
706
707 err = fdisp_wait_answ(&fdi);
708 fdisp_destroy(&fdi);
709
710 if (err)
711 return (err);
712
713 /*
714 * Access the cached nlink even if the attr cache has expired. If
715 * it's inaccurate, the worst that will happen is:
716 * 1) We'll recycle the vnode even though the file has another link we
717 * don't know about, costing a bit of cpu time, or
718 * 2) We won't recycle the vnode even though all of its links are gone.
719 * It will linger around until vnlru reclaims it, costing a bit of
720 * temporary memory.
721 */
722 nlink = VTOFUD(vp)->cached_attrs.va_nlink--;
723
724 /*
725 * Purge the parent's attribute cache because the daemon
726 * should've updated its mtime and ctime.
727 */
728 fuse_vnode_clear_attr_cache(dvp);
729
730 /* NB: nlink could be zero if it was never cached */
731 if (nlink <= 1 || vnode_vtype(vp) == VDIR) {
732 fuse_internal_vnode_disappear(vp);
733 } else {
734 cache_purge(vp);
735 fuse_vnode_update(vp, FN_CTIMECHANGE);
736 }
737
738 return err;
739 }
740
741 /* rename */
742
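/*
 * A FUSE_RENAME request is a struct fuse_rename_in holding the destination
 * directory's node id, followed by the NUL-terminated source and destination
 * names.
 */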
743 int
744 fuse_internal_rename(struct vnode *fdvp,
745 struct componentname *fcnp,
746 struct vnode *tdvp,
747 struct componentname *tcnp)
748 {
749 struct fuse_dispatcher fdi;
750 struct fuse_rename_in *fri;
751 int err = 0;
752
753 fdisp_init(&fdi, sizeof(*fri) + fcnp->cn_namelen + tcnp->cn_namelen + 2);
754 fdisp_make_vp(&fdi, FUSE_RENAME, fdvp, curthread, tcnp->cn_cred);
755
756 fri = fdi.indata;
757 fri->newdir = VTOI(tdvp);
758 memcpy((char *)fdi.indata + sizeof(*fri), fcnp->cn_nameptr,
759 fcnp->cn_namelen);
760 ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen] = '\0';
761 memcpy((char *)fdi.indata + sizeof(*fri) + fcnp->cn_namelen + 1,
762 tcnp->cn_nameptr, tcnp->cn_namelen);
763 ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen +
764 tcnp->cn_namelen + 1] = '\0';
765
766 err = fdisp_wait_answ(&fdi);
767 fdisp_destroy(&fdi);
768 return err;
769 }
770
771 /* strategy */
772
773 /* entity creation */
774
775 void
776 fuse_internal_newentry_makerequest(struct mount *mp,
777 uint64_t dnid,
778 struct componentname *cnp,
779 enum fuse_opcode op,
780 void *buf,
781 size_t bufsize,
782 struct fuse_dispatcher *fdip)
783 {
784 fdip->iosize = bufsize + cnp->cn_namelen + 1;
785
786 fdisp_make(fdip, op, mp, dnid, curthread, cnp->cn_cred);
787 memcpy(fdip->indata, buf, bufsize);
788 memcpy((char *)fdip->indata + bufsize, cnp->cn_nameptr, cnp->cn_namelen);
789 ((char *)fdip->indata)[bufsize + cnp->cn_namelen] = '\0';
790 }
791
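/*
 * Wait for the answer to an entry-creating request, validate the returned
 * fuse_entry_out, and construct a vnode for the new entry.  If vnode creation
 * fails after the daemon has already created the entry, send a FUSE_FORGET so
 * the daemon's lookup count stays balanced.
 */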
792 int
793 fuse_internal_newentry_core(struct vnode *dvp,
794 struct vnode **vpp,
795 struct componentname *cnp,
796 enum vtype vtyp,
797 struct fuse_dispatcher *fdip)
798 {
799 int err = 0;
800 struct fuse_entry_out *feo;
801 struct mount *mp = vnode_mount(dvp);
802
803 if ((err = fdisp_wait_answ(fdip))) {
804 return err;
805 }
806 feo = fdip->answ;
807
808 if ((err = fuse_internal_checkentry(feo, vtyp))) {
809 return err;
810 }
811 err = fuse_vnode_get(mp, feo, feo->nodeid, dvp, vpp, cnp, vtyp);
812 if (err) {
813 fuse_internal_forget_send(mp, curthread, cnp->cn_cred,
814 feo->nodeid, 1);
815 return err;
816 }
817
818 /*
819 * Purge the parent's attribute cache because the daemon should've
820 * updated its mtime and ctime
821 */
822 fuse_vnode_clear_attr_cache(dvp);
823
824 fuse_internal_cache_attrs(*vpp, &feo->attr, feo->attr_valid,
825 feo->attr_valid_nsec, NULL, true);
826
827 return err;
828 }
829
830 int
831 fuse_internal_newentry(struct vnode *dvp,
832 struct vnode **vpp,
833 struct componentname *cnp,
834 enum fuse_opcode op,
835 void *buf,
836 size_t bufsize,
837 enum vtype vtype)
838 {
839 int err;
840 struct fuse_dispatcher fdi;
841 struct mount *mp = vnode_mount(dvp);
842
843 fdisp_init(&fdi, 0);
844 fuse_internal_newentry_makerequest(mp, VTOI(dvp), cnp, op, buf,
845 bufsize, &fdi);
846 err = fuse_internal_newentry_core(dvp, vpp, cnp, vtype, &fdi);
847 fdisp_destroy(&fdi);
848
849 return err;
850 }
851
852 /* entity destruction */
853
854 int
855 fuse_internal_forget_callback(struct fuse_ticket *ftick, struct uio *uio)
856 {
857 fuse_internal_forget_send(ftick->tk_data->mp, curthread, NULL,
858 ((struct fuse_in_header *)ftick->tk_ms_fiov.base)->nodeid, 1);
859
860 return 0;
861 }
862
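/*
 * Send a FUSE_FORGET for the given node id.  FORGET requests receive no
 * reply, so the message is queued asynchronously rather than waited for.
 */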
863 void
864 fuse_internal_forget_send(struct mount *mp,
865 struct thread *td,
866 struct ucred *cred,
867 uint64_t nodeid,
868 uint64_t nlookup)
869 {
870
871 struct fuse_dispatcher fdi;
872 struct fuse_forget_in *ffi;
873
874 /*
875 * KASSERT(nlookup > 0, ("zero-times forget for vp #%llu",
876 * (long long unsigned) nodeid));
877 */
878
879 fdisp_init(&fdi, sizeof(*ffi));
880 fdisp_make(&fdi, FUSE_FORGET, mp, nodeid, td, cred);
881
882 ffi = fdi.indata;
883 ffi->nlookup = nlookup;
884
885 fuse_insert_message(fdi.tick, false);
886 fdisp_destroy(&fdi);
887 }
888
889 /* Fetch the vnode's attributes from the daemon */
890 int
891 fuse_internal_do_getattr(struct vnode *vp, struct vattr *vap,
892 struct ucred *cred, struct thread *td)
893 {
894 struct fuse_dispatcher fdi;
895 struct fuse_vnode_data *fvdat = VTOFUD(vp);
896 struct fuse_getattr_in *fgai;
897 struct fuse_attr_out *fao;
898 off_t old_filesize = fvdat->cached_attrs.va_size;
899 struct timespec old_atime = fvdat->cached_attrs.va_atime;
900 struct timespec old_ctime = fvdat->cached_attrs.va_ctime;
901 struct timespec old_mtime = fvdat->cached_attrs.va_mtime;
902 enum vtype vtyp;
903 int err;
904
905 ASSERT_VOP_LOCKED(vp, __func__);
906
907 fdisp_init(&fdi, sizeof(*fgai));
908 fdisp_make_vp(&fdi, FUSE_GETATTR, vp, td, cred);
909 fgai = fdi.indata;
910 /*
911 * We could look up a file handle and set it in fgai->fh, but that
912 * involves extra runtime work and I'm unaware of any file systems that
913 * care.
914 */
915 fgai->getattr_flags = 0;
916 if ((err = fdisp_wait_answ(&fdi))) {
917 if (err == ENOENT)
918 fuse_internal_vnode_disappear(vp);
919 goto out;
920 }
921
922 fao = (struct fuse_attr_out *)fdi.answ;
923 vtyp = IFTOVT(fao->attr.mode);
924 if (fvdat->flag & FN_SIZECHANGE)
925 fao->attr.size = old_filesize;
926 if (fvdat->flag & FN_ATIMECHANGE) {
927 fao->attr.atime = old_atime.tv_sec;
928 fao->attr.atimensec = old_atime.tv_nsec;
929 }
930 if (fvdat->flag & FN_CTIMECHANGE) {
931 fao->attr.ctime = old_ctime.tv_sec;
932 fao->attr.ctimensec = old_ctime.tv_nsec;
933 }
934 if (fvdat->flag & FN_MTIMECHANGE) {
935 fao->attr.mtime = old_mtime.tv_sec;
936 fao->attr.mtimensec = old_mtime.tv_nsec;
937 }
938 fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid,
939 fao->attr_valid_nsec, vap, true);
940 if (vtyp != vnode_vtype(vp)) {
941 fuse_internal_vnode_disappear(vp);
942 err = ENOENT;
943 }
944
945 out:
946 fdisp_destroy(&fdi);
947 return err;
948 }
949
950 /* Read a vnode's attributes from cache or fetch them from the fuse daemon */
951 int
952 fuse_internal_getattr(struct vnode *vp, struct vattr *vap, struct ucred *cred,
953 struct thread *td)
954 {
955 struct vattr *attrs;
956
957 if ((attrs = VTOVA(vp)) != NULL) {
958 *vap = *attrs; /* struct copy */
959 return 0;
960 }
961
962 return fuse_internal_do_getattr(vp, vap, cred, td);
963 }
964
965 void
966 fuse_internal_vnode_disappear(struct vnode *vp)
967 {
968 struct fuse_vnode_data *fvdat = VTOFUD(vp);
969
970 ASSERT_VOP_ELOCKED(vp, "fuse_internal_vnode_disappear");
971 fvdat->flag |= FN_REVOKED;
972 cache_purge(vp);
973 }
974
975 /* fuse start/stop */
976
977 SDT_PROBE_DEFINE2(fusefs, , internal, init_done,
978 "struct fuse_data*", "struct fuse_init_out*");
979 int
980 fuse_internal_init_callback(struct fuse_ticket *tick, struct uio *uio)
981 {
982 int err = 0;
983 struct fuse_data *data = tick->tk_data;
984 struct fuse_init_out *fiio = NULL;
985
986 if ((err = tick->tk_aw_ohead.error)) {
987 goto out;
988 }
989 if ((err = fticket_pull(tick, uio))) {
990 goto out;
991 }
992 fiio = fticket_resp(tick)->base;
993
994 data->fuse_libabi_major = fiio->major;
995 data->fuse_libabi_minor = fiio->minor;
996 if (!fuse_libabi_geq(data, 7, 4)) {
997 /*
998 * With a little work we could support servers as old as 7.1.
999 * But there would be little payoff.
1000 */
1001 SDT_PROBE2(fusefs, , internal, trace, 1,
1002 "userpace version too low");
1003 err = EPROTONOSUPPORT;
1004 goto out;
1005 }
1006
1007 if (fuse_libabi_geq(data, 7, 5)) {
1008 if (fticket_resp(tick)->len == sizeof(struct fuse_init_out) ||
1009 fticket_resp(tick)->len == FUSE_COMPAT_22_INIT_OUT_SIZE) {
1010 data->max_write = fiio->max_write;
1011 if (fiio->flags & FUSE_ASYNC_READ)
1012 data->dataflags |= FSESS_ASYNC_READ;
1013 if (fiio->flags & FUSE_POSIX_LOCKS)
1014 data->dataflags |= FSESS_POSIX_LOCKS;
1015 if (fiio->flags & FUSE_EXPORT_SUPPORT)
1016 data->dataflags |= FSESS_EXPORT_SUPPORT;
1017 if (fiio->flags & FUSE_NO_OPEN_SUPPORT)
1018 data->dataflags |= FSESS_NO_OPEN_SUPPORT;
1019 if (fiio->flags & FUSE_NO_OPENDIR_SUPPORT)
1020 data->dataflags |= FSESS_NO_OPENDIR_SUPPORT;
1021 /*
1022 * Don't bother to check FUSE_BIG_WRITES, because it's
1023 * redundant with max_write
1024 */
1025 /*
1026 * max_background and congestion_threshold are not
1027 * implemented
1028 */
1029 } else {
1030 err = EINVAL;
1031 }
1032 } else {
1033 /* Old fixed values */
1034 data->max_write = 4096;
1035 }
1036
1037 if (fuse_libabi_geq(data, 7, 6))
1038 data->max_readahead_blocks = fiio->max_readahead / maxbcachebuf;
1039
1040 if (!fuse_libabi_geq(data, 7, 7))
1041 fsess_set_notimpl(data->mp, FUSE_INTERRUPT);
1042
1043 if (!fuse_libabi_geq(data, 7, 8)) {
1044 fsess_set_notimpl(data->mp, FUSE_BMAP);
1045 fsess_set_notimpl(data->mp, FUSE_DESTROY);
1046 }
1047
1048 if (!fuse_libabi_geq(data, 7, 19)) {
1049 fsess_set_notimpl(data->mp, FUSE_FALLOCATE);
1050 }
1051
1052 if (fuse_libabi_geq(data, 7, 23) && fiio->time_gran >= 1 &&
1053 fiio->time_gran <= 1000000000)
1054 data->time_gran = fiio->time_gran;
1055 else
1056 data->time_gran = 1;
1057
1058 if (!fuse_libabi_geq(data, 7, 23))
1059 data->cache_mode = fuse_data_cache_mode;
1060 else if (fiio->flags & FUSE_WRITEBACK_CACHE)
1061 data->cache_mode = FUSE_CACHE_WB;
1062 else
1063 data->cache_mode = FUSE_CACHE_WT;
1064
1065 if (!fuse_libabi_geq(data, 7, 24))
1066 fsess_set_notimpl(data->mp, FUSE_LSEEK);
1067
1068 if (!fuse_libabi_geq(data, 7, 28))
1069 fsess_set_notimpl(data->mp, FUSE_COPY_FILE_RANGE);
1070
1071 out:
1072 if (err) {
1073 fdata_set_dead(data);
1074 }
1075 FUSE_LOCK();
1076 data->dataflags |= FSESS_INITED;
1077 SDT_PROBE2(fusefs, , internal, init_done, data, fiio);
1078 wakeup(&data->ticketer);
1079 FUSE_UNLOCK();
1080
1081 return 0;
1082 }
1083
1084 void
1085 fuse_internal_send_init(struct fuse_data *data, struct thread *td)
1086 {
1087 struct fuse_init_in *fiii;
1088 struct fuse_dispatcher fdi;
1089
1090 fdisp_init(&fdi, sizeof(*fiii));
1091 fdisp_make(&fdi, FUSE_INIT, data->mp, 0, td, NULL);
1092 fiii = fdi.indata;
1093 fiii->major = FUSE_KERNEL_VERSION;
1094 fiii->minor = FUSE_KERNEL_MINOR_VERSION;
1095 /*
1096 * fusefs currently reads ahead no more than one cache block at a time.
1097 * See fuse_read_biobackend
1098 */
1099 fiii->max_readahead = maxbcachebuf;
1100 /*
1101 * Unsupported features:
1102 * FUSE_FILE_OPS: No known FUSE server or client supports it
1103 * FUSE_ATOMIC_O_TRUNC: our VFS cannot support it
1104 * FUSE_DONT_MASK: unlike Linux, FreeBSD always applies the umask, even
1105 * when default ACLs are in use.
1106 * FUSE_SPLICE_WRITE, FUSE_SPLICE_MOVE, FUSE_SPLICE_READ: FreeBSD
1107 * doesn't have splice(2).
1108 * FUSE_FLOCK_LOCKS: not yet implemented
1109 * FUSE_HAS_IOCTL_DIR: not yet implemented
1110 * FUSE_AUTO_INVAL_DATA: not yet implemented
1111 * FUSE_DO_READDIRPLUS: not yet implemented
1112 * FUSE_READDIRPLUS_AUTO: not yet implemented
1113 * FUSE_ASYNC_DIO: not yet implemented
1114 * FUSE_PARALLEL_DIROPS: not yet implemented
1115 * FUSE_HANDLE_KILLPRIV: not yet implemented
1116 * FUSE_POSIX_ACL: not yet implemented
1117 * FUSE_ABORT_ERROR: not yet implemented
1118 * FUSE_CACHE_SYMLINKS: not yet implemented
1119 * FUSE_MAX_PAGES: not yet implemented
1120 */
1121 fiii->flags = FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_EXPORT_SUPPORT
1122 | FUSE_BIG_WRITES | FUSE_WRITEBACK_CACHE
1123 | FUSE_NO_OPEN_SUPPORT | FUSE_NO_OPENDIR_SUPPORT;
1124
1125 fuse_insert_callback(fdi.tick, fuse_internal_init_callback);
1126 fuse_insert_message(fdi.tick, false);
1127 fdisp_destroy(&fdi);
1128 }
1129
1130 /*
1131 * Send a FUSE_SETATTR operation with no permissions checks. If cred is NULL,
1132 * send the request with root credentials
1133 */
1134 int fuse_internal_setattr(struct vnode *vp, struct vattr *vap,
1135 struct thread *td, struct ucred *cred)
1136 {
1137 struct fuse_vnode_data *fvdat;
1138 struct fuse_dispatcher fdi;
1139 struct fuse_setattr_in *fsai;
1140 struct mount *mp;
1141 pid_t pid = td->td_proc->p_pid;
1142 struct fuse_data *data;
1143 int err = 0;
1144 enum vtype vtyp;
1145
1146 ASSERT_VOP_ELOCKED(vp, __func__);
1147
1148 mp = vnode_mount(vp);
1149 fvdat = VTOFUD(vp);
1150 data = fuse_get_mpdata(mp);
1151
1152 fdisp_init(&fdi, sizeof(*fsai));
1153 fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
1154 if (!cred) {
1155 fdi.finh->uid = 0;
1156 fdi.finh->gid = 0;
1157 }
1158 fsai = fdi.indata;
1159 fsai->valid = 0;
1160
1161 if (vap->va_uid != (uid_t)VNOVAL) {
1162 fsai->uid = vap->va_uid;
1163 fsai->valid |= FATTR_UID;
1164 }
1165 if (vap->va_gid != (gid_t)VNOVAL) {
1166 fsai->gid = vap->va_gid;
1167 fsai->valid |= FATTR_GID;
1168 }
1169 if (vap->va_size != VNOVAL) {
1170 struct fuse_filehandle *fufh = NULL;
1171
1172 /* Truncate to a new value. */
1173 fsai->size = vap->va_size;
1174 fsai->valid |= FATTR_SIZE;
1175
1176 fuse_filehandle_getrw(vp, FWRITE, &fufh, cred, pid);
1177 if (fufh) {
1178 fsai->fh = fufh->fh_id;
1179 fsai->valid |= FATTR_FH;
1180 }
1181 VTOFUD(vp)->flag &= ~FN_SIZECHANGE;
1182 }
1183 if (vap->va_atime.tv_sec != VNOVAL) {
1184 fsai->atime = vap->va_atime.tv_sec;
1185 fsai->atimensec = vap->va_atime.tv_nsec;
1186 fsai->valid |= FATTR_ATIME;
1187 if (vap->va_vaflags & VA_UTIMES_NULL)
1188 fsai->valid |= FATTR_ATIME_NOW;
1189 } else if (fvdat->flag & FN_ATIMECHANGE) {
1190 fsai->atime = fvdat->cached_attrs.va_atime.tv_sec;
1191 fsai->atimensec = fvdat->cached_attrs.va_atime.tv_nsec;
1192 fsai->valid |= FATTR_ATIME;
1193 }
1194 if (vap->va_mtime.tv_sec != VNOVAL) {
1195 fsai->mtime = vap->va_mtime.tv_sec;
1196 fsai->mtimensec = vap->va_mtime.tv_nsec;
1197 fsai->valid |= FATTR_MTIME;
1198 if (vap->va_vaflags & VA_UTIMES_NULL)
1199 fsai->valid |= FATTR_MTIME_NOW;
1200 } else if (fvdat->flag & FN_MTIMECHANGE) {
1201 fsai->mtime = fvdat->cached_attrs.va_mtime.tv_sec;
1202 fsai->mtimensec = fvdat->cached_attrs.va_mtime.tv_nsec;
1203 fsai->valid |= FATTR_MTIME;
1204 }
1205 if (fuse_libabi_geq(data, 7, 23) && fvdat->flag & FN_CTIMECHANGE) {
1206 fsai->ctime = fvdat->cached_attrs.va_ctime.tv_sec;
1207 fsai->ctimensec = fvdat->cached_attrs.va_ctime.tv_nsec;
1208 fsai->valid |= FATTR_CTIME;
1209 }
1210 if (vap->va_mode != (mode_t)VNOVAL) {
1211 fsai->mode = vap->va_mode & ALLPERMS;
1212 fsai->valid |= FATTR_MODE;
1213 }
1214 if (!fsai->valid) {
1215 goto out;
1216 }
1217
1218 if ((err = fdisp_wait_answ(&fdi)))
1219 goto out;
1220 vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode);
1221
1222 if (vnode_vtype(vp) != vtyp) {
1223 if (vnode_vtype(vp) == VNON && vtyp != VNON) {
1224 SDT_PROBE2(fusefs, , internal, trace, 1, "FUSE: Dang! "
1225 "vnode_vtype is VNON and vtype isn't.");
1226 } else {
1227 /*
1228 * STALE vnode, ditch
1229 *
1230 * The vnode has changed its type "behind our back".
1231 * This probably means that the file got deleted and
1232 * recreated on the server, with the same inode.
1233 * There's nothing really we can do, so let us just
1234 * return ENOENT. After all, the entry must not have
1235 * existed in the recent past. If the user tries
1236 * again, it will work.
1237 */
1238 fuse_internal_vnode_disappear(vp);
1239 err = ENOENT;
1240 }
1241 }
1242 if (err == 0) {
1243 struct fuse_attr_out *fao = (struct fuse_attr_out*)fdi.answ;
1244 fuse_vnode_undirty_cached_timestamps(vp, true);
1245 fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid,
1246 fao->attr_valid_nsec, NULL, false);
1247 getnanouptime(&fvdat->last_local_modify);
1248 }
1249
1250 out:
1251 fdisp_destroy(&fdi);
1252 return err;
1253 }
1254
1255 /*
1256 * FreeBSD clears the SUID and SGID bits on any write by a non-root user.
1257 */
1258 void
1259 fuse_internal_clear_suid_on_write(struct vnode *vp, struct ucred *cred,
1260 struct thread *td)
1261 {
1262 struct fuse_data *data;
1263 struct mount *mp;
1264 struct vattr va;
1265 int dataflags;
1266
1267 mp = vnode_mount(vp);
1268 data = fuse_get_mpdata(mp);
1269 dataflags = data->dataflags;
1270
1271 ASSERT_VOP_LOCKED(vp, __func__);
1272
1273 if (dataflags & FSESS_DEFAULT_PERMISSIONS) {
1274 if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) {
1275 fuse_internal_getattr(vp, &va, cred, td);
1276 if (va.va_mode & (S_ISUID | S_ISGID)) {
1277 mode_t mode = va.va_mode & ~(S_ISUID | S_ISGID);
1278 /* Clear all vattr fields except mode */
1279 vattr_null(&va);
1280 va.va_mode = mode;
1281
1282 /*
1283 * Ignore fuse_internal_setattr's return value,
1284 * because at this point the write operation has
1285 * already succeeded and we don't want to return
1286 * failing status for that.
1287 */
1288 (void)fuse_internal_setattr(vp, &va, td, NULL);
1289 }
1290 }
1291 }
1292 }
1293
1294 #ifdef ZERO_PAD_INCOMPLETE_BUFS
1295 static int
1296 isbzero(void *buf, size_t len)
1297 {
1298 size_t i;
1299
1300 for (i = 0; i < len; i++) {
1301 if (((char *)buf)[i])
1302 return (0);
1303 }
1304
1305 return (1);
1306 }
1307
1308 #endif
1309
1310 void
1311 fuse_internal_init(void)
1312 {
1313 fuse_lookup_cache_misses = counter_u64_alloc(M_WAITOK);
1314 fuse_lookup_cache_hits = counter_u64_alloc(M_WAITOK);
1315 }
1316
1317 void
1318 fuse_internal_destroy(void)
1319 {
1320 counter_u64_free(fuse_lookup_cache_hits);
1321 counter_u64_free(fuse_lookup_cache_misses);
1322 }