/*-
 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
 *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/extattr.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include "opt_directio.h"
#include "opt_ffs.h"

#ifdef DIRECTIO
extern int ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif
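
/*
 * When the kernel is built with "options DIRECTIO", the ffs_rawread()
 * declared above is supplied by ffs_rawread.c; without that option the
 * IO_DIRECT cases in ffs_read() below simply fall back to the normal
 * buffered path.
 */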
static vop_fdatasync_t ffs_fdatasync;
static vop_fsync_t ffs_fsync;
static vop_getpages_t ffs_getpages;
static vop_lock1_t ffs_lock;
static vop_read_t ffs_read;
static vop_write_t ffs_write;
static int ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
static int ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
	    struct ucred *cred);
static vop_strategy_t ffsext_strategy;
static vop_closeextattr_t ffs_closeextattr;
static vop_deleteextattr_t ffs_deleteextattr;
static vop_getextattr_t ffs_getextattr;
static vop_listextattr_t ffs_listextattr;
static vop_openextattr_t ffs_openextattr;
static vop_setextattr_t ffs_setextattr;
static vop_vptofh_t ffs_vptofh;

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops1 = {
	.vop_default = &ufs_vnodeops,
	.vop_fsync = ffs_fsync,
	.vop_fdatasync = ffs_fdatasync,
	.vop_getpages = ffs_getpages,
	.vop_getpages_async = vnode_pager_local_getpages_async,
	.vop_lock1 = ffs_lock,
	.vop_read = ffs_read,
	.vop_reallocblks = ffs_reallocblks,
	.vop_write = ffs_write,
	.vop_vptofh = ffs_vptofh,
};

struct vop_vector ffs_fifoops1 = {
	.vop_default = &ufs_fifoops,
	.vop_fsync = ffs_fsync,
	.vop_fdatasync = ffs_fdatasync,
	.vop_reallocblks = ffs_reallocblks, /* XXX: really ??? */
	.vop_vptofh = ffs_vptofh,
};

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops2 = {
	.vop_default = &ufs_vnodeops,
	.vop_fsync = ffs_fsync,
	.vop_fdatasync = ffs_fdatasync,
	.vop_getpages = ffs_getpages,
	.vop_getpages_async = vnode_pager_local_getpages_async,
	.vop_lock1 = ffs_lock,
	.vop_read = ffs_read,
	.vop_reallocblks = ffs_reallocblks,
	.vop_write = ffs_write,
	.vop_closeextattr = ffs_closeextattr,
	.vop_deleteextattr = ffs_deleteextattr,
	.vop_getextattr = ffs_getextattr,
	.vop_listextattr = ffs_listextattr,
	.vop_openextattr = ffs_openextattr,
	.vop_setextattr = ffs_setextattr,
	.vop_vptofh = ffs_vptofh,
};

struct vop_vector ffs_fifoops2 = {
	.vop_default = &ufs_fifoops,
	.vop_fsync = ffs_fsync,
	.vop_fdatasync = ffs_fdatasync,
	.vop_lock1 = ffs_lock,
	.vop_reallocblks = ffs_reallocblks,
	.vop_strategy = ffsext_strategy,
	.vop_closeextattr = ffs_closeextattr,
	.vop_deleteextattr = ffs_deleteextattr,
	.vop_getextattr = ffs_getextattr,
	.vop_listextattr = ffs_listextattr,
	.vop_openextattr = ffs_openextattr,
	.vop_setextattr = ffs_setextattr,
	.vop_vptofh = ffs_vptofh,
};
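
/*
 * Any operation not filled in above falls through to the vector named
 * by .vop_default, so, for example, VOP_READ() on an ffs vnode resolves
 * to ffs_read() while name lookups fall through to the common UFS code
 * in ufs_vnodeops.
 */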
181
182 /*
183 * Synch an open file.
184 */
185 /* ARGSUSED */
186 static int
187 ffs_fsync(struct vop_fsync_args *ap)
188 {
189 struct vnode *vp;
190 struct bufobj *bo;
191 int error;
192
193 vp = ap->a_vp;
194 bo = &vp->v_bufobj;
195 retry:
196 error = ffs_syncvnode(vp, ap->a_waitfor, 0);
197 if (error)
198 return (error);
199 if (ap->a_waitfor == MNT_WAIT && DOINGSOFTDEP(vp)) {
200 error = softdep_fsync(vp);
201 if (error)
202 return (error);
203
204 /*
205 * The softdep_fsync() function may drop vp lock,
206 * allowing for dirty buffers to reappear on the
207 * bo_dirty list. Recheck and resync as needed.
208 */
209 BO_LOCK(bo);
210 if ((vp->v_type == VREG || vp->v_type == VDIR) &&
211 (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)) {
212 BO_UNLOCK(bo);
213 goto retry;
214 }
215 BO_UNLOCK(bo);
216 }
217 return (0);
218 }
219
220 int
221 ffs_syncvnode(struct vnode *vp, int waitfor, int flags)
222 {
223 struct inode *ip;
224 struct bufobj *bo;
225 struct buf *bp, *nbp;
226 ufs_lbn_t lbn;
227 int error, passes;
228 bool still_dirty, wait;
229
230 ip = VTOI(vp);
231 ip->i_flag &= ~IN_NEEDSYNC;
232 bo = &vp->v_bufobj;
233
234 /*
235 * When doing MNT_WAIT we must first flush all dependencies
236 * on the inode.
237 */
238 if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT &&
239 (error = softdep_sync_metadata(vp)) != 0)
240 return (error);
241
242 /*
243 * Flush all dirty buffers associated with a vnode.
244 */
245 error = 0;
246 passes = 0;
247 wait = false; /* Always do an async pass first. */
248 lbn = lblkno(ITOFS(ip), (ip->i_size + ITOFS(ip)->fs_bsize - 1));
249 BO_LOCK(bo);
250 loop:
251 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
252 bp->b_vflags &= ~BV_SCANNED;
253 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
254 /*
255 * Reasons to skip this buffer: it has already been considered
256 * on this pass, the buffer has dependencies that will cause
257 * it to be redirtied and it has not already been deferred,
258 * or it is already being written.
259 */
260 if ((bp->b_vflags & BV_SCANNED) != 0)
261 continue;
262 bp->b_vflags |= BV_SCANNED;
263 /*
264 * Flush indirects in order, if requested.
265 *
266 * Note that if only datasync is requested, we can
267 * skip indirect blocks when softupdates are not
268 * active. Otherwise we must flush them with data,
269 * since dependencies prevent data block writes.
270 */
271 if (waitfor == MNT_WAIT && bp->b_lblkno <= -NDADDR &&
272 (lbn_level(bp->b_lblkno) >= passes ||
273 ((flags & DATA_ONLY) != 0 && !DOINGSOFTDEP(vp))))
274 continue;
275 if (bp->b_lblkno > lbn)
276 panic("ffs_syncvnode: syncing truncated data.");
277 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
278 BO_UNLOCK(bo);
279 } else if (wait) {
280 if (BUF_LOCK(bp,
281 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
282 BO_LOCKPTR(bo)) != 0) {
283 bp->b_vflags &= ~BV_SCANNED;
284 goto next;
285 }
286 } else
287 continue;
288 if ((bp->b_flags & B_DELWRI) == 0)
289 panic("ffs_fsync: not dirty");
290 /*
291 * Check for dependencies and potentially complete them.
292 */
293 if (!LIST_EMPTY(&bp->b_dep) &&
294 (error = softdep_sync_buf(vp, bp,
295 wait ? MNT_WAIT : MNT_NOWAIT)) != 0) {
296 /* I/O error. */
297 if (error != EBUSY) {
298 BUF_UNLOCK(bp);
299 return (error);
300 }
301 /* If we deferred once, don't defer again. */
302 if ((bp->b_flags & B_DEFERRED) == 0) {
303 bp->b_flags |= B_DEFERRED;
304 BUF_UNLOCK(bp);
305 goto next;
306 }
307 }
308 if (wait) {
309 bremfree(bp);
310 if ((error = bwrite(bp)) != 0)
311 return (error);
312 } else if ((bp->b_flags & B_CLUSTEROK)) {
313 (void) vfs_bio_awrite(bp);
314 } else {
315 bremfree(bp);
316 (void) bawrite(bp);
317 }
318 next:
319 /*
320 * Since we may have slept during the I/O, we need
321 * to start from a known point.
322 */
323 BO_LOCK(bo);
324 nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
325 }
326 if (waitfor != MNT_WAIT) {
327 BO_UNLOCK(bo);
328 if ((flags & NO_INO_UPDT) != 0)
329 return (0);
330 else
331 return (ffs_update(vp, 0));
332 }
333 /* Drain IO to see if we're done. */
334 bufobj_wwait(bo, 0, 0);
335 /*
336 * Block devices associated with filesystems may have new I/O
337 * requests posted for them even if the vnode is locked, so no
338 * amount of trying will get them clean. We make several passes
339 * as a best effort.
340 *
341 * Regular files may need multiple passes to flush all dependency
342 * work as it is possible that we must write once per indirect
343 * level, once for the leaf, and once for the inode and each of
344 * these will be done with one sync and one async pass.
345 */
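	/*
	 * As a rough illustration of the pass limit below: with UFS's
	 * NIADDR == 3, NIADDR + 2 numbered passes, each consisting of an
	 * async sweep followed by a sync sweep, suffice to push a
	 * dependency chain from a data block through the single, double,
	 * and triple indirect blocks and finally to the inode.
	 */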
	if (bo->bo_dirty.bv_cnt > 0) {
		if ((flags & DATA_ONLY) == 0) {
			still_dirty = true;
		} else {
			/*
			 * For data-only sync, dirty indirect buffers
			 * are ignored.
			 */
			still_dirty = false;
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
				if (bp->b_lblkno > -NDADDR) {
					still_dirty = true;
					break;
				}
			}
		}

		if (still_dirty) {
			/* Write the inode after sync passes to flush deps. */
			if (wait && DOINGSOFTDEP(vp) &&
			    (flags & NO_INO_UPDT) == 0) {
				BO_UNLOCK(bo);
				ffs_update(vp, 1);
				BO_LOCK(bo);
			}
			/* switch between sync/async. */
			wait = !wait;
			if (wait || ++passes < NIADDR + 2)
				goto loop;
#ifdef INVARIANTS
			if (!vn_isdisk(vp, NULL))
				vn_printf(vp, "ffs_fsync: dirty ");
#endif
		}
	}
	BO_UNLOCK(bo);
	error = 0;
	if ((flags & DATA_ONLY) == 0) {
		if ((flags & NO_INO_UPDT) == 0)
			error = ffs_update(vp, 1);
		if (DOINGSUJ(vp))
			softdep_journal_fsync(VTOI(vp));
	} else if ((ip->i_flags & (IN_SIZEMOD | IN_IBLKDATA)) != 0) {
		error = ffs_update(vp, 1);
	}
	return (error);
}

static int
ffs_fdatasync(struct vop_fdatasync_args *ap)
{

	return (ffs_syncvnode(ap->a_vp, MNT_WAIT, DATA_ONLY));
}

static int
ffs_lock(struct vop_lock1_args *ap)
/*
struct vop_lock1_args {
	struct vnode *a_vp;
	int a_flags;
	struct thread *a_td;
	char *file;
	int line;
};
*/
{
#ifndef NO_FFS_SNAPSHOT
	struct vnode *vp;
	int flags;
	struct lock *lkp;
	int result;

	switch (ap->a_flags & LK_TYPE_MASK) {
	case LK_SHARED:
	case LK_UPGRADE:
	case LK_EXCLUSIVE:
		vp = ap->a_vp;
		flags = ap->a_flags;
		for (;;) {
#ifdef DEBUG_VFS_LOCKS
			KASSERT(vp->v_holdcnt != 0,
			    ("ffs_lock %p: zero hold count", vp));
#endif
			lkp = vp->v_vnlock;
			result = _lockmgr_args(lkp, flags, VI_MTX(vp),
			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
			    ap->a_file, ap->a_line);
			if (lkp == vp->v_vnlock || result != 0)
				break;
			/*
			 * Apparent success, except that the vnode
			 * mutated between snapshot file vnode and
			 * regular file vnode while this process
			 * slept. The lock currently held is not the
			 * right lock. Release it, and try to get the
			 * new lock.
			 */
			(void) _lockmgr_args(lkp, LK_RELEASE, NULL,
			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
			    ap->a_file, ap->a_line);
			if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
			    (LK_INTERLOCK | LK_NOWAIT))
				return (EBUSY);
			if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
				flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
			flags &= ~LK_INTERLOCK;
		}
		break;
	default:
		result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
	}
	return (result);
#else
	return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
#endif
}

/*
 * Vnode op for reading.
 */
static int
ffs_read(struct vop_read_args *ap)
/*
struct vop_read_args {
	struct vnode *a_vp;
	struct uio *a_uio;
	int a_ioflag;
	struct ucred *a_cred;
};
*/
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	ssize_t orig_resid;
	int error;
	int seqcount;
	int ioflag;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extread(vp, uio, ioflag));
#else
		panic("ffs_read+IO_EXT");
#endif
#ifdef DIRECTIO
	if ((ioflag & IO_DIRECT) != 0) {
		int workdone;

		error = ffs_rawread(vp, uio, &workdone);
		if (error != 0 || workdone != 0)
			return (error);
	}
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
	fs = ITOFS(ip);
	if (uio->uio_offset < ip->i_size &&
	    uio->uio_offset >= fs->fs_maxfilesize)
		return (EOVERFLOW);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * Size of the buffer. The buffer representing the
		 * end of the file is rounded up to the size of the
		 * block type (fragment or full block, depending on
		 * how the file ends).
		 */
		size = blksize(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ip->i_size) {
			/*
			 * Don't do readahead if this is the end of the file.
			 */
			error = bread_gb(vp, lbn, size, NOCRED,
			    GB_UNMAPPED, &bp);
		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			/*
			 * Otherwise if we are allowed to cluster,
			 * grab as much as we can.
			 *
			 * XXX  This may not be a win if we are not
			 * doing sequential access.
			 */
			error = cluster_read(vp, ip->i_size, lbn,
			    size, NOCRED, blkoffset + uio->uio_resid,
			    seqcount, GB_UNMAPPED, &bp);
		} else if (seqcount > 1) {
			/*
			 * If we are NOT allowed to cluster, then
			 * if we appear to be acting sequentially,
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			u_int nextsize = blksize(fs, ip, nextlbn);
			error = breadn_flags(vp, lbn, size, &nextlbn,
			    &nextsize, 1, NOCRED, GB_UNMAPPED, &bp);
		} else {
			/*
			 * Failing all of the above, just read what the
			 * user asked for. Interestingly, the same as
			 * the first option above.
			 */
			error = bread_gb(vp, lbn, size, NOCRED,
			    GB_UNMAPPED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		if (buf_mapped(bp)) {
			error = vn_io_fault_uiomove((char *)bp->b_data +
			    blkoffset, (int)xfersize, uio);
		} else {
			error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
			    (int)xfersize, uio);
		}
		if (error)
			break;

		vfs_bio_brelse(bp, ioflag);
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it; so it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL)
		vfs_bio_brelse(bp, ioflag);

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0 &&
	    (ip->i_flag & IN_ACCESS) == 0) {
		VI_LOCK(vp);
		ip->i_flag |= IN_ACCESS;
		VI_UNLOCK(vp);
	}
	return (error);
}

/*
 * Vnode op for writing.
 */
static int
ffs_write(struct vop_write_args *ap)
/*
struct vop_write_args {
	struct vnode *a_vp;
	struct uio *a_uio;
	int a_ioflag;
	struct ucred *a_cred;
};
*/
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	ssize_t resid;
	int seqcount;
	int blkoffset, error, flags, ioflag, size, xfersize;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
#else
		panic("ffs_write+IO_EXT");
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		panic("ffs_write: dir write");
		break;
	default:
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
		    (int)uio->uio_offset, (int)uio->uio_resid);
	}

	KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
	KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
	fs = ITOFS(ip);
	if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, uio->uio_td))
		return (EFBIG);

	resid = uio->uio_resid;
	osize = ip->i_size;
	if (seqcount > BA_SEQMAX)
		flags = BA_SEQMAX << BA_SEQSHIFT;
	else
		flags = seqcount << BA_SEQSHIFT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;
	flags |= BA_UNMAPPED;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
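		/*
		 * For example, with an 8K fs_bsize, a 512-byte write into
		 * the middle of a block sets BA_CLRBUF so that UFS_BALLOC()
		 * reads or zeroes the rest of the buffer first, while a
		 * full block-sized transfer can skip that read-before-write.
		 */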
		/* XXX is uio->uio_offset the right thing here? */
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ap->a_cred, flags, &bp);
		if (error != 0) {
			vnode_pager_setsize(vp, ip->i_size);
			break;
		}
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			DIP_SET(ip, i_size, ip->i_size);
			ip->i_flag |= IN_SIZEMOD | IN_CHANGE;
		}

		size = blksize(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		if (buf_mapped(bp)) {
			error = vn_io_fault_uiomove((char *)bp->b_data +
			    blkoffset, (int)xfersize, uio);
		} else {
			error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
			    (int)xfersize, uio);
		}
		/*
		 * If the buffer is not already filled and we encounter an
		 * error while trying to fill it, we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland mmap.
		 *
		 * Note that we need only clear buffers with a transfer size
		 * equal to the block size because buffers with a shorter
		 * transfer size were cleared above by the call to UFS_BALLOC()
		 * with the BA_CLRBUF flag set.
		 *
		 * If the source region for uiomove identically mmaps the
		 * buffer, uiomove() performed the NOP copy, and the buffer
		 * content remains valid because the page fault handler
		 * validated the pages.
		 */
		if (error != 0 && (bp->b_flags & B_CACHE) == 0 &&
		    fs->fs_bsize == xfersize)
			vfs_bio_clrbuf(bp);

		vfs_bio_set_flags(bp, ioflag);

		/*
		 * If IO_SYNC each buffer is written synchronously. Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously. Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(vp, bp, ip->i_size, seqcount,
				    GB_UNMAPPED);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
	    ap->a_cred) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
			ip->i_mode &= ~(ISUID | ISGID);
			DIP_SET(ip, i_mode, ip->i_mode);
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_NORMAL | (ioflag & IO_SYNC), ap->a_cred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * Extended attribute area reading.
 */
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	ssize_t orig_resid;
	int error;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	dp = ip->i_din2;

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extread: mode");
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * Size of the buffer. The buffer representing the
		 * end of the file is rounded up to the size of the
		 * block type (fragment or full block, depending on
		 * how the area ends).
		 */
		size = sblksize(fs, dp->di_extsize, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
			/*
			 * Don't do readahead if this is the end of the info.
			 */
			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
		} else {
			/*
			 * If we have a second block, then
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			u_int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

			nextlbn = -1 - nextlbn;
			error = breadn(vp, -1 - lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;
		vfs_bio_brelse(bp, ioflag);
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it; so it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL)
		vfs_bio_brelse(bp, ioflag);
	return (error);
}

/*
 * Extended attribute area writing.
 */
static int
ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *ucred)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	ssize_t resid;
	int blkoffset, error, flags, size, xfersize;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	dp = ip->i_din2;

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extwrite: mode");
#endif

	if (ioflag & IO_APPEND)
		uio->uio_offset = dp->di_extsize;
	KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
	KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
	if ((uoff_t)uio->uio_offset + uio->uio_resid >
	    NXADDR * fs->fs_bsize)
		return (EFBIG);

	resid = uio->uio_resid;
	osize = dp->di_extsize;
	flags = IO_EXT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ucred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap(). XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);

		if (uio->uio_offset + xfersize > dp->di_extsize) {
			dp->di_extsize = uio->uio_offset + xfersize;
			ip->i_flag |= IN_SIZEMOD | IN_CHANGE;
		}

		size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);

		vfs_bio_set_flags(bp, ioflag);

		/*
		 * If IO_SYNC each buffer is written synchronously. Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously. Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    xfersize + blkoffset == fs->fs_bsize ||
		    (ioflag & (IO_ASYNC | IO_DIRECT)))
			bawrite(bp);
		else
			bdwrite(bp);
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE;
	}
	/*
	 * If we successfully wrote any data and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
	    ucred) {
		if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) {
			ip->i_mode &= ~(ISUID | ISGID);
			dp->di_mode = ip->i_mode;
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_EXT | (ioflag & IO_SYNC), ucred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

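/*
 * Extended attribute records in the external attribute area are laid
 * out as the parser below expects (a sketch; see <ufs/ufs/extattr.h>
 * for the authoritative definitions):
 *
 *	uint32_t length;	total record length, a multiple of 8
 *	uint8_t  namespace;	attribute namespace
 *	uint8_t  contentpadlen;	pad bytes trailing the content
 *	uint8_t  namelength;	length of the name that follows
 *	char     name[];	name, padded so the header is 8-aligned
 *
 * The content follows the padded header; its usable length is
 * length - header - contentpadlen, which is what ffs_findextattr()
 * returns.
 */
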
/*
 * Extended attribute area lookup helper.
 *
 * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 * the length of the EA, and possibly the pointer to the entry and to the data.
 */
static int
ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name,
    u_char **eap, u_char **eac)
{
	u_char *p, *pe, *pn, *p0;
	int eapad1, eapad2, ealength, ealen, nlen;
	uint32_t ul;

	pe = ptr + length;
	nlen = strlen(name);

	for (p = ptr; p < pe; p = pn) {
		p0 = p;
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		/* make sure this entry is complete */
		if (pn > pe)
			break;
		p += sizeof(uint32_t);
		if (*p != nspace)
			continue;
		p++;
		eapad2 = *p++;
		if (*p != nlen)
			continue;
		p++;
		if (bcmp(p, name, nlen))
			continue;
		ealength = sizeof(uint32_t) + 3 + nlen;
		eapad1 = 8 - (ealength % 8);
		if (eapad1 == 8)
			eapad1 = 0;
		ealength += eapad1;
		ealen = ul - ealength - eapad2;
		p += nlen + eapad1;
		if (eap != NULL)
			*eap = p0;
		if (eac != NULL)
			*eac = p;
		return (ealen);
	}
	return (-1);
}

static int
ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct uio luio;
	struct iovec liovec;
	u_int easize;
	int error;
	u_char *eae;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	dp = ip->i_din2;
	easize = dp->di_extsize;
	if ((uoff_t)easize + extra > NXADDR * fs->fs_bsize)
		return (EFBIG);

	eae = malloc(easize + extra, M_TEMP, M_WAITOK);

	liovec.iov_base = eae;
	liovec.iov_len = easize;
	luio.uio_iov = &liovec;
	luio.uio_iovcnt = 1;
	luio.uio_offset = 0;
	luio.uio_resid = easize;
	luio.uio_segflg = UIO_SYSSPACE;
	luio.uio_rw = UIO_READ;
	luio.uio_td = td;

	error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
	if (error) {
		free(eae, M_TEMP);
		return (error);
	}
	*p = eae;
	return (0);
}

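/*
 * The i_ea_area cache is serialized with a simple flag-based sleep
 * lock on the inode (IN_EA_LOCKED / IN_EA_LOCKWAIT), implemented by
 * the pair of helpers below.
 */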
static void
ffs_lock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	while (ip->i_flag & IN_EA_LOCKED) {
		ip->i_flag |= IN_EA_LOCKWAIT;
		msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
		    0);
	}
	ip->i_flag |= IN_EA_LOCKED;
	VI_UNLOCK(vp);
}

static void
ffs_unlock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	if (ip->i_flag & IN_EA_LOCKWAIT)
		wakeup(&ip->i_ea_refs);
	ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
	VI_UNLOCK(vp);
}

static int
ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	int error;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area != NULL) {
		ip->i_ea_refs++;
		ffs_unlock_ea(vp);
		return (0);
	}
	dp = ip->i_din2;
	error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
	if (error) {
		ffs_unlock_ea(vp);
		return (error);
	}
	ip->i_ea_len = dp->di_extsize;
	ip->i_ea_error = 0;
	ip->i_ea_refs++;
	ffs_unlock_ea(vp);
	return (0);
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred,
    struct thread *td)
{
	struct inode *ip;
	struct uio luio;
	struct iovec liovec;
	int error;
	struct ufs2_dinode *dp;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area == NULL) {
		ffs_unlock_ea(vp);
		return (EINVAL);
	}
	dp = ip->i_din2;
	error = ip->i_ea_error;
	if (commit && error == 0) {
		ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
		if (cred == NOCRED)
			cred = vp->v_mount->mnt_cred;
		liovec.iov_base = ip->i_ea_area;
		liovec.iov_len = ip->i_ea_len;
		luio.uio_iov = &liovec;
		luio.uio_iovcnt = 1;
		luio.uio_offset = 0;
		luio.uio_resid = ip->i_ea_len;
		luio.uio_segflg = UIO_SYSSPACE;
		luio.uio_rw = UIO_WRITE;
		luio.uio_td = td;
		/* XXX: I'm not happy about truncating to zero size */
		if (ip->i_ea_len < dp->di_extsize)
			error = ffs_truncate(vp, 0, IO_EXT, cred);
		error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
	}
	if (--ip->i_ea_refs == 0) {
		free(ip->i_ea_area, M_TEMP);
		ip->i_ea_area = NULL;
		ip->i_ea_len = 0;
		ip->i_ea_error = 0;
	}
	ffs_unlock_ea(vp);
	return (error);
}

/*
 * Vnode extattr strategy routine for fifos.
 *
 * We need to check for a read or write of the external attributes.
 * Otherwise we just fall through and do the usual thing.
 */
static int
ffsext_strategy(struct vop_strategy_args *ap)
/*
struct vop_strategy_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct buf *a_bp;
};
*/
{
	struct vnode *vp;
	daddr_t lbn;

	vp = ap->a_vp;
	lbn = ap->a_bp->b_lblkno;
	if (I_IS_UFS2(VTOI(vp)) && lbn < 0 && lbn >= -NXADDR)
		return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
	if (vp->v_type == VFIFO)
		return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
	panic("spec nodes went here");
}

/*
 * Vnode extattr transaction begin.
 */
static int
ffs_openextattr(struct vop_openextattr_args *ap)
/*
struct vop_openextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_closeextattr(struct vop_closeextattr_args *ap)
/*
struct vop_closeextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	int a_commit;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
}

/*
 * Vnode operation to remove a named attribute.
 */
static int
ffs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;

	ip = VTOI(ap->a_vp);
	fs = ITOFS(ip);

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error) {
		/*
		 * ffs_lock_ea is not needed there, because the vnode
		 * must be exclusively locked.
		 */
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	ealength = eapad1 = ealen = eapad2 = 0;

	eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    &p, NULL);
	if (olen == -1) {
		/* delete but nonexistent */
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		return (ENOATTR);
	}
	bcopy(p, &ul, sizeof(ul));
	i = p - eae + ul;
	if (ul != ealength) {
		bcopy(p + ul, p + ealength, easize - i);
		easize += (ealength - ul);
	}
	if (easize > NXADDR * fs->fs_bsize) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
 */
static int
ffs_getextattr(struct vop_getextattr_args *ap)
/*
vop_getextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	u_char *eae, *p;
	unsigned easize;
	int error, ealen;

	ip = VTOI(ap->a_vp);

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    NULL, &p);
	if (ealen >= 0) {
		error = 0;
		if (ap->a_size != NULL)
			*ap->a_size = ealen;
		else if (ap->a_uio != NULL)
			error = uiomove(p, ealen, ap->a_uio);
	} else
		error = ENOATTR;

	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve extended attributes on a vnode.
 */
static int
ffs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	u_char *eae, *p, *pe, *pn;
	unsigned easize;
	uint32_t ul;
	int error, ealen;

	ip = VTOI(ap->a_vp);

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);
	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	error = 0;
	if (ap->a_size != NULL)
		*ap->a_size = 0;
	pe = eae + easize;
	for (p = eae; error == 0 && p < pe; p = pn) {
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		if (pn > pe)
			break;
		p += sizeof(ul);
		if (*p++ != ap->a_attrnamespace)
			continue;
		p++;	/* pad2 */
		ealen = *p;
		if (ap->a_size != NULL) {
			*ap->a_size += ealen + 1;
		} else if (ap->a_uio != NULL) {
			error = uiomove(p, ealen + 1, ap->a_uio);
		}
	}
	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to set a named attribute.
 */
static int
ffs_setextattr(struct vop_setextattr_args *ap)
/*
vop_setextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	ssize_t ealen;
	int olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;

	ip = VTOI(ap->a_vp);
	fs = ITOFS(ip);

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	/* XXX Now unsupported API to delete EAs using NULL uio. */
	if (ap->a_uio == NULL)
		return (EOPNOTSUPP);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	ealen = ap->a_uio->uio_resid;
	if (ealen < 0 || ealen > lblktosize(fs, NXADDR))
		return (EINVAL);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error) {
		/*
		 * ffs_lock_ea is not needed there, because the vnode
		 * must be exclusively locked.
		 */
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
	eapad1 = 8 - (ealength % 8);
	if (eapad1 == 8)
		eapad1 = 0;
	eapad2 = 8 - (ealen % 8);
	if (eapad2 == 8)
		eapad2 = 0;
	ealength += eapad1 + ealen + eapad2;

	eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize,
	    ap->a_attrnamespace, ap->a_name, &p, NULL);
	if (olen == -1) {
		/* new, append at end */
		p = eae + easize;
		easize += ealength;
	} else {
		bcopy(p, &ul, sizeof(ul));
		i = p - eae + ul;
		if (ul != ealength) {
			bcopy(p + ul, p + ealength, easize - i);
			easize += (ealength - ul);
		}
	}
	if (easize > lblktosize(fs, NXADDR)) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	bcopy(&ealength, p, sizeof(ealength));
	p += sizeof(ealength);
	*p++ = ap->a_attrnamespace;
	*p++ = eapad2;
	*p++ = strlen(ap->a_name);
	memcpy(p, ap->a_name, strlen(ap->a_name));
	p += strlen(ap->a_name);
	bzero(p, eapad1);
	p += eapad1;
	error = uiomove(p, ealen, ap->a_uio);
	if (error) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}
	p += ealen;
	bzero(p, eapad2);

	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode pointer to File handle
 */
static int
ffs_vptofh(struct vop_vptofh_args *ap)
/*
vop_vptofh {
	IN struct vnode *a_vp;
	IN struct fid *a_fhp;
};
*/
{
	struct inode *ip;
	struct ufid *ufhp;

	ip = VTOI(ap->a_vp);
	ufhp = (struct ufid *)ap->a_fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}
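
/*
 * The handle filled in here is later decoded by the VFS_FHTOVP() path
 * (ffs_fhtovp() in ffs_vfsops.c); ufid_gen allows stale handles to be
 * rejected if the inode has since been recycled.
 */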

SYSCTL_DECL(_vfs_ffs);
static int use_buf_pager = 0;
SYSCTL_INT(_vfs_ffs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN, &use_buf_pager, 0,
    "Always use buffer pager instead of bmap");

static daddr_t
ffs_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
{

	return (lblkno(VFSTOUFS(vp->v_mount)->um_fs, off));
}

static int
ffs_gbp_getblksz(struct vnode *vp, daddr_t lbn)
{

	return (blksize(VFSTOUFS(vp->v_mount)->um_fs, VTOI(vp), lbn));
}

static int
ffs_getpages(struct vop_getpages_args *ap)
{
	struct vnode *vp;
	struct ufsmount *um;

	vp = ap->a_vp;
	um = VFSTOUFS(vp->v_mount);

	if (!use_buf_pager && um->um_devvp->v_bufobj.bo_bsize <= PAGE_SIZE)
		return (vnode_pager_generic_getpages(vp, ap->a_m, ap->a_count,
		    ap->a_rbehind, ap->a_rahead, NULL, NULL));
	return (vfs_bio_getpages(vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead, ffs_gbp_getblkno, ffs_gbp_getblksz));
}