/*-
 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
 *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/extattr.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include "opt_directio.h"
#include "opt_ffs.h"

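/*
 * ffs_rawread() is implemented in ffs_rawread.c and is only present when
 * the kernel is built with "options DIRECTIO"; it attempts to service
 * IO_DIRECT reads without passing through the buffer cache.
 */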
#ifdef DIRECTIO
extern int	ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif
static vop_fsync_t	ffs_fsync;
static vop_lock1_t	ffs_lock;
static vop_getpages_t	ffs_getpages;
static vop_read_t	ffs_read;
static vop_write_t	ffs_write;
static int	ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
static int	ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
		    struct ucred *cred);
static vop_strategy_t	ffsext_strategy;
static vop_closeextattr_t	ffs_closeextattr;
static vop_deleteextattr_t	ffs_deleteextattr;
static vop_getextattr_t	ffs_getextattr;
static vop_listextattr_t	ffs_listextattr;
static vop_openextattr_t	ffs_openextattr;
static vop_setextattr_t	ffs_setextattr;
static vop_vptofh_t	ffs_vptofh;


/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops1 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_getpages =		ffs_getpages,
	.vop_lock1 =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_vptofh =		ffs_vptofh,
};

struct vop_vector ffs_fifoops1 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_reallocblks =	ffs_reallocblks, /* XXX: really ??? */
	.vop_vptofh =		ffs_vptofh,
};

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops2 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_getpages =		ffs_getpages,
	.vop_lock1 =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};

struct vop_vector ffs_fifoops2 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_lock1 =		ffs_lock,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_strategy =		ffsext_strategy,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};
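
/*
 * Note: the "1" tables above serve UFS1 mounts, the "2" tables UFS2
 * mounts; UFS2 is the variant that carries the extended attribute area,
 * hence the extra extattr entry points.  The choice between them is made
 * at mount time (see ffs_vfsops.c).
 */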

/*
 * Synch an open file.
 */
/* ARGSUSED */
static int
ffs_fsync(struct vop_fsync_args *ap)
{
	int error;

	error = ffs_syncvnode(ap->a_vp, ap->a_waitfor);
	if (error)
		return (error);
	if (ap->a_waitfor == MNT_WAIT &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_SOFTDEP))
		error = softdep_fsync(ap->a_vp);
	return (error);
}

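/*
 * Flush all of a vnode's dirty buffers to disk.
 *
 * A synchronous (MNT_WAIT) request makes two kinds of passes: data
 * blocks first (metadata is skipped on the initial pass), then metadata,
 * retrying up to NIADDR + 1 times while dependency processing keeps
 * redirtying buffers.  Finally the inode itself is pushed out via
 * ffs_update().
 */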
int
ffs_syncvnode(struct vnode *vp, int waitfor)
{
	struct inode *ip = VTOI(vp);
	struct buf *bp;
	struct buf *nbp;
	int s, error, wait, passes, skipmeta;
	ufs_lbn_t lbn;

	wait = (waitfor == MNT_WAIT);
	lbn = lblkno(ip->i_fs, (ip->i_size + ip->i_fs->fs_bsize - 1));

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
	passes = NIADDR + 1;
	skipmeta = 0;
	if (wait)
		skipmeta = 1;
	s = splbio();
	VI_LOCK(vp);
loop:
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs)
		bp->b_vflags &= ~BV_SCANNED;
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		/*
		 * Reasons to skip this buffer: it has already been considered
		 * on this pass, this pass is the first time through on a
		 * synchronous flush request and the buffer being considered
		 * is metadata, the buffer has dependencies that will cause
		 * it to be redirtied and it has not already been deferred,
		 * or it is already being written.
		 */
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if ((skipmeta == 1 && bp->b_lblkno < 0))
			continue;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		if (!wait && !LIST_EMPTY(&bp->b_dep) &&
		    (bp->b_flags & B_DEFERRED) == 0 &&
		    buf_countdeps(bp, 0)) {
			bp->b_flags |= B_DEFERRED;
			BUF_UNLOCK(bp);
			VI_LOCK(vp);
			continue;
		}
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("ffs_fsync: not dirty");
		/*
		 * If this is a synchronous flush request, or it is not a
		 * file or device, start the write on this buffer immediately.
		 */
		if (wait || (vp->v_type != VREG && vp->v_type != VBLK)) {

			/*
			 * On our final pass through, do all I/O synchronously
			 * so that we can find out if our flush is failing
			 * because of write errors.
			 */
			if (passes > 0 || !wait) {
				if ((bp->b_flags & B_CLUSTEROK) && !wait) {
					(void) vfs_bio_awrite(bp);
				} else {
					bremfree(bp);
					splx(s);
					(void) bawrite(bp);
					s = splbio();
				}
			} else {
				bremfree(bp);
				splx(s);
				if ((error = bwrite(bp)) != 0)
					return (error);
				s = splbio();
			}
		} else if ((vp->v_type == VREG) && (bp->b_lblkno >= lbn)) {
			/*
			 * If the buffer is for data that has been truncated
			 * off the file, then throw it away.
			 */
			bremfree(bp);
			bp->b_flags |= B_INVAL | B_NOCACHE;
			splx(s);
			brelse(bp);
			s = splbio();
		} else
			vfs_bio_awrite(bp);

		/*
		 * Since we may have slept during the I/O, we need
		 * to start from a known point.
		 */
		VI_LOCK(vp);
		nbp = TAILQ_FIRST(&vp->v_bufobj.bo_dirty.bv_hd);
	}
	/*
	 * If we were asked to do this synchronously, then go back for
	 * another pass, this time doing the metadata.
	 */
	if (skipmeta) {
		skipmeta = 0;
		goto loop;
	}

	if (wait) {
		bufobj_wwait(&vp->v_bufobj, 3, 0);
		VI_UNLOCK(vp);

		/*
		 * Ensure that any filesystem metadata associated
		 * with the vnode has been written.
		 */
		splx(s);
		if ((error = softdep_sync_metadata(vp)) != 0)
			return (error);
		s = splbio();

		VI_LOCK(vp);
		if (vp->v_bufobj.bo_dirty.bv_cnt > 0) {
			/*
			 * Block devices associated with filesystems may
			 * have new I/O requests posted for them even if
			 * the vnode is locked, so no amount of trying will
			 * get them clean.  Thus we give block devices a
			 * good effort, then just give up.  For all other file
			 * types, go around and try again until it is clean.
			 */
			if (passes > 0) {
				passes -= 1;
				goto loop;
			}
#ifdef DIAGNOSTIC
			if (!vn_isdisk(vp, NULL))
				vprint("ffs_fsync: dirty", vp);
#endif
		}
	}
	VI_UNLOCK(vp);
	splx(s);
	return (ffs_update(vp, wait));
}

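/*
 * Lock a vnode, coping with snapshot files.
 *
 * A snapshot vnode shares an external lock, and the vnode can mutate
 * between snapshot file and regular file while a contender sleeps, so
 * the acquisition is retried until the lock obtained still belongs to
 * the vnode.
 */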
static int
ffs_lock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
		char *file;
		int line;
	} */ *ap;
{
#ifndef NO_FFS_SNAPSHOT
	struct vnode *vp;
	int flags;
	struct lock *lkp;
	int result;

	switch (ap->a_flags & LK_TYPE_MASK) {
	case LK_SHARED:
	case LK_UPGRADE:
	case LK_EXCLUSIVE:
		vp = ap->a_vp;
		flags = ap->a_flags;
		for (;;) {
			/*
			 * vnode interlock must be held to ensure that
			 * the possibly external lock isn't freed,
			 * e.g. when mutating from snapshot file vnode
			 * to regular file vnode.
			 */
			if ((flags & LK_INTERLOCK) == 0) {
				VI_LOCK(vp);
				flags |= LK_INTERLOCK;
			}
			lkp = vp->v_vnlock;
			result = _lockmgr(lkp, flags, VI_MTX(vp), ap->a_td,
			    ap->a_file, ap->a_line);
			if (lkp == vp->v_vnlock || result != 0)
				break;
			/*
			 * Apparent success, except that the vnode
			 * mutated between snapshot file vnode and
			 * regular file vnode while this process
			 * slept.  The lock currently held is not the
			 * right lock.  Release it, and try to get the
			 * new lock.
			 */
			(void) _lockmgr(lkp, LK_RELEASE, VI_MTX(vp), ap->a_td,
			    ap->a_file, ap->a_line);
			if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
				flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
			flags &= ~LK_INTERLOCK;
		}
		break;
	default:
		result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
	}
	return (result);
#else
	return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
#endif
}

/*
 * Vnode op for reading.
 */
/* ARGSUSED */
static int
ffs_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;
	int seqcount;
	int ioflag;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extread(vp, uio, ioflag));
#else
		panic("ffs_read+IO_EXT");
#endif
#ifdef DIRECTIO
	if ((ioflag & IO_DIRECT) != 0) {
		int workdone;

		error = ffs_rawread(vp, uio, &workdone);
		if (error != 0 || workdone != 0)
			return (error);
	}
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if (uio->uio_offset < ip->i_size &&
	    uio->uio_offset >= fs->fs_maxfilesize)
		return (EOVERFLOW);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type (fragment or full block,
		 * depending).
		 */
		size = blksize(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ip->i_size) {
			/*
			 * Don't do readahead if this is the end of the file.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			/*
			 * Otherwise if we are allowed to cluster,
			 * grab as much as we can.
			 *
			 * XXX  This may not be a win if we are not
			 * doing sequential access.
			 */
			error = cluster_read(vp, ip->i_size, lbn,
			    size, NOCRED, blkoffset + uio->uio_resid,
			    seqcount, &bp);
		} else if (seqcount > 1) {
			/*
			 * If we are NOT allowed to cluster, then
			 * if we appear to be acting sequentially,
			 * fire off a request for a readahead
			 * as well as a read.  Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			int nextsize = blksize(fs, ip, nextlbn);
			error = breadn(vp, lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		} else {
			/*
			 * Failing all of the above, just read what the
			 * user asked for.  Interestingly, the same as
			 * the first option above.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing.  The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it.  We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it, so it must have come
	 * from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
		VI_LOCK(vp);
		ip->i_flag |= IN_ACCESS;
		VI_UNLOCK(vp);
	}
	return (error);
}

/*
 * Vnode op for writing.
 */
static int
ffs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	struct thread *td;
	ufs_lbn_t lbn;
	off_t osize;
	int seqcount;
	int blkoffset, error, flags, ioflag, resid, size, xfersize;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
#else
		panic("ffs_write+IO_EXT");
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		panic("ffs_write: dir write");
		break;
	default:
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
		    (int)uio->uio_offset,
		    (int)uio->uio_resid
		);
	}

	KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
	KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	td = uio->uio_td;
	if (vp->v_type == VREG && td != NULL) {
		PROC_LOCK(td->td_proc);
		if (uio->uio_offset + uio->uio_resid >
		    lim_cur(td->td_proc, RLIMIT_FSIZE)) {
			psignal(td->td_proc, SIGXFSZ);
			PROC_UNLOCK(td->td_proc);
			return (EFBIG);
		}
		PROC_UNLOCK(td->td_proc);
	}

	resid = uio->uio_resid;
	osize = ip->i_size;
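	/*
	 * Fold the sequential-access hint from the upper layers into the
	 * BA_ flags passed to UFS_BALLOC(), clamping it to BA_SEQMAX.
	 */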
	if (seqcount > BA_SEQMAX)
		flags = BA_SEQMAX << BA_SEQSHIFT;
	else
		flags = seqcount << BA_SEQSHIFT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		/* XXX is uio->uio_offset the right thing here? */
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ap->a_cred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			DIP_SET(ip, i_size, ip->i_size);
		}

		size = blksize(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(vp, bp, ip->i_size, seqcount);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
	    ap->a_cred) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
			ip->i_mode &= ~(ISUID | ISGID);
			DIP_SET(ip, i_mode, ip->i_mode);
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_NORMAL | (ioflag & IO_SYNC),
			    ap->a_cred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * Get page routine: satisfy a paging request from pages that are already
 * valid, otherwise fall back to the generic vnode pager.
 */
static int
ffs_getpages(ap)
	struct vop_getpages_args *ap;
{
	int i;
	vm_page_t mreq;
	int pcount;

	pcount = round_page(ap->a_count) / PAGE_SIZE;
	mreq = ap->a_m[ap->a_reqpage];

	/*
	 * if ANY DEV_BSIZE blocks are valid on a large filesystem block,
	 * then the entire page is valid.  Since the page may be mapped,
	 * user programs might reference data beyond the actual end of file
	 * occurring within the page.  We have to zero that data.
	 */
	VM_OBJECT_LOCK(mreq->object);
	if (mreq->valid) {
		if (mreq->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(mreq, TRUE);
		vm_page_lock_queues();
		for (i = 0; i < pcount; i++) {
			if (i != ap->a_reqpage) {
				vm_page_free(ap->a_m[i]);
			}
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(mreq->object);
		return (VM_PAGER_OK);
	}
	VM_OBJECT_UNLOCK(mreq->object);

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count,
	    ap->a_reqpage));
}


/*
 * Extended attribute area reading.
 */
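/*
 * Note that the extended attribute area is addressed with negative
 * logical block numbers: logical block lbn of the EA area is read as
 * block -1 - lbn, which the block allocation and strategy code maps
 * onto the external attribute blocks (di_extb) of the UFS2 dinode.
 */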
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extread: mode");
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type (fragment or full block,
		 * depending).
		 */
		size = sblksize(fs, dp->di_extsize, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
			/*
			 * Don't do readahead if this is the end of the
			 * extended attribute area.
			 */
			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
		} else {
			/*
			 * If we have a second block, then
			 * fire off a request for a readahead
			 * as well as a read.  Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

			nextlbn = -1 - nextlbn;
			error = breadn(vp, -1 - lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing.  The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it.  We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it, so it must have come
	 * from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
		VI_LOCK(vp);
		ip->i_flag |= IN_ACCESS;
		VI_UNLOCK(vp);
	}
	return (error);
}

/*
 * Extended attribute area writing.
 */
static int
ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	int blkoffset, error, flags, resid, size, xfersize;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

	KASSERT(!(ip->i_flag & IN_SPACECOUNTED), ("inode %u: inode is dead",
	    ip->i_number));

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extwrite: mode");
#endif

	if (ioflag & IO_APPEND)
		uio->uio_offset = dp->di_extsize;
	KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
	KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
	if ((uoff_t)uio->uio_offset + uio->uio_resid > NXADDR * fs->fs_bsize)
		return (EFBIG);

	resid = uio->uio_resid;
	osize = dp->di_extsize;
	flags = IO_EXT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ucred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		if (uio->uio_offset + xfersize > dp->di_extsize)
			dp->di_extsize = uio->uio_offset + xfersize;

		size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    xfersize + blkoffset == fs->fs_bsize ||
		    (ioflag & (IO_ASYNC | IO_DIRECT)))
			bawrite(bp);
		else
			bdwrite(bp);
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
		if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) {
			ip->i_mode &= ~(ISUID | ISGID);
			dp->di_mode = ip->i_mode;
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_EXT | (ioflag&IO_SYNC), ucred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}


/*
 * Locate a named extended attribute.
 *
 * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 * the length of the EA, and possibly the pointer to the entry and to the data.
 */
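/*
 * Each record in the EA area has the following layout (the record as a
 * whole is padded to a multiple of 8 bytes):
 *
 *	uint32_t  length	total record length, including this word
 *	uint8_t   namespace	attribute namespace
 *	uint8_t   contentpadlen	pad bytes at the end of the record (eapad2)
 *	uint8_t   namelength	length of the attribute name
 *	char      name[]	name, padded (eapad1) to an 8-byte boundary
 *	char      contents[]	value, followed by contentpadlen pad bytes
 */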
static int
ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name,
    u_char **eap, u_char **eac)
{
	u_char *p, *pe, *pn, *p0;
	int eapad1, eapad2, ealength, ealen, nlen;
	uint32_t ul;

	pe = ptr + length;
	nlen = strlen(name);

	for (p = ptr; p < pe; p = pn) {
		p0 = p;
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		/* make sure this entry is complete */
		if (pn > pe)
			break;
		p += sizeof(uint32_t);
		if (*p != nspace)
			continue;
		p++;
		eapad2 = *p++;
		if (*p != nlen)
			continue;
		p++;
		if (bcmp(p, name, nlen))
			continue;
		ealength = sizeof(uint32_t) + 3 + nlen;
		eapad1 = 8 - (ealength % 8);
		if (eapad1 == 8)
			eapad1 = 0;
		ealength += eapad1;
		ealen = ul - ealength - eapad2;
		p += nlen + eapad1;
		if (eap != NULL)
			*eap = p0;
		if (eac != NULL)
			*eac = p;
		return (ealen);
	}
	return (-1);
}

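/*
 * Read the entire extended attribute area of vp into a buffer allocated
 * with malloc(M_TEMP); on success *p points at the buffer, which has room
 * for "extra" additional bytes.  The caller is responsible for freeing it.
 */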
static int
ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct uio luio;
	struct iovec liovec;
	int easize, error;
	u_char *eae;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;
	easize = dp->di_extsize;
	if ((uoff_t)easize + extra > NXADDR * fs->fs_bsize)
		return (EFBIG);

	eae = malloc(easize + extra, M_TEMP, M_WAITOK);

	liovec.iov_base = eae;
	liovec.iov_len = easize;
	luio.uio_iov = &liovec;
	luio.uio_iovcnt = 1;
	luio.uio_offset = 0;
	luio.uio_resid = easize;
	luio.uio_segflg = UIO_SYSSPACE;
	luio.uio_rw = UIO_READ;
	luio.uio_td = td;

	error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
	if (error) {
		free(eae, M_TEMP);
		return (error);
	}
	*p = eae;
	return (0);
}

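/*
 * Begin an extended attribute transaction: snapshot the on-disk EA area
 * into ip->i_ea_area so that subsequent get/set/delete operations work
 * on the in-memory copy.  Fails with EBUSY if a transaction is already
 * open.
 */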
static int
ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	int error;

	ip = VTOI(vp);

	if (ip->i_ea_area != NULL)
		return (EBUSY);
	dp = ip->i_din2;
	error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
	if (error)
		return (error);
	ip->i_ea_len = dp->di_extsize;
	ip->i_ea_error = 0;
	return (0);
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct uio luio;
	struct iovec liovec;
	int error;
	struct ufs2_dinode *dp;

	ip = VTOI(vp);
	if (ip->i_ea_area == NULL)
		return (EINVAL);
	dp = ip->i_din2;
	error = ip->i_ea_error;
	if (commit && error == 0) {
		if (cred == NOCRED)
			cred = vp->v_mount->mnt_cred;
		liovec.iov_base = ip->i_ea_area;
		liovec.iov_len = ip->i_ea_len;
		luio.uio_iov = &liovec;
		luio.uio_iovcnt = 1;
		luio.uio_offset = 0;
		luio.uio_resid = ip->i_ea_len;
		luio.uio_segflg = UIO_SYSSPACE;
		luio.uio_rw = UIO_WRITE;
		luio.uio_td = td;
		/* XXX: I'm not happy about truncating to zero size */
		if (ip->i_ea_len < dp->di_extsize)
			error = ffs_truncate(vp, 0, IO_EXT, cred, td);
		if (error == 0)
			error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
	}
	free(ip->i_ea_area, M_TEMP);
	ip->i_ea_area = NULL;
	ip->i_ea_len = 0;
	ip->i_ea_error = 0;
	return (error);
}
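
/*
 * Each of the EA vnode operations below can be invoked either inside an
 * explicit transaction (between VOP_OPENEXTATTR and VOP_CLOSEEXTATTR) or
 * on its own.  In the stand-alone case the operation opens a transaction
 * itself and commits (or aborts) it before returning.
 */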

/*
 * Vnode extattr strategy routine for fifos.
 *
 * We need to check for a read or write of the external attributes.
 * Otherwise we just fall through and do the usual thing.
 */
static int
ffsext_strategy(struct vop_strategy_args *ap)
/*
struct vop_strategy_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct buf *a_bp;
};
*/
{
	struct vnode *vp;
	daddr_t lbn;

	vp = ap->a_vp;
	lbn = ap->a_bp->b_lblkno;
	if (VTOI(vp)->i_fs->fs_magic == FS_UFS2_MAGIC &&
	    lbn < 0 && lbn >= -NXADDR)
		return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
	if (vp->v_type == VFIFO)
		return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
	panic("spec nodes went here");
}

/*
 * Vnode extattr transaction start (open).
 */
static int
ffs_openextattr(struct vop_openextattr_args *ap)
/*
struct vop_openextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
}


/*
 * Vnode extattr transaction commit/abort (close).
 */
static int
ffs_closeextattr(struct vop_closeextattr_args *ap)
/*
struct vop_closeextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	int a_commit;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
}

/*
 * Vnode operation to remove a named attribute.
 */
static int
ffs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;
	int stand_alone;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, IWRITE);
	if (error) {
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	if (ip->i_ea_area == NULL) {
		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		stand_alone = 1;
	} else {
		stand_alone = 0;
	}

	ealength = eapad1 = ealen = eapad2 = 0;

	eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    &p, NULL);
	if (olen == -1) {
		/* delete but nonexistent */
		free(eae, M_TEMP);
		if (stand_alone)
			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		return (ENOATTR);
	}
	bcopy(p, &ul, sizeof ul);
	i = p - eae + ul;
	if (ul != ealength) {
		bcopy(p + ul, p + ealength, easize - i);
		easize += (ealength - ul);
	}
	if (easize > NXADDR * fs->fs_bsize) {
		free(eae, M_TEMP);
		if (stand_alone)
			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		else if (ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	if (stand_alone)
		error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
 */
static int
ffs_getextattr(struct vop_getextattr_args *ap)
/*
vop_getextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p;
	unsigned easize;
	int error, ealen, stand_alone;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, IREAD);
	if (error)
		return (error);

	if (ip->i_ea_area == NULL) {
		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		stand_alone = 1;
	} else {
		stand_alone = 0;
	}
	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    NULL, &p);
	if (ealen >= 0) {
		error = 0;
		if (ap->a_size != NULL)
			*ap->a_size = ealen;
		else if (ap->a_uio != NULL)
			error = uiomove(p, ealen, ap->a_uio);
	} else
		error = ENOATTR;
	if (stand_alone)
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve extended attributes on a vnode.
 */
static int
ffs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p, *pe, *pn;
	unsigned easize;
	uint32_t ul;
	int error, ealen, stand_alone;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, IREAD);
	if (error)
		return (error);

	if (ip->i_ea_area == NULL) {
		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		stand_alone = 1;
	} else {
		stand_alone = 0;
	}
	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	error = 0;
	if (ap->a_size != NULL)
		*ap->a_size = 0;
	pe = eae + easize;
	for (p = eae; error == 0 && p < pe; p = pn) {
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		if (pn > pe)
			break;
		p += sizeof(ul);
		if (*p++ != ap->a_attrnamespace)
			continue;
		p++;	/* pad2 */
		ealen = *p;
		if (ap->a_size != NULL) {
			*ap->a_size += ealen + 1;
		} else if (ap->a_uio != NULL) {
			error = uiomove(p, ealen + 1, ap->a_uio);
		}
	}
	if (stand_alone)
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to set a named attribute.
 */
static int
ffs_setextattr(struct vop_setextattr_args *ap)
/*
vop_setextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;
	int stand_alone;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	/* XXX Now unsupported API to delete EAs using NULL uio. */
	if (ap->a_uio == NULL)
		return (EOPNOTSUPP);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, IWRITE);
	if (error) {
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	if (ip->i_ea_area == NULL) {
		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		stand_alone = 1;
	} else {
		stand_alone = 0;
	}

	ealen = ap->a_uio->uio_resid;
	ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
	eapad1 = 8 - (ealength % 8);
	if (eapad1 == 8)
		eapad1 = 0;
	eapad2 = 8 - (ealen % 8);
	if (eapad2 == 8)
		eapad2 = 0;
	ealength += eapad1 + ealen + eapad2;

	eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize,
	    ap->a_attrnamespace, ap->a_name, &p, NULL);
	if (olen == -1) {
		/* new, append at end */
		p = eae + easize;
		easize += ealength;
	} else {
		bcopy(p, &ul, sizeof ul);
		i = p - eae + ul;
		if (ul != ealength) {
			bcopy(p + ul, p + ealength, easize - i);
			easize += (ealength - ul);
		}
	}
	if (easize > NXADDR * fs->fs_bsize) {
		free(eae, M_TEMP);
		if (stand_alone)
			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		else if (ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	bcopy(&ealength, p, sizeof(ealength));
	p += sizeof(ealength);
	*p++ = ap->a_attrnamespace;
	*p++ = eapad2;
	*p++ = strlen(ap->a_name);
	strcpy(p, ap->a_name);
	p += strlen(ap->a_name);
	bzero(p, eapad1);
	p += eapad1;
	error = uiomove(p, ealen, ap->a_uio);
	if (error) {
		free(eae, M_TEMP);
		if (stand_alone)
			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		else if (ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}
	p += ealen;
	bzero(p, eapad2);

	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	if (stand_alone)
		error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode pointer to File handle
 */
static int
ffs_vptofh(struct vop_vptofh_args *ap)
/*
vop_vptofh {
	IN struct vnode *a_vp;
	IN struct fid *a_fhp;
};
*/
{
	struct inode *ip;
	struct ufid *ufhp;

	ip = VTOI(ap->a_vp);
	ufhp = (struct ufid *)ap->a_fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}