1 /*-
2 * Copyright (c) 2002 Networks Associates Technology, Inc.
3 * All rights reserved.
4 *
5 * This software was developed for the FreeBSD Project by Marshall
6 * Kirk McKusick and Network Associates Laboratories, the Security
7 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
8 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
9 * research program
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * Copyright (c) 1982, 1986, 1989, 1993
33 * The Regents of the University of California. All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 4. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 *
59 * @(#)ffs_alloc.c 8.18 (Berkeley) 5/26/95
60 */
61
62 #include <sys/cdefs.h>
63 __FBSDID("$FreeBSD$");
64
65 #include "opt_quota.h"
66
67 #include <sys/param.h>
68 #include <sys/capsicum.h>
69 #include <sys/systm.h>
70 #include <sys/bio.h>
71 #include <sys/buf.h>
72 #include <sys/conf.h>
73 #include <sys/fcntl.h>
74 #include <sys/file.h>
75 #include <sys/filedesc.h>
76 #include <sys/priv.h>
77 #include <sys/proc.h>
78 #include <sys/vnode.h>
79 #include <sys/mount.h>
80 #include <sys/kernel.h>
81 #include <sys/syscallsubr.h>
82 #include <sys/sysctl.h>
83 #include <sys/syslog.h>
84 #include <sys/taskqueue.h>
85
86 #include <security/audit/audit.h>
87
88 #include <geom/geom.h>
89
90 #include <ufs/ufs/dir.h>
91 #include <ufs/ufs/extattr.h>
92 #include <ufs/ufs/quota.h>
93 #include <ufs/ufs/inode.h>
94 #include <ufs/ufs/ufs_extern.h>
95 #include <ufs/ufs/ufsmount.h>
96
97 #include <ufs/ffs/fs.h>
98 #include <ufs/ffs/ffs_extern.h>
99 #include <ufs/ffs/softdep.h>
100
101 typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
102 int size, int rsize);
103
104 static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
105 static ufs2_daddr_t
106 ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
107 static void ffs_blkfree_cg(struct ufsmount *, struct fs *,
108 struct vnode *, ufs2_daddr_t, long, ino_t,
109 struct workhead *);
110 static void ffs_blkfree_trim_completed(struct bio *);
111 static void ffs_blkfree_trim_task(void *ctx, int pending __unused);
112 #ifdef INVARIANTS
113 static int ffs_checkblk(struct inode *, ufs2_daddr_t, long);
114 #endif
115 static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int);
116 static ino_t ffs_dirpref(struct inode *);
117 static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
118 int, int);
119 static ufs2_daddr_t ffs_hashalloc
120 (struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
121 static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
122 int);
123 static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
124 static int ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
125 static int ffs_reallocblks_ufs2(struct vop_reallocblks_args *);
126
127 /*
128 * Allocate a block in the filesystem.
129 *
130 * The size of the requested block is given, which must be some
131 * multiple of fs_fsize and <= fs_bsize.
132 * A preference may be optionally specified. If a preference is given
133 * the following hierarchy is used to allocate a block:
134 * 1) allocate the requested block.
135 * 2) allocate a rotationally optimal block in the same cylinder.
136 * 3) allocate a block in the same cylinder group.
137 *      4) quadratically rehash into other cylinder groups, until an
138 * available block is located.
139 * If no block preference is given the following hierarchy is used
140 * to allocate a block:
141 * 1) allocate a block in the cylinder group that contains the
142 * inode for the file.
143 *      2) quadratically rehash into other cylinder groups, until an
144 * available block is located.
145 */
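/*
 * Illustrative sketch only (not part of the allocator): a caller such as
 * the block-extension path in ffs_balloc typically requests a block with
 * a preference computed by ffs_blkpref_*(), with the per-mount UFS lock
 * held (see the mtx_assert below), along the lines of
 *
 *	error = ffs_alloc(ip, lbn,
 *	    ffs_blkpref_ufs2(ip, lbn, indx, &bap[0]),
 *	    fs->fs_bsize, flags, cred, &newb);
 *
 * On success, *bnp (newb above) holds the first fragment number of the
 * newly allocated block; on failure, ENOSPC (or a quota error) is
 * returned and *bnp is left zero.
 */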
146 int
147 ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
148 struct inode *ip;
149 ufs2_daddr_t lbn, bpref;
150 int size, flags;
151 struct ucred *cred;
152 ufs2_daddr_t *bnp;
153 {
154 struct fs *fs;
155 struct ufsmount *ump;
156 ufs2_daddr_t bno;
157 u_int cg, reclaimed;
158 static struct timeval lastfail;
159 static int curfail;
160 int64_t delta;
161 #ifdef QUOTA
162 int error;
163 #endif
164
165 *bnp = 0;
166 ump = ITOUMP(ip);
167 fs = ump->um_fs;
168 mtx_assert(UFS_MTX(ump), MA_OWNED);
169 #ifdef INVARIANTS
170 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
171 printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
172 devtoname(ump->um_dev), (long)fs->fs_bsize, size,
173 fs->fs_fsmnt);
174 panic("ffs_alloc: bad size");
175 }
176 if (cred == NOCRED)
177 panic("ffs_alloc: missing credential");
178 #endif /* INVARIANTS */
179 reclaimed = 0;
180 retry:
181 #ifdef QUOTA
182 UFS_UNLOCK(ump);
183 error = chkdq(ip, btodb(size), cred, 0);
184 if (error)
185 return (error);
186 UFS_LOCK(ump);
187 #endif
188 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
189 goto nospace;
190 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
191 freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
192 goto nospace;
193 if (bpref >= fs->fs_size)
194 bpref = 0;
195 if (bpref == 0)
196 cg = ino_to_cg(fs, ip->i_number);
197 else
198 cg = dtog(fs, bpref);
199 bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
200 if (bno > 0) {
201 delta = btodb(size);
202 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
203 if (flags & IO_EXT)
204 ip->i_flag |= IN_CHANGE;
205 else
206 ip->i_flag |= IN_CHANGE | IN_UPDATE;
207 *bnp = bno;
208 return (0);
209 }
210 nospace:
211 #ifdef QUOTA
212 UFS_UNLOCK(ump);
213 /*
214 * Restore user's disk quota because allocation failed.
215 */
216 (void) chkdq(ip, -btodb(size), cred, FORCE);
217 UFS_LOCK(ump);
218 #endif
219 if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
220 reclaimed = 1;
221 softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
222 goto retry;
223 }
224 UFS_UNLOCK(ump);
225 if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
226 ffs_fserr(fs, ip->i_number, "filesystem full");
227 uprintf("\n%s: write failed, filesystem is full\n",
228 fs->fs_fsmnt);
229 }
230 return (ENOSPC);
231 }
232
233 /*
234 * Reallocate a fragment to a bigger size
235 *
236 * The number and size of the old block are given, and a preference
237 * and new size are also specified. The allocator attempts to extend
238 * the original block. Failing that, the regular block allocator is
239 * invoked to get an appropriate block.
240 */
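/*
 * A worked example of the space/time optimization switch below, assuming
 * the common default fs_minfree of 8%: while optimizing for space, the
 * policy flips to FS_OPTTIME once the count of free fragments drops to
 * half of the minimum free reserve (8% / 2 = 4% of fs_dsize); while
 * optimizing for time, it flips back to FS_OPTSPACE once fragmentation
 * grows to within 2% of the reserve (8% - 2% = 6% of fs_dsize). The gap
 * between the two thresholds provides hysteresis so the policy does not
 * flap on every allocation.
 */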
241 int
242 ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
243 struct inode *ip;
244 ufs2_daddr_t lbprev;
245 ufs2_daddr_t bprev;
246 ufs2_daddr_t bpref;
247 int osize, nsize, flags;
248 struct ucred *cred;
249 struct buf **bpp;
250 {
251 struct vnode *vp;
252 struct fs *fs;
253 struct buf *bp;
254 struct ufsmount *ump;
255 u_int cg, request, reclaimed;
256 int error, gbflags;
257 ufs2_daddr_t bno;
258 static struct timeval lastfail;
259 static int curfail;
260 int64_t delta;
261
262 vp = ITOV(ip);
263 ump = ITOUMP(ip);
264 fs = ump->um_fs;
265 bp = NULL;
266 gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
267
268 mtx_assert(UFS_MTX(ump), MA_OWNED);
269 #ifdef INVARIANTS
270 if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
271 panic("ffs_realloccg: allocation on suspended filesystem");
272 if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
273 (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
274 printf(
275 "dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
276 devtoname(ump->um_dev), (long)fs->fs_bsize, osize,
277 nsize, fs->fs_fsmnt);
278 panic("ffs_realloccg: bad size");
279 }
280 if (cred == NOCRED)
281 panic("ffs_realloccg: missing credential");
282 #endif /* INVARIANTS */
283 reclaimed = 0;
284 retry:
285 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
286 freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
287 goto nospace;
288 }
289 if (bprev == 0) {
290 printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
291 devtoname(ump->um_dev), (long)fs->fs_bsize, (intmax_t)bprev,
292 fs->fs_fsmnt);
293 panic("ffs_realloccg: bad bprev");
294 }
295 UFS_UNLOCK(ump);
296 /*
297 * Allocate the extra space in the buffer.
298 */
299 error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp);
300 if (error) {
301 brelse(bp);
302 return (error);
303 }
304
305 if (bp->b_blkno == bp->b_lblkno) {
306 if (lbprev >= NDADDR)
307 panic("ffs_realloccg: lbprev out of range");
308 bp->b_blkno = fsbtodb(fs, bprev);
309 }
310
311 #ifdef QUOTA
312 error = chkdq(ip, btodb(nsize - osize), cred, 0);
313 if (error) {
314 brelse(bp);
315 return (error);
316 }
317 #endif
318 /*
319 * Check for extension in the existing location.
320 */
321 *bpp = NULL;
322 cg = dtog(fs, bprev);
323 UFS_LOCK(ump);
324 bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
325 if (bno) {
326 if (bp->b_blkno != fsbtodb(fs, bno))
327 panic("ffs_realloccg: bad blockno");
328 delta = btodb(nsize - osize);
329 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
330 if (flags & IO_EXT)
331 ip->i_flag |= IN_CHANGE;
332 else
333 ip->i_flag |= IN_CHANGE | IN_UPDATE;
334 allocbuf(bp, nsize);
335 bp->b_flags |= B_DONE;
336 vfs_bio_bzero_buf(bp, osize, nsize - osize);
337 if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
338 vfs_bio_set_valid(bp, osize, nsize - osize);
339 *bpp = bp;
340 return (0);
341 }
342 /*
343 * Allocate a new disk location.
344 */
345 if (bpref >= fs->fs_size)
346 bpref = 0;
347 switch ((int)fs->fs_optim) {
348 case FS_OPTSPACE:
349 /*
350 * Allocate an exact sized fragment. Although this makes
351 * best use of space, we will waste time relocating it if
352 * the file continues to grow. If the fragmentation is
353 * less than half of the minimum free reserve, we choose
354 * to begin optimizing for time.
355 */
356 request = nsize;
357 if (fs->fs_minfree <= 5 ||
358 fs->fs_cstotal.cs_nffree >
359 (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
360 break;
361 log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
362 fs->fs_fsmnt);
363 fs->fs_optim = FS_OPTTIME;
364 break;
365 case FS_OPTTIME:
366 /*
367 * At this point we have discovered a file that is trying to
368 * grow a small fragment to a larger fragment. To save time,
369 * we allocate a full sized block, then free the unused portion.
370 * If the file continues to grow, the `ffs_fragextend' call
371 * above will be able to grow it in place without further
372 * copying. If aberrant programs cause disk fragmentation to
373 * grow within 2% of the free reserve, we choose to begin
374 * optimizing for space.
375 */
376 request = fs->fs_bsize;
377 if (fs->fs_cstotal.cs_nffree <
378 (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
379 break;
380 log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
381 fs->fs_fsmnt);
382 fs->fs_optim = FS_OPTSPACE;
383 break;
384 default:
385 printf("dev = %s, optim = %ld, fs = %s\n",
386 devtoname(ump->um_dev), (long)fs->fs_optim, fs->fs_fsmnt);
387 panic("ffs_realloccg: bad optim");
388 /* NOTREACHED */
389 }
390 bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
391 if (bno > 0) {
392 bp->b_blkno = fsbtodb(fs, bno);
393 if (!DOINGSOFTDEP(vp))
394 ffs_blkfree(ump, fs, ump->um_devvp, bprev, (long)osize,
395 ip->i_number, vp->v_type, NULL);
396 delta = btodb(nsize - osize);
397 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
398 if (flags & IO_EXT)
399 ip->i_flag |= IN_CHANGE;
400 else
401 ip->i_flag |= IN_CHANGE | IN_UPDATE;
402 allocbuf(bp, nsize);
403 bp->b_flags |= B_DONE;
404 vfs_bio_bzero_buf(bp, osize, nsize - osize);
405 if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
406 vfs_bio_set_valid(bp, osize, nsize - osize);
407 *bpp = bp;
408 return (0);
409 }
410 #ifdef QUOTA
411 UFS_UNLOCK(ump);
412 /*
413 * Restore user's disk quota because allocation failed.
414 */
415 (void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
416 UFS_LOCK(ump);
417 #endif
418 nospace:
419 /*
420 * no space available
421 */
422 if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
423 reclaimed = 1;
424 UFS_UNLOCK(ump);
425 if (bp) {
426 brelse(bp);
427 bp = NULL;
428 }
429 UFS_LOCK(ump);
430 softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
431 goto retry;
432 }
433 UFS_UNLOCK(ump);
434 if (bp)
435 brelse(bp);
436 if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
437 ffs_fserr(fs, ip->i_number, "filesystem full");
438 uprintf("\n%s: write failed, filesystem is full\n",
439 fs->fs_fsmnt);
440 }
441 return (ENOSPC);
442 }
443
444 /*
445 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
446 *
447 * The vnode and an array of buffer pointers for a range of sequential
448 * logical blocks to be made contiguous are given. The allocator attempts
449 * to find a range of sequential blocks starting as close as possible
450 * to the end of the allocation for the logical block immediately
451 * preceding the current range. If successful, the physical block numbers
452 * in the buffer pointers and in the inode are changed to reflect the new
453 * allocation. If unsuccessful, the allocation is left unchanged. The
454 * success in doing the reallocation is returned. Note that the error
455 * return is not reflected back to the user. Rather the previous block
456 * allocation will be used.
457 */
458
459 SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");
460
461 static int doasyncfree = 1;
462 SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
463 "do not force synchronous writes when blocks are reallocated");
464
465 static int doreallocblks = 1;
466 SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0,
467 "enable block reallocation");
468
469 static int maxclustersearch = 10;
470 SYSCTL_INT(_vfs_ffs, OID_AUTO, maxclustersearch, CTLFLAG_RW, &maxclustersearch,
471 0, "max number of cylinder groups to search for contiguous blocks");
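/*
 * The knobs above are exposed as sysctls. For example, block reallocation
 * can be disabled at run time with
 *
 *	sysctl vfs.ffs.doreallocblks=0
 *
 * and the breadth of the cluster search tuned via vfs.ffs.maxclustersearch.
 * (Usage sketch only; the defaults declared above are normally adequate.)
 */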
472
473 #ifdef DEBUG
474 static volatile int prtrealloc = 0;
475 #endif
476
477 int
478 ffs_reallocblks(ap)
479 struct vop_reallocblks_args /* {
480 struct vnode *a_vp;
481 struct cluster_save *a_buflist;
482 } */ *ap;
483 {
484 struct ufsmount *ump;
485
486 /*
487 * If the underlying device can do deletes, then skip reallocating
488 * the blocks of this file into contiguous sequences. Devices that
489 * benefit from BIO_DELETE also benefit from not moving the data.
490 * These devices are typically flash-based and therefore benefit less from this
491 * optimization. Also skip if reallocblks has been disabled globally.
492 */
493 ump = ap->a_vp->v_mount->mnt_data;
494 if (ump->um_candelete || doreallocblks == 0)
495 return (ENOSPC);
496
497 /*
498 * We can't wait in softdep prealloc as it may fsync and recurse
499 * here. Instead we simply fail to reallocate blocks if this
500 * rare condition arises.
501 */
502 if (DOINGSOFTDEP(ap->a_vp))
503 if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
504 return (ENOSPC);
505 if (ump->um_fstype == UFS1)
506 return (ffs_reallocblks_ufs1(ap));
507 return (ffs_reallocblks_ufs2(ap));
508 }
509
510 static int
511 ffs_reallocblks_ufs1(ap)
512 struct vop_reallocblks_args /* {
513 struct vnode *a_vp;
514 struct cluster_save *a_buflist;
515 } */ *ap;
516 {
517 struct fs *fs;
518 struct inode *ip;
519 struct vnode *vp;
520 struct buf *sbp, *ebp;
521 ufs1_daddr_t *bap, *sbap, *ebap;
522 struct cluster_save *buflist;
523 struct ufsmount *ump;
524 ufs_lbn_t start_lbn, end_lbn;
525 ufs1_daddr_t soff, newblk, blkno;
526 ufs2_daddr_t pref;
527 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
528 int i, cg, len, start_lvl, end_lvl, ssize;
529
530 vp = ap->a_vp;
531 ip = VTOI(vp);
532 ump = ITOUMP(ip);
533 fs = ump->um_fs;
534 /*
535 * If we are not tracking block clusters or if we have less than 4%
536 * free blocks left, then do not attempt to cluster. Running with
537 * less than 5% free block reserve is not recommended and those that
538 * choose to do so do not expect to have good file layout.
539 */
540 if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
541 return (ENOSPC);
542 buflist = ap->a_buflist;
543 len = buflist->bs_nchildren;
544 start_lbn = buflist->bs_children[0]->b_lblkno;
545 end_lbn = start_lbn + len - 1;
546 #ifdef INVARIANTS
547 for (i = 0; i < len; i++)
548 if (!ffs_checkblk(ip,
549 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
550 panic("ffs_reallocblks: unallocated block 1");
551 for (i = 1; i < len; i++)
552 if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
553 panic("ffs_reallocblks: non-logical cluster");
554 blkno = buflist->bs_children[0]->b_blkno;
555 ssize = fsbtodb(fs, fs->fs_frag);
556 for (i = 1; i < len - 1; i++)
557 if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
558 panic("ffs_reallocblks: non-physical cluster %d", i);
559 #endif
560 /*
561 * If the cluster crosses the boundary for the first indirect
562 * block, leave space for the indirect block. Indirect blocks
563 * are initially laid out in a position after the last direct
564 * block. Block reallocation would usually destroy locality by
565 * moving the indirect block out of the way to make room for
566 * data blocks if we didn't compensate here. We should also do
567 * this for other indirect block boundaries, but it is only
568 * important for the first one.
569 */
570 if (start_lbn < NDADDR && end_lbn >= NDADDR)
571 return (ENOSPC);
572 /*
573 * If the latest allocation is in a new cylinder group, assume that
574 * the filesystem has decided to move and do not force it back to
575 * the previous cylinder group.
576 */
577 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
578 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
579 return (ENOSPC);
580 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
581 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
582 return (ENOSPC);
583 /*
584 * Get the starting offset and block map for the first block.
585 */
586 if (start_lvl == 0) {
587 sbap = &ip->i_din1->di_db[0];
588 soff = start_lbn;
589 } else {
590 idp = &start_ap[start_lvl - 1];
591 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
592 brelse(sbp);
593 return (ENOSPC);
594 }
595 sbap = (ufs1_daddr_t *)sbp->b_data;
596 soff = idp->in_off;
597 }
598 /*
599 * If the block range spans two block maps, get the second map.
600 */
601 ebap = NULL;
602 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
603 ssize = len;
604 } else {
605 #ifdef INVARIANTS
606 if (start_lvl > 0 &&
607 start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
608 panic("ffs_reallocblk: start == end");
609 #endif
610 ssize = len - (idp->in_off + 1);
611 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
612 goto fail;
613 ebap = (ufs1_daddr_t *)ebp->b_data;
614 }
615 /*
616 * Find the preferred location for the cluster. If we have not
617 * previously failed at this endeavor, then follow our standard
618 * preference calculation. If we have failed at it, then pick up
619 * where we last ended our search.
620 */
621 UFS_LOCK(ump);
622 if (ip->i_nextclustercg == -1)
623 pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
624 else
625 pref = cgdata(fs, ip->i_nextclustercg);
626 /*
627 * Search the block map looking for an allocation of the desired size.
628 * To avoid wasting too much time, we limit the number of cylinder
629 * groups that we will search.
630 */
631 cg = dtog(fs, pref);
632 for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
633 if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
634 break;
635 cg += 1;
636 if (cg >= fs->fs_ncg)
637 cg = 0;
638 }
639 /*
640 * If we have failed in our search, record where we gave up for
641 * next time. Otherwise, fall back to our usual search criterion.
642 */
643 if (newblk == 0) {
644 ip->i_nextclustercg = cg;
645 UFS_UNLOCK(ump);
646 goto fail;
647 }
648 ip->i_nextclustercg = -1;
649 /*
650 * We have found a new contiguous block.
651 *
652 * First we have to replace the old block pointers with the new
653 * block pointers in the inode and indirect blocks associated
654 * with the file.
655 */
656 #ifdef DEBUG
657 if (prtrealloc)
658 printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
659 (uintmax_t)ip->i_number,
660 (intmax_t)start_lbn, (intmax_t)end_lbn);
661 #endif
662 blkno = newblk;
663 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
664 if (i == ssize) {
665 bap = ebap;
666 soff = -i;
667 }
668 #ifdef INVARIANTS
669 if (!ffs_checkblk(ip,
670 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
671 panic("ffs_reallocblks: unallocated block 2");
672 if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
673 panic("ffs_reallocblks: alloc mismatch");
674 #endif
675 #ifdef DEBUG
676 if (prtrealloc)
677 printf(" %d,", *bap);
678 #endif
679 if (DOINGSOFTDEP(vp)) {
680 if (sbap == &ip->i_din1->di_db[0] && i < ssize)
681 softdep_setup_allocdirect(ip, start_lbn + i,
682 blkno, *bap, fs->fs_bsize, fs->fs_bsize,
683 buflist->bs_children[i]);
684 else
685 softdep_setup_allocindir_page(ip, start_lbn + i,
686 i < ssize ? sbp : ebp, soff + i, blkno,
687 *bap, buflist->bs_children[i]);
688 }
689 *bap++ = blkno;
690 }
691 /*
692 * Next we must write out the modified inode and indirect blocks.
693 * For strict correctness, the writes should be synchronous since
694 * the old block values may have been written to disk. In practice
695 * they are almost never written, but if we are concerned about
696 * strict correctness, the `doasyncfree' flag should be set to zero.
697 *
698 * The test on `doasyncfree' should be changed to test a flag
699 * that shows whether the associated buffers and inodes have
700 * been written. The flag should be set when the cluster is
701 * started and cleared whenever the buffer or inode is flushed.
702 * We can then check below to see if it is set, and do the
703 * synchronous write only when it has been cleared.
704 */
705 if (sbap != &ip->i_din1->di_db[0]) {
706 if (doasyncfree)
707 bdwrite(sbp);
708 else
709 bwrite(sbp);
710 } else {
711 ip->i_flag |= IN_CHANGE | IN_UPDATE;
712 if (!doasyncfree)
713 ffs_update(vp, 1);
714 }
715 if (ssize < len) {
716 if (doasyncfree)
717 bdwrite(ebp);
718 else
719 bwrite(ebp);
720 }
721 /*
722 * Last, free the old blocks and assign the new blocks to the buffers.
723 */
724 #ifdef DEBUG
725 if (prtrealloc)
726 printf("\n\tnew:");
727 #endif
728 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
729 if (!DOINGSOFTDEP(vp))
730 ffs_blkfree(ump, fs, ump->um_devvp,
731 dbtofsb(fs, buflist->bs_children[i]->b_blkno),
732 fs->fs_bsize, ip->i_number, vp->v_type, NULL);
733 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
734 #ifdef INVARIANTS
735 if (!ffs_checkblk(ip,
736 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
737 panic("ffs_reallocblks: unallocated block 3");
738 #endif
739 #ifdef DEBUG
740 if (prtrealloc)
741 printf(" %d,", blkno);
742 #endif
743 }
744 #ifdef DEBUG
745 if (prtrealloc) {
746 prtrealloc--;
747 printf("\n");
748 }
749 #endif
750 return (0);
751
752 fail:
753 if (ssize < len)
754 brelse(ebp);
755 if (sbap != &ip->i_din1->di_db[0])
756 brelse(sbp);
757 return (ENOSPC);
758 }
759
760 static int
761 ffs_reallocblks_ufs2(ap)
762 struct vop_reallocblks_args /* {
763 struct vnode *a_vp;
764 struct cluster_save *a_buflist;
765 } */ *ap;
766 {
767 struct fs *fs;
768 struct inode *ip;
769 struct vnode *vp;
770 struct buf *sbp, *ebp;
771 ufs2_daddr_t *bap, *sbap, *ebap;
772 struct cluster_save *buflist;
773 struct ufsmount *ump;
774 ufs_lbn_t start_lbn, end_lbn;
775 ufs2_daddr_t soff, newblk, blkno, pref;
776 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
777 int i, cg, len, start_lvl, end_lvl, ssize;
778
779 vp = ap->a_vp;
780 ip = VTOI(vp);
781 ump = ITOUMP(ip);
782 fs = ump->um_fs;
783 /*
784 * If we are not tracking block clusters or if we have less than 4%
785 * free blocks left, then do not attempt to cluster. Running with
786 * less than 5% free block reserve is not recommended and those that
787 * choose to do so do not expect to have good file layout.
788 */
789 if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
790 return (ENOSPC);
791 buflist = ap->a_buflist;
792 len = buflist->bs_nchildren;
793 start_lbn = buflist->bs_children[0]->b_lblkno;
794 end_lbn = start_lbn + len - 1;
795 #ifdef INVARIANTS
796 for (i = 0; i < len; i++)
797 if (!ffs_checkblk(ip,
798 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
799 panic("ffs_reallocblks: unallocated block 1");
800 for (i = 1; i < len; i++)
801 if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
802 panic("ffs_reallocblks: non-logical cluster");
803 blkno = buflist->bs_children[0]->b_blkno;
804 ssize = fsbtodb(fs, fs->fs_frag);
805 for (i = 1; i < len - 1; i++)
806 if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
807 panic("ffs_reallocblks: non-physical cluster %d", i);
808 #endif
809 /*
810 * If the cluster crosses the boundary for the first indirect
811 * block, do not move anything in it. Indirect blocks are
812 * usually initially laid out in a position between the data
813 * blocks. Block reallocation would usually destroy locality by
814 * moving the indirect block out of the way to make room for
815 * data blocks if we didn't compensate here. We should also do
816 * this for other indirect block boundaries, but it is only
817 * important for the first one.
818 */
819 if (start_lbn < NDADDR && end_lbn >= NDADDR)
820 return (ENOSPC);
821 /*
822 * If the latest allocation is in a new cylinder group, assume that
823 * the filesystem has decided to move and do not force it back to
824 * the previous cylinder group.
825 */
826 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
827 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
828 return (ENOSPC);
829 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
830 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
831 return (ENOSPC);
832 /*
833 * Get the starting offset and block map for the first block.
834 */
835 if (start_lvl == 0) {
836 sbap = &ip->i_din2->di_db[0];
837 soff = start_lbn;
838 } else {
839 idp = &start_ap[start_lvl - 1];
840 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
841 brelse(sbp);
842 return (ENOSPC);
843 }
844 sbap = (ufs2_daddr_t *)sbp->b_data;
845 soff = idp->in_off;
846 }
847 /*
848 * If the block range spans two block maps, get the second map.
849 */
850 ebap = NULL;
851 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
852 ssize = len;
853 } else {
854 #ifdef INVARIANTS
855 if (start_lvl > 0 &&
856 start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
857 panic("ffs_reallocblk: start == end");
858 #endif
859 ssize = len - (idp->in_off + 1);
860 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
861 goto fail;
862 ebap = (ufs2_daddr_t *)ebp->b_data;
863 }
864 /*
865 * Find the preferred location for the cluster. If we have not
866 * previously failed at this endeavor, then follow our standard
867 * preference calculation. If we have failed at it, then pick up
868 * where we last ended our search.
869 */
870 UFS_LOCK(ump);
871 if (ip->i_nextclustercg == -1)
872 pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
873 else
874 pref = cgdata(fs, ip->i_nextclustercg);
875 /*
876 * Search the block map looking for an allocation of the desired size.
877 * To avoid wasting too much time, we limit the number of cylinder
878 * groups that we will search.
879 */
880 cg = dtog(fs, pref);
881 for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
882 if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
883 break;
884 cg += 1;
885 if (cg >= fs->fs_ncg)
886 cg = 0;
887 }
888 /*
889 * If we have failed in our search, record where we gave up for
890 * next time. Otherwise, fall back to our usual search criterion.
891 */
892 if (newblk == 0) {
893 ip->i_nextclustercg = cg;
894 UFS_UNLOCK(ump);
895 goto fail;
896 }
897 ip->i_nextclustercg = -1;
898 /*
899 * We have found a new contiguous block.
900 *
901 * First we have to replace the old block pointers with the new
902 * block pointers in the inode and indirect blocks associated
903 * with the file.
904 */
905 #ifdef DEBUG
906 if (prtrealloc)
907 printf("realloc: ino %ju, lbns %jd-%jd\n\told:", (uintmax_t)ip->i_number,
908 (intmax_t)start_lbn, (intmax_t)end_lbn);
909 #endif
910 blkno = newblk;
911 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
912 if (i == ssize) {
913 bap = ebap;
914 soff = -i;
915 }
916 #ifdef INVARIANTS
917 if (!ffs_checkblk(ip,
918 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
919 panic("ffs_reallocblks: unallocated block 2");
920 if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
921 panic("ffs_reallocblks: alloc mismatch");
922 #endif
923 #ifdef DEBUG
924 if (prtrealloc)
925 printf(" %jd,", (intmax_t)*bap);
926 #endif
927 if (DOINGSOFTDEP(vp)) {
928 if (sbap == &ip->i_din2->di_db[0] && i < ssize)
929 softdep_setup_allocdirect(ip, start_lbn + i,
930 blkno, *bap, fs->fs_bsize, fs->fs_bsize,
931 buflist->bs_children[i]);
932 else
933 softdep_setup_allocindir_page(ip, start_lbn + i,
934 i < ssize ? sbp : ebp, soff + i, blkno,
935 *bap, buflist->bs_children[i]);
936 }
937 *bap++ = blkno;
938 }
939 /*
940 * Next we must write out the modified inode and indirect blocks.
941 * For strict correctness, the writes should be synchronous since
942 * the old block values may have been written to disk. In practice
943 * they are almost never written, but if we are concerned about
944 * strict correctness, the `doasyncfree' flag should be set to zero.
945 *
946 * The test on `doasyncfree' should be changed to test a flag
947 * that shows whether the associated buffers and inodes have
948 * been written. The flag should be set when the cluster is
949 * started and cleared whenever the buffer or inode is flushed.
950 * We can then check below to see if it is set, and do the
951 * synchronous write only when it has been cleared.
952 */
953 if (sbap != &ip->i_din2->di_db[0]) {
954 if (doasyncfree)
955 bdwrite(sbp);
956 else
957 bwrite(sbp);
958 } else {
959 ip->i_flag |= IN_CHANGE | IN_UPDATE;
960 if (!doasyncfree)
961 ffs_update(vp, 1);
962 }
963 if (ssize < len) {
964 if (doasyncfree)
965 bdwrite(ebp);
966 else
967 bwrite(ebp);
968 }
969 /*
970 * Last, free the old blocks and assign the new blocks to the buffers.
971 */
972 #ifdef DEBUG
973 if (prtrealloc)
974 printf("\n\tnew:");
975 #endif
976 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
977 if (!DOINGSOFTDEP(vp))
978 ffs_blkfree(ump, fs, ump->um_devvp,
979 dbtofsb(fs, buflist->bs_children[i]->b_blkno),
980 fs->fs_bsize, ip->i_number, vp->v_type, NULL);
981 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
982 #ifdef INVARIANTS
983 if (!ffs_checkblk(ip,
984 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
985 panic("ffs_reallocblks: unallocated block 3");
986 #endif
987 #ifdef DEBUG
988 if (prtrealloc)
989 printf(" %jd,", (intmax_t)blkno);
990 #endif
991 }
992 #ifdef DEBUG
993 if (prtrealloc) {
994 prtrealloc--;
995 printf("\n");
996 }
997 #endif
998 return (0);
999
1000 fail:
1001 if (ssize < len)
1002 brelse(ebp);
1003 if (sbap != &ip->i_din2->di_db[0])
1004 brelse(sbp);
1005 return (ENOSPC);
1006 }
1007
1008 /*
1009 * Allocate an inode in the filesystem.
1010 *
1011 * If allocating a directory, use ffs_dirpref to select the inode.
1012 * If allocating in a directory, the following hierarchy is followed:
1013 * 1) allocate the preferred inode.
1014 * 2) allocate an inode in the same cylinder group.
1015 *      3) quadratically rehash into other cylinder groups, until an
1016 * available inode is located.
1017 * If no inode preference is given the following hierarchy is used
1018 * to allocate an inode:
1019 * 1) allocate an inode in cylinder group 0.
1020 *      2) quadratically rehash into other cylinder groups, until an
1021 * available inode is located.
1022 */
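/*
 * Illustrative sketch only: ffs_valloc() is reached through the
 * UFS_VALLOC() hook, e.g. from the directory code when creating a new
 * file or directory, roughly as
 *
 *	error = UFS_VALLOC(dvp, mode, cnp->cn_cred, &tvp);
 *
 * where dvp is the parent directory vnode and, on success, the new
 * exclusively locked vnode is returned in tvp.
 */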
1023 int
1024 ffs_valloc(pvp, mode, cred, vpp)
1025 struct vnode *pvp;
1026 int mode;
1027 struct ucred *cred;
1028 struct vnode **vpp;
1029 {
1030 struct inode *pip;
1031 struct fs *fs;
1032 struct inode *ip;
1033 struct timespec ts;
1034 struct ufsmount *ump;
1035 ino_t ino, ipref;
1036 u_int cg;
1037 int error, error1, reclaimed;
1038 static struct timeval lastfail;
1039 static int curfail;
1040
1041 *vpp = NULL;
1042 pip = VTOI(pvp);
1043 ump = ITOUMP(pip);
1044 fs = ump->um_fs;
1045
1046 UFS_LOCK(ump);
1047 reclaimed = 0;
1048 retry:
1049 if (fs->fs_cstotal.cs_nifree == 0)
1050 goto noinodes;
1051
1052 if ((mode & IFMT) == IFDIR)
1053 ipref = ffs_dirpref(pip);
1054 else
1055 ipref = pip->i_number;
1056 if (ipref >= fs->fs_ncg * fs->fs_ipg)
1057 ipref = 0;
1058 cg = ino_to_cg(fs, ipref);
1059 /*
1060 * Track the number of dirs created one after another
1061 * in the same cg without intervening files.
1062 */
1063 if ((mode & IFMT) == IFDIR) {
1064 if (fs->fs_contigdirs[cg] < 255)
1065 fs->fs_contigdirs[cg]++;
1066 } else {
1067 if (fs->fs_contigdirs[cg] > 0)
1068 fs->fs_contigdirs[cg]--;
1069 }
1070 ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
1071 (allocfcn_t *)ffs_nodealloccg);
1072 if (ino == 0)
1073 goto noinodes;
1074 error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
1075 if (error) {
1076 error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
1077 FFSV_FORCEINSMQ);
1078 ffs_vfree(pvp, ino, mode);
1079 if (error1 == 0) {
1080 ip = VTOI(*vpp);
1081 if (ip->i_mode)
1082 goto dup_alloc;
1083 ip->i_flag |= IN_MODIFIED;
1084 vput(*vpp);
1085 }
1086 return (error);
1087 }
1088 ip = VTOI(*vpp);
1089 if (ip->i_mode) {
1090 dup_alloc:
1091 printf("mode = 0%o, inum = %ju, fs = %s\n",
1092 ip->i_mode, (uintmax_t)ip->i_number, fs->fs_fsmnt);
1093 panic("ffs_valloc: dup alloc");
1094 }
1095 if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) { /* XXX */
1096 printf("free inode %s/%lu had %ld blocks\n",
1097 fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
1098 DIP_SET(ip, i_blocks, 0);
1099 }
1100 ip->i_flags = 0;
1101 DIP_SET(ip, i_flags, 0);
1102 /*
1103 * Set up a new generation number for this inode.
1104 */
1105 while (ip->i_gen == 0 || ++ip->i_gen == 0)
1106 ip->i_gen = arc4random();
1107 DIP_SET(ip, i_gen, ip->i_gen);
1108 if (fs->fs_magic == FS_UFS2_MAGIC) {
1109 vfs_timestamp(&ts);
1110 ip->i_din2->di_birthtime = ts.tv_sec;
1111 ip->i_din2->di_birthnsec = ts.tv_nsec;
1112 }
1113 ufs_prepare_reclaim(*vpp);
1114 ip->i_flag = 0;
1115 (*vpp)->v_vflag = 0;
1116 (*vpp)->v_type = VNON;
1117 if (fs->fs_magic == FS_UFS2_MAGIC) {
1118 (*vpp)->v_op = &ffs_vnodeops2;
1119 ip->i_flag |= IN_UFS2;
1120 } else {
1121 (*vpp)->v_op = &ffs_vnodeops1;
1122 }
1123 return (0);
1124 noinodes:
1125 if (reclaimed == 0) {
1126 reclaimed = 1;
1127 softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
1128 goto retry;
1129 }
1130 UFS_UNLOCK(ump);
1131 if (ppsratecheck(&lastfail, &curfail, 1)) {
1132 ffs_fserr(fs, pip->i_number, "out of inodes");
1133 uprintf("\n%s: create/symlink failed, no inodes free\n",
1134 fs->fs_fsmnt);
1135 }
1136 return (ENOSPC);
1137 }
1138
1139 /*
1140 * Find a cylinder group to place a directory.
1141 *
1142 * The policy implemented by this algorithm is to allocate a
1143 * directory inode in the same cylinder group as its parent
1144 * directory, but also to reserve space for the inodes and data
1145 * of its files. Restrict the number of directories which may be
1146 * allocated one after another in the same cylinder group
1147 * without intervening allocation of files.
1148 *
1149 * If we allocate a first level directory then force allocation
1150 * in another cylinder group.
1151 */
1152 static ino_t
1153 ffs_dirpref(pip)
1154 struct inode *pip;
1155 {
1156 struct fs *fs;
1157 int cg, prefcg, dirsize, cgsize;
1158 u_int avgifree, avgbfree, avgndir, curdirsize;
1159 u_int minifree, minbfree, maxndir;
1160 u_int mincg, minndir;
1161 u_int maxcontigdirs;
1162
1163 mtx_assert(UFS_MTX(ITOUMP(pip)), MA_OWNED);
1164 fs = ITOFS(pip);
1165
1166 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
1167 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1168 avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;
1169
1170 /*
1171 * Force allocation in another cg if creating a first level dir.
1172 */
1173 ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
1174 if (ITOV(pip)->v_vflag & VV_ROOT) {
1175 prefcg = arc4random() % fs->fs_ncg;
1176 mincg = prefcg;
1177 minndir = fs->fs_ipg;
1178 for (cg = prefcg; cg < fs->fs_ncg; cg++)
1179 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
1180 fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
1181 fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1182 mincg = cg;
1183 minndir = fs->fs_cs(fs, cg).cs_ndir;
1184 }
1185 for (cg = 0; cg < prefcg; cg++)
1186 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
1187 fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
1188 fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1189 mincg = cg;
1190 minndir = fs->fs_cs(fs, cg).cs_ndir;
1191 }
1192 return ((ino_t)(fs->fs_ipg * mincg));
1193 }
1194
1195 /*
1196 * Compute various limits which are used for the
1197 * optimal allocation of a directory inode.
1198 */
1199 maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
1200 minifree = avgifree - avgifree / 4;
1201 if (minifree < 1)
1202 minifree = 1;
1203 minbfree = avgbfree - avgbfree / 4;
1204 if (minbfree < 1)
1205 minbfree = 1;
1206 cgsize = fs->fs_fsize * fs->fs_fpg;
1207 dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
1208 curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
1209 if (dirsize < curdirsize)
1210 dirsize = curdirsize;
1211 if (dirsize <= 0)
1212 maxcontigdirs = 0; /* dirsize overflowed */
1213 else
1214 maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
1215 if (fs->fs_avgfpdir > 0)
1216 maxcontigdirs = min(maxcontigdirs,
1217 fs->fs_ipg / fs->fs_avgfpdir);
1218 if (maxcontigdirs == 0)
1219 maxcontigdirs = 1;
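/*
 * A worked example with hypothetical numbers: with the common newfs
 * defaults fs_avgfilesize = 16384 and fs_avgfpdir = 64, dirsize starts
 * at 1MB. If avgbfree were 2048 free 32KB blocks per cg, maxcontigdirs
 * would be min(2048 * 32768 / 1MB, 255) = 64 before the
 * fs_ipg / fs_avgfpdir cap, i.e. up to 64 directories may be created
 * back to back in one cylinder group before moving on.
 */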
1220
1221 /*
1222 * Limit number of dirs in one cg and reserve space for
1223 * regular files, but only if we have no deficit in
1224 * inodes or space.
1225 *
1226 * We are trying to find a suitable cylinder group nearby
1227 * our preferred cylinder group to place a new directory.
1228 * We scan from our preferred cylinder group forward looking
1229 * for a cylinder group that meets our criterion. If we get
1230 * to the final cylinder group and do not find anything,
1231 * we start scanning forwards from the beginning of the
1232 * filesystem. While it might seem sensible to start scanning
1233 * backwards or even to alternate looking forward and backward,
1234 * this approach fails badly when the filesystem is nearly full.
1235 * Specifically, we first search all the areas that have no space
1236 * and finally try the one preceding that. We repeat this on
1237 * every request and in the case of the final block end up
1238 * searching the entire filesystem. By jumping to the front
1239 * of the filesystem, our future forward searches always look
1240 * in new cylinder groups, so we find every possible block after
1241 * one pass over the filesystem.
1242 */
1243 prefcg = ino_to_cg(fs, pip->i_number);
1244 for (cg = prefcg; cg < fs->fs_ncg; cg++)
1245 if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
1246 fs->fs_cs(fs, cg).cs_nifree >= minifree &&
1247 fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
1248 if (fs->fs_contigdirs[cg] < maxcontigdirs)
1249 return ((ino_t)(fs->fs_ipg * cg));
1250 }
1251 for (cg = 0; cg < prefcg; cg++)
1252 if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
1253 fs->fs_cs(fs, cg).cs_nifree >= minifree &&
1254 fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
1255 if (fs->fs_contigdirs[cg] < maxcontigdirs)
1256 return ((ino_t)(fs->fs_ipg * cg));
1257 }
1258 /*
1259 * This is a backstop when we have a deficit in space.
1260 */
1261 for (cg = prefcg; cg < fs->fs_ncg; cg++)
1262 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
1263 return ((ino_t)(fs->fs_ipg * cg));
1264 for (cg = 0; cg < prefcg; cg++)
1265 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
1266 break;
1267 return ((ino_t)(fs->fs_ipg * cg));
1268 }
1269
1270 /*
1271 * Select the desired position for the next block in a file. The file is
1272 * logically divided into sections. The first section is composed of the
1273 * direct blocks and the next fs_maxbpg blocks. Each additional section
1274 * contains fs_maxbpg blocks.
1275 *
1276 * If no blocks have been allocated in the first section, the policy is to
1277 * request a block in the same cylinder group as the inode that describes
1278 * the file. The first indirect is allocated immediately following the last
1279 * direct block and the data blocks for the first indirect immediately
1280 * follow it.
1281 *
1282 * If no blocks have been allocated in any other section, the indirect
1283 * block(s) are allocated in the same cylinder group as its inode in an
1284 * area reserved immediately following the inode blocks. The policy for
1285 * the data blocks is to place them in a cylinder group with a greater than
1286 * average number of free blocks. An appropriate cylinder group is found
1287 * by using a rotor that sweeps the cylinder groups. When a new group of
1288 * blocks is needed, the sweep begins in the cylinder group following the
1289 * cylinder group from which the previous allocation was made. The sweep
1290 * continues until a cylinder group with greater than the average number
1291 * of free blocks is found. If the allocation is for the first block in an
1292 * indirect block or the previous block is a hole, then the information on
1293 * the previous allocation is unavailable; here a best guess is made based
1294 * on the logical block number being allocated.
1295 *
1296 * If a section is already partially allocated, the policy is to
1297 * allocate blocks contiguously within the section if possible.
1298 */
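/*
 * Sizing note (worked arithmetic with hypothetical parameters): if
 * fs_maxbpg were 2048 and fs_bsize 32768, each section after the first
 * would span 2048 * 32KB = 64MB of file data, so a large sequential file
 * changes cylinder groups roughly every 64MB, spreading its blocks so
 * that no single file monopolizes a group.
 */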
1299 ufs2_daddr_t
1300 ffs_blkpref_ufs1(ip, lbn, indx, bap)
1301 struct inode *ip;
1302 ufs_lbn_t lbn;
1303 int indx;
1304 ufs1_daddr_t *bap;
1305 {
1306 struct fs *fs;
1307 u_int cg, inocg;
1308 u_int avgbfree, startcg;
1309 ufs2_daddr_t pref;
1310
1311 KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
1312 mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
1313 fs = ITOFS(ip);
1314 /*
1315 * Allocation of indirect blocks is indicated by passing negative
1316 * values in indx: -1 for single indirect, -2 for double indirect,
1317 * -3 for triple indirect. As noted below, we attempt to allocate
1318 * the first indirect inline with the file data. For all later
1319 * indirect blocks, the data is often allocated in other cylinder
1320 * groups. However to speed random file access and to speed up
1321 * fsck, the filesystem reserves the first fs_metaspace blocks
1322 * (typically half of fs_minfree) of the data area of each cylinder
1323 * group to hold these later indirect blocks.
1324 */
1325 inocg = ino_to_cg(fs, ip->i_number);
1326 if (indx < 0) {
1327 /*
1328 * Our preference for indirect blocks is the zone at the
1329 * beginning of the inode's cylinder group data area that
1330 * we try to reserve for indirect blocks.
1331 */
1332 pref = cgmeta(fs, inocg);
1333 /*
1334 * If we are allocating the first indirect block, try to
1335 * place it immediately following the last direct block.
1336 */
1337 if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
1338 ip->i_din1->di_db[NDADDR - 1] != 0)
1339 pref = ip->i_din1->di_db[NDADDR - 1] + fs->fs_frag;
1340 return (pref);
1341 }
1342 /*
1343 * If we are allocating the first data block in the first indirect
1344 * block and the indirect has been allocated in the data block area,
1345 * try to place it immediately following the indirect block.
1346 */
1347 if (lbn == NDADDR) {
1348 pref = ip->i_din1->di_ib[0];
1349 if (pref != 0 && pref >= cgdata(fs, inocg) &&
1350 pref < cgbase(fs, inocg + 1))
1351 return (pref + fs->fs_frag);
1352 }
1353 /*
1354 * If we are at the beginning of a file, or we have already allocated
1355 * the maximum number of blocks per cylinder group, or we do not
1356 * have a block allocated immediately preceding us, then we need
1357 * to decide where to start allocating new blocks.
1358 */
1359 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
1360 /*
1361 * If we are allocating a directory data block, we want
1362 * to place it in the metadata area.
1363 */
1364 if ((ip->i_mode & IFMT) == IFDIR)
1365 return (cgmeta(fs, inocg));
1366 /*
1367 * Until we fill all the direct and all the first indirect's
1368 * blocks, we try to allocate in the data area of the inode's
1369 * cylinder group.
1370 */
1371 if (lbn < NDADDR + NINDIR(fs))
1372 return (cgdata(fs, inocg));
1373 /*
1374 * Find a cylinder group with a greater than average number of
1375 * unused data blocks.
1376 */
1377 if (indx == 0 || bap[indx - 1] == 0)
1378 startcg = inocg + lbn / fs->fs_maxbpg;
1379 else
1380 startcg = dtog(fs, bap[indx - 1]) + 1;
1381 startcg %= fs->fs_ncg;
1382 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1383 for (cg = startcg; cg < fs->fs_ncg; cg++)
1384 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1385 fs->fs_cgrotor = cg;
1386 return (cgdata(fs, cg));
1387 }
1388 for (cg = 0; cg <= startcg; cg++)
1389 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1390 fs->fs_cgrotor = cg;
1391 return (cgdata(fs, cg));
1392 }
1393 return (0);
1394 }
1395 /*
1396 * Otherwise, we just always try to lay things out contiguously.
1397 */
1398 return (bap[indx - 1] + fs->fs_frag);
1399 }
1400
1401 /*
1402 * Same as above, but for UFS2
1403 */
1404 ufs2_daddr_t
1405 ffs_blkpref_ufs2(ip, lbn, indx, bap)
1406 struct inode *ip;
1407 ufs_lbn_t lbn;
1408 int indx;
1409 ufs2_daddr_t *bap;
1410 {
1411 struct fs *fs;
1412 u_int cg, inocg;
1413 u_int avgbfree, startcg;
1414 ufs2_daddr_t pref;
1415
1416 KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
1417 mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
1418 fs = ITOFS(ip);
1419 /*
1420 * Allocation of indirect blocks is indicated by passing negative
1421 * values in indx: -1 for single indirect, -2 for double indirect,
1422 * -3 for triple indirect. As noted below, we attempt to allocate
1423 * the first indirect inline with the file data. For all later
1424 * indirect blocks, the data is often allocated in other cylinder
1425 * groups. However to speed random file access and to speed up
1426 * fsck, the filesystem reserves the first fs_metaspace blocks
1427 * (typically half of fs_minfree) of the data area of each cylinder
1428 * group to hold these later indirect blocks.
1429 */
1430 inocg = ino_to_cg(fs, ip->i_number);
1431 if (indx < 0) {
1432 /*
1433 * Our preference for indirect blocks is the zone at the
1434 * beginning of the inode's cylinder group data area that
1435 * we try to reserve for indirect blocks.
1436 */
1437 pref = cgmeta(fs, inocg);
1438 /*
1439 * If we are allocating the first indirect block, try to
1440 * place it immediately following the last direct block.
1441 */
1442 if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
1443 ip->i_din2->di_db[NDADDR - 1] != 0)
1444 pref = ip->i_din2->di_db[NDADDR - 1] + fs->fs_frag;
1445 return (pref);
1446 }
1447 /*
1448 * If we are allocating the first data block in the first indirect
1449 * block and the indirect has been allocated in the data block area,
1450 * try to place it immediately following the indirect block.
1451 */
1452 if (lbn == NDADDR) {
1453 pref = ip->i_din2->di_ib[0];
1454 if (pref != 0 && pref >= cgdata(fs, inocg) &&
1455 pref < cgbase(fs, inocg + 1))
1456 return (pref + fs->fs_frag);
1457 }
1458 /*
1459 * If we are at the beginning of a file, or we have already allocated
1460 * the maximum number of blocks per cylinder group, or we do not
1461 * have a block allocated immediately preceding us, then we need
1462 * to decide where to start allocating new blocks.
1463 */
1464 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
1465 /*
1466 * If we are allocating a directory data block, we want
1467 * to place it in the metadata area.
1468 */
1469 if ((ip->i_mode & IFMT) == IFDIR)
1470 return (cgmeta(fs, inocg));
1471 /*
1472 * Until we fill all the direct and all the first indirect's
1473 * blocks, we try to allocate in the data area of the inode's
1474 * cylinder group.
1475 */
1476 if (lbn < NDADDR + NINDIR(fs))
1477 return (cgdata(fs, inocg));
1478 /*
1479 * Find a cylinder group with a greater than average number of
1480 * unused data blocks.
1481 */
1482 if (indx == 0 || bap[indx - 1] == 0)
1483 startcg = inocg + lbn / fs->fs_maxbpg;
1484 else
1485 startcg = dtog(fs, bap[indx - 1]) + 1;
1486 startcg %= fs->fs_ncg;
1487 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
1488 for (cg = startcg; cg < fs->fs_ncg; cg++)
1489 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1490 fs->fs_cgrotor = cg;
1491 return (cgdata(fs, cg));
1492 }
1493 for (cg = 0; cg <= startcg; cg++)
1494 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
1495 fs->fs_cgrotor = cg;
1496 return (cgdata(fs, cg));
1497 }
1498 return (0);
1499 }
1500 /*
1501 * Otherwise, we just always try to lay things out contiguously.
1502 */
1503 return (bap[indx - 1] + fs->fs_frag);
1504 }
1505
1506 /*
1507 * Implement the cylinder overflow algorithm.
1508 *
1509 * The policy implemented by this algorithm is:
1510 * 1) allocate the block in its requested cylinder group.
1511 *   2) quadratically rehash on the cylinder group number.
1512 * 3) brute force search for a free block.
1513 *
1514 * Must be called with the UFS lock held. Will release the lock on success
1515 * and return with it held on failure.
1516 */
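/*
 * Probe-order example: with 64 cylinder groups and a starting cg of 5,
 * step 1 tries cg 5 itself; the quadratic rehash of step 2 then tries
 * cgs 6, 8, 12, 20, 36 and 4 (adding 1, 2, 4, 8, 16, 32 and wrapping at
 * fs_ncg); step 3 finally sweeps every remaining group starting from
 * cg 7, since cgs 5 and 6 have already been examined.
 */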
1517 /*VARARGS5*/
1518 static ufs2_daddr_t
1519 ffs_hashalloc(ip, cg, pref, size, rsize, allocator)
1520 struct inode *ip;
1521 u_int cg;
1522 ufs2_daddr_t pref;
1523 int size; /* Search size for data blocks, mode for inodes */
1524 int rsize; /* Real allocated size. */
1525 allocfcn_t *allocator;
1526 {
1527 struct fs *fs;
1528 ufs2_daddr_t result;
1529 u_int i, icg = cg;
1530
1531 mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
1532 #ifdef INVARIANTS
1533 if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
1534 panic("ffs_hashalloc: allocation on suspended filesystem");
1535 #endif
1536 fs = ITOFS(ip);
1537 /*
1538 * 1: preferred cylinder group
1539 */
1540 result = (*allocator)(ip, cg, pref, size, rsize);
1541 if (result)
1542 return (result);
1543 /*
1544 * 2: quadratic rehash
1545 */
1546 for (i = 1; i < fs->fs_ncg; i *= 2) {
1547 cg += i;
1548 if (cg >= fs->fs_ncg)
1549 cg -= fs->fs_ncg;
1550 result = (*allocator)(ip, cg, 0, size, rsize);
1551 if (result)
1552 return (result);
1553 }
1554 /*
1555 * 3: brute force search
1556 * Note that we start at i == 2, since 0 was checked initially,
1557 * and 1 is always checked in the quadratic rehash.
1558 */
1559 cg = (icg + 2) % fs->fs_ncg;
1560 for (i = 2; i < fs->fs_ncg; i++) {
1561 result = (*allocator)(ip, cg, 0, size, rsize);
1562 if (result)
1563 return (result);
1564 cg++;
1565 if (cg == fs->fs_ncg)
1566 cg = 0;
1567 }
1568 return (0);
1569 }
1570
1571 /*
1572 * Determine whether a fragment can be extended.
1573 *
1574 * Check to see if the necessary fragments are available, and
1575 * if they are, allocate them.
1576 */
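/*
 * For example, with 4KB fragments in a 32KB block, growing an 8KB
 * fragment (osize, 2 frags) at bprev to 16KB (nsize, 4 frags) succeeds
 * only if the two fragments immediately following it are free and the
 * extension does not run past the end of bprev's block; otherwise 0 is
 * returned and the caller falls back to allocating a new location.
 */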
1577 static ufs2_daddr_t
1578 ffs_fragextend(ip, cg, bprev, osize, nsize)
1579 struct inode *ip;
1580 u_int cg;
1581 ufs2_daddr_t bprev;
1582 int osize, nsize;
1583 {
1584 struct fs *fs;
1585 struct cg *cgp;
1586 struct buf *bp;
1587 struct ufsmount *ump;
1588 int nffree;
1589 long bno;
1590 int frags, bbase;
1591 int i, error;
1592 u_int8_t *blksfree;
1593
1594 ump = ITOUMP(ip);
1595 fs = ump->um_fs;
1596 if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
1597 return (0);
1598 frags = numfrags(fs, nsize);
1599 bbase = fragnum(fs, bprev);
1600 if (bbase > fragnum(fs, (bprev + frags - 1))) {
1601 /* cannot extend across a block boundary */
1602 return (0);
1603 }
1604 UFS_UNLOCK(ump);
1605 error = bread(ump->um_devvp, fsbtodb(fs, cgtod(fs, cg)),
1606 (int)fs->fs_cgsize, NOCRED, &bp);
1607 if (error)
1608 goto fail;
1609 cgp = (struct cg *)bp->b_data;
1610 if (!cg_chkmagic(cgp))
1611 goto fail;
1612 bp->b_xflags |= BX_BKGRDWRITE;
1613 cgp->cg_old_time = cgp->cg_time = time_second;
1614 bno = dtogd(fs, bprev);
1615 blksfree = cg_blksfree(cgp);
1616 for (i = numfrags(fs, osize); i < frags; i++)
1617 if (isclr(blksfree, bno + i))
1618 goto fail;
1619 /*
1620 * The current fragment can be extended:
1621 * deduct the count on the fragment being extended into,
1622 * increase the count on the remaining fragment (if any), and
1623 * allocate the extended piece.
1624 */
1625 for (i = frags; i < fs->fs_frag - bbase; i++)
1626 if (isclr(blksfree, bno + i))
1627 break;
1628 cgp->cg_frsum[i - numfrags(fs, osize)]--;
1629 if (i != frags)
1630 cgp->cg_frsum[i - frags]++;
1631 for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
1632 clrbit(blksfree, bno + i);
1633 cgp->cg_cs.cs_nffree--;
1634 nffree++;
1635 }
1636 UFS_LOCK(ump);
1637 fs->fs_cstotal.cs_nffree -= nffree;
1638 fs->fs_cs(fs, cg).cs_nffree -= nffree;
1639 fs->fs_fmod = 1;
1640 ACTIVECLEAR(fs, cg);
1641 UFS_UNLOCK(ump);
1642 if (DOINGSOFTDEP(ITOV(ip)))
1643 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
1644 frags, numfrags(fs, osize));
1645 bdwrite(bp);
1646 return (bprev);
1647
1648 fail:
1649 brelse(bp);
1650 UFS_LOCK(ump);
1651 return (0);
1652
1653 }
1654
1655 /*
1656 * Determine whether a block can be allocated.
1657 *
1658 * Check to see if a block of the appropriate size is available,
1659 * and if it is, allocate it.
1660 */
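/*
 * Fragment-search note: cg_frsum[i] counts the free runs of exactly i
 * fragments in this cylinder group. A request for, say, 2 fragments
 * first looks for a run of exactly 2 (cg_frsum[2]), then settles for
 * progressively longer runs (3, 4, ...); only if no partial block can
 * satisfy the request is a whole block allocated and broken up.
 */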
1661 static ufs2_daddr_t
1662 ffs_alloccg(ip, cg, bpref, size, rsize)
1663 struct inode *ip;
1664 u_int cg;
1665 ufs2_daddr_t bpref;
1666 int size;
1667 int rsize;
1668 {
1669 struct fs *fs;
1670 struct cg *cgp;
1671 struct buf *bp;
1672 struct ufsmount *ump;
1673 ufs1_daddr_t bno;
1674 ufs2_daddr_t blkno;
1675 int i, allocsiz, error, frags;
1676 u_int8_t *blksfree;
1677
1678 ump = ITOUMP(ip);
1679 fs = ump->um_fs;
1680 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
1681 return (0);
1682 UFS_UNLOCK(ump);
1683 error = bread(ump->um_devvp, fsbtodb(fs, cgtod(fs, cg)),
1684 (int)fs->fs_cgsize, NOCRED, &bp);
1685 if (error)
1686 goto fail;
1687 cgp = (struct cg *)bp->b_data;
1688 if (!cg_chkmagic(cgp) ||
1689 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
1690 goto fail;
1691 bp->b_xflags |= BX_BKGRDWRITE;
1692 cgp->cg_old_time = cgp->cg_time = time_second;
1693 if (size == fs->fs_bsize) {
1694 UFS_LOCK(ump);
1695 blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1696 ACTIVECLEAR(fs, cg);
1697 UFS_UNLOCK(ump);
1698 bdwrite(bp);
1699 return (blkno);
1700 }
1701 	/*
1702 	 * Check to see if any fragments are already available.
1703 	 * allocsiz is the size of the free run to allocate from; it is
1704 	 * grown from the requested size until such a run is available.
1705 	 */
1706 blksfree = cg_blksfree(cgp);
1707 frags = numfrags(fs, size);
1708 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
1709 if (cgp->cg_frsum[allocsiz] != 0)
1710 break;
1711 if (allocsiz == fs->fs_frag) {
1712 /*
1713 * no fragments were available, so a block will be
1714 * allocated, and hacked up
1715 */
1716 if (cgp->cg_cs.cs_nbfree == 0)
1717 goto fail;
1718 UFS_LOCK(ump);
1719 blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
1720 ACTIVECLEAR(fs, cg);
1721 UFS_UNLOCK(ump);
1722 bdwrite(bp);
1723 return (blkno);
1724 }
1725 KASSERT(size == rsize,
1726 ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
1727 bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
1728 if (bno < 0)
1729 goto fail;
1730 for (i = 0; i < frags; i++)
1731 clrbit(blksfree, bno + i);
1732 cgp->cg_cs.cs_nffree -= frags;
1733 cgp->cg_frsum[allocsiz]--;
1734 if (frags != allocsiz)
1735 cgp->cg_frsum[allocsiz - frags]++;
1736 UFS_LOCK(ump);
1737 fs->fs_cstotal.cs_nffree -= frags;
1738 fs->fs_cs(fs, cg).cs_nffree -= frags;
1739 fs->fs_fmod = 1;
1740 blkno = cgbase(fs, cg) + bno;
1741 ACTIVECLEAR(fs, cg);
1742 UFS_UNLOCK(ump);
1743 if (DOINGSOFTDEP(ITOV(ip)))
1744 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
1745 bdwrite(bp);
1746 return (blkno);
1747
1748 fail:
1749 brelse(bp);
1750 UFS_LOCK(ump);
1751 return (0);
1752 }
1753
1754 /*
1755 * Allocate a block in a cylinder group.
1756 *
1757 * This algorithm implements the following policy:
1758 * 1) allocate the requested block.
1759 * 2) allocate a rotationally optimal block in the same cylinder.
1760 * 3) allocate the next available block on the block rotor for the
1761 * specified cylinder group.
1762 * Note that this routine only allocates fs_bsize blocks; these
1763 * blocks may be fragmented by the routine that allocates them.
1764 */
1765 static ufs2_daddr_t
1766 ffs_alloccgblk(ip, bp, bpref, size)
1767 struct inode *ip;
1768 struct buf *bp;
1769 ufs2_daddr_t bpref;
1770 int size;
1771 {
1772 struct fs *fs;
1773 struct cg *cgp;
1774 struct ufsmount *ump;
1775 ufs1_daddr_t bno;
1776 ufs2_daddr_t blkno;
1777 u_int8_t *blksfree;
1778 int i, cgbpref;
1779
1780 ump = ITOUMP(ip);
1781 fs = ump->um_fs;
1782 mtx_assert(UFS_MTX(ump), MA_OWNED);
1783 cgp = (struct cg *)bp->b_data;
1784 blksfree = cg_blksfree(cgp);
1785 if (bpref == 0) {
1786 bpref = cgbase(fs, cgp->cg_cgx) + cgp->cg_rotor + fs->fs_frag;
1787 } else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) {
1788 /* map bpref to correct zone in this cg */
1789 if (bpref < cgdata(fs, cgbpref))
1790 bpref = cgmeta(fs, cgp->cg_cgx);
1791 else
1792 bpref = cgdata(fs, cgp->cg_cgx);
1793 }
1794 /*
1795 * if the requested block is available, use it
1796 */
1797 bno = dtogd(fs, blknum(fs, bpref));
1798 if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
1799 goto gotit;
1800 /*
1801 * Take the next available block in this cylinder group.
1802 */
1803 bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
1804 if (bno < 0)
1805 return (0);
1806 /* Update cg_rotor only if allocated from the data zone */
1807 if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx)))
1808 cgp->cg_rotor = bno;
1809 gotit:
1810 blkno = fragstoblks(fs, bno);
1811 ffs_clrblock(fs, blksfree, (long)blkno);
1812 ffs_clusteracct(fs, cgp, blkno, -1);
1813 cgp->cg_cs.cs_nbfree--;
1814 fs->fs_cstotal.cs_nbfree--;
1815 fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
1816 fs->fs_fmod = 1;
1817 blkno = cgbase(fs, cgp->cg_cgx) + bno;
1818 	/*
1819 	 * If the caller didn't want the whole block, free the excess frags here.
1820 	 */
1821 size = numfrags(fs, size);
1822 if (size != fs->fs_frag) {
1823 bno = dtogd(fs, blkno);
1824 for (i = size; i < fs->fs_frag; i++)
1825 setbit(blksfree, bno + i);
1826 i = fs->fs_frag - size;
1827 cgp->cg_cs.cs_nffree += i;
1828 fs->fs_cstotal.cs_nffree += i;
1829 fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
1830 fs->fs_fmod = 1;
1831 cgp->cg_frsum[i]++;
1832 }
1833 /* XXX Fixme. */
1834 UFS_UNLOCK(ump);
1835 if (DOINGSOFTDEP(ITOV(ip)))
1836 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno,
1837 size, 0);
1838 UFS_LOCK(ump);
1839 return (blkno);
1840 }
1841
1842 /*
1843 * Determine whether a cluster can be allocated.
1844 *
1845 * We do not currently check for optimal rotational layout if there
1846 * are multiple choices in the same cylinder group. Instead we just
1847 * take the first one that we find following bpref.
1848 */
1849 static ufs2_daddr_t
1850 ffs_clusteralloc(ip, cg, bpref, len)
1851 struct inode *ip;
1852 u_int cg;
1853 ufs2_daddr_t bpref;
1854 int len;
1855 {
1856 struct fs *fs;
1857 struct cg *cgp;
1858 struct buf *bp;
1859 struct ufsmount *ump;
1860 int i, run, bit, map, got;
1861 ufs2_daddr_t bno;
1862 u_char *mapp;
1863 int32_t *lp;
1864 u_int8_t *blksfree;
1865
1866 ump = ITOUMP(ip);
1867 fs = ump->um_fs;
1868 if (fs->fs_maxcluster[cg] < len)
1869 return (0);
1870 UFS_UNLOCK(ump);
1871 if (bread(ump->um_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
1872 NOCRED, &bp))
1873 goto fail_lock;
1874 cgp = (struct cg *)bp->b_data;
1875 if (!cg_chkmagic(cgp))
1876 goto fail_lock;
1877 bp->b_xflags |= BX_BKGRDWRITE;
1878 /*
1879 * Check to see if a cluster of the needed size (or bigger) is
1880 * available in this cylinder group.
1881 */
1882 lp = &cg_clustersum(cgp)[len];
1883 for (i = len; i <= fs->fs_contigsumsize; i++)
1884 if (*lp++ > 0)
1885 break;
1886 if (i > fs->fs_contigsumsize) {
1887 /*
1888 * This is the first time looking for a cluster in this
1889 * cylinder group. Update the cluster summary information
1890 * to reflect the true maximum sized cluster so that
1891 * future cluster allocation requests can avoid reading
1892 * the cylinder group map only to find no clusters.
1893 */
1894 lp = &cg_clustersum(cgp)[len - 1];
1895 for (i = len - 1; i > 0; i--)
1896 if (*lp-- > 0)
1897 break;
1898 UFS_LOCK(ump);
1899 fs->fs_maxcluster[cg] = i;
1900 goto fail;
1901 }
1902 /*
1903 * Search the cluster map to find a big enough cluster.
1904 * We take the first one that we find, even if it is larger
1905 * than we need as we prefer to get one close to the previous
1906 * block allocation. We do not search before the current
1907 * preference point as we do not want to allocate a block
1908 * that is allocated before the previous one (as we will
1909 * then have to wait for another pass of the elevator
1910 * algorithm before it will be read). We prefer to fail and
1911 * be recalled to try an allocation in the next cylinder group.
1912 */
1913 if (dtog(fs, bpref) != cg)
1914 bpref = cgdata(fs, cg);
1915 else
1916 bpref = blknum(fs, bpref);
1917 bpref = fragstoblks(fs, dtogd(fs, bpref));
1918 mapp = &cg_clustersfree(cgp)[bpref / NBBY];
1919 map = *mapp++;
1920 bit = 1 << (bpref % NBBY);
1921 for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
1922 if ((map & bit) == 0) {
1923 run = 0;
1924 } else {
1925 run++;
1926 if (run == len)
1927 break;
1928 }
1929 if ((got & (NBBY - 1)) != (NBBY - 1)) {
1930 bit <<= 1;
1931 } else {
1932 map = *mapp++;
1933 bit = 1;
1934 }
1935 }
1936 if (got >= cgp->cg_nclusterblks)
1937 goto fail_lock;
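	/*
	 * The scan above left got at the last block of the run it found,
	 * so the cluster starts at block got - run + 1 within this
	 * cylinder group.
	 */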
1938 /*
1939 * Allocate the cluster that we have found.
1940 */
1941 blksfree = cg_blksfree(cgp);
1942 for (i = 1; i <= len; i++)
1943 if (!ffs_isblock(fs, blksfree, got - run + i))
1944 panic("ffs_clusteralloc: map mismatch");
1945 bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
1946 if (dtog(fs, bno) != cg)
1947 panic("ffs_clusteralloc: allocated out of group");
1948 len = blkstofrags(fs, len);
1949 UFS_LOCK(ump);
1950 for (i = 0; i < len; i += fs->fs_frag)
1951 if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
1952 panic("ffs_clusteralloc: lost block");
1953 ACTIVECLEAR(fs, cg);
1954 UFS_UNLOCK(ump);
1955 bdwrite(bp);
1956 return (bno);
1957
1958 fail_lock:
1959 UFS_LOCK(ump);
1960 fail:
1961 brelse(bp);
1962 return (0);
1963 }
1964
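/*
 * Return a buffer for the inode block that holds inode number
 * cg * fs_ipg + cginoblk; gbflags is passed through to getblk().
 */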
1965 static inline struct buf *
1966 getinobuf(struct inode *ip, u_int cg, u_int32_t cginoblk, int gbflags)
1967 {
1968 struct fs *fs;
1969
1970 fs = ITOFS(ip);
1971 return (getblk(ITODEVVP(ip), fsbtodb(fs, ino_to_fsba(fs,
1972 cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0,
1973 gbflags));
1974 }
1975
1976 /*
1977 * Synchronous inode initialization is needed only when barrier writes do not
1978 * work as advertised, and will impose a heavy cost on file creation in a newly
1979 * created filesystem.
1980 */
1981 static int doasyncinodeinit = 1;
1982 SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncinodeinit, CTLFLAG_RWTUN,
1983 &doasyncinodeinit, 0,
1984 "Perform inode block initialization using asynchronous writes");
1985
1986 /*
1987 * Determine whether an inode can be allocated.
1988 *
1989 * Check to see if an inode is available, and if it is,
1990 * allocate it using the following policy:
1991 * 1) allocate the requested inode.
1992 * 2) allocate the next available inode after the requested
1993 * inode in the specified cylinder group.
1994 */
1995 static ufs2_daddr_t
1996 ffs_nodealloccg(ip, cg, ipref, mode, unused)
1997 struct inode *ip;
1998 u_int cg;
1999 ufs2_daddr_t ipref;
2000 int mode;
2001 int unused;
2002 {
2003 struct fs *fs;
2004 struct cg *cgp;
2005 struct buf *bp, *ibp;
2006 struct ufsmount *ump;
2007 u_int8_t *inosused, *loc;
2008 struct ufs2_dinode *dp2;
2009 int error, start, len, i;
2010 u_int32_t old_initediblk;
2011
2012 ump = ITOUMP(ip);
2013 fs = ump->um_fs;
2014 check_nifree:
2015 if (fs->fs_cs(fs, cg).cs_nifree == 0)
2016 return (0);
2017 UFS_UNLOCK(ump);
2018 error = bread(ump->um_devvp, fsbtodb(fs, cgtod(fs, cg)),
2019 (int)fs->fs_cgsize, NOCRED, &bp);
2020 if (error) {
2021 brelse(bp);
2022 UFS_LOCK(ump);
2023 return (0);
2024 }
2025 cgp = (struct cg *)bp->b_data;
2026 restart:
2027 if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
2028 brelse(bp);
2029 UFS_LOCK(ump);
2030 return (0);
2031 }
2032 bp->b_xflags |= BX_BKGRDWRITE;
2033 inosused = cg_inosused(cgp);
2034 if (ipref) {
2035 ipref %= fs->fs_ipg;
2036 if (isclr(inosused, ipref))
2037 goto gotit;
2038 }
2039 start = cgp->cg_irotor / NBBY;
2040 len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
2041 loc = memcchr(&inosused[start], 0xff, len);
2042 if (loc == NULL) {
2043 len = start + 1;
2044 start = 0;
2045 loc = memcchr(&inosused[start], 0xff, len);
2046 if (loc == NULL) {
2047 printf("cg = %d, irotor = %ld, fs = %s\n",
2048 cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
2049 panic("ffs_nodealloccg: map corrupted");
2050 /* NOTREACHED */
2051 }
2052 }
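	/*
	 * loc points at the first byte of the inosused map that is not
	 * 0xff; the lowest clear bit within it is a free inode.
	 */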
2053 ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1;
2054 gotit:
2055 /*
2056 * Check to see if we need to initialize more inodes.
2057 */
2058 if (fs->fs_magic == FS_UFS2_MAGIC &&
2059 ipref + INOPB(fs) > cgp->cg_initediblk &&
2060 cgp->cg_initediblk < cgp->cg_niblk) {
2061 old_initediblk = cgp->cg_initediblk;
2062
2063 /*
2064 		 * Free the cylinder group lock before writing the
2065 		 * initialized inode block.  Entering babarrierwrite()
2066 		 * while holding the cylinder group lock causes a
2067 		 * lock order violation between that lock and
2068 		 * snaplk.
2069 *
2070 * Another thread can decide to initialize the same
2071 * inode block, but whichever thread first gets the
2072 * cylinder group lock after writing the newly
2073 * allocated inode block will update it and the other
2074 * will realize that it has lost and leave the
2075 * cylinder group unchanged.
2076 */
2077 ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT);
2078 brelse(bp);
2079 if (ibp == NULL) {
2080 /*
2081 * The inode block buffer is already owned by
2082 * another thread, which must initialize it.
2083 			 * With the cg buffer lock already dropped, wait
2084 			 * on the buffer to let the other thread finish
2085 			 * its updates, then retry.
2086 */
2087 ibp = getinobuf(ip, cg, old_initediblk, 0);
2088 brelse(ibp);
2089 UFS_LOCK(ump);
2090 goto check_nifree;
2091 }
2092 bzero(ibp->b_data, (int)fs->fs_bsize);
2093 dp2 = (struct ufs2_dinode *)(ibp->b_data);
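		/*
		 * Give every inode in the newly initialized block a
		 * nonzero random generation number.
		 */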
2094 for (i = 0; i < INOPB(fs); i++) {
2095 while (dp2->di_gen == 0)
2096 dp2->di_gen = arc4random();
2097 dp2++;
2098 }
2099
2100 /*
2101 * Rather than adding a soft updates dependency to ensure
2102 * that the new inode block is written before it is claimed
2103 * by the cylinder group map, we just do a barrier write
2104 * here. The barrier write will ensure that the inode block
2105 * gets written before the updated cylinder group map can be
2106 * written. The barrier write should only slow down bulk
2107 * loading of newly created filesystems.
2108 */
2109 if (doasyncinodeinit)
2110 babarrierwrite(ibp);
2111 else
2112 bwrite(ibp);
2113
2114 /*
2115 * After the inode block is written, try to update the
2116 * cg initediblk pointer. If another thread beat us
2117 * to it, then leave it unchanged as the other thread
2118 * has already set it correctly.
2119 */
2120 error = bread(ump->um_devvp, fsbtodb(fs, cgtod(fs, cg)),
2121 (int)fs->fs_cgsize, NOCRED, &bp);
2122 UFS_LOCK(ump);
2123 ACTIVECLEAR(fs, cg);
2124 UFS_UNLOCK(ump);
2125 if (error != 0) {
2126 brelse(bp);
2127 return (error);
2128 }
2129 cgp = (struct cg *)bp->b_data;
2130 if (cgp->cg_initediblk == old_initediblk)
2131 cgp->cg_initediblk += INOPB(fs);
2132 goto restart;
2133 }
2134 cgp->cg_old_time = cgp->cg_time = time_second;
2135 cgp->cg_irotor = ipref;
2136 UFS_LOCK(ump);
2137 ACTIVECLEAR(fs, cg);
2138 setbit(inosused, ipref);
2139 cgp->cg_cs.cs_nifree--;
2140 fs->fs_cstotal.cs_nifree--;
2141 fs->fs_cs(fs, cg).cs_nifree--;
2142 fs->fs_fmod = 1;
2143 if ((mode & IFMT) == IFDIR) {
2144 cgp->cg_cs.cs_ndir++;
2145 fs->fs_cstotal.cs_ndir++;
2146 fs->fs_cs(fs, cg).cs_ndir++;
2147 }
2148 UFS_UNLOCK(ump);
2149 if (DOINGSOFTDEP(ITOV(ip)))
2150 softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode);
2151 bdwrite(bp);
2152 return ((ino_t)(cg * fs->fs_ipg + ipref));
2153 }
2154
2155 /*
2156 * Free a block or fragment.
2157 *
2158 * The specified block or fragment is placed back in the
2159 * free map. If a fragment is deallocated, a possible
2160 * block reassembly is checked.
2161 */
2162 static void
2163 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd)
2164 struct ufsmount *ump;
2165 struct fs *fs;
2166 struct vnode *devvp;
2167 ufs2_daddr_t bno;
2168 long size;
2169 ino_t inum;
2170 struct workhead *dephd;
2171 {
2172 struct mount *mp;
2173 struct cg *cgp;
2174 struct buf *bp;
2175 ufs1_daddr_t fragno, cgbno;
2176 ufs2_daddr_t cgblkno;
2177 int i, blk, frags, bbase;
2178 u_int cg;
2179 u_int8_t *blksfree;
2180 struct cdev *dev;
2181
2182 cg = dtog(fs, bno);
2183 if (devvp->v_type == VREG) {
2184 /* devvp is a snapshot */
2185 MPASS(devvp->v_mount->mnt_data == ump);
2186 dev = ump->um_devvp->v_rdev;
2187 cgblkno = fragstoblks(fs, cgtod(fs, cg));
2188 } else if (devvp->v_type == VCHR) {
2189 /* devvp is a normal disk device */
2190 dev = devvp->v_rdev;
2191 cgblkno = fsbtodb(fs, cgtod(fs, cg));
2192 ASSERT_VOP_LOCKED(devvp, "ffs_blkfree_cg");
2193 } else
2194 return;
2195 #ifdef INVARIANTS
2196 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
2197 fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
2198 printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
2199 devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
2200 size, fs->fs_fsmnt);
2201 panic("ffs_blkfree_cg: bad size");
2202 }
2203 #endif
2204 if ((u_int)bno >= fs->fs_size) {
2205 printf("bad block %jd, ino %lu\n", (intmax_t)bno,
2206 (u_long)inum);
2207 ffs_fserr(fs, inum, "bad block");
2208 return;
2209 }
2210 if (bread(devvp, cgblkno, (int)fs->fs_cgsize, NOCRED, &bp)) {
2211 brelse(bp);
2212 return;
2213 }
2214 cgp = (struct cg *)bp->b_data;
2215 if (!cg_chkmagic(cgp)) {
2216 brelse(bp);
2217 return;
2218 }
2219 bp->b_xflags |= BX_BKGRDWRITE;
2220 cgp->cg_old_time = cgp->cg_time = time_second;
2221 cgbno = dtogd(fs, bno);
2222 blksfree = cg_blksfree(cgp);
2223 UFS_LOCK(ump);
2224 if (size == fs->fs_bsize) {
2225 fragno = fragstoblks(fs, cgbno);
2226 if (!ffs_isfreeblock(fs, blksfree, fragno)) {
2227 if (devvp->v_type == VREG) {
2228 UFS_UNLOCK(ump);
2229 /* devvp is a snapshot */
2230 brelse(bp);
2231 return;
2232 }
2233 printf("dev = %s, block = %jd, fs = %s\n",
2234 devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
2235 panic("ffs_blkfree_cg: freeing free block");
2236 }
2237 ffs_setblock(fs, blksfree, fragno);
2238 ffs_clusteracct(fs, cgp, fragno, 1);
2239 cgp->cg_cs.cs_nbfree++;
2240 fs->fs_cstotal.cs_nbfree++;
2241 fs->fs_cs(fs, cg).cs_nbfree++;
2242 } else {
2243 bbase = cgbno - fragnum(fs, cgbno);
2244 /*
2245 * decrement the counts associated with the old frags
2246 */
2247 blk = blkmap(fs, blksfree, bbase);
2248 ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
2249 /*
2250 * deallocate the fragment
2251 */
2252 frags = numfrags(fs, size);
2253 for (i = 0; i < frags; i++) {
2254 if (isset(blksfree, cgbno + i)) {
2255 printf("dev = %s, block = %jd, fs = %s\n",
2256 devtoname(dev), (intmax_t)(bno + i),
2257 fs->fs_fsmnt);
2258 panic("ffs_blkfree_cg: freeing free frag");
2259 }
2260 setbit(blksfree, cgbno + i);
2261 }
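		/* i now equals frags; credit the freed fragments to the free counts. */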
2262 cgp->cg_cs.cs_nffree += i;
2263 fs->fs_cstotal.cs_nffree += i;
2264 fs->fs_cs(fs, cg).cs_nffree += i;
2265 /*
2266 * add back in counts associated with the new frags
2267 */
2268 blk = blkmap(fs, blksfree, bbase);
2269 ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
2270 /*
2271 * if a complete block has been reassembled, account for it
2272 */
2273 fragno = fragstoblks(fs, bbase);
2274 if (ffs_isblock(fs, blksfree, fragno)) {
2275 cgp->cg_cs.cs_nffree -= fs->fs_frag;
2276 fs->fs_cstotal.cs_nffree -= fs->fs_frag;
2277 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
2278 ffs_clusteracct(fs, cgp, fragno, 1);
2279 cgp->cg_cs.cs_nbfree++;
2280 fs->fs_cstotal.cs_nbfree++;
2281 fs->fs_cs(fs, cg).cs_nbfree++;
2282 }
2283 }
2284 fs->fs_fmod = 1;
2285 ACTIVECLEAR(fs, cg);
2286 UFS_UNLOCK(ump);
2287 mp = UFSTOVFS(ump);
2288 if (MOUNTEDSOFTDEP(mp) && devvp->v_type == VCHR)
2289 softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
2290 numfrags(fs, size), dephd);
2291 bdwrite(bp);
2292 }
2293
2294 struct ffs_blkfree_trim_params {
2295 struct task task;
2296 struct ufsmount *ump;
2297 struct vnode *devvp;
2298 ufs2_daddr_t bno;
2299 long size;
2300 ino_t inum;
2301 struct workhead *pdephd;
2302 struct workhead dephd;
2303 };
2304
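/*
 * When TRIM is enabled, ffs_blkfree() defers the bitmap update: it issues
 * a BIO_DELETE whose caller2 points at a ffs_blkfree_trim_params context,
 * the completion handler hands that context to the per-mount trim
 * taskqueue, and ffs_blkfree_trim_task() finally calls ffs_blkfree_cg()
 * to mark the block free.
 */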
2305 static void
2306 ffs_blkfree_trim_task(ctx, pending)
2307 void *ctx;
2308 int pending;
2309 {
2310 struct ffs_blkfree_trim_params *tp;
2311
2312 tp = ctx;
2313 ffs_blkfree_cg(tp->ump, tp->ump->um_fs, tp->devvp, tp->bno, tp->size,
2314 tp->inum, tp->pdephd);
2315 vn_finished_secondary_write(UFSTOVFS(tp->ump));
2316 atomic_add_int(&tp->ump->um_trim_inflight, -1);
2317 free(tp, M_TEMP);
2318 }
2319
2320 static void
2321 ffs_blkfree_trim_completed(bip)
2322 struct bio *bip;
2323 {
2324 struct ffs_blkfree_trim_params *tp;
2325
2326 tp = bip->bio_caller2;
2327 g_destroy_bio(bip);
2328 TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
2329 taskqueue_enqueue(tp->ump->um_trim_tq, &tp->task);
2330 }
2331
2332 void
2333 ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd)
2334 struct ufsmount *ump;
2335 struct fs *fs;
2336 struct vnode *devvp;
2337 ufs2_daddr_t bno;
2338 long size;
2339 ino_t inum;
2340 enum vtype vtype;
2341 struct workhead *dephd;
2342 {
2343 struct mount *mp;
2344 struct bio *bip;
2345 struct ffs_blkfree_trim_params *tp;
2346
2347 /*
2348 * Check to see if a snapshot wants to claim the block.
2349 	 * Check that devvp is a normal disk device (not a snapshot),
2350 	 * that it has one or more snapshots associated with it, and
2351 	 * that one of those snapshots wants to claim the block.
2352 */
2353 if (devvp->v_type == VCHR &&
2354 (devvp->v_vflag & VV_COPYONWRITE) &&
2355 ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) {
2356 return;
2357 }
2358 /*
2359 * Nothing to delay if TRIM is disabled, or the operation is
2360 * performed on the snapshot.
2361 */
2362 if (!ump->um_candelete || devvp->v_type == VREG) {
2363 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd);
2364 return;
2365 }
2366
2367 /*
2368 * Postpone the set of the free bit in the cg bitmap until the
2369 * BIO_DELETE is completed. Otherwise, due to disk queue
2370 * reordering, TRIM might be issued after we reuse the block
2371 * and write some new data into it.
2372 */
2373 atomic_add_int(&ump->um_trim_inflight, 1);
2374 tp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK);
2375 tp->ump = ump;
2376 tp->devvp = devvp;
2377 tp->bno = bno;
2378 tp->size = size;
2379 tp->inum = inum;
2380 if (dephd != NULL) {
2381 LIST_INIT(&tp->dephd);
2382 LIST_SWAP(dephd, &tp->dephd, worklist, wk_list);
2383 tp->pdephd = &tp->dephd;
2384 } else
2385 tp->pdephd = NULL;
2386
2387 bip = g_alloc_bio();
2388 bip->bio_cmd = BIO_DELETE;
2389 bip->bio_offset = dbtob(fsbtodb(fs, bno));
2390 bip->bio_done = ffs_blkfree_trim_completed;
2391 bip->bio_length = size;
2392 bip->bio_caller2 = tp;
2393
2394 mp = UFSTOVFS(ump);
2395 vn_start_secondary_write(NULL, &mp, 0);
2396 g_io_request(bip, (struct g_consumer *)devvp->v_bufobj.bo_private);
2397 }
2398
2399 #ifdef INVARIANTS
2400 /*
2401 * Verify allocation of a block or fragment. Returns true if block or
2402 * fragment is allocated, false if it is free.
2403 */
2404 static int
2405 ffs_checkblk(ip, bno, size)
2406 struct inode *ip;
2407 ufs2_daddr_t bno;
2408 long size;
2409 {
2410 struct fs *fs;
2411 struct cg *cgp;
2412 struct buf *bp;
2413 ufs1_daddr_t cgbno;
2414 int i, error, frags, free;
2415 u_int8_t *blksfree;
2416
2417 fs = ITOFS(ip);
2418 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
2419 printf("bsize = %ld, size = %ld, fs = %s\n",
2420 (long)fs->fs_bsize, size, fs->fs_fsmnt);
2421 panic("ffs_checkblk: bad size");
2422 }
2423 if ((u_int)bno >= fs->fs_size)
2424 panic("ffs_checkblk: bad block %jd", (intmax_t)bno);
2425 error = bread(ITODEVVP(ip), fsbtodb(fs, cgtod(fs, dtog(fs, bno))),
2426 (int)fs->fs_cgsize, NOCRED, &bp);
2427 if (error)
2428 panic("ffs_checkblk: cg bread failed");
2429 cgp = (struct cg *)bp->b_data;
2430 if (!cg_chkmagic(cgp))
2431 panic("ffs_checkblk: cg magic mismatch");
2432 bp->b_xflags |= BX_BKGRDWRITE;
2433 blksfree = cg_blksfree(cgp);
2434 cgbno = dtogd(fs, bno);
2435 if (size == fs->fs_bsize) {
2436 free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
2437 } else {
2438 frags = numfrags(fs, size);
2439 for (free = 0, i = 0; i < frags; i++)
2440 if (isset(blksfree, cgbno + i))
2441 free++;
2442 if (free != 0 && free != frags)
2443 panic("ffs_checkblk: partially free fragment");
2444 }
2445 brelse(bp);
2446 return (!free);
2447 }
2448 #endif /* INVARIANTS */
2449
2450 /*
2451 * Free an inode.
2452 */
2453 int
2454 ffs_vfree(pvp, ino, mode)
2455 struct vnode *pvp;
2456 ino_t ino;
2457 int mode;
2458 {
2459 struct ufsmount *ump;
2460 struct inode *ip;
2461
2462 if (DOINGSOFTDEP(pvp)) {
2463 softdep_freefile(pvp, ino, mode);
2464 return (0);
2465 }
2466 ip = VTOI(pvp);
2467 ump = VFSTOUFS(pvp->v_mount);
2468 return (ffs_freefile(ump, ump->um_fs, ump->um_devvp, ino, mode, NULL));
2469 }
2470
2471 /*
2472 * Do the actual free operation.
2473 * The specified inode is placed back in the free map.
2474 */
2475 int
2476 ffs_freefile(ump, fs, devvp, ino, mode, wkhd)
2477 struct ufsmount *ump;
2478 struct fs *fs;
2479 struct vnode *devvp;
2480 ino_t ino;
2481 int mode;
2482 struct workhead *wkhd;
2483 {
2484 struct cg *cgp;
2485 struct buf *bp;
2486 ufs2_daddr_t cgbno;
2487 int error;
2488 u_int cg;
2489 u_int8_t *inosused;
2490 struct cdev *dev;
2491
2492 cg = ino_to_cg(fs, ino);
2493 if (devvp->v_type == VREG) {
2494 /* devvp is a snapshot */
2495 MPASS(devvp->v_mount->mnt_data == ump);
2496 dev = ump->um_devvp->v_rdev;
2497 cgbno = fragstoblks(fs, cgtod(fs, cg));
2498 } else if (devvp->v_type == VCHR) {
2499 /* devvp is a normal disk device */
2500 dev = devvp->v_rdev;
2501 cgbno = fsbtodb(fs, cgtod(fs, cg));
2502 } else {
2503 bp = NULL;
2504 return (0);
2505 }
2506 if (ino >= fs->fs_ipg * fs->fs_ncg)
2507 panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s",
2508 devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt);
2509 if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) {
2510 brelse(bp);
2511 return (error);
2512 }
2513 cgp = (struct cg *)bp->b_data;
2514 if (!cg_chkmagic(cgp)) {
2515 brelse(bp);
2516 return (0);
2517 }
2518 bp->b_xflags |= BX_BKGRDWRITE;
2519 cgp->cg_old_time = cgp->cg_time = time_second;
2520 inosused = cg_inosused(cgp);
2521 ino %= fs->fs_ipg;
2522 if (isclr(inosused, ino)) {
2523 printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev),
2524 (uintmax_t)(ino + cg * fs->fs_ipg), fs->fs_fsmnt);
2525 if (fs->fs_ronly == 0)
2526 panic("ffs_freefile: freeing free inode");
2527 }
2528 clrbit(inosused, ino);
2529 if (ino < cgp->cg_irotor)
2530 cgp->cg_irotor = ino;
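	/*
	 * Pull the inode allocation rotor back so the freed inode is
	 * found quickly by the next search in this cylinder group.
	 */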
2531 cgp->cg_cs.cs_nifree++;
2532 UFS_LOCK(ump);
2533 fs->fs_cstotal.cs_nifree++;
2534 fs->fs_cs(fs, cg).cs_nifree++;
2535 if ((mode & IFMT) == IFDIR) {
2536 cgp->cg_cs.cs_ndir--;
2537 fs->fs_cstotal.cs_ndir--;
2538 fs->fs_cs(fs, cg).cs_ndir--;
2539 }
2540 fs->fs_fmod = 1;
2541 ACTIVECLEAR(fs, cg);
2542 UFS_UNLOCK(ump);
2543 if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type == VCHR)
2544 softdep_setup_inofree(UFSTOVFS(ump), bp,
2545 ino + cg * fs->fs_ipg, wkhd);
2546 bdwrite(bp);
2547 return (0);
2548 }
2549
2550 /*
2551 * Check to see if a file is free.
2552 */
2553 int
2554 ffs_checkfreefile(fs, devvp, ino)
2555 struct fs *fs;
2556 struct vnode *devvp;
2557 ino_t ino;
2558 {
2559 struct cg *cgp;
2560 struct buf *bp;
2561 ufs2_daddr_t cgbno;
2562 int ret;
2563 u_int cg;
2564 u_int8_t *inosused;
2565
2566 cg = ino_to_cg(fs, ino);
2567 if (devvp->v_type == VREG) {
2568 /* devvp is a snapshot */
2569 cgbno = fragstoblks(fs, cgtod(fs, cg));
2570 } else if (devvp->v_type == VCHR) {
2571 /* devvp is a normal disk device */
2572 cgbno = fsbtodb(fs, cgtod(fs, cg));
2573 } else {
2574 return (1);
2575 }
2576 if (ino >= fs->fs_ipg * fs->fs_ncg)
2577 return (1);
2578 if (bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp)) {
2579 brelse(bp);
2580 return (1);
2581 }
2582 cgp = (struct cg *)bp->b_data;
2583 if (!cg_chkmagic(cgp)) {
2584 brelse(bp);
2585 return (1);
2586 }
2587 inosused = cg_inosused(cgp);
2588 ino %= fs->fs_ipg;
2589 ret = isclr(inosused, ino);
2590 brelse(bp);
2591 return (ret);
2592 }
2593
2594 /*
2595 * Find a block of the specified size in the specified cylinder group.
2596 *
2597  * It is a panic if a request is made to find a block when none are
2598  * available.
2599 */
2600 static ufs1_daddr_t
2601 ffs_mapsearch(fs, cgp, bpref, allocsiz)
2602 struct fs *fs;
2603 struct cg *cgp;
2604 ufs2_daddr_t bpref;
2605 int allocsiz;
2606 {
2607 ufs1_daddr_t bno;
2608 int start, len, loc, i;
2609 int blk, field, subfield, pos;
2610 u_int8_t *blksfree;
2611
2612 /*
2613 * find the fragment by searching through the free block
2614 * map for an appropriate bit pattern
2615 */
2616 if (bpref)
2617 start = dtogd(fs, bpref) / NBBY;
2618 else
2619 start = cgp->cg_frotor / NBBY;
2620 blksfree = cg_blksfree(cgp);
2621 len = howmany(fs->fs_fpg, NBBY) - start;
2622 loc = scanc((u_int)len, (u_char *)&blksfree[start],
2623 fragtbl[fs->fs_frag],
2624 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
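	/*
	 * scanc() returns the number of bytes left unscanned when it
	 * finds a map byte containing a free run of at least allocsiz
	 * fragments, or zero if no such byte exists in the scanned
	 * range; in that case wrap around and search the start of the
	 * map.
	 */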
2625 if (loc == 0) {
2626 len = start + 1;
2627 start = 0;
2628 loc = scanc((u_int)len, (u_char *)&blksfree[0],
2629 fragtbl[fs->fs_frag],
2630 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
2631 if (loc == 0) {
2632 printf("start = %d, len = %d, fs = %s\n",
2633 start, len, fs->fs_fsmnt);
2634 panic("ffs_alloccg: map corrupted");
2635 /* NOTREACHED */
2636 }
2637 }
2638 bno = (start + len - loc) * NBBY;
2639 cgp->cg_frotor = bno;
2640 /*
2641 * found the byte in the map
2642 * sift through the bits to find the selected frag
2643 */
2644 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
2645 blk = blkmap(fs, blksfree, bno);
2646 blk <<= 1;
2647 field = around[allocsiz];
2648 subfield = inside[allocsiz];
2649 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
2650 if ((blk & field) == subfield)
2651 return (bno + pos);
2652 field <<= 1;
2653 subfield <<= 1;
2654 }
2655 }
2656 printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
2657 panic("ffs_alloccg: block not in map");
2658 return (-1);
2659 }
2660
2661 /*
2662 * Fserr prints the name of a filesystem with an error diagnostic.
2663 *
2664 * The form of the error message is:
2665 * fs: error message
2666 */
2667 void
2668 ffs_fserr(fs, inum, cp)
2669 struct fs *fs;
2670 ino_t inum;
2671 char *cp;
2672 {
2673 struct thread *td = curthread; /* XXX */
2674 struct proc *p = td->td_proc;
2675
2676 log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n",
2677 p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum,
2678 fs->fs_fsmnt, cp);
2679 }
2680
2681 /*
2682 * This function provides the capability for the fsck program to
2683  * update an active filesystem. Seventeen operations are provided:
2684 *
2685 * adjrefcnt(inode, amt) - adjusts the reference count on the
2686 * specified inode by the specified amount. Under normal
2687 * operation the count should always go down. Decrementing
2688 * the count to zero will cause the inode to be freed.
2689 * adjblkcnt(inode, amt) - adjust the number of blocks used by the
2690 * inode by the specified amount.
2691  * setsize(inode, size) - set the size of the inode to the
2692 * specified size.
2693 * adjndir, adjbfree, adjifree, adjffree, adjnumclusters(amt) -
2694 * adjust the superblock summary.
2695 * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
2696 * are marked as free. Inodes should never have to be marked
2697 * as in use.
2698 * freefiles(inode, count) - file inodes [inode..inode + count - 1]
2699 * are marked as free. Inodes should never have to be marked
2700 * as in use.
2701 * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
2702 * are marked as free. Blocks should never have to be marked
2703 * as in use.
2704 * setflags(flags, set/clear) - the fs_flags field has the specified
2705 * flags set (second parameter +1) or cleared (second parameter -1).
2706 * setcwd(dirinode) - set the current directory to dirinode in the
2707 * filesystem associated with the snapshot.
2708 * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
2709 * in the current directory is oldvalue then change it to newvalue.
2710 * unlink(nameptr, oldvalue) - Verify that the inode number associated
2711 * with nameptr in the current directory is oldvalue then unlink it.
2712 *
2713 * The following functions may only be used on a quiescent filesystem
2714 * by the soft updates journal. They are not safe to be run on an active
2715 * filesystem.
2716 *
2717 * setinode(inode, dip) - the specified disk inode is replaced with the
2718 * contents pointed to by dip.
2719 * setbufoutput(fd, flags) - output associated with the specified file
2720 * descriptor (which must reference the character device supporting
2721 * the filesystem) switches from using physio to running through the
2722 * buffer cache when flags is set to 1. The descriptor reverts to
2723 * physio for output when flags is set to zero.
2724 */
2725
2726 static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);
2727
2728 SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT,
2729 0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count");
2730
2731 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR,
2732 sysctl_ffs_fsck, "Adjust Inode Used Blocks Count");
2733
2734 static SYSCTL_NODE(_vfs_ffs, FFS_SET_SIZE, setsize, CTLFLAG_WR,
2735 sysctl_ffs_fsck, "Set the inode size");
2736
2737 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR,
2738 sysctl_ffs_fsck, "Adjust number of directories");
2739
2740 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR,
2741 sysctl_ffs_fsck, "Adjust number of free blocks");
2742
2743 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR,
2744 sysctl_ffs_fsck, "Adjust number of free inodes");
2745
2746 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR,
2747 sysctl_ffs_fsck, "Adjust number of free frags");
2748
2749 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR,
2750 sysctl_ffs_fsck, "Adjust number of free clusters");
2751
2752 static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR,
2753 sysctl_ffs_fsck, "Free Range of Directory Inodes");
2754
2755 static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR,
2756 sysctl_ffs_fsck, "Free Range of File Inodes");
2757
2758 static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR,
2759 sysctl_ffs_fsck, "Free Range of Blocks");
2760
2761 static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR,
2762 sysctl_ffs_fsck, "Change Filesystem Flags");
2763
2764 static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR,
2765 sysctl_ffs_fsck, "Set Current Working Directory");
2766
2767 static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR,
2768 sysctl_ffs_fsck, "Change Value of .. Entry");
2769
2770 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR,
2771 sysctl_ffs_fsck, "Unlink a Duplicate Name");
2772
2773 static SYSCTL_NODE(_vfs_ffs, FFS_SET_INODE, setinode, CTLFLAG_WR,
2774 sysctl_ffs_fsck, "Update an On-Disk Inode");
2775
2776 static SYSCTL_NODE(_vfs_ffs, FFS_SET_BUFOUTPUT, setbufoutput, CTLFLAG_WR,
2777 sysctl_ffs_fsck, "Set Buffered Writing for Descriptor");
2778
2779 #define DEBUG 1
2780 #ifdef DEBUG
2781 static int fsckcmds = 0;
2782 SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, "");
2783 #endif /* DEBUG */
2784
2785 static int buffered_write(struct file *, struct uio *, struct ucred *,
2786 int, struct thread *);
2787
2788 static int
2789 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
2790 {
2791 struct thread *td = curthread;
2792 struct fsck_cmd cmd;
2793 struct ufsmount *ump;
2794 struct vnode *vp, *dvp, *fdvp;
2795 struct inode *ip, *dp;
2796 struct mount *mp;
2797 struct fs *fs;
2798 ufs2_daddr_t blkno;
2799 long blkcnt, blksize;
2800 struct file *fp, *vfp;
2801 cap_rights_t rights;
2802 int filetype, error;
2803 static struct fileops *origops, bufferedops;
2804
2805 if (req->newlen > sizeof cmd)
2806 return (EBADRPC);
2807 if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0)
2808 return (error);
2809 if (cmd.version != FFS_CMD_VERSION)
2810 return (ERPCMISMATCH);
2811 if ((error = getvnode(td, cmd.handle,
2812 cap_rights_init(&rights, CAP_FSCK), &fp)) != 0)
2813 return (error);
2814 vp = fp->f_data;
2815 if (vp->v_type != VREG && vp->v_type != VDIR) {
2816 fdrop(fp, td);
2817 return (EINVAL);
2818 }
2819 vn_start_write(vp, &mp, V_WAIT);
2820 if (mp == NULL ||
2821 strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
2822 vn_finished_write(mp);
2823 fdrop(fp, td);
2824 return (EINVAL);
2825 }
2826 ump = VFSTOUFS(mp);
2827 if ((mp->mnt_flag & MNT_RDONLY) &&
2828 ump->um_fsckpid != td->td_proc->p_pid) {
2829 vn_finished_write(mp);
2830 fdrop(fp, td);
2831 return (EROFS);
2832 }
2833 fs = ump->um_fs;
2834 filetype = IFREG;
2835
2836 switch (oidp->oid_number) {
2837
2838 case FFS_SET_FLAGS:
2839 #ifdef DEBUG
2840 if (fsckcmds)
2841 printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
2842 cmd.size > 0 ? "set" : "clear");
2843 #endif /* DEBUG */
2844 if (cmd.size > 0)
2845 fs->fs_flags |= (long)cmd.value;
2846 else
2847 fs->fs_flags &= ~(long)cmd.value;
2848 break;
2849
2850 case FFS_ADJ_REFCNT:
2851 #ifdef DEBUG
2852 if (fsckcmds) {
2853 printf("%s: adjust inode %jd link count by %jd\n",
2854 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2855 (intmax_t)cmd.size);
2856 }
2857 #endif /* DEBUG */
2858 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2859 break;
2860 ip = VTOI(vp);
2861 ip->i_nlink += cmd.size;
2862 DIP_SET(ip, i_nlink, ip->i_nlink);
2863 ip->i_effnlink += cmd.size;
2864 ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2865 error = ffs_update(vp, 1);
2866 if (DOINGSOFTDEP(vp))
2867 softdep_change_linkcnt(ip);
2868 vput(vp);
2869 break;
2870
2871 case FFS_ADJ_BLKCNT:
2872 #ifdef DEBUG
2873 if (fsckcmds) {
2874 printf("%s: adjust inode %jd block count by %jd\n",
2875 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2876 (intmax_t)cmd.size);
2877 }
2878 #endif /* DEBUG */
2879 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2880 break;
2881 ip = VTOI(vp);
2882 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size);
2883 ip->i_flag |= IN_CHANGE | IN_MODIFIED;
2884 error = ffs_update(vp, 1);
2885 vput(vp);
2886 break;
2887
2888 case FFS_SET_SIZE:
2889 #ifdef DEBUG
2890 if (fsckcmds) {
2891 printf("%s: set inode %jd size to %jd\n",
2892 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
2893 (intmax_t)cmd.size);
2894 }
2895 #endif /* DEBUG */
2896 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
2897 break;
2898 ip = VTOI(vp);
2899 DIP_SET(ip, i_size, cmd.size);
2900 ip->i_flag |= IN_SIZEMOD | IN_CHANGE | IN_MODIFIED;
2901 error = ffs_update(vp, 1);
2902 vput(vp);
2903 break;
2904
2905 case FFS_DIR_FREE:
2906 filetype = IFDIR;
2907 /* fall through */
2908
2909 case FFS_FILE_FREE:
2910 #ifdef DEBUG
2911 if (fsckcmds) {
2912 if (cmd.size == 1)
2913 printf("%s: free %s inode %ju\n",
2914 mp->mnt_stat.f_mntonname,
2915 filetype == IFDIR ? "directory" : "file",
2916 (uintmax_t)cmd.value);
2917 else
2918 printf("%s: free %s inodes %ju-%ju\n",
2919 mp->mnt_stat.f_mntonname,
2920 filetype == IFDIR ? "directory" : "file",
2921 (uintmax_t)cmd.value,
2922 (uintmax_t)(cmd.value + cmd.size - 1));
2923 }
2924 #endif /* DEBUG */
2925 while (cmd.size > 0) {
2926 if ((error = ffs_freefile(ump, fs, ump->um_devvp,
2927 cmd.value, filetype, NULL)))
2928 break;
2929 cmd.size -= 1;
2930 cmd.value += 1;
2931 }
2932 break;
2933
2934 case FFS_BLK_FREE:
2935 #ifdef DEBUG
2936 if (fsckcmds) {
2937 if (cmd.size == 1)
2938 printf("%s: free block %jd\n",
2939 mp->mnt_stat.f_mntonname,
2940 (intmax_t)cmd.value);
2941 else
2942 printf("%s: free blocks %jd-%jd\n",
2943 mp->mnt_stat.f_mntonname,
2944 (intmax_t)cmd.value,
2945 (intmax_t)cmd.value + cmd.size - 1);
2946 }
2947 #endif /* DEBUG */
2948 blkno = cmd.value;
2949 blkcnt = cmd.size;
2950 blksize = fs->fs_frag - (blkno % fs->fs_frag);
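		/*
		 * The first pass frees only up to the next block boundary
		 * so that subsequent passes are block aligned.
		 */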
2951 while (blkcnt > 0) {
2952 if (blksize > blkcnt)
2953 blksize = blkcnt;
2954 ffs_blkfree(ump, fs, ump->um_devvp, blkno,
2955 blksize * fs->fs_fsize, ROOTINO, VDIR, NULL);
2956 blkno += blksize;
2957 blkcnt -= blksize;
2958 blksize = fs->fs_frag;
2959 }
2960 break;
2961
2962 /*
2963 * Adjust superblock summaries. fsck(8) is expected to
2964 * submit deltas when necessary.
2965 */
2966 case FFS_ADJ_NDIR:
2967 #ifdef DEBUG
2968 if (fsckcmds) {
2969 printf("%s: adjust number of directories by %jd\n",
2970 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2971 }
2972 #endif /* DEBUG */
2973 fs->fs_cstotal.cs_ndir += cmd.value;
2974 break;
2975
2976 case FFS_ADJ_NBFREE:
2977 #ifdef DEBUG
2978 if (fsckcmds) {
2979 printf("%s: adjust number of free blocks by %+jd\n",
2980 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2981 }
2982 #endif /* DEBUG */
2983 fs->fs_cstotal.cs_nbfree += cmd.value;
2984 break;
2985
2986 case FFS_ADJ_NIFREE:
2987 #ifdef DEBUG
2988 if (fsckcmds) {
2989 printf("%s: adjust number of free inodes by %+jd\n",
2990 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
2991 }
2992 #endif /* DEBUG */
2993 fs->fs_cstotal.cs_nifree += cmd.value;
2994 break;
2995
2996 case FFS_ADJ_NFFREE:
2997 #ifdef DEBUG
2998 if (fsckcmds) {
2999 printf("%s: adjust number of free frags by %+jd\n",
3000 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3001 }
3002 #endif /* DEBUG */
3003 fs->fs_cstotal.cs_nffree += cmd.value;
3004 break;
3005
3006 case FFS_ADJ_NUMCLUSTERS:
3007 #ifdef DEBUG
3008 if (fsckcmds) {
3009 printf("%s: adjust number of free clusters by %+jd\n",
3010 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3011 }
3012 #endif /* DEBUG */
3013 fs->fs_cstotal.cs_numclusters += cmd.value;
3014 break;
3015
3016 case FFS_SET_CWD:
3017 #ifdef DEBUG
3018 if (fsckcmds) {
3019 printf("%s: set current directory to inode %jd\n",
3020 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3021 }
3022 #endif /* DEBUG */
3023 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
3024 break;
3025 AUDIT_ARG_VNODE1(vp);
3026 if ((error = change_dir(vp, td)) != 0) {
3027 vput(vp);
3028 break;
3029 }
3030 VOP_UNLOCK(vp, 0);
3031 pwd_chdir(td, vp);
3032 break;
3033
3034 case FFS_SET_DOTDOT:
3035 #ifdef DEBUG
3036 if (fsckcmds) {
3037 printf("%s: change .. in cwd from %jd to %jd\n",
3038 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
3039 (intmax_t)cmd.size);
3040 }
3041 #endif /* DEBUG */
3042 /*
3043 * First we have to get and lock the parent directory
3044 * to which ".." points.
3045 */
3046 error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
3047 if (error)
3048 break;
3049 /*
3050 * Now we get and lock the child directory containing "..".
3051 */
3052 FILEDESC_SLOCK(td->td_proc->p_fd);
3053 dvp = td->td_proc->p_fd->fd_cdir;
3054 FILEDESC_SUNLOCK(td->td_proc->p_fd);
3055 if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) {
3056 vput(fdvp);
3057 break;
3058 }
3059 dp = VTOI(dvp);
3060 dp->i_offset = 12; /* XXX mastertemplate.dot_reclen */
3061 error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
3062 DT_DIR, 0);
3063 cache_purge(fdvp);
3064 cache_purge(dvp);
3065 vput(dvp);
3066 vput(fdvp);
3067 break;
3068
3069 case FFS_UNLINK:
3070 #ifdef DEBUG
3071 if (fsckcmds) {
3072 char buf[32];
3073
3074 			if (copyinstr((char *)(intptr_t)cmd.value, buf, 32, NULL))
3075 strncpy(buf, "Name_too_long", 32);
3076 printf("%s: unlink %s (inode %jd)\n",
3077 mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
3078 }
3079 #endif /* DEBUG */
3080 /*
3081 * kern_unlinkat will do its own start/finish writes and
3082 * they do not nest, so drop ours here. Setting mp == NULL
3083 * indicates that vn_finished_write is not needed down below.
3084 */
3085 vn_finished_write(mp);
3086 mp = NULL;
3087 error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value,
3088 UIO_USERSPACE, (ino_t)cmd.size);
3089 break;
3090
3091 case FFS_SET_INODE:
3092 if (ump->um_fsckpid != td->td_proc->p_pid) {
3093 error = EPERM;
3094 break;
3095 }
3096 #ifdef DEBUG
3097 if (fsckcmds) {
3098 printf("%s: update inode %jd\n",
3099 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
3100 }
3101 #endif /* DEBUG */
3102 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
3103 break;
3104 AUDIT_ARG_VNODE1(vp);
3105 ip = VTOI(vp);
3106 if (I_IS_UFS1(ip))
3107 error = copyin((void *)(intptr_t)cmd.size, ip->i_din1,
3108 sizeof(struct ufs1_dinode));
3109 else
3110 error = copyin((void *)(intptr_t)cmd.size, ip->i_din2,
3111 sizeof(struct ufs2_dinode));
3112 if (error) {
3113 vput(vp);
3114 break;
3115 }
3116 ip->i_flag |= IN_CHANGE | IN_MODIFIED;
3117 error = ffs_update(vp, 1);
3118 vput(vp);
3119 break;
3120
3121 case FFS_SET_BUFOUTPUT:
3122 if (ump->um_fsckpid != td->td_proc->p_pid) {
3123 error = EPERM;
3124 break;
3125 }
3126 if (ITOUMP(VTOI(vp)) != ump) {
3127 error = EINVAL;
3128 break;
3129 }
3130 #ifdef DEBUG
3131 if (fsckcmds) {
3132 printf("%s: %s buffered output for descriptor %jd\n",
3133 mp->mnt_stat.f_mntonname,
3134 cmd.size == 1 ? "enable" : "disable",
3135 (intmax_t)cmd.value);
3136 }
3137 #endif /* DEBUG */
3138 if ((error = getvnode(td, cmd.value,
3139 cap_rights_init(&rights, CAP_FSCK), &vfp)) != 0)
3140 break;
3141 if (vfp->f_vnode->v_type != VCHR) {
3142 fdrop(vfp, td);
3143 error = EINVAL;
3144 break;
3145 }
3146 if (origops == NULL) {
3147 origops = vfp->f_ops;
3148 bcopy((void *)origops, (void *)&bufferedops,
3149 sizeof(bufferedops));
3150 bufferedops.fo_write = buffered_write;
3151 }
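		/*
		 * Atomically switch the descriptor between the buffered
		 * and original fileops; the two differ only in fo_write,
		 * which buffered_write() provides.
		 */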
3152 if (cmd.size == 1)
3153 atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3154 (uintptr_t)&bufferedops);
3155 else
3156 atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops,
3157 (uintptr_t)origops);
3158 fdrop(vfp, td);
3159 break;
3160
3161 default:
3162 #ifdef DEBUG
3163 if (fsckcmds) {
3164 printf("Invalid request %d from fsck\n",
3165 oidp->oid_number);
3166 }
3167 #endif /* DEBUG */
3168 error = EINVAL;
3169 break;
3170
3171 }
3172 fdrop(fp, td);
3173 vn_finished_write(mp);
3174 return (error);
3175 }
3176
3177 /*
3178 * Function to switch a descriptor to use the buffer cache to stage
3179 * its I/O. This is needed so that writes to the filesystem device
3180 * will give snapshots a chance to copy modified blocks for which it
3181 * needs to retain copies.
3182 */
3183 static int
3184 buffered_write(fp, uio, active_cred, flags, td)
3185 struct file *fp;
3186 struct uio *uio;
3187 struct ucred *active_cred;
3188 int flags;
3189 struct thread *td;
3190 {
3191 struct vnode *devvp, *vp;
3192 struct inode *ip;
3193 struct buf *bp;
3194 struct fs *fs;
3195 struct filedesc *fdp;
3196 int error;
3197 daddr_t lbn;
3198
3199 /*
3200 * The devvp is associated with the /dev filesystem. To discover
3201 * the filesystem with which the device is associated, we depend
3202 * on the application setting the current directory to a location
3203 * within the filesystem being written. Yes, this is an ugly hack.
3204 */
3205 devvp = fp->f_vnode;
3206 if (!vn_isdisk(devvp, NULL))
3207 return (EINVAL);
3208 fdp = td->td_proc->p_fd;
3209 FILEDESC_SLOCK(fdp);
3210 vp = fdp->fd_cdir;
3211 vref(vp);
3212 FILEDESC_SUNLOCK(fdp);
3213 vn_lock(vp, LK_SHARED | LK_RETRY);
3214 /*
3215 * Check that the current directory vnode indeed belongs to
3216 * UFS before trying to dereference UFS-specific v_data fields.
3217 */
3218 if (vp->v_op != &ffs_vnodeops1 && vp->v_op != &ffs_vnodeops2) {
3219 vput(vp);
3220 return (EINVAL);
3221 }
3222 ip = VTOI(vp);
3223 if (ITODEVVP(ip) != devvp) {
3224 vput(vp);
3225 return (EINVAL);
3226 }
3227 fs = ITOFS(ip);
3228 vput(vp);
3229 foffset_lock_uio(fp, uio, flags);
3230 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
3231 #ifdef DEBUG
3232 if (fsckcmds) {
3233 printf("%s: buffered write for block %jd\n",
3234 fs->fs_fsmnt, (intmax_t)btodb(uio->uio_offset));
3235 }
3236 #endif /* DEBUG */
3237 /*
3238 * All I/O must be contained within a filesystem block, start on
3239 * a fragment boundary, and be a multiple of fragments in length.
3240 */
3241 if (uio->uio_resid > fs->fs_bsize - (uio->uio_offset % fs->fs_bsize) ||
3242 fragoff(fs, uio->uio_offset) != 0 ||
3243 fragoff(fs, uio->uio_resid) != 0) {
3244 error = EINVAL;
3245 goto out;
3246 }
3247 lbn = numfrags(fs, uio->uio_offset);
3248 bp = getblk(devvp, lbn, uio->uio_resid, 0, 0, 0);
3249 bp->b_flags |= B_RELBUF;
3250 if ((error = uiomove((char *)bp->b_data, uio->uio_resid, uio)) != 0) {
3251 brelse(bp);
3252 goto out;
3253 }
3254 error = bwrite(bp);
3255 out:
3256 VOP_UNLOCK(devvp, 0);
3257 foffset_unlock_uio(fp, uio, flags | FOF_NEXTOFF);
3258 return (error);
3259 }