1 /*
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)ffs_alloc.c 8.8 (Berkeley) 2/21/94
34 * $FreeBSD: src/sys/ufs/ffs/ffs_alloc.c,v 1.26.2.2 1999/09/05 08:23:31 peter Exp $
35 */
36
37 #include "opt_quota.h"
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/buf.h>
42 #include <sys/proc.h>
43 #include <sys/vnode.h>
44 #include <sys/mount.h>
45 #include <sys/kernel.h>
46 #include <sys/sysctl.h>
47 #include <sys/syslog.h>
48
49 #include <vm/vm.h>
50
51 #include <ufs/ufs/quota.h>
52 #include <ufs/ufs/inode.h>
53 #include <ufs/ufs/ufs_extern.h>
54
55 #include <ufs/ffs/fs.h>
56 #include <ufs/ffs/ffs_extern.h>
57
58 typedef daddr_t allocfcn_t __P((struct inode *ip, int cg, daddr_t bpref,
59 int size));
60
61 static daddr_t ffs_alloccg __P((struct inode *, int, daddr_t, int));
62 static daddr_t ffs_alloccgblk __P((struct fs *, struct cg *, daddr_t));
63 #ifdef notyet
64 static daddr_t ffs_clusteralloc __P((struct inode *, int, daddr_t, int));
65 #endif
66 static ino_t ffs_dirpref __P((struct fs *));
67 static daddr_t ffs_fragextend __P((struct inode *, int, long, int, int));
68 static void ffs_fserr __P((struct fs *, u_int, char *));
69 static u_long ffs_hashalloc
70 __P((struct inode *, int, long, int, allocfcn_t *));
71 static ino_t ffs_nodealloccg __P((struct inode *, int, daddr_t, int));
72 static daddr_t ffs_mapsearch __P((struct fs *, struct cg *, daddr_t, int));
73
74 static void ffs_clusteracct __P((struct fs *, struct cg *, daddr_t, int));
75
76 /*
77 * Allocate a block in the file system.
78 *
79 * The size of the requested block is given, which must be some
80 * multiple of fs_fsize and <= fs_bsize.
81 * A preference may be optionally specified. If a preference is given
82 * the following hierarchy is used to allocate a block:
83 * 1) allocate the requested block.
84 * 2) allocate a rotationally optimal block in the same cylinder.
85 * 3) allocate a block in the same cylinder group.
86 * 4) quadratically rehash into other cylinder groups, until an
87 * available block is located.
88 * If no block preference is given the following hierarchy is used
89 * to allocate a block:
90 * 1) allocate a block in the cylinder group that contains the
91 * inode for the file.
92 * 2) quadratically rehash into other cylinder groups, until an
93 * available block is located.
94 */
95 int
96 ffs_alloc(ip, lbn, bpref, size, cred, bnp)
97 register struct inode *ip;
98 daddr_t lbn, bpref;
99 int size;
100 struct ucred *cred;
101 daddr_t *bnp;
102 {
103 register struct fs *fs;
104 daddr_t bno;
105 int cg;
106 #ifdef QUOTA
107 int error;
108 #endif
109
110
111 *bnp = 0;
112 fs = ip->i_fs;
113 #ifdef DIAGNOSTIC
114 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
115 printf("dev = 0x%lx, bsize = %ld, size = %d, fs = %s\n",
116 (u_long)ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
117 panic("ffs_alloc: bad size");
118 }
119 if (cred == NOCRED)
120 panic("ffs_alloc: missing credential");
121 #endif /* DIAGNOSTIC */
122 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
123 goto nospace;
124 if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
125 goto nospace;
126 #ifdef QUOTA
127 error = chkdq(ip, (long)btodb(size), cred, 0);
128 if (error)
129 return (error);
130 #endif
131 if (bpref >= fs->fs_size)
132 bpref = 0;
133 if (bpref == 0)
134 cg = ino_to_cg(fs, ip->i_number);
135 else
136 cg = dtog(fs, bpref);
137 bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size, ffs_alloccg);
138 if (bno > 0) {
139 ip->i_blocks += btodb(size);
140 ip->i_flag |= IN_CHANGE | IN_UPDATE;
141 *bnp = bno;
142 return (0);
143 }
144 #ifdef QUOTA
145 /*
146 * Restore user's disk quota because allocation failed.
147 */
148 (void) chkdq(ip, (long)-btodb(size), cred, FORCE);
149 #endif
150 nospace:
151 ffs_fserr(fs, cred->cr_uid, "file system full");
152 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
153 return (ENOSPC);
154 }
155
156 /*
157 * Reallocate a fragment to a bigger size.
158 *
159 * The number and size of the old block are given, and a preference
160 * and new size are also specified. The allocator attempts to extend
161 * the original block. Failing that, the regular block allocator is
162 * invoked to get an appropriate block.
163 */
164 int
165 ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
166 register struct inode *ip;
167 daddr_t lbprev;
168 daddr_t bpref;
169 int osize, nsize;
170 struct ucred *cred;
171 struct buf **bpp;
172 {
173 register struct fs *fs;
174 struct buf *bp;
175 int cg, request, error;
176 daddr_t bprev, bno;
177
178 *bpp = 0;
179 fs = ip->i_fs;
180 #ifdef DIAGNOSTIC
181 if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
182 (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
183 printf(
184 "dev = 0x%lx, bsize = %ld, osize = %d, "
185 "nsize = %d, fs = %s\n",
186 (u_long)ip->i_dev, fs->fs_bsize, osize,
187 nsize, fs->fs_fsmnt);
188 panic("ffs_realloccg: bad size");
189 }
190 if (cred == NOCRED)
191 panic("ffs_realloccg: missing credential");
192 #endif /* DIAGNOSTIC */
193 if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
194 goto nospace;
195 if ((bprev = ip->i_db[lbprev]) == 0) {
196 printf("dev = 0x%lx, bsize = %ld, bprev = %ld, fs = %s\n",
197 (u_long) ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
198 panic("ffs_realloccg: bad bprev");
199 }
200 /*
201 * Allocate the extra space in the buffer.
202 */
203 error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp);
204 if (error) {
205 brelse(bp);
206 return (error);
207 }
208
209 if (bp->b_blkno == bp->b_lblkno) {
210 if (lbprev >= NDADDR)
211 panic("ffs_realloccg: lbprev out of range");
212 bp->b_blkno = fsbtodb(fs, bprev);
213 }
214
215 #ifdef QUOTA
216 error = chkdq(ip, (long)btodb(nsize - osize), cred, 0);
217 if (error) {
218 brelse(bp);
219 return (error);
220 }
221 #endif
222 /*
223 * Check for extension in the existing location.
224 */
225 cg = dtog(fs, bprev);
226 bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize);
227 if (bno) {
228 if (bp->b_blkno != fsbtodb(fs, bno))
229 panic("bad blockno");
230 ip->i_blocks += btodb(nsize - osize);
231 ip->i_flag |= IN_CHANGE | IN_UPDATE;
232 allocbuf(bp, nsize);
233 bp->b_flags |= B_DONE;
234 bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
235 *bpp = bp;
236 return (0);
237 }
238 /*
239 * Allocate a new disk location.
240 */
241 if (bpref >= fs->fs_size)
242 bpref = 0;
243 switch ((int)fs->fs_optim) {
244 case FS_OPTSPACE:
245 /*
246 * Allocate an exact sized fragment. Although this makes
247 * best use of space, we will waste time relocating it if
248 * the file continues to grow. If the fragmentation is
249 * less than half of the minimum free reserve, we choose
250 * to begin optimizing for time.
251 */
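/*
 * Illustration with hypothetical numbers: with fs_minfree = 8 and
 * fs_dsize = 1000000 fragments, the threshold below is
 * 1000000 * 8 / (2 * 100) = 40000.  While more than 40000 fragments
 * are free (heavy fragmentation) the SPACE policy is kept; once the
 * free fragment count drops to 40000 or less, the policy switches
 * to TIME.
 */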
252 request = nsize;
253 if (fs->fs_minfree <= 5 ||
254 fs->fs_cstotal.cs_nffree >
255 fs->fs_dsize * fs->fs_minfree / (2 * 100))
256 break;
257 log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
258 fs->fs_fsmnt);
259 fs->fs_optim = FS_OPTTIME;
260 break;
261 case FS_OPTTIME:
262 /*
263 * At this point we have discovered a file that is trying to
264 * grow a small fragment to a larger fragment. To save time,
265 * we allocate a full sized block, then free the unused portion.
266 * If the file continues to grow, the `ffs_fragextend' call
267 * above will be able to grow it in place without further
268 * copying. If aberrant programs cause disk fragmentation to
269 * grow within 2% of the free reserve, we choose to begin
270 * optimizing for space.
271 */
272 request = fs->fs_bsize;
273 if (fs->fs_cstotal.cs_nffree <
274 fs->fs_dsize * (fs->fs_minfree - 2) / 100)
275 break;
276 log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
277 fs->fs_fsmnt);
278 fs->fs_optim = FS_OPTSPACE;
279 break;
280 default:
281 printf("dev = 0x%lx, optim = %ld, fs = %s\n",
282 (u_long)ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
283 panic("ffs_realloccg: bad optim");
284 /* NOTREACHED */
285 }
286 bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request, ffs_alloccg);
287 if (bno > 0) {
288 bp->b_blkno = fsbtodb(fs, bno);
289 ffs_blkfree(ip, bprev, (long)osize);
290 if (nsize < request)
291 ffs_blkfree(ip, bno + numfrags(fs, nsize),
292 (long)(request - nsize));
293 ip->i_blocks += btodb(nsize - osize);
294 ip->i_flag |= IN_CHANGE | IN_UPDATE;
295 allocbuf(bp, nsize);
296 bp->b_flags |= B_DONE;
297 bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
298 *bpp = bp;
299 return (0);
300 }
301 #ifdef QUOTA
302 /*
303 * Restore user's disk quota because allocation failed.
304 */
305 (void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
306 #endif
307 brelse(bp);
308 nospace:
309 /*
310 * no space available
311 */
312 ffs_fserr(fs, cred->cr_uid, "file system full");
313 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
314 return (ENOSPC);
315 }
316
317 /*
318 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
319 *
320 * The vnode and an array of buffer pointers for a range of sequential
321 * logical blocks to be made contiguous is given. The allocator attempts
322 * to find a range of sequential blocks starting as close as possible to
323 * an fs_rotdelay offset from the end of the allocation for the logical
324 * block immediately preceding the current range. If successful, the
325 * physical block numbers in the buffer pointers and in the inode are
326 * changed to reflect the new allocation. If unsuccessful, the allocation
327 * is left unchanged. The success in doing the reallocation is returned.
328 * Note that the error return is not reflected back to the user. Rather
329 * the previous block allocation will be used.
330 */
331 static int doasyncfree = 1;
332 SYSCTL_INT(_debug, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "");
333 int
334 ffs_reallocblks(ap)
335 struct vop_reallocblks_args /* {
336 struct vnode *a_vp;
337 struct cluster_save *a_buflist;
338 } */ *ap;
339 {
340 #if !defined (not_yes)
341 return (ENOSPC);
342 #else
343 struct fs *fs;
344 struct inode *ip;
345 struct vnode *vp;
346 struct buf *sbp, *ebp;
347 daddr_t *bap, *sbap, *ebap = 0;
348 struct cluster_save *buflist;
349 daddr_t start_lbn, end_lbn, soff, newblk, blkno;
350 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
351 int i, len, start_lvl, end_lvl, pref, ssize;
352 struct timeval tv;
353
354 vp = ap->a_vp;
355 ip = VTOI(vp);
356 fs = ip->i_fs;
357 if (fs->fs_contigsumsize <= 0)
358 return (ENOSPC);
359 buflist = ap->a_buflist;
360 len = buflist->bs_nchildren;
361 start_lbn = buflist->bs_children[0]->b_lblkno;
362 end_lbn = start_lbn + len - 1;
363 #ifdef DIAGNOSTIC
364 for (i = 1; i < len; i++)
365 if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
366 panic("ffs_reallocblks: non-cluster");
367 #endif
368 /*
369 * If the latest allocation is in a new cylinder group, assume that
370 * the filesystem has decided to move and do not force it back to
371 * the previous cylinder group.
372 */
373 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
374 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
375 return (ENOSPC);
376 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
377 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
378 return (ENOSPC);
379 /*
380 * Get the starting offset and block map for the first block.
381 */
382 if (start_lvl == 0) {
383 sbap = &ip->i_db[0];
384 soff = start_lbn;
385 } else {
386 idp = &start_ap[start_lvl - 1];
387 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
388 brelse(sbp);
389 return (ENOSPC);
390 }
391 sbap = (daddr_t *)sbp->b_data;
392 soff = idp->in_off;
393 }
394 /*
395 * Find the preferred location for the cluster.
396 */
397 pref = ffs_blkpref(ip, start_lbn, soff, sbap);
398 /*
399 * If the block range spans two block maps, get the second map.
400 */
401 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
402 ssize = len;
403 } else {
404 #ifdef DIAGNOSTIC
405 if (start_ap[start_lvl-1].in_lbn == idp->in_lbn)
406 panic("ffs_reallocblk: start == end");
407 #endif
408 ssize = len - (idp->in_off + 1);
409 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
410 goto fail;
411 ebap = (daddr_t *)ebp->b_data;
412 }
413 /*
414 * Search the block map looking for an allocation of the desired size.
415 */
416 if ((newblk = (daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref,
417 len, ffs_clusteralloc)) == 0)
418 goto fail;
419 /*
420 * We have found a new contiguous block.
421 *
422 * First we have to replace the old block pointers with the new
423 * block pointers in the inode and indirect blocks associated
424 * with the file.
425 */
426 blkno = newblk;
427 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
428 if (i == ssize)
429 bap = ebap;
430 #ifdef DIAGNOSTIC
431 if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap))
432 panic("ffs_reallocblks: alloc mismatch");
433 #endif
434 *bap++ = blkno;
435 }
436 /*
437 * Next we must write out the modified inode and indirect blocks.
438 * For strict correctness, the writes should be synchronous since
439 * the old block values may have been written to disk. In practice
440 * they are almost never written, but if we are concerned about
441 * strict correctness, the `doasyncfree' flag should be set to zero.
442 *
443 * The test on `doasyncfree' should be changed to test a flag
444 * that shows whether the associated buffers and inodes have
445 * been written. The flag should be set when the cluster is
446 * started and cleared whenever the buffer or inode is flushed.
447 * We can then check below to see if it is set, and do the
448 * synchronous write only when it has been cleared.
449 */
450 if (sbap != &ip->i_db[0]) {
451 if (doasyncfree)
452 bdwrite(sbp);
453 else
454 bwrite(sbp);
455 } else {
456 ip->i_flag |= IN_CHANGE | IN_UPDATE;
457 if (!doasyncfree) {
458 tv = time;
459 VOP_UPDATE(vp, &tv, &tv, 1);
460 }
461 }
462 if (ssize < len)
463 if (doasyncfree)
464 bdwrite(ebp);
465 else
466 bwrite(ebp);
467 /*
468 * Last, free the old blocks and assign the new blocks to the buffers.
469 */
470 for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
471 ffs_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno),
472 fs->fs_bsize);
473 buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
474 }
475 return (0);
476
477 fail:
478 if (ssize < len)
479 brelse(ebp);
480 if (sbap != &ip->i_db[0])
481 brelse(sbp);
482 return (ENOSPC);
483 #endif
484 }
485
486 /*
487 * Allocate an inode in the file system.
488 *
489 * If allocating a directory, use ffs_dirpref to select the inode.
490 * If allocating in a directory, the following hierarchy is followed:
491 * 1) allocate the preferred inode.
492 * 2) allocate an inode in the same cylinder group.
493 * 3) quadratically rehash into other cylinder groups, until an
494 * available inode is located.
495 * If no inode preference is given the following hierarchy is used
496 * to allocate an inode:
497 * 1) allocate an inode in cylinder group 0.
498 * 2) quadratically rehash into other cylinder groups, until an
499 * available inode is located.
500 */
501 int
502 ffs_valloc(ap)
503 struct vop_valloc_args /* {
504 struct vnode *a_pvp;
505 int a_mode;
506 struct ucred *a_cred;
507 struct vnode **a_vpp;
508 } */ *ap;
509 {
510 register struct vnode *pvp = ap->a_pvp;
511 register struct inode *pip;
512 register struct fs *fs;
513 register struct inode *ip;
514 mode_t mode = ap->a_mode;
515 ino_t ino, ipref;
516 int cg, error;
517
518 *ap->a_vpp = NULL;
519 pip = VTOI(pvp);
520 fs = pip->i_fs;
521 if (fs->fs_cstotal.cs_nifree == 0)
522 goto noinodes;
523
524 if ((mode & IFMT) == IFDIR)
525 ipref = ffs_dirpref(fs);
526 else
527 ipref = pip->i_number;
528 if (ipref >= fs->fs_ncg * fs->fs_ipg)
529 ipref = 0;
530 cg = ino_to_cg(fs, ipref);
531 ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode,
532 (allocfcn_t *)ffs_nodealloccg);
533 if (ino == 0)
534 goto noinodes;
535 error = VFS_VGET(pvp->v_mount, ino, ap->a_vpp);
536 if (error) {
537 VOP_VFREE(pvp, ino, mode);
538 return (error);
539 }
540 ip = VTOI(*ap->a_vpp);
541 if (ip->i_mode) {
542 printf("mode = 0%o, inum = %ld, fs = %s\n",
543 ip->i_mode, ip->i_number, fs->fs_fsmnt);
544 panic("ffs_valloc: dup alloc");
545 }
546 if (ip->i_blocks) { /* XXX */
547 printf("free inode %s/%ld had %ld blocks\n",
548 fs->fs_fsmnt, ino, ip->i_blocks);
549 ip->i_blocks = 0;
550 }
551 ip->i_flags = 0;
552 /*
553 * Set up a new generation number for this inode.
554 */
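/*
 * The generation number lets NFS file handles distinguish different
 * incarnations of the same inode number; a value of zero is treated
 * as unset, so the code reseeds with random() / 2 + 1, which is
 * always nonzero.
 */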
555 if (ip->i_gen == 0 || ++(ip->i_gen) == 0)
556 ip->i_gen = random() / 2 + 1;
557 return (0);
558 noinodes:
559 ffs_fserr(fs, ap->a_cred->cr_uid, "out of inodes");
560 uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
561 return (ENOSPC);
562 }
563
564 /*
565 * Find a cylinder group in which to place a directory.
566 *
567 * The policy implemented by this algorithm is to select from
568 * among those cylinder groups with above the average number of
569 * free inodes, the one with the smallest number of directories.
570 */
571 static ino_t
572 ffs_dirpref(fs)
573 register struct fs *fs;
574 {
575 int cg, minndir, mincg, avgifree;
576
577 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
578 minndir = fs->fs_ipg;
579 mincg = 0;
580 for (cg = 0; cg < fs->fs_ncg; cg++)
581 if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
582 fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
583 mincg = cg;
584 minndir = fs->fs_cs(fs, cg).cs_ndir;
585 }
586 return ((ino_t)(fs->fs_ipg * mincg));
587 }
588
589 /*
590 * Select the desired position for the next block in a file. The file is
591 * logically divided into sections. The first section is composed of the
592 * direct blocks. Each additional section contains fs_maxbpg blocks.
593 *
594 * If no blocks have been allocated in the first section, the policy is to
595 * request a block in the same cylinder group as the inode that describes
596 * the file. If no blocks have been allocated in any other section, the
597 * policy is to place the section in a cylinder group with a greater than
598 * average number of free blocks. An appropriate cylinder group is found
599 * by using a rotor that sweeps the cylinder groups. When a new group of
600 * blocks is needed, the sweep begins in the cylinder group following the
601 * cylinder group from which the previous allocation was made. The sweep
602 * continues until a cylinder group with greater than the average number
603 * of free blocks is found. If the allocation is for the first block in an
604 * indirect block, the information on the previous allocation is unavailable;
605 * here a best guess is made based upon the logical block number being
606 * allocated.
607 *
608 * If a section is already partially allocated, the policy is to
609 * contiguously allocate fs_maxcontig blocks. The end of one of these
610 * contiguous blocks and the beginning of the next is physically separated
611 * so that the disk head will be in transit between them for at least
612 * fs_rotdelay milliseconds. This is to allow time for the processor to
613 * schedule another I/O transfer.
614 */
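/*
 * For illustration (hypothetical tuning): with fs_maxbpg = 2048 and
 * 8K blocks, a new section, and therefore a fresh search for a
 * cylinder group with above-average free space, begins every 16MB
 * of file growth.
 */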
615 daddr_t
616 ffs_blkpref(ip, lbn, indx, bap)
617 struct inode *ip;
618 daddr_t lbn;
619 int indx;
620 daddr_t *bap;
621 {
622 register struct fs *fs;
623 register int cg;
624 int avgbfree, startcg;
625 daddr_t nextblk;
626
627 fs = ip->i_fs;
628 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
629 if (lbn < NDADDR) {
630 cg = ino_to_cg(fs, ip->i_number);
631 return (fs->fs_fpg * cg + fs->fs_frag);
632 }
633 /*
634 * Find a cylinder with greater than average number of
635 * unused data blocks.
636 */
637 if (indx == 0 || bap[indx - 1] == 0)
638 startcg =
639 ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
640 else
641 startcg = dtog(fs, bap[indx - 1]) + 1;
642 startcg %= fs->fs_ncg;
643 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
644 for (cg = startcg; cg < fs->fs_ncg; cg++)
645 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
646 fs->fs_cgrotor = cg;
647 return (fs->fs_fpg * cg + fs->fs_frag);
648 }
649 for (cg = 0; cg <= startcg; cg++)
650 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
651 fs->fs_cgrotor = cg;
652 return (fs->fs_fpg * cg + fs->fs_frag);
653 }
654 return (0);
655 }
656 /*
657 * One or more previous blocks have been laid out. If less
658 * than fs_maxcontig previous blocks are contiguous, the
659 * next block is requested contiguously, otherwise it is
660 * requested rotationally delayed by fs_rotdelay milliseconds.
661 */
662 nextblk = bap[indx - 1] + fs->fs_frag;
663 if (fs->fs_rotdelay == 0 || indx < fs->fs_maxcontig ||
664 bap[indx - fs->fs_maxcontig] +
665 blkstofrags(fs, fs->fs_maxcontig) != nextblk)
666 return (nextblk);
667 /*
668 * Here we convert ms of delay to frags as:
669 * (frags) = (ms) * (rev/sec) * (sect/rev) /
670 * ((sect/frag) * (ms/sec))
671 * then round up to the next block.
672 */
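/*
 * For example, with a hypothetical geometry of fs_rotdelay = 4ms,
 * fs_rps = 60, fs_nsect = 32 and NSPF(fs) = 2, the expression is
 * 4 * 60 * 32 / (2 * 1000) = 3 fragments (integer division), which
 * roundup() then raises to the next multiple of fs_frag.
 */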
673 nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
674 (NSPF(fs) * 1000), fs->fs_frag);
675 return (nextblk);
676 }
677
678 /*
679 * Implement the cylinder overflow algorithm.
680 *
681 * The policy implemented by this algorithm is:
682 * 1) allocate the block in its requested cylinder group.
683 * 2) quadratically rehash on the cylinder group number.
684 * 3) brute force search for a free block.
685 */
686 /*VARARGS5*/
687 static u_long
688 ffs_hashalloc(ip, cg, pref, size, allocator)
689 struct inode *ip;
690 int cg;
691 long pref;
692 int size; /* size for data blocks, mode for inodes */
693 allocfcn_t *allocator;
694 {
695 register struct fs *fs;
696 long result; /* XXX why not same type as we return? */
697 int i, icg = cg;
698
699 fs = ip->i_fs;
700 /*
701 * 1: preferred cylinder group
702 */
703 result = (*allocator)(ip, cg, pref, size);
704 if (result)
705 return (result);
706 /*
707 * 2: quadratic rehash
708 */
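/*
 * The offsets probed are 1, 3, 7, 15, ... (2^k - 1) from the
 * preferred group, wrapped back into [0, fs_ncg).
 */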
709 for (i = 1; i < fs->fs_ncg; i *= 2) {
710 cg += i;
711 if (cg >= fs->fs_ncg)
712 cg -= fs->fs_ncg;
713 result = (*allocator)(ip, cg, 0, size);
714 if (result)
715 return (result);
716 }
717 /*
718 * 3: brute force search
719 * Note that we start at i == 2, since 0 was checked initially,
720 * and 1 is always checked in the quadratic rehash.
721 */
722 cg = (icg + 2) % fs->fs_ncg;
723 for (i = 2; i < fs->fs_ncg; i++) {
724 result = (*allocator)(ip, cg, 0, size);
725 if (result)
726 return (result);
727 cg++;
728 if (cg == fs->fs_ncg)
729 cg = 0;
730 }
731 return (0);
732 }
733
734 /*
735 * Determine whether a fragment can be extended.
736 *
737 * Check to see if the necessary fragments are available, and
738 * if they are, allocate them.
739 */
740 static daddr_t
741 ffs_fragextend(ip, cg, bprev, osize, nsize)
742 struct inode *ip;
743 int cg;
744 long bprev;
745 int osize, nsize;
746 {
747 register struct fs *fs;
748 register struct cg *cgp;
749 struct buf *bp;
750 long bno;
751 int frags, bbase;
752 int i, error;
753
754 fs = ip->i_fs;
755 if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
756 return (0);
757 frags = numfrags(fs, nsize);
758 bbase = fragnum(fs, bprev);
759 if (bbase > fragnum(fs, (bprev + frags - 1))) {
760 /* cannot extend across a block boundary */
761 return (0);
762 }
763 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
764 (int)fs->fs_cgsize, NOCRED, &bp);
765 if (error) {
766 brelse(bp);
767 return (0);
768 }
769 cgp = (struct cg *)bp->b_data;
770 if (!cg_chkmagic(cgp)) {
771 brelse(bp);
772 return (0);
773 }
774 cgp->cg_time = time.tv_sec;
775 bno = dtogd(fs, bprev);
776 for (i = numfrags(fs, osize); i < frags; i++)
777 if (isclr(cg_blksfree(cgp), bno + i)) {
778 brelse(bp);
779 return (0);
780 }
781 /*
782 * the current fragment can be extended
783 * deduct the count on fragment being extended into
784 * increase the count on the remaining fragment (if any)
785 * allocate the extended piece
786 */
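/*
 * The loop below finds how far the free run extends beyond the new
 * end of the fragment.  The old run, of length
 * i - numfrags(fs, osize), is removed from cg_frsum; if a shorter
 * tail of i - frags free fragments remains, it is counted back in.
 */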
787 for (i = frags; i < fs->fs_frag - bbase; i++)
788 if (isclr(cg_blksfree(cgp), bno + i))
789 break;
790 cgp->cg_frsum[i - numfrags(fs, osize)]--;
791 if (i != frags)
792 cgp->cg_frsum[i - frags]++;
793 for (i = numfrags(fs, osize); i < frags; i++) {
794 clrbit(cg_blksfree(cgp), bno + i);
795 cgp->cg_cs.cs_nffree--;
796 fs->fs_cstotal.cs_nffree--;
797 fs->fs_cs(fs, cg).cs_nffree--;
798 }
799 fs->fs_fmod = 1;
800 bdwrite(bp);
801 return (bprev);
802 }
803
804 /*
805 * Determine whether a block can be allocated.
806 *
807 * Check to see if a block of the appropriate size is available,
808 * and if it is, allocate it.
809 */
810 static daddr_t
811 ffs_alloccg(ip, cg, bpref, size)
812 struct inode *ip;
813 int cg;
814 daddr_t bpref;
815 int size;
816 {
817 register struct fs *fs;
818 register struct cg *cgp;
819 struct buf *bp;
820 register int i;
821 int error, bno, frags, allocsiz;
822
823 fs = ip->i_fs;
824 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
825 return (0);
826 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
827 (int)fs->fs_cgsize, NOCRED, &bp);
828 if (error) {
829 brelse(bp);
830 return (0);
831 }
832 cgp = (struct cg *)bp->b_data;
833 if (!cg_chkmagic(cgp) ||
834 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
835 brelse(bp);
836 return (0);
837 }
838 cgp->cg_time = time.tv_sec;
839 if (size == fs->fs_bsize) {
840 bno = ffs_alloccgblk(fs, cgp, bpref);
841 bdwrite(bp);
842 return (bno);
843 }
844 /*
845 * check to see if any fragments are already available
846 * allocsiz is the size which will be allocated, hacking
847 * it down to a smaller size if necessary
848 */
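/*
 * cg_frsum[j] counts the free runs of exactly j fragments in this
 * cylinder group, so the first nonzero entry at or above frags
 * identifies a run the request can be carved from.
 */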
849 frags = numfrags(fs, size);
850 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
851 if (cgp->cg_frsum[allocsiz] != 0)
852 break;
853 if (allocsiz == fs->fs_frag) {
854 /*
855 * no fragments were available, so a block will be
856 * allocated, and hacked up
857 */
858 if (cgp->cg_cs.cs_nbfree == 0) {
859 brelse(bp);
860 return (0);
861 }
862 bno = ffs_alloccgblk(fs, cgp, bpref);
863 bpref = dtogd(fs, bno);
864 for (i = frags; i < fs->fs_frag; i++)
865 setbit(cg_blksfree(cgp), bpref + i);
866 i = fs->fs_frag - frags;
867 cgp->cg_cs.cs_nffree += i;
868 fs->fs_cstotal.cs_nffree += i;
869 fs->fs_cs(fs, cg).cs_nffree += i;
870 fs->fs_fmod = 1;
871 cgp->cg_frsum[i]++;
872 bdwrite(bp);
873 return (bno);
874 }
875 bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
876 if (bno < 0) {
877 brelse(bp);
878 return (0);
879 }
880 for (i = 0; i < frags; i++)
881 clrbit(cg_blksfree(cgp), bno + i);
882 cgp->cg_cs.cs_nffree -= frags;
883 fs->fs_cstotal.cs_nffree -= frags;
884 fs->fs_cs(fs, cg).cs_nffree -= frags;
885 fs->fs_fmod = 1;
886 cgp->cg_frsum[allocsiz]--;
887 if (frags != allocsiz)
888 cgp->cg_frsum[allocsiz - frags]++;
889 bdwrite(bp);
890 return (cg * fs->fs_fpg + bno);
891 }
892
893 /*
894 * Allocate a block in a cylinder group.
895 *
896 * This algorithm implements the following policy:
897 * 1) allocate the requested block.
898 * 2) allocate a rotationally optimal block in the same cylinder.
899 * 3) allocate the next available block on the block rotor for the
900 * specified cylinder group.
901 * Note that this routine only allocates fs_bsize blocks; these
902 * blocks may be fragmented by the routine that allocates them.
903 */
904 static daddr_t
905 ffs_alloccgblk(fs, cgp, bpref)
906 register struct fs *fs;
907 register struct cg *cgp;
908 daddr_t bpref;
909 {
910 daddr_t bno, blkno;
911 int cylno, pos, delta;
912 short *cylbp;
913 register int i;
914
915 if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
916 bpref = cgp->cg_rotor;
917 goto norot;
918 }
919 bpref = blknum(fs, bpref);
920 bpref = dtogd(fs, bpref);
921 /*
922 * if the requested block is available, use it
923 */
924 if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
925 bno = bpref;
926 goto gotit;
927 }
928 if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
929 /*
930 * Block layout information is not available.
931 * Leaving bpref unchanged means we take the
932 * next available free block following the one
933 * we just allocated. Hopefully this will at
934 * least hit a track cache on drives of unknown
935 * geometry (e.g. SCSI).
936 */
937 goto norot;
938 }
939 /*
940 * check for a block available on the same cylinder
941 */
942 cylno = cbtocylno(fs, bpref);
943 if (cg_blktot(cgp)[cylno] == 0)
944 goto norot;
945 /*
946 * check the summary information to see if a block is
947 * available in the requested cylinder starting at the
948 * requested rotational position and proceeding around.
949 */
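/*
 * cg_blks(fs, cgp, cylno) is an array of fs_nrpos counters for this
 * cylinder, one per rotational position; a nonzero entry means at
 * least one free block starts at that position.
 */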
950 cylbp = cg_blks(fs, cgp, cylno);
951 pos = cbtorpos(fs, bpref);
952 for (i = pos; i < fs->fs_nrpos; i++)
953 if (cylbp[i] > 0)
954 break;
955 if (i == fs->fs_nrpos)
956 for (i = 0; i < pos; i++)
957 if (cylbp[i] > 0)
958 break;
959 if (cylbp[i] > 0) {
960 /*
961 * found a rotational position, now find the actual
962 * block; panic if none is actually there.
963 */
964 pos = cylno % fs->fs_cpc;
965 bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
966 if (fs_postbl(fs, pos)[i] == -1) {
967 printf("pos = %d, i = %d, fs = %s\n",
968 pos, i, fs->fs_fsmnt);
969 panic("ffs_alloccgblk: cyl groups corrupted");
970 }
971 for (i = fs_postbl(fs, pos)[i];; ) {
972 if (ffs_isblock(fs, cg_blksfree(cgp), bno + i)) {
973 bno = blkstofrags(fs, (bno + i));
974 goto gotit;
975 }
976 delta = fs_rotbl(fs)[i];
977 if (delta <= 0 ||
978 delta + i > fragstoblks(fs, fs->fs_fpg))
979 break;
980 i += delta;
981 }
982 printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
983 panic("ffs_alloccgblk: can't find blk in cyl");
984 }
985 norot:
986 /*
987 * no blocks in the requested cylinder, so take next
988 * available one in this cylinder group.
989 */
990 bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
991 if (bno < 0)
992 return (0);
993 cgp->cg_rotor = bno;
994 gotit:
995 blkno = fragstoblks(fs, bno);
996 ffs_clrblock(fs, cg_blksfree(cgp), (long)blkno);
997 ffs_clusteracct(fs, cgp, blkno, -1);
998 cgp->cg_cs.cs_nbfree--;
999 fs->fs_cstotal.cs_nbfree--;
1000 fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
1001 cylno = cbtocylno(fs, bno);
1002 cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
1003 cg_blktot(cgp)[cylno]--;
1004 fs->fs_fmod = 1;
1005 return (cgp->cg_cgx * fs->fs_fpg + bno);
1006 }
1007
1008 #ifdef notyet
1009 /*
1010 * Determine whether a cluster can be allocated.
1011 *
1012 * We do not currently check for optimal rotational layout if there
1013 * are multiple choices in the same cylinder group. Instead we just
1014 * take the first one that we find following bpref.
1015 */
1016 static daddr_t
1017 ffs_clusteralloc(ip, cg, bpref, len)
1018 struct inode *ip;
1019 int cg;
1020 daddr_t bpref;
1021 int len;
1022 {
1023 register struct fs *fs;
1024 register struct cg *cgp;
1025 struct buf *bp;
1026 int i, run, bno, bit, map;
1027 u_char *mapp;
1028
1029 fs = ip->i_fs;
1030 if (fs->fs_cs(fs, cg).cs_nbfree < len)
1031 return (NULL);
1032 if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
1033 NOCRED, &bp))
1034 goto fail;
1035 cgp = (struct cg *)bp->b_data;
1036 if (!cg_chkmagic(cgp))
1037 goto fail;
1038 /*
1039 * Check to see if a cluster of the needed size (or bigger) is
1040 * available in this cylinder group.
1041 */
1042 for (i = len; i <= fs->fs_contigsumsize; i++)
1043 if (cg_clustersum(cgp)[i] > 0)
1044 break;
1045 if (i > fs->fs_contigsumsize)
1046 goto fail;
1047 /*
1048 * Search the cluster map to find a big enough cluster.
1049 * We take the first one that we find, even if it is larger
1050 * than we need as we prefer to get one close to the previous
1051 * block allocation. We do not search before the current
1052 * preference point as we do not want to allocate a block
1053 * that is allocated before the previous one (as we will
1054 * then have to wait for another pass of the elevator
1055 * algorithm before it will be read). We prefer to fail and
1056 * be recalled to try an allocation in the next cylinder group.
1057 */
1058 if (dtog(fs, bpref) != cg)
1059 bpref = 0;
1060 else
1061 bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
1062 mapp = &cg_clustersfree(cgp)[bpref / NBBY];
1063 map = *mapp++;
1064 bit = 1 << (bpref % NBBY);
1065 for (run = 0, i = bpref; i < cgp->cg_nclusterblks; i++) {
1066 if ((map & bit) == 0) {
1067 run = 0;
1068 } else {
1069 run++;
1070 if (run == len)
1071 break;
1072 }
1073 if ((i & (NBBY - 1)) != (NBBY - 1)) {
1074 bit <<= 1;
1075 } else {
1076 map = *mapp++;
1077 bit = 1;
1078 }
1079 }
1080 if (i == cgp->cg_nclusterblks)
1081 goto fail;
1082 /*
1083 * Allocate the cluster that we have found.
1084 */
1085 bno = cg * fs->fs_fpg + blkstofrags(fs, i - run + 1);
1086 len = blkstofrags(fs, len);
1087 for (i = 0; i < len; i += fs->fs_frag)
1088 if (ffs_alloccgblk(fs, cgp, bno + i) != bno + i)
1089 panic("ffs_clusteralloc: lost block");
1090 bdwrite(bp);
1091 return (bno);
1092
1093 fail:
1094 brelse(bp);
1095 return (0);
1096 }
1097 #endif
1098
1099 /*
1100 * Determine whether an inode can be allocated.
1101 *
1102 * Check to see if an inode is available, and if it is,
1103 * allocate it using the following policy:
1104 * 1) allocate the requested inode.
1105 * 2) allocate the next available inode after the requested
1106 * inode in the specified cylinder group.
1107 */
1108 static ino_t
1109 ffs_nodealloccg(ip, cg, ipref, mode)
1110 struct inode *ip;
1111 int cg;
1112 daddr_t ipref;
1113 int mode;
1114 {
1115 register struct fs *fs;
1116 register struct cg *cgp;
1117 struct buf *bp;
1118 int error, start, len, loc, map, i;
1119
1120 fs = ip->i_fs;
1121 if (fs->fs_cs(fs, cg).cs_nifree == 0)
1122 return (0);
1123 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1124 (int)fs->fs_cgsize, NOCRED, &bp);
1125 if (error) {
1126 brelse(bp);
1127 return (0);
1128 }
1129 cgp = (struct cg *)bp->b_data;
1130 if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
1131 brelse(bp);
1132 return (0);
1133 }
1134 cgp->cg_time = time.tv_sec;
1135 if (ipref) {
1136 ipref %= fs->fs_ipg;
1137 if (isclr(cg_inosused(cgp), ipref))
1138 goto gotit;
1139 }
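/*
 * skpc(0xff, len, p) skips bytes that are fully allocated (0xff) and
 * returns the number of bytes remaining counted from the first byte
 * with a clear bit, or 0 if there is none; hence start + len - loc
 * below indexes the first byte holding a free inode.
 */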
1140 start = cgp->cg_irotor / NBBY;
1141 len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
1142 loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
1143 if (loc == 0) {
1144 len = start + 1;
1145 start = 0;
1146 loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
1147 if (loc == 0) {
1148 printf("cg = %d, irotor = %ld, fs = %s\n",
1149 cg, cgp->cg_irotor, fs->fs_fsmnt);
1150 panic("ffs_nodealloccg: map corrupted");
1151 /* NOTREACHED */
1152 }
1153 }
1154 i = start + len - loc;
1155 map = cg_inosused(cgp)[i];
1156 ipref = i * NBBY;
1157 for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
1158 if ((map & i) == 0) {
1159 cgp->cg_irotor = ipref;
1160 goto gotit;
1161 }
1162 }
1163 printf("fs = %s\n", fs->fs_fsmnt);
1164 panic("ffs_nodealloccg: block not in map");
1165 /* NOTREACHED */
1166 gotit:
1167 setbit(cg_inosused(cgp), ipref);
1168 cgp->cg_cs.cs_nifree--;
1169 fs->fs_cstotal.cs_nifree--;
1170 fs->fs_cs(fs, cg).cs_nifree--;
1171 fs->fs_fmod = 1;
1172 if ((mode & IFMT) == IFDIR) {
1173 cgp->cg_cs.cs_ndir++;
1174 fs->fs_cstotal.cs_ndir++;
1175 fs->fs_cs(fs, cg).cs_ndir++;
1176 }
1177 bdwrite(bp);
1178 return (cg * fs->fs_ipg + ipref);
1179 }
1180
1181 /*
1182 * Free a block or fragment.
1183 *
1184 * The specified block or fragment is placed back in the
1185 * free map. If a fragment is deallocated, a possible
1186 * block reassembly is checked.
1187 */
1188 void
1189 ffs_blkfree(ip, bno, size)
1190 register struct inode *ip;
1191 daddr_t bno;
1192 long size;
1193 {
1194 register struct fs *fs;
1195 register struct cg *cgp;
1196 struct buf *bp;
1197 daddr_t blkno;
1198 int i, error, cg, blk, frags, bbase;
1199
1200 fs = ip->i_fs;
1201 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
1202 printf("dev = 0x%lx, bsize = %ld, size = %ld, fs = %s\n",
1203 (u_long)ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
1204 panic("blkfree: bad size");
1205 }
1206 cg = dtog(fs, bno);
1207 if ((u_int)bno >= fs->fs_size) {
1208 printf("bad block %ld, ino %ld\n", bno, ip->i_number);
1209 ffs_fserr(fs, ip->i_uid, "bad block");
1210 return;
1211 }
1212 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1213 (int)fs->fs_cgsize, NOCRED, &bp);
1214 if (error) {
1215 brelse(bp);
1216 return;
1217 }
1218 cgp = (struct cg *)bp->b_data;
1219 if (!cg_chkmagic(cgp)) {
1220 brelse(bp);
1221 return;
1222 }
1223 cgp->cg_time = time.tv_sec;
1224 bno = dtogd(fs, bno);
1225 if (size == fs->fs_bsize) {
1226 blkno = fragstoblks(fs, bno);
1227 if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
1228 printf("dev = 0x%lx, block = %ld, fs = %s\n",
1229 (u_long) ip->i_dev, bno, fs->fs_fsmnt);
1230 panic("blkfree: freeing free block");
1231 }
1232 ffs_setblock(fs, cg_blksfree(cgp), blkno);
1233 ffs_clusteracct(fs, cgp, blkno, 1);
1234 cgp->cg_cs.cs_nbfree++;
1235 fs->fs_cstotal.cs_nbfree++;
1236 fs->fs_cs(fs, cg).cs_nbfree++;
1237 i = cbtocylno(fs, bno);
1238 cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
1239 cg_blktot(cgp)[i]++;
1240 } else {
1241 bbase = bno - fragnum(fs, bno);
1242 /*
1243 * decrement the counts associated with the old frags
1244 */
1245 blk = blkmap(fs, cg_blksfree(cgp), bbase);
1246 ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
1247 /*
1248 * deallocate the fragment
1249 */
1250 frags = numfrags(fs, size);
1251 for (i = 0; i < frags; i++) {
1252 if (isset(cg_blksfree(cgp), bno + i)) {
1253 printf("dev = 0x%lx, block = %ld, fs = %s\n",
1254 (u_long) ip->i_dev, bno + i, fs->fs_fsmnt);
1255 panic("blkfree: freeing free frag");
1256 }
1257 setbit(cg_blksfree(cgp), bno + i);
1258 }
1259 cgp->cg_cs.cs_nffree += i;
1260 fs->fs_cstotal.cs_nffree += i;
1261 fs->fs_cs(fs, cg).cs_nffree += i;
1262 /*
1263 * add back in counts associated with the new frags
1264 */
1265 blk = blkmap(fs, cg_blksfree(cgp), bbase);
1266 ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
1267 /*
1268 * if a complete block has been reassembled, account for it
1269 */
1270 blkno = fragstoblks(fs, bbase);
1271 if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
1272 cgp->cg_cs.cs_nffree -= fs->fs_frag;
1273 fs->fs_cstotal.cs_nffree -= fs->fs_frag;
1274 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
1275 ffs_clusteracct(fs, cgp, blkno, 1);
1276 cgp->cg_cs.cs_nbfree++;
1277 fs->fs_cstotal.cs_nbfree++;
1278 fs->fs_cs(fs, cg).cs_nbfree++;
1279 i = cbtocylno(fs, bbase);
1280 cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
1281 cg_blktot(cgp)[i]++;
1282 }
1283 }
1284 fs->fs_fmod = 1;
1285 bdwrite(bp);
1286 }
1287
1288 /*
1289 * Free an inode.
1290 *
1291 * The specified inode is placed back in the free map.
1292 */
1293 int
1294 ffs_vfree(ap)
1295 struct vop_vfree_args /* {
1296 struct vnode *a_pvp;
1297 ino_t a_ino;
1298 int a_mode;
1299 } */ *ap;
1300 {
1301 register struct fs *fs;
1302 register struct cg *cgp;
1303 register struct inode *pip;
1304 ino_t ino = ap->a_ino;
1305 struct buf *bp;
1306 int error, cg;
1307
1308 pip = VTOI(ap->a_pvp);
1309 fs = pip->i_fs;
1310 if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
1311 panic("ifree: range: dev = 0x%x, ino = %d, fs = %s",
1312 pip->i_dev, ino, fs->fs_fsmnt);
1313 cg = ino_to_cg(fs, ino);
1314 error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
1315 (int)fs->fs_cgsize, NOCRED, &bp);
1316 if (error) {
1317 brelse(bp);
1318 return (0);
1319 }
1320 cgp = (struct cg *)bp->b_data;
1321 if (!cg_chkmagic(cgp)) {
1322 brelse(bp);
1323 return (0);
1324 }
1325 cgp->cg_time = time.tv_sec;
1326 ino %= fs->fs_ipg;
1327 if (isclr(cg_inosused(cgp), ino)) {
1328 printf("dev = 0x%lx, ino = %ld, fs = %s\n",
1329 (u_long)pip->i_dev, ino, fs->fs_fsmnt);
1330 if (fs->fs_ronly == 0)
1331 panic("ifree: freeing free inode");
1332 }
1333 clrbit(cg_inosused(cgp), ino);
1334 if (ino < cgp->cg_irotor)
1335 cgp->cg_irotor = ino;
1336 cgp->cg_cs.cs_nifree++;
1337 fs->fs_cstotal.cs_nifree++;
1338 fs->fs_cs(fs, cg).cs_nifree++;
1339 if ((ap->a_mode & IFMT) == IFDIR) {
1340 cgp->cg_cs.cs_ndir--;
1341 fs->fs_cstotal.cs_ndir--;
1342 fs->fs_cs(fs, cg).cs_ndir--;
1343 }
1344 fs->fs_fmod = 1;
1345 bdwrite(bp);
1346 return (0);
1347 }
1348
1349 /*
1350 * Find a block of the specified size in the specified cylinder group.
1351 *
1352 * It is a panic if a request is made to find a block when none are
1353 * available.
1354 */
1355 static daddr_t
1356 ffs_mapsearch(fs, cgp, bpref, allocsiz)
1357 register struct fs *fs;
1358 register struct cg *cgp;
1359 daddr_t bpref;
1360 int allocsiz;
1361 {
1362 daddr_t bno;
1363 int start, len, loc, i;
1364 int blk, field, subfield, pos;
1365
1366 /*
1367 * find the fragment by searching through the free block
1368 * map for an appropriate bit pattern
1369 */
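/*
 * fragtbl[fs_frag] maps each possible byte of the free map to a bit
 * set recording which free fragment run sizes that byte contains;
 * scanc() stops at the first byte whose entry has the allocsiz bit
 * set and returns the number of bytes remaining from that point, or
 * 0 if no such byte is found.
 */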
1370 if (bpref)
1371 start = dtogd(fs, bpref) / NBBY;
1372 else
1373 start = cgp->cg_frotor / NBBY;
1374 len = howmany(fs->fs_fpg, NBBY) - start;
1375 loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[start],
1376 (u_char *)fragtbl[fs->fs_frag],
1377 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
1378 if (loc == 0) {
1379 len = start + 1;
1380 start = 0;
1381 loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[0],
1382 (u_char *)fragtbl[fs->fs_frag],
1383 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
1384 if (loc == 0) {
1385 printf("start = %d, len = %d, fs = %s\n",
1386 start, len, fs->fs_fsmnt);
1387 panic("ffs_alloccg: map corrupted");
1388 /* NOTREACHED */
1389 }
1390 }
1391 bno = (start + len - loc) * NBBY;
1392 cgp->cg_frotor = bno;
1393 /*
1394 * found the byte in the map
1395 * sift through the bits to find the selected frag
1396 */
1397 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
1398 blk = blkmap(fs, cg_blksfree(cgp), bno);
1399 blk <<= 1;
1400 field = around[allocsiz];
1401 subfield = inside[allocsiz];
1402 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
1403 if ((blk & field) == subfield)
1404 return (bno + pos);
1405 field <<= 1;
1406 subfield <<= 1;
1407 }
1408 }
1409 printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
1410 panic("ffs_alloccg: block not in map");
1411 return (-1);
1412 }
1413
1414 /*
1415 * Update the cluster map because of an allocation or free.
1416 *
1417 * Cnt == 1 means free; cnt == -1 means allocating.
1418 */
1419 static void
1420 ffs_clusteracct(fs, cgp, blkno, cnt)
1421 struct fs *fs;
1422 struct cg *cgp;
1423 daddr_t blkno;
1424 int cnt;
1425 {
1426 long *sump;
1427 u_char *freemapp, *mapp;
1428 int i, start, end, forw, back, map, bit;
1429
1430 if (fs->fs_contigsumsize <= 0)
1431 return;
1432 freemapp = cg_clustersfree(cgp);
1433 sump = cg_clustersum(cgp);
1434 /*
1435 * Allocate or clear the actual block.
1436 */
1437 if (cnt > 0)
1438 setbit(freemapp, blkno);
1439 else
1440 clrbit(freemapp, blkno);
1441 /*
1442 * Find the size of the cluster going forward.
1443 */
1444 start = blkno + 1;
1445 end = start + fs->fs_contigsumsize;
1446 if (end >= cgp->cg_nclusterblks)
1447 end = cgp->cg_nclusterblks;
1448 mapp = &freemapp[start / NBBY];
1449 map = *mapp++;
1450 bit = 1 << (start % NBBY);
1451 for (i = start; i < end; i++) {
1452 if ((map & bit) == 0)
1453 break;
1454 if ((i & (NBBY - 1)) != (NBBY - 1)) {
1455 bit <<= 1;
1456 } else {
1457 map = *mapp++;
1458 bit = 1;
1459 }
1460 }
1461 forw = i - start;
1462 /*
1463 * Find the size of the cluster going backward.
1464 */
1465 start = blkno - 1;
1466 end = start - fs->fs_contigsumsize;
1467 if (end < 0)
1468 end = -1;
1469 mapp = &freemapp[start / NBBY];
1470 map = *mapp--;
1471 bit = 1 << (start % NBBY);
1472 for (i = start; i > end; i--) {
1473 if ((map & bit) == 0)
1474 break;
1475 if ((i & (NBBY - 1)) != 0) {
1476 bit >>= 1;
1477 } else {
1478 map = *mapp--;
1479 bit = 1 << (NBBY - 1);
1480 }
1481 }
1482 back = start - i;
1483 /*
1484 * Account for old cluster and the possibly new forward and
1485 * back clusters.
1486 */
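/*
 * For example, freeing a block that has two free blocks behind it
 * and one ahead of it merges a run of 2 and a run of 1 into a run
 * of 4: sump[4] is incremented while sump[2] and sump[1] are
 * decremented.
 */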
1487 i = back + forw + 1;
1488 if (i > fs->fs_contigsumsize)
1489 i = fs->fs_contigsumsize;
1490 sump[i] += cnt;
1491 if (back > 0)
1492 sump[back] -= cnt;
1493 if (forw > 0)
1494 sump[forw] -= cnt;
1495 }
1496
1497 /*
1498 * Fserr prints the name of a file system with an error diagnostic.
1499 *
1500 * The form of the error message is:
1501 * fs: error message
1502 */
1503 static void
1504 ffs_fserr(fs, uid, cp)
1505 struct fs *fs;
1506 u_int uid;
1507 char *cp;
1508 {
1509 struct proc *p = curproc; /* XXX */
1510
1511 log(LOG_ERR, "pid %d (%s), uid %d on %s: %s\n", p ? p->p_pid : -1,
1512 p ? p->p_comm : "-", uid, fs->fs_fsmnt, cp);
1513 }