1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright 1998, 2000 Marshall Kirk McKusick.
5 * Copyright 2009, 2010 Jeffrey W. Roberson <jeff@FreeBSD.org>
6 * All rights reserved.
7 *
8 * The soft updates code is derived from the appendix of a University
9 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
10 * "Soft Updates: A Solution to the Metadata Update Problem in File
11 * Systems", CSE-TR-254-95, August 1995).
12 *
13 * Further information about soft updates can be obtained from:
14 *
15 * Marshall Kirk McKusick http://www.mckusick.com/softdep/
16 * 1614 Oxford Street mckusick@mckusick.com
17 * Berkeley, CA 94709-1608 +1-510-843-9542
18 * USA
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * 1. Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * 2. Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in the
28 * documentation and/or other materials provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
31 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
32 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
33 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
35 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
36 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
37 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
38 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
39 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * from: @(#)ffs_softdep.c 9.59 (McKusick) 6/21/00
42 */
43
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46
47 #include "opt_ffs.h"
48 #include "opt_quota.h"
49 #include "opt_ddb.h"
50
51 #include <sys/param.h>
52 #include <sys/kernel.h>
53 #include <sys/systm.h>
54 #include <sys/bio.h>
55 #include <sys/buf.h>
56 #include <sys/kdb.h>
57 #include <sys/kthread.h>
58 #include <sys/ktr.h>
59 #include <sys/limits.h>
60 #include <sys/lock.h>
61 #include <sys/malloc.h>
62 #include <sys/mount.h>
63 #include <sys/mutex.h>
64 #include <sys/namei.h>
65 #include <sys/priv.h>
66 #include <sys/proc.h>
67 #include <sys/racct.h>
68 #include <sys/rwlock.h>
69 #include <sys/stat.h>
70 #include <sys/sysctl.h>
71 #include <sys/syslog.h>
72 #include <sys/vnode.h>
73 #include <sys/conf.h>
74
75 #include <ufs/ufs/dir.h>
76 #include <ufs/ufs/extattr.h>
77 #include <ufs/ufs/quota.h>
78 #include <ufs/ufs/inode.h>
79 #include <ufs/ufs/ufsmount.h>
80 #include <ufs/ffs/fs.h>
81 #include <ufs/ffs/softdep.h>
82 #include <ufs/ffs/ffs_extern.h>
83 #include <ufs/ufs/ufs_extern.h>
84
85 #include <vm/vm.h>
86 #include <vm/vm_extern.h>
87 #include <vm/vm_object.h>
88
89 #include <geom/geom.h>
90 #include <geom/geom_vfs.h>
91
92 #include <ddb/ddb.h>
93
94 #define KTR_SUJ 0 /* Define to KTR_SPARE. */
95
96 #ifndef SOFTUPDATES
97
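/*
 * Stub routines for kernels compiled without SOFTUPDATES support.
 * Entry points that may legitimately be reached on a non-softdep
 * filesystem return a benign value; the remainder panic, as reaching
 * them indicates that a caller failed to check for soft updates.
 */
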
98 int
99 softdep_flushfiles(struct mount *oldmnt,
100 int flags,
101 struct thread *td)
102 {
103
104 panic("softdep_flushfiles called");
105 }
106
107 int
108 softdep_mount(struct vnode *devvp,
109 struct mount *mp,
110 struct fs *fs,
111 struct ucred *cred)
112 {
113
114 return (0);
115 }
116
117 void
118 softdep_initialize(void)
119 {
120
121 return;
122 }
123
124 void
125 softdep_uninitialize(void)
126 {
127
128 return;
129 }
130
131 void
132 softdep_unmount(struct mount *mp)
133 {
134
135 panic("softdep_unmount called");
136 }
137
138 void
139 softdep_setup_sbupdate(struct ufsmount *ump,
140 struct fs *fs,
141 struct buf *bp)
142 {
143
144 panic("softdep_setup_sbupdate called");
145 }
146
147 void
148 softdep_setup_inomapdep(struct buf *bp,
149 struct inode *ip,
150 ino_t newinum,
151 int mode)
152 {
153
154 panic("softdep_setup_inomapdep called");
155 }
156
157 void
158 softdep_setup_blkmapdep(struct buf *bp,
159 struct mount *mp,
160 ufs2_daddr_t newblkno,
161 int frags,
162 int oldfrags)
163 {
164
165 panic("softdep_setup_blkmapdep called");
166 }
167
168 void
169 softdep_setup_allocdirect(struct inode *ip,
170 ufs_lbn_t lbn,
171 ufs2_daddr_t newblkno,
172 ufs2_daddr_t oldblkno,
173 long newsize,
174 long oldsize,
175 struct buf *bp)
176 {
177
178 panic("softdep_setup_allocdirect called");
179 }
180
181 void
182 softdep_setup_allocext(struct inode *ip,
183 ufs_lbn_t lbn,
184 ufs2_daddr_t newblkno,
185 ufs2_daddr_t oldblkno,
186 long newsize,
187 long oldsize,
188 struct buf *bp)
189 {
190
191 panic("softdep_setup_allocext called");
192 }
193
194 void
195 softdep_setup_allocindir_page(struct inode *ip,
196 ufs_lbn_t lbn,
197 struct buf *bp,
198 int ptrno,
199 ufs2_daddr_t newblkno,
200 ufs2_daddr_t oldblkno,
201 struct buf *nbp)
202 {
203
204 panic("softdep_setup_allocindir_page called");
205 }
206
207 void
208 softdep_setup_allocindir_meta(struct buf *nbp,
209 struct inode *ip,
210 struct buf *bp,
211 int ptrno,
212 ufs2_daddr_t newblkno)
213 {
214
215 panic("softdep_setup_allocindir_meta called");
216 }
217
218 void
219 softdep_journal_freeblocks(struct inode *ip,
220 struct ucred *cred,
221 off_t length,
222 int flags)
223 {
224
225 panic("softdep_journal_freeblocks called");
226 }
227
228 void
229 softdep_journal_fsync(struct inode *ip)
230 {
231
232 panic("softdep_journal_fsync called");
233 }
234
235 void
236 softdep_setup_freeblocks(struct inode *ip,
237 off_t length,
238 int flags)
239 {
240
241 panic("softdep_setup_freeblocks called");
242 }
243
244 void
245 softdep_freefile(struct vnode *pvp,
246 ino_t ino,
247 int mode)
248 {
249
250 panic("softdep_freefile called");
251 }
252
253 int
254 softdep_setup_directory_add(struct buf *bp,
255 struct inode *dp,
256 off_t diroffset,
257 ino_t newinum,
258 struct buf *newdirbp,
259 int isnewblk)
260 {
261
262 panic("softdep_setup_directory_add called");
263 }
264
265 void
266 softdep_change_directoryentry_offset(struct buf *bp,
267 struct inode *dp,
268 caddr_t base,
269 caddr_t oldloc,
270 caddr_t newloc,
271 int entrysize)
272 {
273
274 panic("softdep_change_directoryentry_offset called");
275 }
276
277 void
278 softdep_setup_remove(struct buf *bp,
279 struct inode *dp,
280 struct inode *ip,
281 int isrmdir)
282 {
283
284 panic("softdep_setup_remove called");
285 }
286
287 void
288 softdep_setup_directory_change(struct buf *bp,
289 struct inode *dp,
290 struct inode *ip,
291 ino_t newinum,
292 int isrmdir)
293 {
294
295 panic("softdep_setup_directory_change called");
296 }
297
298 void
299 softdep_setup_blkfree(struct mount *mp,
300 struct buf *bp,
301 ufs2_daddr_t blkno,
302 int frags,
303 struct workhead *wkhd)
304 {
305
306 panic("%s called", __FUNCTION__);
307 }
308
309 void
310 softdep_setup_inofree(struct mount *mp,
311 struct buf *bp,
312 ino_t ino,
313 struct workhead *wkhd)
314 {
315
316 panic("%s called", __FUNCTION__);
317 }
318
319 void
320 softdep_setup_unlink(struct inode *dp, struct inode *ip)
321 {
322
323 panic("%s called", __FUNCTION__);
324 }
325
326 void
327 softdep_setup_link(struct inode *dp, struct inode *ip)
328 {
329
330 panic("%s called", __FUNCTION__);
331 }
332
333 void
334 softdep_revert_link(struct inode *dp, struct inode *ip)
335 {
336
337 panic("%s called", __FUNCTION__);
338 }
339
340 void
341 softdep_setup_rmdir(struct inode *dp, struct inode *ip)
342 {
343
344 panic("%s called", __FUNCTION__);
345 }
346
347 void
348 softdep_revert_rmdir(struct inode *dp, struct inode *ip)
349 {
350
351 panic("%s called", __FUNCTION__);
352 }
353
354 void
355 softdep_setup_create(struct inode *dp, struct inode *ip)
356 {
357
358 panic("%s called", __FUNCTION__);
359 }
360
361 void
362 softdep_revert_create(struct inode *dp, struct inode *ip)
363 {
364
365 panic("%s called", __FUNCTION__);
366 }
367
368 void
369 softdep_setup_mkdir(struct inode *dp, struct inode *ip)
370 {
371
372 panic("%s called", __FUNCTION__);
373 }
374
375 void
376 softdep_revert_mkdir(struct inode *dp, struct inode *ip)
377 {
378
379 panic("%s called", __FUNCTION__);
380 }
381
382 void
383 softdep_setup_dotdot_link(struct inode *dp, struct inode *ip)
384 {
385
386 panic("%s called", __FUNCTION__);
387 }
388
389 int
390 softdep_prealloc(struct vnode *vp, int waitok)
391 {
392
393 panic("%s called", __FUNCTION__);
394 }
395
396 int
397 softdep_journal_lookup(struct mount *mp, struct vnode **vpp)
398 {
399
400 return (ENOENT);
401 }
402
403 void
404 softdep_change_linkcnt(struct inode *ip)
405 {
406
407 panic("softdep_change_linkcnt called");
408 }
409
410 void
411 softdep_load_inodeblock(struct inode *ip)
412 {
413
414 panic("softdep_load_inodeblock called");
415 }
416
417 void
418 softdep_update_inodeblock(struct inode *ip,
419 struct buf *bp,
420 int waitfor)
421 {
422
423 panic("softdep_update_inodeblock called");
424 }
425
426 int
softdep_fsync(struct vnode *vp)	/* vnode for the "in_core" copy of the inode */
428 {
429
430 return (0);
431 }
432
433 void
434 softdep_fsync_mountdev(struct vnode *vp)
435 {
436
437 return;
438 }
439
440 int
441 softdep_flushworklist(struct mount *oldmnt,
442 int *countp,
443 struct thread *td)
444 {
445
446 *countp = 0;
447 return (0);
448 }
449
450 int
451 softdep_sync_metadata(struct vnode *vp)
452 {
453
454 panic("softdep_sync_metadata called");
455 }
456
457 int
458 softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
459 {
460
461 panic("softdep_sync_buf called");
462 }
463
464 int
465 softdep_slowdown(struct vnode *vp)
466 {
467
468 panic("softdep_slowdown called");
469 }
470
471 int
472 softdep_request_cleanup(struct fs *fs,
473 struct vnode *vp,
474 struct ucred *cred,
475 int resource)
476 {
477
478 return (0);
479 }
480
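/*
 * Even without soft updates, suspension must wait for secondary
 * writes to drain and verify that the device vnode is clean, so
 * this stub performs the full check rather than panicking.
 */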
481 int
482 softdep_check_suspend(struct mount *mp,
483 struct vnode *devvp,
484 int softdep_depcnt,
485 int softdep_accdepcnt,
486 int secondary_writes,
487 int secondary_accwrites)
488 {
489 struct bufobj *bo;
490 int error;
491
	(void) softdep_depcnt;
	(void) softdep_accdepcnt;
494
495 bo = &devvp->v_bufobj;
496 ASSERT_BO_WLOCKED(bo);
497
498 MNT_ILOCK(mp);
499 while (mp->mnt_secondary_writes != 0) {
500 BO_UNLOCK(bo);
501 msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
502 (PUSER - 1) | PDROP, "secwr", 0);
503 BO_LOCK(bo);
504 MNT_ILOCK(mp);
505 }
506
	/*
	 * Reasons for needing more work before suspend:
	 * - Dirty buffers on devvp.
	 * - Secondary writes occurred after start of vnode sync loop.
	 */
512 error = 0;
513 if (bo->bo_numoutput > 0 ||
514 bo->bo_dirty.bv_cnt > 0 ||
515 secondary_writes != 0 ||
516 mp->mnt_secondary_writes != 0 ||
517 secondary_accwrites != mp->mnt_secondary_accwrites)
518 error = EAGAIN;
519 BO_UNLOCK(bo);
520 return (error);
521 }
522
523 void
524 softdep_get_depcounts(struct mount *mp,
525 int *softdepactivep,
526 int *softdepactiveaccp)
527 {
528 (void) mp;
529 *softdepactivep = 0;
530 *softdepactiveaccp = 0;
531 }
532
533 void
534 softdep_buf_append(struct buf *bp, struct workhead *wkhd)
535 {
536
	panic("softdep_buf_append called");
538 }
539
540 void
541 softdep_inode_append(struct inode *ip,
542 struct ucred *cred,
543 struct workhead *wkhd)
544 {
545
	panic("softdep_inode_append called");
547 }
548
549 void
550 softdep_freework(struct workhead *wkhd)
551 {
552
553 panic("softdep_freework called");
554 }
555
556 int
557 softdep_prerename(struct vnode *fdvp,
558 struct vnode *fvp,
559 struct vnode *tdvp,
560 struct vnode *tvp)
561 {
562
563 panic("softdep_prerename called");
564 }
565
566 int
567 softdep_prelink(struct vnode *dvp,
568 struct vnode *vp,
569 struct componentname *cnp)
570 {
571
572 panic("softdep_prelink called");
573 }
574
575 #else
576
577 FEATURE(softupdates, "FFS soft-updates support");
578
579 static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
580 "soft updates stats");
581 static SYSCTL_NODE(_debug_softdep, OID_AUTO, total,
582 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
583 "total dependencies allocated");
584 static SYSCTL_NODE(_debug_softdep, OID_AUTO, highuse,
585 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
586 "high use dependencies allocated");
587 static SYSCTL_NODE(_debug_softdep, OID_AUTO, current,
588 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
589 "current dependencies allocated");
590 static SYSCTL_NODE(_debug_softdep, OID_AUTO, write,
591 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
592 "current dependencies written");
593
594 unsigned long dep_current[D_LAST + 1];
595 unsigned long dep_highuse[D_LAST + 1];
596 unsigned long dep_total[D_LAST + 1];
597 unsigned long dep_write[D_LAST + 1];
598
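/*
 * For each dependency type, define its malloc type and export the
 * corresponding total, current, highuse, and write counters as
 * read-only sysctls under debug.softdep.
 */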
599 #define SOFTDEP_TYPE(type, str, long) \
600 static MALLOC_DEFINE(M_ ## type, #str, long); \
601 SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD, \
602 &dep_total[D_ ## type], 0, ""); \
603 SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD, \
604 &dep_current[D_ ## type], 0, ""); \
605 SYSCTL_ULONG(_debug_softdep_highuse, OID_AUTO, str, CTLFLAG_RD, \
606 &dep_highuse[D_ ## type], 0, ""); \
607 SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD, \
608 &dep_write[D_ ## type], 0, "");
609
610 SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies");
611 SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies");
612 SOFTDEP_TYPE(BMSAFEMAP, bmsafemap,
613 "Block or frag allocated from cyl group map");
614 SOFTDEP_TYPE(NEWBLK, newblk, "New block or frag allocation dependency");
615 SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode");
616 SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies");
617 SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block");
618 SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode");
619 SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode");
620 SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated");
621 SOFTDEP_TYPE(DIRADD, diradd, "New directory entry");
622 SOFTDEP_TYPE(MKDIR, mkdir, "New directory");
623 SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted");
624 SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block");
625 SOFTDEP_TYPE(FREEWORK, freework, "free an inode block");
626 SOFTDEP_TYPE(FREEDEP, freedep, "track a block free");
627 SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add");
628 SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove");
629 SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move");
630 SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block");
631 SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block");
632 SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag");
633 SOFTDEP_TYPE(JSEG, jseg, "Journal segment");
634 SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete");
635 SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency");
636 SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation");
637 SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete");
638
639 static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel");
640
641 static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes");
642 static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations");
643 static MALLOC_DEFINE(M_MOUNTDATA, "softdep", "Softdep per-mount data");
644
645 #define M_SOFTDEP_FLAGS (M_WAITOK)
646
647 /*
648 * translate from workitem type to memory type
649 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
650 */
651 static struct malloc_type *memtype[] = {
652 NULL,
653 M_PAGEDEP,
654 M_INODEDEP,
655 M_BMSAFEMAP,
656 M_NEWBLK,
657 M_ALLOCDIRECT,
658 M_INDIRDEP,
659 M_ALLOCINDIR,
660 M_FREEFRAG,
661 M_FREEBLKS,
662 M_FREEFILE,
663 M_DIRADD,
664 M_MKDIR,
665 M_DIRREM,
666 M_NEWDIRBLK,
667 M_FREEWORK,
668 M_FREEDEP,
669 M_JADDREF,
670 M_JREMREF,
671 M_JMVREF,
672 M_JNEWBLK,
673 M_JFREEBLK,
674 M_JFREEFRAG,
675 M_JSEG,
676 M_JSEGDEP,
677 M_SBDEP,
678 M_JTRUNC,
679 M_JFSYNC,
680 M_SENTINEL
681 };
682
683 #define DtoM(type) (memtype[type])
684
685 /*
686 * Names of malloc types.
687 */
688 #define TYPENAME(type) \
689 ((unsigned)(type) <= D_LAST && (unsigned)(type) >= D_FIRST ? \
690 memtype[type]->ks_shortdesc : "???")
691 /*
692 * End system adaptation definitions.
693 */
694
695 #define DOTDOT_OFFSET offsetof(struct dirtemplate, dotdot_ino)
696 #define DOT_OFFSET offsetof(struct dirtemplate, dot_ino)
697
698 /*
699 * Internal function prototypes.
700 */
701 static void check_clear_deps(struct mount *);
702 static void softdep_error(char *, int);
703 static int softdep_prerename_vnode(struct ufsmount *, struct vnode *);
704 static int softdep_process_worklist(struct mount *, int);
705 static int softdep_waitidle(struct mount *, int);
706 static void drain_output(struct vnode *);
707 static struct buf *getdirtybuf(struct buf *, struct rwlock *, int);
708 static int check_inodedep_free(struct inodedep *);
709 static void clear_remove(struct mount *);
710 static void clear_inodedeps(struct mount *);
711 static void unlinked_inodedep(struct mount *, struct inodedep *);
712 static void clear_unlinked_inodedep(struct inodedep *);
713 static struct inodedep *first_unlinked_inodedep(struct ufsmount *);
714 static int flush_pagedep_deps(struct vnode *, struct mount *,
715 struct diraddhd *, struct buf *);
716 static int free_pagedep(struct pagedep *);
717 static int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t);
718 static int flush_inodedep_deps(struct vnode *, struct mount *, ino_t);
719 static int flush_deplist(struct allocdirectlst *, int, int *);
720 static int sync_cgs(struct mount *, int);
721 static int handle_written_filepage(struct pagedep *, struct buf *, int);
722 static int handle_written_sbdep(struct sbdep *, struct buf *);
723 static void initiate_write_sbdep(struct sbdep *);
724 static void diradd_inode_written(struct diradd *, struct inodedep *);
725 static int handle_written_indirdep(struct indirdep *, struct buf *,
726 struct buf**, int);
727 static int handle_written_inodeblock(struct inodedep *, struct buf *, int);
728 static int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *,
729 uint8_t *);
730 static int handle_written_bmsafemap(struct bmsafemap *, struct buf *, int);
731 static void handle_written_jaddref(struct jaddref *);
732 static void handle_written_jremref(struct jremref *);
733 static void handle_written_jseg(struct jseg *, struct buf *);
734 static void handle_written_jnewblk(struct jnewblk *);
735 static void handle_written_jblkdep(struct jblkdep *);
736 static void handle_written_jfreefrag(struct jfreefrag *);
737 static void complete_jseg(struct jseg *);
738 static void complete_jsegs(struct jseg *);
739 static void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *);
740 static void jaddref_write(struct jaddref *, struct jseg *, uint8_t *);
741 static void jremref_write(struct jremref *, struct jseg *, uint8_t *);
742 static void jmvref_write(struct jmvref *, struct jseg *, uint8_t *);
743 static void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *);
744 static void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data);
745 static void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *);
746 static void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *);
747 static void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *);
748 static inline void inoref_write(struct inoref *, struct jseg *,
749 struct jrefrec *);
750 static void handle_allocdirect_partdone(struct allocdirect *,
751 struct workhead *);
752 static struct jnewblk *cancel_newblk(struct newblk *, struct worklist *,
753 struct workhead *);
754 static void indirdep_complete(struct indirdep *);
755 static int indirblk_lookup(struct mount *, ufs2_daddr_t);
756 static void indirblk_insert(struct freework *);
757 static void indirblk_remove(struct freework *);
758 static void handle_allocindir_partdone(struct allocindir *);
759 static void initiate_write_filepage(struct pagedep *, struct buf *);
760 static void initiate_write_indirdep(struct indirdep*, struct buf *);
761 static void handle_written_mkdir(struct mkdir *, int);
762 static int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *,
763 uint8_t *);
764 static void initiate_write_bmsafemap(struct bmsafemap *, struct buf *);
765 static void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
766 static void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
767 static void handle_workitem_freefile(struct freefile *);
768 static int handle_workitem_remove(struct dirrem *, int);
769 static struct dirrem *newdirrem(struct buf *, struct inode *,
770 struct inode *, int, struct dirrem **);
771 static struct indirdep *indirdep_lookup(struct mount *, struct inode *,
772 struct buf *);
773 static void cancel_indirdep(struct indirdep *, struct buf *,
774 struct freeblks *);
775 static void free_indirdep(struct indirdep *);
776 static void free_diradd(struct diradd *, struct workhead *);
777 static void merge_diradd(struct inodedep *, struct diradd *);
778 static void complete_diradd(struct diradd *);
779 static struct diradd *diradd_lookup(struct pagedep *, int);
780 static struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *,
781 struct jremref *);
782 static struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *,
783 struct jremref *);
784 static void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *,
785 struct jremref *, struct jremref *);
786 static void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *,
787 struct jremref *);
788 static void cancel_allocindir(struct allocindir *, struct buf *bp,
789 struct freeblks *, int);
790 static int setup_trunc_indir(struct freeblks *, struct inode *,
791 ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t);
792 static void complete_trunc_indir(struct freework *);
793 static void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *,
794 int);
795 static void complete_mkdir(struct mkdir *);
796 static void free_newdirblk(struct newdirblk *);
797 static void free_jremref(struct jremref *);
798 static void free_jaddref(struct jaddref *);
799 static void free_jsegdep(struct jsegdep *);
800 static void free_jsegs(struct jblocks *);
801 static void rele_jseg(struct jseg *);
802 static void free_jseg(struct jseg *, struct jblocks *);
803 static void free_jnewblk(struct jnewblk *);
804 static void free_jblkdep(struct jblkdep *);
805 static void free_jfreefrag(struct jfreefrag *);
806 static void free_freedep(struct freedep *);
807 static void journal_jremref(struct dirrem *, struct jremref *,
808 struct inodedep *);
809 static void cancel_jnewblk(struct jnewblk *, struct workhead *);
810 static int cancel_jaddref(struct jaddref *, struct inodedep *,
811 struct workhead *);
812 static void cancel_jfreefrag(struct jfreefrag *);
813 static inline void setup_freedirect(struct freeblks *, struct inode *,
814 int, int);
815 static inline void setup_freeext(struct freeblks *, struct inode *, int, int);
816 static inline void setup_freeindir(struct freeblks *, struct inode *, int,
817 ufs_lbn_t, int);
818 static inline struct freeblks *newfreeblks(struct mount *, struct inode *);
819 static void freeblks_free(struct ufsmount *, struct freeblks *, int);
820 static void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t);
821 static ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t);
822 static int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int);
823 static void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t,
824 int, int);
825 static void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int);
826 static int cancel_pagedep(struct pagedep *, struct freeblks *, int);
827 static int deallocate_dependencies(struct buf *, struct freeblks *, int);
828 static void newblk_freefrag(struct newblk*);
829 static void free_newblk(struct newblk *);
830 static void cancel_allocdirect(struct allocdirectlst *,
831 struct allocdirect *, struct freeblks *);
832 static int check_inode_unwritten(struct inodedep *);
833 static int free_inodedep(struct inodedep *);
834 static void freework_freeblock(struct freework *, u_long);
835 static void freework_enqueue(struct freework *);
836 static int handle_workitem_freeblocks(struct freeblks *, int);
837 static int handle_complete_freeblocks(struct freeblks *, int);
838 static void handle_workitem_indirblk(struct freework *);
839 static void handle_written_freework(struct freework *);
840 static void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *);
841 static struct worklist *jnewblk_merge(struct worklist *, struct worklist *,
842 struct workhead *);
843 static struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *,
844 struct inodedep *, struct allocindir *, ufs_lbn_t);
845 static struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
846 ufs2_daddr_t, ufs_lbn_t);
847 static void handle_workitem_freefrag(struct freefrag *);
848 static struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long,
849 ufs_lbn_t, u_long);
850 static void allocdirect_merge(struct allocdirectlst *,
851 struct allocdirect *, struct allocdirect *);
852 static struct freefrag *allocindir_merge(struct allocindir *,
853 struct allocindir *);
854 static int bmsafemap_find(struct bmsafemap_hashhead *, int,
855 struct bmsafemap **);
856 static struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *,
857 int cg, struct bmsafemap *);
858 static int newblk_find(struct newblk_hashhead *, ufs2_daddr_t, int,
859 struct newblk **);
860 static int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **);
861 static int inodedep_find(struct inodedep_hashhead *, ino_t,
862 struct inodedep **);
863 static int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
864 static int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t,
865 int, struct pagedep **);
866 static int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
867 struct pagedep **);
868 static void pause_timer(void *);
869 static int request_cleanup(struct mount *, int);
870 static int softdep_request_cleanup_flush(struct mount *, struct ufsmount *);
871 static void schedule_cleanup(struct mount *);
872 static void softdep_ast_cleanup_proc(struct thread *, int);
873 static struct ufsmount *softdep_bp_to_mp(struct buf *bp);
874 static int process_worklist_item(struct mount *, int, int);
875 static void process_removes(struct vnode *);
876 static void process_truncates(struct vnode *);
877 static void jwork_move(struct workhead *, struct workhead *);
878 static void jwork_insert(struct workhead *, struct jsegdep *);
879 static void add_to_worklist(struct worklist *, int);
880 static void wake_worklist(struct worklist *);
881 static void wait_worklist(struct worklist *, char *);
882 static void remove_from_worklist(struct worklist *);
883 static void softdep_flush(void *);
884 static void softdep_flushjournal(struct mount *);
885 static int softdep_speedup(struct ufsmount *);
886 static void worklist_speedup(struct mount *);
887 static int journal_mount(struct mount *, struct fs *, struct ucred *);
888 static void journal_unmount(struct ufsmount *);
889 static int journal_space(struct ufsmount *, int);
890 static void journal_suspend(struct ufsmount *);
891 static int journal_unsuspend(struct ufsmount *ump);
892 static void add_to_journal(struct worklist *);
893 static void remove_from_journal(struct worklist *);
894 static bool softdep_excess_items(struct ufsmount *, int);
895 static void softdep_process_journal(struct mount *, struct worklist *, int);
896 static struct jremref *newjremref(struct dirrem *, struct inode *,
897 struct inode *ip, off_t, nlink_t);
898 static struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t,
899 uint16_t);
900 static inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t,
901 uint16_t);
902 static inline struct jsegdep *inoref_jseg(struct inoref *);
903 static struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t);
904 static struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t,
905 ufs2_daddr_t, int);
906 static void adjust_newfreework(struct freeblks *, int);
907 static struct jtrunc *newjtrunc(struct freeblks *, off_t, int);
908 static void move_newblock_dep(struct jaddref *, struct inodedep *);
909 static void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t);
910 static struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *,
911 ufs2_daddr_t, long, ufs_lbn_t);
912 static struct freework *newfreework(struct ufsmount *, struct freeblks *,
913 struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
914 static int jwait(struct worklist *, int);
915 static struct inodedep *inodedep_lookup_ip(struct inode *);
916 static int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
917 static struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
918 static void handle_jwork(struct workhead *);
919 static struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
920 struct mkdir **);
921 static struct jblocks *jblocks_create(void);
922 static ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *);
923 static void jblocks_free(struct jblocks *, struct mount *, int);
924 static void jblocks_destroy(struct jblocks *);
925 static void jblocks_add(struct jblocks *, ufs2_daddr_t, int);
926
927 /*
928 * Exported softdep operations.
929 */
930 static void softdep_disk_io_initiation(struct buf *);
931 static void softdep_disk_write_complete(struct buf *);
932 static void softdep_deallocate_dependencies(struct buf *);
933 static int softdep_count_dependencies(struct buf *bp, int);
934
935 /*
936 * Global lock over all of soft updates.
937 */
938 static struct mtx lk;
939 MTX_SYSINIT(softdep_lock, &lk, "global softdep", MTX_DEF);
940
941 #define ACQUIRE_GBLLOCK(lk) mtx_lock(lk)
942 #define FREE_GBLLOCK(lk) mtx_unlock(lk)
943 #define GBLLOCK_OWNED(lk) mtx_assert((lk), MA_OWNED)
944
945 /*
946 * Per-filesystem soft-updates locking.
947 */
948 #define LOCK_PTR(ump) (&(ump)->um_softdep->sd_fslock)
949 #define TRY_ACQUIRE_LOCK(ump) rw_try_wlock(&(ump)->um_softdep->sd_fslock)
950 #define ACQUIRE_LOCK(ump) rw_wlock(&(ump)->um_softdep->sd_fslock)
951 #define FREE_LOCK(ump) rw_wunlock(&(ump)->um_softdep->sd_fslock)
952 #define LOCK_OWNED(ump) rw_assert(&(ump)->um_softdep->sd_fslock, \
953 RA_WLOCKED)
954
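/* Enable and disable recursion on a buffer's lock. */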
955 #define BUF_AREC(bp) lockallowrecurse(&(bp)->b_lock)
956 #define BUF_NOREC(bp) lockdisablerecurse(&(bp)->b_lock)
957
958 /*
959 * Worklist queue management.
960 * These routines require that the lock be held.
961 */
962 #ifndef /* NOT */ INVARIANTS
963 #define WORKLIST_INSERT(head, item) do { \
964 (item)->wk_state |= ONWORKLIST; \
965 LIST_INSERT_HEAD(head, item, wk_list); \
966 } while (0)
967 #define WORKLIST_REMOVE(item) do { \
968 (item)->wk_state &= ~ONWORKLIST; \
969 LIST_REMOVE(item, wk_list); \
970 } while (0)
971 #define WORKLIST_INSERT_UNLOCKED WORKLIST_INSERT
972 #define WORKLIST_REMOVE_UNLOCKED WORKLIST_REMOVE
973
974 #else /* INVARIANTS */
975 static void worklist_insert(struct workhead *, struct worklist *, int,
976 const char *, int);
977 static void worklist_remove(struct worklist *, int, const char *, int);
978
979 #define WORKLIST_INSERT(head, item) \
980 worklist_insert(head, item, 1, __func__, __LINE__)
981 #define WORKLIST_INSERT_UNLOCKED(head, item)\
982 worklist_insert(head, item, 0, __func__, __LINE__)
983 #define WORKLIST_REMOVE(item)\
984 worklist_remove(item, 1, __func__, __LINE__)
985 #define WORKLIST_REMOVE_UNLOCKED(item)\
986 worklist_remove(item, 0, __func__, __LINE__)
987
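/*
 * INVARIANTS versions of the worklist primitives. In addition to the
 * list manipulation, they verify lock state and list membership, and
 * record the calling function and line for use in panic messages.
 */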
988 static void
989 worklist_insert(struct workhead *head,
990 struct worklist *item,
991 int locked,
992 const char *func,
993 int line)
994 {
995
996 if (locked)
997 LOCK_OWNED(VFSTOUFS(item->wk_mp));
998 if (item->wk_state & ONWORKLIST)
999 panic("worklist_insert: %p %s(0x%X) already on list, "
1000 "added in function %s at line %d",
1001 item, TYPENAME(item->wk_type), item->wk_state,
1002 item->wk_func, item->wk_line);
1003 item->wk_state |= ONWORKLIST;
1004 item->wk_func = func;
1005 item->wk_line = line;
1006 LIST_INSERT_HEAD(head, item, wk_list);
1007 }
1008
1009 static void
1010 worklist_remove(struct worklist *item,
1011 int locked,
1012 const char *func,
1013 int line)
1014 {
1015
1016 if (locked)
1017 LOCK_OWNED(VFSTOUFS(item->wk_mp));
1018 if ((item->wk_state & ONWORKLIST) == 0)
1019 panic("worklist_remove: %p %s(0x%X) not on list, "
1020 "removed in function %s at line %d",
1021 item, TYPENAME(item->wk_type), item->wk_state,
1022 item->wk_func, item->wk_line);
1023 item->wk_state &= ~ONWORKLIST;
1024 item->wk_func = func;
1025 item->wk_line = line;
1026 LIST_REMOVE(item, wk_list);
1027 }
1028 #endif /* INVARIANTS */
1029
/*
 * Merge two jsegdeps, keeping only the oldest one, since newer
 * references cannot be discarded until after the older ones.
 */
1034 static inline struct jsegdep *
1035 jsegdep_merge(struct jsegdep *one, struct jsegdep *two)
1036 {
1037 struct jsegdep *swp;
1038
1039 if (two == NULL)
1040 return (one);
1041
1042 if (one->jd_seg->js_seq > two->jd_seg->js_seq) {
1043 swp = one;
1044 one = two;
1045 two = swp;
1046 }
1047 WORKLIST_REMOVE(&two->jd_list);
1048 free_jsegdep(two);
1049
1050 return (one);
1051 }
1052
/*
 * If two freedeps are compatible, free one to reduce list size.
 */
1056 static inline struct freedep *
1057 freedep_merge(struct freedep *one, struct freedep *two)
1058 {
1059 if (two == NULL)
1060 return (one);
1061
1062 if (one->fd_freework == two->fd_freework) {
1063 WORKLIST_REMOVE(&two->fd_list);
1064 free_freedep(two);
1065 }
1066 return (one);
1067 }
1068
1069 /*
1070 * Move journal work from one list to another. Duplicate freedeps and
1071 * jsegdeps are coalesced to keep the lists as small as possible.
1072 */
1073 static void
1074 jwork_move(struct workhead *dst, struct workhead *src)
1075 {
1076 struct freedep *freedep;
1077 struct jsegdep *jsegdep;
1078 struct worklist *wkn;
1079 struct worklist *wk;
1080
1081 KASSERT(dst != src,
1082 ("jwork_move: dst == src"));
1083 freedep = NULL;
1084 jsegdep = NULL;
1085 LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) {
1086 if (wk->wk_type == D_JSEGDEP)
1087 jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
1088 else if (wk->wk_type == D_FREEDEP)
1089 freedep = freedep_merge(WK_FREEDEP(wk), freedep);
1090 }
1091
1092 while ((wk = LIST_FIRST(src)) != NULL) {
1093 WORKLIST_REMOVE(wk);
1094 WORKLIST_INSERT(dst, wk);
1095 if (wk->wk_type == D_JSEGDEP) {
1096 jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
1097 continue;
1098 }
1099 if (wk->wk_type == D_FREEDEP)
1100 freedep = freedep_merge(WK_FREEDEP(wk), freedep);
1101 }
1102 }
1103
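/*
 * Insert a single jsegdep into dst, keeping at most one on the list
 * by retaining whichever jsegdep references the older journal segment.
 */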
1104 static void
1105 jwork_insert(struct workhead *dst, struct jsegdep *jsegdep)
1106 {
1107 struct jsegdep *jsegdepn;
1108 struct worklist *wk;
1109
1110 LIST_FOREACH(wk, dst, wk_list)
1111 if (wk->wk_type == D_JSEGDEP)
1112 break;
1113 if (wk == NULL) {
1114 WORKLIST_INSERT(dst, &jsegdep->jd_list);
1115 return;
1116 }
1117 jsegdepn = WK_JSEGDEP(wk);
1118 if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) {
1119 WORKLIST_REMOVE(wk);
1120 free_jsegdep(jsegdepn);
1121 WORKLIST_INSERT(dst, &jsegdep->jd_list);
1122 } else
1123 free_jsegdep(jsegdep);
1124 }
1125
1126 /*
1127 * Routines for tracking and managing workitems.
1128 */
1129 static void workitem_free(struct worklist *, int);
1130 static void workitem_alloc(struct worklist *, int, struct mount *);
1131 static void workitem_reassign(struct worklist *, int);
1132
1133 #define WORKITEM_FREE(item, type) \
1134 workitem_free((struct worklist *)(item), (type))
1135 #define WORKITEM_REASSIGN(item, type) \
1136 workitem_reassign((struct worklist *)(item), (type))
1137
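/*
 * Release a work item: wake up any thread waiting on it, update the
 * per-mount and global dependency statistics, and free its memory.
 */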
1138 static void
1139 workitem_free(struct worklist *item, int type)
1140 {
1141 struct ufsmount *ump;
1142
1143 #ifdef INVARIANTS
1144 if (item->wk_state & ONWORKLIST)
1145 panic("workitem_free: %s(0x%X) still on list, "
1146 "added in function %s at line %d",
1147 TYPENAME(item->wk_type), item->wk_state,
1148 item->wk_func, item->wk_line);
1149 if (item->wk_type != type && type != D_NEWBLK)
1150 panic("workitem_free: type mismatch %s != %s",
1151 TYPENAME(item->wk_type), TYPENAME(type));
1152 #endif
1153 if (item->wk_state & IOWAITING)
1154 wakeup(item);
1155 ump = VFSTOUFS(item->wk_mp);
1156 LOCK_OWNED(ump);
1157 KASSERT(ump->softdep_deps > 0,
1158 ("workitem_free: %s: softdep_deps going negative",
1159 ump->um_fs->fs_fsmnt));
1160 if (--ump->softdep_deps == 0 && ump->softdep_req)
1161 wakeup(&ump->softdep_deps);
1162 KASSERT(dep_current[item->wk_type] > 0,
1163 ("workitem_free: %s: dep_current[%s] going negative",
1164 ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1165 KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
1166 ("workitem_free: %s: softdep_curdeps[%s] going negative",
1167 ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1168 atomic_subtract_long(&dep_current[item->wk_type], 1);
1169 ump->softdep_curdeps[item->wk_type] -= 1;
1170 LIST_REMOVE(item, wk_all);
1171 free(item, DtoM(type));
1172 }
1173
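/*
 * Allocate a work item of the given type on mount mp, accounting for
 * it in both the global and the per-mount dependency statistics.
 */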
1174 static void
1175 workitem_alloc(struct worklist *item,
1176 int type,
1177 struct mount *mp)
1178 {
1179 struct ufsmount *ump;
1180
1181 item->wk_type = type;
1182 item->wk_mp = mp;
1183 item->wk_state = 0;
1184
1185 ump = VFSTOUFS(mp);
1186 ACQUIRE_GBLLOCK(&lk);
1187 dep_current[type]++;
1188 if (dep_current[type] > dep_highuse[type])
1189 dep_highuse[type] = dep_current[type];
1190 dep_total[type]++;
1191 FREE_GBLLOCK(&lk);
1192 ACQUIRE_LOCK(ump);
1193 ump->softdep_curdeps[type] += 1;
1194 ump->softdep_deps++;
1195 ump->softdep_accdeps++;
1196 LIST_INSERT_HEAD(&ump->softdep_alldeps[type], item, wk_all);
1197 FREE_LOCK(ump);
1198 }
1199
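/*
 * Convert an allocated work item to a new type, moving the statistics
 * from the old type to the new one.
 */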
1200 static void
1201 workitem_reassign(struct worklist *item, int newtype)
1202 {
1203 struct ufsmount *ump;
1204
1205 ump = VFSTOUFS(item->wk_mp);
1206 LOCK_OWNED(ump);
1207 KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
1208 ("workitem_reassign: %s: softdep_curdeps[%s] going negative",
1209 VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1210 ump->softdep_curdeps[item->wk_type] -= 1;
1211 ump->softdep_curdeps[newtype] += 1;
1212 KASSERT(dep_current[item->wk_type] > 0,
1213 ("workitem_reassign: %s: dep_current[%s] going negative",
1214 VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1215 ACQUIRE_GBLLOCK(&lk);
1216 dep_current[newtype]++;
1217 dep_current[item->wk_type]--;
1218 if (dep_current[newtype] > dep_highuse[newtype])
1219 dep_highuse[newtype] = dep_current[newtype];
1220 dep_total[newtype]++;
1221 FREE_GBLLOCK(&lk);
1222 item->wk_type = newtype;
1223 LIST_REMOVE(item, wk_all);
1224 LIST_INSERT_HEAD(&ump->softdep_alldeps[newtype], item, wk_all);
1225 }
1226
1227 /*
1228 * Workitem queue management
1229 */
1230 static int max_softdeps; /* maximum number of structs before slowdown */
1231 static int tickdelay = 2; /* number of ticks to pause during slowdown */
1232 static int proc_waiting; /* tracks whether we have a timeout posted */
1233 static int *stat_countp; /* statistic to count in proc_waiting timeout */
1234 static struct callout softdep_callout;
1235 static int req_clear_inodedeps; /* syncer process flush some inodedeps */
1236 static int req_clear_remove; /* syncer process flush some freeblks */
1237 static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */
1238
1239 /*
1240 * runtime statistics
1241 */
1242 static int stat_flush_threads; /* number of softdep flushing threads */
1243 static int stat_worklist_push; /* number of worklist cleanups */
1244 static int stat_delayed_inact; /* number of delayed inactivation cleanups */
1245 static int stat_blk_limit_push; /* number of times block limit neared */
1246 static int stat_ino_limit_push; /* number of times inode limit neared */
1247 static int stat_blk_limit_hit; /* number of times block slowdown imposed */
1248 static int stat_ino_limit_hit; /* number of times inode slowdown imposed */
1249 static int stat_sync_limit_hit; /* number of synchronous slowdowns imposed */
1250 static int stat_indir_blk_ptrs; /* bufs redirtied as indir ptrs not written */
1251 static int stat_inode_bitmap; /* bufs redirtied as inode bitmap not written */
1252 static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
1253 static int stat_dir_entry; /* bufs redirtied as dir entry cannot write */
1254 static int stat_jaddref; /* bufs redirtied as ino bitmap can not write */
1255 static int stat_jnewblk; /* bufs redirtied as blk bitmap can not write */
1256 static int stat_journal_min; /* Times hit journal min threshold */
1257 static int stat_journal_low; /* Times hit journal low threshold */
1258 static int stat_journal_wait; /* Times blocked in jwait(). */
1259 static int stat_jwait_filepage; /* Times blocked in jwait() for filepage. */
1260 static int stat_jwait_freeblks; /* Times blocked in jwait() for freeblks. */
1261 static int stat_jwait_inode; /* Times blocked in jwait() for inodes. */
1262 static int stat_jwait_newblk; /* Times blocked in jwait() for newblks. */
1263 static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */
1264 static int stat_cleanup_blkrequests; /* Number of block cleanup requests */
1265 static int stat_cleanup_inorequests; /* Number of inode cleanup requests */
1266 static int stat_cleanup_retries; /* Number of cleanups that needed to flush */
1267 static int stat_cleanup_failures; /* Number of cleanup requests that failed */
1268 static int stat_emptyjblocks; /* Number of potentially empty journal blocks */
1269
1270 SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW,
1271 &max_softdeps, 0, "");
1272 SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW,
1273 &tickdelay, 0, "");
1274 SYSCTL_INT(_debug_softdep, OID_AUTO, flush_threads, CTLFLAG_RD,
1275 &stat_flush_threads, 0, "");
1276 SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push,
1277 CTLFLAG_RW | CTLFLAG_STATS, &stat_worklist_push, 0,"");
1278 SYSCTL_INT(_debug_softdep, OID_AUTO, delayed_inactivations, CTLFLAG_RD,
1279 &stat_delayed_inact, 0, "");
1280 SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push,
1281 CTLFLAG_RW | CTLFLAG_STATS, &stat_blk_limit_push, 0,"");
1282 SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push,
1283 CTLFLAG_RW | CTLFLAG_STATS, &stat_ino_limit_push, 0,"");
1284 SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit,
1285 CTLFLAG_RW | CTLFLAG_STATS, &stat_blk_limit_hit, 0, "");
1286 SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit,
1287 CTLFLAG_RW | CTLFLAG_STATS, &stat_ino_limit_hit, 0, "");
1288 SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit,
1289 CTLFLAG_RW | CTLFLAG_STATS, &stat_sync_limit_hit, 0, "");
1290 SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs,
1291 CTLFLAG_RW | CTLFLAG_STATS, &stat_indir_blk_ptrs, 0, "");
1292 SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap,
1293 CTLFLAG_RW | CTLFLAG_STATS, &stat_inode_bitmap, 0, "");
1294 SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs,
1295 CTLFLAG_RW | CTLFLAG_STATS, &stat_direct_blk_ptrs, 0, "");
1296 SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry,
1297 CTLFLAG_RW | CTLFLAG_STATS, &stat_dir_entry, 0, "");
1298 SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback,
1299 CTLFLAG_RW | CTLFLAG_STATS, &stat_jaddref, 0, "");
1300 SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback,
1301 CTLFLAG_RW | CTLFLAG_STATS, &stat_jnewblk, 0, "");
1302 SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low,
1303 CTLFLAG_RW | CTLFLAG_STATS, &stat_journal_low, 0, "");
1304 SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min,
1305 CTLFLAG_RW | CTLFLAG_STATS, &stat_journal_min, 0, "");
1306 SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait,
1307 CTLFLAG_RW | CTLFLAG_STATS, &stat_journal_wait, 0, "");
1308 SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage,
1309 CTLFLAG_RW | CTLFLAG_STATS, &stat_jwait_filepage, 0, "");
1310 SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks,
1311 CTLFLAG_RW | CTLFLAG_STATS, &stat_jwait_freeblks, 0, "");
1312 SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode,
1313 CTLFLAG_RW | CTLFLAG_STATS, &stat_jwait_inode, 0, "");
1314 SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk,
1315 CTLFLAG_RW | CTLFLAG_STATS, &stat_jwait_newblk, 0, "");
1316 SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests,
1317 CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_blkrequests, 0, "");
1318 SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests,
1319 CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_inorequests, 0, "");
1320 SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay,
1321 CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_high_delay, 0, "");
1322 SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries,
1323 CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_retries, 0, "");
1324 SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures,
1325 CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_failures, 0, "");
1326
1327 SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
1328 &softdep_flushcache, 0, "");
1329 SYSCTL_INT(_debug_softdep, OID_AUTO, emptyjblocks, CTLFLAG_RD,
1330 &stat_emptyjblocks, 0, "");
1331
1332 SYSCTL_DECL(_vfs_ffs);
1333
1334 /* Whether to recompute the summary at mount time */
1335 static int compute_summary_at_mount = 0;
1336 SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
1337 &compute_summary_at_mount, 0, "Recompute summary at mount");
1338 static int print_threads = 0;
1339 SYSCTL_INT(_debug_softdep, OID_AUTO, print_threads, CTLFLAG_RW,
1340 &print_threads, 0, "Notify flusher thread start/stop");
1341
1342 /* List of all filesystems mounted with soft updates */
1343 static TAILQ_HEAD(, mount_softdeps) softdepmounts;
1344
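/*
 * Helper for get_parent_vp() below: requeue the unfinished directory
 * dependencies and release bp so that it is safe to sleep waiting for
 * the parent vnode lock.
 */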
1345 static void
1346 get_parent_vp_unlock_bp(struct mount *mp,
1347 struct buf *bp,
1348 struct diraddhd *diraddhdp,
1349 struct diraddhd *unfinishedp)
1350 {
1351 struct diradd *dap;
1352
1353 /*
1354 * Requeue unfinished dependencies before
1355 * unlocking buffer, which could make
1356 * diraddhdp invalid.
1357 */
1358 ACQUIRE_LOCK(VFSTOUFS(mp));
1359 while ((dap = LIST_FIRST(unfinishedp)) != NULL) {
1360 LIST_REMOVE(dap, da_pdlist);
1361 LIST_INSERT_HEAD(diraddhdp, dap, da_pdlist);
1362 }
1363 FREE_LOCK(VFSTOUFS(mp));
1364
1365 bp->b_vflags &= ~BV_SCANNED;
1366 BUF_NOREC(bp);
1367 BUF_UNLOCK(bp);
1368 }
1369
/*
 * This function fetches inode inum on mount point mp. We already
 * hold a locked vnode vp, and might have a locked buffer bp belonging
 * to vp.
 *
 * We must not block on acquiring the new inode lock as we will get
 * into a lock-order reversal with the buffer lock and possibly get a
 * deadlock. Thus if we cannot instantiate the requested vnode
 * without sleeping on its lock, we must unlock the vnode and the
 * buffer before blocking on the vnode lock. We return ERELOOKUP
 * if we have had to unlock either the vnode or the buffer so that
 * the caller can reassess its state.
 *
 * Top-level VFS code (for syscalls and other consumers, e.g. callers
 * of VOP_FSYNC() in the syncer) checks for ERELOOKUP and restarts at
 * a safe point.
 *
 * Since callers expect to operate on a fully constructed vnode, we
 * also recheck v_data after relocking, and return ENOENT if it is
 * NULL.
 *
 * If unlocking bp, we must unroll dequeueing its unfinished
 * dependencies, and clear the scan flag, before unlocking. If
 * unlocking vp while it is under deactivation, we re-queue the
 * deactivation.
 */
1394 static int
1395 get_parent_vp(struct vnode *vp,
1396 struct mount *mp,
1397 ino_t inum,
1398 struct buf *bp,
1399 struct diraddhd *diraddhdp,
1400 struct diraddhd *unfinishedp,
1401 struct vnode **rvp)
1402 {
1403 struct vnode *pvp;
1404 int error;
1405 bool bplocked;
1406
1407 ASSERT_VOP_ELOCKED(vp, "child vnode must be locked");
1408 for (bplocked = true, pvp = NULL;;) {
1409 error = ffs_vgetf(mp, inum, LK_EXCLUSIVE | LK_NOWAIT, &pvp,
1410 FFSV_FORCEINSMQ | FFSV_FORCEINODEDEP);
1411 if (error == 0) {
1412 /*
1413 * Since we could have unlocked vp, the inode
1414 * number could no longer indicate a
1415 * constructed node. In this case, we must
1416 * restart the syscall.
1417 */
1418 if (VTOI(pvp)->i_mode == 0 || !bplocked) {
1419 if (bp != NULL && bplocked)
1420 get_parent_vp_unlock_bp(mp, bp,
1421 diraddhdp, unfinishedp);
1422 if (VTOI(pvp)->i_mode == 0)
1423 vgone(pvp);
1424 error = ERELOOKUP;
1425 goto out2;
1426 }
1427 goto out1;
1428 }
1429 if (bp != NULL && bplocked) {
1430 get_parent_vp_unlock_bp(mp, bp, diraddhdp, unfinishedp);
1431 bplocked = false;
1432 }
1433
1434 /*
1435 * Do not drop vnode lock while inactivating during
1436 * vunref. This would result in leaks of the VI flags
1437 * and reclaiming of non-truncated vnode. Instead,
1438 * re-schedule inactivation hoping that we would be
1439 * able to sync inode later.
1440 */
1441 if ((vp->v_iflag & VI_DOINGINACT) != 0 &&
1442 (vp->v_vflag & VV_UNREF) != 0) {
1443 VI_LOCK(vp);
1444 vp->v_iflag |= VI_OWEINACT;
1445 VI_UNLOCK(vp);
1446 return (ERELOOKUP);
1447 }
1448
1449 VOP_UNLOCK(vp);
1450 error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &pvp,
1451 FFSV_FORCEINSMQ | FFSV_FORCEINODEDEP);
1452 if (error != 0) {
1453 MPASS(error != ERELOOKUP);
1454 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1455 break;
1456 }
1457 if (VTOI(pvp)->i_mode == 0) {
1458 vgone(pvp);
1459 vput(pvp);
1460 pvp = NULL;
1461 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1462 error = ERELOOKUP;
1463 break;
1464 }
1465 error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
1466 if (error == 0)
1467 break;
1468 vput(pvp);
1469 pvp = NULL;
1470 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1471 if (vp->v_data == NULL) {
1472 error = ENOENT;
1473 break;
1474 }
1475 }
1476 if (bp != NULL) {
1477 MPASS(!bplocked);
1478 error = ERELOOKUP;
1479 }
1480 out2:
1481 if (error != 0 && pvp != NULL) {
1482 vput(pvp);
1483 pvp = NULL;
1484 }
1485 out1:
1486 *rvp = pvp;
1487 ASSERT_VOP_ELOCKED(vp, "child vnode must be locked on return");
1488 return (error);
1489 }
1490
/*
 * This function cleans the worklist for a filesystem.
 * Each filesystem running with soft dependencies gets its own
 * thread to run in this function. The thread is started up in
 * softdep_mount and shut down in softdep_unmount. The threads
 * show up as part of the kernel "bufdaemon" process, whose process
 * entry is available in bufdaemonproc.
 */
1499 static int searchfailed;
1500 extern struct proc *bufdaemonproc;
1501 static void
1502 softdep_flush(void *addr)
1503 {
1504 struct mount *mp;
1505 struct thread *td;
1506 struct ufsmount *ump;
1507 int cleanups;
1508
1509 td = curthread;
1510 td->td_pflags |= TDP_NORUNNINGBUF;
1511 mp = (struct mount *)addr;
1512 ump = VFSTOUFS(mp);
1513 atomic_add_int(&stat_flush_threads, 1);
1514 ACQUIRE_LOCK(ump);
1515 ump->softdep_flags &= ~FLUSH_STARTING;
1516 wakeup(&ump->softdep_flushtd);
1517 FREE_LOCK(ump);
1518 if (print_threads) {
1519 if (stat_flush_threads == 1)
1520 printf("Running %s at pid %d\n", bufdaemonproc->p_comm,
1521 bufdaemonproc->p_pid);
1522 printf("Start thread %s\n", td->td_name);
1523 }
1524 for (;;) {
1525 while (softdep_process_worklist(mp, 0) > 0 ||
1526 (MOUNTEDSUJ(mp) &&
1527 VFSTOUFS(mp)->softdep_jblocks->jb_suspended))
1528 kthread_suspend_check();
1529 ACQUIRE_LOCK(ump);
1530 if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
1531 msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM,
1532 "sdflush", hz / 2);
1533 ump->softdep_flags &= ~FLUSH_CLEANUP;
1534 /*
1535 * Check to see if we are done and need to exit.
1536 */
1537 if ((ump->softdep_flags & FLUSH_EXIT) == 0) {
1538 FREE_LOCK(ump);
1539 continue;
1540 }
1541 ump->softdep_flags &= ~FLUSH_EXIT;
1542 cleanups = ump->um_softdep->sd_cleanups;
1543 FREE_LOCK(ump);
1544 wakeup(&ump->softdep_flags);
1545 if (print_threads) {
1546 printf("Stop thread %s: searchfailed %d, "
1547 "did cleanups %d\n",
1548 td->td_name, searchfailed, cleanups);
1549 }
1550 atomic_subtract_int(&stat_flush_threads, 1);
1551 kthread_exit();
1552 panic("kthread_exit failed\n");
1553 }
1554 }
1555
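/*
 * Prod the per-filesystem flusher thread into running. The caller
 * must hold the per-filesystem softdep lock.
 */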
1556 static void
1557 worklist_speedup(struct mount *mp)
1558 {
1559 struct ufsmount *ump;
1560
1561 ump = VFSTOUFS(mp);
1562 LOCK_OWNED(ump);
1563 if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
1564 ump->softdep_flags |= FLUSH_CLEANUP;
1565 wakeup(&ump->softdep_flushtd);
1566 }
1567
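/*
 * Issue a BIO_SPEEDUP request asking the lower layers to expedite
 * reclamation of roughly `shortage' bytes, if the device supports it.
 */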
1568 static void
1569 softdep_send_speedup(struct ufsmount *ump,
1570 off_t shortage,
1571 u_int flags)
1572 {
1573 struct buf *bp;
1574
1575 if ((ump->um_flags & UM_CANSPEEDUP) == 0)
1576 return;
1577
1578 bp = malloc(sizeof(*bp), M_TRIM, M_WAITOK | M_ZERO);
1579 bp->b_iocmd = BIO_SPEEDUP;
1580 bp->b_ioflags = flags;
1581 bp->b_bcount = omin(shortage, LONG_MAX);
1582 g_vfs_strategy(ump->um_bo, bp);
1583 bufwait(bp);
1584 free(bp, M_TRIM);
1585 }
1586
1587 static int
1588 softdep_speedup(struct ufsmount *ump)
1589 {
1590 struct ufsmount *altump;
1591 struct mount_softdeps *sdp;
1592
1593 LOCK_OWNED(ump);
1594 worklist_speedup(ump->um_mountp);
1595 bd_speedup();
	/*
	 * If we have global shortages, then we need other
	 * filesystems to help with the cleanup. Here we wake up a
	 * flusher thread for a filesystem that is over its fair
	 * share of resources.
	 */
1602 if (req_clear_inodedeps || req_clear_remove) {
1603 ACQUIRE_GBLLOCK(&lk);
1604 TAILQ_FOREACH(sdp, &softdepmounts, sd_next) {
1605 if ((altump = sdp->sd_ump) == ump)
1606 continue;
1607 if (((req_clear_inodedeps &&
1608 altump->softdep_curdeps[D_INODEDEP] >
1609 max_softdeps / stat_flush_threads) ||
1610 (req_clear_remove &&
1611 altump->softdep_curdeps[D_DIRREM] >
1612 (max_softdeps / 2) / stat_flush_threads)) &&
1613 TRY_ACQUIRE_LOCK(altump))
1614 break;
1615 }
1616 if (sdp == NULL) {
1617 searchfailed++;
1618 FREE_GBLLOCK(&lk);
1619 } else {
			/*
			 * Move to the end of the list so we pick a
			 * different one on our next try.
			 */
1624 TAILQ_REMOVE(&softdepmounts, sdp, sd_next);
1625 TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
1626 FREE_GBLLOCK(&lk);
1627 if ((altump->softdep_flags &
1628 (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
1629 altump->softdep_flags |= FLUSH_CLEANUP;
1630 altump->um_softdep->sd_cleanups++;
1631 wakeup(&altump->softdep_flushtd);
1632 FREE_LOCK(altump);
1633 }
1634 }
1635 return (speedup_syncer());
1636 }
1637
1638 /*
1639 * Add an item to the end of the work queue.
1640 * This routine requires that the lock be held.
1641 * This is the only routine that adds items to the list.
1642 * The following routine is the only one that removes items
1643 * and does so in order from first to last.
1644 */
1645
1646 #define WK_HEAD 0x0001 /* Add to HEAD. */
1647 #define WK_NODELAY 0x0002 /* Process immediately. */
1648
1649 static void
1650 add_to_worklist(struct worklist *wk, int flags)
1651 {
1652 struct ufsmount *ump;
1653
1654 ump = VFSTOUFS(wk->wk_mp);
1655 LOCK_OWNED(ump);
1656 if (wk->wk_state & ONWORKLIST)
1657 panic("add_to_worklist: %s(0x%X) already on list",
1658 TYPENAME(wk->wk_type), wk->wk_state);
1659 wk->wk_state |= ONWORKLIST;
1660 if (ump->softdep_on_worklist == 0) {
1661 LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
1662 ump->softdep_worklist_tail = wk;
1663 } else if (flags & WK_HEAD) {
1664 LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
1665 } else {
1666 LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list);
1667 ump->softdep_worklist_tail = wk;
1668 }
1669 ump->softdep_on_worklist += 1;
1670 if (flags & WK_NODELAY)
1671 worklist_speedup(wk->wk_mp);
1672 }
1673
1674 /*
1675 * Remove the item to be processed. If we are removing the last
1676 * item on the list, we need to recalculate the tail pointer.
1677 */
1678 static void
1679 remove_from_worklist(struct worklist *wk)
1680 {
1681 struct ufsmount *ump;
1682
1683 ump = VFSTOUFS(wk->wk_mp);
1684 if (ump->softdep_worklist_tail == wk)
1685 ump->softdep_worklist_tail =
1686 (struct worklist *)wk->wk_list.le_prev;
1687 WORKLIST_REMOVE(wk);
1688 ump->softdep_on_worklist -= 1;
1689 }
1690
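/*
 * Wake up any thread sleeping in wait_worklist() on this work item.
 */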
1691 static void
1692 wake_worklist(struct worklist *wk)
1693 {
1694 if (wk->wk_state & IOWAITING) {
1695 wk->wk_state &= ~IOWAITING;
1696 wakeup(wk);
1697 }
1698 }
1699
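/*
 * Sleep until the work item is processed; the per-filesystem lock is
 * dropped while sleeping and reacquired before returning.
 */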
1700 static void
1701 wait_worklist(struct worklist *wk, char *wmesg)
1702 {
1703 struct ufsmount *ump;
1704
1705 ump = VFSTOUFS(wk->wk_mp);
1706 wk->wk_state |= IOWAITING;
1707 msleep(wk, LOCK_PTR(ump), PVM, wmesg, 0);
1708 }
1709
/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that everything is done in the order in which it
 * appears in the queue. The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed. This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 */
1719 static int
1720 softdep_process_worklist(struct mount *mp, int full)
1721 {
1722 int cnt, matchcnt;
1723 struct ufsmount *ump;
1724 long starttime;
1725
1726 KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp"));
1727 ump = VFSTOUFS(mp);
1728 if (ump->um_softdep == NULL)
1729 return (0);
1730 matchcnt = 0;
1731 ACQUIRE_LOCK(ump);
1732 starttime = time_second;
1733 softdep_process_journal(mp, NULL, full ? MNT_WAIT : 0);
1734 check_clear_deps(mp);
1735 while (ump->softdep_on_worklist > 0) {
1736 if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0)
1737 break;
1738 else
1739 matchcnt += cnt;
1740 check_clear_deps(mp);
1741 /*
1742 * We do not generally want to stop for buffer space, but if
1743 * we are really being a buffer hog, we will stop and wait.
1744 */
1745 if (should_yield()) {
1746 FREE_LOCK(ump);
1747 kern_yield(PRI_USER);
1748 bwillwrite();
1749 ACQUIRE_LOCK(ump);
1750 }
1751 /*
1752 * Never allow processing to run for more than one
1753 * second. This gives the syncer thread the opportunity
1754 * to pause if appropriate.
1755 */
1756 if (!full && starttime != time_second)
1757 break;
1758 }
1759 if (full == 0)
1760 journal_unsuspend(ump);
1761 FREE_LOCK(ump);
1762 return (matchcnt);
1763 }
1764
1765 /*
1766 * Process all removes associated with a vnode if we are running out of
1767 * journal space. Any other process which attempts to flush these will
 * be unable to do so because we have the vnode locked.
1769 */
1770 static void
1771 process_removes(struct vnode *vp)
1772 {
1773 struct inodedep *inodedep;
1774 struct dirrem *dirrem;
1775 struct ufsmount *ump;
1776 struct mount *mp;
1777 ino_t inum;
1778
1779 mp = vp->v_mount;
1780 ump = VFSTOUFS(mp);
1781 LOCK_OWNED(ump);
1782 inum = VTOI(vp)->i_number;
1783 for (;;) {
1784 top:
1785 if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
1786 return;
1787 LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) {
1788 /*
1789 * If another thread is trying to lock this vnode
1790 * it will fail but we must wait for it to do so
1791 * before we can proceed.
1792 */
1793 if (dirrem->dm_state & INPROGRESS) {
1794 wait_worklist(&dirrem->dm_list, "pwrwait");
1795 goto top;
1796 }
1797 if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) ==
1798 (COMPLETE | ONWORKLIST))
1799 break;
1800 }
1801 if (dirrem == NULL)
1802 return;
1803 remove_from_worklist(&dirrem->dm_list);
1804 FREE_LOCK(ump);
1805 if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
1806 panic("process_removes: suspended filesystem");
1807 handle_workitem_remove(dirrem, 0);
1808 vn_finished_secondary_write(mp);
1809 ACQUIRE_LOCK(ump);
1810 }
1811 }
1812
1813 /*
1814 * Process all truncations associated with a vnode if we are running out
1815 * of journal space. This is called when the vnode lock is already held
 * and no other process can clear the truncation.
1818 */
1819 static void
1820 process_truncates(struct vnode *vp)
1821 {
1822 struct inodedep *inodedep;
1823 struct freeblks *freeblks;
1824 struct ufsmount *ump;
1825 struct mount *mp;
1826 ino_t inum;
1827 int cgwait;
1828
1829 mp = vp->v_mount;
1830 ump = VFSTOUFS(mp);
1831 LOCK_OWNED(ump);
1832 inum = VTOI(vp)->i_number;
1833 for (;;) {
1834 if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
1835 return;
1836 cgwait = 0;
1837 TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
1838 /* Journal entries not yet written. */
1839 if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
1840 jwait(&LIST_FIRST(
1841 &freeblks->fb_jblkdephd)->jb_list,
1842 MNT_WAIT);
1843 break;
1844 }
1845 /* Another thread is executing this item. */
1846 if (freeblks->fb_state & INPROGRESS) {
1847 wait_worklist(&freeblks->fb_list, "ptrwait");
1848 break;
1849 }
			/* Freeblks is waiting on an inode write. */
1851 if ((freeblks->fb_state & COMPLETE) == 0) {
1852 FREE_LOCK(ump);
1853 ffs_update(vp, 1);
1854 ACQUIRE_LOCK(ump);
1855 break;
1856 }
1857 if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) ==
1858 (ALLCOMPLETE | ONWORKLIST)) {
1859 remove_from_worklist(&freeblks->fb_list);
1860 freeblks->fb_state |= INPROGRESS;
1861 FREE_LOCK(ump);
1862 if (vn_start_secondary_write(NULL, &mp,
1863 V_NOWAIT))
1864 panic("process_truncates: "
1865 "suspended filesystem");
1866 handle_workitem_freeblocks(freeblks, 0);
1867 vn_finished_secondary_write(mp);
1868 ACQUIRE_LOCK(ump);
1869 break;
1870 }
1871 if (freeblks->fb_cgwait)
1872 cgwait++;
1873 }
1874 if (cgwait) {
1875 FREE_LOCK(ump);
1876 sync_cgs(mp, MNT_WAIT);
1877 ffs_sync_snap(mp, MNT_WAIT);
1878 ACQUIRE_LOCK(ump);
1879 continue;
1880 }
1881 if (freeblks == NULL)
1882 break;
1883 }
1884 return;
1885 }
1886
1887 /*
1888 * Process one item on the worklist.
1889 */
1890 static int
1891 process_worklist_item(struct mount *mp,
1892 int target,
1893 int flags)
1894 {
1895 struct worklist sentinel;
1896 struct worklist *wk;
1897 struct ufsmount *ump;
1898 int matchcnt;
1899 int error;
1900
1901 KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
1902 /*
1903 * If we are being called because of a process doing a
1904 * copy-on-write, then it is not safe to write as we may
1905 * recurse into the copy-on-write routine.
1906 */
1907 if (curthread->td_pflags & TDP_COWINPROGRESS)
1908 return (-1);
1909 PHOLD(curproc); /* Don't let the stack go away. */
1910 ump = VFSTOUFS(mp);
1911 LOCK_OWNED(ump);
1912 matchcnt = 0;
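	/*
	 * Walk the pending list using a private sentinel so that our
	 * position survives dropping the per-mount lock while a work item
	 * runs.  If we encounter another thread's sentinel, we simply step
	 * our own past it.
	 */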
1913 sentinel.wk_mp = NULL;
1914 sentinel.wk_type = D_SENTINEL;
1915 LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
1916 for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
1917 wk = LIST_NEXT(&sentinel, wk_list)) {
1918 if (wk->wk_type == D_SENTINEL) {
1919 LIST_REMOVE(&sentinel, wk_list);
1920 LIST_INSERT_AFTER(wk, &sentinel, wk_list);
1921 continue;
1922 }
1923 if (wk->wk_state & INPROGRESS)
1924 panic("process_worklist_item: %p already in progress.",
1925 wk);
1926 wk->wk_state |= INPROGRESS;
1927 remove_from_worklist(wk);
1928 FREE_LOCK(ump);
1929 if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
1930 panic("process_worklist_item: suspended filesystem");
1931 switch (wk->wk_type) {
1932 case D_DIRREM:
1933 /* removal of a directory entry */
1934 error = handle_workitem_remove(WK_DIRREM(wk), flags);
1935 break;
1936
1937 case D_FREEBLKS:
1938 /* releasing blocks and/or fragments from a file */
1939 error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
1940 flags);
1941 break;
1942
1943 case D_FREEFRAG:
1944 /* releasing a fragment when replaced as a file grows */
1945 handle_workitem_freefrag(WK_FREEFRAG(wk));
1946 error = 0;
1947 break;
1948
1949 case D_FREEFILE:
1950 /* releasing an inode when its link count drops to 0 */
1951 handle_workitem_freefile(WK_FREEFILE(wk));
1952 error = 0;
1953 break;
1954
1955 default:
1956 panic("%s_process_worklist: Unknown type %s",
1957 "softdep", TYPENAME(wk->wk_type));
1958 /* NOTREACHED */
1959 }
1960 vn_finished_secondary_write(mp);
1961 ACQUIRE_LOCK(ump);
1962 if (error == 0) {
1963 if (++matchcnt == target)
1964 break;
1965 continue;
1966 }
1967 /*
1968 * We have to retry the worklist item later. Wake up any
1969 * waiters who may be able to complete it immediately and
1970 * add the item back to the head so we don't try to execute
1971 * it again.
1972 */
1973 wk->wk_state &= ~INPROGRESS;
1974 wake_worklist(wk);
1975 add_to_worklist(wk, WK_HEAD);
1976 }
	/* The sentinel may have become the tail in remove_from_worklist(). */
1978 if (ump->softdep_worklist_tail == &sentinel)
1979 ump->softdep_worklist_tail =
1980 (struct worklist *)sentinel.wk_list.le_prev;
1981 LIST_REMOVE(&sentinel, wk_list);
1982 PRELE(curproc);
1983 return (matchcnt);
1984 }
1985
1986 /*
1987 * Move dependencies from one buffer to another.
1988 */
1989 int
1990 softdep_move_dependencies(struct buf *oldbp, struct buf *newbp)
1991 {
1992 struct worklist *wk, *wktail;
1993 struct ufsmount *ump;
1994 int dirty;
1995
1996 if ((wk = LIST_FIRST(&oldbp->b_dep)) == NULL)
1997 return (0);
1998 KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
1999 ("softdep_move_dependencies called on non-softdep filesystem"));
2000 dirty = 0;
2001 wktail = NULL;
2002 ump = VFSTOUFS(wk->wk_mp);
2003 ACQUIRE_LOCK(ump);
2004 while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
2005 LIST_REMOVE(wk, wk_list);
2006 if (wk->wk_type == D_BMSAFEMAP &&
2007 bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
2008 dirty = 1;
2009 if (wktail == NULL)
2010 LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
2011 else
2012 LIST_INSERT_AFTER(wktail, wk, wk_list);
2013 wktail = wk;
2014 }
2015 FREE_LOCK(ump);
2016
2017 return (dirty);
2018 }
2019
2020 /*
2021 * Purge the work list of all items associated with a particular mount point.
2022 */
2023 int
2024 softdep_flushworklist(struct mount *oldmnt,
2025 int *countp,
2026 struct thread *td)
2027 {
2028 struct vnode *devvp;
2029 struct ufsmount *ump;
2030 int count, error;
2031
2032 /*
2033 * Alternately flush the block device associated with the mount
2034 * point and process any dependencies that the flushing
2035 * creates. We continue until no more worklist dependencies
2036 * are found.
2037 */
2038 *countp = 0;
2039 error = 0;
2040 ump = VFSTOUFS(oldmnt);
2041 devvp = ump->um_devvp;
2042 while ((count = softdep_process_worklist(oldmnt, 1)) > 0) {
2043 *countp += count;
2044 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
2045 error = VOP_FSYNC(devvp, MNT_WAIT, td);
2046 VOP_UNLOCK(devvp);
2047 if (error != 0)
2048 break;
2049 }
2050 return (error);
2051 }
2052
2053 #define SU_WAITIDLE_RETRIES 20
2054 static int
2055 softdep_waitidle(struct mount *mp, int flags __unused)
2056 {
2057 struct ufsmount *ump;
2058 struct vnode *devvp;
2059 struct thread *td;
2060 int error, i;
2061
2062 ump = VFSTOUFS(mp);
2063 KASSERT(ump->um_softdep != NULL,
2064 ("softdep_waitidle called on non-softdep filesystem"));
2065 devvp = ump->um_devvp;
2066 td = curthread;
2067 error = 0;
2068 ACQUIRE_LOCK(ump);
2069 for (i = 0; i < SU_WAITIDLE_RETRIES && ump->softdep_deps != 0; i++) {
2070 ump->softdep_req = 1;
2071 KASSERT((flags & FORCECLOSE) == 0 ||
2072 ump->softdep_on_worklist == 0,
2073 ("softdep_waitidle: work added after flush"));
2074 msleep(&ump->softdep_deps, LOCK_PTR(ump), PVM | PDROP,
2075 "softdeps", 10 * hz);
2076 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
2077 error = VOP_FSYNC(devvp, MNT_WAIT, td);
2078 VOP_UNLOCK(devvp);
2079 ACQUIRE_LOCK(ump);
2080 if (error != 0)
2081 break;
2082 }
2083 ump->softdep_req = 0;
2084 if (i == SU_WAITIDLE_RETRIES && error == 0 && ump->softdep_deps != 0) {
2085 error = EBUSY;
2086 printf("softdep_waitidle: Failed to flush worklist for %p\n",
2087 mp);
2088 }
2089 FREE_LOCK(ump);
2090 return (error);
2091 }
2092
2093 /*
2094 * Flush all vnodes and worklist items associated with a specified mount point.
2095 */
2096 int
2097 softdep_flushfiles(struct mount *oldmnt,
2098 int flags,
2099 struct thread *td)
2100 {
2101 struct ufsmount *ump __unused;
2102 #ifdef QUOTA
2103 int i;
2104 #endif
2105 int error, early, depcount, loopcnt, retry_flush_count, retry;
2106 int morework;
2107
2108 ump = VFSTOUFS(oldmnt);
2109 KASSERT(ump->um_softdep != NULL,
2110 ("softdep_flushfiles called on non-softdep filesystem"));
2111 loopcnt = 10;
2112 retry_flush_count = 3;
2113 retry_flush:
2114 error = 0;
2115
2116 /*
2117 * Alternately flush the vnodes associated with the mount
2118 * point and process any dependencies that the flushing
	 * creates.  In theory, this loop can iterate at most twice,
	 * but we give it a few extra passes just to be sure.
2121 */
2122 for (; loopcnt > 0; loopcnt--) {
2123 /*
2124 * Do another flush in case any vnodes were brought in
2125 * as part of the cleanup operations.
2126 */
2127 early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag &
2128 MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH;
2129 if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0)
2130 break;
2131 if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 ||
2132 depcount == 0)
2133 break;
2134 }
2135 /*
2136 * If we are unmounting then it is an error to fail. If we
2137 * are simply trying to downgrade to read-only, then filesystem
2138 * activity can keep us busy forever, so we just fail with EBUSY.
2139 */
2140 if (loopcnt == 0) {
2141 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
2142 panic("softdep_flushfiles: looping");
2143 error = EBUSY;
2144 }
2145 if (!error)
2146 error = softdep_waitidle(oldmnt, flags);
2147 if (!error) {
2148 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) {
2149 retry = 0;
2150 MNT_ILOCK(oldmnt);
2151 morework = oldmnt->mnt_nvnodelistsize > 0;
2152 #ifdef QUOTA
2153 UFS_LOCK(ump);
2154 for (i = 0; i < MAXQUOTAS; i++) {
2155 if (ump->um_quotas[i] != NULLVP)
2156 morework = 1;
2157 }
2158 UFS_UNLOCK(ump);
2159 #endif
2160 if (morework) {
2161 if (--retry_flush_count > 0) {
2162 retry = 1;
2163 loopcnt = 3;
2164 } else
2165 error = EBUSY;
2166 }
2167 MNT_IUNLOCK(oldmnt);
2168 if (retry)
2169 goto retry_flush;
2170 }
2171 }
2172 return (error);
2173 }
2174
2175 /*
2176 * Structure hashing.
2177 *
2178 * There are four types of structures that can be looked up:
2179 * 1) pagedep structures identified by mount point, inode number,
2180 * and logical block.
2181 * 2) inodedep structures identified by mount point and inode number.
2182 * 3) newblk structures identified by mount point and
2183 * physical block number.
2184 * 4) bmsafemap structures identified by mount point and
2185 * cylinder group number.
2186 *
2187 * The "pagedep" and "inodedep" dependency structures are hashed
2188 * separately from the file blocks and inodes to which they correspond.
2189 * This separation helps when the in-memory copy of an inode or
2190 * file block must be replaced. It also obviates the need to access
2191 * an inode or file page when simply updating (or de-allocating)
2192 * dependency structures. Lookup of newblk structures is needed to
2193 * find newly allocated blocks when trying to associate them with
2194 * their allocdirect or allocindir structure.
2195 *
2196 * The lookup routines optionally create and hash a new instance when
2197 * an existing entry is not found. The bmsafemap lookup routine always
2198 * allocates a new structure if an existing one is not found.
2199 */
2200 #define DEPALLOC 0x0001 /* allocate structure if lookup fails */
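
/*
 * When a lookup with DEPALLOC misses, the per-mount lock is dropped for
 * the allocation and the hash chain is searched again once the lock has
 * been re-acquired, in case another thread raced in.  A minimal sketch
 * of a hypothetical caller, for illustration only:
 *
 *	ACQUIRE_LOCK(ump);
 *	if (inodedep_lookup(mp, inum, DEPALLOC, &inodedep) == 0)
 *		... a new inodedep was allocated and hashed ...
 *	FREE_LOCK(ump);
 */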
2201
2202 /*
2203 * Structures and routines associated with pagedep caching.
2204 */
2205 #define PAGEDEP_HASH(ump, inum, lbn) \
2206 (&(ump)->pagedep_hashtbl[((inum) + (lbn)) & (ump)->pagedep_hash_size])
2207
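/*
 * Search a pagedep hash chain for the given <ino, lbn> pair.  Returns 1
 * and sets *pagedeppp when a match is found and 0 otherwise.
 */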
2208 static int
2209 pagedep_find(struct pagedep_hashhead *pagedephd,
2210 ino_t ino,
2211 ufs_lbn_t lbn,
2212 struct pagedep **pagedeppp)
2213 {
2214 struct pagedep *pagedep;
2215
2216 LIST_FOREACH(pagedep, pagedephd, pd_hash) {
2217 if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn) {
2218 *pagedeppp = pagedep;
2219 return (1);
2220 }
2221 }
2222 *pagedeppp = NULL;
2223 return (0);
2224 }
2225 /*
2226 * Look up a pagedep. Return 1 if found, 0 otherwise.
2227 * If not found, allocate if DEPALLOC flag is passed.
2228 * Found or allocated entry is returned in pagedeppp.
2229 */
2230 static int
2231 pagedep_lookup(struct mount *mp,
2232 struct buf *bp,
2233 ino_t ino,
2234 ufs_lbn_t lbn,
2235 int flags,
2236 struct pagedep **pagedeppp)
2237 {
2238 struct pagedep *pagedep;
2239 struct pagedep_hashhead *pagedephd;
2240 struct worklist *wk;
2241 struct ufsmount *ump;
2242 int ret;
2243 int i;
2244
2245 ump = VFSTOUFS(mp);
2246 LOCK_OWNED(ump);
2247 if (bp) {
2248 LIST_FOREACH(wk, &bp->b_dep, wk_list) {
2249 if (wk->wk_type == D_PAGEDEP) {
2250 *pagedeppp = WK_PAGEDEP(wk);
2251 return (1);
2252 }
2253 }
2254 }
2255 pagedephd = PAGEDEP_HASH(ump, ino, lbn);
2256 ret = pagedep_find(pagedephd, ino, lbn, pagedeppp);
2257 if (ret) {
2258 if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp)
2259 WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list);
2260 return (1);
2261 }
2262 if ((flags & DEPALLOC) == 0)
2263 return (0);
2264 FREE_LOCK(ump);
2265 pagedep = malloc(sizeof(struct pagedep),
2266 M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO);
2267 workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp);
2268 ACQUIRE_LOCK(ump);
2269 ret = pagedep_find(pagedephd, ino, lbn, pagedeppp);
2270 if (*pagedeppp) {
2271 /*
2272 * This should never happen since we only create pagedeps
2273 * with the vnode lock held. Could be an assert.
2274 */
2275 WORKITEM_FREE(pagedep, D_PAGEDEP);
2276 return (ret);
2277 }
2278 pagedep->pd_ino = ino;
2279 pagedep->pd_lbn = lbn;
2280 LIST_INIT(&pagedep->pd_dirremhd);
2281 LIST_INIT(&pagedep->pd_pendinghd);
2282 for (i = 0; i < DAHASHSZ; i++)
2283 LIST_INIT(&pagedep->pd_diraddhd[i]);
2284 LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
2285 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
2286 *pagedeppp = pagedep;
2287 return (0);
2288 }
2289
2290 /*
2291 * Structures and routines associated with inodedep caching.
2292 */
2293 #define INODEDEP_HASH(ump, inum) \
2294 (&(ump)->inodedep_hashtbl[(inum) & (ump)->inodedep_hash_size])
2295
2296 static int
2297 inodedep_find(struct inodedep_hashhead *inodedephd,
2298 ino_t inum,
2299 struct inodedep **inodedeppp)
2300 {
2301 struct inodedep *inodedep;
2302
2303 LIST_FOREACH(inodedep, inodedephd, id_hash)
2304 if (inum == inodedep->id_ino)
2305 break;
2306 if (inodedep) {
2307 *inodedeppp = inodedep;
2308 return (1);
2309 }
2310 *inodedeppp = NULL;
2311
2312 return (0);
2313 }
2314 /*
2315 * Look up an inodedep. Return 1 if found, 0 if not found.
2316 * If not found, allocate if DEPALLOC flag is passed.
2317 * Found or allocated entry is returned in inodedeppp.
2318 */
2319 static int
2320 inodedep_lookup(struct mount *mp,
2321 ino_t inum,
2322 int flags,
2323 struct inodedep **inodedeppp)
2324 {
2325 struct inodedep *inodedep;
2326 struct inodedep_hashhead *inodedephd;
2327 struct ufsmount *ump;
2328 struct fs *fs;
2329
2330 ump = VFSTOUFS(mp);
2331 LOCK_OWNED(ump);
2332 fs = ump->um_fs;
2333 inodedephd = INODEDEP_HASH(ump, inum);
2334
2335 if (inodedep_find(inodedephd, inum, inodedeppp))
2336 return (1);
2337 if ((flags & DEPALLOC) == 0)
2338 return (0);
2339 /*
2340 * If the system is over its limit and our filesystem is
2341 * responsible for more than our share of that usage and
2342 * we are not in a rush, request some inodedep cleanup.
2343 */
2344 if (softdep_excess_items(ump, D_INODEDEP))
2345 schedule_cleanup(mp);
2346 else
2347 FREE_LOCK(ump);
2348 inodedep = malloc(sizeof(struct inodedep),
2349 M_INODEDEP, M_SOFTDEP_FLAGS);
2350 workitem_alloc(&inodedep->id_list, D_INODEDEP, mp);
2351 ACQUIRE_LOCK(ump);
2352 if (inodedep_find(inodedephd, inum, inodedeppp)) {
2353 WORKITEM_FREE(inodedep, D_INODEDEP);
2354 return (1);
2355 }
2356 inodedep->id_fs = fs;
2357 inodedep->id_ino = inum;
2358 inodedep->id_state = ALLCOMPLETE;
2359 inodedep->id_nlinkdelta = 0;
2360 inodedep->id_nlinkwrote = -1;
2361 inodedep->id_savedino1 = NULL;
2362 inodedep->id_savedsize = -1;
2363 inodedep->id_savedextsize = -1;
2364 inodedep->id_savednlink = -1;
2365 inodedep->id_bmsafemap = NULL;
2366 inodedep->id_mkdiradd = NULL;
2367 LIST_INIT(&inodedep->id_dirremhd);
2368 LIST_INIT(&inodedep->id_pendinghd);
2369 LIST_INIT(&inodedep->id_inowait);
2370 LIST_INIT(&inodedep->id_bufwait);
2371 TAILQ_INIT(&inodedep->id_inoreflst);
2372 TAILQ_INIT(&inodedep->id_inoupdt);
2373 TAILQ_INIT(&inodedep->id_newinoupdt);
2374 TAILQ_INIT(&inodedep->id_extupdt);
2375 TAILQ_INIT(&inodedep->id_newextupdt);
2376 TAILQ_INIT(&inodedep->id_freeblklst);
2377 LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
2378 *inodedeppp = inodedep;
2379 return (0);
2380 }
2381
2382 /*
2383 * Structures and routines associated with newblk caching.
2384 */
2385 #define NEWBLK_HASH(ump, inum) \
2386 (&(ump)->newblk_hashtbl[(inum) & (ump)->newblk_hash_size])
2387
2388 static int
2389 newblk_find(struct newblk_hashhead *newblkhd,
2390 ufs2_daddr_t newblkno,
2391 int flags,
2392 struct newblk **newblkpp)
2393 {
2394 struct newblk *newblk;
2395
2396 LIST_FOREACH(newblk, newblkhd, nb_hash) {
2397 if (newblkno != newblk->nb_newblkno)
2398 continue;
2399 /*
		 * If we're creating a new dependency, don't match those that
2401 * have already been converted to allocdirects. This is for
2402 * a frag extend.
2403 */
2404 if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK)
2405 continue;
2406 break;
2407 }
2408 if (newblk) {
2409 *newblkpp = newblk;
2410 return (1);
2411 }
2412 *newblkpp = NULL;
2413 return (0);
2414 }
2415
2416 /*
2417 * Look up a newblk. Return 1 if found, 0 if not found.
2418 * If not found, allocate if DEPALLOC flag is passed.
2419 * Found or allocated entry is returned in newblkpp.
2420 */
2421 static int
2422 newblk_lookup(struct mount *mp,
2423 ufs2_daddr_t newblkno,
2424 int flags,
2425 struct newblk **newblkpp)
2426 {
2427 struct newblk *newblk;
2428 struct newblk_hashhead *newblkhd;
2429 struct ufsmount *ump;
2430
2431 ump = VFSTOUFS(mp);
2432 LOCK_OWNED(ump);
2433 newblkhd = NEWBLK_HASH(ump, newblkno);
2434 if (newblk_find(newblkhd, newblkno, flags, newblkpp))
2435 return (1);
2436 if ((flags & DEPALLOC) == 0)
2437 return (0);
2438 if (softdep_excess_items(ump, D_NEWBLK) ||
2439 softdep_excess_items(ump, D_ALLOCDIRECT) ||
2440 softdep_excess_items(ump, D_ALLOCINDIR))
2441 schedule_cleanup(mp);
2442 else
2443 FREE_LOCK(ump);
2444 newblk = malloc(sizeof(union allblk), M_NEWBLK,
2445 M_SOFTDEP_FLAGS | M_ZERO);
2446 workitem_alloc(&newblk->nb_list, D_NEWBLK, mp);
2447 ACQUIRE_LOCK(ump);
2448 if (newblk_find(newblkhd, newblkno, flags, newblkpp)) {
2449 WORKITEM_FREE(newblk, D_NEWBLK);
2450 return (1);
2451 }
2452 newblk->nb_freefrag = NULL;
2453 LIST_INIT(&newblk->nb_indirdeps);
2454 LIST_INIT(&newblk->nb_newdirblk);
2455 LIST_INIT(&newblk->nb_jwork);
2456 newblk->nb_state = ATTACHED;
2457 newblk->nb_newblkno = newblkno;
2458 LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
2459 *newblkpp = newblk;
2460 return (0);
2461 }
2462
2463 /*
2464 * Structures and routines associated with freed indirect block caching.
2465 */
2466 #define INDIR_HASH(ump, blkno) \
2467 (&(ump)->indir_hashtbl[(blkno) & (ump)->indir_hash_size])
2468
2469 /*
 * Look up an indirect block in the indir hash table.  The freework is
2471 * removed and potentially freed. The caller must do a blocking journal
2472 * write before writing to the blkno.
2473 */
2474 static int
2475 indirblk_lookup(struct mount *mp, ufs2_daddr_t blkno)
2476 {
2477 struct freework *freework;
2478 struct indir_hashhead *wkhd;
2479 struct ufsmount *ump;
2480
2481 ump = VFSTOUFS(mp);
2482 wkhd = INDIR_HASH(ump, blkno);
2483 TAILQ_FOREACH(freework, wkhd, fw_next) {
2484 if (freework->fw_blkno != blkno)
2485 continue;
2486 indirblk_remove(freework);
2487 return (1);
2488 }
2489 return (0);
2490 }
2491
2492 /*
2493 * Insert an indirect block represented by freework into the indirblk
2494 * hash table so that it may prevent the block from being re-used prior
2495 * to the journal being written.
2496 */
2497 static void
2498 indirblk_insert(struct freework *freework)
2499 {
2500 struct jblocks *jblocks;
2501 struct jseg *jseg;
2502 struct ufsmount *ump;
2503
2504 ump = VFSTOUFS(freework->fw_list.wk_mp);
2505 jblocks = ump->softdep_jblocks;
2506 jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst);
2507 if (jseg == NULL)
2508 return;
2509
2510 LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs);
2511 TAILQ_INSERT_HEAD(INDIR_HASH(ump, freework->fw_blkno), freework,
2512 fw_next);
2513 freework->fw_state &= ~DEPCOMPLETE;
2514 }
2515
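/*
 * Remove an indirect block from the indirblk hash table, marking the
 * freework DEPCOMPLETE and freeing it if it has otherwise completed.
 */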
2516 static void
2517 indirblk_remove(struct freework *freework)
2518 {
2519 struct ufsmount *ump;
2520
2521 ump = VFSTOUFS(freework->fw_list.wk_mp);
2522 LIST_REMOVE(freework, fw_segs);
2523 TAILQ_REMOVE(INDIR_HASH(ump, freework->fw_blkno), freework, fw_next);
2524 freework->fw_state |= DEPCOMPLETE;
2525 if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
2526 WORKITEM_FREE(freework, D_FREEWORK);
2527 }
2528
2529 /*
 * Executed once during filesystem initialization, before any
 * filesystems are mounted.
2532 */
2533 void
2534 softdep_initialize(void)
2535 {
2536
2537 TAILQ_INIT(&softdepmounts);
2538 #ifdef __LP64__
2539 max_softdeps = desiredvnodes * 4;
2540 #else
2541 max_softdeps = desiredvnodes * 2;
2542 #endif
2543
	/* initialize bioops hack */
2545 bioops.io_start = softdep_disk_io_initiation;
2546 bioops.io_complete = softdep_disk_write_complete;
2547 bioops.io_deallocate = softdep_deallocate_dependencies;
2548 bioops.io_countdeps = softdep_count_dependencies;
2549 ast_register(TDA_UFS, ASTR_KCLEAR | ASTR_ASTF_REQUIRED, 0,
2550 softdep_ast_cleanup_proc);
2551
2552 /* Initialize the callout with an mtx. */
2553 callout_init_mtx(&softdep_callout, &lk, 0);
2554 }
2555
2556 /*
2557 * Executed after all filesystems have been unmounted during
2558 * filesystem module unload.
2559 */
2560 void
2561 softdep_uninitialize(void)
2562 {
2563
2564 /* clear bioops hack */
2565 bioops.io_start = NULL;
2566 bioops.io_complete = NULL;
2567 bioops.io_deallocate = NULL;
2568 bioops.io_countdeps = NULL;
2569 ast_deregister(TDA_UFS);
2570
2571 callout_drain(&softdep_callout);
2572 }
2573
2574 /*
2575 * Called at mount time to notify the dependency code that a
2576 * filesystem wishes to use it.
2577 */
2578 int
2579 softdep_mount(struct vnode *devvp,
2580 struct mount *mp,
2581 struct fs *fs,
2582 struct ucred *cred)
2583 {
2584 struct csum_total cstotal;
2585 struct mount_softdeps *sdp;
2586 struct ufsmount *ump;
2587 struct cg *cgp;
2588 struct buf *bp;
2589 u_int cyl, i;
2590 int error;
2591
2592 ump = VFSTOUFS(mp);
2593
2594 sdp = malloc(sizeof(struct mount_softdeps), M_MOUNTDATA,
2595 M_WAITOK | M_ZERO);
2596 rw_init(&sdp->sd_fslock, "SUrw");
2597 sdp->sd_ump = ump;
2598 LIST_INIT(&sdp->sd_workitem_pending);
2599 LIST_INIT(&sdp->sd_journal_pending);
2600 TAILQ_INIT(&sdp->sd_unlinked);
2601 LIST_INIT(&sdp->sd_dirtycg);
2602 sdp->sd_worklist_tail = NULL;
2603 sdp->sd_on_worklist = 0;
2604 sdp->sd_deps = 0;
2605 LIST_INIT(&sdp->sd_mkdirlisthd);
2606 sdp->sd_pdhash = hashinit(desiredvnodes / 5, M_PAGEDEP,
2607 &sdp->sd_pdhashsize);
2608 sdp->sd_pdnextclean = 0;
2609 sdp->sd_idhash = hashinit(desiredvnodes, M_INODEDEP,
2610 &sdp->sd_idhashsize);
2611 sdp->sd_idnextclean = 0;
2612 sdp->sd_newblkhash = hashinit(max_softdeps / 2, M_NEWBLK,
2613 &sdp->sd_newblkhashsize);
2614 sdp->sd_bmhash = hashinit(1024, M_BMSAFEMAP, &sdp->sd_bmhashsize);
2615 i = 1 << (ffs(desiredvnodes / 10) - 1);
2616 sdp->sd_indirhash = malloc(i * sizeof(struct indir_hashhead),
2617 M_FREEWORK, M_WAITOK);
2618 sdp->sd_indirhashsize = i - 1;
2619 for (i = 0; i <= sdp->sd_indirhashsize; i++)
2620 TAILQ_INIT(&sdp->sd_indirhash[i]);
2621 for (i = 0; i <= D_LAST; i++)
2622 LIST_INIT(&sdp->sd_alldeps[i]);
2623 ACQUIRE_GBLLOCK(&lk);
2624 TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
2625 FREE_GBLLOCK(&lk);
2626
2627 ump->um_softdep = sdp;
2628 MNT_ILOCK(mp);
2629 mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP;
2630 if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) {
2631 mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) |
2632 MNTK_SOFTDEP | MNTK_NOASYNC;
2633 }
2634 MNT_IUNLOCK(mp);
2635
2636 if ((fs->fs_flags & FS_SUJ) &&
2637 (error = journal_mount(mp, fs, cred)) != 0) {
2638 printf("Failed to start journal: %d\n", error);
2639 softdep_unmount(mp);
2640 return (error);
2641 }
2642 /*
2643 * Start our flushing thread in the bufdaemon process.
2644 */
2645 ACQUIRE_LOCK(ump);
2646 ump->softdep_flags |= FLUSH_STARTING;
2647 FREE_LOCK(ump);
2648 kproc_kthread_add(&softdep_flush, mp, &bufdaemonproc,
2649 &ump->softdep_flushtd, 0, 0, "softdepflush", "%s worker",
2650 mp->mnt_stat.f_mntonname);
2651 ACQUIRE_LOCK(ump);
2652 while ((ump->softdep_flags & FLUSH_STARTING) != 0) {
2653 msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM, "sdstart",
2654 hz / 2);
2655 }
2656 FREE_LOCK(ump);
2657 /*
2658 * When doing soft updates, the counters in the
2659 * superblock may have gotten out of sync. Recomputation
2660 * can take a long time and can be deferred for background
2661 * fsck. However, the old behavior of scanning the cylinder
2662 * groups and recalculating them at mount time is available
2663 * by setting vfs.ffs.compute_summary_at_mount to one.
2664 */
2665 if (compute_summary_at_mount == 0 || fs->fs_clean != 0)
2666 return (0);
2667 bzero(&cstotal, sizeof cstotal);
2668 for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
2669 if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
2670 fs->fs_cgsize, cred, &bp)) != 0) {
2671 brelse(bp);
2672 softdep_unmount(mp);
2673 return (error);
2674 }
2675 cgp = (struct cg *)bp->b_data;
2676 cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
2677 cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
2678 cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
2679 cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
2680 fs->fs_cs(fs, cyl) = cgp->cg_cs;
2681 brelse(bp);
2682 }
2683 #ifdef INVARIANTS
2684 if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
2685 printf("%s: superblock summary recomputed\n", fs->fs_fsmnt);
2686 #endif
2687 bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
2688 return (0);
2689 }
2690
2691 void
2692 softdep_unmount(struct mount *mp)
2693 {
2694 struct ufsmount *ump;
2695 struct mount_softdeps *ums;
2696
2697 ump = VFSTOUFS(mp);
2698 KASSERT(ump->um_softdep != NULL,
2699 ("softdep_unmount called on non-softdep filesystem"));
2700 MNT_ILOCK(mp);
2701 mp->mnt_flag &= ~MNT_SOFTDEP;
2702 if ((mp->mnt_flag & MNT_SUJ) == 0) {
2703 MNT_IUNLOCK(mp);
2704 } else {
2705 mp->mnt_flag &= ~MNT_SUJ;
2706 MNT_IUNLOCK(mp);
2707 journal_unmount(ump);
2708 }
2709 /*
	 * Shut down our flushing thread.  The check for NULL handles the
	 * case where softdep_mount() errored out before the thread was
	 * created.
2712 */
2713 if (ump->softdep_flushtd != NULL) {
2714 ACQUIRE_LOCK(ump);
2715 ump->softdep_flags |= FLUSH_EXIT;
2716 wakeup(&ump->softdep_flushtd);
2717 while ((ump->softdep_flags & FLUSH_EXIT) != 0) {
2718 msleep(&ump->softdep_flags, LOCK_PTR(ump), PVM,
2719 "sdwait", 0);
2720 }
2721 KASSERT((ump->softdep_flags & FLUSH_EXIT) == 0,
2722 ("Thread shutdown failed"));
2723 FREE_LOCK(ump);
2724 }
2725
2726 /*
	 * We no longer have a softdep structure attached to the ump.
2728 */
2729 ums = ump->um_softdep;
2730 ACQUIRE_GBLLOCK(&lk);
2731 TAILQ_REMOVE(&softdepmounts, ums, sd_next);
2732 FREE_GBLLOCK(&lk);
2733 ump->um_softdep = NULL;
2734
2735 KASSERT(ums->sd_on_journal == 0,
2736 ("ump %p ums %p on_journal %d", ump, ums, ums->sd_on_journal));
2737 KASSERT(ums->sd_on_worklist == 0,
2738 ("ump %p ums %p on_worklist %d", ump, ums, ums->sd_on_worklist));
2739 KASSERT(ums->sd_deps == 0,
2740 ("ump %p ums %p deps %d", ump, ums, ums->sd_deps));
2741
2742 /*
2743 * Free up our resources.
2744 */
2745 rw_destroy(&ums->sd_fslock);
2746 hashdestroy(ums->sd_pdhash, M_PAGEDEP, ums->sd_pdhashsize);
2747 hashdestroy(ums->sd_idhash, M_INODEDEP, ums->sd_idhashsize);
2748 hashdestroy(ums->sd_newblkhash, M_NEWBLK, ums->sd_newblkhashsize);
2749 hashdestroy(ums->sd_bmhash, M_BMSAFEMAP, ums->sd_bmhashsize);
2750 free(ums->sd_indirhash, M_FREEWORK);
2751 #ifdef INVARIANTS
2752 for (int i = 0; i <= D_LAST; i++) {
2753 KASSERT(ums->sd_curdeps[i] == 0,
2754 ("Unmount %s: Dep type %s != 0 (%ld)", ump->um_fs->fs_fsmnt,
2755 TYPENAME(i), ums->sd_curdeps[i]));
2756 KASSERT(LIST_EMPTY(&ums->sd_alldeps[i]),
2757 ("Unmount %s: Dep type %s not empty (%p)",
2758 ump->um_fs->fs_fsmnt,
2759 TYPENAME(i), LIST_FIRST(&ums->sd_alldeps[i])));
2760 }
2761 #endif
2762 free(ums, M_MOUNTDATA);
2763 }
2764
2765 static struct jblocks *
2766 jblocks_create(void)
2767 {
2768 struct jblocks *jblocks;
2769
2770 jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO);
2771 TAILQ_INIT(&jblocks->jb_segs);
2772 jblocks->jb_avail = 10;
2773 jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2774 M_JBLOCKS, M_WAITOK | M_ZERO);
2775
2776 return (jblocks);
2777 }
2778
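/*
 * Allocate journal space from the head extent.  The allocation may be
 * truncated at an extent boundary; the number of bytes actually
 * allocated is returned through 'actual'.
 */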
2779 static ufs2_daddr_t
2780 jblocks_alloc(struct jblocks *jblocks,
2781 int bytes,
2782 int *actual)
2783 {
2784 ufs2_daddr_t daddr;
2785 struct jextent *jext;
2786 int freecnt;
2787 int blocks;
2788
2789 blocks = bytes / DEV_BSIZE;
2790 jext = &jblocks->jb_extent[jblocks->jb_head];
2791 freecnt = jext->je_blocks - jblocks->jb_off;
2792 if (freecnt == 0) {
2793 jblocks->jb_off = 0;
2794 if (++jblocks->jb_head > jblocks->jb_used)
2795 jblocks->jb_head = 0;
2796 jext = &jblocks->jb_extent[jblocks->jb_head];
2797 freecnt = jext->je_blocks;
2798 }
2799 if (freecnt > blocks)
2800 freecnt = blocks;
2801 *actual = freecnt * DEV_BSIZE;
2802 daddr = jext->je_daddr + jblocks->jb_off;
2803 jblocks->jb_off += freecnt;
2804 jblocks->jb_free -= freecnt;
2805
2806 return (daddr);
2807 }
2808
2809 static void
2810 jblocks_free(struct jblocks *jblocks,
2811 struct mount *mp,
2812 int bytes)
2813 {
2814
2815 LOCK_OWNED(VFSTOUFS(mp));
2816 jblocks->jb_free += bytes / DEV_BSIZE;
2817 if (jblocks->jb_suspended)
2818 worklist_speedup(mp);
2819 wakeup(jblocks);
2820 }
2821
2822 static void
2823 jblocks_destroy(struct jblocks *jblocks)
2824 {
2825
2826 if (jblocks->jb_extent)
2827 free(jblocks->jb_extent, M_JBLOCKS);
2828 free(jblocks, M_JBLOCKS);
2829 }
2830
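/*
 * Add a run of blocks beginning at daddr to the journal extent map,
 * coalescing with the last extent when the run is contiguous and
 * doubling the extent array when it fills.
 */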
2831 static void
2832 jblocks_add(struct jblocks *jblocks,
2833 ufs2_daddr_t daddr,
2834 int blocks)
2835 {
2836 struct jextent *jext;
2837
2838 jblocks->jb_blocks += blocks;
2839 jblocks->jb_free += blocks;
2840 jext = &jblocks->jb_extent[jblocks->jb_used];
2841 /* Adding the first block. */
2842 if (jext->je_daddr == 0) {
2843 jext->je_daddr = daddr;
2844 jext->je_blocks = blocks;
2845 return;
2846 }
2847 /* Extending the last extent. */
2848 if (jext->je_daddr + jext->je_blocks == daddr) {
2849 jext->je_blocks += blocks;
2850 return;
2851 }
2852 /* Adding a new extent. */
2853 if (++jblocks->jb_used == jblocks->jb_avail) {
2854 jblocks->jb_avail *= 2;
2855 jext = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2856 M_JBLOCKS, M_WAITOK | M_ZERO);
2857 memcpy(jext, jblocks->jb_extent,
2858 sizeof(struct jextent) * jblocks->jb_used);
2859 free(jblocks->jb_extent, M_JBLOCKS);
2860 jblocks->jb_extent = jext;
2861 }
2862 jext = &jblocks->jb_extent[jblocks->jb_used];
2863 jext->je_daddr = daddr;
2864 jext->je_blocks = blocks;
2865 return;
2866 }
2867
2868 int
2869 softdep_journal_lookup(struct mount *mp, struct vnode **vpp)
2870 {
2871 struct componentname cnp;
2872 struct vnode *dvp;
2873 ino_t sujournal;
2874 int error;
2875
2876 error = VFS_VGET(mp, UFS_ROOTINO, LK_EXCLUSIVE, &dvp);
2877 if (error)
2878 return (error);
2879 bzero(&cnp, sizeof(cnp));
2880 cnp.cn_nameiop = LOOKUP;
2881 cnp.cn_flags = ISLASTCN;
2882 cnp.cn_cred = curthread->td_ucred;
2883 cnp.cn_pnbuf = SUJ_FILE;
2884 cnp.cn_nameptr = SUJ_FILE;
2885 cnp.cn_namelen = strlen(SUJ_FILE);
2886 error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal);
2887 vput(dvp);
2888 if (error != 0)
2889 return (error);
2890 error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp);
2891 return (error);
2892 }
2893
2894 /*
2895 * Open and verify the journal file.
2896 */
2897 static int
2898 journal_mount(struct mount *mp,
2899 struct fs *fs,
2900 struct ucred *cred)
2901 {
2902 struct jblocks *jblocks;
2903 struct ufsmount *ump;
2904 struct vnode *vp;
2905 struct inode *ip;
2906 ufs2_daddr_t blkno;
2907 int bcount;
2908 int error;
2909 int i;
2910
2911 ump = VFSTOUFS(mp);
2912 ump->softdep_journal_tail = NULL;
2913 ump->softdep_on_journal = 0;
2914 ump->softdep_accdeps = 0;
2915 ump->softdep_req = 0;
2916 ump->softdep_jblocks = NULL;
2917 error = softdep_journal_lookup(mp, &vp);
2918 if (error != 0) {
2919 printf("Failed to find journal. Use tunefs to create one\n");
2920 return (error);
2921 }
2922 ip = VTOI(vp);
2923 if (ip->i_size < SUJ_MIN) {
2924 error = ENOSPC;
2925 goto out;
2926 }
2927 bcount = lblkno(fs, ip->i_size); /* Only use whole blocks. */
2928 jblocks = jblocks_create();
2929 for (i = 0; i < bcount; i++) {
2930 error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL);
2931 if (error)
2932 break;
2933 jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag));
2934 }
2935 if (error) {
2936 jblocks_destroy(jblocks);
2937 goto out;
2938 }
2939 jblocks->jb_low = jblocks->jb_free / 3; /* Reserve 33%. */
2940 jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. */
2941 ump->softdep_jblocks = jblocks;
2942
2943 MNT_ILOCK(mp);
2944 mp->mnt_flag |= MNT_SUJ;
2945 MNT_IUNLOCK(mp);
2946
2947 /*
2948 * Only validate the journal contents if the
2949 * filesystem is clean, otherwise we write the logs
2950 * but they'll never be used. If the filesystem was
2951 * still dirty when we mounted it the journal is
2952 * invalid and a new journal can only be valid if it
2953 * starts from a clean mount.
2954 */
2955 if (fs->fs_clean) {
2956 DIP_SET(ip, i_modrev, fs->fs_mtime);
2957 ip->i_flags |= IN_MODIFIED;
2958 ffs_update(vp, 1);
2959 }
2960 out:
2961 vput(vp);
2962 return (error);
2963 }
2964
2965 static void
2966 journal_unmount(struct ufsmount *ump)
2967 {
2968
2969 if (ump->softdep_jblocks)
2970 jblocks_destroy(ump->softdep_jblocks);
2971 ump->softdep_jblocks = NULL;
2972 }
2973
2974 /*
2975 * Called when a journal record is ready to be written. Space is allocated
2976 * and the journal entry is created when the journal is flushed to stable
2977 * store.
2978 */
2979 static void
2980 add_to_journal(struct worklist *wk)
2981 {
2982 struct ufsmount *ump;
2983
2984 ump = VFSTOUFS(wk->wk_mp);
2985 LOCK_OWNED(ump);
2986 if (wk->wk_state & ONWORKLIST)
2987 panic("add_to_journal: %s(0x%X) already on list",
2988 TYPENAME(wk->wk_type), wk->wk_state);
2989 wk->wk_state |= ONWORKLIST | DEPCOMPLETE;
2990 if (LIST_EMPTY(&ump->softdep_journal_pending)) {
2991 ump->softdep_jblocks->jb_age = ticks;
2992 LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list);
2993 } else
2994 LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list);
2995 ump->softdep_journal_tail = wk;
2996 ump->softdep_on_journal += 1;
2997 }
2998
2999 /*
 * Remove an arbitrary item from the journal worklist, maintaining the
 * tail pointer.  This happens when a new operation obviates the need to
3002 * journal an old operation.
3003 */
3004 static void
3005 remove_from_journal(struct worklist *wk)
3006 {
3007 struct ufsmount *ump;
3008
3009 ump = VFSTOUFS(wk->wk_mp);
3010 LOCK_OWNED(ump);
3011 #ifdef INVARIANTS
3012 {
3013 struct worklist *wkn;
3014
3015 LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list)
3016 if (wkn == wk)
3017 break;
3018 if (wkn == NULL)
3019 panic("remove_from_journal: %p is not in journal", wk);
3020 }
3021 #endif
3022 /*
3023 * We emulate a TAILQ to save space in most structures which do not
3024 * require TAILQ semantics. Here we must update the tail position
3025 * when removing the tail which is not the final entry. This works
	 * only if the worklist linkage is at the beginning of the structure.
3027 */
3028 if (ump->softdep_journal_tail == wk)
3029 ump->softdep_journal_tail =
3030 (struct worklist *)wk->wk_list.le_prev;
3031 WORKLIST_REMOVE(wk);
3032 ump->softdep_on_journal -= 1;
3033 }
3034
3035 /*
3036 * Check for journal space as well as dependency limits so the prelink
3037 * code can throttle both journaled and non-journaled filesystems.
3038 * Threshold is 0 for low and 1 for min.
3039 */
3040 static int
3041 journal_space(struct ufsmount *ump, int thresh)
3042 {
3043 struct jblocks *jblocks;
3044 int limit, avail;
3045
3046 jblocks = ump->softdep_jblocks;
3047 if (jblocks == NULL)
3048 return (1);
3049 /*
	 * We use a tighter restriction here to prevent request_cleanup(),
	 * running in other threads, from blocking on locks we currently hold.
3052 * We have to be over the limit and our filesystem has to be
3053 * responsible for more than our share of that usage.
3054 */
3055 limit = (max_softdeps / 10) * 9;
3056 if (dep_current[D_INODEDEP] > limit &&
3057 ump->softdep_curdeps[D_INODEDEP] > limit / stat_flush_threads)
3058 return (0);
3059 if (thresh)
3060 thresh = jblocks->jb_min;
3061 else
3062 thresh = jblocks->jb_low;
3063 avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE;
3064 avail = jblocks->jb_free - avail;
3065
3066 return (avail > thresh);
3067 }
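
/*
 * For example, assuming the usual 32-byte journal records and 512-byte
 * device blocks, 1000 not-yet-written journal entries discount 62
 * blocks from jb_free before the threshold comparison above is made.
 */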
3068
3069 static void
3070 journal_suspend(struct ufsmount *ump)
3071 {
3072 struct jblocks *jblocks;
3073 struct mount *mp;
3074 bool set;
3075
3076 mp = UFSTOVFS(ump);
3077 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0)
3078 return;
3079
3080 jblocks = ump->softdep_jblocks;
3081 vfs_op_enter(mp);
3082 set = false;
3083 MNT_ILOCK(mp);
3084 if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
3085 stat_journal_min++;
3086 mp->mnt_kern_flag |= MNTK_SUSPEND;
3087 mp->mnt_susp_owner = ump->softdep_flushtd;
3088 set = true;
3089 }
3090 jblocks->jb_suspended = 1;
3091 MNT_IUNLOCK(mp);
3092 if (!set)
3093 vfs_op_exit(mp);
3094 }
3095
3096 static int
3097 journal_unsuspend(struct ufsmount *ump)
3098 {
3099 struct jblocks *jblocks;
3100 struct mount *mp;
3101
3102 mp = UFSTOVFS(ump);
3103 jblocks = ump->softdep_jblocks;
3104
3105 if (jblocks != NULL && jblocks->jb_suspended &&
3106 journal_space(ump, jblocks->jb_min)) {
3107 jblocks->jb_suspended = 0;
3108 FREE_LOCK(ump);
3109 mp->mnt_susp_owner = curthread;
3110 vfs_write_resume(mp, 0);
3111 ACQUIRE_LOCK(ump);
3112 return (1);
3113 }
3114 return (0);
3115 }
3116
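/*
 * If the journal is low on space, try to reclaim some by syncing the
 * filesystem and flushing the superblock, and suspend writes if the
 * journal remains below its minimum threshold.
 */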
3117 static void
3118 journal_check_space(struct ufsmount *ump)
3119 {
3120 struct mount *mp;
3121
3122 LOCK_OWNED(ump);
3123
3124 if (journal_space(ump, 0) == 0) {
3125 softdep_speedup(ump);
3126 mp = UFSTOVFS(ump);
3127 FREE_LOCK(ump);
3128 VFS_SYNC(mp, MNT_NOWAIT);
3129 ffs_sbupdate(ump, MNT_WAIT, 0);
3130 ACQUIRE_LOCK(ump);
3131 if (journal_space(ump, 1) == 0)
3132 journal_suspend(ump);
3133 }
3134 }
3135
3136 /*
3137 * Called before any allocation function to be certain that there is
3138 * sufficient space in the journal prior to creating any new records.
3139 * Since in the case of block allocation we may have multiple locked
 * buffers at the time of the actual allocation, we cannot block
3141 * when the journal records are created. Doing so would create a deadlock
3142 * if any of these buffers needed to be flushed to reclaim space. Instead
3143 * we require a sufficiently large amount of available space such that
3144 * each thread in the system could have passed this allocation check and
3145 * still have sufficient free space. With 20% of a minimum journal size
3146 * of 1MB we have 6553 records available.
3147 */
3148 int
3149 softdep_prealloc(struct vnode *vp, int waitok)
3150 {
3151 struct ufsmount *ump;
3152
3153 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
3154 ("softdep_prealloc called on non-softdep filesystem"));
3155 /*
3156 * Nothing to do if we are not running journaled soft updates.
3157 * If we currently hold the snapshot lock, we must avoid
3158 * handling other resources that could cause deadlock. Do not
	 * touch the quota vnodes since they are typically locked recursively
	 * with other vnode locks held.
3161 */
3162 if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp)) ||
3163 (vp->v_vflag & VV_SYSTEM) != 0)
3164 return (0);
3165 ump = VFSTOUFS(vp->v_mount);
3166 ACQUIRE_LOCK(ump);
3167 if (journal_space(ump, 0)) {
3168 FREE_LOCK(ump);
3169 return (0);
3170 }
3171 stat_journal_low++;
3172 FREE_LOCK(ump);
3173 if (waitok == MNT_NOWAIT)
3174 return (ENOSPC);
3175 /*
3176 * Attempt to sync this vnode once to flush any journal
3177 * work attached to it.
3178 */
3179 if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0)
3180 ffs_syncvnode(vp, waitok, 0);
3181 ACQUIRE_LOCK(ump);
3182 process_removes(vp);
3183 process_truncates(vp);
3184 journal_check_space(ump);
3185 FREE_LOCK(ump);
3186
3187 return (0);
3188 }
3189
3190 /*
3191 * Try hard to sync all data and metadata for the vnode, and workitems
3192 * flushing which might conflict with the vnode lock. This is a
3193 * helper for softdep_prerename().
3194 */
3195 static int
3196 softdep_prerename_vnode(struct ufsmount *ump, struct vnode *vp)
3197 {
3198 int error;
3199
3200 ASSERT_VOP_ELOCKED(vp, "prehandle");
3201 if (vp->v_data == NULL)
3202 return (0);
3203 error = VOP_FSYNC(vp, MNT_WAIT, curthread);
3204 if (error != 0)
3205 return (error);
3206 ACQUIRE_LOCK(ump);
3207 process_removes(vp);
3208 process_truncates(vp);
3209 FREE_LOCK(ump);
3210 return (0);
3211 }
3212
3213 /*
3214 * Must be called from VOP_RENAME() after all vnodes are locked.
3215 * Ensures that there is enough journal space for rename. It is
3216 * sufficiently different from softdep_prelink() by having to handle
3217 * four vnodes.
3218 */
3219 int
3220 softdep_prerename(struct vnode *fdvp,
3221 struct vnode *fvp,
3222 struct vnode *tdvp,
3223 struct vnode *tvp)
3224 {
3225 struct ufsmount *ump;
3226 int error;
3227
3228 ump = VFSTOUFS(fdvp->v_mount);
3229
3230 if (journal_space(ump, 0))
3231 return (0);
3232
3233 VOP_UNLOCK(tdvp);
3234 VOP_UNLOCK(fvp);
3235 if (tvp != NULL && tvp != tdvp)
3236 VOP_UNLOCK(tvp);
3237
3238 error = softdep_prerename_vnode(ump, fdvp);
3239 VOP_UNLOCK(fdvp);
3240 if (error != 0)
3241 return (error);
3242
3243 VOP_LOCK(fvp, LK_EXCLUSIVE | LK_RETRY);
3244 error = softdep_prerename_vnode(ump, fvp);
3245 VOP_UNLOCK(fvp);
3246 if (error != 0)
3247 return (error);
3248
3249 if (tdvp != fdvp) {
3250 VOP_LOCK(tdvp, LK_EXCLUSIVE | LK_RETRY);
3251 error = softdep_prerename_vnode(ump, tdvp);
3252 VOP_UNLOCK(tdvp);
3253 if (error != 0)
3254 return (error);
3255 }
3256
3257 if (tvp != fvp && tvp != NULL) {
3258 VOP_LOCK(tvp, LK_EXCLUSIVE | LK_RETRY);
3259 error = softdep_prerename_vnode(ump, tvp);
3260 VOP_UNLOCK(tvp);
3261 if (error != 0)
3262 return (error);
3263 }
3264
3265 ACQUIRE_LOCK(ump);
3266 softdep_speedup(ump);
3267 process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT);
3268 journal_check_space(ump);
3269 FREE_LOCK(ump);
3270 return (ERELOOKUP);
3271 }
3272
3273 /*
3274 * Before adjusting a link count on a vnode verify that we have sufficient
3275 * journal space. If not, process operations that depend on the currently
3276 * locked pair of vnodes to try to flush space as the syncer, buf daemon,
3277 * and softdep flush threads can not acquire these locks to reclaim space.
3278 *
3279 * Returns 0 if all owned locks are still valid and were not dropped
 * in the process; otherwise it returns either an error from the sync,
 * or ERELOOKUP if any of the locks were re-acquired.  In the latter
3282 * case, the state of the vnodes cannot be relied upon and our VFS
3283 * syscall must be restarted at top level from the lookup.
3284 */
3285 int
3286 softdep_prelink(struct vnode *dvp,
3287 struct vnode *vp,
3288 struct componentname *cnp)
3289 {
3290 struct ufsmount *ump;
3291 struct nameidata *ndp;
3292
3293 ASSERT_VOP_ELOCKED(dvp, "prelink dvp");
3294 if (vp != NULL)
3295 ASSERT_VOP_ELOCKED(vp, "prelink vp");
3296 ump = VFSTOUFS(dvp->v_mount);
3297
3298 /*
3299 * Nothing to do if we have sufficient journal space. We skip
3300 * flushing when vp is a snapshot to avoid deadlock where
 * another thread is trying to update the inode block for dvp
3302 * and is waiting on snaplk that vp holds.
3303 */
3304 if (journal_space(ump, 0) || (vp != NULL && IS_SNAPSHOT(VTOI(vp))))
3305 return (0);
3306
3307 /*
3308 * Check if the journal space consumption can in theory be
 * accounted on dvp and vp.  If the vnodes' metadata has not
 * changed compared with the previous round-trip into
3311 * softdep_prelink(), as indicated by the seqc generation
3312 * recorded in the nameidata, then there is no point in
3313 * starting the sync.
3314 */
3315 ndp = __containerof(cnp, struct nameidata, ni_cnd);
3316 if (!seqc_in_modify(ndp->ni_dvp_seqc) &&
3317 vn_seqc_consistent(dvp, ndp->ni_dvp_seqc) &&
3318 (vp == NULL || (!seqc_in_modify(ndp->ni_vp_seqc) &&
3319 vn_seqc_consistent(vp, ndp->ni_vp_seqc))))
3320 return (0);
3321
3322 stat_journal_low++;
3323 if (vp != NULL) {
3324 VOP_UNLOCK(dvp);
3325 ffs_syncvnode(vp, MNT_NOWAIT, 0);
3326 vn_lock_pair(dvp, false, vp, true);
3327 if (dvp->v_data == NULL)
3328 goto out;
3329 }
3330 if (vp != NULL)
3331 VOP_UNLOCK(vp);
3332 ffs_syncvnode(dvp, MNT_WAIT, 0);
3333 /* Process vp before dvp as it may create .. removes. */
3334 if (vp != NULL) {
3335 VOP_UNLOCK(dvp);
3336 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3337 if (vp->v_data == NULL) {
3338 vn_lock_pair(dvp, false, vp, true);
3339 goto out;
3340 }
3341 ACQUIRE_LOCK(ump);
3342 process_removes(vp);
3343 process_truncates(vp);
3344 FREE_LOCK(ump);
3345 VOP_UNLOCK(vp);
3346 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
3347 if (dvp->v_data == NULL) {
3348 vn_lock_pair(dvp, true, vp, false);
3349 goto out;
3350 }
3351 }
3352
3353 ACQUIRE_LOCK(ump);
3354 process_removes(dvp);
3355 process_truncates(dvp);
3356 VOP_UNLOCK(dvp);
3357 softdep_speedup(ump);
3358
3359 process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT);
3360 journal_check_space(ump);
3361 FREE_LOCK(ump);
3362
3363 vn_lock_pair(dvp, false, vp, false);
3364 out:
3365 ndp->ni_dvp_seqc = vn_seqc_read_any(dvp);
3366 if (vp != NULL)
3367 ndp->ni_vp_seqc = vn_seqc_read_any(vp);
3368 return (ERELOOKUP);
3369 }
3370
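/*
 * Fill in the segment header record that begins each device block of
 * journal data.  The CRC field is currently written as zero.
 */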
3371 static void
3372 jseg_write(struct ufsmount *ump,
3373 struct jseg *jseg,
3374 uint8_t *data)
3375 {
3376 struct jsegrec *rec;
3377
3378 rec = (struct jsegrec *)data;
3379 rec->jsr_seq = jseg->js_seq;
3380 rec->jsr_oldest = jseg->js_oldseq;
3381 rec->jsr_cnt = jseg->js_cnt;
3382 rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize;
3383 rec->jsr_crc = 0;
3384 rec->jsr_time = ump->um_fs->fs_mtime;
3385 }
3386
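/*
 * Write the fields shared by jaddref and jremref records and point the
 * jsegdep at the segment being written.
 */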
3387 static inline void
3388 inoref_write(struct inoref *inoref,
3389 struct jseg *jseg,
3390 struct jrefrec *rec)
3391 {
3392
3393 inoref->if_jsegdep->jd_seg = jseg;
3394 rec->jr_ino = inoref->if_ino;
3395 rec->jr_parent = inoref->if_parent;
3396 rec->jr_nlink = inoref->if_nlink;
3397 rec->jr_mode = inoref->if_mode;
3398 rec->jr_diroff = inoref->if_diroff;
3399 }
3400
3401 static void
3402 jaddref_write(struct jaddref *jaddref,
3403 struct jseg *jseg,
3404 uint8_t *data)
3405 {
3406 struct jrefrec *rec;
3407
3408 rec = (struct jrefrec *)data;
3409 rec->jr_op = JOP_ADDREF;
3410 inoref_write(&jaddref->ja_ref, jseg, rec);
3411 }
3412
3413 static void
3414 jremref_write(struct jremref *jremref,
3415 struct jseg *jseg,
3416 uint8_t *data)
3417 {
3418 struct jrefrec *rec;
3419
3420 rec = (struct jrefrec *)data;
3421 rec->jr_op = JOP_REMREF;
3422 inoref_write(&jremref->jr_ref, jseg, rec);
3423 }
3424
3425 static void
3426 jmvref_write(struct jmvref *jmvref,
3427 struct jseg *jseg,
3428 uint8_t *data)
3429 {
3430 struct jmvrec *rec;
3431
3432 rec = (struct jmvrec *)data;
3433 rec->jm_op = JOP_MVREF;
3434 rec->jm_ino = jmvref->jm_ino;
3435 rec->jm_parent = jmvref->jm_parent;
3436 rec->jm_oldoff = jmvref->jm_oldoff;
3437 rec->jm_newoff = jmvref->jm_newoff;
3438 }
3439
3440 static void
3441 jnewblk_write(struct jnewblk *jnewblk,
3442 struct jseg *jseg,
3443 uint8_t *data)
3444 {
3445 struct jblkrec *rec;
3446
3447 jnewblk->jn_jsegdep->jd_seg = jseg;
3448 rec = (struct jblkrec *)data;
3449 rec->jb_op = JOP_NEWBLK;
3450 rec->jb_ino = jnewblk->jn_ino;
3451 rec->jb_blkno = jnewblk->jn_blkno;
3452 rec->jb_lbn = jnewblk->jn_lbn;
3453 rec->jb_frags = jnewblk->jn_frags;
3454 rec->jb_oldfrags = jnewblk->jn_oldfrags;
3455 }
3456
3457 static void
3458 jfreeblk_write(struct jfreeblk *jfreeblk,
3459 struct jseg *jseg,
3460 uint8_t *data)
3461 {
3462 struct jblkrec *rec;
3463
3464 jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg;
3465 rec = (struct jblkrec *)data;
3466 rec->jb_op = JOP_FREEBLK;
3467 rec->jb_ino = jfreeblk->jf_ino;
3468 rec->jb_blkno = jfreeblk->jf_blkno;
3469 rec->jb_lbn = jfreeblk->jf_lbn;
3470 rec->jb_frags = jfreeblk->jf_frags;
3471 rec->jb_oldfrags = 0;
3472 }
3473
3474 static void
3475 jfreefrag_write(struct jfreefrag *jfreefrag,
3476 struct jseg *jseg,
3477 uint8_t *data)
3478 {
3479 struct jblkrec *rec;
3480
3481 jfreefrag->fr_jsegdep->jd_seg = jseg;
3482 rec = (struct jblkrec *)data;
3483 rec->jb_op = JOP_FREEBLK;
3484 rec->jb_ino = jfreefrag->fr_ino;
3485 rec->jb_blkno = jfreefrag->fr_blkno;
3486 rec->jb_lbn = jfreefrag->fr_lbn;
3487 rec->jb_frags = jfreefrag->fr_frags;
3488 rec->jb_oldfrags = 0;
3489 }
3490
3491 static void
3492 jtrunc_write(struct jtrunc *jtrunc,
3493 struct jseg *jseg,
3494 uint8_t *data)
3495 {
3496 struct jtrncrec *rec;
3497
3498 jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg;
3499 rec = (struct jtrncrec *)data;
3500 rec->jt_op = JOP_TRUNC;
3501 rec->jt_ino = jtrunc->jt_ino;
3502 rec->jt_size = jtrunc->jt_size;
3503 rec->jt_extsize = jtrunc->jt_extsize;
3504 }
3505
3506 static void
3507 jfsync_write(struct jfsync *jfsync,
3508 struct jseg *jseg,
3509 uint8_t *data)
3510 {
3511 struct jtrncrec *rec;
3512
3513 rec = (struct jtrncrec *)data;
3514 rec->jt_op = JOP_SYNC;
3515 rec->jt_ino = jfsync->jfs_ino;
3516 rec->jt_size = jfsync->jfs_size;
3517 rec->jt_extsize = jfsync->jfs_extsize;
3518 }
3519
3520 static void
3521 softdep_flushjournal(struct mount *mp)
3522 {
3523 struct jblocks *jblocks;
3524 struct ufsmount *ump;
3525
3526 if (MOUNTEDSUJ(mp) == 0)
3527 return;
3528 ump = VFSTOUFS(mp);
3529 jblocks = ump->softdep_jblocks;
3530 ACQUIRE_LOCK(ump);
3531 while (ump->softdep_on_journal) {
3532 jblocks->jb_needseg = 1;
3533 softdep_process_journal(mp, NULL, MNT_WAIT);
3534 }
3535 FREE_LOCK(ump);
3536 }
3537
3538 static void softdep_synchronize_completed(struct bio *);
3539 static void softdep_synchronize(struct bio *, struct ufsmount *, void *);
3540
3541 static void
3542 softdep_synchronize_completed(struct bio *bp)
3543 {
3544 struct jseg *oldest;
3545 struct jseg *jseg;
3546 struct ufsmount *ump;
3547
3548 /*
3549 * caller1 marks the last segment written before we issued the
3550 * synchronize cache.
3551 */
3552 jseg = bp->bio_caller1;
3553 if (jseg == NULL) {
3554 g_destroy_bio(bp);
3555 return;
3556 }
3557 ump = VFSTOUFS(jseg->js_list.wk_mp);
3558 ACQUIRE_LOCK(ump);
3559 oldest = NULL;
3560 /*
3561 * Mark all the journal entries waiting on the synchronize cache
3562 * as completed so they may continue on.
3563 */
3564 while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) {
3565 jseg->js_state |= COMPLETE;
3566 oldest = jseg;
3567 jseg = TAILQ_PREV(jseg, jseglst, js_next);
3568 }
3569 /*
3570 * Restart deferred journal entry processing from the oldest
3571 * completed jseg.
3572 */
3573 if (oldest)
3574 complete_jsegs(oldest);
3575
3576 FREE_LOCK(ump);
3577 g_destroy_bio(bp);
3578 }
3579
3580 /*
3581 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering
3582 * barriers. The journal must be written prior to any blocks that depend
 * on it, and the journal cannot be released until the blocks have been
 * written.  This code handles both barriers simultaneously.
3585 */
3586 static void
3587 softdep_synchronize(struct bio *bp,
3588 struct ufsmount *ump,
3589 void *caller1)
3590 {
3591
3592 bp->bio_cmd = BIO_FLUSH;
3593 bp->bio_flags |= BIO_ORDERED;
3594 bp->bio_data = NULL;
3595 bp->bio_offset = ump->um_cp->provider->mediasize;
3596 bp->bio_length = 0;
3597 bp->bio_done = softdep_synchronize_completed;
3598 bp->bio_caller1 = caller1;
3599 g_io_request(bp, ump->um_cp);
3600 }
3601
3602 /*
3603 * Flush some journal records to disk.
3604 */
3605 static void
3606 softdep_process_journal(struct mount *mp,
3607 struct worklist *needwk,
3608 int flags)
3609 {
3610 struct jblocks *jblocks;
3611 struct ufsmount *ump;
3612 struct worklist *wk;
3613 struct jseg *jseg;
3614 struct buf *bp;
3615 struct bio *bio;
3616 uint8_t *data;
3617 struct fs *fs;
3618 int shouldflush;
3619 int segwritten;
3620 int jrecmin; /* Minimum records per block. */
3621 int jrecmax; /* Maximum records per block. */
3622 int size;
3623 int cnt;
3624 int off;
3625 int devbsize;
3626
3627 ump = VFSTOUFS(mp);
3628 if (ump->um_softdep == NULL || ump->um_softdep->sd_jblocks == NULL)
3629 return;
3630 shouldflush = softdep_flushcache;
3631 bio = NULL;
3632 jseg = NULL;
3633 LOCK_OWNED(ump);
3634 fs = ump->um_fs;
3635 jblocks = ump->softdep_jblocks;
3636 devbsize = ump->um_devvp->v_bufobj.bo_bsize;
3637 /*
3638 * We write anywhere between a disk block and fs block. The upper
3639 * bound is picked to prevent buffer cache fragmentation and limit
3640 * processing time per I/O.
3641 */
3642 jrecmin = (devbsize / JREC_SIZE) - 1; /* -1 for seg header */
3643 jrecmax = (fs->fs_bsize / devbsize) * jrecmin;
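	/*
	 * For example, assuming 512-byte device blocks and 32-byte journal
	 * records, jrecmin is 15 (16 records per block less the segment
	 * header) and a 32K filesystem block yields a jrecmax of 960.
	 */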
3644 segwritten = 0;
3645 for (;;) {
3646 cnt = ump->softdep_on_journal;
3647 /*
3648 * Criteria for writing a segment:
3649 * 1) We have a full block.
3650 * 2) We're called from jwait() and haven't found the
3651 * journal item yet.
3652 * 3) Always write if needseg is set.
3653 * 4) If we are called from process_worklist and have
3654 * not yet written anything we write a partial block
3655 * to enforce a 1 second maximum latency on journal
3656 * entries.
3657 */
3658 if (cnt < (jrecmax - 1) && needwk == NULL &&
3659 jblocks->jb_needseg == 0 && (segwritten || cnt == 0))
3660 break;
3661 cnt++;
3662 /*
		 * Verify that some journal space is free.  softdep_prealloc()
		 * should guarantee that we never run out, so this is indicative of
3665 * a problem with the flow control. Try to recover
3666 * gracefully in any event.
3667 */
3668 while (jblocks->jb_free == 0) {
3669 if (flags != MNT_WAIT)
3670 break;
3671 printf("softdep: Out of journal space!\n");
3672 softdep_speedup(ump);
3673 msleep(jblocks, LOCK_PTR(ump), PRIBIO, "jblocks", hz);
3674 }
3675 FREE_LOCK(ump);
3676 jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS);
3677 workitem_alloc(&jseg->js_list, D_JSEG, mp);
3678 LIST_INIT(&jseg->js_entries);
3679 LIST_INIT(&jseg->js_indirs);
3680 jseg->js_state = ATTACHED;
3681 if (shouldflush == 0)
3682 jseg->js_state |= COMPLETE;
3683 else if (bio == NULL)
3684 bio = g_alloc_bio();
3685 jseg->js_jblocks = jblocks;
3686 bp = geteblk(fs->fs_bsize, 0);
3687 ACQUIRE_LOCK(ump);
3688 /*
3689 * If there was a race while we were allocating the block
* and jseg, the entry we care about was likely written.
3691 * We bail out in both the WAIT and NOWAIT case and assume
3692 * the caller will loop if the entry it cares about is
3693 * not written.
3694 */
3695 cnt = ump->softdep_on_journal;
3696 if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) {
3697 bp->b_flags |= B_INVAL | B_NOCACHE;
3698 WORKITEM_FREE(jseg, D_JSEG);
3699 FREE_LOCK(ump);
3700 brelse(bp);
3701 ACQUIRE_LOCK(ump);
3702 break;
3703 }
3704 /*
3705 * Calculate the disk block size required for the available
3706 * records rounded to the min size.
3707 */
3708 if (cnt == 0)
3709 size = devbsize;
3710 else if (cnt < jrecmax)
3711 size = howmany(cnt, jrecmin) * devbsize;
3712 else
3713 size = fs->fs_bsize;
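/*
 * Continuing the illustrative values above (jrecmin = 15,
 * devbsize = 512): 20 pending records round up to
 * howmany(20, 15) * 512 = 2 * 512 = 1024 bytes.
 */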
3714 /*
3715 * Allocate a disk block for this journal data and account
3716 * for truncation of the requested size if enough contiguous
3717 * space was not available.
3718 */
3719 bp->b_blkno = jblocks_alloc(jblocks, size, &size);
3720 bp->b_lblkno = bp->b_blkno;
3721 bp->b_offset = bp->b_blkno * DEV_BSIZE;
3722 bp->b_bcount = size;
3723 bp->b_flags &= ~B_INVAL;
3724 bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY;
3725 /*
3726 * Initialize our jseg with cnt records. Assign the next
3727 * sequence number to it and link it in-order.
3728 */
3729 cnt = MIN(cnt, (size / devbsize) * jrecmin);
3730 jseg->js_buf = bp;
3731 jseg->js_cnt = cnt;
3732 jseg->js_refs = cnt + 1; /* Self ref. */
3733 jseg->js_size = size;
3734 jseg->js_seq = jblocks->jb_nextseq++;
3735 if (jblocks->jb_oldestseg == NULL)
3736 jblocks->jb_oldestseg = jseg;
3737 jseg->js_oldseq = jblocks->jb_oldestseg->js_seq;
3738 TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next);
3739 if (jblocks->jb_writeseg == NULL)
3740 jblocks->jb_writeseg = jseg;
3741 /*
3742 * Start filling in records from the pending list.
3743 */
3744 data = bp->b_data;
3745 off = 0;
3746
3747 /*
3748 * Always put a header on the first block.
3749 * XXX As with below, there might not be a chance to get
3750 * into the loop. Ensure that something valid is written.
3751 */
3752 jseg_write(ump, jseg, data);
3753 off += JREC_SIZE;
3754 data = bp->b_data + off;
3755
3756 /*
3757 * XXX Something is wrong here. There's no work to do,
* but we need to perform an I/O and allow it to complete
* anyway.
3760 */
3761 if (LIST_EMPTY(&ump->softdep_journal_pending))
3762 stat_emptyjblocks++;
3763
3764 while ((wk = LIST_FIRST(&ump->softdep_journal_pending))
3765 != NULL) {
3766 if (cnt == 0)
3767 break;
3768 /* Place a segment header on every device block. */
3769 if ((off % devbsize) == 0) {
3770 jseg_write(ump, jseg, data);
3771 off += JREC_SIZE;
3772 data = bp->b_data + off;
3773 }
3774 if (wk == needwk)
3775 needwk = NULL;
3776 remove_from_journal(wk);
3777 wk->wk_state |= INPROGRESS;
3778 WORKLIST_INSERT(&jseg->js_entries, wk);
3779 switch (wk->wk_type) {
3780 case D_JADDREF:
3781 jaddref_write(WK_JADDREF(wk), jseg, data);
3782 break;
3783 case D_JREMREF:
3784 jremref_write(WK_JREMREF(wk), jseg, data);
3785 break;
3786 case D_JMVREF:
3787 jmvref_write(WK_JMVREF(wk), jseg, data);
3788 break;
3789 case D_JNEWBLK:
3790 jnewblk_write(WK_JNEWBLK(wk), jseg, data);
3791 break;
3792 case D_JFREEBLK:
3793 jfreeblk_write(WK_JFREEBLK(wk), jseg, data);
3794 break;
3795 case D_JFREEFRAG:
3796 jfreefrag_write(WK_JFREEFRAG(wk), jseg, data);
3797 break;
3798 case D_JTRUNC:
3799 jtrunc_write(WK_JTRUNC(wk), jseg, data);
3800 break;
3801 case D_JFSYNC:
3802 jfsync_write(WK_JFSYNC(wk), jseg, data);
3803 break;
3804 default:
3805 panic("process_journal: Unknown type %s",
3806 TYPENAME(wk->wk_type));
3807 /* NOTREACHED */
3808 }
3809 off += JREC_SIZE;
3810 data = bp->b_data + off;
3811 cnt--;
3812 }
3813
/* Clear any remaining space so we don't leak kernel data. */
3815 if (size > off)
3816 bzero(data, size - off);
3817
3818 /*
3819 * Write this one buffer and continue.
3820 */
3821 segwritten = 1;
3822 jblocks->jb_needseg = 0;
3823 WORKLIST_INSERT(&bp->b_dep, &jseg->js_list);
3824 FREE_LOCK(ump);
3825 bp->b_xflags |= BX_CVTENXIO;
3826 pbgetvp(ump->um_devvp, bp);
3827 /*
3828 * We only do the blocking wait once we find the journal
3829 * entry we're looking for.
3830 */
3831 if (needwk == NULL && flags == MNT_WAIT)
3832 bwrite(bp);
3833 else
3834 bawrite(bp);
3835 ACQUIRE_LOCK(ump);
3836 }
3837 /*
* If we wrote a segment, issue a synchronize cache so the journal
* is reflected on disk before the data is written. Since reclaiming
* journal space also requires writing a journal record, this
* process also enforces a barrier before reclamation.
3842 */
3843 if (segwritten && shouldflush) {
3844 softdep_synchronize(bio, ump,
3845 TAILQ_LAST(&jblocks->jb_segs, jseglst));
3846 } else if (bio)
3847 g_destroy_bio(bio);
3848 /*
3849 * If we've suspended the filesystem because we ran out of journal
3850 * space either try to sync it here to make some progress or
3851 * unsuspend it if we already have.
3852 */
3853 if (flags == 0 && jblocks->jb_suspended) {
3854 if (journal_unsuspend(ump))
3855 return;
3856 FREE_LOCK(ump);
3857 VFS_SYNC(mp, MNT_NOWAIT);
3858 ffs_sbupdate(ump, MNT_WAIT, 0);
3859 ACQUIRE_LOCK(ump);
3860 }
3861 }
3862
3863 /*
3864 * Complete a jseg, allowing all dependencies awaiting journal writes
3865 * to proceed. Each journal dependency also attaches a jsegdep to dependent
3866 * structures so that the journal segment can be freed to reclaim space.
3867 */
3868 static void
3869 complete_jseg(struct jseg *jseg)
3870 {
3871 struct worklist *wk;
3872 struct jmvref *jmvref;
3873 #ifdef INVARIANTS
3874 int i = 0;
3875 #endif
3876
3877 while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) {
3878 WORKLIST_REMOVE(wk);
3879 wk->wk_state &= ~INPROGRESS;
3880 wk->wk_state |= COMPLETE;
3881 KASSERT(i++ < jseg->js_cnt,
("complete_jseg: overflow %d >= %d",
3883 i - 1, jseg->js_cnt));
3884 switch (wk->wk_type) {
3885 case D_JADDREF:
3886 handle_written_jaddref(WK_JADDREF(wk));
3887 break;
3888 case D_JREMREF:
3889 handle_written_jremref(WK_JREMREF(wk));
3890 break;
3891 case D_JMVREF:
3892 rele_jseg(jseg); /* No jsegdep. */
3893 jmvref = WK_JMVREF(wk);
3894 LIST_REMOVE(jmvref, jm_deps);
3895 if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0)
3896 free_pagedep(jmvref->jm_pagedep);
3897 WORKITEM_FREE(jmvref, D_JMVREF);
3898 break;
3899 case D_JNEWBLK:
3900 handle_written_jnewblk(WK_JNEWBLK(wk));
3901 break;
3902 case D_JFREEBLK:
3903 handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep);
3904 break;
3905 case D_JTRUNC:
3906 handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep);
3907 break;
3908 case D_JFSYNC:
3909 rele_jseg(jseg); /* No jsegdep. */
3910 WORKITEM_FREE(wk, D_JFSYNC);
3911 break;
3912 case D_JFREEFRAG:
3913 handle_written_jfreefrag(WK_JFREEFRAG(wk));
3914 break;
3915 default:
panic("complete_jseg: Unknown type %s",
3917 TYPENAME(wk->wk_type));
3918 /* NOTREACHED */
3919 }
3920 }
3921 /* Release the self reference so the structure may be freed. */
3922 rele_jseg(jseg);
3923 }
3924
3925 /*
3926 * Determine which jsegs are ready for completion processing. Waits for
3927 * synchronize cache to complete as well as forcing in-order completion
3928 * of journal entries.
3929 */
3930 static void
3931 complete_jsegs(struct jseg *jseg)
3932 {
3933 struct jblocks *jblocks;
3934 struct jseg *jsegn;
3935
3936 jblocks = jseg->js_jblocks;
3937 /*
* Don't allow out-of-order completions. If this isn't the first
* block, wait for it to be written before we're done.
3940 */
3941 if (jseg != jblocks->jb_writeseg)
3942 return;
3943 /* Iterate through available jsegs processing their entries. */
3944 while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) {
3945 jblocks->jb_oldestwrseq = jseg->js_oldseq;
3946 jsegn = TAILQ_NEXT(jseg, js_next);
3947 complete_jseg(jseg);
3948 jseg = jsegn;
3949 }
3950 jblocks->jb_writeseg = jseg;
3951 /*
3952 * Attempt to free jsegs now that oldestwrseq may have advanced.
3953 */
3954 free_jsegs(jblocks);
3955 }
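/*
 * An ordering sketch: if segments with sequence numbers 1, 2 and 3 are
 * outstanding and segment 2 completes first, complete_jsegs() returns
 * immediately because jb_writeseg still points at segment 1. Once
 * segment 1 completes, segments 1 and 2 are processed in order and
 * jb_writeseg advances to segment 3.
 */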
3956
3957 /*
3958 * Mark a jseg as DEPCOMPLETE and throw away the buffer. Attempt to handle
3959 * the final completions.
3960 */
3961 static void
3962 handle_written_jseg(struct jseg *jseg, struct buf *bp)
3963 {
3964
3965 if (jseg->js_refs == 0)
3966 panic("handle_written_jseg: No self-reference on %p", jseg);
3967 jseg->js_state |= DEPCOMPLETE;
3968 /*
3969 * We'll never need this buffer again, set flags so it will be
3970 * discarded.
3971 */
3972 bp->b_flags |= B_INVAL | B_NOCACHE;
3973 pbrelvp(bp);
3974 complete_jsegs(jseg);
3975 }
3976
3977 static inline struct jsegdep *
3978 inoref_jseg(struct inoref *inoref)
3979 {
3980 struct jsegdep *jsegdep;
3981
3982 jsegdep = inoref->if_jsegdep;
3983 inoref->if_jsegdep = NULL;
3984
3985 return (jsegdep);
3986 }
3987
3988 /*
3989 * Called once a jremref has made it to stable store. The jremref is marked
* complete and we attempt to free it. Any pagedep writes sleeping while
* waiting for the jremref to complete will be awoken by free_jremref.
3992 */
3993 static void
3994 handle_written_jremref(struct jremref *jremref)
3995 {
3996 struct inodedep *inodedep;
3997 struct jsegdep *jsegdep;
3998 struct dirrem *dirrem;
3999
4000 /* Grab the jsegdep. */
4001 jsegdep = inoref_jseg(&jremref->jr_ref);
4002 /*
4003 * Remove us from the inoref list.
4004 */
4005 if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino,
4006 0, &inodedep) == 0)
4007 panic("handle_written_jremref: Lost inodedep");
4008 TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
4009 /*
4010 * Complete the dirrem.
4011 */
4012 dirrem = jremref->jr_dirrem;
4013 jremref->jr_dirrem = NULL;
4014 LIST_REMOVE(jremref, jr_deps);
4015 jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT;
4016 jwork_insert(&dirrem->dm_jwork, jsegdep);
4017 if (LIST_EMPTY(&dirrem->dm_jremrefhd) &&
4018 (dirrem->dm_state & COMPLETE) != 0)
4019 add_to_worklist(&dirrem->dm_list, 0);
4020 free_jremref(jremref);
4021 }
4022
4023 /*
4024 * Called once a jaddref has made it to stable store. The dependency is
4025 * marked complete and any dependent structures are added to the inode
4026 * bufwait list to be completed as soon as it is written. If a bitmap write
4027 * depends on this entry we move the inode into the inodedephd of the
4028 * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap.
4029 */
4030 static void
4031 handle_written_jaddref(struct jaddref *jaddref)
4032 {
4033 struct jsegdep *jsegdep;
4034 struct inodedep *inodedep;
4035 struct diradd *diradd;
4036 struct mkdir *mkdir;
4037
4038 /* Grab the jsegdep. */
4039 jsegdep = inoref_jseg(&jaddref->ja_ref);
4040 mkdir = NULL;
4041 diradd = NULL;
4042 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
4043 0, &inodedep) == 0)
4044 panic("handle_written_jaddref: Lost inodedep.");
4045 if (jaddref->ja_diradd == NULL)
4046 panic("handle_written_jaddref: No dependency");
4047 if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) {
4048 diradd = jaddref->ja_diradd;
4049 WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list);
4050 } else if (jaddref->ja_state & MKDIR_PARENT) {
4051 mkdir = jaddref->ja_mkdir;
4052 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list);
4053 } else if (jaddref->ja_state & MKDIR_BODY)
4054 mkdir = jaddref->ja_mkdir;
4055 else
4056 panic("handle_written_jaddref: Unknown dependency %p",
4057 jaddref->ja_diradd);
4058 jaddref->ja_diradd = NULL; /* also clears ja_mkdir */
4059 /*
4060 * Remove us from the inode list.
4061 */
4062 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps);
4063 /*
4064 * The mkdir may be waiting on the jaddref to clear before freeing.
4065 */
4066 if (mkdir) {
4067 KASSERT(mkdir->md_list.wk_type == D_MKDIR,
4068 ("handle_written_jaddref: Incorrect type for mkdir %s",
4069 TYPENAME(mkdir->md_list.wk_type)));
4070 mkdir->md_jaddref = NULL;
4071 diradd = mkdir->md_diradd;
4072 mkdir->md_state |= DEPCOMPLETE;
4073 complete_mkdir(mkdir);
4074 }
4075 jwork_insert(&diradd->da_jwork, jsegdep);
4076 if (jaddref->ja_state & NEWBLOCK) {
4077 inodedep->id_state |= ONDEPLIST;
4078 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd,
4079 inodedep, id_deps);
4080 }
4081 free_jaddref(jaddref);
4082 }
4083
4084 /*
4085 * Called once a jnewblk journal is written. The allocdirect or allocindir
4086 * is placed in the bmsafemap to await notification of a written bitmap. If
* the operation was canceled, we add the segdep to the appropriate
4088 * dependency to free the journal space once the canceling operation
4089 * completes.
4090 */
4091 static void
4092 handle_written_jnewblk(struct jnewblk *jnewblk)
4093 {
4094 struct bmsafemap *bmsafemap;
4095 struct freefrag *freefrag;
4096 struct freework *freework;
4097 struct jsegdep *jsegdep;
4098 struct newblk *newblk;
4099
4100 /* Grab the jsegdep. */
4101 jsegdep = jnewblk->jn_jsegdep;
4102 jnewblk->jn_jsegdep = NULL;
4103 if (jnewblk->jn_dep == NULL)
4104 panic("handle_written_jnewblk: No dependency for the segdep.");
4105 switch (jnewblk->jn_dep->wk_type) {
4106 case D_NEWBLK:
4107 case D_ALLOCDIRECT:
4108 case D_ALLOCINDIR:
4109 /*
4110 * Add the written block to the bmsafemap so it can
4111 * be notified when the bitmap is on disk.
4112 */
4113 newblk = WK_NEWBLK(jnewblk->jn_dep);
4114 newblk->nb_jnewblk = NULL;
4115 if ((newblk->nb_state & GOINGAWAY) == 0) {
4116 bmsafemap = newblk->nb_bmsafemap;
4117 newblk->nb_state |= ONDEPLIST;
4118 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk,
4119 nb_deps);
4120 }
4121 jwork_insert(&newblk->nb_jwork, jsegdep);
4122 break;
4123 case D_FREEFRAG:
4124 /*
4125 * A newblock being removed by a freefrag when replaced by
4126 * frag extension.
4127 */
4128 freefrag = WK_FREEFRAG(jnewblk->jn_dep);
4129 freefrag->ff_jdep = NULL;
4130 jwork_insert(&freefrag->ff_jwork, jsegdep);
4131 break;
4132 case D_FREEWORK:
4133 /*
4134 * A direct block was removed by truncate.
4135 */
4136 freework = WK_FREEWORK(jnewblk->jn_dep);
4137 freework->fw_jnewblk = NULL;
4138 jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep);
4139 break;
4140 default:
4141 panic("handle_written_jnewblk: Unknown type %d.",
4142 jnewblk->jn_dep->wk_type);
4143 }
4144 jnewblk->jn_dep = NULL;
4145 free_jnewblk(jnewblk);
4146 }
4147
4148 /*
4149 * Cancel a jfreefrag that won't be needed, probably due to colliding with
4150 * an in-flight allocation that has not yet been committed. Divorce us
4151 * from the freefrag and mark it DEPCOMPLETE so that it may be added
4152 * to the worklist.
4153 */
4154 static void
4155 cancel_jfreefrag(struct jfreefrag *jfreefrag)
4156 {
4157 struct freefrag *freefrag;
4158
4159 if (jfreefrag->fr_jsegdep) {
4160 free_jsegdep(jfreefrag->fr_jsegdep);
4161 jfreefrag->fr_jsegdep = NULL;
4162 }
4163 freefrag = jfreefrag->fr_freefrag;
4164 jfreefrag->fr_freefrag = NULL;
4165 free_jfreefrag(jfreefrag);
4166 freefrag->ff_state |= DEPCOMPLETE;
4167 CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno);
4168 }
4169
4170 /*
4171 * Free a jfreefrag when the parent freefrag is rendered obsolete.
4172 */
4173 static void
4174 free_jfreefrag(struct jfreefrag *jfreefrag)
4175 {
4176
4177 if (jfreefrag->fr_state & INPROGRESS)
4178 WORKLIST_REMOVE(&jfreefrag->fr_list);
4179 else if (jfreefrag->fr_state & ONWORKLIST)
4180 remove_from_journal(&jfreefrag->fr_list);
4181 if (jfreefrag->fr_freefrag != NULL)
4182 panic("free_jfreefrag: Still attached to a freefrag.");
4183 WORKITEM_FREE(jfreefrag, D_JFREEFRAG);
4184 }
4185
4186 /*
4187 * Called when the journal write for a jfreefrag completes. The parent
4188 * freefrag is added to the worklist if this completes its dependencies.
4189 */
4190 static void
4191 handle_written_jfreefrag(struct jfreefrag *jfreefrag)
4192 {
4193 struct jsegdep *jsegdep;
4194 struct freefrag *freefrag;
4195
4196 /* Grab the jsegdep. */
4197 jsegdep = jfreefrag->fr_jsegdep;
4198 jfreefrag->fr_jsegdep = NULL;
4199 freefrag = jfreefrag->fr_freefrag;
4200 if (freefrag == NULL)
4201 panic("handle_written_jfreefrag: No freefrag.");
4202 freefrag->ff_state |= DEPCOMPLETE;
4203 freefrag->ff_jdep = NULL;
4204 jwork_insert(&freefrag->ff_jwork, jsegdep);
4205 if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
4206 add_to_worklist(&freefrag->ff_list, 0);
4207 jfreefrag->fr_freefrag = NULL;
4208 free_jfreefrag(jfreefrag);
4209 }
4210
4211 /*
4212 * Called when the journal write for a jfreeblk completes. The jfreeblk
4213 * is removed from the freeblks list of pending journal writes and the
4214 * jsegdep is moved to the freeblks jwork to be completed when all blocks
4215 * have been reclaimed.
4216 */
4217 static void
4218 handle_written_jblkdep(struct jblkdep *jblkdep)
4219 {
4220 struct freeblks *freeblks;
4221 struct jsegdep *jsegdep;
4222
4223 /* Grab the jsegdep. */
4224 jsegdep = jblkdep->jb_jsegdep;
4225 jblkdep->jb_jsegdep = NULL;
4226 freeblks = jblkdep->jb_freeblks;
4227 LIST_REMOVE(jblkdep, jb_deps);
4228 jwork_insert(&freeblks->fb_jwork, jsegdep);
4229 /*
4230 * If the freeblks is all journaled, we can add it to the worklist.
4231 */
4232 if (LIST_EMPTY(&freeblks->fb_jblkdephd) &&
4233 (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
4234 add_to_worklist(&freeblks->fb_list, WK_NODELAY);
4235
4236 free_jblkdep(jblkdep);
4237 }
4238
4239 static struct jsegdep *
4240 newjsegdep(struct worklist *wk)
4241 {
4242 struct jsegdep *jsegdep;
4243
4244 jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS);
4245 workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp);
4246 jsegdep->jd_seg = NULL;
4247
4248 return (jsegdep);
4249 }
4250
4251 static struct jmvref *
4252 newjmvref(struct inode *dp,
4253 ino_t ino,
4254 off_t oldoff,
4255 off_t newoff)
4256 {
4257 struct jmvref *jmvref;
4258
4259 jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS);
4260 workitem_alloc(&jmvref->jm_list, D_JMVREF, ITOVFS(dp));
4261 jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE;
4262 jmvref->jm_parent = dp->i_number;
4263 jmvref->jm_ino = ino;
4264 jmvref->jm_oldoff = oldoff;
4265 jmvref->jm_newoff = newoff;
4266
4267 return (jmvref);
4268 }
4269
4270 /*
4271 * Allocate a new jremref that tracks the removal of ip from dp with the
4272 * directory entry offset of diroff. Mark the entry as ATTACHED and
4273 * DEPCOMPLETE as we have all the information required for the journal write
4274 * and the directory has already been removed from the buffer. The caller
4275 * is responsible for linking the jremref into the pagedep and adding it
4276 * to the journal to write. The MKDIR_PARENT flag is set if we're doing
4277 * a DOTDOT addition so handle_workitem_remove() can properly assign
4278 * the jsegdep when we're done.
4279 */
4280 static struct jremref *
4281 newjremref(struct dirrem *dirrem,
4282 struct inode *dp,
4283 struct inode *ip,
4284 off_t diroff,
4285 nlink_t nlink)
4286 {
4287 struct jremref *jremref;
4288
4289 jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS);
4290 workitem_alloc(&jremref->jr_list, D_JREMREF, ITOVFS(dp));
4291 jremref->jr_state = ATTACHED;
4292 newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff,
4293 nlink, ip->i_mode);
4294 jremref->jr_dirrem = dirrem;
4295
4296 return (jremref);
4297 }
4298
4299 static inline void
4300 newinoref(struct inoref *inoref,
4301 ino_t ino,
4302 ino_t parent,
4303 off_t diroff,
4304 nlink_t nlink,
4305 uint16_t mode)
4306 {
4307
4308 inoref->if_jsegdep = newjsegdep(&inoref->if_list);
4309 inoref->if_diroff = diroff;
4310 inoref->if_ino = ino;
4311 inoref->if_parent = parent;
4312 inoref->if_nlink = nlink;
4313 inoref->if_mode = mode;
4314 }
4315
4316 /*
4317 * Allocate a new jaddref to track the addition of ino to dp at diroff. The
4318 * directory offset may not be known until later. The caller is responsible
* for adding the entry to the journal when this information is available. nlink
4320 * should be the link count prior to the addition and mode is only required
4321 * to have the correct FMT.
4322 */
4323 static struct jaddref *
4324 newjaddref(struct inode *dp,
4325 ino_t ino,
4326 off_t diroff,
4327 int16_t nlink,
4328 uint16_t mode)
4329 {
4330 struct jaddref *jaddref;
4331
4332 jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS);
4333 workitem_alloc(&jaddref->ja_list, D_JADDREF, ITOVFS(dp));
4334 jaddref->ja_state = ATTACHED;
4335 jaddref->ja_mkdir = NULL;
4336 newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode);
4337
4338 return (jaddref);
4339 }
4340
4341 /*
4342 * Create a new free dependency for a freework. The caller is responsible
4343 * for adjusting the reference count when it has the lock held. The freedep
4344 * will track an outstanding bitmap write that will ultimately clear the
4345 * freework to continue.
4346 */
4347 static struct freedep *
4348 newfreedep(struct freework *freework)
4349 {
4350 struct freedep *freedep;
4351
4352 freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS);
4353 workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp);
4354 freedep->fd_freework = freework;
4355
4356 return (freedep);
4357 }
4358
4359 /*
4360 * Free a freedep structure once the buffer it is linked to is written. If
4361 * this is the last reference to the freework schedule it for completion.
4362 */
4363 static void
4364 free_freedep(struct freedep *freedep)
4365 {
4366 struct freework *freework;
4367
4368 freework = freedep->fd_freework;
4369 freework->fw_freeblks->fb_cgwait--;
4370 if (--freework->fw_ref == 0)
4371 freework_enqueue(freework);
4372 WORKITEM_FREE(freedep, D_FREEDEP);
4373 }
4374
4375 /*
* Allocate a new freework structure that may be a level in an indirect
* hierarchy when parent is not NULL, or a top-level block when parent is
* NULL. The top-level
4378 * freework structures are allocated without the per-filesystem lock held
4379 * and before the freeblks is visible outside of softdep_setup_freeblocks().
4380 */
4381 static struct freework *
4382 newfreework(struct ufsmount *ump,
4383 struct freeblks *freeblks,
4384 struct freework *parent,
4385 ufs_lbn_t lbn,
4386 ufs2_daddr_t nb,
4387 int frags,
4388 int off,
4389 int journal)
4390 {
4391 struct freework *freework;
4392
4393 freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS);
4394 workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp);
4395 freework->fw_state = ATTACHED;
4396 freework->fw_jnewblk = NULL;
4397 freework->fw_freeblks = freeblks;
4398 freework->fw_parent = parent;
4399 freework->fw_lbn = lbn;
4400 freework->fw_blkno = nb;
4401 freework->fw_frags = frags;
4402 freework->fw_indir = NULL;
4403 freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 ||
4404 lbn >= -UFS_NXADDR) ? 0 : NINDIR(ump->um_fs) + 1;
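/*
 * Reference count sketch: a freework that may cover an indirect block
 * holds one reference per indirect pointer plus one for itself. With
 * 8K blocks and 8-byte UFS2 block pointers (illustrative values),
 * NINDIR is 1024 and fw_ref starts at 1025.
 */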
4405 freework->fw_start = freework->fw_off = off;
4406 if (journal)
4407 newjfreeblk(freeblks, lbn, nb, frags);
4408 if (parent == NULL) {
4409 ACQUIRE_LOCK(ump);
4410 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
4411 freeblks->fb_ref++;
4412 FREE_LOCK(ump);
4413 }
4414
4415 return (freework);
4416 }
4417
4418 /*
4419 * Eliminate a jfreeblk for a block that does not need journaling.
4420 */
4421 static void
4422 cancel_jfreeblk(struct freeblks *freeblks, ufs2_daddr_t blkno)
4423 {
4424 struct jfreeblk *jfreeblk;
4425 struct jblkdep *jblkdep;
4426
4427 LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) {
4428 if (jblkdep->jb_list.wk_type != D_JFREEBLK)
4429 continue;
4430 jfreeblk = WK_JFREEBLK(&jblkdep->jb_list);
4431 if (jfreeblk->jf_blkno == blkno)
4432 break;
4433 }
4434 if (jblkdep == NULL)
4435 return;
4436 CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno);
4437 free_jsegdep(jblkdep->jb_jsegdep);
4438 LIST_REMOVE(jblkdep, jb_deps);
4439 WORKITEM_FREE(jfreeblk, D_JFREEBLK);
4440 }
4441
4442 /*
4443 * Allocate a new jfreeblk to journal top level block pointer when truncating
4444 * a file. The caller must add this to the worklist when the per-filesystem
4445 * lock is held.
4446 */
4447 static struct jfreeblk *
4448 newjfreeblk(struct freeblks *freeblks,
4449 ufs_lbn_t lbn,
4450 ufs2_daddr_t blkno,
4451 int frags)
4452 {
4453 struct jfreeblk *jfreeblk;
4454
4455 jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS);
4456 workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK,
4457 freeblks->fb_list.wk_mp);
4458 jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list);
4459 jfreeblk->jf_dep.jb_freeblks = freeblks;
4460 jfreeblk->jf_ino = freeblks->fb_inum;
4461 jfreeblk->jf_lbn = lbn;
4462 jfreeblk->jf_blkno = blkno;
4463 jfreeblk->jf_frags = frags;
4464 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps);
4465
4466 return (jfreeblk);
4467 }
4468
4469 /*
4470 * The journal is only prepared to handle full-size block numbers, so we
4471 * have to adjust the record to reflect the change to a full-size block.
4472 * For example, suppose we have a block made up of fragments 8-15 and
4473 * want to free its last two fragments. We are given a request that says:
4474 * FREEBLK ino=5, blkno=14, lbn=0, frags=2, oldfrags=0
4475 * where frags are the number of fragments to free and oldfrags are the
4476 * number of fragments to keep. To block align it, we have to change it to
4477 * have a valid full-size blkno, so it becomes:
4478 * FREEBLK ino=5, blkno=8, lbn=0, frags=2, oldfrags=6
4479 */
4480 static void
4481 adjust_newfreework(struct freeblks *freeblks, int frag_offset)
4482 {
4483 struct jfreeblk *jfreeblk;
4484
4485 KASSERT((LIST_FIRST(&freeblks->fb_jblkdephd) != NULL &&
4486 LIST_FIRST(&freeblks->fb_jblkdephd)->jb_list.wk_type == D_JFREEBLK),
4487 ("adjust_newfreework: Missing freeblks dependency"));
4488
4489 jfreeblk = WK_JFREEBLK(LIST_FIRST(&freeblks->fb_jblkdephd));
4490 jfreeblk->jf_blkno -= frag_offset;
4491 jfreeblk->jf_frags += frag_offset;
4492 }
4493
4494 /*
4495 * Allocate a new jtrunc to track a partial truncation.
4496 */
4497 static struct jtrunc *
4498 newjtrunc(struct freeblks *freeblks,
4499 off_t size,
4500 int extsize)
4501 {
4502 struct jtrunc *jtrunc;
4503
4504 jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS);
4505 workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC,
4506 freeblks->fb_list.wk_mp);
4507 jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list);
4508 jtrunc->jt_dep.jb_freeblks = freeblks;
4509 jtrunc->jt_ino = freeblks->fb_inum;
4510 jtrunc->jt_size = size;
4511 jtrunc->jt_extsize = extsize;
4512 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps);
4513
4514 return (jtrunc);
4515 }
4516
4517 /*
* If we're canceling a new bitmap, we have to search for another ref
4519 * to move into the bmsafemap dep. This might be better expressed
4520 * with another structure.
4521 */
4522 static void
4523 move_newblock_dep(struct jaddref *jaddref, struct inodedep *inodedep)
4524 {
4525 struct inoref *inoref;
4526 struct jaddref *jaddrefn;
4527
4528 jaddrefn = NULL;
4529 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4530 inoref = TAILQ_NEXT(inoref, if_deps)) {
4531 if ((jaddref->ja_state & NEWBLOCK) &&
4532 inoref->if_list.wk_type == D_JADDREF) {
4533 jaddrefn = (struct jaddref *)inoref;
4534 break;
4535 }
4536 }
4537 if (jaddrefn == NULL)
4538 return;
4539 jaddrefn->ja_state &= ~(ATTACHED | UNDONE);
4540 jaddrefn->ja_state |= jaddref->ja_state &
4541 (ATTACHED | UNDONE | NEWBLOCK);
4542 jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK);
4543 jaddref->ja_state |= ATTACHED;
4544 LIST_REMOVE(jaddref, ja_bmdeps);
4545 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn,
4546 ja_bmdeps);
4547 }
4548
4549 /*
4550 * Cancel a jaddref either before it has been written or while it is being
4551 * written. This happens when a link is removed before the add reaches
4552 * the disk. The jaddref dependency is kept linked into the bmsafemap
4553 * and inode to prevent the link count or bitmap from reaching the disk
4554 * until handle_workitem_remove() re-adjusts the counts and bitmaps as
4555 * required.
4556 *
4557 * Returns 1 if the canceled addref requires journaling of the remove and
4558 * 0 otherwise.
4559 */
4560 static int
4561 cancel_jaddref(struct jaddref *jaddref,
4562 struct inodedep *inodedep,
4563 struct workhead *wkhd)
4564 {
4565 struct inoref *inoref;
4566 struct jsegdep *jsegdep;
4567 int needsj;
4568
4569 KASSERT((jaddref->ja_state & COMPLETE) == 0,
4570 ("cancel_jaddref: Canceling complete jaddref"));
4571 if (jaddref->ja_state & (INPROGRESS | COMPLETE))
4572 needsj = 1;
4573 else
4574 needsj = 0;
4575 if (inodedep == NULL)
4576 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
4577 0, &inodedep) == 0)
4578 panic("cancel_jaddref: Lost inodedep");
4579 /*
4580 * We must adjust the nlink of any reference operation that follows
4581 * us so that it is consistent with the in-memory reference. This
4582 * ensures that inode nlink rollbacks always have the correct link.
4583 */
4584 if (needsj == 0) {
4585 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4586 inoref = TAILQ_NEXT(inoref, if_deps)) {
4587 if (inoref->if_state & GOINGAWAY)
4588 break;
4589 inoref->if_nlink--;
4590 }
4591 }
4592 jsegdep = inoref_jseg(&jaddref->ja_ref);
4593 if (jaddref->ja_state & NEWBLOCK)
4594 move_newblock_dep(jaddref, inodedep);
4595 wake_worklist(&jaddref->ja_list);
4596 jaddref->ja_mkdir = NULL;
4597 if (jaddref->ja_state & INPROGRESS) {
4598 jaddref->ja_state &= ~INPROGRESS;
4599 WORKLIST_REMOVE(&jaddref->ja_list);
4600 jwork_insert(wkhd, jsegdep);
4601 } else {
4602 free_jsegdep(jsegdep);
4603 if (jaddref->ja_state & DEPCOMPLETE)
4604 remove_from_journal(&jaddref->ja_list);
4605 }
4606 jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE);
4607 /*
4608 * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove
4609 * can arrange for them to be freed with the bitmap. Otherwise we
4610 * no longer need this addref attached to the inoreflst and it
4611 * will incorrectly adjust nlink if we leave it.
4612 */
4613 if ((jaddref->ja_state & NEWBLOCK) == 0) {
4614 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
4615 if_deps);
4616 jaddref->ja_state |= COMPLETE;
4617 free_jaddref(jaddref);
4618 return (needsj);
4619 }
4620 /*
4621 * Leave the head of the list for jsegdeps for fast merging.
4622 */
4623 if (LIST_FIRST(wkhd) != NULL) {
4624 jaddref->ja_state |= ONWORKLIST;
4625 LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list);
4626 } else
4627 WORKLIST_INSERT(wkhd, &jaddref->ja_list);
4628
4629 return (needsj);
4630 }
4631
4632 /*
4633 * Attempt to free a jaddref structure when some work completes. This
4634 * should only succeed once the entry is written and all dependencies have
4635 * been notified.
4636 */
4637 static void
4638 free_jaddref(struct jaddref *jaddref)
4639 {
4640
4641 if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE)
4642 return;
4643 if (jaddref->ja_ref.if_jsegdep)
4644 panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n",
4645 jaddref, jaddref->ja_state);
4646 if (jaddref->ja_state & NEWBLOCK)
4647 LIST_REMOVE(jaddref, ja_bmdeps);
4648 if (jaddref->ja_state & (INPROGRESS | ONWORKLIST))
4649 panic("free_jaddref: Bad state %p(0x%X)",
4650 jaddref, jaddref->ja_state);
4651 if (jaddref->ja_mkdir != NULL)
4652 panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state);
4653 WORKITEM_FREE(jaddref, D_JADDREF);
4654 }
4655
4656 /*
4657 * Free a jremref structure once it has been written or discarded.
4658 */
4659 static void
4660 free_jremref(struct jremref *jremref)
4661 {
4662
4663 if (jremref->jr_ref.if_jsegdep)
4664 free_jsegdep(jremref->jr_ref.if_jsegdep);
4665 if (jremref->jr_state & INPROGRESS)
4666 panic("free_jremref: IO still pending");
4667 WORKITEM_FREE(jremref, D_JREMREF);
4668 }
4669
4670 /*
4671 * Free a jnewblk structure.
4672 */
4673 static void
4674 free_jnewblk(struct jnewblk *jnewblk)
4675 {
4676
4677 if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE)
4678 return;
4679 LIST_REMOVE(jnewblk, jn_deps);
4680 if (jnewblk->jn_dep != NULL)
4681 panic("free_jnewblk: Dependency still attached.");
4682 WORKITEM_FREE(jnewblk, D_JNEWBLK);
4683 }
4684
4685 /*
* Cancel a jnewblk which has been made redundant by frag extension.
4687 */
4688 static void
4689 cancel_jnewblk(struct jnewblk *jnewblk, struct workhead *wkhd)
4690 {
4691 struct jsegdep *jsegdep;
4692
4693 CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno);
4694 jsegdep = jnewblk->jn_jsegdep;
4695 if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL)
4696 panic("cancel_jnewblk: Invalid state");
4697 jnewblk->jn_jsegdep = NULL;
4698 jnewblk->jn_dep = NULL;
4699 jnewblk->jn_state |= GOINGAWAY;
4700 if (jnewblk->jn_state & INPROGRESS) {
4701 jnewblk->jn_state &= ~INPROGRESS;
4702 WORKLIST_REMOVE(&jnewblk->jn_list);
4703 jwork_insert(wkhd, jsegdep);
4704 } else {
4705 free_jsegdep(jsegdep);
4706 remove_from_journal(&jnewblk->jn_list);
4707 }
4708 wake_worklist(&jnewblk->jn_list);
4709 WORKLIST_INSERT(wkhd, &jnewblk->jn_list);
4710 }
4711
4712 static void
4713 free_jblkdep(struct jblkdep *jblkdep)
4714 {
4715
4716 if (jblkdep->jb_list.wk_type == D_JFREEBLK)
4717 WORKITEM_FREE(jblkdep, D_JFREEBLK);
4718 else if (jblkdep->jb_list.wk_type == D_JTRUNC)
4719 WORKITEM_FREE(jblkdep, D_JTRUNC);
4720 else
4721 panic("free_jblkdep: Unexpected type %s",
4722 TYPENAME(jblkdep->jb_list.wk_type));
4723 }
4724
4725 /*
4726 * Free a single jseg once it is no longer referenced in memory or on
4727 * disk. Reclaim journal blocks and dependencies waiting for the segment
4728 * to disappear.
4729 */
4730 static void
4731 free_jseg(struct jseg *jseg, struct jblocks *jblocks)
4732 {
4733 struct freework *freework;
4734
4735 /*
4736 * Free freework structures that were lingering to indicate freed
4737 * indirect blocks that forced journal write ordering on reallocate.
4738 */
4739 while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL)
4740 indirblk_remove(freework);
4741 if (jblocks->jb_oldestseg == jseg)
4742 jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next);
4743 TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next);
4744 jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size);
4745 KASSERT(LIST_EMPTY(&jseg->js_entries),
4746 ("free_jseg: Freed jseg has valid entries."));
4747 WORKITEM_FREE(jseg, D_JSEG);
4748 }
4749
4750 /*
4751 * Free all jsegs that meet the criteria for being reclaimed and update
4752 * oldestseg.
4753 */
4754 static void
4755 free_jsegs(struct jblocks *jblocks)
4756 {
4757 struct jseg *jseg;
4758
4759 /*
* Free only those jsegs with no allocated segments before them, to
* preserve the journal space ordering.
4762 */
4763 while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) {
4764 /*
4765 * Only reclaim space when nothing depends on this journal
4766 * set and another set has written that it is no longer
4767 * valid.
4768 */
4769 if (jseg->js_refs != 0) {
4770 jblocks->jb_oldestseg = jseg;
4771 return;
4772 }
4773 if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE)
4774 break;
4775 if (jseg->js_seq > jblocks->jb_oldestwrseq)
4776 break;
4777 /*
4778 * We can free jsegs that didn't write entries when
4779 * oldestwrseq == js_seq.
4780 */
4781 if (jseg->js_seq == jblocks->jb_oldestwrseq &&
4782 jseg->js_cnt != 0)
4783 break;
4784 free_jseg(jseg, jblocks);
4785 }
4786 /*
* If we exited the loop above, we must still discover the
* oldest valid segment.
4789 */
4790 if (jseg)
4791 for (jseg = jblocks->jb_oldestseg; jseg != NULL;
4792 jseg = TAILQ_NEXT(jseg, js_next))
4793 if (jseg->js_refs != 0)
4794 break;
4795 jblocks->jb_oldestseg = jseg;
4796 /*
4797 * The journal has no valid records but some jsegs may still be
4798 * waiting on oldestwrseq to advance. We force a small record
4799 * out to permit these lingering records to be reclaimed.
4800 */
4801 if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs))
4802 jblocks->jb_needseg = 1;
4803 }
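/*
 * A reclamation sketch: suppose segments 1..4 exist, jb_oldestwrseq is
 * 3 and only segment 4 still has references. Segments 1 and 2
 * (refs == 0, seq < oldestwrseq) are freed. Segment 3, whose seq
 * equals oldestwrseq, is freed only if it wrote no entries; otherwise
 * the loop stops and the scan above settles jb_oldestseg on the first
 * referenced segment, segment 4.
 */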
4804
4805 /*
4806 * Release one reference to a jseg and free it if the count reaches 0. This
4807 * should eventually reclaim journal space as well.
4808 */
4809 static void
4810 rele_jseg(struct jseg *jseg)
4811 {
4812
4813 KASSERT(jseg->js_refs > 0,
("rele_jseg: Invalid refcnt %d", jseg->js_refs));
4815 if (--jseg->js_refs != 0)
4816 return;
4817 free_jsegs(jseg->js_jblocks);
4818 }
4819
4820 /*
4821 * Release a jsegdep and decrement the jseg count.
4822 */
4823 static void
4824 free_jsegdep(struct jsegdep *jsegdep)
4825 {
4826
4827 if (jsegdep->jd_seg)
4828 rele_jseg(jsegdep->jd_seg);
4829 WORKITEM_FREE(jsegdep, D_JSEGDEP);
4830 }
4831
4832 /*
4833 * Wait for a journal item to make it to disk. Initiate journal processing
4834 * if required.
4835 */
4836 static int
4837 jwait(struct worklist *wk, int waitfor)
4838 {
4839
4840 LOCK_OWNED(VFSTOUFS(wk->wk_mp));
4841 /*
4842 * Blocking journal waits cause slow synchronous behavior. Record
4843 * stats on the frequency of these blocking operations.
4844 */
4845 if (waitfor == MNT_WAIT) {
4846 stat_journal_wait++;
4847 switch (wk->wk_type) {
4848 case D_JREMREF:
4849 case D_JMVREF:
4850 stat_jwait_filepage++;
4851 break;
4852 case D_JTRUNC:
4853 case D_JFREEBLK:
4854 stat_jwait_freeblks++;
4855 break;
4856 case D_JNEWBLK:
4857 stat_jwait_newblk++;
4858 break;
4859 case D_JADDREF:
4860 stat_jwait_inode++;
4861 break;
4862 default:
4863 break;
4864 }
4865 }
4866 /*
* If I/O has not started, we process the journal. We can't mark the
4868 * worklist item as IOWAITING because we drop the lock while
4869 * processing the journal and the worklist entry may be freed after
4870 * this point. The caller may call back in and re-issue the request.
4871 */
4872 if ((wk->wk_state & INPROGRESS) == 0) {
4873 softdep_process_journal(wk->wk_mp, wk, waitfor);
4874 if (waitfor != MNT_WAIT)
4875 return (EBUSY);
4876 return (0);
4877 }
4878 if (waitfor != MNT_WAIT)
4879 return (EBUSY);
4880 wait_worklist(wk, "jwait");
4881 return (0);
4882 }
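/*
 * A typical caller pattern (an illustrative sketch, not a specific
 * call site): synchronous callers block, while MNT_NOWAIT callers
 * treat EBUSY as "come back later":
 *
 *	if ((jaddref->ja_state & COMPLETE) == 0)
 *		(void) jwait(&jaddref->ja_list, MNT_WAIT);
 */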
4883
4884 /*
4885 * Lookup an inodedep based on an inode pointer and set the nlinkdelta as
4886 * appropriate. This is a convenience function to reduce duplicate code
4887 * for the setup and revert functions below.
4888 */
4889 static struct inodedep *
4890 inodedep_lookup_ip(struct inode *ip)
4891 {
4892 struct inodedep *inodedep;
4893
4894 KASSERT(ip->i_nlink >= ip->i_effnlink,
4895 ("inodedep_lookup_ip: bad delta"));
4896 (void) inodedep_lookup(ITOVFS(ip), ip->i_number, DEPALLOC,
4897 &inodedep);
4898 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
4899 KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
4900
4901 return (inodedep);
4902 }
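/*
 * For example: an inode with i_nlink == 2 and i_effnlink == 1 (one
 * link removal not yet committed to disk) records
 * id_nlinkdelta = 2 - 1 = 1.
 */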
4903
4904 /*
4905 * Called prior to creating a new inode and linking it to a directory. The
4906 * jaddref structure must already be allocated by softdep_setup_inomapdep
4907 * and it is discovered here so we can initialize the mode and update
4908 * nlinkdelta.
4909 */
4910 void
4911 softdep_setup_create(struct inode *dp, struct inode *ip)
4912 {
4913 struct inodedep *inodedep;
4914 struct jaddref *jaddref __diagused;
4915 struct vnode *dvp;
4916
4917 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
4918 ("softdep_setup_create called on non-softdep filesystem"));
4919 KASSERT(ip->i_nlink == 1,
4920 ("softdep_setup_create: Invalid link count."));
4921 dvp = ITOV(dp);
4922 ACQUIRE_LOCK(ITOUMP(dp));
4923 inodedep = inodedep_lookup_ip(ip);
4924 if (DOINGSUJ(dvp)) {
4925 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4926 inoreflst);
4927 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
4928 ("softdep_setup_create: No addref structure present."));
4929 }
4930 FREE_LOCK(ITOUMP(dp));
4931 }
4932
4933 /*
4934 * Create a jaddref structure to track the addition of a DOTDOT link when
4935 * we are reparenting an inode as part of a rename. This jaddref will be
4936 * found by softdep_setup_directory_change. Adjusts nlinkdelta for
4937 * non-journaling softdep.
4938 */
4939 void
4940 softdep_setup_dotdot_link(struct inode *dp, struct inode *ip)
4941 {
4942 struct inodedep *inodedep;
4943 struct jaddref *jaddref;
4944 struct vnode *dvp;
4945
4946 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
4947 ("softdep_setup_dotdot_link called on non-softdep filesystem"));
4948 dvp = ITOV(dp);
4949 jaddref = NULL;
4950 /*
4951 * We don't set MKDIR_PARENT as this is not tied to a mkdir and
4952 * is used as a normal link would be.
4953 */
4954 if (DOINGSUJ(dvp))
4955 jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4956 dp->i_effnlink - 1, dp->i_mode);
4957 ACQUIRE_LOCK(ITOUMP(dp));
4958 inodedep = inodedep_lookup_ip(dp);
4959 if (jaddref)
4960 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4961 if_deps);
4962 FREE_LOCK(ITOUMP(dp));
4963 }
4964
4965 /*
4966 * Create a jaddref structure to track a new link to an inode. The directory
4967 * offset is not known until softdep_setup_directory_add or
4968 * softdep_setup_directory_change. Adjusts nlinkdelta for non-journaling
4969 * softdep.
4970 */
4971 void
4972 softdep_setup_link(struct inode *dp, struct inode *ip)
4973 {
4974 struct inodedep *inodedep;
4975 struct jaddref *jaddref;
4976 struct vnode *dvp;
4977
4978 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
4979 ("softdep_setup_link called on non-softdep filesystem"));
4980 dvp = ITOV(dp);
4981 jaddref = NULL;
4982 if (DOINGSUJ(dvp))
4983 jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1,
4984 ip->i_mode);
4985 ACQUIRE_LOCK(ITOUMP(dp));
4986 inodedep = inodedep_lookup_ip(ip);
4987 if (jaddref)
4988 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4989 if_deps);
4990 FREE_LOCK(ITOUMP(dp));
4991 }
4992
4993 /*
4994 * Called to create the jaddref structures to track . and .. references as
4995 * well as lookup and further initialize the incomplete jaddref created
4996 * by softdep_setup_inomapdep when the inode was allocated. Adjusts
4997 * nlinkdelta for non-journaling softdep.
4998 */
4999 void
5000 softdep_setup_mkdir(struct inode *dp, struct inode *ip)
5001 {
5002 struct inodedep *inodedep;
5003 struct jaddref *dotdotaddref;
5004 struct jaddref *dotaddref;
5005 struct jaddref *jaddref;
5006 struct vnode *dvp;
5007
5008 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
5009 ("softdep_setup_mkdir called on non-softdep filesystem"));
5010 dvp = ITOV(dp);
5011 dotaddref = dotdotaddref = NULL;
5012 if (DOINGSUJ(dvp)) {
5013 dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1,
5014 ip->i_mode);
5015 dotaddref->ja_state |= MKDIR_BODY;
5016 dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
5017 dp->i_effnlink - 1, dp->i_mode);
5018 dotdotaddref->ja_state |= MKDIR_PARENT;
5019 }
5020 ACQUIRE_LOCK(ITOUMP(dp));
5021 inodedep = inodedep_lookup_ip(ip);
5022 if (DOINGSUJ(dvp)) {
5023 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
5024 inoreflst);
5025 KASSERT(jaddref != NULL,
5026 ("softdep_setup_mkdir: No addref structure present."));
5027 KASSERT(jaddref->ja_parent == dp->i_number,
5028 ("softdep_setup_mkdir: bad parent %ju",
5029 (uintmax_t)jaddref->ja_parent));
5030 TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref,
5031 if_deps);
5032 }
5033 inodedep = inodedep_lookup_ip(dp);
5034 if (DOINGSUJ(dvp))
5035 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst,
5036 &dotdotaddref->ja_ref, if_deps);
5037 FREE_LOCK(ITOUMP(dp));
5038 }
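/*
 * A sketch of the result: mkdir of "d" in parent "p" leaves three
 * journaled link additions on the inodedeps: the original jaddref for
 * p's entry naming d (from softdep_setup_inomapdep), a MKDIR_BODY
 * jaddref for d's "." entry, and a MKDIR_PARENT jaddref, tracked on
 * p's inodedep, for d's ".." entry referencing p.
 */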
5039
5040 /*
5041 * Called to track nlinkdelta of the inode and parent directories prior to
5042 * unlinking a directory.
5043 */
5044 void
5045 softdep_setup_rmdir(struct inode *dp, struct inode *ip)
5046 {
5047
5048 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
5049 ("softdep_setup_rmdir called on non-softdep filesystem"));
5050 ACQUIRE_LOCK(ITOUMP(dp));
5051 (void) inodedep_lookup_ip(ip);
5052 (void) inodedep_lookup_ip(dp);
5053 FREE_LOCK(ITOUMP(dp));
5054 }
5055
5056 /*
5057 * Called to track nlinkdelta of the inode and parent directories prior to
5058 * unlink.
5059 */
5060 void
5061 softdep_setup_unlink(struct inode *dp, struct inode *ip)
5062 {
5063
5064 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
5065 ("softdep_setup_unlink called on non-softdep filesystem"));
5066 ACQUIRE_LOCK(ITOUMP(dp));
5067 (void) inodedep_lookup_ip(ip);
5068 (void) inodedep_lookup_ip(dp);
5069 FREE_LOCK(ITOUMP(dp));
5070 }
5071
5072 /*
5073 * Called to release the journal structures created by a failed non-directory
5074 * creation. Adjusts nlinkdelta for non-journaling softdep.
5075 */
5076 void
5077 softdep_revert_create(struct inode *dp, struct inode *ip)
5078 {
5079 struct inodedep *inodedep;
5080 struct jaddref *jaddref;
5081 struct vnode *dvp;
5082
5083 KASSERT(MOUNTEDSOFTDEP(ITOVFS((dp))) != 0,
5084 ("softdep_revert_create called on non-softdep filesystem"));
5085 dvp = ITOV(dp);
5086 ACQUIRE_LOCK(ITOUMP(dp));
5087 inodedep = inodedep_lookup_ip(ip);
5088 if (DOINGSUJ(dvp)) {
5089 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
5090 inoreflst);
5091 KASSERT(jaddref->ja_parent == dp->i_number,
5092 ("softdep_revert_create: addref parent mismatch"));
5093 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
5094 }
5095 FREE_LOCK(ITOUMP(dp));
5096 }
5097
5098 /*
5099 * Called to release the journal structures created by a failed link
5100 * addition. Adjusts nlinkdelta for non-journaling softdep.
5101 */
5102 void
5103 softdep_revert_link(struct inode *dp, struct inode *ip)
5104 {
5105 struct inodedep *inodedep;
5106 struct jaddref *jaddref;
5107 struct vnode *dvp;
5108
5109 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
5110 ("softdep_revert_link called on non-softdep filesystem"));
5111 dvp = ITOV(dp);
5112 ACQUIRE_LOCK(ITOUMP(dp));
5113 inodedep = inodedep_lookup_ip(ip);
5114 if (DOINGSUJ(dvp)) {
5115 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
5116 inoreflst);
5117 KASSERT(jaddref->ja_parent == dp->i_number,
5118 ("softdep_revert_link: addref parent mismatch"));
5119 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
5120 }
5121 FREE_LOCK(ITOUMP(dp));
5122 }
5123
5124 /*
5125 * Called to release the journal structures created by a failed mkdir
5126 * attempt. Adjusts nlinkdelta for non-journaling softdep.
5127 */
5128 void
5129 softdep_revert_mkdir(struct inode *dp, struct inode *ip)
5130 {
5131 struct inodedep *inodedep;
5132 struct jaddref *jaddref;
5133 struct jaddref *dotaddref;
5134 struct vnode *dvp;
5135
5136 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
5137 ("softdep_revert_mkdir called on non-softdep filesystem"));
5138 dvp = ITOV(dp);
5139
5140 ACQUIRE_LOCK(ITOUMP(dp));
5141 inodedep = inodedep_lookup_ip(dp);
5142 if (DOINGSUJ(dvp)) {
5143 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
5144 inoreflst);
5145 KASSERT(jaddref->ja_parent == ip->i_number,
5146 ("softdep_revert_mkdir: dotdot addref parent mismatch"));
5147 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
5148 }
5149 inodedep = inodedep_lookup_ip(ip);
5150 if (DOINGSUJ(dvp)) {
5151 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
5152 inoreflst);
5153 KASSERT(jaddref->ja_parent == dp->i_number,
5154 ("softdep_revert_mkdir: addref parent mismatch"));
5155 dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
5156 inoreflst, if_deps);
5157 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
5158 KASSERT(dotaddref->ja_parent == ip->i_number,
5159 ("softdep_revert_mkdir: dot addref parent mismatch"));
5160 cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait);
5161 }
5162 FREE_LOCK(ITOUMP(dp));
5163 }
5164
5165 /*
5166 * Called to correct nlinkdelta after a failed rmdir.
5167 */
5168 void
5169 softdep_revert_rmdir(struct inode *dp, struct inode *ip)
5170 {
5171
5172 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
5173 ("softdep_revert_rmdir called on non-softdep filesystem"));
5174 ACQUIRE_LOCK(ITOUMP(dp));
5175 (void) inodedep_lookup_ip(ip);
5176 (void) inodedep_lookup_ip(dp);
5177 FREE_LOCK(ITOUMP(dp));
5178 }
5179
5180 /*
5181 * Protecting the freemaps (or bitmaps).
5182 *
5183 * To eliminate the need to execute fsck before mounting a filesystem
5184 * after a power failure, one must (conservatively) guarantee that the
* on-disk copy of the bitmaps never indicates that a live inode or block is
5186 * free. So, when a block or inode is allocated, the bitmap should be
5187 * updated (on disk) before any new pointers. When a block or inode is
5188 * freed, the bitmap should not be updated until all pointers have been
5189 * reset. The latter dependency is handled by the delayed de-allocation
5190 * approach described below for block and inode de-allocation. The former
5191 * dependency is handled by calling the following procedure when a block or
5192 * inode is allocated. When an inode is allocated an "inodedep" is created
5193 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
5194 * Each "inodedep" is also inserted into the hash indexing structure so
5195 * that any additional link additions can be made dependent on the inode
5196 * allocation.
5197 *
5198 * The ufs filesystem maintains a number of free block counts (e.g., per
5199 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
5200 * in addition to the bitmaps. These counts are used to improve efficiency
5201 * during allocation and therefore must be consistent with the bitmaps.
5202 * There is no convenient way to guarantee post-crash consistency of these
5203 * counts with simple update ordering, for two main reasons: (1) The counts
5204 * and bitmaps for a single cylinder group block are not in the same disk
5205 * sector. If a disk write is interrupted (e.g., by power failure), one may
5206 * be written and the other not. (2) Some of the counts are located in the
5207 * superblock rather than the cylinder group block. So, we focus our soft
5208 * updates implementation on protecting the bitmaps. When mounting a
5209 * filesystem, we recompute the auxiliary counts from the bitmaps.
5210 */
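/*
 * An ordering sketch of the rule above: when inode 5 is allocated, the
 * cylinder group bitmap marking inode 5 in use must be on disk before
 * any directory block naming inode 5. On deallocation the order
 * reverses: every on-disk pointer to inode 5 is cleared before the
 * bitmap may show it free.
 */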
5211
5212 /*
5213 * Called just after updating the cylinder group block to allocate an inode.
5214 */
5215 void
5216 softdep_setup_inomapdep(
5217 struct buf *bp, /* buffer for cylgroup block with inode map */
5218 struct inode *ip, /* inode related to allocation */
5219 ino_t newinum, /* new inode number being allocated */
5220 int mode)
5221 {
5222 struct inodedep *inodedep;
5223 struct bmsafemap *bmsafemap;
5224 struct jaddref *jaddref;
5225 struct mount *mp;
5226 struct fs *fs;
5227
5228 mp = ITOVFS(ip);
5229 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5230 ("softdep_setup_inomapdep called on non-softdep filesystem"));
5231 fs = VFSTOUFS(mp)->um_fs;
5232 jaddref = NULL;
5233
5234 /*
5235 * Allocate the journal reference add structure so that the bitmap
5236 * can be dependent on it.
5237 */
5238 if (MOUNTEDSUJ(mp)) {
5239 jaddref = newjaddref(ip, newinum, 0, 0, mode);
5240 jaddref->ja_state |= NEWBLOCK;
5241 }
5242
5243 /*
5244 * Create a dependency for the newly allocated inode.
* Panic if it already exists, as something is seriously wrong.
5246 * Otherwise add it to the dependency list for the buffer holding
5247 * the cylinder group map from which it was allocated.
5248 *
* We have to preallocate a bmsafemap entry in case it is needed
* in bmsafemap_lookup since once we allocate the inodedep, we
* have to finish initializing it before we can FREE_LOCK().
* By preallocating, we avoid FREE_LOCK() while doing a malloc
* in bmsafemap_lookup. We cannot call bmsafemap_lookup before
* creating the inodedep as the bmsafemap can be freed during the
* time that we FREE_LOCK() while allocating the inodedep. We must
* call workitem_alloc() before entering the locked section as
* it also acquires the lock, and we must avoid trying to do so
* recursively.
5259 */
5260 bmsafemap = malloc(sizeof(struct bmsafemap),
5261 M_BMSAFEMAP, M_SOFTDEP_FLAGS);
5262 workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
5263 ACQUIRE_LOCK(ITOUMP(ip));
5264 if ((inodedep_lookup(mp, newinum, DEPALLOC, &inodedep)))
panic("softdep_setup_inomapdep: dependency %p for new "
"inode already exists", inodedep);
5267 bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap);
5268 if (jaddref) {
5269 LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps);
5270 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
5271 if_deps);
5272 } else {
5273 inodedep->id_state |= ONDEPLIST;
5274 LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
5275 }
5276 inodedep->id_bmsafemap = bmsafemap;
5277 inodedep->id_state &= ~DEPCOMPLETE;
5278 FREE_LOCK(ITOUMP(ip));
5279 }
5280
5281 /*
5282 * Called just after updating the cylinder group block to
5283 * allocate block or fragment.
5284 */
5285 void
5286 softdep_setup_blkmapdep(
5287 struct buf *bp, /* buffer for cylgroup block with block map */
5288 struct mount *mp, /* filesystem doing allocation */
5289 ufs2_daddr_t newblkno, /* number of newly allocated block */
5290 int frags, /* Number of fragments. */
5291 int oldfrags) /* Previous number of fragments for extend. */
5292 {
5293 struct newblk *newblk;
5294 struct bmsafemap *bmsafemap;
5295 struct jnewblk *jnewblk;
5296 struct ufsmount *ump;
5297 struct fs *fs;
5298
5299 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5300 ("softdep_setup_blkmapdep called on non-softdep filesystem"));
5301 ump = VFSTOUFS(mp);
5302 fs = ump->um_fs;
5303 jnewblk = NULL;
5304 /*
5305 * Create a dependency for the newly allocated block.
5306 * Add it to the dependency list for the buffer holding
5307 * the cylinder group map from which it was allocated.
5308 */
5309 if (MOUNTEDSUJ(mp)) {
5310 jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS);
5311 workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp);
5312 jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list);
5313 jnewblk->jn_state = ATTACHED;
5314 jnewblk->jn_blkno = newblkno;
5315 jnewblk->jn_frags = frags;
5316 jnewblk->jn_oldfrags = oldfrags;
5317 #ifdef INVARIANTS
5318 {
5319 struct cg *cgp;
5320 uint8_t *blksfree;
5321 long bno;
5322 int i;
5323
5324 cgp = (struct cg *)bp->b_data;
5325 blksfree = cg_blksfree(cgp);
5326 bno = dtogd(fs, jnewblk->jn_blkno);
5327 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags;
5328 i++) {
5329 if (isset(blksfree, bno + i))
5330 panic("softdep_setup_blkmapdep: "
5331 "free fragment %d from %d-%d "
5332 "state 0x%X dep %p", i,
5333 jnewblk->jn_oldfrags,
5334 jnewblk->jn_frags,
5335 jnewblk->jn_state,
5336 jnewblk->jn_dep);
5337 }
5338 }
5339 #endif
5340 }
5341
5342 CTR3(KTR_SUJ,
5343 "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d",
5344 newblkno, frags, oldfrags);
5345 ACQUIRE_LOCK(ump);
5346 if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0)
5347 panic("softdep_setup_blkmapdep: found block");
5348 newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(mp, bp,
5349 dtog(fs, newblkno), NULL);
5350 if (jnewblk) {
5351 jnewblk->jn_dep = (struct worklist *)newblk;
5352 LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps);
5353 } else {
5354 newblk->nb_state |= ONDEPLIST;
5355 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
5356 }
5357 newblk->nb_bmsafemap = bmsafemap;
5358 newblk->nb_jnewblk = jnewblk;
5359 FREE_LOCK(ump);
5360 }
5361
5362 #define BMSAFEMAP_HASH(ump, cg) \
5363 (&(ump)->bmsafemap_hashtbl[(cg) & (ump)->bmsafemap_hash_size])
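/*
 * Note: bmsafemap_hash_size is assumed to be a power-of-two bucket
 * mask (as produced by hashinit()), so the '&' above selects a bucket
 * directly from the cylinder group number.
 */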
5364
5365 static int
5366 bmsafemap_find(
5367 struct bmsafemap_hashhead *bmsafemaphd,
5368 int cg,
5369 struct bmsafemap **bmsafemapp)
5370 {
5371 struct bmsafemap *bmsafemap;
5372
5373 LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash)
5374 if (bmsafemap->sm_cg == cg)
5375 break;
5376 if (bmsafemap) {
5377 *bmsafemapp = bmsafemap;
5378 return (1);
5379 }
5380 *bmsafemapp = NULL;
5381
5382 return (0);
5383 }
5384
5385 /*
5386 * Find the bmsafemap associated with a cylinder group buffer.
5387 * If none exists, create one. The buffer must be locked when
5388 * this routine is called and this routine must be called with
5389 * the softdep lock held. To avoid giving up the lock while
5390 * allocating a new bmsafemap, a preallocated bmsafemap may be
5391 * provided. If it is provided but not needed, it is freed.
5392 */
5393 static struct bmsafemap *
5394 bmsafemap_lookup(struct mount *mp,
5395 struct buf *bp,
5396 int cg,
5397 struct bmsafemap *newbmsafemap)
5398 {
5399 struct bmsafemap_hashhead *bmsafemaphd;
5400 struct bmsafemap *bmsafemap, *collision;
5401 struct worklist *wk;
5402 struct ufsmount *ump;
5403
5404 ump = VFSTOUFS(mp);
5405 LOCK_OWNED(ump);
5406 KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer"));
5407 LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5408 if (wk->wk_type == D_BMSAFEMAP) {
5409 if (newbmsafemap)
5410 WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5411 return (WK_BMSAFEMAP(wk));
5412 }
5413 }
5414 bmsafemaphd = BMSAFEMAP_HASH(ump, cg);
5415 if (bmsafemap_find(bmsafemaphd, cg, &bmsafemap) == 1) {
5416 if (newbmsafemap)
5417 WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5418 return (bmsafemap);
5419 }
5420 if (newbmsafemap) {
5421 bmsafemap = newbmsafemap;
5422 } else {
5423 FREE_LOCK(ump);
5424 bmsafemap = malloc(sizeof(struct bmsafemap),
5425 M_BMSAFEMAP, M_SOFTDEP_FLAGS);
5426 workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
5427 ACQUIRE_LOCK(ump);
5428 }
5429 bmsafemap->sm_buf = bp;
5430 LIST_INIT(&bmsafemap->sm_inodedephd);
5431 LIST_INIT(&bmsafemap->sm_inodedepwr);
5432 LIST_INIT(&bmsafemap->sm_newblkhd);
5433 LIST_INIT(&bmsafemap->sm_newblkwr);
5434 LIST_INIT(&bmsafemap->sm_jaddrefhd);
5435 LIST_INIT(&bmsafemap->sm_jnewblkhd);
5436 LIST_INIT(&bmsafemap->sm_freehd);
5437 LIST_INIT(&bmsafemap->sm_freewr);
5438 if (bmsafemap_find(bmsafemaphd, cg, &collision) == 1) {
5439 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
5440 return (collision);
5441 }
5442 bmsafemap->sm_cg = cg;
5443 LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash);
5444 LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
5445 WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
5446 return (bmsafemap);
5447 }
5448
5449 /*
5450 * Direct block allocation dependencies.
5451 *
5452 * When a new block is allocated, the corresponding disk locations must be
5453 * initialized (with zeros or new data) before the on-disk inode points to
5454 * them. Also, the freemap from which the block was allocated must be
5455 * updated (on disk) before the inode's pointer. These two dependencies are
5456 * independent of each other and are needed for all file blocks and indirect
5457 * blocks that are pointed to directly by the inode. Just before the
5458 * "in-core" version of the inode is updated with a newly allocated block
5459 * number, a procedure (below) is called to set up allocation dependency
5460 * structures. These structures are removed when the corresponding
5461 * dependencies are satisfied or when the block allocation becomes obsolete
5462 * (i.e., the file is deleted, the block is de-allocated, or the block is a
5463 * fragment that gets upgraded). All of these cases are handled in
5464 * procedures described later.
5465 *
5466 * When a file extension causes a fragment to be upgraded, either to a larger
5467 * fragment or to a full block, the on-disk location may change (if the
5468 * previous fragment could not simply be extended). In this case, the old
5469 * fragment must be de-allocated, but not until after the inode's pointer has
5470 * been updated. In most cases, this is handled by later procedures, which
5471 * will construct a "freefrag" structure to be added to the workitem queue
5472 * when the inode update is complete (or obsolete). The main exception to
5473 * this is when an allocation occurs while a pending allocation dependency
5474 * (for the same block pointer) remains. This case is handled in the main
5475 * allocation dependency setup procedure by immediately freeing the
5476 * unreferenced fragments.
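*
* As a simplified sketch of the ordering rule, the synchronous
* equivalent of an allocation would be (the names here are
* illustrative, not kernel interfaces):
*
*	init_block_contents(blkno);		zeros or new data
*	sync_block(blkno);			stable before the pointer
*	update_cg_bitmap(cgblkno, blkno);	freemap update
*	sync_block(cgblkno);			likewise stable first
*	set_inode_pointer(ip, off, blkno);	only now safe on disk
*
* Soft updates achieves the same ordering without the synchronous
* writes by rolling back not-yet-safe pointers whenever the inode
* block is written before its dependencies are satisfied.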
5477 */
5478 void
5479 softdep_setup_allocdirect(
5480 struct inode *ip, /* inode to which block is being added */
5481 ufs_lbn_t off, /* block pointer within inode */
5482 ufs2_daddr_t newblkno, /* disk block number being added */
5483 ufs2_daddr_t oldblkno, /* previous block number, 0 unless frag */
5484 long newsize, /* size of new block */
5485 long oldsize, /* size of old block */
5486 struct buf *bp) /* bp for allocated block */
5487 {
5488 struct allocdirect *adp, *oldadp;
5489 struct allocdirectlst *adphead;
5490 struct freefrag *freefrag;
5491 struct inodedep *inodedep;
5492 struct pagedep *pagedep;
5493 struct jnewblk *jnewblk;
5494 struct newblk *newblk;
5495 struct mount *mp;
5496 ufs_lbn_t lbn;
5497
5498 lbn = bp->b_lblkno;
5499 mp = ITOVFS(ip);
5500 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5501 ("softdep_setup_allocdirect called on non-softdep filesystem"));
5502 if (oldblkno && oldblkno != newblkno)
5503 /*
5504 * The usual case is that a smaller fragment that
5505 * was just allocated has been replaced with a bigger
5506 * fragment or a full-size block. If it is marked as
5507 * B_DELWRI, the current contents have not been written
5508 * to disk. It is possible that the block was written
5509 * earlier, but very uncommon. If the block has never
5510 * been written, there is no need to send a BIO_DELETE
5511 * for it when it is freed. The gain from avoiding the
5512 * TRIMs for the common case of unwritten blocks far
5513 * exceeds the cost of the write amplification for the
5514 * uncommon case of failing to send a TRIM for a block
5515 * that had been written.
5516 */
5517 freefrag = newfreefrag(ip, oldblkno, oldsize, lbn,
5518 (bp->b_flags & B_DELWRI) != 0 ? NOTRIM_KEY : SINGLETON_KEY);
5519 else
5520 freefrag = NULL;
5521
5522 CTR6(KTR_SUJ,
5523 "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd "
5524 "off %jd newsize %ld oldsize %d",
5525 ip->i_number, newblkno, oldblkno, off, newsize, oldsize);
5526 ACQUIRE_LOCK(ITOUMP(ip));
5527 if (off >= UFS_NDADDR) {
5528 if (lbn > 0)
5529 panic("softdep_setup_allocdirect: bad lbn %jd, off %jd",
5530 lbn, off);
5531 /* allocating an indirect block */
5532 if (oldblkno != 0)
5533 panic("softdep_setup_allocdirect: non-zero indir");
5534 } else {
5535 if (off != lbn)
5536 panic("softdep_setup_allocdirect: lbn %jd != off %jd",
5537 lbn, off);
5538 /*
5539 * Allocating a direct block.
5540 *
5541 * If we are allocating a directory block, then we must
5542 * allocate an associated pagedep to track additions and
5543 * deletions.
5544 */
5545 if ((ip->i_mode & IFMT) == IFDIR)
5546 pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC,
5547 &pagedep);
5548 }
5549 if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5550 panic("softdep_setup_allocdirect: lost block");
5551 KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5552 ("softdep_setup_allocdirect: newblk already initialized"));
5553 /*
5554 * Convert the newblk to an allocdirect.
5555 */
5556 WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5557 adp = (struct allocdirect *)newblk;
5558 newblk->nb_freefrag = freefrag;
5559 adp->ad_offset = off;
5560 adp->ad_oldblkno = oldblkno;
5561 adp->ad_newsize = newsize;
5562 adp->ad_oldsize = oldsize;
5563
5564 /*
5565 * Finish initializing the journal.
5566 */
5567 if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5568 jnewblk->jn_ino = ip->i_number;
5569 jnewblk->jn_lbn = lbn;
5570 add_to_journal(&jnewblk->jn_list);
5571 }
5572 if (freefrag && freefrag->ff_jdep != NULL &&
5573 freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5574 add_to_journal(freefrag->ff_jdep);
5575 inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
5576 adp->ad_inodedep = inodedep;
5577
5578 WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5579 /*
5580 * The list of allocdirects must be kept sorted in ascending
5581 * order so that the rollback routines can quickly determine the
5582 * first uncommitted block (the size of the file stored on disk
5583 * ends at the end of the lowest committed fragment, or if there
5584 * are no fragments, at the end of the highest committed block).
5585 * Since files generally grow, the typical case is that the new
5586 * block is to be added at the end of the list. We speed this
5587 * special case by checking against the last allocdirect in the
5588 * list before laboriously traversing the list looking for the
5589 * insertion point.
5590 */
5591 adphead = &inodedep->id_newinoupdt;
5592 oldadp = TAILQ_LAST(adphead, allocdirectlst);
5593 if (oldadp == NULL || oldadp->ad_offset <= off) {
5594 /* insert at end of list */
5595 TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5596 if (oldadp != NULL && oldadp->ad_offset == off)
5597 allocdirect_merge(adphead, adp, oldadp);
5598 FREE_LOCK(ITOUMP(ip));
5599 return;
5600 }
5601 TAILQ_FOREACH(oldadp, adphead, ad_next) {
5602 if (oldadp->ad_offset >= off)
5603 break;
5604 }
5605 if (oldadp == NULL)
5606 panic("softdep_setup_allocdirect: lost entry");
5607 /* insert in middle of list */
5608 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5609 if (oldadp->ad_offset == off)
5610 allocdirect_merge(adphead, adp, oldadp);
5611
5612 FREE_LOCK(ITOUMP(ip));
5613 }
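
/*
 * The insertion above follows the generic sorted-insert pattern for
 * <sys/queue.h> tail queues: try the O(1) append fast path first, then
 * fall back to a linear scan. A self-contained sketch of the same
 * technique with made-up element and field names:
 *
 *	#include <sys/queue.h>
 *
 *	struct elem {
 *		int key;
 *		TAILQ_ENTRY(elem) link;
 *	};
 *	TAILQ_HEAD(elemlst, elem);
 *
 *	static void
 *	sorted_insert(struct elemlst *head, struct elem *np)
 *	{
 *		struct elem *ep;
 *
 *		ep = TAILQ_LAST(head, elemlst);
 *		if (ep == NULL || ep->key <= np->key) {
 *			TAILQ_INSERT_TAIL(head, np, link);
 *			return;
 *		}
 *		TAILQ_FOREACH(ep, head, link)
 *			if (ep->key >= np->key)
 *				break;
 *		TAILQ_INSERT_BEFORE(ep, np, link);
 *	}
 *
 * The tail check guarantees the scan finds an element, so no NULL
 * check is needed before TAILQ_INSERT_BEFORE(); the kernel code above
 * panics instead because a miss would indicate list corruption.
 */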
5614
5615 /*
5616 * Merge a newer and older journal record to be stored either in a
5617 * newblock or freefrag. This handles aggregating journal records for
5618 * fragment allocation into a single record as well as replacing a
5619 * journal free with an aborted journal allocation. A segment for the
5620 * oldest record will be placed on wkhd if it has been written. If not,
5621 * the segment for the newer record will suffice.
5622 */
5623 static struct worklist *
5624 jnewblk_merge(struct worklist *new,
5625 struct worklist *old,
5626 struct workhead *wkhd)
5627 {
5628 struct jnewblk *njnewblk;
5629 struct jnewblk *jnewblk;
5630
5631 /* Handle NULLs to simplify callers. */
5632 if (new == NULL)
5633 return (old);
5634 if (old == NULL)
5635 return (new);
5636 /* Replace a jfreefrag with a jnewblk. */
5637 if (new->wk_type == D_JFREEFRAG) {
5638 if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno)
5639 panic("jnewblk_merge: blkno mismatch: %p, %p",
5640 old, new);
5641 cancel_jfreefrag(WK_JFREEFRAG(new));
5642 return (old);
5643 }
5644 if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK)
5645 panic("jnewblk_merge: Bad type: old %d new %d",
5646 old->wk_type, new->wk_type);
5647 /*
5648 * Handle merging of two jnewblk records that describe
5649 * different sets of fragments in the same block.
5650 */
5651 jnewblk = WK_JNEWBLK(old);
5652 njnewblk = WK_JNEWBLK(new);
5653 if (jnewblk->jn_blkno != njnewblk->jn_blkno)
5654 panic("jnewblk_merge: Merging disparate blocks.");
5655 /*
5656 * The record may be rolled back in the cg.
5657 */
5658 if (jnewblk->jn_state & UNDONE) {
5659 jnewblk->jn_state &= ~UNDONE;
5660 njnewblk->jn_state |= UNDONE;
5661 njnewblk->jn_state &= ~ATTACHED;
5662 }
5663 /*
5664 * We modify the newer record and free the older so that if neither
5665 * has been written the most up-to-date copy will be on disk. If
5666 * both have been written but rolled back we only temporarily need
5667 * one of them to fix the bits when the cg write completes.
5668 */
5669 jnewblk->jn_state |= ATTACHED | COMPLETE;
5670 njnewblk->jn_oldfrags = jnewblk->jn_oldfrags;
5671 cancel_jnewblk(jnewblk, wkhd);
5672 WORKLIST_REMOVE(&jnewblk->jn_list);
5673 free_jnewblk(jnewblk);
5674 return (new);
5675 }
5676
5677 /*
5678 * Replace an old allocdirect dependency with a newer one.
5679 */
5680 static void
5681 allocdirect_merge(
5682 struct allocdirectlst *adphead, /* head of list holding allocdirects */
5683 struct allocdirect *newadp, /* allocdirect being added */
5684 struct allocdirect *oldadp) /* existing allocdirect being checked */
5685 {
5686 struct worklist *wk;
5687 struct freefrag *freefrag;
5688
5689 freefrag = NULL;
5690 LOCK_OWNED(VFSTOUFS(newadp->ad_list.wk_mp));
5691 if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
5692 newadp->ad_oldsize != oldadp->ad_newsize ||
5693 newadp->ad_offset >= UFS_NDADDR)
5694 panic("%s %jd != new %jd || old size %ld != new %ld",
5695 "allocdirect_merge: old blkno",
5696 (intmax_t)newadp->ad_oldblkno,
5697 (intmax_t)oldadp->ad_newblkno,
5698 newadp->ad_oldsize, oldadp->ad_newsize);
5699 newadp->ad_oldblkno = oldadp->ad_oldblkno;
5700 newadp->ad_oldsize = oldadp->ad_oldsize;
5701 /*
5702 * If the old dependency had a fragment to free or had never
5703 * previously had a block allocated, then the new dependency
5704 * can immediately post its freefrag and adopt the old freefrag.
5705 * This action is done by swapping the freefrag dependencies.
5706 * The new dependency gains the old one's freefrag, and the
5707 * old one gets the new one and then immediately puts it on
5708 * the worklist when it is freed by free_newblk. It is
5709 * not possible to do this swap when the old dependency had a
5710 * non-zero size but no previous fragment to free. This condition
5711 * arises when the new block is an extension of the old block.
5712 * Here, the first part of the fragment allocated to the new
5713 * dependency is part of the block currently claimed on disk by
5714 * the old dependency, so cannot legitimately be freed until the
5715 * conditions for the new dependency are fulfilled.
5716 */
5717 freefrag = newadp->ad_freefrag;
5718 if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
5719 newadp->ad_freefrag = oldadp->ad_freefrag;
5720 oldadp->ad_freefrag = freefrag;
5721 }
5722 /*
5723 * If we are tracking a new directory-block allocation,
5724 * move it from the old allocdirect to the new allocdirect.
5725 */
5726 if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) {
5727 WORKLIST_REMOVE(wk);
5728 if (!LIST_EMPTY(&oldadp->ad_newdirblk))
5729 panic("allocdirect_merge: extra newdirblk");
5730 WORKLIST_INSERT(&newadp->ad_newdirblk, wk);
5731 }
5732 TAILQ_REMOVE(adphead, oldadp, ad_next);
5733 /*
5734 * We need to move any journal dependencies over to the freefrag
5735 * that releases this block if it exists. Otherwise we are
5736 * extending an existing block and we'll wait until that is
5737 * complete to release the journal space and extend the
5738 * new journal to cover this old space as well.
5739 */
5740 if (freefrag == NULL) {
5741 if (oldadp->ad_newblkno != newadp->ad_newblkno)
5742 panic("allocdirect_merge: %jd != %jd",
5743 oldadp->ad_newblkno, newadp->ad_newblkno);
5744 newadp->ad_block.nb_jnewblk = (struct jnewblk *)
5745 jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list,
5746 &oldadp->ad_block.nb_jnewblk->jn_list,
5747 &newadp->ad_block.nb_jwork);
5748 oldadp->ad_block.nb_jnewblk = NULL;
5749 cancel_newblk(&oldadp->ad_block, NULL,
5750 &newadp->ad_block.nb_jwork);
5751 } else {
5752 wk = (struct worklist *) cancel_newblk(&oldadp->ad_block,
5753 &freefrag->ff_list, &freefrag->ff_jwork);
5754 freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk,
5755 &freefrag->ff_jwork);
5756 }
5757 free_newblk(&oldadp->ad_block);
5758 }
5759
5760 /*
5761 * Allocate a jfreefrag structure to journal a single block free.
5762 */
5763 static struct jfreefrag *
5764 newjfreefrag(struct freefrag *freefrag,
5765 struct inode *ip,
5766 ufs2_daddr_t blkno,
5767 long size,
5768 ufs_lbn_t lbn)
5769 {
5770 struct jfreefrag *jfreefrag;
5771 struct fs *fs;
5772
5773 fs = ITOFS(ip);
5774 jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG,
5775 M_SOFTDEP_FLAGS);
5776 workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, ITOVFS(ip));
5777 jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list);
5778 jfreefrag->fr_state = ATTACHED | DEPCOMPLETE;
5779 jfreefrag->fr_ino = ip->i_number;
5780 jfreefrag->fr_lbn = lbn;
5781 jfreefrag->fr_blkno = blkno;
5782 jfreefrag->fr_frags = numfrags(fs, size);
5783 jfreefrag->fr_freefrag = freefrag;
5784
5785 return (jfreefrag);
5786 }
5787
5788 /*
5789 * Allocate a new freefrag structure.
5790 */
5791 static struct freefrag *
5792 newfreefrag(struct inode *ip,
5793 ufs2_daddr_t blkno,
5794 long size,
5795 ufs_lbn_t lbn,
5796 u_long key)
5797 {
5798 struct freefrag *freefrag;
5799 struct ufsmount *ump;
5800 struct fs *fs;
5801
5802 CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd",
5803 ip->i_number, blkno, size, lbn);
5804 ump = ITOUMP(ip);
5805 fs = ump->um_fs;
5806 if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
5807 panic("newfreefrag: frag size");
5808 freefrag = malloc(sizeof(struct freefrag),
5809 M_FREEFRAG, M_SOFTDEP_FLAGS);
5810 workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ump));
5811 freefrag->ff_state = ATTACHED;
5812 LIST_INIT(&freefrag->ff_jwork);
5813 freefrag->ff_inum = ip->i_number;
5814 freefrag->ff_vtype = ITOV(ip)->v_type;
5815 freefrag->ff_blkno = blkno;
5816 freefrag->ff_fragsize = size;
5817 freefrag->ff_key = key;
5818
5819 if (MOUNTEDSUJ(UFSTOVFS(ump))) {
5820 freefrag->ff_jdep = (struct worklist *)
5821 newjfreefrag(freefrag, ip, blkno, size, lbn);
5822 } else {
5823 freefrag->ff_state |= DEPCOMPLETE;
5824 freefrag->ff_jdep = NULL;
5825 }
5826
5827 return (freefrag);
5828 }
5829
5830 /*
5831 * This workitem de-allocates fragments that were replaced during
5832 * file block allocation.
5833 */
5834 static void
5835 handle_workitem_freefrag(struct freefrag *freefrag)
5836 {
5837 struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp);
5838 struct workhead wkhd;
5839
5840 CTR3(KTR_SUJ,
5841 "handle_workitem_freefrag: ino %d blkno %jd size %ld",
5842 freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize);
5843 /*
5844 * It would be illegal to add new completion items to the
5845 * freefrag after it was scheduled to be done, so it must be
5846 * safe to modify the list head here.
5847 */
5848 LIST_INIT(&wkhd);
5849 ACQUIRE_LOCK(ump);
5850 LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list);
5851 /*
5852 * If the journal has not been written we must cancel it here.
5853 */
5854 if (freefrag->ff_jdep) {
5855 if (freefrag->ff_jdep->wk_type != D_JNEWBLK)
5856 panic("handle_workitem_freefrag: Unexpected type %d",
5857 freefrag->ff_jdep->wk_type);
5858 cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd);
5859 }
5860 FREE_LOCK(ump);
5861 ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno,
5862 freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype,
5863 &wkhd, freefrag->ff_key);
5864 ACQUIRE_LOCK(ump);
5865 WORKITEM_FREE(freefrag, D_FREEFRAG);
5866 FREE_LOCK(ump);
5867 }
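
/*
 * The LIST_SWAP() above is the usual detach-under-lock pattern: pending
 * journal work is moved to a private list head while the per-mount lock
 * is held and then processed after the lock is dropped. A hedged
 * userspace sketch of the same pattern, using a pthread mutex in place
 * of the softdep lock and a hypothetical process() consumer:
 *
 *	#include <sys/queue.h>
 *	#include <pthread.h>
 *
 *	struct work {
 *		LIST_ENTRY(work) w_link;
 *	};
 *	LIST_HEAD(whd, work);
 *
 *	static struct whd pending = LIST_HEAD_INITIALIZER(pending);
 *	static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void
 *	drain_pending(void)
 *	{
 *		struct whd tmp = LIST_HEAD_INITIALIZER(tmp);
 *		struct work *wp;
 *
 *		pthread_mutex_lock(&lk);
 *		LIST_SWAP(&pending, &tmp, work, w_link);
 *		pthread_mutex_unlock(&lk);
 *		LIST_FOREACH(wp, &tmp, w_link)
 *			process(wp);
 *	}
 *
 * LIST_SWAP() is the FreeBSD <sys/queue.h> extension used above; the
 * swap is constant time, so the lock is held only briefly.
 */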
5868
5869 /*
5870 * Set up a dependency structure for an external attributes data block.
5871 * This routine follows much of the structure of softdep_setup_allocdirect.
5872 * See the description of softdep_setup_allocdirect above for details.
5873 */
5874 void
5875 softdep_setup_allocext(
5876 struct inode *ip,
5877 ufs_lbn_t off,
5878 ufs2_daddr_t newblkno,
5879 ufs2_daddr_t oldblkno,
5880 long newsize,
5881 long oldsize,
5882 struct buf *bp)
5883 {
5884 struct allocdirect *adp, *oldadp;
5885 struct allocdirectlst *adphead;
5886 struct freefrag *freefrag;
5887 struct inodedep *inodedep;
5888 struct jnewblk *jnewblk;
5889 struct newblk *newblk;
5890 struct mount *mp;
5891 struct ufsmount *ump;
5892 ufs_lbn_t lbn;
5893
5894 mp = ITOVFS(ip);
5895 ump = VFSTOUFS(mp);
5896 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5897 ("softdep_setup_allocext called on non-softdep filesystem"));
5898 KASSERT(off < UFS_NXADDR,
5899 ("softdep_setup_allocext: lbn %lld > UFS_NXADDR", (long long)off));
5900
5901 lbn = bp->b_lblkno;
5902 if (oldblkno && oldblkno != newblkno)
5903 /*
5904 * The usual case is that a smaller fragment that
5905 * was just allocated has been replaced with a bigger
5906 * fragment or a full-size block. If it is marked as
5907 * B_DELWRI, the current contents have not been written
5908 * to disk. It is possible that the block was written
5909 * earlier, but very uncommon. If the block has never
5910 * been written, there is no need to send a BIO_DELETE
5911 * for it when it is freed. The gain from avoiding the
5912 * TRIMs for the common case of unwritten blocks far
5913 * exceeds the cost of the write amplification for the
5914 * uncommon case of failing to send a TRIM for a block
5915 * that had been written.
5916 */
5917 freefrag = newfreefrag(ip, oldblkno, oldsize, lbn,
5918 (bp->b_flags & B_DELWRI) != 0 ? NOTRIM_KEY : SINGLETON_KEY);
5919 else
5920 freefrag = NULL;
5921
5922 ACQUIRE_LOCK(ump);
5923 if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5924 panic("softdep_setup_allocext: lost block");
5925 KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5926 ("softdep_setup_allocext: newblk already initialized"));
5927 /*
5928 * Convert the newblk to an allocdirect.
5929 */
5930 WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5931 adp = (struct allocdirect *)newblk;
5932 newblk->nb_freefrag = freefrag;
5933 adp->ad_offset = off;
5934 adp->ad_oldblkno = oldblkno;
5935 adp->ad_newsize = newsize;
5936 adp->ad_oldsize = oldsize;
5937 adp->ad_state |= EXTDATA;
5938
5939 /*
5940 * Finish initializing the journal.
5941 */
5942 if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5943 jnewblk->jn_ino = ip->i_number;
5944 jnewblk->jn_lbn = lbn;
5945 add_to_journal(&jnewblk->jn_list);
5946 }
5947 if (freefrag && freefrag->ff_jdep != NULL &&
5948 freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5949 add_to_journal(freefrag->ff_jdep);
5950 inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
5951 adp->ad_inodedep = inodedep;
5952
5953 WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5954 /*
5955 * The list of allocdirects must be kept sorted in ascending
5956 * order so that the rollback routines can quickly determine the
5957 * first uncommitted block (the size of the file stored on disk
5958 * ends at the end of the lowest committed fragment, or if there
5959 * are no fragments, at the end of the highest committed block).
5960 * Since files generally grow, the typical case is that the new
5961 * block is to be added at the end of the list. We speed this
5962 * special case by checking against the last allocdirect in the
5963 * list before laboriously traversing the list looking for the
5964 * insertion point.
5965 */
5966 adphead = &inodedep->id_newextupdt;
5967 oldadp = TAILQ_LAST(adphead, allocdirectlst);
5968 if (oldadp == NULL || oldadp->ad_offset <= off) {
5969 /* insert at end of list */
5970 TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5971 if (oldadp != NULL && oldadp->ad_offset == off)
5972 allocdirect_merge(adphead, adp, oldadp);
5973 FREE_LOCK(ump);
5974 return;
5975 }
5976 TAILQ_FOREACH(oldadp, adphead, ad_next) {
5977 if (oldadp->ad_offset >= off)
5978 break;
5979 }
5980 if (oldadp == NULL)
5981 panic("softdep_setup_allocext: lost entry");
5982 /* insert in middle of list */
5983 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5984 if (oldadp->ad_offset == off)
5985 allocdirect_merge(adphead, adp, oldadp);
5986 FREE_LOCK(ump);
5987 }
5988
5989 /*
5990 * Indirect block allocation dependencies.
5991 *
5992 * The same dependencies that exist for a direct block also exist when
5993 * a new block is allocated and pointed to by an entry in a block of
5994 * indirect pointers. The undo/redo states described above are also
5995 * used here. Because an indirect block contains many pointers that
5996 * may have dependencies, a second copy of the entire in-memory indirect
5997 * block is kept. The buffer cache copy is always completely up-to-date.
5998 * The second copy, which is used only as a source for disk writes,
5999 * contains only the safe pointers (i.e., those that have no remaining
6000 * update dependencies). The second copy is freed when all pointers
6001 * are safe. The cache is not allowed to replace indirect blocks with
6002 * pending update dependencies. If a buffer containing an indirect
6003 * block with dependencies is written, these routines will mark it
6004 * dirty again. It can only be successfully written once all the
6005 * dependencies are removed. The ffs_fsync routine in conjunction with
6006 * softdep_sync_metadata work together to get all the dependencies
6007 * removed so that a file can be successfully written to disk. Three
6008 * procedures are used when setting up indirect block pointer
6009 * dependencies. The division is necessary because of the organization
6010 * of the "balloc" routine and because of the distinction between file
6011 * pages and file metadata blocks.
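*
* A simplified sketch of the double-copy scheme (hypothetical names,
* not the indirdep implementation that follows):
*
*	live = (ufs2_daddr_t *)bp->b_data;	in-core, always current
*	safe = malloc(bsize);			copy used for the write
*	memcpy(safe, live, bsize);
*	for (i = 0; i < nptrs; i++)
*		if (pointer_has_dependency(live, i))
*			safe[i] = 0;		roll back unsafe pointer
*	start_write(safe);
*
* Once every pointer is dependency-free, the safe copy is discarded
* and the buffer cache copy is written directly.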
6012 */
6013
6014 /*
6015 * Allocate a new allocindir structure.
6016 */
6017 static struct allocindir *
6018 newallocindir(
6019 struct inode *ip, /* inode for file being extended */
6020 int ptrno, /* offset of pointer in indirect block */
6021 ufs2_daddr_t newblkno, /* disk block number being added */
6022 ufs2_daddr_t oldblkno, /* previous block number, 0 if none */
6023 ufs_lbn_t lbn)
6024 {
6025 struct newblk *newblk;
6026 struct allocindir *aip;
6027 struct freefrag *freefrag;
6028 struct jnewblk *jnewblk;
6029
6030 if (oldblkno)
6031 freefrag = newfreefrag(ip, oldblkno, ITOFS(ip)->fs_bsize, lbn,
6032 SINGLETON_KEY);
6033 else
6034 freefrag = NULL;
6035 ACQUIRE_LOCK(ITOUMP(ip));
6036 if (newblk_lookup(ITOVFS(ip), newblkno, 0, &newblk) == 0)
6037 panic("newallocindir: lost block");
6038 KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
6039 ("newallocindir: newblk already initialized"));
6040 WORKITEM_REASSIGN(newblk, D_ALLOCINDIR);
6041 newblk->nb_freefrag = freefrag;
6042 aip = (struct allocindir *)newblk;
6043 aip->ai_offset = ptrno;
6044 aip->ai_oldblkno = oldblkno;
6045 aip->ai_lbn = lbn;
6046 if ((jnewblk = newblk->nb_jnewblk) != NULL) {
6047 jnewblk->jn_ino = ip->i_number;
6048 jnewblk->jn_lbn = lbn;
6049 add_to_journal(&jnewblk->jn_list);
6050 }
6051 if (freefrag && freefrag->ff_jdep != NULL &&
6052 freefrag->ff_jdep->wk_type == D_JFREEFRAG)
6053 add_to_journal(freefrag->ff_jdep);
6054 return (aip);
6055 }
6056
6057 /*
6058 * Called just before setting an indirect block pointer
6059 * to a newly allocated file page.
6060 */
6061 void
6062 softdep_setup_allocindir_page(
6063 struct inode *ip, /* inode for file being extended */
6064 ufs_lbn_t lbn, /* allocated block number within file */
6065 struct buf *bp, /* buffer with indirect blk referencing page */
6066 int ptrno, /* offset of pointer in indirect block */
6067 ufs2_daddr_t newblkno, /* disk block number being added */
6068 ufs2_daddr_t oldblkno, /* previous block number, 0 if none */
6069 struct buf *nbp) /* buffer holding allocated page */
6070 {
6071 struct inodedep *inodedep;
6072 struct freefrag *freefrag;
6073 struct allocindir *aip;
6074 struct pagedep *pagedep;
6075 struct mount *mp;
6076 struct ufsmount *ump;
6077
6078 mp = ITOVFS(ip);
6079 ump = VFSTOUFS(mp);
6080 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
6081 ("softdep_setup_allocindir_page called on non-softdep filesystem"));
6082 KASSERT(lbn == nbp->b_lblkno,
6083 ("softdep_setup_allocindir_page: lbn %jd != lblkno %jd",
6084 lbn, nbp->b_lblkno));
6085 CTR4(KTR_SUJ,
6086 "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd "
6087 "lbn %jd", ip->i_number, newblkno, oldblkno, lbn);
6088 ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page");
6089 aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn);
6090 (void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6091 /*
6092 * If we are allocating a directory page, then we must
6093 * allocate an associated pagedep to track additions and
6094 * deletions.
6095 */
6096 if ((ip->i_mode & IFMT) == IFDIR)
6097 pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep);
6098 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
6099 freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn);
6100 FREE_LOCK(ump);
6101 if (freefrag)
6102 handle_workitem_freefrag(freefrag);
6103 }
6104
6105 /*
6106 * Called just before setting an indirect block pointer to a
6107 * newly allocated indirect block.
6108 */
6109 void
6110 softdep_setup_allocindir_meta(
6111 struct buf *nbp, /* newly allocated indirect block */
6112 struct inode *ip, /* inode for file being extended */
6113 struct buf *bp, /* indirect block referencing allocated block */
6114 int ptrno, /* offset of pointer in indirect block */
6115 ufs2_daddr_t newblkno) /* disk block number being added */
6116 {
6117 struct inodedep *inodedep;
6118 struct allocindir *aip;
6119 struct ufsmount *ump;
6120 ufs_lbn_t lbn;
6121
6122 ump = ITOUMP(ip);
6123 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
6124 ("softdep_setup_allocindir_meta called on non-softdep filesystem"));
6125 CTR3(KTR_SUJ,
6126 "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d",
6127 ip->i_number, newblkno, ptrno);
6128 lbn = nbp->b_lblkno;
6129 ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta");
6130 aip = newallocindir(ip, ptrno, newblkno, 0, lbn);
6131 inodedep_lookup(UFSTOVFS(ump), ip->i_number, DEPALLOC, &inodedep);
6132 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
6133 if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn))
6134 panic("softdep_setup_allocindir_meta: Block already existed");
6135 FREE_LOCK(ump);
6136 }
6137
6138 static void
6139 indirdep_complete(struct indirdep *indirdep)
6140 {
6141 struct allocindir *aip;
6142
6143 LIST_REMOVE(indirdep, ir_next);
6144 indirdep->ir_state |= DEPCOMPLETE;
6145
6146 while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) {
6147 LIST_REMOVE(aip, ai_next);
6148 free_newblk(&aip->ai_block);
6149 }
6150 /*
6151 * If this indirdep is not attached to a buf, it was simply waiting
6152 * on completion to clear completehd. free_indirdep() asserts
6153 * that nothing is dangling.
6154 */
6155 if ((indirdep->ir_state & ONWORKLIST) == 0)
6156 free_indirdep(indirdep);
6157 }
6158
6159 static struct indirdep *
6160 indirdep_lookup(struct mount *mp,
6161 struct inode *ip,
6162 struct buf *bp)
6163 {
6164 struct indirdep *indirdep, *newindirdep;
6165 struct newblk *newblk;
6166 struct ufsmount *ump;
6167 struct worklist *wk;
6168 struct fs *fs;
6169 ufs2_daddr_t blkno;
6170
6171 ump = VFSTOUFS(mp);
6172 LOCK_OWNED(ump);
6173 indirdep = NULL;
6174 newindirdep = NULL;
6175 fs = ump->um_fs;
6176 for (;;) {
6177 LIST_FOREACH(wk, &bp->b_dep, wk_list) {
6178 if (wk->wk_type != D_INDIRDEP)
6179 continue;
6180 indirdep = WK_INDIRDEP(wk);
6181 break;
6182 }
6183 /* Found on the buffer worklist, no new structure to free. */
6184 if (indirdep != NULL && newindirdep == NULL)
6185 return (indirdep);
6186 if (indirdep != NULL && newindirdep != NULL)
6187 panic("indirdep_lookup: simultaneous create");
6188 /* None found on the buffer and a new structure is ready. */
6189 if (indirdep == NULL && newindirdep != NULL)
6190 break;
6191 /* None found and no new structure available. */
6192 FREE_LOCK(ump);
6193 newindirdep = malloc(sizeof(struct indirdep),
6194 M_INDIRDEP, M_SOFTDEP_FLAGS);
6195 workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp);
6196 newindirdep->ir_state = ATTACHED;
6197 if (I_IS_UFS1(ip))
6198 newindirdep->ir_state |= UFS1FMT;
6199 TAILQ_INIT(&newindirdep->ir_trunc);
6200 newindirdep->ir_saveddata = NULL;
6201 LIST_INIT(&newindirdep->ir_deplisthd);
6202 LIST_INIT(&newindirdep->ir_donehd);
6203 LIST_INIT(&newindirdep->ir_writehd);
6204 LIST_INIT(&newindirdep->ir_completehd);
6205 if (bp->b_blkno == bp->b_lblkno) {
6206 ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp,
6207 NULL, NULL);
6208 bp->b_blkno = blkno;
6209 }
6210 newindirdep->ir_freeblks = NULL;
6211 newindirdep->ir_savebp =
6212 getblk(ump->um_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0);
6213 newindirdep->ir_bp = bp;
6214 BUF_KERNPROC(newindirdep->ir_savebp);
6215 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount);
6216 ACQUIRE_LOCK(ump);
6217 }
6218 indirdep = newindirdep;
6219 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list);
6220 /*
6221 * If the block is not yet allocated we don't set DEPCOMPLETE so
6222 * that we don't free dependencies until the pointers are valid.
6223 * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather
6224 * than using the hash.
6225 */
6226 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk))
6227 LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next);
6228 else
6229 indirdep->ir_state |= DEPCOMPLETE;
6230 return (indirdep);
6231 }
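
/*
 * The loop above is the standard unlock/allocate/relock pattern: the
 * per-mount lock may not be held across a sleeping malloc(), so the
 * lookup must be retried once the lock is reacquired. A generic sketch
 * of the pattern with illustrative names:
 *
 *	for (;;) {
 *		obj = table_find(key);		lock held here
 *		if (obj != NULL && new == NULL)
 *			return (obj);		found, nothing to free
 *		if (obj == NULL && new != NULL)
 *			break;			install preallocated obj
 *		unlock();
 *		new = alloc_and_init();		may sleep
 *		lock();				racer may have inserted
 *	}
 *	table_insert(key, new);
 *
 * A generic version would free the duplicate when both obj and new are
 * non-NULL; indirdep_lookup() can panic instead because the caller's
 * buffer lock excludes racing creators for the same buf.
 */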
6232
6233 /*
6234 * Called to finish the allocation of the "aip" allocated
6235 * by one of the two routines above.
6236 */
6237 static struct freefrag *
6238 setup_allocindir_phase2(
6239 struct buf *bp, /* in-memory copy of the indirect block */
6240 struct inode *ip, /* inode for file being extended */
6241 struct inodedep *inodedep, /* Inodedep for ip */
6242 struct allocindir *aip, /* allocindir allocated by the above routines */
6243 ufs_lbn_t lbn) /* Logical block number for this block. */
6244 {
6245 struct fs *fs __diagused;
6246 struct indirdep *indirdep;
6247 struct allocindir *oldaip;
6248 struct freefrag *freefrag;
6249 struct mount *mp;
6250 struct ufsmount *ump;
6251
6252 mp = ITOVFS(ip);
6253 ump = VFSTOUFS(mp);
6254 LOCK_OWNED(ump);
6255 fs = ump->um_fs;
6256 if (bp->b_lblkno >= 0)
6257 panic("setup_allocindir_phase2: not indir blk");
6258 KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs),
6259 ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset));
6260 indirdep = indirdep_lookup(mp, ip, bp);
6261 KASSERT(indirdep->ir_savebp != NULL,
6262 ("setup_allocindir_phase2 NULL ir_savebp"));
6263 aip->ai_indirdep = indirdep;
6264 /*
6265 * Check for an unwritten dependency for this indirect offset. If
6266 * one exists, merge the old dependency into the new one. This
6267 * happens only as a result of reallocblk.
6268 */
6269 freefrag = NULL;
6270 if (aip->ai_oldblkno != 0) {
6271 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) {
6272 if (oldaip->ai_offset == aip->ai_offset) {
6273 freefrag = allocindir_merge(aip, oldaip);
6274 goto done;
6275 }
6276 }
6277 LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) {
6278 if (oldaip->ai_offset == aip->ai_offset) {
6279 freefrag = allocindir_merge(aip, oldaip);
6280 goto done;
6281 }
6282 }
6283 }
6284 done:
6285 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next);
6286 return (freefrag);
6287 }
6288
6289 /*
6290 * Merge two allocindirs which refer to the same block. Move newblock
6291 * dependencies and setup the freefrags appropriately.
6292 */
6293 static struct freefrag *
6294 allocindir_merge(
6295 struct allocindir *aip,
6296 struct allocindir *oldaip)
6297 {
6298 struct freefrag *freefrag;
6299 struct worklist *wk;
6300
6301 if (oldaip->ai_newblkno != aip->ai_oldblkno)
6302 panic("allocindir_merge: blkno");
6303 aip->ai_oldblkno = oldaip->ai_oldblkno;
6304 freefrag = aip->ai_freefrag;
6305 aip->ai_freefrag = oldaip->ai_freefrag;
6306 oldaip->ai_freefrag = NULL;
6307 KASSERT(freefrag != NULL, ("allocindir_merge: No freefrag"));
6308 /*
6309 * If we are tracking a new directory-block allocation,
6310 * move it from the old allocindir to the new allocindir.
6311 */
6312 if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) {
6313 WORKLIST_REMOVE(wk);
6314 if (!LIST_EMPTY(&oldaip->ai_newdirblk))
6315 panic("allocindir_merge: extra newdirblk");
6316 WORKLIST_INSERT(&aip->ai_newdirblk, wk);
6317 }
6318 /*
6319 * We can skip journaling for this freefrag and just complete
6320 * any pending journal work for the allocindir that is being
6321 * removed after the freefrag completes.
6322 */
6323 if (freefrag->ff_jdep)
6324 cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep));
6325 LIST_REMOVE(oldaip, ai_next);
6326 freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block,
6327 &freefrag->ff_list, &freefrag->ff_jwork);
6328 free_newblk(&oldaip->ai_block);
6329
6330 return (freefrag);
6331 }
6332
6333 static inline void
6334 setup_freedirect(
6335 struct freeblks *freeblks,
6336 struct inode *ip,
6337 int i,
6338 int needj)
6339 {
6340 struct ufsmount *ump;
6341 ufs2_daddr_t blkno;
6342 int frags;
6343
6344 blkno = DIP(ip, i_db[i]);
6345 if (blkno == 0)
6346 return;
6347 DIP_SET(ip, i_db[i], 0);
6348 ump = ITOUMP(ip);
6349 frags = sblksize(ump->um_fs, ip->i_size, i);
6350 frags = numfrags(ump->um_fs, frags);
6351 newfreework(ump, freeblks, NULL, i, blkno, frags, 0, needj);
6352 }
6353
6354 static inline void
6355 setup_freeext(
6356 struct freeblks *freeblks,
6357 struct inode *ip,
6358 int i,
6359 int needj)
6360 {
6361 struct ufsmount *ump;
6362 ufs2_daddr_t blkno;
6363 int frags;
6364
6365 blkno = ip->i_din2->di_extb[i];
6366 if (blkno == 0)
6367 return;
6368 ip->i_din2->di_extb[i] = 0;
6369 ump = ITOUMP(ip);
6370 frags = sblksize(ump->um_fs, ip->i_din2->di_extsize, i);
6371 frags = numfrags(ump->um_fs, frags);
6372 newfreework(ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj);
6373 }
6374
6375 static inline void
6376 setup_freeindir(
6377 struct freeblks *freeblks,
6378 struct inode *ip,
6379 int i,
6380 ufs_lbn_t lbn,
6381 int needj)
6382 {
6383 struct ufsmount *ump;
6384 ufs2_daddr_t blkno;
6385
6386 blkno = DIP(ip, i_ib[i]);
6387 if (blkno == 0)
6388 return;
6389 DIP_SET(ip, i_ib[i], 0);
6390 ump = ITOUMP(ip);
6391 newfreework(ump, freeblks, NULL, lbn, blkno, ump->um_fs->fs_frag,
6392 0, needj);
6393 }
6394
6395 static inline struct freeblks *
6396 newfreeblks(struct mount *mp, struct inode *ip)
6397 {
6398 struct freeblks *freeblks;
6399
6400 freeblks = malloc(sizeof(struct freeblks),
6401 M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO);
6402 workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp);
6403 LIST_INIT(&freeblks->fb_jblkdephd);
6404 LIST_INIT(&freeblks->fb_jwork);
6405 freeblks->fb_ref = 0;
6406 freeblks->fb_cgwait = 0;
6407 freeblks->fb_state = ATTACHED;
6408 freeblks->fb_uid = ip->i_uid;
6409 freeblks->fb_inum = ip->i_number;
6410 freeblks->fb_vtype = ITOV(ip)->v_type;
6411 freeblks->fb_modrev = DIP(ip, i_modrev);
6412 freeblks->fb_devvp = ITODEVVP(ip);
6413 freeblks->fb_chkcnt = 0;
6414 freeblks->fb_len = 0;
6415
6416 return (freeblks);
6417 }
6418
6419 static void
6420 trunc_indirdep(
6421 struct indirdep *indirdep,
6422 struct freeblks *freeblks,
6423 struct buf *bp,
6424 int off)
6425 {
6426 struct allocindir *aip, *aipn;
6427
6428 /*
6429 * The first set of allocindirs won't be in ir_savebp.
6430 */
6431 LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn)
6432 if (aip->ai_offset > off)
6433 cancel_allocindir(aip, bp, freeblks, 1);
6434 LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn)
6435 if (aip->ai_offset > off)
6436 cancel_allocindir(aip, bp, freeblks, 1);
6437 /*
6438 * These will exist in ir_savebp.
6439 */
6440 LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn)
6441 if (aip->ai_offset > off)
6442 cancel_allocindir(aip, NULL, freeblks, 0);
6443 LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn)
6444 if (aip->ai_offset > off)
6445 cancel_allocindir(aip, NULL, freeblks, 0);
6446 }
6447
6448 /*
6449 * Follow the chain of indirects down to lastlbn creating a freework
6450 * structure for each. This will be used to start indir_trunc() at
6451 * the right offset and create the journal records for the partial
6452 * truncation. A second step will handle the truncated dependencies.
6453 */
6454 static int
6455 setup_trunc_indir(
6456 struct freeblks *freeblks,
6457 struct inode *ip,
6458 ufs_lbn_t lbn,
6459 ufs_lbn_t lastlbn,
6460 ufs2_daddr_t blkno)
6461 {
6462 struct indirdep *indirdep;
6463 struct indirdep *indirn;
6464 struct freework *freework;
6465 struct newblk *newblk;
6466 struct mount *mp;
6467 struct ufsmount *ump;
6468 struct buf *bp;
6469 uint8_t *start;
6470 uint8_t *end;
6471 ufs_lbn_t lbnadd;
6472 int level;
6473 int error;
6474 int off;
6475
6476 freework = NULL;
6477 if (blkno == 0)
6478 return (0);
6479 mp = freeblks->fb_list.wk_mp;
6480 ump = VFSTOUFS(mp);
6481 /*
6482 * Here, calls to VOP_BMAP() will fail. However, we already have
6483 * the on-disk address, so we just pass it to bread() instead of
6484 * having bread() attempt to calculate it using VOP_BMAP().
6485 */
6486 error = ffs_breadz(ump, ITOV(ip), lbn, blkptrtodb(ump, blkno),
6487 (int)mp->mnt_stat.f_iosize, NULL, NULL, 0, NOCRED, 0, NULL, &bp);
6488 if (error)
6489 return (error);
6490 level = lbn_level(lbn);
6491 lbnadd = lbn_offset(ump->um_fs, level);
6492 /*
6493 * Compute the offset of the last block we want to keep. Store
6494 * in the freework the first block we want to completely free.
6495 */
6496 off = (lastlbn - -(lbn + level)) / lbnadd;
6497 if (off + 1 == NINDIR(ump->um_fs))
6498 goto nowork;
6499 freework = newfreework(ump, freeblks, NULL, lbn, blkno, 0, off + 1, 0);
6500 /*
6501 * Link the freework into the indirdep. This will prevent any new
6502 * allocations from proceeding until we are finished with the
6503 * truncate and the block is written.
6504 */
6505 ACQUIRE_LOCK(ump);
6506 indirdep = indirdep_lookup(mp, ip, bp);
6507 if (indirdep->ir_freeblks)
6508 panic("setup_trunc_indir: indirdep already truncated.");
6509 TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next);
6510 freework->fw_indir = indirdep;
6511 /*
6512 * Cancel any allocindirs that will not make it to disk.
6513 * We have to do this for all copies of the indirdep that
6514 * live on this newblk.
6515 */
6516 if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
6517 if (newblk_lookup(mp, dbtofsb(ump->um_fs, bp->b_blkno), 0,
6518 &newblk) == 0)
6519 panic("setup_trunc_indir: lost block");
6520 LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next)
6521 trunc_indirdep(indirn, freeblks, bp, off);
6522 } else
6523 trunc_indirdep(indirdep, freeblks, bp, off);
6524 FREE_LOCK(ump);
6525 /*
6526 * Creation is protected by the buf lock. The saveddata is only
6527 * needed if a full truncation follows a partial truncation, but it
6528 * is difficult to allocate in that case, so we fetch it anyway.
6529 */
6530 if (indirdep->ir_saveddata == NULL)
6531 indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
6532 M_SOFTDEP_FLAGS);
6533 nowork:
6534 /* Fetch the blkno of the child and the zero start offset. */
6535 if (I_IS_UFS1(ip)) {
6536 blkno = ((ufs1_daddr_t *)bp->b_data)[off];
6537 start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1];
6538 } else {
6539 blkno = ((ufs2_daddr_t *)bp->b_data)[off];
6540 start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1];
6541 }
6542 if (freework) {
6543 /* Zero the truncated pointers. */
6544 end = bp->b_data + bp->b_bcount;
6545 bzero(start, end - start);
6546 bdwrite(bp);
6547 } else
6548 bqrelse(bp);
6549 if (level == 0)
6550 return (0);
6551 lbn++; /* adjust level */
6552 lbn -= (off * lbnadd);
6553 return (setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno));
6554 }
6555
6556 /*
6557 * Complete the partial truncation of an indirect block setup by
6558 * setup_trunc_indir(). This zeros the truncated pointers in the saved
6559 * copy and writes them to disk before the freeblks is allowed to complete.
6560 */
6561 static void
6562 complete_trunc_indir(struct freework *freework)
6563 {
6564 struct freework *fwn;
6565 struct indirdep *indirdep;
6566 struct ufsmount *ump;
6567 struct buf *bp;
6568 uintptr_t start;
6569 int count;
6570
6571 ump = VFSTOUFS(freework->fw_list.wk_mp);
6572 LOCK_OWNED(ump);
6573 indirdep = freework->fw_indir;
6574 for (;;) {
6575 bp = indirdep->ir_bp;
6576 /* See if the block was discarded. */
6577 if (bp == NULL)
6578 break;
6579 /* Inline part of getdirtybuf(). We don't want bremfree. */
6580 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0)
6581 break;
6582 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
6583 LOCK_PTR(ump)) == 0)
6584 BUF_UNLOCK(bp);
6585 ACQUIRE_LOCK(ump);
6586 }
6587 freework->fw_state |= DEPCOMPLETE;
6588 TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next);
6589 /*
6590 * Zero the pointers in the saved copy.
6591 */
6592 if (indirdep->ir_state & UFS1FMT)
6593 start = sizeof(ufs1_daddr_t);
6594 else
6595 start = sizeof(ufs2_daddr_t);
6596 start *= freework->fw_start;
6597 count = indirdep->ir_savebp->b_bcount - start;
6598 start += (uintptr_t)indirdep->ir_savebp->b_data;
6599 bzero((char *)start, count);
6600 /*
6601 * We need to start the next truncation in the list if it has not
6602 * been started yet.
6603 */
6604 fwn = TAILQ_FIRST(&indirdep->ir_trunc);
6605 if (fwn != NULL) {
6606 if (fwn->fw_freeblks == indirdep->ir_freeblks)
6607 TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next);
6608 if ((fwn->fw_state & ONWORKLIST) == 0)
6609 freework_enqueue(fwn);
6610 }
6611 /*
6612 * Once the last truncation completes, restore the saved copy
6613 * if the block was fully truncated (bp is NULL); the saved
6614 * data is then freed as it is no longer needed.
6615 */
6616 if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
6617 if (bp == NULL)
6618 bcopy(indirdep->ir_saveddata,
6619 indirdep->ir_savebp->b_data,
6620 indirdep->ir_savebp->b_bcount);
6621 free(indirdep->ir_saveddata, M_INDIRDEP);
6622 indirdep->ir_saveddata = NULL;
6623 }
6624 /*
6625 * When bp is NULL there is a full truncation pending. We
6626 * must wait for this full truncation to be journaled before
6627 * we can release this freework because the disk pointers will
6628 * never be written as zero.
6629 */
6630 if (bp == NULL) {
6631 if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd))
6632 handle_written_freework(freework);
6633 else
6634 WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd,
6635 &freework->fw_list);
6636 if (fwn == NULL) {
6637 freework->fw_indir = (void *)0x0000deadbeef0000;
6638 bp = indirdep->ir_savebp;
6639 indirdep->ir_savebp = NULL;
6640 free_indirdep(indirdep);
6641 FREE_LOCK(ump);
6642 brelse(bp);
6643 ACQUIRE_LOCK(ump);
6644 }
6645 } else {
6646 /* Complete when the real copy is written. */
6647 WORKLIST_INSERT(&bp->b_dep, &freework->fw_list);
6648 BUF_UNLOCK(bp);
6649 }
6650 }
6651
6652 /*
6653 * Calculate the number of blocks we are going to release where datablocks
6654 * is the current total and length is the new file size.
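*
* A worked example, assuming a UFS2 filesystem with fs_bsize 32768 and
* fs_fsize 4096 (8 frags per block, NINDIR(fs) == 4096): for a length
* of 100 blocks, the data charge is blkstofrags(fs, 100) = 800
* fragments, and the 88 blocks beyond UFS_NDADDR (12) need
* howmany(88, 4096) = 1 single indirect, 8 more fragments, giving
* totblks = 808 before the final fsbtodb() conversion.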
6655 */
6656 static ufs2_daddr_t
6657 blkcount(struct fs *fs,
6658 ufs2_daddr_t datablocks,
6659 off_t length)
6660 {
6661 off_t totblks, numblks;
6662
6663 totblks = 0;
6664 numblks = howmany(length, fs->fs_bsize);
6665 if (numblks <= UFS_NDADDR) {
6666 totblks = howmany(length, fs->fs_fsize);
6667 goto out;
6668 }
6669 totblks = blkstofrags(fs, numblks);
6670 numblks -= UFS_NDADDR;
6671 /*
6672 * Count all single, then double, then triple indirects required.
6673 * Subtracting one indirect's worth of blocks for each pass
6674 * acknowledges one of each pointed to by the inode.
6675 */
6676 for (;;) {
6677 totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs)));
6678 numblks -= NINDIR(fs);
6679 if (numblks <= 0)
6680 break;
6681 numblks = howmany(numblks, NINDIR(fs));
6682 }
6683 out:
6684 totblks = fsbtodb(fs, totblks);
6685 /*
6686 * Handle sparse files. We can't reclaim more blocks than the inode
6687 * references. We will correct it later in handle_complete_freeblks()
6688 * when we know the real count.
6689 */
6690 if (totblks > datablocks)
6691 return (0);
6692 return (datablocks - totblks);
6693 }
6694
6695 /*
6696 * Handle freeblocks for journaled softupdate filesystems.
6697 *
6698 * Contrary to normal softupdates, we must preserve the block pointers in
6699 * indirects until their subordinates are free. This is to avoid journaling
6700 * every block that is freed which may consume more space than the journal
6701 * itself. The recovery program will see the free block journals at the
6702 * base of the truncated area and traverse them to reclaim space. The
6703 * pointers in the inode may be cleared immediately after the journal
6704 * records are written because each direct and indirect pointer in the
6705 * inode is recorded in a journal. This permits full truncation to proceed
6706 * asynchronously. The write order is journal -> inode -> cgs -> indirects.
6707 *
6708 * The algorithm is as follows:
6709 * 1) Traverse the in-memory state and create journal entries to release
6710 * the relevant blocks and full indirect trees.
6711 * 2) Traverse the indirect block chain adding partial truncation freework
6712 * records to indirects in the path to lastlbn. The freework will
6713 * prevent new allocation dependencies from being satisfied in this
6714 * indirect until the truncation completes.
6715 * 3) Read and lock the inode block, performing an update with the new size
6716 * and pointers. This prevents truncated data from becoming valid on
6717 * disk through step 4.
6718 * 4) Reap unsatisfied dependencies that are beyond the truncated area,
6719 * and eliminate journal work for those records that do not require it.
6720 * 5) Schedule the journal records to be written followed by the inode block.
6721 * 6) Allocate any necessary frags for the end of file.
6722 * 7) Zero any partially truncated blocks.
6723 *
6724 * From this point, truncation proceeds asynchronously using the freework and
6725 * indir_trunc machinery. The file will not be extended again into a
6726 * partially truncated indirect block until all work is completed but
6727 * the normal dependency mechanism ensures that it is rolled back/forward
6728 * as appropriate. Further truncation may occur without delay and is
6729 * serialized in indir_trunc().
6730 */
6731 void
6732 softdep_journal_freeblocks(
6733 struct inode *ip, /* The inode whose length is to be reduced */
6734 struct ucred *cred,
6735 off_t length, /* The new length for the file */
6736 int flags) /* IO_EXT and/or IO_NORMAL */
6737 {
6738 struct freeblks *freeblks, *fbn;
6739 struct worklist *wk, *wkn;
6740 struct inodedep *inodedep;
6741 struct jblkdep *jblkdep;
6742 struct allocdirect *adp, *adpn;
6743 struct ufsmount *ump;
6744 struct fs *fs;
6745 struct buf *bp;
6746 struct vnode *vp;
6747 struct mount *mp;
6748 daddr_t dbn;
6749 ufs2_daddr_t extblocks, datablocks;
6750 ufs_lbn_t tmpval, lbn, lastlbn;
6751 int frags, lastoff, iboff, allocblock, needj, error, i;
6752
6753 ump = ITOUMP(ip);
6754 mp = UFSTOVFS(ump);
6755 fs = ump->um_fs;
6756 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
6757 ("softdep_journal_freeblocks called on non-softdep filesystem"));
6758 vp = ITOV(ip);
6759 needj = 1;
6760 iboff = -1;
6761 allocblock = 0;
6762 extblocks = 0;
6763 datablocks = 0;
6764 frags = 0;
6765 freeblks = newfreeblks(mp, ip);
6766 ACQUIRE_LOCK(ump);
6767 /*
6768 * If we're truncating a removed file that will never be written
6769 * we don't need to journal the block frees. The canceled journals
6770 * for the allocations will suffice.
6771 */
6772 inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6773 if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED &&
6774 length == 0)
6775 needj = 0;
6776 CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d",
6777 ip->i_number, length, needj);
6778 FREE_LOCK(ump);
6779 /*
6780 * Calculate the lbn that we are truncating to. This results in -1
6781 * if we're truncating to 0 bytes. So it is the last lbn we want
6782 * to keep, not the first lbn we want to truncate.
6783 */
6784 lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1;
6785 lastoff = blkoff(fs, length);
6786 /*
6787 * Compute frags we are keeping in lastlbn. 0 means all.
6788 */
6789 if (lastlbn >= 0 && lastlbn < UFS_NDADDR) {
6790 frags = fragroundup(fs, lastoff);
6791 /* adp offset of last valid allocdirect. */
6792 iboff = lastlbn;
6793 } else if (lastlbn > 0)
6794 iboff = UFS_NDADDR;
6795 if (fs->fs_magic == FS_UFS2_MAGIC)
6796 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6797 /*
6798 * Handle normal data blocks and indirects. This section saves
6799 * values used after the inode update to complete frag and indirect
6800 * truncation.
6801 */
6802 if ((flags & IO_NORMAL) != 0) {
6803 /*
6804 * Handle truncation of whole direct and indirect blocks.
6805 */
6806 for (i = iboff + 1; i < UFS_NDADDR; i++)
6807 setup_freedirect(freeblks, ip, i, needj);
6808 for (i = 0, tmpval = NINDIR(fs), lbn = UFS_NDADDR;
6809 i < UFS_NIADDR;
6810 i++, lbn += tmpval, tmpval *= NINDIR(fs)) {
6811 /* Release a whole indirect tree. */
6812 if (lbn > lastlbn) {
6813 setup_freeindir(freeblks, ip, i, -lbn - i,
6814 needj);
6815 continue;
6816 }
6817 iboff = i + UFS_NDADDR;
6818 /*
6819 * Traverse partially truncated indirect tree.
6820 */
6821 if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn)
6822 setup_trunc_indir(freeblks, ip, -lbn - i,
6823 lastlbn, DIP(ip, i_ib[i]));
6824 }
6825 /*
6826 * Handle partial truncation to a frag boundary.
6827 */
6828 if (frags) {
6829 ufs2_daddr_t blkno;
6830 long oldfrags;
6831
6832 oldfrags = blksize(fs, ip, lastlbn);
6833 blkno = DIP(ip, i_db[lastlbn]);
6834 if (blkno && oldfrags != frags) {
6835 oldfrags -= frags;
6836 oldfrags = numfrags(fs, oldfrags);
6837 blkno += numfrags(fs, frags);
6838 newfreework(ump, freeblks, NULL, lastlbn,
6839 blkno, oldfrags, 0, needj);
6840 if (needj)
6841 adjust_newfreework(freeblks,
6842 numfrags(fs, frags));
6843 } else if (blkno == 0)
6844 allocblock = 1;
6845 }
6846 /*
6847 * Add a journal record for partial truncate if we are
6848 * handling indirect blocks. Non-indirects need no extra
6849 * journaling.
6850 */
6851 if (length != 0 && lastlbn >= UFS_NDADDR) {
6852 UFS_INODE_SET_FLAG(ip, IN_TRUNCATED);
6853 newjtrunc(freeblks, length, 0);
6854 }
6855 ip->i_size = length;
6856 DIP_SET(ip, i_size, ip->i_size);
6857 UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
6858 datablocks = DIP(ip, i_blocks) - extblocks;
6859 if (length != 0)
6860 datablocks = blkcount(fs, datablocks, length);
6861 freeblks->fb_len = length;
6862 }
6863 if ((flags & IO_EXT) != 0) {
6864 for (i = 0; i < UFS_NXADDR; i++)
6865 setup_freeext(freeblks, ip, i, needj);
6866 ip->i_din2->di_extsize = 0;
6867 datablocks += extblocks;
6868 UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
6869 }
6870 #ifdef QUOTA
6871 /* Reference the quotas in case the block count is wrong in the end. */
6872 quotaref(vp, freeblks->fb_quota);
6873 (void) chkdq(ip, -datablocks, NOCRED, FORCE);
6874 #endif
6875 freeblks->fb_chkcnt = -datablocks;
6876 UFS_LOCK(ump);
6877 fs->fs_pendingblocks += datablocks;
6878 UFS_UNLOCK(ump);
6879 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
6880 /*
6881 * Handle truncation of incomplete alloc direct dependencies. We
6882 * hold the inode block locked to prevent incomplete dependencies
6883 * from reaching the disk while we are eliminating those that
6884 * have been truncated. This is a partially inlined ffs_update().
6885 */
6886 ufs_itimes(vp);
6887 ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
6888 dbn = fsbtodb(fs, ino_to_fsba(fs, ip->i_number));
6889 error = ffs_breadz(ump, ump->um_devvp, dbn, dbn, (int)fs->fs_bsize,
6890 NULL, NULL, 0, cred, 0, NULL, &bp);
6891 if (error) {
6892 softdep_error("softdep_journal_freeblocks", error);
6893 return;
6894 }
6895 if (bp->b_bufsize == fs->fs_bsize)
6896 bp->b_flags |= B_CLUSTEROK;
6897 softdep_update_inodeblock(ip, bp, 0);
6898 if (ump->um_fstype == UFS1) {
6899 *((struct ufs1_dinode *)bp->b_data +
6900 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
6901 } else {
6902 ffs_update_dinode_ckhash(fs, ip->i_din2);
6903 *((struct ufs2_dinode *)bp->b_data +
6904 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
6905 }
6906 ACQUIRE_LOCK(ump);
6907 (void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6908 if ((inodedep->id_state & IOSTARTED) != 0)
6909 panic("softdep_journal_freeblocks: inode busy");
6910 /*
6911 * Add the freeblks structure to the list of operations that
6912 * must await the zeroed inode being written to disk. If the
6913 * block frees need not be journaled (needj == 0), the inode
6914 * has never been written to disk, so the freeblks below can
6915 * be processed once we have deleted the dependencies.
6916 */
6917 if (needj)
6918 WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6919 else
6920 freeblks->fb_state |= COMPLETE;
6921 if ((flags & IO_NORMAL) != 0) {
6922 TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) {
6923 if (adp->ad_offset > iboff)
6924 cancel_allocdirect(&inodedep->id_inoupdt, adp,
6925 freeblks);
6926 /*
6927 * Truncate the allocdirect. We could eliminate
6928 * or modify journal records as well.
6929 */
6930 else if (adp->ad_offset == iboff && frags)
6931 adp->ad_newsize = frags;
6932 }
6933 }
6934 if ((flags & IO_EXT) != 0)
6935 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
6936 cancel_allocdirect(&inodedep->id_extupdt, adp,
6937 freeblks);
6938 /*
6939 * Scan the bufwait list for newblock dependencies that will never
6940 * make it to disk.
6941 */
6942 LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) {
6943 if (wk->wk_type != D_ALLOCDIRECT)
6944 continue;
6945 adp = WK_ALLOCDIRECT(wk);
6946 if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) ||
6947 ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) {
6948 cancel_jfreeblk(freeblks, adp->ad_newblkno);
6949 cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork);
6950 WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
6951 }
6952 }
6953 /*
6954 * Add journal work.
6955 */
6956 LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps)
6957 add_to_journal(&jblkdep->jb_list);
6958 FREE_LOCK(ump);
6959 bdwrite(bp);
6960 /*
6961 * Truncate dependency structures beyond length.
6962 */
6963 trunc_dependencies(ip, freeblks, lastlbn, frags, flags);
6964 /*
6965 * This is only set when we need to allocate a fragment because
6966 * none existed at the end of a frag-sized file. It handles only
6967 * allocating a new, zero filled block.
6968 */
6969 if (allocblock) {
6970 ip->i_size = length - lastoff;
6971 DIP_SET(ip, i_size, ip->i_size);
6972 error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp);
6973 if (error != 0) {
6974 softdep_error("softdep_journal_freeblocks", error);
6975 return;
6976 }
6977 ip->i_size = length;
6978 DIP_SET(ip, i_size, length);
6979 UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE | IN_UPDATE);
6980 allocbuf(bp, frags);
6981 ffs_update(vp, 0);
6982 bawrite(bp);
6983 } else if (lastoff != 0 && vp->v_type != VDIR) {
6984 int size;
6985
6986 /*
6987 * Zero the end of a truncated frag or block.
6988 */
6989 size = sblksize(fs, length, lastlbn);
6990 error = bread(vp, lastlbn, size, cred, &bp);
6991 if (error == 0) {
6992 bzero((char *)bp->b_data + lastoff, size - lastoff);
6993 bawrite(bp);
6994 } else if (!ffs_fsfail_cleanup(ump, error)) {
softdep_error("softdep_journal_freeblocks", error);
6996 return;
6997 }
6998 }
6999 ACQUIRE_LOCK(ump);
7000 inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
7001 TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next);
7002 freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST;
7003 /*
7004 * We zero earlier truncations so they don't erroneously
7005 * update i_blocks.
7006 */
7007 if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0)
7008 TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next)
7009 fbn->fb_len = 0;
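/*
 * If the freeblks is already fully complete and no journal
 * entries remain outstanding, hand it off for processing below;
 * otherwise clear the local pointer and leave it for the
 * completion handlers.
 */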
7010 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE &&
7011 LIST_EMPTY(&freeblks->fb_jblkdephd))
7012 freeblks->fb_state |= INPROGRESS;
7013 else
7014 freeblks = NULL;
7015 FREE_LOCK(ump);
7016 if (freeblks)
7017 handle_workitem_freeblocks(freeblks, 0);
7018 trunc_pages(ip, length, extblocks, flags);
}
7021
7022 /*
7023 * Flush a JOP_SYNC to the journal.
7024 */
7025 void
7026 softdep_journal_fsync(struct inode *ip)
7027 {
7028 struct jfsync *jfsync;
7029 struct ufsmount *ump;
7030
7031 ump = ITOUMP(ip);
7032 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
7033 ("softdep_journal_fsync called on non-softdep filesystem"));
7034 if ((ip->i_flag & IN_TRUNCATED) == 0)
7035 return;
7036 ip->i_flag &= ~IN_TRUNCATED;
7037 jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO);
7038 workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ump));
7039 jfsync->jfs_size = ip->i_size;
7040 jfsync->jfs_ino = ip->i_number;
7041 ACQUIRE_LOCK(ump);
7042 add_to_journal(&jfsync->jfs_list);
7043 jwait(&jfsync->jfs_list, MNT_WAIT);
7044 FREE_LOCK(ump);
7045 }
7046
7047 /*
7048 * Block de-allocation dependencies.
7049 *
7050 * When blocks are de-allocated, the on-disk pointers must be nullified before
7051 * the blocks are made available for use by other files. (The true
7052 * requirement is that old pointers must be nullified before new on-disk
7053 * pointers are set. We chose this slightly more stringent requirement to
7054 * reduce complexity.) Our implementation handles this dependency by updating
7055 * the inode (or indirect block) appropriately but delaying the actual block
7056 * de-allocation (i.e., freemap and free space count manipulation) until
7057 * after the updated versions reach stable storage. After the disk is
7058 * updated, the blocks can be safely de-allocated whenever it is convenient.
7059 * This implementation handles only the common case of reducing a file's
7060 * length to zero. Other cases are handled by the conventional synchronous
7061 * write approach.
7062 *
7063 * The ffs implementation with which we worked double-checks
7064 * the state of the block pointers and file size as it reduces
7065 * a file's length. Some of this code is replicated here in our
7066 * soft updates implementation. The freeblks->fb_chkcnt field is
7067 * used to transfer a part of this information to the procedure
7068 * that eventually de-allocates the blocks.
7069 *
7070 * This routine should be called from the routine that shortens
7071 * a file's length, before the inode's size or block pointers
7072 * are modified. It will save the block pointer information for
7073 * later release and zero the inode so that the calling routine
7074 * can release it.
7075 */
7076 void
7077 softdep_setup_freeblocks(
7078 struct inode *ip, /* The inode whose length is to be reduced */
7079 off_t length, /* The new length for the file */
7080 int flags) /* IO_EXT and/or IO_NORMAL */
7081 {
7082 struct ufs1_dinode *dp1;
7083 struct ufs2_dinode *dp2;
7084 struct freeblks *freeblks;
7085 struct inodedep *inodedep;
7086 struct allocdirect *adp;
7087 struct ufsmount *ump;
7088 struct buf *bp;
7089 struct fs *fs;
7090 ufs2_daddr_t extblocks, datablocks;
7091 struct mount *mp;
7092 int i, delay, error;
7093 ufs_lbn_t tmpval;
7094 ufs_lbn_t lbn;
7095
7096 ump = ITOUMP(ip);
7097 mp = UFSTOVFS(ump);
7098 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
7099 ("softdep_setup_freeblocks called on non-softdep filesystem"));
CTR2(KTR_SUJ, "softdep_setup_freeblocks: ip %d length %ld",
7101 ip->i_number, length);
7102 KASSERT(length == 0, ("softdep_setup_freeblocks: non-zero length"));
7103 fs = ump->um_fs;
7104 if ((error = bread(ump->um_devvp,
7105 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
7106 (int)fs->fs_bsize, NOCRED, &bp)) != 0) {
7107 if (!ffs_fsfail_cleanup(ump, error))
7108 softdep_error("softdep_setup_freeblocks", error);
7109 return;
7110 }
7111 freeblks = newfreeblks(mp, ip);
7112 extblocks = 0;
7113 datablocks = 0;
7114 if (fs->fs_magic == FS_UFS2_MAGIC)
7115 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
7116 if ((flags & IO_NORMAL) != 0) {
7117 for (i = 0; i < UFS_NDADDR; i++)
7118 setup_freedirect(freeblks, ip, i, 0);
7119 for (i = 0, tmpval = NINDIR(fs), lbn = UFS_NDADDR;
7120 i < UFS_NIADDR;
7121 i++, lbn += tmpval, tmpval *= NINDIR(fs))
setup_freeindir(freeblks, ip, i, -lbn - i, 0);
7123 ip->i_size = 0;
7124 DIP_SET(ip, i_size, 0);
7125 UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
7126 datablocks = DIP(ip, i_blocks) - extblocks;
7127 }
7128 if ((flags & IO_EXT) != 0) {
7129 for (i = 0; i < UFS_NXADDR; i++)
7130 setup_freeext(freeblks, ip, i, 0);
7131 ip->i_din2->di_extsize = 0;
7132 datablocks += extblocks;
7133 UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
7134 }
7135 #ifdef QUOTA
7136 /* Reference the quotas in case the block count is wrong in the end. */
7137 quotaref(ITOV(ip), freeblks->fb_quota);
7138 (void) chkdq(ip, -datablocks, NOCRED, FORCE);
7139 #endif
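/*
 * Record the negative of the expected block count; freeblks_free()
 * counts it back toward zero as the blocks are actually released.
 */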
7140 freeblks->fb_chkcnt = -datablocks;
7141 UFS_LOCK(ump);
7142 fs->fs_pendingblocks += datablocks;
7143 UFS_UNLOCK(ump);
7144 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
7145 /*
7146 * Push the zero'ed inode to its disk buffer so that we are free
7147 * to delete its dependencies below. Once the dependencies are gone
7148 * the buffer can be safely released.
7149 */
7150 if (ump->um_fstype == UFS1) {
7151 dp1 = ((struct ufs1_dinode *)bp->b_data +
7152 ino_to_fsbo(fs, ip->i_number));
7153 ip->i_din1->di_freelink = dp1->di_freelink;
7154 *dp1 = *ip->i_din1;
7155 } else {
7156 dp2 = ((struct ufs2_dinode *)bp->b_data +
7157 ino_to_fsbo(fs, ip->i_number));
7158 ip->i_din2->di_freelink = dp2->di_freelink;
7159 ffs_update_dinode_ckhash(fs, ip->i_din2);
7160 *dp2 = *ip->i_din2;
7161 }
7162 /*
7163 * Find and eliminate any inode dependencies.
7164 */
7165 ACQUIRE_LOCK(ump);
7166 (void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
7167 if ((inodedep->id_state & IOSTARTED) != 0)
7168 panic("softdep_setup_freeblocks: inode busy");
7169 /*
7170 * Add the freeblks structure to the list of operations that
7171 * must await the zero'ed inode being written to disk. If we
7172 * still have a bitmap dependency (delay == 0), then the inode
7173 * has never been written to disk, so we can process the
7174 * freeblks below once we have deleted the dependencies.
7175 */
7176 delay = (inodedep->id_state & DEPCOMPLETE);
7177 if (delay)
7178 WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
7179 else
7180 freeblks->fb_state |= COMPLETE;
7181 /*
7182 * Because the file length has been truncated to zero, any
7183 * pending block allocation dependency structures associated
7184 * with this inode are obsolete and can simply be de-allocated.
7185 * We must first merge the two dependency lists to get rid of
7186 * any duplicate freefrag structures, then purge the merged list.
7187 * If we still have a bitmap dependency, then the inode has never
7188 * been written to disk, so we can free any fragments without delay.
7189 */
7190 if (flags & IO_NORMAL) {
7191 merge_inode_lists(&inodedep->id_newinoupdt,
7192 &inodedep->id_inoupdt);
7193 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
7194 cancel_allocdirect(&inodedep->id_inoupdt, adp,
7195 freeblks);
7196 }
7197 if (flags & IO_EXT) {
7198 merge_inode_lists(&inodedep->id_newextupdt,
7199 &inodedep->id_extupdt);
7200 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
7201 cancel_allocdirect(&inodedep->id_extupdt, adp,
7202 freeblks);
7203 }
7204 FREE_LOCK(ump);
7205 bdwrite(bp);
7206 trunc_dependencies(ip, freeblks, -1, 0, flags);
7207 ACQUIRE_LOCK(ump);
7208 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
7209 (void) free_inodedep(inodedep);
7210 freeblks->fb_state |= DEPCOMPLETE;
7211 /*
7212 * If the inode with zeroed block pointers is now on disk
7213 * we can start freeing blocks.
7214 */
7215 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
7216 freeblks->fb_state |= INPROGRESS;
7217 else
7218 freeblks = NULL;
7219 FREE_LOCK(ump);
7220 if (freeblks)
7221 handle_workitem_freeblocks(freeblks, 0);
7222 trunc_pages(ip, length, extblocks, flags);
7223 }
7224
7225 /*
7226 * Eliminate pages from the page cache that back parts of this inode and
7227 * adjust the vnode pager's idea of our size. This prevents stale data
7228 * from hanging around in the page cache.
7229 */
7230 static void
7231 trunc_pages(
7232 struct inode *ip,
7233 off_t length,
7234 ufs2_daddr_t extblocks,
7235 int flags)
7236 {
7237 struct vnode *vp;
7238 struct fs *fs;
7239 ufs_lbn_t lbn;
7240 off_t end, extend;
7241
7242 vp = ITOV(ip);
7243 fs = ITOFS(ip);
7244 extend = OFF_TO_IDX(lblktosize(fs, -extblocks));
7245 if ((flags & IO_EXT) != 0)
7246 vn_pages_remove(vp, extend, 0);
7247 if ((flags & IO_NORMAL) == 0)
7248 return;
7249 BO_LOCK(&vp->v_bufobj);
7250 drain_output(vp);
7251 BO_UNLOCK(&vp->v_bufobj);
7252 /*
* The vnode pager eliminates file pages; we eliminate the
* indirects below.
7255 */
7256 vnode_pager_setsize(vp, length);
7257 /*
7258 * Calculate the end based on the last indirect we want to keep. If
7259 * the block extends into indirects we can just use the negative of
7260 * its lbn. Doubles and triples exist at lower numbers so we must
* be careful not to remove those, if they exist. Double and triple
* indirect lbns do not overlap with others, so it is not important
7263 * to verify how many levels are required.
7264 */
7265 lbn = lblkno(fs, length);
7266 if (lbn >= UFS_NDADDR) {
7267 /* Calculate the virtual lbn of the triple indirect. */
7268 lbn = -lbn - (UFS_NIADDR - 1);
7269 end = OFF_TO_IDX(lblktosize(fs, lbn));
7270 } else
7271 end = extend;
7272 vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end);
7273 }
7274
7275 /*
7276 * See if the buf bp is in the range eliminated by truncation.
7277 */
7278 static int
7279 trunc_check_buf(
7280 struct buf *bp,
7281 int *blkoffp,
7282 ufs_lbn_t lastlbn,
7283 int lastoff,
7284 int flags)
7285 {
7286 ufs_lbn_t lbn;
7287
7288 *blkoffp = 0;
7289 /* Only match ext/normal blocks as appropriate. */
7290 if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) ||
7291 ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0))
7292 return (0);
7293 /* ALTDATA is always a full truncation. */
7294 if ((bp->b_xflags & BX_ALTDATA) != 0)
7295 return (1);
7296 /* -1 is full truncation. */
7297 if (lastlbn == -1)
7298 return (1);
7299 /*
7300 * If this is a partial truncate we only want those
7301 * blocks and indirect blocks that cover the range
7302 * we're after.
7303 */
7304 lbn = bp->b_lblkno;
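/*
 * Indirect blocks live in the negative lbn namespace: an indirect
 * covering data starting at base is keyed as -(base + level), with
 * level 0 for a single indirect. Map it back to the first data lbn
 * it covers so the range comparison below works for data and
 * indirect blocks alike.
 */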
7305 if (lbn < 0)
7306 lbn = -(lbn + lbn_level(lbn));
7307 if (lbn < lastlbn)
7308 return (0);
7309 /* Here we only truncate lblkno if it's partial. */
7310 if (lbn == lastlbn) {
7311 if (lastoff == 0)
7312 return (0);
7313 *blkoffp = lastoff;
7314 }
7315 return (1);
7316 }
7317
7318 /*
7319 * Eliminate any dependencies that exist in memory beyond lblkno:off
7320 */
7321 static void
7322 trunc_dependencies(
7323 struct inode *ip,
7324 struct freeblks *freeblks,
7325 ufs_lbn_t lastlbn,
7326 int lastoff,
7327 int flags)
7328 {
7329 struct bufobj *bo;
7330 struct vnode *vp;
7331 struct buf *bp;
7332 int blkoff;
7333
7334 /*
7335 * We must wait for any I/O in progress to finish so that
7336 * all potential buffers on the dirty list will be visible.
7337 * Once they are all there, walk the list and get rid of
7338 * any dependencies.
7339 */
7340 vp = ITOV(ip);
7341 bo = &vp->v_bufobj;
7342 BO_LOCK(bo);
7343 drain_output(vp);
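/*
 * BV_SCANNED marks bufs that have already been visited. Clear it
 * first so that each buf is considered only once even though the
 * scan restarts from the head of the list every time the bufobj
 * lock is dropped.
 */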
7344 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
7345 bp->b_vflags &= ~BV_SCANNED;
7346 restart:
7347 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
7348 if (bp->b_vflags & BV_SCANNED)
7349 continue;
7350 if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
7351 bp->b_vflags |= BV_SCANNED;
7352 continue;
7353 }
7354 KASSERT(bp->b_bufobj == bo, ("Wrong object in buffer"));
7355 if ((bp = getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT)) == NULL)
7356 goto restart;
7357 BO_UNLOCK(bo);
7358 if (deallocate_dependencies(bp, freeblks, blkoff))
7359 bqrelse(bp);
7360 else
7361 brelse(bp);
7362 BO_LOCK(bo);
7363 goto restart;
7364 }
7365 /*
7366 * Now do the work of vtruncbuf while also matching indirect blocks.
7367 */
7368 TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs)
7369 bp->b_vflags &= ~BV_SCANNED;
7370 cleanrestart:
7371 TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) {
7372 if (bp->b_vflags & BV_SCANNED)
7373 continue;
7374 if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
7375 bp->b_vflags |= BV_SCANNED;
7376 continue;
7377 }
7378 if (BUF_LOCK(bp,
7379 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
7380 BO_LOCKPTR(bo)) == ENOLCK) {
7381 BO_LOCK(bo);
7382 goto cleanrestart;
7383 }
7384 BO_LOCK(bo);
7385 bp->b_vflags |= BV_SCANNED;
7386 BO_UNLOCK(bo);
7387 bremfree(bp);
7388 if (blkoff != 0) {
7389 allocbuf(bp, blkoff);
7390 bqrelse(bp);
7391 } else {
7392 bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF;
7393 brelse(bp);
7394 }
7395 BO_LOCK(bo);
7396 goto cleanrestart;
7397 }
7398 drain_output(vp);
7399 BO_UNLOCK(bo);
7400 }
7401
7402 static int
7403 cancel_pagedep(
7404 struct pagedep *pagedep,
7405 struct freeblks *freeblks,
7406 int blkoff)
7407 {
7408 struct jremref *jremref;
7409 struct jmvref *jmvref;
7410 struct dirrem *dirrem, *tmp;
7411 int i;
7412
7413 /*
7414 * Copy any directory remove dependencies to the list
* to be processed after the freeblks proceeds. If the
* directory entry never made it to disk, the dirrems
* can be dumped directly onto the work list.
7418 */
7419 LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) {
7420 /* Skip this directory removal if it is intended to remain. */
7421 if (dirrem->dm_offset < blkoff)
7422 continue;
7423 /*
* If there are any dirrems, we wait for the journal write
* to complete and then restart the buf scan, as the lock
* has been dropped.
7427 */
7428 while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) {
7429 jwait(&jremref->jr_list, MNT_WAIT);
7430 return (ERESTART);
7431 }
7432 LIST_REMOVE(dirrem, dm_next);
7433 dirrem->dm_dirinum = pagedep->pd_ino;
7434 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list);
7435 }
7436 while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) {
7437 jwait(&jmvref->jm_list, MNT_WAIT);
7438 return (ERESTART);
7439 }
7440 /*
7441 * When we're partially truncating a pagedep we just want to flush
* journal entries and return. There cannot be any adds in the
* truncated portion of the directory, and the newblk must remain
* if part of the block remains.
7445 */
7446 if (blkoff != 0) {
7447 struct diradd *dap;
7448
7449 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
7450 if (dap->da_offset > blkoff)
7451 panic("cancel_pagedep: diradd %p off %d > %d",
7452 dap, dap->da_offset, blkoff);
7453 for (i = 0; i < DAHASHSZ; i++)
7454 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist)
7455 if (dap->da_offset > blkoff)
7456 panic("cancel_pagedep: diradd %p off %d > %d",
7457 dap, dap->da_offset, blkoff);
7458 return (0);
7459 }
7460 /*
7461 * There should be no directory add dependencies present
7462 * as the directory could not be truncated until all
7463 * children were removed.
7464 */
7465 KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL,
7466 ("deallocate_dependencies: pendinghd != NULL"));
7467 for (i = 0; i < DAHASHSZ; i++)
7468 KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL,
7469 ("deallocate_dependencies: diraddhd != NULL"));
7470 if ((pagedep->pd_state & NEWBLOCK) != 0)
7471 free_newdirblk(pagedep->pd_newdirblk);
7472 if (free_pagedep(pagedep) == 0)
7473 panic("Failed to free pagedep %p", pagedep);
7474 return (0);
7475 }
7476
7477 /*
7478 * Reclaim any dependency structures from a buffer that is about to
* be reallocated to a new vnode. The buffer must be locked; thus,
7480 * no I/O completion operations can occur while we are manipulating
7481 * its associated dependencies. The mutex is held so that other I/O's
7482 * associated with related dependencies do not occur.
7483 */
7484 static int
7485 deallocate_dependencies(
7486 struct buf *bp,
7487 struct freeblks *freeblks,
7488 int off)
7489 {
7490 struct indirdep *indirdep;
7491 struct pagedep *pagedep;
7492 struct worklist *wk, *wkn;
7493 struct ufsmount *ump;
7494
7495 ump = softdep_bp_to_mp(bp);
7496 if (ump == NULL)
7497 goto done;
7498 ACQUIRE_LOCK(ump);
7499 LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) {
7500 switch (wk->wk_type) {
7501 case D_INDIRDEP:
7502 indirdep = WK_INDIRDEP(wk);
7503 if (bp->b_lblkno >= 0 ||
7504 bp->b_blkno != indirdep->ir_savebp->b_lblkno)
7505 panic("deallocate_dependencies: not indir");
7506 cancel_indirdep(indirdep, bp, freeblks);
7507 continue;
7508
7509 case D_PAGEDEP:
7510 pagedep = WK_PAGEDEP(wk);
7511 if (cancel_pagedep(pagedep, freeblks, off)) {
7512 FREE_LOCK(ump);
7513 return (ERESTART);
7514 }
7515 continue;
7516
7517 case D_ALLOCINDIR:
7518 /*
7519 * Simply remove the allocindir, we'll find it via
7520 * the indirdep where we can clear pointers if
7521 * needed.
7522 */
7523 WORKLIST_REMOVE(wk);
7524 continue;
7525
7526 case D_FREEWORK:
7527 /*
7528 * A truncation is waiting for the zero'd pointers
7529 * to be written. It can be freed when the freeblks
7530 * is journaled.
7531 */
7532 WORKLIST_REMOVE(wk);
7533 wk->wk_state |= ONDEPLIST;
7534 WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
7535 break;
7536
7537 case D_ALLOCDIRECT:
7538 if (off != 0)
7539 continue;
7540 /* FALLTHROUGH */
7541 default:
7542 panic("deallocate_dependencies: Unexpected type %s",
7543 TYPENAME(wk->wk_type));
7544 /* NOTREACHED */
7545 }
7546 }
7547 FREE_LOCK(ump);
7548 done:
7549 /*
* Don't throw away this buf; we were partially truncating and
7551 * some deps may always remain.
7552 */
7553 if (off) {
7554 allocbuf(bp, off);
7555 bp->b_vflags |= BV_SCANNED;
7556 return (EBUSY);
7557 }
7558 bp->b_flags |= B_INVAL | B_NOCACHE;
7559
7560 return (0);
7561 }
7562
7563 /*
7564 * An allocdirect is being canceled due to a truncate. We must make sure
7565 * the journal entry is released in concert with the blkfree that releases
7566 * the storage. Completed journal entries must not be released until the
7567 * space is no longer pointed to by the inode or in the bitmap.
7568 */
7569 static void
7570 cancel_allocdirect(
7571 struct allocdirectlst *adphead,
7572 struct allocdirect *adp,
7573 struct freeblks *freeblks)
7574 {
7575 struct freework *freework;
7576 struct newblk *newblk;
7577 struct worklist *wk;
7578
7579 TAILQ_REMOVE(adphead, adp, ad_next);
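/*
 * An allocdirect begins with an embedded newblk, so it may be
 * treated as the more general type below.
 */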
7580 newblk = (struct newblk *)adp;
7581 freework = NULL;
7582 /*
7583 * Find the correct freework structure.
7584 */
7585 LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) {
7586 if (wk->wk_type != D_FREEWORK)
7587 continue;
7588 freework = WK_FREEWORK(wk);
7589 if (freework->fw_blkno == newblk->nb_newblkno)
7590 break;
7591 }
7592 if (freework == NULL)
7593 panic("cancel_allocdirect: Freework not found");
7594 /*
7595 * If a newblk exists at all we still have the journal entry that
7596 * initiated the allocation so we do not need to journal the free.
7597 */
7598 cancel_jfreeblk(freeblks, freework->fw_blkno);
7599 /*
7600 * If the journal hasn't been written the jnewblk must be passed
7601 * to the call to ffs_blkfree that reclaims the space. We accomplish
7602 * this by linking the journal dependency into the freework to be
7603 * freed when freework_freeblock() is called. If the journal has
7604 * been written we can simply reclaim the journal space when the
7605 * freeblks work is complete.
7606 */
7607 freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list,
7608 &freeblks->fb_jwork);
7609 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
7610 }
7611
7612 /*
7613 * Cancel a new block allocation. May be an indirect or direct block. We
7614 * remove it from various lists and return any journal record that needs to
7615 * be resolved by the caller.
7616 *
7617 * A special consideration is made for indirects which were never pointed
7618 * at on disk and will never be found once this block is released.
7619 */
7620 static struct jnewblk *
7621 cancel_newblk(
7622 struct newblk *newblk,
7623 struct worklist *wk,
7624 struct workhead *wkhd)
7625 {
7626 struct jnewblk *jnewblk;
7627
7628 CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno);
7629
7630 newblk->nb_state |= GOINGAWAY;
7631 /*
7632 * Previously we traversed the completedhd on each indirdep
7633 * attached to this newblk to cancel them and gather journal
7634 * work. Since we need only the oldest journal segment and
7635 * the lowest point on the tree will always have the oldest
7636 * journal segment we are free to release the segments
7637 * of any subordinates and may leave the indirdep list to
7638 * indirdep_complete() when this newblk is freed.
7639 */
7640 if (newblk->nb_state & ONDEPLIST) {
7641 newblk->nb_state &= ~ONDEPLIST;
7642 LIST_REMOVE(newblk, nb_deps);
7643 }
7644 if (newblk->nb_state & ONWORKLIST)
7645 WORKLIST_REMOVE(&newblk->nb_list);
7646 /*
7647 * If the journal entry hasn't been written we save a pointer to
7648 * the dependency that frees it until it is written or the
7649 * superseding operation completes.
7650 */
7651 jnewblk = newblk->nb_jnewblk;
7652 if (jnewblk != NULL && wk != NULL) {
7653 newblk->nb_jnewblk = NULL;
7654 jnewblk->jn_dep = wk;
7655 }
7656 if (!LIST_EMPTY(&newblk->nb_jwork))
7657 jwork_move(wkhd, &newblk->nb_jwork);
7658 /*
7659 * When truncating we must free the newdirblk early to remove
7660 * the pagedep from the hash before returning.
7661 */
7662 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7663 free_newdirblk(WK_NEWDIRBLK(wk));
7664 if (!LIST_EMPTY(&newblk->nb_newdirblk))
7665 panic("cancel_newblk: extra newdirblk");
7666
7667 return (jnewblk);
7668 }
7669
7670 /*
7671 * Schedule the freefrag associated with a newblk to be released once
7672 * the pointers are written and the previous block is no longer needed.
7673 */
7674 static void
7675 newblk_freefrag(struct newblk *newblk)
7676 {
7677 struct freefrag *freefrag;
7678
7679 if (newblk->nb_freefrag == NULL)
7680 return;
7681 freefrag = newblk->nb_freefrag;
7682 newblk->nb_freefrag = NULL;
7683 freefrag->ff_state |= COMPLETE;
7684 if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
7685 add_to_worklist(&freefrag->ff_list, 0);
7686 }
7687
7688 /*
7689 * Free a newblk. Generate a new freefrag work request if appropriate.
7690 * This must be called after the inode pointer and any direct block pointers
7691 * are valid or fully removed via truncate or frag extension.
7692 */
7693 static void
7694 free_newblk(struct newblk *newblk)
7695 {
7696 struct indirdep *indirdep;
7697 struct worklist *wk;
7698
7699 KASSERT(newblk->nb_jnewblk == NULL,
7700 ("free_newblk: jnewblk %p still attached", newblk->nb_jnewblk));
7701 KASSERT(newblk->nb_list.wk_type != D_NEWBLK,
7702 ("free_newblk: unclaimed newblk"));
7703 LOCK_OWNED(VFSTOUFS(newblk->nb_list.wk_mp));
7704 newblk_freefrag(newblk);
7705 if (newblk->nb_state & ONDEPLIST)
7706 LIST_REMOVE(newblk, nb_deps);
7707 if (newblk->nb_state & ONWORKLIST)
7708 WORKLIST_REMOVE(&newblk->nb_list);
7709 LIST_REMOVE(newblk, nb_hash);
7710 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7711 free_newdirblk(WK_NEWDIRBLK(wk));
7712 if (!LIST_EMPTY(&newblk->nb_newdirblk))
7713 panic("free_newblk: extra newdirblk");
7714 while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL)
7715 indirdep_complete(indirdep);
7716 handle_jwork(&newblk->nb_jwork);
7717 WORKITEM_FREE(newblk, D_NEWBLK);
7718 }
7719
7720 /*
7721 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep.
7722 */
7723 static void
7724 free_newdirblk(struct newdirblk *newdirblk)
7725 {
7726 struct pagedep *pagedep;
7727 struct diradd *dap;
7728 struct worklist *wk;
7729
7730 LOCK_OWNED(VFSTOUFS(newdirblk->db_list.wk_mp));
7731 WORKLIST_REMOVE(&newdirblk->db_list);
7732 /*
7733 * If the pagedep is still linked onto the directory buffer
7734 * dependency chain, then some of the entries on the
7735 * pd_pendinghd list may not be committed to disk yet. In
7736 * this case, we will simply clear the NEWBLOCK flag and
7737 * let the pd_pendinghd list be processed when the pagedep
7738 * is next written. If the pagedep is no longer on the buffer
7739 * dependency chain, then all the entries on the pd_pending
7740 * list are committed to disk and we can free them here.
7741 */
7742 pagedep = newdirblk->db_pagedep;
7743 pagedep->pd_state &= ~NEWBLOCK;
7744 if ((pagedep->pd_state & ONWORKLIST) == 0) {
7745 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
7746 free_diradd(dap, NULL);
7747 /*
7748 * If no dependencies remain, the pagedep will be freed.
7749 */
7750 free_pagedep(pagedep);
7751 }
7752 /* Should only ever be one item in the list. */
7753 while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) {
7754 WORKLIST_REMOVE(wk);
7755 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
7756 }
7757 WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
7758 }
7759
7760 /*
7761 * Prepare an inode to be freed. The actual free operation is not
7762 * done until the zero'ed inode has been written to disk.
7763 */
7764 void
7765 softdep_freefile(
7766 struct vnode *pvp,
7767 ino_t ino,
7768 int mode)
7769 {
7770 struct inode *ip = VTOI(pvp);
7771 struct inodedep *inodedep;
7772 struct freefile *freefile;
7773 struct freeblks *freeblks;
7774 struct ufsmount *ump;
7775
7776 ump = ITOUMP(ip);
7777 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
7778 ("softdep_freefile called on non-softdep filesystem"));
7779 /*
7780 * This sets up the inode de-allocation dependency.
7781 */
7782 freefile = malloc(sizeof(struct freefile),
7783 M_FREEFILE, M_SOFTDEP_FLAGS);
7784 workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount);
7785 freefile->fx_mode = mode;
7786 freefile->fx_oldinum = ino;
7787 freefile->fx_devvp = ump->um_devvp;
7788 LIST_INIT(&freefile->fx_jwork);
7789 UFS_LOCK(ump);
7790 ump->um_fs->fs_pendinginodes += 1;
7791 UFS_UNLOCK(ump);
7792
7793 /*
7794 * If the inodedep does not exist, then the zero'ed inode has
7795 * been written to disk. If the allocated inode has never been
7796 * written to disk, then the on-disk inode is zero'ed. In either
7797 * case we can free the file immediately. If the journal was
7798 * canceled before being written the inode will never make it to
* disk and we must send the canceled journal entries to
7800 * ffs_freefile() to be cleared in conjunction with the bitmap.
7801 * Any blocks waiting on the inode to write can be safely freed
* here as it will never be written.
7803 */
7804 ACQUIRE_LOCK(ump);
7805 inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7806 if (inodedep) {
7807 /*
7808 * Clear out freeblks that no longer need to reference
7809 * this inode.
7810 */
7811 while ((freeblks =
7812 TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) {
7813 TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks,
7814 fb_next);
7815 freeblks->fb_state &= ~ONDEPLIST;
7816 }
7817 /*
7818 * Remove this inode from the unlinked list.
7819 */
7820 if (inodedep->id_state & UNLINKED) {
7821 /*
7822 * Save the journal work to be freed with the bitmap
7823 * before we clear UNLINKED. Otherwise it can be lost
7824 * if the inode block is written.
7825 */
7826 handle_bufwait(inodedep, &freefile->fx_jwork);
7827 clear_unlinked_inodedep(inodedep);
7828 /*
7829 * Re-acquire inodedep as we've dropped the
7830 * per-filesystem lock in clear_unlinked_inodedep().
7831 */
7832 inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7833 }
7834 }
7835 if (inodedep == NULL || check_inode_unwritten(inodedep)) {
7836 FREE_LOCK(ump);
7837 handle_workitem_freefile(freefile);
7838 return;
7839 }
7840 if ((inodedep->id_state & DEPCOMPLETE) == 0)
7841 inodedep->id_state |= GOINGAWAY;
7842 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
7843 FREE_LOCK(ump);
7844 if (ip->i_number == ino)
7845 UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
7846 }
7847
7848 /*
7849 * Check to see if an inode has never been written to disk. If
7850 * so free the inodedep and return success, otherwise return failure.
7851 *
7852 * If we still have a bitmap dependency, then the inode has never
7853 * been written to disk. Drop the dependency as it is no longer
7854 * necessary since the inode is being deallocated. We set the
7855 * ALLCOMPLETE flags since the bitmap now properly shows that the
7856 * inode is not allocated. Even if the inode is actively being
7857 * written, it has been rolled back to its zero'ed state, so we
7858 * are ensured that a zero inode is what is on the disk. For short
7859 * lived files, this change will usually result in removing all the
7860 * dependencies from the inode so that it can be freed immediately.
7861 */
7862 static int
7863 check_inode_unwritten(struct inodedep *inodedep)
7864 {
7865
7866 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7867
7868 if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 ||
7869 !LIST_EMPTY(&inodedep->id_dirremhd) ||
7870 !LIST_EMPTY(&inodedep->id_pendinghd) ||
7871 !LIST_EMPTY(&inodedep->id_bufwait) ||
7872 !LIST_EMPTY(&inodedep->id_inowait) ||
7873 !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7874 !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7875 !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7876 !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7877 !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7878 !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7879 inodedep->id_mkdiradd != NULL ||
7880 inodedep->id_nlinkdelta != 0)
7881 return (0);
7882 /*
7883 * Another process might be in initiate_write_inodeblock_ufs[12]
7884 * trying to allocate memory without holding "Softdep Lock".
7885 */
7886 if ((inodedep->id_state & IOSTARTED) != 0 &&
7887 inodedep->id_savedino1 == NULL)
7888 return (0);
7889
7890 if (inodedep->id_state & ONDEPLIST)
7891 LIST_REMOVE(inodedep, id_deps);
7892 inodedep->id_state &= ~ONDEPLIST;
7893 inodedep->id_state |= ALLCOMPLETE;
7894 inodedep->id_bmsafemap = NULL;
7895 if (inodedep->id_state & ONWORKLIST)
7896 WORKLIST_REMOVE(&inodedep->id_list);
7897 if (inodedep->id_savedino1 != NULL) {
7898 free(inodedep->id_savedino1, M_SAVEDINO);
7899 inodedep->id_savedino1 = NULL;
7900 }
7901 if (free_inodedep(inodedep) == 0)
7902 panic("check_inode_unwritten: busy inode");
7903 return (1);
7904 }
7905
7906 static int
7907 check_inodedep_free(struct inodedep *inodedep)
7908 {
7909
7910 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7911 if ((inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
7912 !LIST_EMPTY(&inodedep->id_dirremhd) ||
7913 !LIST_EMPTY(&inodedep->id_pendinghd) ||
7914 !LIST_EMPTY(&inodedep->id_bufwait) ||
7915 !LIST_EMPTY(&inodedep->id_inowait) ||
7916 !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7917 !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7918 !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7919 !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7920 !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7921 !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7922 inodedep->id_mkdiradd != NULL ||
7923 inodedep->id_nlinkdelta != 0 ||
7924 inodedep->id_savedino1 != NULL)
7925 return (0);
7926 return (1);
7927 }
7928
7929 /*
7930 * Try to free an inodedep structure. Return 1 if it could be freed.
7931 */
7932 static int
7933 free_inodedep(struct inodedep *inodedep)
7934 {
7935
7936 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7937 if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 ||
7938 !check_inodedep_free(inodedep))
7939 return (0);
7940 if (inodedep->id_state & ONDEPLIST)
7941 LIST_REMOVE(inodedep, id_deps);
7942 LIST_REMOVE(inodedep, id_hash);
7943 WORKITEM_FREE(inodedep, D_INODEDEP);
7944 return (1);
7945 }
7946
7947 /*
7948 * Free the block referenced by a freework structure. The parent freeblks
7949 * structure is released and completed when the final cg bitmap reaches
7950 * the disk. This routine may be freeing a jnewblk which never made it to
* disk, in which case we do not have to wait as the operation is undone
7952 * in memory immediately.
7953 */
7954 static void
7955 freework_freeblock(struct freework *freework, u_long key)
7956 {
7957 struct freeblks *freeblks;
7958 struct jnewblk *jnewblk;
7959 struct ufsmount *ump;
7960 struct workhead wkhd;
7961 struct fs *fs;
7962 int bsize;
7963 int needj;
7964
7965 ump = VFSTOUFS(freework->fw_list.wk_mp);
7966 LOCK_OWNED(ump);
7967 /*
7968 * Handle partial truncate separately.
7969 */
7970 if (freework->fw_indir) {
7971 complete_trunc_indir(freework);
7972 return;
7973 }
7974 freeblks = freework->fw_freeblks;
7975 fs = ump->um_fs;
7976 needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0;
7977 bsize = lfragtosize(fs, freework->fw_frags);
7978 LIST_INIT(&wkhd);
7979 /*
7980 * DEPCOMPLETE is cleared in indirblk_insert() if the block lives
7981 * on the indirblk hashtable and prevents premature freeing.
7982 */
7983 freework->fw_state |= DEPCOMPLETE;
7984 /*
7985 * SUJ needs to wait for the segment referencing freed indirect
7986 * blocks to expire so that we know the checker will not confuse
7987 * a re-allocated indirect block with its old contents.
7988 */
7989 if (needj && freework->fw_lbn <= -UFS_NDADDR)
7990 indirblk_insert(freework);
7991 /*
7992 * If we are canceling an existing jnewblk pass it to the free
7993 * routine, otherwise pass the freeblk which will ultimately
7994 * release the freeblks. If we're not journaling, we can just
7995 * free the freeblks immediately.
7996 */
7997 jnewblk = freework->fw_jnewblk;
7998 if (jnewblk != NULL) {
7999 cancel_jnewblk(jnewblk, &wkhd);
8000 needj = 0;
8001 } else if (needj) {
8002 freework->fw_state |= DELAYEDFREE;
8003 freeblks->fb_cgwait++;
8004 WORKLIST_INSERT(&wkhd, &freework->fw_list);
8005 }
8006 FREE_LOCK(ump);
8007 freeblks_free(ump, freeblks, btodb(bsize));
8008 CTR4(KTR_SUJ,
8009 "freework_freeblock: ino %jd blkno %jd lbn %jd size %d",
8010 freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize);
8011 ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize,
8012 freeblks->fb_inum, freeblks->fb_vtype, &wkhd, key);
8013 ACQUIRE_LOCK(ump);
8014 /*
8015 * The jnewblk will be discarded and the bits in the map never
8016 * made it to disk. We can immediately free the freeblk.
8017 */
8018 if (needj == 0)
8019 handle_written_freework(freework);
8020 }
8021
8022 /*
8023 * We enqueue freework items that need processing back on the freeblks and
8024 * add the freeblks to the worklist. This makes it easier to find all work
8025 * required to flush a truncation in process_truncates().
8026 */
8027 static void
8028 freework_enqueue(struct freework *freework)
8029 {
8030 struct freeblks *freeblks;
8031
8032 freeblks = freework->fw_freeblks;
8033 if ((freework->fw_state & INPROGRESS) == 0)
8034 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
8035 if ((freeblks->fb_state &
8036 (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE &&
8037 LIST_EMPTY(&freeblks->fb_jblkdephd))
8038 add_to_worklist(&freeblks->fb_list, WK_NODELAY);
8039 }
8040
8041 /*
8042 * Start, continue, or finish the process of freeing an indirect block tree.
8043 * The free operation may be paused at any point with fw_off containing the
8044 * offset to restart from. This enables us to implement some flow control
8045 * for large truncates which may fan out and generate a huge number of
8046 * dependencies.
8047 */
8048 static void
8049 handle_workitem_indirblk(struct freework *freework)
8050 {
8051 struct freeblks *freeblks;
8052 struct ufsmount *ump;
8053 struct fs *fs;
8054
8055 freeblks = freework->fw_freeblks;
8056 ump = VFSTOUFS(freeblks->fb_list.wk_mp);
8057 fs = ump->um_fs;
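/*
 * If the freework's dependencies are already satisfied it is
 * simply retired. If fw_off has reached the end of the indirect,
 * every child has been handed off and the indirect block itself
 * can now be freed. Otherwise resume the traversal at fw_off.
 */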
8058 if (freework->fw_state & DEPCOMPLETE) {
8059 handle_written_freework(freework);
8060 return;
8061 }
8062 if (freework->fw_off == NINDIR(fs)) {
8063 freework_freeblock(freework, SINGLETON_KEY);
8064 return;
8065 }
8066 freework->fw_state |= INPROGRESS;
8067 FREE_LOCK(ump);
8068 indir_trunc(freework, fsbtodb(fs, freework->fw_blkno),
8069 freework->fw_lbn);
8070 ACQUIRE_LOCK(ump);
8071 }
8072
8073 /*
8074 * Called when a freework structure attached to a cg buf is written. The
8075 * ref on either the parent or the freeblks structure is released and
8076 * the freeblks is added back to the worklist if there is more work to do.
8077 */
8078 static void
8079 handle_written_freework(struct freework *freework)
8080 {
8081 struct freeblks *freeblks;
8082 struct freework *parent;
8083
8084 freeblks = freework->fw_freeblks;
8085 parent = freework->fw_parent;
8086 if (freework->fw_state & DELAYEDFREE)
8087 freeblks->fb_cgwait--;
8088 freework->fw_state |= COMPLETE;
8089 if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
8090 WORKITEM_FREE(freework, D_FREEWORK);
8091 if (parent) {
8092 if (--parent->fw_ref == 0)
8093 freework_enqueue(parent);
8094 return;
8095 }
8096 if (--freeblks->fb_ref != 0)
8097 return;
8098 if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) ==
8099 ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd))
8100 add_to_worklist(&freeblks->fb_list, WK_NODELAY);
8101 }
8102
8103 /*
8104 * This workitem routine performs the block de-allocation.
8105 * The workitem is added to the pending list after the updated
8106 * inode block has been written to disk. As mentioned above,
8107 * checks regarding the number of blocks de-allocated (compared
8108 * to the number of blocks allocated for the file) are also
8109 * performed in this function.
8110 */
8111 static int
8112 handle_workitem_freeblocks(struct freeblks *freeblks, int flags)
8113 {
8114 struct freework *freework;
8115 struct newblk *newblk;
8116 struct allocindir *aip;
8117 struct ufsmount *ump;
8118 struct worklist *wk;
8119 u_long key;
8120
8121 KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd),
8122 ("handle_workitem_freeblocks: Journal entries not written."));
8123 ump = VFSTOUFS(freeblks->fb_list.wk_mp);
8124 key = ffs_blkrelease_start(ump, freeblks->fb_devvp, freeblks->fb_inum);
8125 ACQUIRE_LOCK(ump);
8126 while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) {
8127 WORKLIST_REMOVE(wk);
8128 switch (wk->wk_type) {
8129 case D_DIRREM:
8130 wk->wk_state |= COMPLETE;
8131 add_to_worklist(wk, 0);
8132 continue;
8133
8134 case D_ALLOCDIRECT:
8135 free_newblk(WK_NEWBLK(wk));
8136 continue;
8137
8138 case D_ALLOCINDIR:
8139 aip = WK_ALLOCINDIR(wk);
8140 freework = NULL;
8141 if (aip->ai_state & DELAYEDFREE) {
8142 FREE_LOCK(ump);
8143 freework = newfreework(ump, freeblks, NULL,
8144 aip->ai_lbn, aip->ai_newblkno,
8145 ump->um_fs->fs_frag, 0, 0);
8146 ACQUIRE_LOCK(ump);
8147 }
8148 newblk = WK_NEWBLK(wk);
8149 if (newblk->nb_jnewblk) {
8150 freework->fw_jnewblk = newblk->nb_jnewblk;
8151 newblk->nb_jnewblk->jn_dep = &freework->fw_list;
8152 newblk->nb_jnewblk = NULL;
8153 }
8154 free_newblk(newblk);
8155 continue;
8156
8157 case D_FREEWORK:
8158 freework = WK_FREEWORK(wk);
8159 if (freework->fw_lbn <= -UFS_NDADDR)
8160 handle_workitem_indirblk(freework);
8161 else
8162 freework_freeblock(freework, key);
8163 continue;
8164 default:
8165 panic("handle_workitem_freeblocks: Unknown type %s",
8166 TYPENAME(wk->wk_type));
8167 }
8168 }
8169 if (freeblks->fb_ref != 0) {
8170 freeblks->fb_state &= ~INPROGRESS;
8171 wake_worklist(&freeblks->fb_list);
8172 freeblks = NULL;
8173 }
8174 FREE_LOCK(ump);
8175 ffs_blkrelease_finish(ump, key);
8176 if (freeblks)
8177 return handle_complete_freeblocks(freeblks, flags);
8178 return (0);
8179 }
8180
8181 /*
* Handle completion of block free via truncate. This allows fs_pendingblocks
8183 * to track the actual free block count more closely than if we only updated
8184 * it at the end. We must be careful to handle cases where the block count
8185 * on free was incorrect.
8186 */
8187 static void
8188 freeblks_free(struct ufsmount *ump,
8189 struct freeblks *freeblks,
8190 int blocks)
8191 {
8192 struct fs *fs;
8193 ufs2_daddr_t remain;
8194
8195 UFS_LOCK(ump);
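/*
 * fb_chkcnt was initialized to the negative of the expected count
 * and rises toward zero as blocks are freed, so "remain" is the
 * number of blocks still pending. Subtract no more than that from
 * fs_pendingblocks.
 */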
8196 remain = -freeblks->fb_chkcnt;
8197 freeblks->fb_chkcnt += blocks;
8198 if (remain > 0) {
8199 if (remain < blocks)
8200 blocks = remain;
8201 fs = ump->um_fs;
8202 fs->fs_pendingblocks -= blocks;
8203 }
8204 UFS_UNLOCK(ump);
8205 }
8206
8207 /*
8208 * Once all of the freework workitems are complete we can retire the
8209 * freeblocks dependency and any journal work awaiting completion. This
8210 * can not be called until all other dependencies are stable on disk.
8211 */
8212 static int
8213 handle_complete_freeblocks(struct freeblks *freeblks, int flags)
8214 {
8215 struct inodedep *inodedep;
8216 struct inode *ip;
8217 struct vnode *vp;
8218 struct fs *fs;
8219 struct ufsmount *ump;
8220 ufs2_daddr_t spare;
8221
8222 ump = VFSTOUFS(freeblks->fb_list.wk_mp);
8223 fs = ump->um_fs;
8224 flags = LK_EXCLUSIVE | flags;
8225 spare = freeblks->fb_chkcnt;
8226
8227 /*
8228 * If we did not release the expected number of blocks we may have
8229 * to adjust the inode block count here. Only do so if it wasn't
8230 * a truncation to zero and the modrev still matches.
8231 */
8232 if (spare && freeblks->fb_len != 0) {
8233 if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum,
8234 flags, &vp, FFSV_FORCEINSMQ | FFSV_FORCEINODEDEP) != 0)
8235 return (EBUSY);
8236 ip = VTOI(vp);
8237 if (ip->i_mode == 0) {
8238 vgone(vp);
8239 } else if (DIP(ip, i_modrev) == freeblks->fb_modrev) {
8240 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare);
8241 UFS_INODE_SET_FLAG(ip, IN_CHANGE);
8242 /*
8243 * We must wait so this happens before the
8244 * journal is reclaimed.
8245 */
8246 ffs_update(vp, 1);
8247 }
8248 vput(vp);
8249 }
8250 if (spare < 0) {
8251 UFS_LOCK(ump);
8252 fs->fs_pendingblocks += spare;
8253 UFS_UNLOCK(ump);
8254 }
8255 #ifdef QUOTA
8256 /* Handle spare. */
8257 if (spare)
8258 quotaadj(freeblks->fb_quota, ump, -spare);
8259 quotarele(freeblks->fb_quota);
8260 #endif
8261 ACQUIRE_LOCK(ump);
8262 if (freeblks->fb_state & ONDEPLIST) {
8263 inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum,
8264 0, &inodedep);
8265 TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next);
8266 freeblks->fb_state &= ~ONDEPLIST;
8267 if (TAILQ_EMPTY(&inodedep->id_freeblklst))
8268 free_inodedep(inodedep);
8269 }
8270 /*
8271 * All of the freeblock deps must be complete prior to this call
8272 * so it's now safe to complete earlier outstanding journal entries.
8273 */
8274 handle_jwork(&freeblks->fb_jwork);
8275 WORKITEM_FREE(freeblks, D_FREEBLKS);
8276 FREE_LOCK(ump);
8277 return (0);
8278 }
8279
8280 /*
8281 * Release blocks associated with the freeblks and stored in the indirect
8282 * block dbn. If level is greater than SINGLE, the block is an indirect block
* and recursive calls to indir_trunc must be used to cleanse other indirect
8284 * blocks.
8285 *
8286 * This handles partial and complete truncation of blocks. Partial is noted
8287 * with goingaway == 0. In this case the freework is completed after the
8288 * zero'd indirects are written to disk. For full truncation the freework
8289 * is completed after the block is freed.
8290 */
8291 static void
8292 indir_trunc(struct freework *freework,
8293 ufs2_daddr_t dbn,
8294 ufs_lbn_t lbn)
8295 {
8296 struct freework *nfreework;
8297 struct workhead wkhd;
8298 struct freeblks *freeblks;
8299 struct buf *bp;
8300 struct fs *fs;
8301 struct indirdep *indirdep;
8302 struct mount *mp;
8303 struct ufsmount *ump;
8304 ufs1_daddr_t *bap1;
8305 ufs2_daddr_t nb, nnb, *bap2;
8306 ufs_lbn_t lbnadd, nlbn;
8307 u_long key;
8308 int nblocks, ufs1fmt, freedblocks;
8309 int goingaway, freedeps, needj, level, cnt, i, error;
8310
8311 freeblks = freework->fw_freeblks;
8312 mp = freeblks->fb_list.wk_mp;
8313 ump = VFSTOUFS(mp);
8314 fs = ump->um_fs;
8315 /*
8316 * Get buffer of block pointers to be freed. There are three cases:
8317 *
* 1) A partial truncate caches the indirdep pointer in the freework,
* which gives us access to the saved bp holding the pointers we
* want to clear. When this completes, the zero'ed pointers are
* written to the real copy.
8322 * 2) The indirect is being completely truncated, cancel_indirdep()
8323 * eliminated the real copy and placed the indirdep on the saved
8324 * copy. The indirdep and buf are discarded when this completes.
8325 * 3) The indirect was not in memory, we read a copy off of the disk
8326 * using the devvp and drop and invalidate the buffer when we're
8327 * done.
8328 */
8329 goingaway = 1;
8330 indirdep = NULL;
8331 if (freework->fw_indir != NULL) {
8332 goingaway = 0;
8333 indirdep = freework->fw_indir;
8334 bp = indirdep->ir_savebp;
8335 if (bp == NULL || bp->b_blkno != dbn)
8336 panic("indir_trunc: Bad saved buf %p blkno %jd",
8337 bp, (intmax_t)dbn);
8338 } else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) {
8339 /*
8340 * The lock prevents the buf dep list from changing and
8341 * indirects on devvp should only ever have one dependency.
8342 */
8343 indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep));
8344 if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0)
8345 panic("indir_trunc: Bad indirdep %p from buf %p",
8346 indirdep, bp);
8347 } else {
8348 error = ffs_breadz(ump, freeblks->fb_devvp, dbn, dbn,
8349 (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL, &bp);
8350 if (error)
8351 return;
8352 }
8353 ACQUIRE_LOCK(ump);
8354 /* Protects against a race with complete_trunc_indir(). */
8355 freework->fw_state &= ~INPROGRESS;
8356 /*
8357 * If we have an indirdep we need to enforce the truncation order
8358 * and discard it when it is complete.
8359 */
8360 if (indirdep) {
8361 if (freework != TAILQ_FIRST(&indirdep->ir_trunc) &&
8362 !TAILQ_EMPTY(&indirdep->ir_trunc)) {
8363 /*
8364 * Add the complete truncate to the list on the
8365 * indirdep to enforce in-order processing.
8366 */
8367 if (freework->fw_indir == NULL)
8368 TAILQ_INSERT_TAIL(&indirdep->ir_trunc,
8369 freework, fw_next);
8370 FREE_LOCK(ump);
8371 return;
8372 }
8373 /*
8374 * If we're goingaway, free the indirdep. Otherwise it will
8375 * linger until the write completes.
8376 */
8377 if (goingaway) {
8378 KASSERT(indirdep->ir_savebp == bp,
8379 ("indir_trunc: losing ir_savebp %p",
8380 indirdep->ir_savebp));
8381 indirdep->ir_savebp = NULL;
8382 free_indirdep(indirdep);
8383 }
8384 }
8385 FREE_LOCK(ump);
8386 /* Initialize pointers depending on block size. */
8387 if (ump->um_fstype == UFS1) {
8388 bap1 = (ufs1_daddr_t *)bp->b_data;
8389 nb = bap1[freework->fw_off];
8390 ufs1fmt = 1;
8391 bap2 = NULL;
8392 } else {
8393 bap2 = (ufs2_daddr_t *)bp->b_data;
8394 nb = bap2[freework->fw_off];
8395 ufs1fmt = 0;
8396 bap1 = NULL;
8397 }
8398 level = lbn_level(lbn);
8399 needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0;
8400 lbnadd = lbn_offset(fs, level);
8401 nblocks = btodb(fs->fs_bsize);
8402 nfreework = freework;
8403 freedeps = 0;
8404 cnt = 0;
8405 /*
* Reclaim blocks. Traverse into nested indirect levels and,
* when journaling, arrange for the current level to be freed
* only after its subordinates are free.
8409 */
8410 key = ffs_blkrelease_start(ump, freeblks->fb_devvp, freeblks->fb_inum);
8411 for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) {
8412 if (UFS_CHECK_BLKNO(mp, freeblks->fb_inum, nb,
8413 fs->fs_bsize) != 0)
8414 nb = 0;
8415 if (i != NINDIR(fs) - 1) {
8416 if (ufs1fmt)
8417 nnb = bap1[i+1];
8418 else
8419 nnb = bap2[i+1];
8420 } else
8421 nnb = 0;
8422 if (nb == 0)
8423 continue;
8424 cnt++;
8425 if (level != 0) {
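/*
 * Compute the virtual lbn of the child indirect in slot i:
 * this block's lbn is -(base + level), so adding one drops a
 * level of indirection and subtracting i * lbnadd advances to
 * the slot's base lbn.
 */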
8426 nlbn = (lbn + 1) - (i * lbnadd);
8427 if (needj != 0) {
8428 nfreework = newfreework(ump, freeblks, freework,
8429 nlbn, nb, fs->fs_frag, 0, 0);
8430 freedeps++;
8431 }
8432 indir_trunc(nfreework, fsbtodb(fs, nb), nlbn);
8433 } else {
8434 struct freedep *freedep;
8435
8436 /*
8437 * Attempt to aggregate freedep dependencies for
8438 * all blocks being released to the same CG.
8439 */
8440 LIST_INIT(&wkhd);
8441 if (needj != 0 &&
8442 (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) {
8443 freedep = newfreedep(freework);
8444 WORKLIST_INSERT_UNLOCKED(&wkhd,
8445 &freedep->fd_list);
8446 freedeps++;
8447 }
8448 CTR3(KTR_SUJ,
8449 "indir_trunc: ino %jd blkno %jd size %d",
8450 freeblks->fb_inum, nb, fs->fs_bsize);
8451 ffs_blkfree(ump, fs, freeblks->fb_devvp, nb,
8452 fs->fs_bsize, freeblks->fb_inum,
8453 freeblks->fb_vtype, &wkhd, key);
8454 }
8455 }
8456 ffs_blkrelease_finish(ump, key);
8457 if (goingaway) {
8458 bp->b_flags |= B_INVAL | B_NOCACHE;
8459 brelse(bp);
8460 }
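/*
 * Account for the space released above: at the last level of
 * indirection the loop freed cnt full data blocks, and when not
 * journaling the indirect block itself, freed just below, is
 * counted as well.
 */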
8461 freedblocks = 0;
8462 if (level == 0)
8463 freedblocks = (nblocks * cnt);
8464 if (needj == 0)
8465 freedblocks += nblocks;
8466 freeblks_free(ump, freeblks, freedblocks);
8467 /*
8468 * If we are journaling set up the ref counts and offset so this
8469 * indirect can be completed when its children are free.
8470 */
8471 if (needj) {
8472 ACQUIRE_LOCK(ump);
8473 freework->fw_off = i;
8474 freework->fw_ref += freedeps;
8475 freework->fw_ref -= NINDIR(fs) + 1;
8476 if (level == 0)
8477 freeblks->fb_cgwait += freedeps;
8478 if (freework->fw_ref == 0)
8479 freework_freeblock(freework, SINGLETON_KEY);
8480 FREE_LOCK(ump);
8481 return;
8482 }
8483 /*
8484 * If we're not journaling we can free the indirect now.
8485 */
8486 dbn = dbtofsb(fs, dbn);
8487 CTR3(KTR_SUJ,
8488 "indir_trunc 2: ino %jd blkno %jd size %d",
8489 freeblks->fb_inum, dbn, fs->fs_bsize);
8490 ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize,
8491 freeblks->fb_inum, freeblks->fb_vtype, NULL, SINGLETON_KEY);
/* Non-SUJ softdep does single-threaded truncations. */
8493 if (freework->fw_blkno == dbn) {
8494 freework->fw_state |= ALLCOMPLETE;
8495 ACQUIRE_LOCK(ump);
8496 handle_written_freework(freework);
8497 FREE_LOCK(ump);
8498 }
8499 return;
8500 }
8501
8502 /*
8503 * Cancel an allocindir when it is removed via truncation. When bp is not
8504 * NULL the indirect never appeared on disk and is scheduled to be freed
8505 * independently of the indir so we can more easily track journal work.
8506 */
8507 static void
8508 cancel_allocindir(
8509 struct allocindir *aip,
8510 struct buf *bp,
8511 struct freeblks *freeblks,
8512 int trunc)
8513 {
8514 struct indirdep *indirdep;
8515 struct freefrag *freefrag;
8516 struct newblk *newblk;
8517
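/* An allocindir, like an allocdirect, begins with an embedded newblk. */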
8518 newblk = (struct newblk *)aip;
8519 LIST_REMOVE(aip, ai_next);
8520 /*
8521 * We must eliminate the pointer in bp if it must be freed on its
8522 * own due to partial truncate or pending journal work.
8523 */
8524 if (bp && (trunc || newblk->nb_jnewblk)) {
8525 /*
8526 * Clear the pointer and mark the aip to be freed
8527 * directly if it never existed on disk.
8528 */
8529 aip->ai_state |= DELAYEDFREE;
8530 indirdep = aip->ai_indirdep;
8531 if (indirdep->ir_state & UFS1FMT)
8532 ((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8533 else
8534 ((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8535 }
8536 /*
8537 * When truncating the previous pointer will be freed via
8538 * savedbp. Eliminate the freefrag which would dup free.
8539 */
8540 if (trunc && (freefrag = newblk->nb_freefrag) != NULL) {
8541 newblk->nb_freefrag = NULL;
8542 if (freefrag->ff_jdep)
8543 cancel_jfreefrag(
8544 WK_JFREEFRAG(freefrag->ff_jdep));
8545 jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork);
8546 WORKITEM_FREE(freefrag, D_FREEFRAG);
8547 }
8548 /*
8549 * If the journal hasn't been written the jnewblk must be passed
8550 * to the call to ffs_blkfree that reclaims the space. We accomplish
8551 * this by leaving the journal dependency on the newblk to be freed
8552 * when a freework is created in handle_workitem_freeblocks().
8553 */
8554 cancel_newblk(newblk, NULL, &freeblks->fb_jwork);
8555 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
8556 }
8557
8558 /*
8559 * Create the mkdir dependencies for . and .. in a new directory. Link them
* into a newdirblk so any subsequent additions are tracked properly. The
8561 * caller is responsible for adding the mkdir1 dependency to the journal
8562 * and updating id_mkdiradd. This function returns with the per-filesystem
8563 * lock held.
8564 */
8565 static struct mkdir *
8566 setup_newdir(
8567 struct diradd *dap,
8568 ino_t newinum,
8569 ino_t dinum,
8570 struct buf *newdirbp,
8571 struct mkdir **mkdirp)
8572 {
8573 struct newblk *newblk;
8574 struct pagedep *pagedep;
8575 struct inodedep *inodedep;
8576 struct newdirblk *newdirblk;
8577 struct mkdir *mkdir1, *mkdir2;
8578 struct worklist *wk;
8579 struct jaddref *jaddref;
8580 struct ufsmount *ump;
8581 struct mount *mp;
8582
8583 mp = dap->da_list.wk_mp;
8584 ump = VFSTOUFS(mp);
8585 newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK,
8586 M_SOFTDEP_FLAGS);
8587 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8588 LIST_INIT(&newdirblk->db_mkdir);
8589 mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8590 workitem_alloc(&mkdir1->md_list, D_MKDIR, mp);
8591 mkdir1->md_state = ATTACHED | MKDIR_BODY;
8592 mkdir1->md_diradd = dap;
8593 mkdir1->md_jaddref = NULL;
8594 mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8595 workitem_alloc(&mkdir2->md_list, D_MKDIR, mp);
8596 mkdir2->md_state = ATTACHED | MKDIR_PARENT;
8597 mkdir2->md_diradd = dap;
8598 mkdir2->md_jaddref = NULL;
8599 if (MOUNTEDSUJ(mp) == 0) {
8600 mkdir1->md_state |= DEPCOMPLETE;
8601 mkdir2->md_state |= DEPCOMPLETE;
8602 }
8603 /*
8604 * Dependency on "." and ".." being written to disk.
8605 */
8606 mkdir1->md_buf = newdirbp;
8607 ACQUIRE_LOCK(VFSTOUFS(mp));
8608 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir1, md_mkdirs);
8609 /*
8610 * We must link the pagedep, allocdirect, and newdirblk for
8611 * the initial file page so the pointer to the new directory
8612 * is not written until the directory contents are live and
8613 * any subsequent additions are not marked live until the
8614 * block is reachable via the inode.
8615 */
8616 if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0)
8617 panic("setup_newdir: lost pagedep");
8618 LIST_FOREACH(wk, &newdirbp->b_dep, wk_list)
8619 if (wk->wk_type == D_ALLOCDIRECT)
8620 break;
8621 if (wk == NULL)
8622 panic("setup_newdir: lost allocdirect");
8623 if (pagedep->pd_state & NEWBLOCK)
8624 panic("setup_newdir: NEWBLOCK already set");
8625 newblk = WK_NEWBLK(wk);
8626 pagedep->pd_state |= NEWBLOCK;
8627 pagedep->pd_newdirblk = newdirblk;
8628 newdirblk->db_pagedep = pagedep;
8629 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8630 WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list);
8631 /*
8632 * Look up the inodedep for the parent directory so that we
8633 * can link mkdir2 into the pending dotdot jaddref or
8634 * the inode write if there is none. If the inode is
8635 * ALLCOMPLETE and no jaddref is present all dependencies have
8636 * been satisfied and mkdir2 can be freed.
8637 */
8638 inodedep_lookup(mp, dinum, 0, &inodedep);
8639 if (MOUNTEDSUJ(mp)) {
8640 if (inodedep == NULL)
8641 panic("setup_newdir: Lost parent.");
8642 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8643 inoreflst);
8644 KASSERT(jaddref != NULL && jaddref->ja_parent == newinum &&
8645 (jaddref->ja_state & MKDIR_PARENT),
8646 ("setup_newdir: bad dotdot jaddref %p", jaddref));
8647 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs);
8648 mkdir2->md_jaddref = jaddref;
8649 jaddref->ja_mkdir = mkdir2;
8650 } else if (inodedep == NULL ||
8651 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
8652 dap->da_state &= ~MKDIR_PARENT;
8653 WORKITEM_FREE(mkdir2, D_MKDIR);
8654 mkdir2 = NULL;
8655 } else {
8656 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs);
8657 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list);
8658 }
8659 *mkdirp = mkdir2;
8660
8661 return (mkdir1);
8662 }
8663
8664 /*
8665 * Directory entry addition dependencies.
8666 *
8667 * When adding a new directory entry, the inode (with its incremented link
8668 * count) must be written to disk before the directory entry's pointer to it.
8669 * Also, if the inode is newly allocated, the corresponding freemap must be
8670 * updated (on disk) before the directory entry's pointer. These requirements
8671 * are met via undo/redo on the directory entry's pointer, which consists
8672 * simply of the inode number.
8673 *
8674 * As directory entries are added and deleted, the free space within a
8675 * directory block can become fragmented. The ufs filesystem will compact
8676 * a fragmented directory block to make space for a new entry. When this
8677 * occurs, the offsets of previously added entries change. Any "diradd"
8678 * dependency structures corresponding to these entries must be updated with
8679 * the new offsets.
8680 */
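/*
 * Illustrative sketch (not compiled; ep, bp, and dap as used in
 * initiate_write_filepage() below): the undo/redo is applied to the
 * entry's d_ino field while the directory block is being written.
 */
#if 0
	ep = (struct direct *)((char *)bp->b_data + dap->da_offset);
	ep->d_ino = 0;			/* undo: entry invisible on disk */
	/* ... the directory block is written ... */
	ep->d_ino = dap->da_newinum;	/* redo: restore pointer in memory */
#endif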
8681
8682 /*
8683 * This routine is called after the in-memory inode's link
8684 * count has been incremented, but before the directory entry's
8685 * pointer to the inode has been set.
8686 */
8687 int
8688 softdep_setup_directory_add(
8689 struct buf *bp, /* buffer containing directory block */
8690 struct inode *dp, /* inode for directory */
8691 off_t diroffset, /* offset of new entry in directory */
8692 ino_t newinum, /* inode referenced by new directory entry */
8693 struct buf *newdirbp, /* non-NULL => contents of new mkdir */
8694 int isnewblk) /* entry is in a newly allocated block */
8695 {
8696 int offset; /* offset of new entry within directory block */
8697 ufs_lbn_t lbn; /* block in directory containing new entry */
8698 struct fs *fs;
8699 struct diradd *dap;
8700 struct newblk *newblk;
8701 struct pagedep *pagedep;
8702 struct inodedep *inodedep;
8703 struct newdirblk *newdirblk;
8704 struct mkdir *mkdir1, *mkdir2;
8705 struct jaddref *jaddref;
8706 struct ufsmount *ump;
8707 struct mount *mp;
8708 int isindir;
8709
8710 mp = ITOVFS(dp);
8711 ump = VFSTOUFS(mp);
8712 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
8713 ("softdep_setup_directory_add called on non-softdep filesystem"));
8714 /*
8715 * Whiteouts have no dependencies.
8716 */
8717 if (newinum == UFS_WINO) {
8718 if (newdirbp != NULL)
8719 bdwrite(newdirbp);
8720 return (0);
8721 }
8722 jaddref = NULL;
8723 mkdir1 = mkdir2 = NULL;
8724 fs = ump->um_fs;
8725 lbn = lblkno(fs, diroffset);
8726 offset = blkoff(fs, diroffset);
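	/*
	 * Worked example (values assumed): with fs_bsize 8192, an entry at
	 * diroffset 9000 yields lbn = lblkno(fs, 9000) = 1 and
	 * offset = blkoff(fs, 9000) = 808 within that block.
	 */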
8727 dap = malloc(sizeof(struct diradd), M_DIRADD,
8728 M_SOFTDEP_FLAGS|M_ZERO);
8729 workitem_alloc(&dap->da_list, D_DIRADD, mp);
8730 dap->da_offset = offset;
8731 dap->da_newinum = newinum;
8732 dap->da_state = ATTACHED;
8733 LIST_INIT(&dap->da_jwork);
8734 isindir = bp->b_lblkno >= UFS_NDADDR;
8735 newdirblk = NULL;
8736 if (isnewblk &&
8737 (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) {
8738 newdirblk = malloc(sizeof(struct newdirblk),
8739 M_NEWDIRBLK, M_SOFTDEP_FLAGS);
8740 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8741 LIST_INIT(&newdirblk->db_mkdir);
8742 }
8743 /*
8744 	 * If we're creating a new directory, set up the dependencies and set
8745 * the dap state to wait for them. Otherwise it's COMPLETE and
8746 * we can move on.
8747 */
8748 if (newdirbp == NULL) {
8749 dap->da_state |= DEPCOMPLETE;
8750 ACQUIRE_LOCK(ump);
8751 } else {
8752 dap->da_state |= MKDIR_BODY | MKDIR_PARENT;
8753 mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp,
8754 &mkdir2);
8755 }
8756 /*
8757 * Link into parent directory pagedep to await its being written.
8758 */
8759 pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep);
8760 #ifdef INVARIANTS
8761 if (diradd_lookup(pagedep, offset) != NULL)
8762 panic("softdep_setup_directory_add: %p already at off %d\n",
8763 diradd_lookup(pagedep, offset), offset);
8764 #endif
8765 dap->da_pagedep = pagedep;
8766 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap,
8767 da_pdlist);
8768 inodedep_lookup(mp, newinum, DEPALLOC, &inodedep);
8769 /*
8770 * If we're journaling, link the diradd into the jaddref so it
8771 * may be completed after the journal entry is written. Otherwise,
8772 * link the diradd into its inodedep. If the inode is not yet
8773 * written place it on the bufwait list, otherwise do the post-inode
8774 * write processing to put it on the id_pendinghd list.
8775 */
8776 if (MOUNTEDSUJ(mp)) {
8777 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8778 inoreflst);
8779 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
8780 ("softdep_setup_directory_add: bad jaddref %p", jaddref));
8781 jaddref->ja_diroff = diroffset;
8782 jaddref->ja_diradd = dap;
8783 add_to_journal(&jaddref->ja_list);
8784 } else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
8785 diradd_inode_written(dap, inodedep);
8786 else
8787 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
8788 /*
8789 * Add the journal entries for . and .. links now that the primary
8790 * link is written.
8791 */
8792 if (mkdir1 != NULL && MOUNTEDSUJ(mp)) {
8793 jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
8794 inoreflst, if_deps);
8795 KASSERT(jaddref != NULL &&
8796 jaddref->ja_ino == jaddref->ja_parent &&
8797 (jaddref->ja_state & MKDIR_BODY),
8798 ("softdep_setup_directory_add: bad dot jaddref %p",
8799 jaddref));
8800 mkdir1->md_jaddref = jaddref;
8801 jaddref->ja_mkdir = mkdir1;
8802 /*
8803 * It is important that the dotdot journal entry
8804 * is added prior to the dot entry since dot writes
8805 * both the dot and dotdot links. These both must
8806 * be added after the primary link for the journal
8807 * to remain consistent.
8808 */
8809 add_to_journal(&mkdir2->md_jaddref->ja_list);
8810 add_to_journal(&jaddref->ja_list);
8811 }
8812 /*
8813 * If we are adding a new directory remember this diradd so that if
8814 * we rename it we can keep the dot and dotdot dependencies. If
8815 * we are adding a new name for an inode that has a mkdiradd we
8816 * must be in rename and we have to move the dot and dotdot
8817 * dependencies to this new name. The old name is being orphaned
8818 * soon.
8819 */
8820 if (mkdir1 != NULL) {
8821 if (inodedep->id_mkdiradd != NULL)
8822 panic("softdep_setup_directory_add: Existing mkdir");
8823 inodedep->id_mkdiradd = dap;
8824 } else if (inodedep->id_mkdiradd)
8825 merge_diradd(inodedep, dap);
8826 if (newdirblk != NULL) {
8827 /*
8828 * There is nothing to do if we are already tracking
8829 * this block.
8830 */
8831 if ((pagedep->pd_state & NEWBLOCK) != 0) {
8832 WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
8833 FREE_LOCK(ump);
8834 return (0);
8835 }
8836 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)
8837 == 0)
8838 panic("softdep_setup_directory_add: lost entry");
8839 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8840 pagedep->pd_state |= NEWBLOCK;
8841 pagedep->pd_newdirblk = newdirblk;
8842 newdirblk->db_pagedep = pagedep;
8843 FREE_LOCK(ump);
8844 /*
8845 	 * If we extended into an indirect, signal direnter to sync.
8846 */
8847 if (isindir)
8848 return (1);
8849 return (0);
8850 }
8851 FREE_LOCK(ump);
8852 return (0);
8853 }
8854
8855 /*
8856 * This procedure is called to change the offset of a directory
8857  * entry when compacting a directory block, which must be owned
8858 * exclusively by the caller. Note that the actual entry movement
8859 * must be done in this procedure to ensure that no I/O completions
8860 * occur while the move is in progress.
8861 */
8862 void
8863 softdep_change_directoryentry_offset(
8864 struct buf *bp, /* Buffer holding directory block. */
8865 struct inode *dp, /* inode for directory */
8866 caddr_t base, /* address of dp->i_offset */
8867 caddr_t oldloc, /* address of old directory location */
8868 caddr_t newloc, /* address of new directory location */
8869 int entrysize) /* size of directory entry */
8870 {
8871 int offset, oldoffset, newoffset;
8872 struct pagedep *pagedep;
8873 struct jmvref *jmvref;
8874 struct diradd *dap;
8875 struct direct *de;
8876 struct mount *mp;
8877 struct ufsmount *ump;
8878 ufs_lbn_t lbn;
8879 int flags;
8880
8881 mp = ITOVFS(dp);
8882 ump = VFSTOUFS(mp);
8883 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
8884 ("softdep_change_directoryentry_offset called on "
8885 "non-softdep filesystem"));
8886 de = (struct direct *)oldloc;
8887 jmvref = NULL;
8888 flags = 0;
8889 /*
8890 * Moves are always journaled as it would be too complex to
8891 * determine if any affected adds or removes are present in the
8892 * journal.
8893 */
8894 if (MOUNTEDSUJ(mp)) {
8895 flags = DEPALLOC;
8896 jmvref = newjmvref(dp, de->d_ino,
8897 I_OFFSET(dp) + (oldloc - base),
8898 I_OFFSET(dp) + (newloc - base));
8899 }
8900 lbn = lblkno(ump->um_fs, I_OFFSET(dp));
8901 offset = blkoff(ump->um_fs, I_OFFSET(dp));
8902 oldoffset = offset + (oldloc - base);
8903 newoffset = offset + (newloc - base);
8904 ACQUIRE_LOCK(ump);
8905 if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0)
8906 goto done;
8907 dap = diradd_lookup(pagedep, oldoffset);
8908 if (dap) {
8909 dap->da_offset = newoffset;
8910 newoffset = DIRADDHASH(newoffset);
8911 oldoffset = DIRADDHASH(oldoffset);
8912 if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE &&
8913 newoffset != oldoffset) {
8914 LIST_REMOVE(dap, da_pdlist);
8915 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset],
8916 dap, da_pdlist);
8917 }
8918 }
8919 done:
8920 if (jmvref) {
8921 jmvref->jm_pagedep = pagedep;
8922 LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps);
8923 add_to_journal(&jmvref->jm_list);
8924 }
8925 bcopy(oldloc, newloc, entrysize);
8926 FREE_LOCK(ump);
8927 }
8928
8929 /*
8930 * Move the mkdir dependencies and journal work from one diradd to another
8931 * when renaming a directory. The new name must depend on the mkdir deps
8932 * completing as the old name did. Directories can only have one valid link
8933 * at a time so one must be canonical.
8934 */
8935 static void
8936 merge_diradd(struct inodedep *inodedep, struct diradd *newdap)
8937 {
8938 struct diradd *olddap;
8939 struct mkdir *mkdir, *nextmd;
8940 struct ufsmount *ump;
8941 short state;
8942
8943 olddap = inodedep->id_mkdiradd;
8944 inodedep->id_mkdiradd = newdap;
8945 if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8946 newdap->da_state &= ~DEPCOMPLETE;
8947 ump = VFSTOUFS(inodedep->id_list.wk_mp);
8948 for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
8949 mkdir = nextmd) {
8950 nextmd = LIST_NEXT(mkdir, md_mkdirs);
8951 if (mkdir->md_diradd != olddap)
8952 continue;
8953 mkdir->md_diradd = newdap;
8954 state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY);
8955 newdap->da_state |= state;
8956 olddap->da_state &= ~state;
8957 if ((olddap->da_state &
8958 (MKDIR_PARENT | MKDIR_BODY)) == 0)
8959 break;
8960 }
8961 if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8962 panic("merge_diradd: unfound ref");
8963 }
8964 /*
8965 * Any mkdir related journal items are not safe to be freed until
8966 * the new name is stable.
8967 */
8968 jwork_move(&newdap->da_jwork, &olddap->da_jwork);
8969 olddap->da_state |= DEPCOMPLETE;
8970 complete_diradd(olddap);
8971 }
8972
8973 /*
8974 * Move the diradd to the pending list when all diradd dependencies are
8975 * complete.
8976 */
8977 static void
8978 complete_diradd(struct diradd *dap)
8979 {
8980 struct pagedep *pagedep;
8981
8982 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
8983 if (dap->da_state & DIRCHG)
8984 pagedep = dap->da_previous->dm_pagedep;
8985 else
8986 pagedep = dap->da_pagedep;
8987 LIST_REMOVE(dap, da_pdlist);
8988 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
8989 }
8990 }
8991
8992 /*
8993 * Cancel a diradd when a dirrem overlaps with it. We must cancel the journal
8994 * add entries and conditionally journal the remove.
8995 */
8996 static void
8997 cancel_diradd(
8998 struct diradd *dap,
8999 struct dirrem *dirrem,
9000 struct jremref *jremref,
9001 struct jremref *dotremref,
9002 struct jremref *dotdotremref)
9003 {
9004 struct inodedep *inodedep;
9005 struct jaddref *jaddref;
9006 struct inoref *inoref;
9007 struct ufsmount *ump;
9008 struct mkdir *mkdir;
9009
9010 /*
9011 	 * If no remove references were allocated, we're on a non-journaled
9012 * filesystem and can skip the cancel step.
9013 */
9014 if (jremref == NULL) {
9015 free_diradd(dap, NULL);
9016 return;
9017 }
9018 /*
9019 	 * Cancel the primary name and free it if it does not require
9020 * journaling.
9021 */
9022 if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum,
9023 0, &inodedep) != 0) {
9024 		/* Abort the addref that references this diradd. */
9025 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
9026 if (inoref->if_list.wk_type != D_JADDREF)
9027 continue;
9028 jaddref = (struct jaddref *)inoref;
9029 if (jaddref->ja_diradd != dap)
9030 continue;
9031 if (cancel_jaddref(jaddref, inodedep,
9032 &dirrem->dm_jwork) == 0) {
9033 free_jremref(jremref);
9034 jremref = NULL;
9035 }
9036 break;
9037 }
9038 }
9039 /*
9040 * Cancel subordinate names and free them if they do not require
9041 * journaling.
9042 */
9043 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
9044 ump = VFSTOUFS(dap->da_list.wk_mp);
9045 LIST_FOREACH(mkdir, &ump->softdep_mkdirlisthd, md_mkdirs) {
9046 if (mkdir->md_diradd != dap)
9047 continue;
9048 if ((jaddref = mkdir->md_jaddref) == NULL)
9049 continue;
9050 mkdir->md_jaddref = NULL;
9051 if (mkdir->md_state & MKDIR_PARENT) {
9052 if (cancel_jaddref(jaddref, NULL,
9053 &dirrem->dm_jwork) == 0) {
9054 free_jremref(dotdotremref);
9055 dotdotremref = NULL;
9056 }
9057 } else {
9058 if (cancel_jaddref(jaddref, inodedep,
9059 &dirrem->dm_jwork) == 0) {
9060 free_jremref(dotremref);
9061 dotremref = NULL;
9062 }
9063 }
9064 }
9065 }
9066
9067 if (jremref)
9068 journal_jremref(dirrem, jremref, inodedep);
9069 if (dotremref)
9070 journal_jremref(dirrem, dotremref, inodedep);
9071 if (dotdotremref)
9072 journal_jremref(dirrem, dotdotremref, NULL);
9073 jwork_move(&dirrem->dm_jwork, &dap->da_jwork);
9074 free_diradd(dap, &dirrem->dm_jwork);
9075 }
9076
9077 /*
9078 * Free a diradd dependency structure.
9079 */
9080 static void
9081 free_diradd(struct diradd *dap, struct workhead *wkhd)
9082 {
9083 struct dirrem *dirrem;
9084 struct pagedep *pagedep;
9085 struct inodedep *inodedep;
9086 struct mkdir *mkdir, *nextmd;
9087 struct ufsmount *ump;
9088
9089 ump = VFSTOUFS(dap->da_list.wk_mp);
9090 LOCK_OWNED(ump);
9091 LIST_REMOVE(dap, da_pdlist);
9092 if (dap->da_state & ONWORKLIST)
9093 WORKLIST_REMOVE(&dap->da_list);
9094 if ((dap->da_state & DIRCHG) == 0) {
9095 pagedep = dap->da_pagedep;
9096 } else {
9097 dirrem = dap->da_previous;
9098 pagedep = dirrem->dm_pagedep;
9099 dirrem->dm_dirinum = pagedep->pd_ino;
9100 dirrem->dm_state |= COMPLETE;
9101 if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9102 add_to_worklist(&dirrem->dm_list, 0);
9103 }
9104 if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum,
9105 0, &inodedep) != 0)
9106 if (inodedep->id_mkdiradd == dap)
9107 inodedep->id_mkdiradd = NULL;
9108 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
9109 for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
9110 mkdir = nextmd) {
9111 nextmd = LIST_NEXT(mkdir, md_mkdirs);
9112 if (mkdir->md_diradd != dap)
9113 continue;
9114 dap->da_state &=
9115 ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
9116 LIST_REMOVE(mkdir, md_mkdirs);
9117 if (mkdir->md_state & ONWORKLIST)
9118 WORKLIST_REMOVE(&mkdir->md_list);
9119 if (mkdir->md_jaddref != NULL)
9120 panic("free_diradd: Unexpected jaddref");
9121 WORKITEM_FREE(mkdir, D_MKDIR);
9122 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
9123 break;
9124 }
9125 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
9126 panic("free_diradd: unfound ref");
9127 }
9128 if (inodedep)
9129 free_inodedep(inodedep);
9130 /*
9131 * Free any journal segments waiting for the directory write.
9132 */
9133 handle_jwork(&dap->da_jwork);
9134 WORKITEM_FREE(dap, D_DIRADD);
9135 }
9136
9137 /*
9138 * Directory entry removal dependencies.
9139 *
9140 * When removing a directory entry, the entry's inode pointer must be
9141 * zero'ed on disk before the corresponding inode's link count is decremented
9142 * (possibly freeing the inode for re-use). This dependency is handled by
9143 * updating the directory entry but delaying the inode count reduction until
9144 * after the directory block has been written to disk. After this point, the
9145 * inode count can be decremented whenever it is convenient.
9146 */
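/*
 * Illustrative ordering sketch (not compiled; ep, bp, and ip as in the
 * surrounding routines): the rule above is simply that the zeroed entry
 * must reach the disk before the link count drops.
 */
#if 0
	ep->d_ino = 0;			/* 1) clear the entry in the buffer */
	bwrite(bp);			/* 2) zeroed entry is on disk */
	ip->i_nlink--;			/* 3) only now drop the count */
	DIP_SET(ip, i_nlink, ip->i_nlink);
#endif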
9147
9148 /*
9149 * This routine should be called immediately after removing
9150 * a directory entry. The inode's link count should not be
9151 * decremented by the calling procedure -- the soft updates
9152 * code will do this task when it is safe.
9153 */
9154 void
9155 softdep_setup_remove(
9156 struct buf *bp, /* buffer containing directory block */
9157 struct inode *dp, /* inode for the directory being modified */
9158 struct inode *ip, /* inode for directory entry being removed */
9159 int isrmdir) /* indicates if doing RMDIR */
9160 {
9161 struct dirrem *dirrem, *prevdirrem;
9162 struct inodedep *inodedep;
9163 struct ufsmount *ump;
9164 int direct;
9165
9166 ump = ITOUMP(ip);
9167 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
9168 ("softdep_setup_remove called on non-softdep filesystem"));
9169 /*
9170 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. We want
9171 	 * newdirrem() to set up the full directory remove, which requires
9172 * isrmdir > 1.
9173 */
9174 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
9175 /*
9176 * Add the dirrem to the inodedep's pending remove list for quick
9177 * discovery later.
9178 */
9179 if (inodedep_lookup(UFSTOVFS(ump), ip->i_number, 0, &inodedep) == 0)
9180 panic("softdep_setup_remove: Lost inodedep.");
9181 KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
9182 dirrem->dm_state |= ONDEPLIST;
9183 LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9184
9185 /*
9186 * If the COMPLETE flag is clear, then there were no active
9187 * entries and we want to roll back to a zeroed entry until
9188 * the new inode is committed to disk. If the COMPLETE flag is
9189 * set then we have deleted an entry that never made it to
9190 * disk. If the entry we deleted resulted from a name change,
9191 * then the old name still resides on disk. We cannot delete
9192 * its inode (returned to us in prevdirrem) until the zeroed
9193 * directory entry gets to disk. The new inode has never been
9194 * referenced on the disk, so can be deleted immediately.
9195 */
9196 if ((dirrem->dm_state & COMPLETE) == 0) {
9197 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
9198 dm_next);
9199 FREE_LOCK(ump);
9200 } else {
9201 if (prevdirrem != NULL)
9202 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
9203 prevdirrem, dm_next);
9204 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
9205 direct = LIST_EMPTY(&dirrem->dm_jremrefhd);
9206 FREE_LOCK(ump);
9207 if (direct)
9208 handle_workitem_remove(dirrem, 0);
9209 }
9210 }
9211
9212 /*
9213  * Check for an entry matching 'offset' on both the pd_diraddhd list and the
9214 * pd_pendinghd list of a pagedep.
9215 */
9216 static struct diradd *
9217 diradd_lookup(struct pagedep *pagedep, int offset)
9218 {
9219 struct diradd *dap;
9220
9221 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
9222 if (dap->da_offset == offset)
9223 return (dap);
9224 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
9225 if (dap->da_offset == offset)
9226 return (dap);
9227 return (NULL);
9228 }
9229
9230 /*
9231 * Search for a .. diradd dependency in a directory that is being removed.
9232  * If the directory was renamed to a new parent, we have a diradd rather
9233 * than a mkdir for the .. entry. We need to cancel it now before
9234 * it is found in truncate().
9235 */
9236 static struct jremref *
9237 cancel_diradd_dotdot(struct inode *ip,
9238 struct dirrem *dirrem,
9239 struct jremref *jremref)
9240 {
9241 struct pagedep *pagedep;
9242 struct diradd *dap;
9243 struct worklist *wk;
9244
9245 if (pagedep_lookup(ITOVFS(ip), NULL, ip->i_number, 0, 0, &pagedep) == 0)
9246 return (jremref);
9247 dap = diradd_lookup(pagedep, DOTDOT_OFFSET);
9248 if (dap == NULL)
9249 return (jremref);
9250 cancel_diradd(dap, dirrem, jremref, NULL, NULL);
9251 /*
9252 * Mark any journal work as belonging to the parent so it is freed
9253 * with the .. reference.
9254 */
9255 LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
9256 wk->wk_state |= MKDIR_PARENT;
9257 return (NULL);
9258 }
9259
9260 /*
9261 * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to
9262 * replace it with a dirrem/diradd pair as a result of re-parenting a
9263 * directory. This ensures that we don't simultaneously have a mkdir and
9264 * a diradd for the same .. entry.
9265 */
9266 static struct jremref *
9267 cancel_mkdir_dotdot(struct inode *ip,
9268 struct dirrem *dirrem,
9269 struct jremref *jremref)
9270 {
9271 struct inodedep *inodedep;
9272 struct jaddref *jaddref;
9273 struct ufsmount *ump;
9274 struct mkdir *mkdir;
9275 struct diradd *dap;
9276 struct mount *mp;
9277
9278 mp = ITOVFS(ip);
9279 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
9280 return (jremref);
9281 dap = inodedep->id_mkdiradd;
9282 if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0)
9283 return (jremref);
9284 ump = VFSTOUFS(inodedep->id_list.wk_mp);
9285 for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
9286 mkdir = LIST_NEXT(mkdir, md_mkdirs))
9287 if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT)
9288 break;
9289 if (mkdir == NULL)
9290 panic("cancel_mkdir_dotdot: Unable to find mkdir\n");
9291 if ((jaddref = mkdir->md_jaddref) != NULL) {
9292 mkdir->md_jaddref = NULL;
9293 jaddref->ja_state &= ~MKDIR_PARENT;
9294 if (inodedep_lookup(mp, jaddref->ja_ino, 0, &inodedep) == 0)
9295 panic("cancel_mkdir_dotdot: Lost parent inodedep");
9296 if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) {
9297 journal_jremref(dirrem, jremref, inodedep);
9298 jremref = NULL;
9299 }
9300 }
9301 if (mkdir->md_state & ONWORKLIST)
9302 WORKLIST_REMOVE(&mkdir->md_list);
9303 mkdir->md_state |= ALLCOMPLETE;
9304 complete_mkdir(mkdir);
9305 return (jremref);
9306 }
9307
9308 static void
9309 journal_jremref(struct dirrem *dirrem,
9310 struct jremref *jremref,
9311 struct inodedep *inodedep)
9312 {
9313
9314 if (inodedep == NULL)
9315 if (inodedep_lookup(jremref->jr_list.wk_mp,
9316 jremref->jr_ref.if_ino, 0, &inodedep) == 0)
9317 panic("journal_jremref: Lost inodedep");
9318 LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps);
9319 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
9320 add_to_journal(&jremref->jr_list);
9321 }
9322
9323 static void
9324 dirrem_journal(
9325 struct dirrem *dirrem,
9326 struct jremref *jremref,
9327 struct jremref *dotremref,
9328 struct jremref *dotdotremref)
9329 {
9330 struct inodedep *inodedep;
9331
9332 if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0,
9333 &inodedep) == 0)
9334 panic("dirrem_journal: Lost inodedep");
9335 journal_jremref(dirrem, jremref, inodedep);
9336 if (dotremref)
9337 journal_jremref(dirrem, dotremref, inodedep);
9338 if (dotdotremref)
9339 journal_jremref(dirrem, dotdotremref, NULL);
9340 }
9341
9342 /*
9343 * Allocate a new dirrem if appropriate and return it along with
9344 * its associated pagedep. Called without a lock, returns with lock.
9345 */
9346 static struct dirrem *
9347 newdirrem(
9348 struct buf *bp, /* buffer containing directory block */
9349 struct inode *dp, /* inode for the directory being modified */
9350 struct inode *ip, /* inode for directory entry being removed */
9351 int isrmdir, /* indicates if doing RMDIR */
9352 struct dirrem **prevdirremp) /* previously referenced inode, if any */
9353 {
9354 int offset;
9355 ufs_lbn_t lbn;
9356 struct diradd *dap;
9357 struct dirrem *dirrem;
9358 struct pagedep *pagedep;
9359 struct jremref *jremref;
9360 struct jremref *dotremref;
9361 struct jremref *dotdotremref;
9362 struct vnode *dvp;
9363 struct ufsmount *ump;
9364
9365 /*
9366 * Whiteouts have no deletion dependencies.
9367 */
9368 if (ip == NULL)
9369 panic("newdirrem: whiteout");
9370 dvp = ITOV(dp);
9371 ump = ITOUMP(dp);
9372
9373 /*
9374 * If the system is over its limit and our filesystem is
9375 * responsible for more than our share of that usage and
9376 * we are not a snapshot, request some inodedep cleanup.
9377 * Limiting the number of dirrem structures will also limit
9378 * the number of freefile and freeblks structures.
9379 */
9380 ACQUIRE_LOCK(ump);
9381 if (!IS_SNAPSHOT(ip) && softdep_excess_items(ump, D_DIRREM))
9382 schedule_cleanup(UFSTOVFS(ump));
9383 else
9384 FREE_LOCK(ump);
9385 dirrem = malloc(sizeof(struct dirrem), M_DIRREM, M_SOFTDEP_FLAGS |
9386 M_ZERO);
9387 workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount);
9388 LIST_INIT(&dirrem->dm_jremrefhd);
9389 LIST_INIT(&dirrem->dm_jwork);
9390 dirrem->dm_state = isrmdir ? RMDIR : 0;
9391 dirrem->dm_oldinum = ip->i_number;
9392 *prevdirremp = NULL;
9393 /*
9394 * Allocate remove reference structures to track journal write
9395 * dependencies. We will always have one for the link and
9396 * when doing directories we will always have one more for dot.
9397 * When renaming a directory we skip the dotdot link change so
9398 * this is not needed.
9399 */
9400 jremref = dotremref = dotdotremref = NULL;
9401 if (DOINGSUJ(dvp)) {
9402 if (isrmdir) {
9403 jremref = newjremref(dirrem, dp, ip, I_OFFSET(dp),
9404 ip->i_effnlink + 2);
9405 dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET,
9406 ip->i_effnlink + 1);
9407 dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET,
9408 dp->i_effnlink + 1);
9409 dotdotremref->jr_state |= MKDIR_PARENT;
9410 } else
9411 jremref = newjremref(dirrem, dp, ip, I_OFFSET(dp),
9412 ip->i_effnlink + 1);
9413 }
9414 ACQUIRE_LOCK(ump);
9415 lbn = lblkno(ump->um_fs, I_OFFSET(dp));
9416 offset = blkoff(ump->um_fs, I_OFFSET(dp));
9417 pagedep_lookup(UFSTOVFS(ump), bp, dp->i_number, lbn, DEPALLOC,
9418 &pagedep);
9419 dirrem->dm_pagedep = pagedep;
9420 dirrem->dm_offset = offset;
9421 /*
9422 * If we're renaming a .. link to a new directory, cancel any
9423 	 * existing MKDIR_PARENT mkdir. If it has already been canceled,
9424 	 * the jremref is preserved for any potential diradd in this
9425 	 * location. This cannot coincide with an rmdir.
9426 */
9427 if (I_OFFSET(dp) == DOTDOT_OFFSET) {
9428 if (isrmdir)
9429 panic("newdirrem: .. directory change during remove?");
9430 jremref = cancel_mkdir_dotdot(dp, dirrem, jremref);
9431 }
9432 /*
9433 * If we're removing a directory search for the .. dependency now and
9434 * cancel it. Any pending journal work will be added to the dirrem
9435 * to be completed when the workitem remove completes.
9436 */
9437 if (isrmdir)
9438 dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref);
9439 /*
9440 * Check for a diradd dependency for the same directory entry.
9441 * If present, then both dependencies become obsolete and can
9442 * be de-allocated.
9443 */
9444 dap = diradd_lookup(pagedep, offset);
9445 if (dap == NULL) {
9446 /*
9447 * Link the jremref structures into the dirrem so they are
9448 * written prior to the pagedep.
9449 */
9450 if (jremref)
9451 dirrem_journal(dirrem, jremref, dotremref,
9452 dotdotremref);
9453 return (dirrem);
9454 }
9455 /*
9456 * Must be ATTACHED at this point.
9457 */
9458 if ((dap->da_state & ATTACHED) == 0)
9459 panic("newdirrem: not ATTACHED");
9460 if (dap->da_newinum != ip->i_number)
9461 panic("newdirrem: inum %ju should be %ju",
9462 (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum);
9463 /*
9464 * If we are deleting a changed name that never made it to disk,
9465 * then return the dirrem describing the previous inode (which
9466 * represents the inode currently referenced from this entry on disk).
9467 */
9468 if ((dap->da_state & DIRCHG) != 0) {
9469 *prevdirremp = dap->da_previous;
9470 dap->da_state &= ~DIRCHG;
9471 dap->da_pagedep = pagedep;
9472 }
9473 /*
9474 * We are deleting an entry that never made it to disk.
9475 * Mark it COMPLETE so we can delete its inode immediately.
9476 */
9477 dirrem->dm_state |= COMPLETE;
9478 cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref);
9479 #ifdef INVARIANTS
9480 if (isrmdir == 0) {
9481 struct worklist *wk;
9482
9483 LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
9484 if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT))
9485 panic("bad wk %p (0x%X)\n", wk, wk->wk_state);
9486 }
9487 #endif
9488
9489 return (dirrem);
9490 }
9491
9492 /*
9493 * Directory entry change dependencies.
9494 *
9495 * Changing an existing directory entry requires that an add operation
9496 * be completed first followed by a deletion. The semantics for the addition
9497 * are identical to the description of adding a new entry above except
9498 * that the rollback is to the old inode number rather than zero. Once
9499 * the addition dependency is completed, the removal is done as described
9500 * in the removal routine above.
9501 */
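/*
 * Illustrative sketch (not compiled): for a changed entry the rollback
 * target is the old inode number rather than zero; this is the DIRCHG
 * case handled in initiate_write_filepage() below.
 */
#if 0
	if (dap->da_state & DIRCHG)
		ep->d_ino = dap->da_previous->dm_oldinum;
	else
		ep->d_ino = 0;
#endif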
9502
9503 /*
9504 * This routine should be called immediately after changing
9505 * a directory entry. The inode's link count should not be
9506 * decremented by the calling procedure -- the soft updates
9507 * code will perform this task when it is safe.
9508 */
9509 void
9510 softdep_setup_directory_change(
9511 struct buf *bp, /* buffer containing directory block */
9512 struct inode *dp, /* inode for the directory being modified */
9513 struct inode *ip, /* inode for directory entry being removed */
9514 ino_t newinum, /* new inode number for changed entry */
9515 int isrmdir) /* indicates if doing RMDIR */
9516 {
9517 int offset;
9518 struct diradd *dap = NULL;
9519 struct dirrem *dirrem, *prevdirrem;
9520 struct pagedep *pagedep;
9521 struct inodedep *inodedep;
9522 struct jaddref *jaddref;
9523 struct mount *mp;
9524 struct ufsmount *ump;
9525
9526 mp = ITOVFS(dp);
9527 ump = VFSTOUFS(mp);
9528 offset = blkoff(ump->um_fs, I_OFFSET(dp));
9529 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
9530 ("softdep_setup_directory_change called on non-softdep filesystem"));
9531
9532 /*
9533 * Whiteouts do not need diradd dependencies.
9534 */
9535 if (newinum != UFS_WINO) {
9536 dap = malloc(sizeof(struct diradd),
9537 M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO);
9538 workitem_alloc(&dap->da_list, D_DIRADD, mp);
9539 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
9540 dap->da_offset = offset;
9541 dap->da_newinum = newinum;
9542 LIST_INIT(&dap->da_jwork);
9543 }
9544
9545 /*
9546 * Allocate a new dirrem and ACQUIRE_LOCK.
9547 */
9548 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
9549 pagedep = dirrem->dm_pagedep;
9550 /*
9551 * The possible values for isrmdir:
9552 * 0 - non-directory file rename
9553 * 1 - directory rename within same directory
9554 * inum - directory rename to new directory of given inode number
9555 * When renaming to a new directory, we are both deleting and
9556 * creating a new directory entry, so the link count on the new
9557 * directory should not change. Thus we do not need the followup
9558 * dirrem which is usually done in handle_workitem_remove. We set
9559 * the DIRCHG flag to tell handle_workitem_remove to skip the
9560 * followup dirrem.
9561 */
9562 if (isrmdir > 1)
9563 dirrem->dm_state |= DIRCHG;
9564
9565 /*
9566 * Whiteouts have no additional dependencies,
9567 * so just put the dirrem on the correct list.
9568 */
9569 if (newinum == UFS_WINO) {
9570 if ((dirrem->dm_state & COMPLETE) == 0) {
9571 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
9572 dm_next);
9573 } else {
9574 dirrem->dm_dirinum = pagedep->pd_ino;
9575 if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9576 add_to_worklist(&dirrem->dm_list, 0);
9577 }
9578 FREE_LOCK(ump);
9579 return;
9580 }
9581 /*
9582 * Add the dirrem to the inodedep's pending remove list for quick
9583 * discovery later. A valid nlinkdelta ensures that this lookup
9584 * will not fail.
9585 */
9586 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
9587 panic("softdep_setup_directory_change: Lost inodedep.");
9588 dirrem->dm_state |= ONDEPLIST;
9589 LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9590
9591 /*
9592 * If the COMPLETE flag is clear, then there were no active
9593 * entries and we want to roll back to the previous inode until
9594 * the new inode is committed to disk. If the COMPLETE flag is
9595 * set, then we have deleted an entry that never made it to disk.
9596 * If the entry we deleted resulted from a name change, then the old
9597 * inode reference still resides on disk. Any rollback that we do
9598 * needs to be to that old inode (returned to us in prevdirrem). If
9599 * the entry we deleted resulted from a create, then there is
9600 * no entry on the disk, so we want to roll back to zero rather
9601 * than the uncommitted inode. In either of the COMPLETE cases we
9602 * want to immediately free the unwritten and unreferenced inode.
9603 */
9604 if ((dirrem->dm_state & COMPLETE) == 0) {
9605 dap->da_previous = dirrem;
9606 } else {
9607 if (prevdirrem != NULL) {
9608 dap->da_previous = prevdirrem;
9609 } else {
9610 dap->da_state &= ~DIRCHG;
9611 dap->da_pagedep = pagedep;
9612 }
9613 dirrem->dm_dirinum = pagedep->pd_ino;
9614 if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9615 add_to_worklist(&dirrem->dm_list, 0);
9616 }
9617 /*
9618 	 * Look up the jaddref for this journal entry. We must finish
9619 * initializing it and make the diradd write dependent on it.
9620 * If we're not journaling, put it on the id_bufwait list if the
9621 * inode is not yet written. If it is written, do the post-inode
9622 * write processing to put it on the id_pendinghd list.
9623 */
9624 inodedep_lookup(mp, newinum, DEPALLOC, &inodedep);
9625 if (MOUNTEDSUJ(mp)) {
9626 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
9627 inoreflst);
9628 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
9629 ("softdep_setup_directory_change: bad jaddref %p",
9630 jaddref));
9631 jaddref->ja_diroff = I_OFFSET(dp);
9632 jaddref->ja_diradd = dap;
9633 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9634 dap, da_pdlist);
9635 add_to_journal(&jaddref->ja_list);
9636 } else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
9637 dap->da_state |= COMPLETE;
9638 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
9639 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
9640 } else {
9641 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9642 dap, da_pdlist);
9643 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
9644 }
9645 /*
9646 * If we're making a new name for a directory that has not been
9647 	 * committed, we need to move the dot and dotdot references to
9648 * this new name.
9649 */
9650 if (inodedep->id_mkdiradd && I_OFFSET(dp) != DOTDOT_OFFSET)
9651 merge_diradd(inodedep, dap);
9652 FREE_LOCK(ump);
9653 }
9654
9655 /*
9656 * Called whenever the link count on an inode is changed.
9657 * It creates an inode dependency so that the new reference(s)
9658 * to the inode cannot be committed to disk until the updated
9659 * inode has been written.
9660 */
9661 void
9662 softdep_change_linkcnt(
9663 struct inode *ip) /* the inode with the increased link count */
9664 {
9665 struct inodedep *inodedep;
9666 struct ufsmount *ump;
9667
9668 ump = ITOUMP(ip);
9669 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
9670 ("softdep_change_linkcnt called on non-softdep filesystem"));
9671 ACQUIRE_LOCK(ump);
9672 inodedep_lookup(UFSTOVFS(ump), ip->i_number, DEPALLOC, &inodedep);
9673 if (ip->i_nlink < ip->i_effnlink)
9674 panic("softdep_change_linkcnt: bad delta");
9675 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9676 FREE_LOCK(ump);
9677 }
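/*
 * Illustrative caller sketch (not compiled): the effective link count
 * changes immediately while i_nlink lags until the change is safe on
 * disk, leaving id_nlinkdelta to record the difference. For one
 * removed name:
 */
#if 0
	ip->i_effnlink--;		/* name is gone for new lookups */
	softdep_change_linkcnt(ip);	/* id_nlinkdelta becomes 1 */
	/* ... i_nlink itself drops later, in handle_workitem_remove() ... */
#endif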
9678
9679 /*
9680 * Attach a sbdep dependency to the superblock buf so that we can keep
9681 * track of the head of the linked list of referenced but unlinked inodes.
9682 */
9683 void
9684 softdep_setup_sbupdate(
9685 struct ufsmount *ump,
9686 struct fs *fs,
9687 struct buf *bp)
9688 {
9689 struct sbdep *sbdep;
9690 struct worklist *wk;
9691
9692 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
9693 ("softdep_setup_sbupdate called on non-softdep filesystem"));
9694 LIST_FOREACH(wk, &bp->b_dep, wk_list)
9695 if (wk->wk_type == D_SBDEP)
9696 break;
9697 if (wk != NULL)
9698 return;
9699 sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS);
9700 workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump));
9701 sbdep->sb_fs = fs;
9702 sbdep->sb_ump = ump;
9703 ACQUIRE_LOCK(ump);
9704 WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list);
9705 FREE_LOCK(ump);
9706 }
9707
9708 /*
9709 * Return the first unlinked inodedep which is ready to be the head of the
9710 * list. The inodedep and all those after it must have valid next pointers.
9711 */
9712 static struct inodedep *
9713 first_unlinked_inodedep(struct ufsmount *ump)
9714 {
9715 struct inodedep *inodedep;
9716 struct inodedep *idp;
9717
9718 LOCK_OWNED(ump);
9719 for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst);
9720 inodedep; inodedep = idp) {
9721 if ((inodedep->id_state & UNLINKNEXT) == 0)
9722 return (NULL);
9723 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9724 if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0)
9725 break;
9726 if ((inodedep->id_state & UNLINKPREV) == 0)
9727 break;
9728 }
9729 return (inodedep);
9730 }
9731
9732 /*
9733 * Set the sujfree unlinked head pointer prior to writing a superblock.
9734 */
9735 static void
9736 initiate_write_sbdep(struct sbdep *sbdep)
9737 {
9738 struct inodedep *inodedep;
9739 struct fs *bpfs;
9740 struct fs *fs;
9741
9742 bpfs = sbdep->sb_fs;
9743 fs = sbdep->sb_ump->um_fs;
9744 inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9745 if (inodedep) {
9746 fs->fs_sujfree = inodedep->id_ino;
9747 inodedep->id_state |= UNLINKPREV;
9748 } else
9749 fs->fs_sujfree = 0;
9750 bpfs->fs_sujfree = fs->fs_sujfree;
9751 /*
9752 * Because we have made changes to the superblock, we need to
9753 * recompute its check-hash.
9754 */
9755 bpfs->fs_ckhash = ffs_calc_sbhash(bpfs);
9756 }
9757
9758 /*
9759 * After a superblock is written determine whether it must be written again
9760 * due to a changing unlinked list head.
9761 */
9762 static int
9763 handle_written_sbdep(struct sbdep *sbdep, struct buf *bp)
9764 {
9765 struct inodedep *inodedep;
9766 struct fs *fs;
9767
9768 LOCK_OWNED(sbdep->sb_ump);
9769 fs = sbdep->sb_fs;
9770 /*
9771 	 * If the superblock doesn't match the in-memory list, start over.
9772 */
9773 inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9774 if ((inodedep && fs->fs_sujfree != inodedep->id_ino) ||
9775 (inodedep == NULL && fs->fs_sujfree != 0)) {
9776 bdirty(bp);
9777 return (1);
9778 }
9779 WORKITEM_FREE(sbdep, D_SBDEP);
9780 if (fs->fs_sujfree == 0)
9781 return (0);
9782 /*
9783 	 * Now that we have a record of this inode in stable store, allow it
9784 * to be written to free up pending work. Inodes may see a lot of
9785 * write activity after they are unlinked which we must not hold up.
9786 */
9787 for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
9788 if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS)
9789 panic("handle_written_sbdep: Bad inodedep %p (0x%X)",
9790 inodedep, inodedep->id_state);
9791 if (inodedep->id_state & UNLINKONLIST)
9792 break;
9793 inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST;
9794 }
9795
9796 return (0);
9797 }
9798
9799 /*
9800 * Mark an inodedep as unlinked and insert it into the in-memory unlinked list.
9801 */
9802 static void
9803 unlinked_inodedep(struct mount *mp, struct inodedep *inodedep)
9804 {
9805 struct ufsmount *ump;
9806
9807 ump = VFSTOUFS(mp);
9808 LOCK_OWNED(ump);
9809 if (MOUNTEDSUJ(mp) == 0)
9810 return;
9811 ump->um_fs->fs_fmod = 1;
9812 if (inodedep->id_state & UNLINKED)
9813 panic("unlinked_inodedep: %p already unlinked\n", inodedep);
9814 inodedep->id_state |= UNLINKED;
9815 TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked);
9816 }
9817
9818 /*
9819 * Remove an inodedep from the unlinked inodedep list. This may require
9820 * disk writes if the inode has made it that far.
9821 */
9822 static void
9823 clear_unlinked_inodedep(struct inodedep *inodedep)
9824 {
9825 struct ufs2_dinode *dip;
9826 struct ufsmount *ump;
9827 struct inodedep *idp;
9828 struct inodedep *idn;
9829 struct fs *fs, *bpfs;
9830 struct buf *bp;
9831 daddr_t dbn;
9832 ino_t ino;
9833 ino_t nino;
9834 ino_t pino;
9835 int error;
9836
9837 ump = VFSTOUFS(inodedep->id_list.wk_mp);
9838 fs = ump->um_fs;
9839 ino = inodedep->id_ino;
9840 error = 0;
9841 for (;;) {
9842 LOCK_OWNED(ump);
9843 KASSERT((inodedep->id_state & UNLINKED) != 0,
9844 ("clear_unlinked_inodedep: inodedep %p not unlinked",
9845 inodedep));
9846 /*
9847 		 * If nothing has yet been written, simply remove us from
9848 		 * the in-memory list and return. This is the most common
9849 * case where handle_workitem_remove() loses the final
9850 * reference.
9851 */
9852 if ((inodedep->id_state & UNLINKLINKS) == 0)
9853 break;
9854 /*
9855 		 * If we have a NEXT pointer and no PREV pointer, we can simply
9856 * clear NEXT's PREV and remove ourselves from the list. Be
9857 * careful not to clear PREV if the superblock points at
9858 * next as well.
9859 */
9860 idn = TAILQ_NEXT(inodedep, id_unlinked);
9861 if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) {
9862 if (idn && fs->fs_sujfree != idn->id_ino)
9863 idn->id_state &= ~UNLINKPREV;
9864 break;
9865 }
9866 /*
9867 * Here we have an inodedep which is actually linked into
9868 * the list. We must remove it by forcing a write to the
9869 * link before us, whether it be the superblock or an inode.
9870 * Unfortunately the list may change while we're waiting
9871 		 * on the buf lock for either resource, so we must loop until
9872 * we lock the right one. If both the superblock and an
9873 * inode point to this inode we must clear the inode first
9874 * followed by the superblock.
9875 */
9876 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9877 pino = 0;
9878 if (idp && (idp->id_state & UNLINKNEXT))
9879 pino = idp->id_ino;
9880 FREE_LOCK(ump);
9881 if (pino == 0) {
9882 bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9883 (int)fs->fs_sbsize, 0, 0, 0);
9884 } else {
9885 dbn = fsbtodb(fs, ino_to_fsba(fs, pino));
9886 error = ffs_breadz(ump, ump->um_devvp, dbn, dbn,
9887 (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL,
9888 &bp);
9889 }
9890 ACQUIRE_LOCK(ump);
9891 if (error)
9892 break;
9893 /* If the list has changed restart the loop. */
9894 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9895 nino = 0;
9896 if (idp && (idp->id_state & UNLINKNEXT))
9897 nino = idp->id_ino;
9898 if (nino != pino ||
9899 (inodedep->id_state & UNLINKPREV) != UNLINKPREV) {
9900 FREE_LOCK(ump);
9901 brelse(bp);
9902 ACQUIRE_LOCK(ump);
9903 continue;
9904 }
9905 nino = 0;
9906 idn = TAILQ_NEXT(inodedep, id_unlinked);
9907 if (idn)
9908 nino = idn->id_ino;
9909 /*
9910 		 * Remove us from the in-memory list. After this we cannot
9911 * access the inodedep.
9912 */
9913 KASSERT((inodedep->id_state & UNLINKED) != 0,
9914 ("clear_unlinked_inodedep: inodedep %p not unlinked",
9915 inodedep));
9916 inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9917 TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9918 FREE_LOCK(ump);
9919 /*
9920 * The predecessor's next pointer is manually updated here
9921 * so that the NEXT flag is never cleared for an element
9922 * that is in the list.
9923 */
9924 if (pino == 0) {
9925 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9926 bpfs = (struct fs *)bp->b_data;
9927 ffs_oldfscompat_write(bpfs, ump);
9928 softdep_setup_sbupdate(ump, bpfs, bp);
9929 /*
9930 * Because we may have made changes to the superblock,
9931 * we need to recompute its check-hash.
9932 */
9933 bpfs->fs_ckhash = ffs_calc_sbhash(bpfs);
9934 } else if (fs->fs_magic == FS_UFS1_MAGIC) {
9935 ((struct ufs1_dinode *)bp->b_data +
9936 ino_to_fsbo(fs, pino))->di_freelink = nino;
9937 } else {
9938 dip = (struct ufs2_dinode *)bp->b_data +
9939 ino_to_fsbo(fs, pino);
9940 dip->di_freelink = nino;
9941 ffs_update_dinode_ckhash(fs, dip);
9942 }
9943 /*
9944 * If the bwrite fails we have no recourse to recover. The
9945 * filesystem is corrupted already.
9946 */
9947 bwrite(bp);
9948 ACQUIRE_LOCK(ump);
9949 /*
9950 		 * If the superblock pointer still needs to be cleared, force
9951 * a write here.
9952 */
9953 if (fs->fs_sujfree == ino) {
9954 FREE_LOCK(ump);
9955 bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9956 (int)fs->fs_sbsize, 0, 0, 0);
9957 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9958 bpfs = (struct fs *)bp->b_data;
9959 ffs_oldfscompat_write(bpfs, ump);
9960 softdep_setup_sbupdate(ump, bpfs, bp);
9961 /*
9962 * Because we may have made changes to the superblock,
9963 * we need to recompute its check-hash.
9964 */
9965 bpfs->fs_ckhash = ffs_calc_sbhash(bpfs);
9966 bwrite(bp);
9967 ACQUIRE_LOCK(ump);
9968 }
9969
9970 if (fs->fs_sujfree != ino)
9971 return;
9972 panic("clear_unlinked_inodedep: Failed to clear free head");
9973 }
9974 if (inodedep->id_ino == fs->fs_sujfree)
9975 panic("clear_unlinked_inodedep: Freeing head of free list");
9976 inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9977 TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9978 return;
9979 }
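/*
 * Illustrative sketch (not compiled; fs and dip as above): on disk the
 * unlinked set is a singly linked list headed by fs_sujfree and chained
 * through each dinode's di_freelink; the writes above splice one inode
 * out of that list.
 */
#if 0
	for (ino = fs->fs_sujfree; ino != 0; ino = dip->di_freelink) {
		/* ... read the on-disk dinode 'dip' for 'ino' ... */
	}
#endif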
9980
9981 /*
9982 * This workitem decrements the inode's link count.
9983 * If the link count reaches zero, the file is removed.
9984 */
9985 static int
9986 handle_workitem_remove(struct dirrem *dirrem, int flags)
9987 {
9988 struct inodedep *inodedep;
9989 struct workhead dotdotwk;
9990 struct worklist *wk;
9991 struct ufsmount *ump;
9992 struct mount *mp;
9993 struct vnode *vp;
9994 struct inode *ip;
9995 ino_t oldinum;
9996
9997 if (dirrem->dm_state & ONWORKLIST)
9998 panic("handle_workitem_remove: dirrem %p still on worklist",
9999 dirrem);
10000 oldinum = dirrem->dm_oldinum;
10001 mp = dirrem->dm_list.wk_mp;
10002 ump = VFSTOUFS(mp);
10003 flags |= LK_EXCLUSIVE;
10004 if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ |
10005 FFSV_FORCEINODEDEP) != 0)
10006 return (EBUSY);
10007 ip = VTOI(vp);
10008 MPASS(ip->i_mode != 0);
10009 ACQUIRE_LOCK(ump);
10010 if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0)
10011 panic("handle_workitem_remove: lost inodedep");
10012 if (dirrem->dm_state & ONDEPLIST)
10013 LIST_REMOVE(dirrem, dm_inonext);
10014 KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
10015 ("handle_workitem_remove: Journal entries not written."));
10016
10017 /*
10018 * Move all dependencies waiting on the remove to complete
10019 * from the dirrem to the inode inowait list to be completed
10020 * after the inode has been updated and written to disk.
10021 *
10022 	 * Any items marked MKDIR_PARENT are saved to be completed when the
10023 	 * dotdot ref is removed, unless DIRCHG is specified. For
10024 * directory change operations there will be no further
10025 * directory writes and the jsegdeps need to be moved along
10026 * with the rest to be completed when the inode is free or
10027 * stable in the inode free list.
10028 */
10029 LIST_INIT(&dotdotwk);
10030 while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) {
10031 WORKLIST_REMOVE(wk);
10032 if ((dirrem->dm_state & DIRCHG) == 0 &&
10033 wk->wk_state & MKDIR_PARENT) {
10034 wk->wk_state &= ~MKDIR_PARENT;
10035 WORKLIST_INSERT(&dotdotwk, wk);
10036 continue;
10037 }
10038 WORKLIST_INSERT(&inodedep->id_inowait, wk);
10039 }
10040 LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list);
10041 /*
10042 * Normal file deletion.
10043 */
10044 if ((dirrem->dm_state & RMDIR) == 0) {
10045 ip->i_nlink--;
10046 KASSERT(ip->i_nlink >= 0, ("handle_workitem_remove: file ino "
10047 "%ju negative i_nlink %d", (intmax_t)ip->i_number,
10048 ip->i_nlink));
10049 DIP_SET(ip, i_nlink, ip->i_nlink);
10050 UFS_INODE_SET_FLAG(ip, IN_CHANGE);
10051 if (ip->i_nlink < ip->i_effnlink)
10052 panic("handle_workitem_remove: bad file delta");
10053 if (ip->i_nlink == 0)
10054 unlinked_inodedep(mp, inodedep);
10055 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
10056 KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
10057 ("handle_workitem_remove: worklist not empty. %s",
10058 TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
10059 WORKITEM_FREE(dirrem, D_DIRREM);
10060 FREE_LOCK(ump);
10061 goto out;
10062 }
10063 /*
10064 * Directory deletion. Decrement reference count for both the
10065 * just deleted parent directory entry and the reference for ".".
10066 * Arrange to have the reference count on the parent decremented
10067 * to account for the loss of "..".
10068 */
10069 ip->i_nlink -= 2;
10070 KASSERT(ip->i_nlink >= 0, ("handle_workitem_remove: directory ino "
10071 "%ju negative i_nlink %d", (intmax_t)ip->i_number, ip->i_nlink));
10072 DIP_SET(ip, i_nlink, ip->i_nlink);
10073 UFS_INODE_SET_FLAG(ip, IN_CHANGE);
10074 if (ip->i_nlink < ip->i_effnlink)
10075 panic("handle_workitem_remove: bad dir delta");
10076 if (ip->i_nlink == 0)
10077 unlinked_inodedep(mp, inodedep);
10078 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
10079 /*
10080 	 * Rename a directory to a new parent. Since we are both deleting
10081 * and creating a new directory entry, the link count on the new
10082 * directory should not change. Thus we skip the followup dirrem.
10083 */
10084 if (dirrem->dm_state & DIRCHG) {
10085 KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
10086 ("handle_workitem_remove: DIRCHG and worklist not empty."));
10087 WORKITEM_FREE(dirrem, D_DIRREM);
10088 FREE_LOCK(ump);
10089 goto out;
10090 }
10091 dirrem->dm_state = ONDEPLIST;
10092 dirrem->dm_oldinum = dirrem->dm_dirinum;
10093 /*
10094 * Place the dirrem on the parent's diremhd list.
10095 */
10096 if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0)
10097 panic("handle_workitem_remove: lost dir inodedep");
10098 LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
10099 /*
10100 * If the allocated inode has never been written to disk, then
10101 * the on-disk inode is zero'ed and we can remove the file
10102 	 * immediately. When journaling, if the inode has been marked
10103 	 * unlinked and is not DEPCOMPLETE, we know it can never be written.
10104 */
10105 inodedep_lookup(mp, oldinum, 0, &inodedep);
10106 if (inodedep == NULL ||
10107 (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED ||
10108 check_inode_unwritten(inodedep)) {
10109 FREE_LOCK(ump);
10110 vput(vp);
10111 		return (handle_workitem_remove(dirrem, flags));
10112 }
10113 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
10114 FREE_LOCK(ump);
10115 UFS_INODE_SET_FLAG(ip, IN_CHANGE);
10116 out:
10117 ffs_update(vp, 0);
10118 vput(vp);
10119 return (0);
10120 }
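/*
 * Worked example (counts assumed): removing an empty directory whose
 * i_nlink is 2 (its name plus ".") drops it straight to zero above,
 * while the parent loses its ".." reference through the follow-up
 * dirrem queued on the parent's id_dirremhd list.
 */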
10121
10122 /*
10123 * Inode de-allocation dependencies.
10124 *
10125 * When an inode's link count is reduced to zero, it can be de-allocated. We
10126 * found it convenient to postpone de-allocation until after the inode is
10127 * written to disk with its new link count (zero). At this point, all of the
10128 * on-disk inode's block pointers are nullified and, with careful dependency
10129 * list ordering, all dependencies related to the inode will be satisfied and
10130 * the corresponding dependency structures de-allocated. So, if/when the
10131 * inode is reused, there will be no mixing of old dependencies with new
10132 * ones. This artificial dependency is set up by the block de-allocation
10133 * procedure above (softdep_setup_freeblocks) and completed by the
10134 * following procedure.
10135 */
10136 static void
10137 handle_workitem_freefile(struct freefile *freefile)
10138 {
10139 struct workhead wkhd;
10140 struct fs *fs;
10141 struct ufsmount *ump;
10142 int error;
10143 #ifdef INVARIANTS
10144 struct inodedep *idp;
10145 #endif
10146
10147 ump = VFSTOUFS(freefile->fx_list.wk_mp);
10148 fs = ump->um_fs;
10149 #ifdef INVARIANTS
10150 ACQUIRE_LOCK(ump);
10151 error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp);
10152 FREE_LOCK(ump);
10153 if (error)
10154 panic("handle_workitem_freefile: inodedep %p survived", idp);
10155 #endif
10156 UFS_LOCK(ump);
10157 fs->fs_pendinginodes -= 1;
10158 UFS_UNLOCK(ump);
10159 LIST_INIT(&wkhd);
10160 LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list);
10161 if ((error = ffs_freefile(ump, fs, freefile->fx_devvp,
10162 freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0)
10163 softdep_error("handle_workitem_freefile", error);
10164 ACQUIRE_LOCK(ump);
10165 WORKITEM_FREE(freefile, D_FREEFILE);
10166 FREE_LOCK(ump);
10167 }
10168
10169 /*
10170 * Helper function which unlinks marker element from work list and returns
10171 * the next element on the list.
10172 */
10173 static __inline struct worklist *
10174 markernext(struct worklist *marker)
10175 {
10176 struct worklist *next;
10177
10178 next = LIST_NEXT(marker, wk_list);
10179 LIST_REMOVE(marker, wk_list);
10180 	return (next);
10181 }
10182
10183 /*
10184 * Disk writes.
10185 *
10186 * The dependency structures constructed above are most actively used when file
10187 * system blocks are written to disk. No constraints are placed on when a
10188 * block can be written, but unsatisfied update dependencies are made safe by
10189 * modifying (or replacing) the source memory for the duration of the disk
10190 * write. When the disk write completes, the memory block is again brought
10191 * up-to-date.
10192 *
10193 * In-core inode structure reclamation.
10194 *
10195 * Because there are a finite number of "in-core" inode structures, they are
10196 * reused regularly. By transferring all inode-related dependencies to the
10197 * in-memory inode block and indexing them separately (via "inodedep"s), we
10198 * can allow "in-core" inode structures to be reused at any time and avoid
10199 * any increase in contention.
10200 *
10201 * Called just before entering the device driver to initiate a new disk I/O.
10202 * The buffer must be locked, thus, no I/O completion operations can occur
10203 * while we are manipulating its associated dependencies.
10204 */
10205 static void
10206 softdep_disk_io_initiation(
10207 struct buf *bp) /* structure describing disk write to occur */
10208 {
10209 struct worklist *wk;
10210 struct worklist marker;
10211 struct inodedep *inodedep;
10212 struct freeblks *freeblks;
10213 struct jblkdep *jblkdep;
10214 struct newblk *newblk;
10215 struct ufsmount *ump;
10216
10217 /*
10218 * We only care about write operations. There should never
10219 * be dependencies for reads.
10220 */
10221 if (bp->b_iocmd != BIO_WRITE)
10222 panic("softdep_disk_io_initiation: not write");
10223
10224 if (bp->b_vflags & BV_BKGRDINPROG)
10225 panic("softdep_disk_io_initiation: Writing buffer with "
10226 "background write in progress: %p", bp);
10227
10228 ump = softdep_bp_to_mp(bp);
10229 if (ump == NULL)
10230 return;
10231
10232 marker.wk_type = D_LAST + 1; /* Not a normal workitem */
10233 PHOLD(curproc); /* Don't swap out kernel stack */
10234 ACQUIRE_LOCK(ump);
10235 /*
10236 * Do any necessary pre-I/O processing.
10237 */
10238 for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
10239 wk = markernext(&marker)) {
10240 LIST_INSERT_AFTER(wk, &marker, wk_list);
10241 switch (wk->wk_type) {
10242 case D_PAGEDEP:
10243 initiate_write_filepage(WK_PAGEDEP(wk), bp);
10244 continue;
10245
10246 case D_INODEDEP:
10247 inodedep = WK_INODEDEP(wk);
10248 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC)
10249 initiate_write_inodeblock_ufs1(inodedep, bp);
10250 else
10251 initiate_write_inodeblock_ufs2(inodedep, bp);
10252 continue;
10253
10254 case D_INDIRDEP:
10255 initiate_write_indirdep(WK_INDIRDEP(wk), bp);
10256 continue;
10257
10258 case D_BMSAFEMAP:
10259 initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp);
10260 continue;
10261
10262 case D_JSEG:
10263 WK_JSEG(wk)->js_buf = NULL;
10264 continue;
10265
10266 case D_FREEBLKS:
10267 freeblks = WK_FREEBLKS(wk);
10268 jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd);
10269 /*
10270 * We have to wait for the freeblks to be journaled
10271 * before we can write an inodeblock with updated
10272 * pointers. Be careful to arrange the marker so
10273 * we revisit the freeblks if it's not removed by
10274 * the first jwait().
10275 */
10276 if (jblkdep != NULL) {
10277 LIST_REMOVE(&marker, wk_list);
10278 LIST_INSERT_BEFORE(wk, &marker, wk_list);
10279 jwait(&jblkdep->jb_list, MNT_WAIT);
10280 }
10281 continue;
10282 case D_ALLOCDIRECT:
10283 case D_ALLOCINDIR:
10284 /*
10285 * We have to wait for the jnewblk to be journaled
10286 * before we can write to a block if the contents
10287 * may be confused with an earlier file's indirect
10288 * at recovery time. Handle the marker as described
10289 * above.
10290 */
10291 newblk = WK_NEWBLK(wk);
10292 if (newblk->nb_jnewblk != NULL &&
10293 indirblk_lookup(newblk->nb_list.wk_mp,
10294 newblk->nb_newblkno)) {
10295 LIST_REMOVE(&marker, wk_list);
10296 LIST_INSERT_BEFORE(wk, &marker, wk_list);
10297 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
10298 }
10299 continue;
10300
10301 case D_SBDEP:
10302 initiate_write_sbdep(WK_SBDEP(wk));
10303 continue;
10304
10305 case D_MKDIR:
10306 case D_FREEWORK:
10307 case D_FREEDEP:
10308 case D_JSEGDEP:
10309 continue;
10310
10311 default:
10312 			panic("softdep_disk_io_initiation: Unexpected type %s",
10313 TYPENAME(wk->wk_type));
10314 /* NOTREACHED */
10315 }
10316 }
10317 FREE_LOCK(ump);
10318 PRELE(curproc); /* Allow swapout of kernel stack */
10319 }
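/*
 * Illustrative sketch (not compiled; wk and marker as above): the marker
 * pattern lets the walk resume safely after b_dep changes while the lock
 * is dropped; markernext() unlinks the marker and yields the next item.
 */
#if 0
	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
	    wk = markernext(&marker)) {
		LIST_INSERT_AFTER(wk, &marker, wk_list);
		/* ... process wk; may sleep, dropping the lock ... */
	}
#endif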
10320
10321 /*
10322 * Called from within the procedure above to deal with unsatisfied
10323 * allocation dependencies in a directory. The buffer must be locked,
10324 * thus, no I/O completion operations can occur while we are
10325 * manipulating its associated dependencies.
10326 */
10327 static void
10328 initiate_write_filepage(struct pagedep *pagedep, struct buf *bp)
10329 {
10330 struct jremref *jremref;
10331 struct jmvref *jmvref;
10332 struct dirrem *dirrem;
10333 struct diradd *dap;
10334 struct direct *ep;
10335 int i;
10336
10337 if (pagedep->pd_state & IOSTARTED) {
10338 /*
10339 * This can only happen if there is a driver that does not
10340 * understand chaining. Here biodone will reissue the call
10341 * to strategy for the incomplete buffers.
10342 */
10343 printf("initiate_write_filepage: already started\n");
10344 return;
10345 }
10346 pagedep->pd_state |= IOSTARTED;
10347 /*
10348 * Wait for all journal remove dependencies to hit the disk.
10349 * We cannot allow any potentially conflicting directory adds
10350 * to be visible before the removes, as rollback is too difficult.
10351 * The per-filesystem lock may be dropped and re-acquired; however,
10352 * we hold the buf locked so the dependency cannot go away.
10353 */
10354 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next)
10355 while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL)
10356 jwait(&jremref->jr_list, MNT_WAIT);
10357 while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL)
10358 jwait(&jmvref->jm_list, MNT_WAIT);
10359 for (i = 0; i < DAHASHSZ; i++) {
10360 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
10361 ep = (struct direct *)
10362 ((char *)bp->b_data + dap->da_offset);
10363 if (ep->d_ino != dap->da_newinum)
10364 panic("%s: dir inum %ju != new %ju",
10365 "initiate_write_filepage",
10366 (uintmax_t)ep->d_ino,
10367 (uintmax_t)dap->da_newinum);
10368 if (dap->da_state & DIRCHG)
10369 ep->d_ino = dap->da_previous->dm_oldinum;
10370 else
10371 ep->d_ino = 0;
10372 dap->da_state &= ~ATTACHED;
10373 dap->da_state |= UNDONE;
10374 }
10375 }
10376 }
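
/*
 * Worked example of the entry rollback above, with assumed values for
 * illustration: a rename wrote new inode 99 over old inode 37 at byte
 * offset 512 of this directory block, so DIRCHG is set and, before the
 * write goes down, the entry is reverted to the old target:
 *
 *	struct direct *ep;
 *
 *	ep = (struct direct *)((char *)bp->b_data + 512);
 *	ep->d_ino = dap->da_previous->dm_oldinum;	(37)
 *	dap->da_state &= ~ATTACHED;
 *	dap->da_state |= UNDONE;
 *
 * Once the write completes, handle_written_filepage() rolls the entry
 * forward to inode 99 again.
 */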
10377
10378 /*
10379 * Version of initiate_write_inodeblock that handles UFS1 dinodes.
10380 * Note that any bug fixes made to this routine must be done in the
10381 * version found below.
10382 *
10383 * Called from within the procedure above to deal with unsatisfied
10384 * allocation dependencies in an inodeblock. The buffer must be
10385 * locked, thus, no I/O completion operations can occur while we
10386 * are manipulating its associated dependencies.
10387 */
10388 static void
10389 initiate_write_inodeblock_ufs1(
10390 struct inodedep *inodedep,
10391 struct buf *bp) /* The inode block */
10392 {
10393 struct allocdirect *adp, *lastadp;
10394 struct ufs1_dinode *dp;
10395 struct ufs1_dinode *sip;
10396 struct inoref *inoref;
10397 struct ufsmount *ump;
10398 struct fs *fs;
10399 ufs_lbn_t i;
10400 #ifdef INVARIANTS
10401 ufs_lbn_t prevlbn = 0;
10402 #endif
10403 int deplist __diagused;
10404
10405 if (inodedep->id_state & IOSTARTED)
10406 panic("initiate_write_inodeblock_ufs1: already started");
10407 inodedep->id_state |= IOSTARTED;
10408 fs = inodedep->id_fs;
10409 ump = VFSTOUFS(inodedep->id_list.wk_mp);
10410 LOCK_OWNED(ump);
10411 dp = (struct ufs1_dinode *)bp->b_data +
10412 ino_to_fsbo(fs, inodedep->id_ino);
10413
10414 /*
10415 * If we're on the unlinked list but have not yet written our
10416 * next pointer, initialize it here.
10417 */
10418 if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
10419 struct inodedep *inon;
10420
10421 inon = TAILQ_NEXT(inodedep, id_unlinked);
10422 dp->di_freelink = inon ? inon->id_ino : 0;
10423 }
10424 /*
10425 * If the bitmap is not yet written, then the allocated
10426 * inode cannot be written to disk.
10427 */
10428 if ((inodedep->id_state & DEPCOMPLETE) == 0) {
10429 if (inodedep->id_savedino1 != NULL)
10430 panic("initiate_write_inodeblock_ufs1: I/O underway");
10431 FREE_LOCK(ump);
10432 sip = malloc(sizeof(struct ufs1_dinode),
10433 M_SAVEDINO, M_SOFTDEP_FLAGS);
10434 ACQUIRE_LOCK(ump);
10435 inodedep->id_savedino1 = sip;
10436 *inodedep->id_savedino1 = *dp;
10437 bzero((caddr_t)dp, sizeof(struct ufs1_dinode));
10438 dp->di_gen = inodedep->id_savedino1->di_gen;
10439 dp->di_freelink = inodedep->id_savedino1->di_freelink;
10440 return;
10441 }
10442 /*
10443 * If no dependencies, then there is nothing to roll back.
10444 */
10445 inodedep->id_savedsize = dp->di_size;
10446 inodedep->id_savedextsize = 0;
10447 inodedep->id_savednlink = dp->di_nlink;
10448 if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
10449 TAILQ_EMPTY(&inodedep->id_inoreflst))
10450 return;
10451 /*
10452 * Revert the link count to that of the first unwritten journal entry.
10453 */
10454 inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
10455 if (inoref)
10456 dp->di_nlink = inoref->if_nlink;
10457 /*
10458 * Set the dependencies to busy.
10459 */
10460 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10461 adp = TAILQ_NEXT(adp, ad_next)) {
10462 #ifdef INVARIANTS
10463 if (deplist != 0 && prevlbn >= adp->ad_offset)
10464 panic("initiate_write_inodeblock_ufs1: lbn order");
10465 prevlbn = adp->ad_offset;
10466 if (adp->ad_offset < UFS_NDADDR &&
10467 dp->di_db[adp->ad_offset] != adp->ad_newblkno)
10468 panic("initiate_write_inodeblock_ufs1: "
10469 "direct pointer #%jd mismatch %d != %jd",
10470 (intmax_t)adp->ad_offset,
10471 dp->di_db[adp->ad_offset],
10472 (intmax_t)adp->ad_newblkno);
10473 if (adp->ad_offset >= UFS_NDADDR &&
10474 dp->di_ib[adp->ad_offset - UFS_NDADDR] != adp->ad_newblkno)
10475 panic("initiate_write_inodeblock_ufs1: "
10476 "indirect pointer #%jd mismatch %d != %jd",
10477 (intmax_t)adp->ad_offset - UFS_NDADDR,
10478 dp->di_ib[adp->ad_offset - UFS_NDADDR],
10479 (intmax_t)adp->ad_newblkno);
10480 deplist |= 1 << adp->ad_offset;
10481 if ((adp->ad_state & ATTACHED) == 0)
10482 panic("initiate_write_inodeblock_ufs1: "
10483 "Unknown state 0x%x", adp->ad_state);
10484 #endif /* INVARIANTS */
10485 adp->ad_state &= ~ATTACHED;
10486 adp->ad_state |= UNDONE;
10487 }
10488 /*
10489 * The on-disk inode cannot claim to be any larger than the last
10490 * fragment that has been written. Otherwise, the on-disk inode
10491 * might have fragments that were not the last block in the file
10492 * which would corrupt the filesystem.
10493 */
10494 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10495 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10496 if (adp->ad_offset >= UFS_NDADDR)
10497 break;
10498 dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10499 /* keep going until hitting a rollback to a frag */
10500 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10501 continue;
10502 dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10503 for (i = adp->ad_offset + 1; i < UFS_NDADDR; i++) {
10504 #ifdef INVARIANTS
10505 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10506 panic("initiate_write_inodeblock_ufs1: "
10507 "lost dep1");
10508 #endif /* INVARIANTS */
10509 dp->di_db[i] = 0;
10510 }
10511 for (i = 0; i < UFS_NIADDR; i++) {
10512 #ifdef INVARIANTS
10513 if (dp->di_ib[i] != 0 &&
10514 (deplist & ((1 << UFS_NDADDR) << i)) == 0)
10515 panic("initiate_write_inodeblock_ufs1: "
10516 "lost dep2");
10517 #endif /* INVARIANTS */
10518 dp->di_ib[i] = 0;
10519 }
10520 return;
10521 }
10522 /*
10523 * If we have zero'ed out the last allocated block of the file,
10524 * roll back the size to the last currently allocated block.
10525 * We know that this last allocated block is full-sized, as
10526 * we already checked for fragments in the loop above.
10527 */
10528 if (lastadp != NULL &&
10529 dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10530 for (i = lastadp->ad_offset; i >= 0; i--)
10531 if (dp->di_db[i] != 0)
10532 break;
10533 dp->di_size = (i + 1) * fs->fs_bsize;
10534 }
10535 /*
10536 * The only dependencies are for indirect blocks.
10537 *
10538 * The file size for indirect block additions is not guaranteed.
10539 * Such a guarantee would be non-trivial to achieve. The conventional
10540 * synchronous write implementation also does not make this guarantee.
10541 * Fsck should catch and fix discrepancies. Arguably, the file size
10542 * can be over-estimated without destroying integrity when the file
10543 * moves into the indirect blocks (i.e., is large). If we want to
10544 * postpone fsck, we are stuck with this argument.
10545 */
10546 for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10547 dp->di_ib[adp->ad_offset - UFS_NDADDR] = 0;
10548 }
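
/*
 * Worked example of the size rollback above, with assumed values: for
 * fs_bsize = 32768 and an allocdirect at ad_offset = 3 whose
 * ad_oldsize is an 8192-byte fragment, the inode goes to disk with
 *
 *	dp->di_size = 32768 * 3 + 8192;		(106496 bytes)
 *
 * and every direct and indirect pointer past that fragment zeroed, so
 * the on-disk inode never claims a fragment that is not the last
 * block of the file.
 */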
10549
10550 /*
10551 * Version of initiate_write_inodeblock that handles UFS2 dinodes.
10552 * Note that any bug fixes made to this routine must be done in the
10553 * version found above.
10554 *
10555 * Called from within the procedure above to deal with unsatisfied
10556 * allocation dependencies in an inodeblock. The buffer must be
10557 * locked, thus, no I/O completion operations can occur while we
10558 * are manipulating its associated dependencies.
10559 */
10560 static void
10561 initiate_write_inodeblock_ufs2(
10562 struct inodedep *inodedep,
10563 struct buf *bp) /* The inode block */
10564 {
10565 struct allocdirect *adp, *lastadp;
10566 struct ufs2_dinode *dp;
10567 struct ufs2_dinode *sip;
10568 struct inoref *inoref;
10569 struct ufsmount *ump;
10570 struct fs *fs;
10571 ufs_lbn_t i;
10572 #ifdef INVARIANTS
10573 ufs_lbn_t prevlbn = 0;
10574 #endif
10575 int deplist __diagused;
10576
10577 if (inodedep->id_state & IOSTARTED)
10578 panic("initiate_write_inodeblock_ufs2: already started");
10579 inodedep->id_state |= IOSTARTED;
10580 fs = inodedep->id_fs;
10581 ump = VFSTOUFS(inodedep->id_list.wk_mp);
10582 LOCK_OWNED(ump);
10583 dp = (struct ufs2_dinode *)bp->b_data +
10584 ino_to_fsbo(fs, inodedep->id_ino);
10585
10586 /*
10587 * If we're on the unlinked list but have not yet written our
10588 * next pointer, initialize it here.
10589 */
10590 if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
10591 struct inodedep *inon;
10592
10593 inon = TAILQ_NEXT(inodedep, id_unlinked);
10594 dp->di_freelink = inon ? inon->id_ino : 0;
10595 ffs_update_dinode_ckhash(fs, dp);
10596 }
10597 /*
10598 * If the bitmap is not yet written, then the allocated
10599 * inode cannot be written to disk.
10600 */
10601 if ((inodedep->id_state & DEPCOMPLETE) == 0) {
10602 if (inodedep->id_savedino2 != NULL)
10603 panic("initiate_write_inodeblock_ufs2: I/O underway");
10604 FREE_LOCK(ump);
10605 sip = malloc(sizeof(struct ufs2_dinode),
10606 M_SAVEDINO, M_SOFTDEP_FLAGS);
10607 ACQUIRE_LOCK(ump);
10608 inodedep->id_savedino2 = sip;
10609 *inodedep->id_savedino2 = *dp;
10610 bzero((caddr_t)dp, sizeof(struct ufs2_dinode));
10611 dp->di_gen = inodedep->id_savedino2->di_gen;
10612 dp->di_freelink = inodedep->id_savedino2->di_freelink;
10613 return;
10614 }
10615 /*
10616 * If no dependencies, then there is nothing to roll back.
10617 */
10618 inodedep->id_savedsize = dp->di_size;
10619 inodedep->id_savedextsize = dp->di_extsize;
10620 inodedep->id_savednlink = dp->di_nlink;
10621 if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
10622 TAILQ_EMPTY(&inodedep->id_extupdt) &&
10623 TAILQ_EMPTY(&inodedep->id_inoreflst))
10624 return;
10625 /*
10626 * Revert the link count to that of the first unwritten journal entry.
10627 */
10628 inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
10629 if (inoref)
10630 dp->di_nlink = inoref->if_nlink;
10631
10632 /*
10633 * Set the ext data dependencies to busy.
10634 */
10635 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10636 adp = TAILQ_NEXT(adp, ad_next)) {
10637 #ifdef INVARIANTS
10638 if (deplist != 0 && prevlbn >= adp->ad_offset)
10639 panic("initiate_write_inodeblock_ufs2: lbn order");
10640 prevlbn = adp->ad_offset;
10641 if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno)
10642 panic("initiate_write_inodeblock_ufs2: "
10643 "ext pointer #%jd mismatch %jd != %jd",
10644 (intmax_t)adp->ad_offset,
10645 (intmax_t)dp->di_extb[adp->ad_offset],
10646 (intmax_t)adp->ad_newblkno);
10647 deplist |= 1 << adp->ad_offset;
10648 if ((adp->ad_state & ATTACHED) == 0)
10649 panic("initiate_write_inodeblock_ufs2: Unknown "
10650 "state 0x%x", adp->ad_state);
10651 #endif /* INVARIANTS */
10652 adp->ad_state &= ~ATTACHED;
10653 adp->ad_state |= UNDONE;
10654 }
10655 /*
10656 * The on-disk inode cannot claim to be any larger than the last
10657 * fragment that has been written. Otherwise, the on-disk inode
10658 * might have fragments that were not the last block in the ext
10659 * data which would corrupt the filesystem.
10660 */
10661 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10662 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10663 dp->di_extb[adp->ad_offset] = adp->ad_oldblkno;
10664 /* keep going until hitting a rollback to a frag */
10665 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10666 continue;
10667 dp->di_extsize = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10668 for (i = adp->ad_offset + 1; i < UFS_NXADDR; i++) {
10669 #ifdef INVARIANTS
10670 if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0)
10671 panic("initiate_write_inodeblock_ufs2: "
10672 "lost dep1");
10673 #endif /* INVARIANTS */
10674 dp->di_extb[i] = 0;
10675 }
10676 lastadp = NULL;
10677 break;
10678 }
10679 /*
10680 * If we have zero'ed out the last allocated block of the ext
10681 * data, roll back the size to the last currently allocated block.
10682 * We know that this last allocated block is full-sized, as
10683 * we already checked for fragments in the loop above.
10684 */
10685 if (lastadp != NULL &&
10686 dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10687 for (i = lastadp->ad_offset; i >= 0; i--)
10688 if (dp->di_extb[i] != 0)
10689 break;
10690 dp->di_extsize = (i + 1) * fs->fs_bsize;
10691 }
10692 /*
10693 * Set the file data dependencies to busy.
10694 */
10695 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10696 adp = TAILQ_NEXT(adp, ad_next)) {
10697 #ifdef INVARIANTS
10698 if (deplist != 0 && prevlbn >= adp->ad_offset)
10699 panic("initiate_write_inodeblock_ufs2: lbn order");
10700 if ((adp->ad_state & ATTACHED) == 0)
10701 panic("inodedep %p and adp %p not attached", inodedep, adp);
10702 prevlbn = adp->ad_offset;
10703 if (!ffs_fsfail_cleanup(ump, 0) &&
10704 adp->ad_offset < UFS_NDADDR &&
10705 dp->di_db[adp->ad_offset] != adp->ad_newblkno)
10706 panic("initiate_write_inodeblock_ufs2: "
10707 "direct pointer #%jd mismatch %jd != %jd",
10708 (intmax_t)adp->ad_offset,
10709 (intmax_t)dp->di_db[adp->ad_offset],
10710 (intmax_t)adp->ad_newblkno);
10711 if (!ffs_fsfail_cleanup(ump, 0) &&
10712 adp->ad_offset >= UFS_NDADDR &&
10713 dp->di_ib[adp->ad_offset - UFS_NDADDR] != adp->ad_newblkno)
10714 panic("initiate_write_inodeblock_ufs2: "
10715 "indirect pointer #%jd mismatch %jd != %jd",
10716 (intmax_t)adp->ad_offset - UFS_NDADDR,
10717 (intmax_t)dp->di_ib[adp->ad_offset - UFS_NDADDR],
10718 (intmax_t)adp->ad_newblkno);
10719 deplist |= 1 << adp->ad_offset;
10720 if ((adp->ad_state & ATTACHED) == 0)
10721 panic("initiate_write_inodeblock_ufs2: Unknown "
10722 "state 0x%x", adp->ad_state);
10723 #endif /* INVARIANTS */
10724 adp->ad_state &= ~ATTACHED;
10725 adp->ad_state |= UNDONE;
10726 }
10727 /*
10728 * The on-disk inode cannot claim to be any larger than the last
10729 * fragment that has been written. Otherwise, the on-disk inode
10730 * might have fragments that were not the last block in the file
10731 * which would corrupt the filesystem.
10732 */
10733 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10734 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10735 if (adp->ad_offset >= UFS_NDADDR)
10736 break;
10737 dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10738 /* keep going until hitting a rollback to a frag */
10739 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10740 continue;
10741 dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10742 for (i = adp->ad_offset + 1; i < UFS_NDADDR; i++) {
10743 #ifdef INVARIANTS
10744 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10745 panic("initiate_write_inodeblock_ufs2: "
10746 "lost dep2");
10747 #endif /* INVARIANTS */
10748 dp->di_db[i] = 0;
10749 }
10750 for (i = 0; i < UFS_NIADDR; i++) {
10751 #ifdef INVARIANTS
10752 if (dp->di_ib[i] != 0 &&
10753 (deplist & ((1 << UFS_NDADDR) << i)) == 0)
10754 panic("initiate_write_inodeblock_ufs2: "
10755 "lost dep3");
10756 #endif /* INVARIANTS */
10757 dp->di_ib[i] = 0;
10758 }
10759 ffs_update_dinode_ckhash(fs, dp);
10760 return;
10761 }
10762 /*
10763 * If we have zero'ed out the last allocated block of the file,
10764 * roll back the size to the last currently allocated block.
10765 * We know that this last allocated block is full-sized, as
10766 * we already checked for fragments in the loop above.
10767 */
10768 if (lastadp != NULL &&
10769 dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10770 for (i = lastadp->ad_offset; i >= 0; i--)
10771 if (dp->di_db[i] != 0)
10772 break;
10773 dp->di_size = (i + 1) * fs->fs_bsize;
10774 }
10775 /*
10776 * The only dependencies are for indirect blocks.
10777 *
10778 * The file size for indirect block additions is not guaranteed.
10779 * Such a guarantee would be non-trivial to achieve. The conventional
10780 * synchronous write implementation also does not make this guarantee.
10781 * Fsck should catch and fix discrepancies. Arguably, the file size
10782 * can be over-estimated without destroying integrity when the file
10783 * moves into the indirect blocks (i.e., is large). If we want to
10784 * postpone fsck, we are stuck with this argument.
10785 */
10786 for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10787 dp->di_ib[adp->ad_offset - UFS_NDADDR] = 0;
10788 ffs_update_dinode_ckhash(fs, dp);
10789 }
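
/*
 * Beyond the extra ext-data pass, the UFS2 version must also maintain
 * the dinode check-hash: every rollback edits the dinode after its
 * hash was last computed, so the paths above that publish live dinode
 * contents refresh it as their final step. A minimal sketch, where
 * rolled_back_size stands for whichever rollback value applies:
 *
 *	dp->di_size = rolled_back_size;
 *	ffs_update_dinode_ckhash(fs, dp);
 */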
10790
10791 /*
10792 * Cancel an indirdep as a result of truncation. Release all of the
10793 * children allocindirs and place their journal work on the appropriate
10794 * list.
10795 */
10796 static void
10797 cancel_indirdep(
10798 struct indirdep *indirdep,
10799 struct buf *bp,
10800 struct freeblks *freeblks)
10801 {
10802 struct allocindir *aip;
10803
10804 /*
10805 * None of the indirect pointers will ever be visible,
10806 * so they can simply be tossed. GOINGAWAY ensures
10807 * that allocated pointers will be saved in the buffer
10808 * cache until they are freed. Note that they can
10809 * only be found by their physical address,
10810 * since the inode mapping the logical address will
10811 * be gone.
10812 * was allocated in setup_allocindir_phase2 using
10813 * the physical address so it could be used for this
10814 * purpose. Hence we swap the safe copy with the real
10815 * copy, allowing the safe copy to be freed and holding
10816 * on to the real copy for later use in indir_trunc.
10817 */
10818 if (indirdep->ir_state & GOINGAWAY)
10819 panic("cancel_indirdep: already gone");
10820 if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
10821 indirdep->ir_state |= DEPCOMPLETE;
10822 LIST_REMOVE(indirdep, ir_next);
10823 }
10824 indirdep->ir_state |= GOINGAWAY;
10825 /*
10826 * Pass in bp for blocks that still have journal writes
10827 * pending so we can cancel them on their own.
10828 */
10829 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != NULL)
10830 cancel_allocindir(aip, bp, freeblks, 0);
10831 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL)
10832 cancel_allocindir(aip, NULL, freeblks, 0);
10833 while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL)
10834 cancel_allocindir(aip, NULL, freeblks, 0);
10835 while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL)
10836 cancel_allocindir(aip, NULL, freeblks, 0);
10837 /*
10838 * If there are pending partial truncations we need to keep the
10839 * old block copy around until they complete. This is because
10840 * the current b_data is not a perfect superset of the available
10841 * blocks.
10842 */
10843 if (TAILQ_EMPTY(&indirdep->ir_trunc))
10844 bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount);
10845 else
10846 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10847 WORKLIST_REMOVE(&indirdep->ir_list);
10848 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list);
10849 indirdep->ir_bp = NULL;
10850 indirdep->ir_freeblks = freeblks;
10851 }
10852
10853 /*
10854 * Free an indirdep once it no longer has new pointers to track.
10855 */
10856 static void
10857 free_indirdep(struct indirdep *indirdep)
10858 {
10859
10860 KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc),
10861 ("free_indirdep: Indir trunc list not empty."));
10862 KASSERT(LIST_EMPTY(&indirdep->ir_completehd),
10863 ("free_indirdep: Complete head not empty."));
10864 KASSERT(LIST_EMPTY(&indirdep->ir_writehd),
10865 ("free_indirdep: write head not empty."));
10866 KASSERT(LIST_EMPTY(&indirdep->ir_donehd),
10867 ("free_indirdep: done head not empty."));
10868 KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd),
10869 ("free_indirdep: deplist head not empty."));
10870 KASSERT((indirdep->ir_state & DEPCOMPLETE),
10871 ("free_indirdep: %p still on newblk list.", indirdep));
10872 KASSERT(indirdep->ir_saveddata == NULL,
10873 ("free_indirdep: %p still has saved data.", indirdep));
10874 KASSERT(indirdep->ir_savebp == NULL,
10875 ("free_indirdep: %p still has savebp buffer.", indirdep));
10876 if (indirdep->ir_state & ONWORKLIST)
10877 WORKLIST_REMOVE(&indirdep->ir_list);
10878 WORKITEM_FREE(indirdep, D_INDIRDEP);
10879 }
10880
10881 /*
10882 * Called before a write to an indirdep. This routine is responsible for
10883 * rolling back pointers to a safe state which includes only those
10884 * allocindirs which have been completed.
10885 */
10886 static void
10887 initiate_write_indirdep(struct indirdep *indirdep, struct buf *bp)
10888 {
10889 struct ufsmount *ump;
10890
10891 indirdep->ir_state |= IOSTARTED;
10892 if (indirdep->ir_state & GOINGAWAY)
10893 panic("initiate_write_indirdep: indirdep gone");
10894 /*
10895 * If there are no remaining dependencies, this will be writing
10896 * the real pointers.
10897 */
10898 if (LIST_EMPTY(&indirdep->ir_deplisthd) &&
10899 TAILQ_EMPTY(&indirdep->ir_trunc))
10900 return;
10901 /*
10902 * Replace up-to-date version with safe version.
10903 */
10904 if (indirdep->ir_saveddata == NULL) {
10905 ump = VFSTOUFS(indirdep->ir_list.wk_mp);
10906 LOCK_OWNED(ump);
10907 FREE_LOCK(ump);
10908 indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
10909 M_SOFTDEP_FLAGS);
10910 ACQUIRE_LOCK(ump);
10911 }
10912 indirdep->ir_state &= ~ATTACHED;
10913 indirdep->ir_state |= UNDONE;
10914 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10915 bcopy(indirdep->ir_savebp->b_data, bp->b_data,
10916 bp->b_bcount);
10917 }
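
/*
 * A minimal sketch of the swap performed above and undone in
 * handle_written_indirdep() below. The up-to-date pointers are
 * stashed in ir_saveddata while the safe copy, which holds only
 * pointers whose allocations are fully committed, is what reaches
 * the disk:
 *
 *	bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
 *	bcopy(indirdep->ir_savebp->b_data, bp->b_data, bp->b_bcount);
 *
 * and once the write completes:
 *
 *	bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
 *
 * so a crash between the two can never expose a pointer to a block
 * whose own dependencies are unsatisfied.
 */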
10918
10919 /*
10920 * Called when an inode has been cleared in a cg bitmap. This finally
10921 * eliminates any canceled jaddrefs.
10922 */
10923 void
10924 softdep_setup_inofree(struct mount *mp,
10925 struct buf *bp,
10926 ino_t ino,
10927 struct workhead *wkhd)
10928 {
10929 struct worklist *wk, *wkn;
10930 struct inodedep *inodedep;
10931 struct ufsmount *ump;
10932 uint8_t *inosused;
10933 struct cg *cgp;
10934 struct fs *fs;
10935
10936 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
10937 ("softdep_setup_inofree called on non-softdep filesystem"));
10938 ump = VFSTOUFS(mp);
10939 ACQUIRE_LOCK(ump);
10940 if (!ffs_fsfail_cleanup(ump, 0)) {
10941 fs = ump->um_fs;
10942 cgp = (struct cg *)bp->b_data;
10943 inosused = cg_inosused(cgp);
10944 if (isset(inosused, ino % fs->fs_ipg))
10945 panic("softdep_setup_inofree: inode %ju not freed.",
10946 (uintmax_t)ino);
10947 }
10948 if (inodedep_lookup(mp, ino, 0, &inodedep))
10949 panic("softdep_setup_inofree: ino %ju has existing inodedep %p",
10950 (uintmax_t)ino, inodedep);
10951 if (wkhd) {
10952 LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) {
10953 if (wk->wk_type != D_JADDREF)
10954 continue;
10955 WORKLIST_REMOVE(wk);
10956 /*
10957 * We can free immediately even if the jaddref
10958 * isn't attached in a background write as now
10959 * the bitmaps are reconciled.
10960 */
10961 wk->wk_state |= COMPLETE | ATTACHED;
10962 free_jaddref(WK_JADDREF(wk));
10963 }
10964 jwork_move(&bp->b_dep, wkhd);
10965 }
10966 FREE_LOCK(ump);
10967 }
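
/*
 * Worked example of the bitmap check above, with assumed values: for
 * fs_ipg = 32768 inodes per cylinder group, inode 70000 lives in cg 2
 * and maps to bit 70000 % 32768 = 4464 of that group's inosused map.
 * isset() on that bit means ffs_freefile() has not yet cleared it,
 * and the panic fires.
 */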
10968
10969 /*
10970 * Called via ffs_blkfree() after a set of frags has been cleared from a cg
10971 * map. Any dependencies waiting for the write to clear are added to the
10972 * buf's list and any jnewblks that are being canceled are discarded
10973 * immediately.
10974 */
10975 void
10976 softdep_setup_blkfree(
10977 struct mount *mp,
10978 struct buf *bp,
10979 ufs2_daddr_t blkno,
10980 int frags,
10981 struct workhead *wkhd)
10982 {
10983 struct bmsafemap *bmsafemap;
10984 struct jnewblk *jnewblk;
10985 struct ufsmount *ump;
10986 struct worklist *wk;
10987 struct fs *fs;
10988 #ifdef INVARIANTS
10989 uint8_t *blksfree;
10990 struct cg *cgp;
10991 ufs2_daddr_t jstart;
10992 ufs2_daddr_t jend;
10993 ufs2_daddr_t end;
10994 long bno;
10995 int i;
10996 #endif
10997
10998 CTR3(KTR_SUJ,
10999 "softdep_setup_blkfree: blkno %jd frags %d wk head %p",
11000 blkno, frags, wkhd);
11001
11002 ump = VFSTOUFS(mp);
11003 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
11004 ("softdep_setup_blkfree called on non-softdep filesystem"));
11005 ACQUIRE_LOCK(ump);
11006 /* Lookup the bmsafemap so we track when it is dirty. */
11007 fs = ump->um_fs;
11008 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
11009 /*
11010 * Detach any jnewblks which have been canceled. They must linger
11011 * until the bitmap is cleared again by ffs_blkfree() to prevent
11012 * an unjournaled allocation from hitting the disk.
11013 */
11014 if (wkhd) {
11015 while ((wk = LIST_FIRST(wkhd)) != NULL) {
11016 CTR2(KTR_SUJ,
11017 "softdep_setup_blkfree: blkno %jd wk type %d",
11018 blkno, wk->wk_type);
11019 WORKLIST_REMOVE(wk);
11020 if (wk->wk_type != D_JNEWBLK) {
11021 WORKLIST_INSERT(&bmsafemap->sm_freehd, wk);
11022 continue;
11023 }
11024 jnewblk = WK_JNEWBLK(wk);
11025 KASSERT(jnewblk->jn_state & GOINGAWAY,
11026 ("softdep_setup_blkfree: jnewblk not canceled."));
11027 #ifdef INVARIANTS
11028 /*
11029 * Assert that this block is free in the bitmap
11030 * before we discard the jnewblk.
11031 */
11032 cgp = (struct cg *)bp->b_data;
11033 blksfree = cg_blksfree(cgp);
11034 bno = dtogd(fs, jnewblk->jn_blkno);
11035 for (i = jnewblk->jn_oldfrags;
11036 i < jnewblk->jn_frags; i++) {
11037 if (isset(blksfree, bno + i))
11038 continue;
11039 panic("softdep_setup_blkfree: not free");
11040 }
11041 #endif
11042 /*
11043 * Even if it's not attached we can free immediately
11044 * as the new bitmap is correct.
11045 */
11046 wk->wk_state |= COMPLETE | ATTACHED;
11047 free_jnewblk(jnewblk);
11048 }
11049 }
11050
11051 #ifdef INVARIANTS
11052 /*
11053 * Assert that we are not freeing a block which has an outstanding
11054 * allocation dependency.
11055 */
11056 fs = VFSTOUFS(mp)->um_fs;
11057 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
11058 end = blkno + frags;
11059 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
11060 /*
11061 * Don't match against blocks that will be freed when the
11062 * background write is done.
11063 */
11064 if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) ==
11065 (COMPLETE | DEPCOMPLETE))
11066 continue;
11067 jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags;
11068 jend = jnewblk->jn_blkno + jnewblk->jn_frags;
11069 if ((blkno >= jstart && blkno < jend) ||
11070 (end > jstart && end <= jend)) {
11071 printf("state 0x%X %jd - %d %d dep %p\n",
11072 jnewblk->jn_state, (intmax_t)jnewblk->jn_blkno,
11073 jnewblk->jn_oldfrags, jnewblk->jn_frags,
11074 jnewblk->jn_dep);
11075 panic("softdep_setup_blkfree: "
11076 "%jd-%jd(%d) overlaps with %jd-%jd",
11077 (intmax_t)blkno, (intmax_t)end, frags, (intmax_t)jstart, (intmax_t)jend);
11078 }
11079 }
11080 #endif
11081 FREE_LOCK(ump);
11082 }
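
/*
 * Worked example of the INVARIANTS overlap check above, with assumed
 * values: a jnewblk with jn_blkno = 100, jn_oldfrags = 2 and
 * jn_frags = 5 covers fragments [102, 105). Freeing blkno = 104 with
 * frags = 2 gives end = 106; since 104 falls inside [102, 105), the
 * panic fires, catching a free of fragments whose allocation is still
 * pending in the journal.
 */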
11083
11084 /*
11085 * Revert a block allocation when the journal record that describes it
11086 * is not yet written.
11087 */
11088 static int
11089 jnewblk_rollback(
11090 struct jnewblk *jnewblk,
11091 struct fs *fs,
11092 struct cg *cgp,
11093 uint8_t *blksfree)
11094 {
11095 ufs1_daddr_t fragno;
11096 long cgbno, bbase;
11097 int frags, blk;
11098 int i;
11099
11100 frags = 0;
11101 cgbno = dtogd(fs, jnewblk->jn_blkno);
11102 /*
11103 * We have to test which frags need to be rolled back. We may
11104 * be operating on a stale copy when doing background writes.
11105 */
11106 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++)
11107 if (isclr(blksfree, cgbno + i))
11108 frags++;
11109 if (frags == 0)
11110 return (0);
11111 /*
11112 * This is mostly ffs_blkfree() sans some validation and
11113 * superblock updates.
11114 */
11115 if (frags == fs->fs_frag) {
11116 fragno = fragstoblks(fs, cgbno);
11117 ffs_setblock(fs, blksfree, fragno);
11118 ffs_clusteracct(fs, cgp, fragno, 1);
11119 cgp->cg_cs.cs_nbfree++;
11120 } else {
11121 cgbno += jnewblk->jn_oldfrags;
11122 bbase = cgbno - fragnum(fs, cgbno);
11123 /* Decrement the old frags. */
11124 blk = blkmap(fs, blksfree, bbase);
11125 ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
11126 /* Deallocate the fragment */
11127 for (i = 0; i < frags; i++)
11128 setbit(blksfree, cgbno + i);
11129 cgp->cg_cs.cs_nffree += frags;
11130 /* Add back in counts associated with the new frags */
11131 blk = blkmap(fs, blksfree, bbase);
11132 ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
11133 /* If a complete block has been reassembled, account for it. */
11134 fragno = fragstoblks(fs, bbase);
11135 if (ffs_isblock(fs, blksfree, fragno)) {
11136 cgp->cg_cs.cs_nffree -= fs->fs_frag;
11137 ffs_clusteracct(fs, cgp, fragno, 1);
11138 cgp->cg_cs.cs_nbfree++;
11139 }
11140 }
11141 stat_jnewblk++;
11142 jnewblk->jn_state &= ~ATTACHED;
11143 jnewblk->jn_state |= UNDONE;
11144
11145 return (frags);
11146 }
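
/*
 * Worked example, with assumed values: for fs_frag = 8 and a jnewblk
 * that grew a 2-fragment piece to 5 fragments (jn_oldfrags = 2,
 * jn_frags = 5), fragments 2..4 of the block are rolled back, so
 * frags = 3. If fragments 5..7 were already free, the free run
 * accounted in cg_frsum goes from length 3 to length 6,
 * cg_cs.cs_nffree grows by 3, and no full block is reassembled, so
 * cs_nbfree is unchanged.
 */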
11147
11148 static void
11149 initiate_write_bmsafemap(
11150 struct bmsafemap *bmsafemap,
11151 struct buf *bp) /* The cg block. */
11152 {
11153 struct jaddref *jaddref;
11154 struct jnewblk *jnewblk;
11155 uint8_t *inosused;
11156 uint8_t *blksfree;
11157 struct cg *cgp;
11158 struct fs *fs;
11159 ino_t ino;
11160
11161 /*
11162 * If this is a background write, we did this at the time that
11163 * the copy was made, so we do not need to do it again.
11164 */
11165 if (bmsafemap->sm_state & IOSTARTED)
11166 return;
11167 bmsafemap->sm_state |= IOSTARTED;
11168 /*
11169 * Clear any inode allocations which are pending journal writes.
11170 */
11171 if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) {
11172 cgp = (struct cg *)bp->b_data;
11173 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11174 inosused = cg_inosused(cgp);
11175 LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) {
11176 ino = jaddref->ja_ino % fs->fs_ipg;
11177 if (isset(inosused, ino)) {
11178 if ((jaddref->ja_mode & IFMT) == IFDIR)
11179 cgp->cg_cs.cs_ndir--;
11180 cgp->cg_cs.cs_nifree++;
11181 clrbit(inosused, ino);
11182 jaddref->ja_state &= ~ATTACHED;
11183 jaddref->ja_state |= UNDONE;
11184 stat_jaddref++;
11185 } else
11186 panic("initiate_write_bmsafemap: inode %ju "
11187 "marked free", (uintmax_t)jaddref->ja_ino);
11188 }
11189 }
11190 /*
11191 * Clear any block allocations which are pending journal writes.
11192 */
11193 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
11194 cgp = (struct cg *)bp->b_data;
11195 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11196 blksfree = cg_blksfree(cgp);
11197 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
11198 if (jnewblk_rollback(jnewblk, fs, cgp, blksfree))
11199 continue;
11200 panic("initiate_write_bmsafemap: block %jd "
11201 "marked free", (intmax_t)jnewblk->jn_blkno);
11202 }
11203 }
11204 /*
11205 * Move allocation lists to the written lists so they can be
11206 * cleared once the block write is complete.
11207 */
11208 LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr,
11209 inodedep, id_deps);
11210 LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr,
11211 newblk, nb_deps);
11212 LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist,
11213 wk_list);
11214 }
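
/*
 * LIST_SWAP() (from <sys/queue.h>) exchanges two list heads in
 * constant time, so the pending lists above become the written lists
 * without touching any entries. A minimal sketch with hypothetical
 * heads:
 *
 *	LIST_HEAD(, worklist) pending, written;
 *
 *	LIST_SWAP(&pending, &written, worklist, wk_list);
 *
 * Assuming "written" was empty beforehand, it now holds everything
 * that was pending, and "pending" is empty, ready to collect
 * dependencies that arrive while the write is in flight.
 */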
11215
11216 void
11217 softdep_handle_error(struct buf *bp)
11218 {
11219 struct ufsmount *ump;
11220
11221 ump = softdep_bp_to_mp(bp);
11222 if (ump == NULL)
11223 return;
11224
11225 if (ffs_fsfail_cleanup(ump, bp->b_error)) {
11226 /*
11227 * No future writes will succeed, so the on-disk image is safe.
11228 * Pretend that this write succeeded so that the softdep state
11229 * will be cleaned up naturally.
11230 */
11231 bp->b_ioflags &= ~BIO_ERROR;
11232 bp->b_error = 0;
11233 }
11234 }
11235
11236 /*
11237 * This routine is called during the completion interrupt
11238 * service routine for a disk write (from the procedure called
11239 * by the device driver to inform the filesystem caches of
11240 * a request completion). It should be called early in this
11241 * procedure, before the block is made available to other
11242 * processes or other routines are called.
11244 */
11245 static void
11246 softdep_disk_write_complete(
11247 struct buf *bp) /* describes the completed disk write */
11248 {
11249 struct worklist *wk;
11250 struct worklist *owk;
11251 struct ufsmount *ump;
11252 struct workhead reattach;
11253 struct freeblks *freeblks;
11254 struct buf *sbp;
11255
11256 ump = softdep_bp_to_mp(bp);
11257 KASSERT(LIST_EMPTY(&bp->b_dep) || ump != NULL,
11258 ("softdep_disk_write_complete: softdep_bp_to_mp returned NULL "
11259 "with outstanding dependencies for buffer %p", bp));
11260 if (ump == NULL)
11261 return;
11262 if ((bp->b_ioflags & BIO_ERROR) != 0)
11263 softdep_handle_error(bp);
11264 /*
11265 * If an error occurred while doing the write, then the data
11266 * has not hit the disk and the dependencies cannot be processed.
11267 * But we do have to go through and roll forward any dependencies
11268 * that were rolled back before the disk write.
11269 */
11270 sbp = NULL;
11271 ACQUIRE_LOCK(ump);
11272 if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0) {
11273 LIST_FOREACH(wk, &bp->b_dep, wk_list) {
11274 switch (wk->wk_type) {
11275 case D_PAGEDEP:
11276 handle_written_filepage(WK_PAGEDEP(wk), bp, 0);
11277 continue;
11278
11279 case D_INODEDEP:
11280 handle_written_inodeblock(WK_INODEDEP(wk),
11281 bp, 0);
11282 continue;
11283
11284 case D_BMSAFEMAP:
11285 handle_written_bmsafemap(WK_BMSAFEMAP(wk),
11286 bp, 0);
11287 continue;
11288
11289 case D_INDIRDEP:
11290 handle_written_indirdep(WK_INDIRDEP(wk),
11291 bp, &sbp, 0);
11292 continue;
11293 default:
11294 /* nothing to roll forward */
11295 continue;
11296 }
11297 }
11298 FREE_LOCK(ump);
11299 if (sbp)
11300 brelse(sbp);
11301 return;
11302 }
11303 LIST_INIT(&reattach);
11304
11305 /*
11306 * Ump SU lock must not be released anywhere in this code segment.
11307 */
11308 owk = NULL;
11309 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
11310 WORKLIST_REMOVE(wk);
11311 atomic_add_long(&dep_write[wk->wk_type], 1);
11312 if (wk == owk)
11313 panic("duplicate worklist: %p\n", wk);
11314 owk = wk;
11315 switch (wk->wk_type) {
11316 case D_PAGEDEP:
11317 if (handle_written_filepage(WK_PAGEDEP(wk), bp,
11318 WRITESUCCEEDED))
11319 WORKLIST_INSERT(&reattach, wk);
11320 continue;
11321
11322 case D_INODEDEP:
11323 if (handle_written_inodeblock(WK_INODEDEP(wk), bp,
11324 WRITESUCCEEDED))
11325 WORKLIST_INSERT(&reattach, wk);
11326 continue;
11327
11328 case D_BMSAFEMAP:
11329 if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp,
11330 WRITESUCCEEDED))
11331 WORKLIST_INSERT(&reattach, wk);
11332 continue;
11333
11334 case D_MKDIR:
11335 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
11336 continue;
11337
11338 case D_ALLOCDIRECT:
11339 wk->wk_state |= COMPLETE;
11340 handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL);
11341 continue;
11342
11343 case D_ALLOCINDIR:
11344 wk->wk_state |= COMPLETE;
11345 handle_allocindir_partdone(WK_ALLOCINDIR(wk));
11346 continue;
11347
11348 case D_INDIRDEP:
11349 if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp,
11350 WRITESUCCEEDED))
11351 WORKLIST_INSERT(&reattach, wk);
11352 continue;
11353
11354 case D_FREEBLKS:
11355 wk->wk_state |= COMPLETE;
11356 freeblks = WK_FREEBLKS(wk);
11357 if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE &&
11358 LIST_EMPTY(&freeblks->fb_jblkdephd))
11359 add_to_worklist(wk, WK_NODELAY);
11360 continue;
11361
11362 case D_FREEWORK:
11363 handle_written_freework(WK_FREEWORK(wk));
11364 break;
11365
11366 case D_JSEGDEP:
11367 free_jsegdep(WK_JSEGDEP(wk));
11368 continue;
11369
11370 case D_JSEG:
11371 handle_written_jseg(WK_JSEG(wk), bp);
11372 continue;
11373
11374 case D_SBDEP:
11375 if (handle_written_sbdep(WK_SBDEP(wk), bp))
11376 WORKLIST_INSERT(&reattach, wk);
11377 continue;
11378
11379 case D_FREEDEP:
11380 free_freedep(WK_FREEDEP(wk));
11381 continue;
11382
11383 default:
11384 panic("softdep_disk_write_complete: Unknown type %s",
11385 TYPENAME(wk->wk_type));
11386 /* NOTREACHED */
11387 }
11388 }
11389 /*
11390 * Reattach any requests that must be redone.
11391 */
11392 while ((wk = LIST_FIRST(&reattach)) != NULL) {
11393 WORKLIST_REMOVE(wk);
11394 WORKLIST_INSERT(&bp->b_dep, wk);
11395 }
11396 FREE_LOCK(ump);
11397 if (sbp)
11398 brelse(sbp);
11399 }
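
/*
 * A minimal sketch of the reattach protocol above: each
 * handle_written_*() routine returns nonzero when its rollbacks left
 * the buffer dirty, in which case the work item must ride along on
 * the next write of the same buffer:
 *
 *	if (handle_written_filepage(WK_PAGEDEP(wk), bp, WRITESUCCEEDED))
 *		WORKLIST_INSERT(&reattach, wk);
 *	...
 *	while ((wk = LIST_FIRST(&reattach)) != NULL) {
 *		WORKLIST_REMOVE(wk);
 *		WORKLIST_INSERT(&bp->b_dep, wk);
 *	}
 *
 * Items are staged on the local list first because inserting them
 * back into b_dep while still walking it would make the loop revisit
 * them.
 */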
11400
11401 /*
11402 * Called from within softdep_disk_write_complete above.
11403 */
11404 static void
11405 handle_allocdirect_partdone(
11406 struct allocdirect *adp, /* the completed allocdirect */
11407 struct workhead *wkhd) /* Work to do when inode is written. */
11408 {
11409 struct allocdirectlst *listhead;
11410 struct allocdirect *listadp;
11411 struct inodedep *inodedep;
11412 long bsize;
11413
11414 LOCK_OWNED(VFSTOUFS(adp->ad_block.nb_list.wk_mp));
11415 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
11416 return;
11417 /*
11418 * The on-disk inode cannot claim to be any larger than the last
11419 * fragment that has been written. Otherwise, the on-disk inode
11420 * might have fragments that were not the last block in the file
11421 * which would corrupt the filesystem. Thus, we cannot free any
11422 * allocdirects after one whose ad_oldblkno claims a fragment, as
11423 * these blocks must be rolled back to zero before writing the inode.
11424 * We check the currently active set of allocdirects in id_inoupdt
11425 * or id_extupdt as appropriate.
11426 */
11427 inodedep = adp->ad_inodedep;
11428 bsize = inodedep->id_fs->fs_bsize;
11429 if (adp->ad_state & EXTDATA)
11430 listhead = &inodedep->id_extupdt;
11431 else
11432 listhead = &inodedep->id_inoupdt;
11433 TAILQ_FOREACH(listadp, listhead, ad_next) {
11434 /* found our block */
11435 if (listadp == adp)
11436 break;
11437 /* continue if the old block is not a fragment */
11438 if (listadp->ad_oldsize == 0 ||
11439 listadp->ad_oldsize == bsize)
11440 continue;
11441 /* hit a fragment */
11442 return;
11443 }
11444 /*
11445 * If we have reached the end of the current list without
11446 * finding the just finished dependency, then it must be
11447 * on the future dependency list. Future dependencies cannot
11448 * be freed until they are moved to the current list.
11449 */
11450 if (listadp == NULL) {
11451 #ifdef INVARIANTS
11452 if (adp->ad_state & EXTDATA)
11453 listhead = &inodedep->id_newextupdt;
11454 else
11455 listhead = &inodedep->id_newinoupdt;
11456 TAILQ_FOREACH(listadp, listhead, ad_next)
11457 /* found our block */
11458 if (listadp == adp)
11459 break;
11460 if (listadp == NULL)
11461 panic("handle_allocdirect_partdone: lost dep");
11462 #endif /* INVARIANTS */
11463 return;
11464 }
11465 /*
11466 * If we have found the just finished dependency, then queue
11467 * it along with anything that follows it that is complete.
11468 * Since the pointer has not yet been written in the inode
11469 * as the dependency prevents it, place the allocdirect on the
11470 * bufwait list where it will be freed once the pointer is
11471 * valid.
11472 */
11473 if (wkhd == NULL)
11474 wkhd = &inodedep->id_bufwait;
11475 for (; adp; adp = listadp) {
11476 listadp = TAILQ_NEXT(adp, ad_next);
11477 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
11478 return;
11479 TAILQ_REMOVE(listhead, adp, ad_next);
11480 WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list);
11481 }
11482 }
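
/*
 * Worked example of the fragment barrier above, with assumed values:
 * let id_inoupdt hold allocdirects A (ad_oldsize = fs_bsize), B
 * (ad_oldsize = 8192, a fragment) and C, in that order. If C
 * completes first, the walk from the head passes A but hits B's
 * fragment before reaching C and returns, leaving C queued. Once B
 * itself completes, its call here breaks out at listadp == adp and
 * the tail loop moves B, and then the already-complete C, to the
 * bufwait list together.
 */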
11483
11484 /*
11485 * Called from within softdep_disk_write_complete above. This routine
11486 * completes successfully written allocindirs.
11487 */
11488 static void
11489 handle_allocindir_partdone(
11490 struct allocindir *aip) /* the completed allocindir */
11491 {
11492 struct indirdep *indirdep;
11493
11494 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
11495 return;
11496 indirdep = aip->ai_indirdep;
11497 LIST_REMOVE(aip, ai_next);
11498 /*
11499 * Don't set a pointer while the buffer is undergoing IO or while
11500 * we have active truncations.
11501 */
11502 if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) {
11503 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
11504 return;
11505 }
11506 if (indirdep->ir_state & UFS1FMT)
11507 ((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
11508 aip->ai_newblkno;
11509 else
11510 ((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
11511 aip->ai_newblkno;
11512 /*
11513 * Await the pointer write before freeing the allocindir.
11514 */
11515 LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next);
11516 }
11517
11518 /*
11519 * Release segments held on a jwork list.
11520 */
11521 static void
11522 handle_jwork(struct workhead *wkhd)
11523 {
11524 struct worklist *wk;
11525
11526 while ((wk = LIST_FIRST(wkhd)) != NULL) {
11527 WORKLIST_REMOVE(wk);
11528 switch (wk->wk_type) {
11529 case D_JSEGDEP:
11530 free_jsegdep(WK_JSEGDEP(wk));
11531 continue;
11532 case D_FREEDEP:
11533 free_freedep(WK_FREEDEP(wk));
11534 continue;
11535 case D_FREEFRAG:
11536 rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep));
11537 WORKITEM_FREE(wk, D_FREEFRAG);
11538 continue;
11539 case D_FREEWORK:
11540 handle_written_freework(WK_FREEWORK(wk));
11541 continue;
11542 default:
11543 panic("handle_jwork: Unknown type %s\n",
11544 TYPENAME(wk->wk_type));
11545 }
11546 }
11547 }
11548
11549 /*
11550 * Handle the bufwait list on an inode when it is safe to release items
11551 * held there. This normally happens after an inode block is written but
11552 * may be delayed and handled later if there are pending journal items that
11553 * are not yet safe to be released.
11554 */
11555 static struct freefile *
11556 handle_bufwait(
11557 struct inodedep *inodedep,
11558 struct workhead *refhd)
11559 {
11560 struct jaddref *jaddref;
11561 struct freefile *freefile;
11562 struct worklist *wk;
11563
11564 freefile = NULL;
11565 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
11566 WORKLIST_REMOVE(wk);
11567 switch (wk->wk_type) {
11568 case D_FREEFILE:
11569 /*
11570 * We defer adding freefile to the worklist
11571 * until all other additions have been made to
11572 * ensure that it will be done after all the
11573 * old blocks have been freed.
11574 */
11575 if (freefile != NULL)
11576 panic("handle_bufwait: freefile");
11577 freefile = WK_FREEFILE(wk);
11578 continue;
11579
11580 case D_MKDIR:
11581 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
11582 continue;
11583
11584 case D_DIRADD:
11585 diradd_inode_written(WK_DIRADD(wk), inodedep);
11586 continue;
11587
11588 case D_FREEFRAG:
11589 wk->wk_state |= COMPLETE;
11590 if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE)
11591 add_to_worklist(wk, 0);
11592 continue;
11593
11594 case D_DIRREM:
11595 wk->wk_state |= COMPLETE;
11596 add_to_worklist(wk, 0);
11597 continue;
11598
11599 case D_ALLOCDIRECT:
11600 case D_ALLOCINDIR:
11601 free_newblk(WK_NEWBLK(wk));
11602 continue;
11603
11604 case D_JNEWBLK:
11605 wk->wk_state |= COMPLETE;
11606 free_jnewblk(WK_JNEWBLK(wk));
11607 continue;
11608
11609 /*
11610 * Save freed journal segments and add references on
11611 * the supplied list which will delay their release
11612 * until the cg bitmap is cleared on disk.
11613 */
11614 case D_JSEGDEP:
11615 if (refhd == NULL)
11616 free_jsegdep(WK_JSEGDEP(wk));
11617 else
11618 WORKLIST_INSERT(refhd, wk);
11619 continue;
11620
11621 case D_JADDREF:
11622 jaddref = WK_JADDREF(wk);
11623 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
11624 if_deps);
11625 /*
11626 * Transfer any jaddrefs to the list to be freed with
11627 * the bitmap if we're handling a removed file.
11628 */
11629 if (refhd == NULL) {
11630 wk->wk_state |= COMPLETE;
11631 free_jaddref(jaddref);
11632 } else
11633 WORKLIST_INSERT(refhd, wk);
11634 continue;
11635
11636 default:
11637 panic("handle_bufwait: Unknown type %p(%s)",
11638 wk, TYPENAME(wk->wk_type));
11639 /* NOTREACHED */
11640 }
11641 }
11642 return (freefile);
11643 }
11644 /*
11645 * Called from within softdep_disk_write_complete above to restore
11646 * in-memory inode block contents to their most up-to-date state. Note
11647 * that this routine is always called from interrupt level with further
11648 * interrupts from this device blocked.
11649 *
11650 * If the write did not succeed, we will do all the roll-forward
11651 * operations, but we will not take the actions that will allow its
11652 * dependencies to be processed.
11653 */
11654 static int
11655 handle_written_inodeblock(
11656 struct inodedep *inodedep,
11657 struct buf *bp, /* buffer containing the inode block */
11658 int flags)
11659 {
11660 struct freefile *freefile;
11661 struct allocdirect *adp, *nextadp;
11662 struct ufs1_dinode *dp1 = NULL;
11663 struct ufs2_dinode *dp2 = NULL;
11664 struct workhead wkhd;
11665 int hadchanges, fstype;
11666 ino_t freelink;
11667
11668 LIST_INIT(&wkhd);
11669 hadchanges = 0;
11670 freefile = NULL;
11671 if ((inodedep->id_state & IOSTARTED) == 0)
11672 panic("handle_written_inodeblock: not started");
11673 inodedep->id_state &= ~IOSTARTED;
11674 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) {
11675 fstype = UFS1;
11676 dp1 = (struct ufs1_dinode *)bp->b_data +
11677 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11678 freelink = dp1->di_freelink;
11679 } else {
11680 fstype = UFS2;
11681 dp2 = (struct ufs2_dinode *)bp->b_data +
11682 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11683 freelink = dp2->di_freelink;
11684 }
11685 /*
11686 * Leave this inodeblock dirty until it's in the list.
11687 */
11688 if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED &&
11689 (flags & WRITESUCCEEDED)) {
11690 struct inodedep *inon;
11691
11692 inon = TAILQ_NEXT(inodedep, id_unlinked);
11693 if ((inon == NULL && freelink == 0) ||
11694 (inon && inon->id_ino == freelink)) {
11695 if (inon)
11696 inon->id_state |= UNLINKPREV;
11697 inodedep->id_state |= UNLINKNEXT;
11698 }
11699 hadchanges = 1;
11700 }
11701 /*
11702 * If we had to roll back the inode allocation because of
11703 * bitmaps being incomplete, then simply restore it.
11704 * Keep the block dirty so that it will not be reclaimed until
11705 * all associated dependencies have been cleared and the
11706 * corresponding updates written to disk.
11707 */
11708 if (inodedep->id_savedino1 != NULL) {
11709 hadchanges = 1;
11710 if (fstype == UFS1)
11711 *dp1 = *inodedep->id_savedino1;
11712 else
11713 *dp2 = *inodedep->id_savedino2;
11714 free(inodedep->id_savedino1, M_SAVEDINO);
11715 inodedep->id_savedino1 = NULL;
11716 if ((bp->b_flags & B_DELWRI) == 0)
11717 stat_inode_bitmap++;
11718 bdirty(bp);
11719 /*
11720 * If the inode is clear here and GOINGAWAY it will never
11721 * be written. Process the bufwait and clear any pending
11722 * work which may include the freefile.
11723 */
11724 if (inodedep->id_state & GOINGAWAY)
11725 goto bufwait;
11726 return (1);
11727 }
11728 if (flags & WRITESUCCEEDED)
11729 inodedep->id_state |= COMPLETE;
11730 /*
11731 * Roll forward anything that had to be rolled back before
11732 * the inode could be updated.
11733 */
11734 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
11735 nextadp = TAILQ_NEXT(adp, ad_next);
11736 if (adp->ad_state & ATTACHED)
11737 panic("handle_written_inodeblock: new entry");
11738 if (fstype == UFS1) {
11739 if (adp->ad_offset < UFS_NDADDR) {
11740 if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11741 panic("%s %s #%jd mismatch %d != %jd",
11742 "handle_written_inodeblock:",
11743 "direct pointer",
11744 (intmax_t)adp->ad_offset,
11745 dp1->di_db[adp->ad_offset],
11746 (intmax_t)adp->ad_oldblkno);
11747 dp1->di_db[adp->ad_offset] = adp->ad_newblkno;
11748 } else {
11749 if (dp1->di_ib[adp->ad_offset - UFS_NDADDR] !=
11750 0)
11751 panic("%s: %s #%jd allocated as %d",
11752 "handle_written_inodeblock",
11753 "indirect pointer",
11754 (intmax_t)adp->ad_offset -
11755 UFS_NDADDR,
11756 dp1->di_ib[adp->ad_offset -
11757 UFS_NDADDR]);
11758 dp1->di_ib[adp->ad_offset - UFS_NDADDR] =
11759 adp->ad_newblkno;
11760 }
11761 } else {
11762 if (adp->ad_offset < UFS_NDADDR) {
11763 if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11764 panic("%s: %s #%jd %s %jd != %jd",
11765 "handle_written_inodeblock",
11766 "direct pointer",
11767 (intmax_t)adp->ad_offset, "mismatch",
11768 (intmax_t)dp2->di_db[adp->ad_offset],
11769 (intmax_t)adp->ad_oldblkno);
11770 dp2->di_db[adp->ad_offset] = adp->ad_newblkno;
11771 } else {
11772 if (dp2->di_ib[adp->ad_offset - UFS_NDADDR] !=
11773 0)
11774 panic("%s: %s #%jd allocated as %jd",
11775 "handle_written_inodeblock",
11776 "indirect pointer",
11777 (intmax_t)adp->ad_offset -
11778 UFS_NDADDR,
11779 (intmax_t)
11780 dp2->di_ib[adp->ad_offset -
11781 UFS_NDADDR]);
11782 dp2->di_ib[adp->ad_offset - UFS_NDADDR] =
11783 adp->ad_newblkno;
11784 }
11785 }
11786 adp->ad_state &= ~UNDONE;
11787 adp->ad_state |= ATTACHED;
11788 hadchanges = 1;
11789 }
11790 for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) {
11791 nextadp = TAILQ_NEXT(adp, ad_next);
11792 if (adp->ad_state & ATTACHED)
11793 panic("handle_written_inodeblock: new entry");
11794 if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno)
11795 panic("%s: direct pointers #%jd %s %jd != %jd",
11796 "handle_written_inodeblock",
11797 (intmax_t)adp->ad_offset, "mismatch",
11798 (intmax_t)dp2->di_extb[adp->ad_offset],
11799 (intmax_t)adp->ad_oldblkno);
11800 dp2->di_extb[adp->ad_offset] = adp->ad_newblkno;
11801 adp->ad_state &= ~UNDONE;
11802 adp->ad_state |= ATTACHED;
11803 hadchanges = 1;
11804 }
11805 if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
11806 stat_direct_blk_ptrs++;
11807 /*
11808 * Reset the file size to its most up-to-date value.
11809 */
11810 if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1)
11811 panic("handle_written_inodeblock: bad size");
11812 if (inodedep->id_savednlink > UFS_LINK_MAX)
11813 panic("handle_written_inodeblock: Invalid link count "
11814 "%jd for inodedep %p", (uintmax_t)inodedep->id_savednlink,
11815 inodedep);
11816 if (fstype == UFS1) {
11817 if (dp1->di_nlink != inodedep->id_savednlink) {
11818 dp1->di_nlink = inodedep->id_savednlink;
11819 hadchanges = 1;
11820 }
11821 if (dp1->di_size != inodedep->id_savedsize) {
11822 dp1->di_size = inodedep->id_savedsize;
11823 hadchanges = 1;
11824 }
11825 } else {
11826 if (dp2->di_nlink != inodedep->id_savednlink) {
11827 dp2->di_nlink = inodedep->id_savednlink;
11828 hadchanges = 1;
11829 }
11830 if (dp2->di_size != inodedep->id_savedsize) {
11831 dp2->di_size = inodedep->id_savedsize;
11832 hadchanges = 1;
11833 }
11834 if (dp2->di_extsize != inodedep->id_savedextsize) {
11835 dp2->di_extsize = inodedep->id_savedextsize;
11836 hadchanges = 1;
11837 }
11838 }
11839 inodedep->id_savedsize = -1;
11840 inodedep->id_savedextsize = -1;
11841 inodedep->id_savednlink = -1;
11842 /*
11843 * If there were any rollbacks in the inode block, then it must be
11844 * marked dirty so that it will eventually get written back in
11845 * its correct form.
11846 */
11847 if (hadchanges) {
11848 if (fstype == UFS2)
11849 ffs_update_dinode_ckhash(inodedep->id_fs, dp2);
11850 bdirty(bp);
11851 }
11852 bufwait:
11853 /*
11854 * If the write did not succeed, we have done all the roll-forward
11855 * operations, but we cannot take the actions that will allow its
11856 * dependencies to be processed.
11857 */
11858 if ((flags & WRITESUCCEEDED) == 0)
11859 return (hadchanges);
11860 /*
11861 * Process any allocdirects that completed during the update.
11862 */
11863 if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
11864 handle_allocdirect_partdone(adp, &wkhd);
11865 if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
11866 handle_allocdirect_partdone(adp, &wkhd);
11867 /*
11868 * Process deallocations that were held pending until the
11869 * inode had been written to disk. Freeing of the inode
11870 * is delayed until after all blocks have been freed to
11871 * avoid creation of new <vfsid, inum, lbn> triples
11872 * before the old ones have been deleted. Completely
11873 * unlinked inodes are not processed until the unlinked
11874 * inode list is written or the last reference is removed.
11875 */
11876 if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) {
11877 freefile = handle_bufwait(inodedep, NULL);
11878 if (freefile && !LIST_EMPTY(&wkhd)) {
11879 WORKLIST_INSERT(&wkhd, &freefile->fx_list);
11880 freefile = NULL;
11881 }
11882 }
11883 /*
11884 * Move rolled forward dependency completions to the bufwait list
11885 * now that those that were already written have been processed.
11886 */
11887 if (!LIST_EMPTY(&wkhd) && hadchanges == 0)
11888 panic("handle_written_inodeblock: bufwait but no changes");
11889 jwork_move(&inodedep->id_bufwait, &wkhd);
11890
11891 if (freefile != NULL) {
11892 /*
11893 * If the inode is goingaway it was never written. Fake up
11894 * the state here so free_inodedep() can succeed.
11895 */
11896 if (inodedep->id_state & GOINGAWAY)
11897 inodedep->id_state |= COMPLETE | DEPCOMPLETE;
11898 if (free_inodedep(inodedep) == 0)
11899 panic("handle_written_inodeblock: live inodedep %p",
11900 inodedep);
11901 add_to_worklist(&freefile->fx_list, 0);
11902 return (0);
11903 }
11904
11905 /*
11906 * If no outstanding dependencies, free it.
11907 */
11908 if (free_inodedep(inodedep) ||
11909 (TAILQ_FIRST(&inodedep->id_inoreflst) == NULL &&
11910 TAILQ_FIRST(&inodedep->id_inoupdt) == NULL &&
11911 TAILQ_FIRST(&inodedep->id_extupdt) == NULL &&
11912 LIST_FIRST(&inodedep->id_bufwait) == NULL))
11913 return (0);
11914 return (hadchanges);
11915 }
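
/*
 * Worked example of the roll-forward above, with assumed values: a
 * UFS2 allocdirect with ad_offset = 1, ad_oldblkno = 0 and
 * ad_newblkno = 1234 was rolled back before the write, so the block
 * that just hit the disk had di_db[1] == 0. Now that it is safely
 * written,
 *
 *	dp2->di_db[1] = adp->ad_newblkno;
 *
 * re-installs the pointer in the in-memory copy, and hadchanges
 * forces bdirty(bp) so the corrected block goes out on a later write.
 */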
11916
11917 /*
11918 * Perform needed roll-forwards and kick off any dependencies that
11919 * can now be processed.
11920 *
11921 * If the write did not succeed, we will do all the roll-forward
11922 * operations, but we will not take the actions that will allow its
11923 * dependencies to be processed.
11924 */
11925 static int
11926 handle_written_indirdep(
11927 struct indirdep *indirdep,
11928 struct buf *bp,
11929 struct buf **bpp,
11930 int flags)
11931 {
11932 struct allocindir *aip;
11933 struct buf *sbp;
11934 int chgs;
11935
11936 if (indirdep->ir_state & GOINGAWAY)
11937 panic("handle_written_indirdep: indirdep gone");
11938 if ((indirdep->ir_state & IOSTARTED) == 0)
11939 panic("handle_written_indirdep: IO not started");
11940 chgs = 0;
11941 /*
11942 * If there were rollbacks, revert them here.
11943 */
11944 if (indirdep->ir_saveddata) {
11945 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
11946 if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11947 free(indirdep->ir_saveddata, M_INDIRDEP);
11948 indirdep->ir_saveddata = NULL;
11949 }
11950 chgs = 1;
11951 }
11952 indirdep->ir_state &= ~(UNDONE | IOSTARTED);
11953 indirdep->ir_state |= ATTACHED;
11954 /*
11955 * If the write did not succeed, we have done all the roll-forward
11956 * operations, but we cannot take the actions that will allow its
11957 * dependencies to be processed.
11958 */
11959 if ((flags & WRITESUCCEEDED) == 0) {
11960 stat_indir_blk_ptrs++;
11961 bdirty(bp);
11962 return (1);
11963 }
11964 /*
11965 * Move allocindirs with written pointers to the completehd if
11966 * the indirdep's pointer is not yet written. Otherwise
11967 * free them here.
11968 */
11969 while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL) {
11970 LIST_REMOVE(aip, ai_next);
11971 if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
11972 LIST_INSERT_HEAD(&indirdep->ir_completehd, aip,
11973 ai_next);
11974 newblk_freefrag(&aip->ai_block);
11975 continue;
11976 }
11977 free_newblk(&aip->ai_block);
11978 }
11979 /*
11980 * Move allocindirs that have finished dependency processing from
11981 * the done list to the write list after updating the pointers.
11982 */
11983 if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11984 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL) {
11985 handle_allocindir_partdone(aip);
11986 if (aip == LIST_FIRST(&indirdep->ir_donehd))
11987 panic("handle_written_indirdep: not gone");
11988 chgs = 1;
11989 }
11990 }
11991 /*
11992 * Preserve the indirdep if there were any changes or if it is not
11993 * yet valid on disk.
11994 */
11995 if (chgs) {
11996 stat_indir_blk_ptrs++;
11997 bdirty(bp);
11998 return (1);
11999 }
12000 /*
12001 * If there were no changes we can discard the savebp and detach
12002 * ourselves from the buf. We are only carrying completed pointers
12003 * in this case.
12004 */
12005 sbp = indirdep->ir_savebp;
12006 sbp->b_flags |= B_INVAL | B_NOCACHE;
12007 indirdep->ir_savebp = NULL;
12008 indirdep->ir_bp = NULL;
12009 if (*bpp != NULL)
12010 panic("handle_written_indirdep: bp already exists.");
12011 *bpp = sbp;
12012 /*
12013 * The indirdep may not be freed until its parent points at it.
12014 */
12015 if (indirdep->ir_state & DEPCOMPLETE)
12016 free_indirdep(indirdep);
12017
12018 return (0);
12019 }
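
/*
* For orientation, the allocindir worklists handled above migrate
* roughly as follows (a simplified sketch; truncations queued on
* ir_trunc can defer the middle step):
*
* ir_deplisthd -> ir_donehd once the new block's own dependencies
* (journal entry, bitmap) have completed;
* ir_donehd -> ir_writehd when handle_allocindir_partdone() copies
* the block pointer into the indirect block;
* ir_writehd -> freed, or parked on ir_completehd until the pointer
* to the indirect block itself is written (DEPCOMPLETE).
*/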
12020
12021 /*
12022 * Process a diradd entry after its dependent inode has been written.
12023 */
12024 static void
12025 diradd_inode_written(
12026 struct diradd *dap,
12027 struct inodedep *inodedep)
12028 {
12029
12030 LOCK_OWNED(VFSTOUFS(dap->da_list.wk_mp));
12031 dap->da_state |= COMPLETE;
12032 complete_diradd(dap);
12033 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
12034 }
12035
12036 /*
12037 * Returns true if the bmsafemap will have rollbacks when written. Must only
12038 * be called with the per-filesystem lock and the buf lock on the cg held.
12039 */
12040 static int
12041 bmsafemap_backgroundwrite(
12042 struct bmsafemap *bmsafemap,
12043 struct buf *bp)
12044 {
12045 int dirty;
12046
12047 LOCK_OWNED(VFSTOUFS(bmsafemap->sm_list.wk_mp));
dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) ||
!LIST_EMPTY(&bmsafemap->sm_jnewblkhd);
12050 /*
* If we're initiating a background write, we need to process the
* rollbacks as they exist now, not as they exist when I/O starts.
12053 * No other consumers will look at the contents of the shadowed
12054 * buf so this is safe to do here.
12055 */
12056 if (bp->b_xflags & BX_BKGRDMARKER)
12057 initiate_write_bmsafemap(bmsafemap, bp);
12058
12059 return (dirty);
12060 }
12061
12062 /*
12063 * Re-apply an allocation when a cg write is complete.
12064 */
12065 static int
12066 jnewblk_rollforward(
12067 struct jnewblk *jnewblk,
12068 struct fs *fs,
12069 struct cg *cgp,
12070 uint8_t *blksfree)
12071 {
12072 ufs1_daddr_t fragno;
12073 ufs2_daddr_t blkno;
12074 long cgbno, bbase;
12075 int frags, blk;
12076 int i;
12077
12078 frags = 0;
12079 cgbno = dtogd(fs, jnewblk->jn_blkno);
12080 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) {
12081 if (isclr(blksfree, cgbno + i))
12082 panic("jnewblk_rollforward: re-allocated fragment");
12083 frags++;
12084 }
12085 if (frags == fs->fs_frag) {
12086 blkno = fragstoblks(fs, cgbno);
12087 ffs_clrblock(fs, blksfree, (long)blkno);
12088 ffs_clusteracct(fs, cgp, blkno, -1);
12089 cgp->cg_cs.cs_nbfree--;
12090 } else {
12091 bbase = cgbno - fragnum(fs, cgbno);
12092 cgbno += jnewblk->jn_oldfrags;
12093 /* If a complete block had been reassembled, account for it. */
12094 fragno = fragstoblks(fs, bbase);
12095 if (ffs_isblock(fs, blksfree, fragno)) {
12096 cgp->cg_cs.cs_nffree += fs->fs_frag;
12097 ffs_clusteracct(fs, cgp, fragno, -1);
12098 cgp->cg_cs.cs_nbfree--;
12099 }
12100 /* Decrement the old frags. */
12101 blk = blkmap(fs, blksfree, bbase);
12102 ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
/* Allocate the fragment. */
12104 for (i = 0; i < frags; i++)
12105 clrbit(blksfree, cgbno + i);
12106 cgp->cg_cs.cs_nffree -= frags;
/* Add back in counts associated with the new frags. */
12108 blk = blkmap(fs, blksfree, bbase);
12109 ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
12110 }
12111 return (frags);
12112 }
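
/*
* As a worked example of the fragment accounting above, assume
* fs_frag is 8 and a journaled allocation grew a partial block from
* 2 to 5 fragments (jn_oldfrags == 2, jn_frags == 5). The loop
* verifies that fragments 2..4 are still free and computes
* frags == 3. Since 3 != fs_frag, the else branch subtracts the old
* fragment pattern from cg_frsum (ffs_fragacct(..., -1)), clears the
* three bits in blksfree, drops cs_nffree by 3, and adds the new
* fragment pattern back (ffs_fragacct(..., 1)). Only when every
* fragment of the block is claimed does the first branch run instead,
* clearing the whole-block bit, updating the cluster summary, and
* decrementing cs_nbfree.
*/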
12113
12114 /*
12115 * Complete a write to a bmsafemap structure. Roll forward any bitmap
12116 * changes if it's not a background write. Set all written dependencies
12117 * to DEPCOMPLETE and free the structure if possible.
12118 *
12119 * If the write did not succeed, we will do all the roll-forward
12120 * operations, but we will not take the actions that will allow its
12121 * dependencies to be processed.
12122 */
12123 static int
12124 handle_written_bmsafemap(
12125 struct bmsafemap *bmsafemap,
12126 struct buf *bp,
12127 int flags)
12128 {
12129 struct newblk *newblk;
12130 struct inodedep *inodedep;
12131 struct jaddref *jaddref, *jatmp;
12132 struct jnewblk *jnewblk, *jntmp;
12133 struct ufsmount *ump;
12134 uint8_t *inosused;
12135 uint8_t *blksfree;
12136 struct cg *cgp;
12137 struct fs *fs;
12138 ino_t ino;
12139 int foreground;
12140 int chgs;
12141
12142 if ((bmsafemap->sm_state & IOSTARTED) == 0)
panic("handle_written_bmsafemap: Not started");
12144 ump = VFSTOUFS(bmsafemap->sm_list.wk_mp);
12145 chgs = 0;
12146 bmsafemap->sm_state &= ~IOSTARTED;
12147 foreground = (bp->b_xflags & BX_BKGRDMARKER) == 0;
12148 /*
* If the write was successful, release journal work that was waiting
* on the write. Otherwise move the work back.
12151 */
12152 if (flags & WRITESUCCEEDED)
12153 handle_jwork(&bmsafemap->sm_freewr);
12154 else
12155 LIST_CONCAT(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr,
12156 worklist, wk_list);
12157
12158 /*
12159 * Restore unwritten inode allocation pending jaddref writes.
12160 */
12161 if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) {
12162 cgp = (struct cg *)bp->b_data;
12163 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
12164 inosused = cg_inosused(cgp);
12165 LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd,
12166 ja_bmdeps, jatmp) {
12167 if ((jaddref->ja_state & UNDONE) == 0)
12168 continue;
12169 ino = jaddref->ja_ino % fs->fs_ipg;
12170 if (isset(inosused, ino))
12171 panic("handle_written_bmsafemap: "
12172 "re-allocated inode");
12173 /* Do the roll-forward only if it's a real copy. */
12174 if (foreground) {
12175 if ((jaddref->ja_mode & IFMT) == IFDIR)
12176 cgp->cg_cs.cs_ndir++;
12177 cgp->cg_cs.cs_nifree--;
12178 setbit(inosused, ino);
12179 chgs = 1;
12180 }
12181 jaddref->ja_state &= ~UNDONE;
12182 jaddref->ja_state |= ATTACHED;
12183 free_jaddref(jaddref);
12184 }
12185 }
12186 /*
12187 * Restore any block allocations which are pending journal writes.
12188 */
if (!LIST_EMPTY(&bmsafemap->sm_jnewblkhd)) {
12190 cgp = (struct cg *)bp->b_data;
12191 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
12192 blksfree = cg_blksfree(cgp);
12193 LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps,
12194 jntmp) {
12195 if ((jnewblk->jn_state & UNDONE) == 0)
12196 continue;
12197 /* Do the roll-forward only if it's a real copy. */
12198 if (foreground &&
12199 jnewblk_rollforward(jnewblk, fs, cgp, blksfree))
12200 chgs = 1;
12201 jnewblk->jn_state &= ~(UNDONE | NEWBLOCK);
12202 jnewblk->jn_state |= ATTACHED;
12203 free_jnewblk(jnewblk);
12204 }
12205 }
12206 /*
12207 * If the write did not succeed, we have done all the roll-forward
12208 * operations, but we cannot take the actions that will allow its
12209 * dependencies to be processed.
12210 */
12211 if ((flags & WRITESUCCEEDED) == 0) {
12212 LIST_CONCAT(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr,
12213 newblk, nb_deps);
12214 LIST_CONCAT(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr,
12215 worklist, wk_list);
12216 if (foreground)
12217 bdirty(bp);
12218 return (1);
12219 }
while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr)) != NULL) {
12221 newblk->nb_state |= DEPCOMPLETE;
12222 newblk->nb_state &= ~ONDEPLIST;
12223 newblk->nb_bmsafemap = NULL;
12224 LIST_REMOVE(newblk, nb_deps);
12225 if (newblk->nb_list.wk_type == D_ALLOCDIRECT)
12226 handle_allocdirect_partdone(
12227 WK_ALLOCDIRECT(&newblk->nb_list), NULL);
12228 else if (newblk->nb_list.wk_type == D_ALLOCINDIR)
12229 handle_allocindir_partdone(
12230 WK_ALLOCINDIR(&newblk->nb_list));
12231 else if (newblk->nb_list.wk_type != D_NEWBLK)
12232 panic("handle_written_bmsafemap: Unexpected type: %s",
12233 TYPENAME(newblk->nb_list.wk_type));
12234 }
12235 while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) {
12236 inodedep->id_state |= DEPCOMPLETE;
12237 inodedep->id_state &= ~ONDEPLIST;
12238 LIST_REMOVE(inodedep, id_deps);
12239 inodedep->id_bmsafemap = NULL;
12240 }
12241 LIST_REMOVE(bmsafemap, sm_next);
12242 if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) &&
12243 LIST_EMPTY(&bmsafemap->sm_jnewblkhd) &&
12244 LIST_EMPTY(&bmsafemap->sm_newblkhd) &&
12245 LIST_EMPTY(&bmsafemap->sm_inodedephd) &&
12246 LIST_EMPTY(&bmsafemap->sm_freehd)) {
12247 LIST_REMOVE(bmsafemap, sm_hash);
12248 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
12249 return (0);
12250 }
12251 LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
12252 if (foreground)
12253 bdirty(bp);
12254 return (1);
12255 }
12256
12257 /*
12258 * Try to free a mkdir dependency.
12259 */
12260 static void
12261 complete_mkdir(struct mkdir *mkdir)
12262 {
12263 struct diradd *dap;
12264
12265 if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE)
12266 return;
12267 LIST_REMOVE(mkdir, md_mkdirs);
12268 dap = mkdir->md_diradd;
12269 dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
12270 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) {
12271 dap->da_state |= DEPCOMPLETE;
12272 complete_diradd(dap);
12273 }
12274 WORKITEM_FREE(mkdir, D_MKDIR);
12275 }
12276
12277 /*
12278 * Handle the completion of a mkdir dependency.
12279 */
12280 static void
12281 handle_written_mkdir(struct mkdir *mkdir, int type)
12282 {
12283
12284 if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type)
12285 panic("handle_written_mkdir: bad type");
12286 mkdir->md_state |= COMPLETE;
12287 complete_mkdir(mkdir);
12288 }
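
/*
* In outline: a newly created directory carries two mkdir
* dependencies. MKDIR_BODY is cleared once the block holding its
* "." and ".." entries has been written; MKDIR_PARENT is cleared
* once the parent directory's increased link count has been written.
* handle_written_mkdir() is called with the corresponding type as
* each of those writes completes, and complete_mkdir() allows the
* new name to be committed in the parent only after both bits are
* gone.
*/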
12289
12290 static int
12291 free_pagedep(struct pagedep *pagedep)
12292 {
12293 int i;
12294
12295 if (pagedep->pd_state & NEWBLOCK)
12296 return (0);
12297 if (!LIST_EMPTY(&pagedep->pd_dirremhd))
12298 return (0);
12299 for (i = 0; i < DAHASHSZ; i++)
12300 if (!LIST_EMPTY(&pagedep->pd_diraddhd[i]))
12301 return (0);
12302 if (!LIST_EMPTY(&pagedep->pd_pendinghd))
12303 return (0);
12304 if (!LIST_EMPTY(&pagedep->pd_jmvrefhd))
12305 return (0);
12306 if (pagedep->pd_state & ONWORKLIST)
12307 WORKLIST_REMOVE(&pagedep->pd_list);
12308 LIST_REMOVE(pagedep, pd_hash);
12309 WORKITEM_FREE(pagedep, D_PAGEDEP);
12310
12311 return (1);
12312 }
12313
12314 /*
12315 * Called from within softdep_disk_write_complete above.
12316 * A write operation was just completed. Removed inodes can
12317 * now be freed and associated block pointers may be committed.
12318 * Note that this routine is always called from interrupt level
12319 * with further interrupts from this device blocked.
12320 *
12321 * If the write did not succeed, we will do all the roll-forward
12322 * operations, but we will not take the actions that will allow its
12323 * dependencies to be processed.
12324 */
12325 static int
12326 handle_written_filepage(
12327 struct pagedep *pagedep,
12328 struct buf *bp, /* buffer containing the written page */
12329 int flags)
12330 {
12331 struct dirrem *dirrem;
12332 struct diradd *dap, *nextdap;
12333 struct direct *ep;
12334 int i, chgs;
12335
12336 if ((pagedep->pd_state & IOSTARTED) == 0)
12337 panic("handle_written_filepage: not started");
12338 pagedep->pd_state &= ~IOSTARTED;
12339 if ((flags & WRITESUCCEEDED) == 0)
12340 goto rollforward;
12341 /*
12342 * Process any directory removals that have been committed.
12343 */
12344 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
12345 LIST_REMOVE(dirrem, dm_next);
12346 dirrem->dm_state |= COMPLETE;
12347 dirrem->dm_dirinum = pagedep->pd_ino;
12348 KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
12349 ("handle_written_filepage: Journal entries not written."));
12350 add_to_worklist(&dirrem->dm_list, 0);
12351 }
12352 /*
12353 * Free any directory additions that have been committed.
12354 * If it is a newly allocated block, we have to wait until
12355 * the on-disk directory inode claims the new block.
12356 */
12357 if ((pagedep->pd_state & NEWBLOCK) == 0)
12358 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
12359 free_diradd(dap, NULL);
12360 rollforward:
12361 /*
12362 * Uncommitted directory entries must be restored.
12363 */
12364 for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
12365 for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
12366 dap = nextdap) {
12367 nextdap = LIST_NEXT(dap, da_pdlist);
12368 if (dap->da_state & ATTACHED)
12369 panic("handle_written_filepage: attached");
12370 ep = (struct direct *)
12371 ((char *)bp->b_data + dap->da_offset);
12372 ep->d_ino = dap->da_newinum;
12373 dap->da_state &= ~UNDONE;
12374 dap->da_state |= ATTACHED;
12375 chgs = 1;
12376 /*
12377 * If the inode referenced by the directory has
12378 * been written out, then the dependency can be
12379 * moved to the pending list.
12380 */
12381 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
12382 LIST_REMOVE(dap, da_pdlist);
12383 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
12384 da_pdlist);
12385 }
12386 }
12387 }
12388 /*
12389 * If there were any rollbacks in the directory, then it must be
* marked dirty so that it will eventually get written back in
12391 * its correct form.
12392 */
12393 if (chgs || (flags & WRITESUCCEEDED) == 0) {
12394 if ((bp->b_flags & B_DELWRI) == 0)
12395 stat_dir_entry++;
12396 bdirty(bp);
12397 return (1);
12398 }
12399 /*
12400 * If we are not waiting for a new directory block to be
12401 * claimed by its inode, then the pagedep will be freed.
12402 * Otherwise it will remain to track any new entries on
12403 * the page in case they are fsync'ed.
12404 */
12405 free_pagedep(pagedep);
12406 return (0);
12407 }
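
/*
* To make the rollback/roll-forward above concrete: suppose an entry
* was added for inode 1234 but that inode has not yet been written.
* Before the directory page went to disk, the entry's d_ino was
* rolled back (to zero for a new entry, or to the prior inode number
* for a changed one) so that the on-disk page never names an unsafe
* inode. Here, once the write completes, d_ino is restored from
* da_newinum, the diradd is re-marked ATTACHED, and the page is
* redirtied so that its true contents eventually reach the disk.
*/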
12408
12409 /*
12410 * Writing back in-core inode structures.
12411 *
12412 * The filesystem only accesses an inode's contents when it occupies an
12413 * "in-core" inode structure. These "in-core" structures are separate from
12414 * the page frames used to cache inode blocks. Only the latter are
12415 * transferred to/from the disk. So, when the updated contents of the
12416 * "in-core" inode structure are copied to the corresponding in-memory inode
12417 * block, the dependencies are also transferred. The following procedure is
12418 * called when copying a dirty "in-core" inode to a cached inode block.
12419 */
12420
12421 /*
12422 * Called when an inode is loaded from disk. If the effective link count
12423 * differed from the actual link count when it was last flushed, then we
12424 * need to ensure that the correct effective link count is put back.
12425 */
12426 void
12427 softdep_load_inodeblock(
struct inode *ip) /* the "in-core" copy of the inode */
12429 {
12430 struct inodedep *inodedep;
12431 struct ufsmount *ump;
12432
12433 ump = ITOUMP(ip);
12434 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
12435 ("softdep_load_inodeblock called on non-softdep filesystem"));
12436 /*
12437 * Check for alternate nlink count.
12438 */
12439 ip->i_effnlink = ip->i_nlink;
12440 ACQUIRE_LOCK(ump);
12441 if (inodedep_lookup(UFSTOVFS(ump), ip->i_number, 0, &inodedep) == 0) {
12442 FREE_LOCK(ump);
12443 return;
12444 }
12445 if (ip->i_nlink != inodedep->id_nlinkwrote &&
12446 inodedep->id_nlinkwrote != -1) {
12447 KASSERT(ip->i_nlink == 0 &&
12448 (ump->um_flags & UM_FSFAIL_CLEANUP) != 0,
12449 ("read bad i_nlink value"));
12450 ip->i_effnlink = ip->i_nlink = inodedep->id_nlinkwrote;
12451 }
12452 ip->i_effnlink -= inodedep->id_nlinkdelta;
12453 KASSERT(ip->i_effnlink >= 0,
12454 ("softdep_load_inodeblock: negative i_effnlink"));
12455 FREE_LOCK(ump);
12456 }
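
/*
* Example: a file with two names has one of them unlinked, but the
* removal dependency is still outstanding, so the inodedep carries
* id_nlinkdelta == 1. If the inode is evicted and later reloaded,
* it comes back from disk with i_nlink == 2, and the code above
* recomputes i_effnlink = i_nlink - id_nlinkdelta == 1, restoring
* the link count that the rest of the kernel was promised.
*/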
12457
12458 /*
12459 * This routine is called just before the "in-core" inode
12460 * information is to be copied to the in-memory inode block.
12461 * Recall that an inode block contains several inodes. If
12462 * the force flag is set, then the dependencies will be
12463 * cleared so that the update can always be made. Note that
12464 * the buffer is locked when this routine is called, so we
12465 * will never be in the middle of writing the inode block
12466 * to disk.
12467 */
12468 void
12469 softdep_update_inodeblock(
struct inode *ip, /* the "in-core" copy of the inode */
12471 struct buf *bp, /* the buffer containing the inode block */
12472 int waitfor) /* nonzero => update must be allowed */
12473 {
12474 struct inodedep *inodedep;
12475 struct inoref *inoref;
12476 struct ufsmount *ump;
12477 struct worklist *wk;
12478 struct mount *mp;
12479 struct buf *ibp;
12480 struct fs *fs;
12481 int error;
12482
12483 ump = ITOUMP(ip);
12484 mp = UFSTOVFS(ump);
12485 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
12486 ("softdep_update_inodeblock called on non-softdep filesystem"));
12487 fs = ump->um_fs;
12488 /*
12489 * Preserve the freelink that is on disk. clear_unlinked_inodedep()
* does not have access to the in-core ip, so it must write directly
* into the inode block buffer when setting freelink.
12492 */
12493 if (fs->fs_magic == FS_UFS1_MAGIC)
12494 DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data +
12495 ino_to_fsbo(fs, ip->i_number))->di_freelink);
12496 else
12497 DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data +
12498 ino_to_fsbo(fs, ip->i_number))->di_freelink);
12499 /*
12500 * If the effective link count is not equal to the actual link
12501 * count, then we must track the difference in an inodedep while
12502 * the inode is (potentially) tossed out of the cache. Otherwise,
12503 * if there is no existing inodedep, then there are no dependencies
12504 * to track.
12505 */
12506 ACQUIRE_LOCK(ump);
12507 again:
12508 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
12509 FREE_LOCK(ump);
12510 if (ip->i_effnlink != ip->i_nlink)
12511 panic("softdep_update_inodeblock: bad link count");
12512 return;
12513 }
12514 KASSERT(ip->i_nlink >= inodedep->id_nlinkdelta,
12515 ("softdep_update_inodeblock inconsistent ip %p i_nlink %d "
12516 "inodedep %p id_nlinkdelta %jd",
12517 ip, ip->i_nlink, inodedep, (intmax_t)inodedep->id_nlinkdelta));
12518 inodedep->id_nlinkwrote = ip->i_nlink;
12519 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink)
12520 panic("softdep_update_inodeblock: bad delta");
12521 /*
* If we're flushing all dependencies, we must also move anything
* waiting on journal writes onto the bufwait list prior to I/O.
12524 */
12525 if (waitfor) {
12526 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12527 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12528 == DEPCOMPLETE) {
12529 jwait(&inoref->if_list, MNT_WAIT);
12530 goto again;
12531 }
12532 }
12533 }
12534 /*
12535 * Changes have been initiated. Anything depending on these
12536 * changes cannot occur until this inode has been written.
12537 */
12538 inodedep->id_state &= ~COMPLETE;
12539 if ((inodedep->id_state & ONWORKLIST) == 0)
12540 WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
12541 /*
12542 * Any new dependencies associated with the incore inode must
12543 * now be moved to the list associated with the buffer holding
12544 * the in-memory copy of the inode. Once merged process any
12545 * allocdirects that are completed by the merger.
12546 */
12547 merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt);
12548 if (!TAILQ_EMPTY(&inodedep->id_inoupdt))
12549 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt),
12550 NULL);
12551 merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt);
12552 if (!TAILQ_EMPTY(&inodedep->id_extupdt))
12553 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt),
12554 NULL);
12555 /*
12556 * Now that the inode has been pushed into the buffer, the
12557 * operations dependent on the inode being written to disk
12558 * can be moved to the id_bufwait so that they will be
12559 * processed when the buffer I/O completes.
12560 */
12561 while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
12562 WORKLIST_REMOVE(wk);
12563 WORKLIST_INSERT(&inodedep->id_bufwait, wk);
12564 }
12565 /*
12566 * Newly allocated inodes cannot be written until the bitmap
* that allocates them has been written (indicated by
12568 * DEPCOMPLETE being set in id_state). If we are doing a
12569 * forced sync (e.g., an fsync on a file), we force the bitmap
12570 * to be written so that the update can be done.
12571 */
12572 if (waitfor == 0) {
12573 FREE_LOCK(ump);
12574 return;
12575 }
12576 retry:
12577 if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) {
12578 FREE_LOCK(ump);
12579 return;
12580 }
12581 ibp = inodedep->id_bmsafemap->sm_buf;
12582 ibp = getdirtybuf(ibp, LOCK_PTR(ump), MNT_WAIT);
12583 if (ibp == NULL) {
12584 /*
12585 * If ibp came back as NULL, the dependency could have been
12586 * freed while we slept. Look it up again, and check to see
12587 * that it has completed.
12588 */
12589 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
12590 goto retry;
12591 FREE_LOCK(ump);
12592 return;
12593 }
12594 FREE_LOCK(ump);
12595 if ((error = bwrite(ibp)) != 0)
12596 softdep_error("softdep_update_inodeblock: bwrite", error);
12597 }
12598
12599 /*
* Merge a new inode dependency list (such as id_newinoupdt) into an
12601 * old inode dependency list (such as id_inoupdt).
12602 */
12603 static void
12604 merge_inode_lists(
12605 struct allocdirectlst *newlisthead,
12606 struct allocdirectlst *oldlisthead)
12607 {
12608 struct allocdirect *listadp, *newadp;
12609
12610 newadp = TAILQ_FIRST(newlisthead);
12611 if (newadp != NULL)
12612 LOCK_OWNED(VFSTOUFS(newadp->ad_block.nb_list.wk_mp));
12613 for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) {
12614 if (listadp->ad_offset < newadp->ad_offset) {
12615 listadp = TAILQ_NEXT(listadp, ad_next);
12616 continue;
12617 }
12618 TAILQ_REMOVE(newlisthead, newadp, ad_next);
12619 TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
12620 if (listadp->ad_offset == newadp->ad_offset) {
12621 allocdirect_merge(oldlisthead, newadp,
12622 listadp);
12623 listadp = newadp;
12624 }
12625 newadp = TAILQ_FIRST(newlisthead);
12626 }
12627 while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) {
12628 TAILQ_REMOVE(newlisthead, newadp, ad_next);
12629 TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next);
12630 }
12631 }
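
/*
* Both lists are kept sorted by ad_offset, so the merge above is a
* single pass. For example, with the old list holding allocdirects
* for offsets {0, 2, 5} and the new list {2, 3}: the new entry for
* offset 2 is inserted before the old one and the two are collapsed
* by allocdirect_merge(); the entry for offset 3 is inserted before
* the one for offset 5; any entries remaining on the new list once
* the old list is exhausted are appended at the tail.
*/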
12632
12633 /*
12634 * If we are doing an fsync, then we must ensure that any directory
12635 * entries for the inode have been written after the inode gets to disk.
12636 */
12637 int
12638 softdep_fsync(
struct vnode *vp) /* vnode of the file to be fsync'ed */
12640 {
12641 struct inodedep *inodedep;
12642 struct pagedep *pagedep;
12643 struct inoref *inoref;
12644 struct ufsmount *ump;
12645 struct worklist *wk;
12646 struct diradd *dap;
12647 struct mount *mp;
12648 struct vnode *pvp;
12649 struct inode *ip;
12650 struct buf *bp;
12651 struct fs *fs;
12652 struct thread *td = curthread;
12653 int error, flushparent, pagedep_new_block;
12654 ino_t parentino;
12655 ufs_lbn_t lbn;
12656
12657 ip = VTOI(vp);
12658 mp = vp->v_mount;
12659 ump = VFSTOUFS(mp);
12660 fs = ump->um_fs;
12661 if (MOUNTEDSOFTDEP(mp) == 0)
12662 return (0);
12663 ACQUIRE_LOCK(ump);
12664 restart:
12665 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
12666 FREE_LOCK(ump);
12667 return (0);
12668 }
12669 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12670 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12671 == DEPCOMPLETE) {
12672 jwait(&inoref->if_list, MNT_WAIT);
12673 goto restart;
12674 }
12675 }
12676 if (!LIST_EMPTY(&inodedep->id_inowait) ||
12677 !TAILQ_EMPTY(&inodedep->id_extupdt) ||
12678 !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
12679 !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
12680 !TAILQ_EMPTY(&inodedep->id_newinoupdt))
12681 panic("softdep_fsync: pending ops %p", inodedep);
12682 for (error = 0, flushparent = 0; ; ) {
12683 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL)
12684 break;
12685 if (wk->wk_type != D_DIRADD)
12686 panic("softdep_fsync: Unexpected type %s",
12687 TYPENAME(wk->wk_type));
12688 dap = WK_DIRADD(wk);
12689 /*
12690 * Flush our parent if this directory entry has a MKDIR_PARENT
12691 * dependency or is contained in a newly allocated block.
12692 */
12693 if (dap->da_state & DIRCHG)
12694 pagedep = dap->da_previous->dm_pagedep;
12695 else
12696 pagedep = dap->da_pagedep;
12697 parentino = pagedep->pd_ino;
12698 lbn = pagedep->pd_lbn;
12699 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE)
12700 panic("softdep_fsync: dirty");
12701 if ((dap->da_state & MKDIR_PARENT) ||
12702 (pagedep->pd_state & NEWBLOCK))
12703 flushparent = 1;
12704 else
12705 flushparent = 0;
12706 /*
12707 * If we are being fsync'ed as part of vgone'ing this vnode,
12708 * then we will not be able to release and recover the
12709 * vnode below, so we just have to give up on writing its
12710 * directory entry out. It will eventually be written, just
12711 * not now, but then the user was not asking to have it
12712 * written, so we are not breaking any promises.
12713 */
12714 if (VN_IS_DOOMED(vp))
12715 break;
12716 /*
12717 * We prevent deadlock by always fetching inodes from the
12718 * root, moving down the directory tree. Thus, when fetching
12719 * our parent directory, we first try to get the lock. If
12720 * that fails, we must unlock ourselves before requesting
12721 * the lock on our parent. See the comment in ufs_lookup
12722 * for details on possible races.
12723 */
12724 FREE_LOCK(ump);
12725 error = get_parent_vp(vp, mp, parentino, NULL, NULL, NULL,
12726 &pvp);
12727 if (error == ERELOOKUP)
12728 error = 0;
12729 if (error != 0)
12730 return (error);
12731 /*
12732 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps
12733 * that are contained in direct blocks will be resolved by
12734 * doing a ffs_update. Pagedeps contained in indirect blocks
12735 * may require a complete sync'ing of the directory. So, we
12736 * try the cheap and fast ffs_update first, and if that fails,
12737 * then we do the slower ffs_syncvnode of the directory.
12738 */
12739 if (flushparent) {
12740 int locked;
12741
12742 if ((error = ffs_update(pvp, 1)) != 0) {
12743 vput(pvp);
12744 return (error);
12745 }
12746 ACQUIRE_LOCK(ump);
12747 locked = 1;
12748 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) {
12749 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) {
12750 if (wk->wk_type != D_DIRADD)
12751 panic("softdep_fsync: Unexpected type %s",
12752 TYPENAME(wk->wk_type));
12753 dap = WK_DIRADD(wk);
12754 if (dap->da_state & DIRCHG)
12755 pagedep = dap->da_previous->dm_pagedep;
12756 else
12757 pagedep = dap->da_pagedep;
12758 pagedep_new_block = pagedep->pd_state & NEWBLOCK;
12759 FREE_LOCK(ump);
12760 locked = 0;
12761 if (pagedep_new_block) {
12762 VOP_UNLOCK(vp);
12763 error = ffs_syncvnode(pvp,
12764 MNT_WAIT, 0);
12765 if (error == 0)
12766 error = ERELOOKUP;
12767 vput(pvp);
12768 vn_lock(vp, LK_EXCLUSIVE |
12769 LK_RETRY);
12770 return (error);
12771 }
12772 }
12773 }
12774 if (locked)
12775 FREE_LOCK(ump);
12776 }
12777 /*
12778 * Flush directory page containing the inode's name.
12779 */
12780 error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred,
12781 &bp);
12782 if (error == 0)
12783 error = bwrite(bp);
12784 else
12785 brelse(bp);
12786 vput(pvp);
12787 if (!ffs_fsfail_cleanup(ump, error))
12788 return (error);
12789 ACQUIRE_LOCK(ump);
12790 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
12791 break;
12792 }
12793 FREE_LOCK(ump);
12794 return (0);
12795 }
12796
12797 /*
12798 * Flush all the dirty bitmaps associated with the block device
12799 * before flushing the rest of the dirty blocks so as to reduce
12800 * the number of dependencies that will have to be rolled back.
12801 *
12802 * XXX Unused?
12803 */
12804 void
12805 softdep_fsync_mountdev(struct vnode *vp)
12806 {
12807 struct buf *bp, *nbp;
12808 struct worklist *wk;
12809 struct bufobj *bo;
12810
12811 if (!vn_isdisk(vp))
12812 panic("softdep_fsync_mountdev: vnode not a disk");
12813 bo = &vp->v_bufobj;
12814 restart:
12815 BO_LOCK(bo);
12816 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
12817 /*
12818 * If it is already scheduled, skip to the next buffer.
12819 */
12820 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
12821 continue;
12822
12823 if ((bp->b_flags & B_DELWRI) == 0)
12824 panic("softdep_fsync_mountdev: not dirty");
12825 /*
12826 * We are only interested in bitmaps with outstanding
12827 * dependencies.
12828 */
12829 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL ||
12830 wk->wk_type != D_BMSAFEMAP ||
12831 (bp->b_vflags & BV_BKGRDINPROG)) {
12832 BUF_UNLOCK(bp);
12833 continue;
12834 }
12835 BO_UNLOCK(bo);
12836 bremfree(bp);
12837 (void) bawrite(bp);
12838 goto restart;
12839 }
12840 drain_output(vp);
12841 BO_UNLOCK(bo);
12842 }
12843
12844 /*
12845 * Sync all cylinder groups that were dirty at the time this function is
12846 * called. Newly dirtied cgs will be inserted before the sentinel. This
* is used to flush freedep activity that may be holding up writes to an
12848 * indirect block.
12849 */
12850 static int
12851 sync_cgs(struct mount *mp, int waitfor)
12852 {
12853 struct bmsafemap *bmsafemap;
12854 struct bmsafemap *sentinel;
12855 struct ufsmount *ump;
12856 struct buf *bp;
12857 int error;
12858
12859 sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK);
12860 sentinel->sm_cg = -1;
12861 ump = VFSTOUFS(mp);
12862 error = 0;
12863 ACQUIRE_LOCK(ump);
12864 LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next);
12865 for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL;
12866 bmsafemap = LIST_NEXT(sentinel, sm_next)) {
12867 /* Skip sentinels and cgs with no work to release. */
12868 if (bmsafemap->sm_cg == -1 ||
12869 (LIST_EMPTY(&bmsafemap->sm_freehd) &&
12870 LIST_EMPTY(&bmsafemap->sm_freewr))) {
12871 LIST_REMOVE(sentinel, sm_next);
12872 LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12873 continue;
12874 }
12875 /*
* If we don't get the lock and we're waiting, try again;
* otherwise move on to the next buf and try to sync it.
12878 */
12879 bp = getdirtybuf(bmsafemap->sm_buf, LOCK_PTR(ump), waitfor);
12880 if (bp == NULL && waitfor == MNT_WAIT)
12881 continue;
12882 LIST_REMOVE(sentinel, sm_next);
12883 LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12884 if (bp == NULL)
12885 continue;
12886 FREE_LOCK(ump);
12887 if (waitfor == MNT_NOWAIT)
12888 bawrite(bp);
12889 else
12890 error = bwrite(bp);
12891 ACQUIRE_LOCK(ump);
12892 if (error)
12893 break;
12894 }
12895 LIST_REMOVE(sentinel, sm_next);
12896 FREE_LOCK(ump);
12897 free(sentinel, M_BMSAFEMAP);
12898 return (error);
12899 }
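
/*
* The sentinel used above is what makes it safe to drop the softdep
* lock around each write: the sentinel's position in the list records
* our progress, so concurrent insertion and removal of bmsafemaps
* cannot invalidate the iteration. In outline:
*
* LIST_INSERT_HEAD(list, sentinel, link);
* while ((item = LIST_NEXT(sentinel, link)) != NULL) {
*         LIST_REMOVE(sentinel, link);
*         LIST_INSERT_AFTER(item, sentinel, link);
*         (drop the lock, flush item, retake the lock)
* }
* LIST_REMOVE(sentinel, link);
*/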
12900
12901 /*
12902 * This routine is called when we are trying to synchronously flush a
12903 * file. This routine must eliminate any filesystem metadata dependencies
12904 * so that the syncing routine can succeed.
12905 */
12906 int
12907 softdep_sync_metadata(struct vnode *vp)
12908 {
12909 struct inode *ip;
12910 int error;
12911
12912 ip = VTOI(vp);
12913 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
12914 ("softdep_sync_metadata called on non-softdep filesystem"));
12915 /*
12916 * Ensure that any direct block dependencies have been cleared,
12917 * truncations are started, and inode references are journaled.
12918 */
12919 ACQUIRE_LOCK(VFSTOUFS(vp->v_mount));
12920 /*
12921 * Write all journal records to prevent rollbacks on devvp.
12922 */
12923 if (vp->v_type == VCHR)
12924 softdep_flushjournal(vp->v_mount);
12925 error = flush_inodedep_deps(vp, vp->v_mount, ip->i_number);
12926 /*
12927 * Ensure that all truncates are written so we won't find deps on
12928 * indirect blocks.
12929 */
12930 process_truncates(vp);
12931 FREE_LOCK(VFSTOUFS(vp->v_mount));
12932
12933 return (error);
12934 }
12935
12936 /*
12937 * This routine is called when we are attempting to sync a buf with
* dependencies. If waitfor is MNT_NOWAIT, it attempts to schedule any
* other I/O it can, but returns EBUSY if the buffer is not yet able to
* be written. Dependencies which will not cause rollbacks will always
12941 * return 0.
12942 */
12943 int
12944 softdep_sync_buf(struct vnode *vp,
12945 struct buf *bp,
12946 int waitfor)
12947 {
12948 struct indirdep *indirdep;
12949 struct pagedep *pagedep;
12950 struct allocindir *aip;
12951 struct newblk *newblk;
12952 struct ufsmount *ump;
12953 struct buf *nbp;
12954 struct worklist *wk;
12955 int i, error;
12956
12957 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
12958 ("softdep_sync_buf called on non-softdep filesystem"));
12959 /*
12960 * For VCHR we just don't want to force flush any dependencies that
12961 * will cause rollbacks.
12962 */
12963 if (vp->v_type == VCHR) {
12964 if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0))
12965 return (EBUSY);
12966 return (0);
12967 }
12968 ump = VFSTOUFS(vp->v_mount);
12969 ACQUIRE_LOCK(ump);
12970 /*
12971 * As we hold the buffer locked, none of its dependencies
12972 * will disappear.
12973 */
12974 error = 0;
12975 top:
12976 LIST_FOREACH(wk, &bp->b_dep, wk_list) {
12977 switch (wk->wk_type) {
12978 case D_ALLOCDIRECT:
12979 case D_ALLOCINDIR:
12980 newblk = WK_NEWBLK(wk);
12981 if (newblk->nb_jnewblk != NULL) {
12982 if (waitfor == MNT_NOWAIT) {
12983 error = EBUSY;
12984 goto out_unlock;
12985 }
12986 jwait(&newblk->nb_jnewblk->jn_list, waitfor);
12987 goto top;
12988 }
12989 if (newblk->nb_state & DEPCOMPLETE ||
12990 waitfor == MNT_NOWAIT)
12991 continue;
12992 nbp = newblk->nb_bmsafemap->sm_buf;
12993 nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor);
12994 if (nbp == NULL)
12995 goto top;
12996 FREE_LOCK(ump);
12997 if ((error = bwrite(nbp)) != 0)
12998 goto out;
12999 ACQUIRE_LOCK(ump);
13000 continue;
13001
13002 case D_INDIRDEP:
13003 indirdep = WK_INDIRDEP(wk);
13004 if (waitfor == MNT_NOWAIT) {
13005 if (!TAILQ_EMPTY(&indirdep->ir_trunc) ||
13006 !LIST_EMPTY(&indirdep->ir_deplisthd)) {
13007 error = EBUSY;
13008 goto out_unlock;
13009 }
13010 }
13011 if (!TAILQ_EMPTY(&indirdep->ir_trunc))
13012 panic("softdep_sync_buf: truncation pending.");
13013 restart:
13014 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
13015 newblk = (struct newblk *)aip;
13016 if (newblk->nb_jnewblk != NULL) {
13017 jwait(&newblk->nb_jnewblk->jn_list,
13018 waitfor);
13019 goto restart;
13020 }
13021 if (newblk->nb_state & DEPCOMPLETE)
13022 continue;
13023 nbp = newblk->nb_bmsafemap->sm_buf;
13024 nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor);
13025 if (nbp == NULL)
13026 goto restart;
13027 FREE_LOCK(ump);
13028 if ((error = bwrite(nbp)) != 0)
13029 goto out;
13030 ACQUIRE_LOCK(ump);
13031 goto restart;
13032 }
13033 continue;
13034
13035 case D_PAGEDEP:
13036 /*
13037 * Only flush directory entries in synchronous passes.
13038 */
13039 if (waitfor != MNT_WAIT) {
13040 error = EBUSY;
13041 goto out_unlock;
13042 }
13043 /*
13044 * While syncing snapshots, we must allow recursive
13045 * lookups.
13046 */
13047 BUF_AREC(bp);
13048 /*
13049 * We are trying to sync a directory that may
13050 * have dependencies on both its own metadata
13051 * and/or dependencies on the inodes of any
13052 * recently allocated files. We walk its diradd
13053 * lists pushing out the associated inode.
13054 */
13055 pagedep = WK_PAGEDEP(wk);
13056 for (i = 0; i < DAHASHSZ; i++) {
if (LIST_EMPTY(&pagedep->pd_diraddhd[i]))
13058 continue;
13059 error = flush_pagedep_deps(vp, wk->wk_mp,
13060 &pagedep->pd_diraddhd[i], bp);
13061 if (error != 0) {
13062 if (error != ERELOOKUP)
13063 BUF_NOREC(bp);
13064 goto out_unlock;
13065 }
13066 }
13067 BUF_NOREC(bp);
13068 continue;
13069
13070 case D_FREEWORK:
13071 case D_FREEDEP:
13072 case D_JSEGDEP:
13073 case D_JNEWBLK:
13074 continue;
13075
13076 default:
13077 panic("softdep_sync_buf: Unknown type %s",
13078 TYPENAME(wk->wk_type));
13079 /* NOTREACHED */
13080 }
13081 }
13082 out_unlock:
13083 FREE_LOCK(ump);
13084 out:
13085 return (error);
13086 }
13087
13088 /*
13089 * Flush the dependencies associated with an inodedep.
13090 */
13091 static int
13092 flush_inodedep_deps(
13093 struct vnode *vp,
13094 struct mount *mp,
13095 ino_t ino)
13096 {
13097 struct inodedep *inodedep;
13098 struct inoref *inoref;
13099 struct ufsmount *ump;
13100 int error, waitfor;
13101
13102 /*
13103 * This work is done in two passes. The first pass grabs most
13104 * of the buffers and begins asynchronously writing them. The
13105 * only way to wait for these asynchronous writes is to sleep
13106 * on the filesystem vnode which may stay busy for a long time
13107 * if the filesystem is active. So, instead, we make a second
13108 * pass over the dependencies blocking on each write. In the
13109 * usual case we will be blocking against a write that we
13110 * initiated, so when it is done the dependency will have been
13111 * resolved. Thus the second pass is expected to end quickly.
13112 * We give a brief window at the top of the loop to allow
13113 * any pending I/O to complete.
13114 */
13115 ump = VFSTOUFS(mp);
13116 LOCK_OWNED(ump);
13117 for (error = 0, waitfor = MNT_NOWAIT; ; ) {
13118 if (error)
13119 return (error);
13120 FREE_LOCK(ump);
13121 ACQUIRE_LOCK(ump);
13122 restart:
13123 if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
13124 return (0);
13125 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
13126 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
13127 == DEPCOMPLETE) {
13128 jwait(&inoref->if_list, MNT_WAIT);
13129 goto restart;
13130 }
13131 }
13132 if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
13133 flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
13134 flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
13135 flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
13136 continue;
13137 /*
* If this was pass 2, we are done; otherwise begin pass 2.
13139 */
13140 if (waitfor == MNT_WAIT)
13141 break;
13142 waitfor = MNT_WAIT;
13143 }
13144 /*
13145 * Try freeing inodedep in case all dependencies have been removed.
13146 */
13147 if (inodedep_lookup(mp, ino, 0, &inodedep) != 0)
13148 (void) free_inodedep(inodedep);
13149 return (0);
13150 }
13151
13152 /*
13153 * Flush an inode dependency list.
13154 */
13155 static int
13156 flush_deplist(
13157 struct allocdirectlst *listhead,
13158 int waitfor,
13159 int *errorp)
13160 {
13161 struct allocdirect *adp;
13162 struct newblk *newblk;
13163 struct ufsmount *ump;
13164 struct buf *bp;
13165
13166 if ((adp = TAILQ_FIRST(listhead)) == NULL)
13167 return (0);
13168 ump = VFSTOUFS(adp->ad_list.wk_mp);
13169 LOCK_OWNED(ump);
13170 TAILQ_FOREACH(adp, listhead, ad_next) {
13171 newblk = (struct newblk *)adp;
13172 if (newblk->nb_jnewblk != NULL) {
13173 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
13174 return (1);
13175 }
13176 if (newblk->nb_state & DEPCOMPLETE)
13177 continue;
13178 bp = newblk->nb_bmsafemap->sm_buf;
13179 bp = getdirtybuf(bp, LOCK_PTR(ump), waitfor);
13180 if (bp == NULL) {
13181 if (waitfor == MNT_NOWAIT)
13182 continue;
13183 return (1);
13184 }
13185 FREE_LOCK(ump);
13186 if (waitfor == MNT_NOWAIT)
13187 bawrite(bp);
13188 else
13189 *errorp = bwrite(bp);
13190 ACQUIRE_LOCK(ump);
13191 return (1);
13192 }
13193 return (0);
13194 }
13195
13196 /*
13197 * Flush dependencies associated with an allocdirect block.
13198 */
13199 static int
13200 flush_newblk_dep(
13201 struct vnode *vp,
13202 struct mount *mp,
13203 ufs_lbn_t lbn)
13204 {
13205 struct newblk *newblk;
13206 struct ufsmount *ump;
13207 struct bufobj *bo;
13208 struct inode *ip;
13209 struct buf *bp;
13210 ufs2_daddr_t blkno;
13211 int error;
13212
13213 error = 0;
13214 bo = &vp->v_bufobj;
13215 ip = VTOI(vp);
13216 blkno = DIP(ip, i_db[lbn]);
13217 if (blkno == 0)
13218 panic("flush_newblk_dep: Missing block");
13219 ump = VFSTOUFS(mp);
13220 ACQUIRE_LOCK(ump);
13221 /*
13222 * Loop until all dependencies related to this block are satisfied.
13223 * We must be careful to restart after each sleep in case a write
13224 * completes some part of this process for us.
13225 */
13226 for (;;) {
13227 if (newblk_lookup(mp, blkno, 0, &newblk) == 0) {
13228 FREE_LOCK(ump);
13229 break;
13230 }
13231 if (newblk->nb_list.wk_type != D_ALLOCDIRECT)
13232 panic("flush_newblk_dep: Bad newblk %p", newblk);
13233 /*
13234 * Flush the journal.
13235 */
13236 if (newblk->nb_jnewblk != NULL) {
13237 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
13238 continue;
13239 }
13240 /*
13241 * Write the bitmap dependency.
13242 */
13243 if ((newblk->nb_state & DEPCOMPLETE) == 0) {
13244 bp = newblk->nb_bmsafemap->sm_buf;
13245 bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT);
13246 if (bp == NULL)
13247 continue;
13248 FREE_LOCK(ump);
13249 error = bwrite(bp);
13250 if (error)
13251 break;
13252 ACQUIRE_LOCK(ump);
13253 continue;
13254 }
13255 /*
13256 * Write the buffer.
13257 */
13258 FREE_LOCK(ump);
13259 BO_LOCK(bo);
13260 bp = gbincore(bo, lbn);
13261 if (bp != NULL) {
13262 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
13263 LK_INTERLOCK, BO_LOCKPTR(bo));
13264 if (error == ENOLCK) {
13265 ACQUIRE_LOCK(ump);
13266 error = 0;
13267 continue; /* Slept, retry */
13268 }
13269 if (error != 0)
13270 break; /* Failed */
13271 if (bp->b_flags & B_DELWRI) {
13272 bremfree(bp);
13273 error = bwrite(bp);
13274 if (error)
13275 break;
13276 } else
13277 BUF_UNLOCK(bp);
13278 } else
13279 BO_UNLOCK(bo);
13280 /*
13281 * We have to wait for the direct pointers to
13282 * point at the newdirblk before the dependency
13283 * will go away.
13284 */
13285 error = ffs_update(vp, 1);
13286 if (error)
13287 break;
13288 ACQUIRE_LOCK(ump);
13289 }
13290 return (error);
13291 }
13292
13293 /*
13294 * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
13295 */
13296 static int
13297 flush_pagedep_deps(
13298 struct vnode *pvp,
13299 struct mount *mp,
13300 struct diraddhd *diraddhdp,
13301 struct buf *locked_bp)
13302 {
13303 struct inodedep *inodedep;
13304 struct inoref *inoref;
13305 struct ufsmount *ump;
13306 struct diradd *dap;
13307 struct vnode *vp;
13308 int error = 0;
13309 struct buf *bp;
13310 ino_t inum;
13311 struct diraddhd unfinished;
13312
13313 LIST_INIT(&unfinished);
13314 ump = VFSTOUFS(mp);
13315 LOCK_OWNED(ump);
13316 restart:
13317 while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
13318 /*
13319 * Flush ourselves if this directory entry
13320 * has a MKDIR_PARENT dependency.
13321 */
13322 if (dap->da_state & MKDIR_PARENT) {
13323 FREE_LOCK(ump);
13324 if ((error = ffs_update(pvp, 1)) != 0)
13325 break;
13326 ACQUIRE_LOCK(ump);
13327 /*
13328 * If that cleared dependencies, go on to next.
13329 */
13330 if (dap != LIST_FIRST(diraddhdp))
13331 continue;
13332 /*
13333 * All MKDIR_PARENT dependencies and all the
13334 * NEWBLOCK pagedeps that are contained in direct
13335 * blocks were resolved by doing above ffs_update.
13336 * Pagedeps contained in indirect blocks may
13337 * require a complete sync'ing of the directory.
13338 * We are in the midst of doing a complete sync,
13339 * so if they are not resolved in this pass we
13340 * defer them for now as they will be sync'ed by
13341 * our caller shortly.
13342 */
13343 LIST_REMOVE(dap, da_pdlist);
13344 LIST_INSERT_HEAD(&unfinished, dap, da_pdlist);
13345 continue;
13346 }
13347 /*
13348 * A newly allocated directory must have its "." and
13349 * ".." entries written out before its name can be
13350 * committed in its parent.
13351 */
13352 inum = dap->da_newinum;
13353 if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
13354 panic("flush_pagedep_deps: lost inode1");
13355 /*
13356 * Wait for any pending journal adds to complete so we don't
13357 * cause rollbacks while syncing.
13358 */
13359 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
13360 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
13361 == DEPCOMPLETE) {
13362 jwait(&inoref->if_list, MNT_WAIT);
13363 goto restart;
13364 }
13365 }
13366 if (dap->da_state & MKDIR_BODY) {
13367 FREE_LOCK(ump);
13368 error = get_parent_vp(pvp, mp, inum, locked_bp,
13369 diraddhdp, &unfinished, &vp);
13370 if (error != 0)
13371 break;
13372 error = flush_newblk_dep(vp, mp, 0);
13373 /*
13374 * If we still have the dependency we might need to
13375 * update the vnode to sync the new link count to
13376 * disk.
13377 */
13378 if (error == 0 && dap == LIST_FIRST(diraddhdp))
13379 error = ffs_update(vp, 1);
13380 vput(vp);
13381 if (error != 0)
13382 break;
13383 ACQUIRE_LOCK(ump);
13384 /*
13385 * If that cleared dependencies, go on to next.
13386 */
13387 if (dap != LIST_FIRST(diraddhdp))
13388 continue;
13389 if (dap->da_state & MKDIR_BODY) {
13390 inodedep_lookup(UFSTOVFS(ump), inum, 0,
13391 &inodedep);
13392 panic("flush_pagedep_deps: MKDIR_BODY "
13393 "inodedep %p dap %p vp %p",
13394 inodedep, dap, vp);
13395 }
13396 }
13397 /*
13398 * Flush the inode on which the directory entry depends.
13399 * Having accounted for MKDIR_PARENT and MKDIR_BODY above,
13400 * the only remaining dependency is that the updated inode
13401 * count must get pushed to disk. The inode has already
13402 * been pushed into its inode buffer (via VOP_UPDATE) at
13403 * the time of the reference count change. So we need only
13404 * locate that buffer, ensure that there will be no rollback
13405 * caused by a bitmap dependency, then write the inode buffer.
13406 */
13407 retry:
13408 if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
13409 panic("flush_pagedep_deps: lost inode");
13410 /*
13411 * If the inode still has bitmap dependencies,
13412 * push them to disk.
13413 */
13414 if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) {
13415 bp = inodedep->id_bmsafemap->sm_buf;
13416 bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT);
13417 if (bp == NULL)
13418 goto retry;
13419 FREE_LOCK(ump);
13420 if ((error = bwrite(bp)) != 0)
13421 break;
13422 ACQUIRE_LOCK(ump);
13423 if (dap != LIST_FIRST(diraddhdp))
13424 continue;
13425 }
13426 /*
13427 * If the inode is still sitting in a buffer waiting
13428 * to be written or waiting for the link count to be
* adjusted, update it here to flush it to disk.
13430 */
13431 if (dap == LIST_FIRST(diraddhdp)) {
13432 FREE_LOCK(ump);
13433 error = get_parent_vp(pvp, mp, inum, locked_bp,
13434 diraddhdp, &unfinished, &vp);
13435 if (error != 0)
13436 break;
13437 error = ffs_update(vp, 1);
13438 vput(vp);
13439 if (error)
13440 break;
13441 ACQUIRE_LOCK(ump);
13442 }
13443 /*
13444 * If we have failed to get rid of all the dependencies
13445 * then something is seriously wrong.
13446 */
13447 if (dap == LIST_FIRST(diraddhdp)) {
13448 inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep);
13449 panic("flush_pagedep_deps: failed to flush "
13450 "inodedep %p ino %ju dap %p",
13451 inodedep, (uintmax_t)inum, dap);
13452 }
13453 }
13454 if (error)
13455 ACQUIRE_LOCK(ump);
13456 while ((dap = LIST_FIRST(&unfinished)) != NULL) {
13457 LIST_REMOVE(dap, da_pdlist);
13458 LIST_INSERT_HEAD(diraddhdp, dap, da_pdlist);
13459 }
13460 return (error);
13461 }
13462
13463 /*
13464 * A large burst of file addition or deletion activity can drive the
13465 * memory load excessively high. First attempt to slow things down
13466 * using the techniques below. If that fails, this routine requests
13467 * the offending operations to fall back to running synchronously
13468 * until the memory load returns to a reasonable level.
13469 */
13470 int
13471 softdep_slowdown(struct vnode *vp)
13472 {
13473 struct ufsmount *ump;
13474 int jlow;
13475 int max_softdeps_hard;
13476
13477 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
13478 ("softdep_slowdown called on non-softdep filesystem"));
13479 ump = VFSTOUFS(vp->v_mount);
13480 ACQUIRE_LOCK(ump);
13481 jlow = 0;
13482 /*
13483 * Check for journal space if needed.
13484 */
13485 if (DOINGSUJ(vp)) {
13486 if (journal_space(ump, 0) == 0)
13487 jlow = 1;
13488 }
13489 /*
* If the system is under its limits, our filesystem is not
* responsible for more than its share of the usage, and we are
* not low on journal space, then there is no need to slow down.
13493 */
13494 max_softdeps_hard = max_softdeps * 11 / 10;
13495 if (dep_current[D_DIRREM] < max_softdeps_hard / 2 &&
13496 dep_current[D_INODEDEP] < max_softdeps_hard &&
13497 dep_current[D_INDIRDEP] < max_softdeps_hard / 1000 &&
13498 dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0 &&
13499 ump->softdep_curdeps[D_DIRREM] <
13500 (max_softdeps_hard / 2) / stat_flush_threads &&
13501 ump->softdep_curdeps[D_INODEDEP] <
13502 max_softdeps_hard / stat_flush_threads &&
13503 ump->softdep_curdeps[D_INDIRDEP] <
13504 (max_softdeps_hard / 1000) / stat_flush_threads &&
13505 ump->softdep_curdeps[D_FREEBLKS] <
13506 max_softdeps_hard / stat_flush_threads) {
13507 FREE_LOCK(ump);
13508 return (0);
13509 }
13510 /*
13511 * If the journal is low or our filesystem is over its limit
* then speed up the cleanup.
13513 */
13514 if (ump->softdep_curdeps[D_INDIRDEP] <
13515 (max_softdeps_hard / 1000) / stat_flush_threads || jlow)
13516 softdep_speedup(ump);
13517 stat_sync_limit_hit += 1;
13518 FREE_LOCK(ump);
13519 /*
13520 * We only slow down the rate at which new dependencies are
13521 * generated if we are not using journaling. With journaling,
13522 * the cleanup should always be sufficient to keep things
13523 * under control.
13524 */
13525 if (DOINGSUJ(vp))
13526 return (0);
13527 return (1);
13528 }
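
/*
* To put numbers on the thresholds above: with max_softdeps == 400000
* and four flush threads, max_softdeps_hard is 440000, so the fast
* path is taken while the system as a whole holds fewer than 220000
* dirrems, 440000 inodedeps, 440 indirdeps, and 440000 freeblks, and
* this filesystem's share of each stays under a quarter of those
* limits (assuming the journal, if any, also has space).
*/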
13529
13530 static int
13531 softdep_request_cleanup_filter(struct vnode *vp, void *arg __unused)
13532 {
13533 return ((vp->v_iflag & VI_OWEINACT) != 0 && vp->v_usecount == 0 &&
13534 ((vp->v_vflag & VV_NOSYNC) != 0 || VTOI(vp)->i_effnlink == 0));
13535 }
13536
13537 static void
13538 softdep_request_cleanup_inactivate(struct mount *mp)
13539 {
13540 struct vnode *vp, *mvp;
13541 int error;
13542
13543 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, softdep_request_cleanup_filter,
13544 NULL) {
13545 vholdl(vp);
13546 vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
13547 VI_LOCK(vp);
13548 if (IS_UFS(vp) && vp->v_usecount == 0) {
13549 while ((vp->v_iflag & VI_OWEINACT) != 0) {
13550 error = vinactive(vp);
13551 if (error != 0 && error != ERELOOKUP)
13552 break;
13553 }
13554 atomic_add_int(&stat_delayed_inact, 1);
13555 }
13556 VOP_UNLOCK(vp);
13557 vdropl(vp);
13558 }
13559 }
13560
13561 /*
13562 * Called by the allocation routines when they are about to fail
13563 * in the hope that we can free up the requested resource (inodes
13564 * or disk space).
13565 *
13566 * First check to see if the work list has anything on it. If it has,
13567 * clean up entries until we successfully free the requested resource.
13568 * Because this process holds inodes locked, we cannot handle any remove
13569 * requests that might block on a locked inode as that could lead to
13570 * deadlock. If the worklist yields none of the requested resource,
13571 * start syncing out vnodes to free up the needed space.
13572 */
13573 int
13574 softdep_request_cleanup(
13575 struct fs *fs,
13576 struct vnode *vp,
13577 struct ucred *cred,
13578 int resource)
13579 {
13580 struct ufsmount *ump;
13581 struct mount *mp;
13582 long starttime;
13583 ufs2_daddr_t needed;
13584 int error, failed_vnode;
13585
13586 /*
13587 * If we are being called because of a process doing a
13588 * copy-on-write, then it is not safe to process any
13589 * worklist items as we will recurse into the copyonwrite
13590 * routine. This will result in an incoherent snapshot.
13591 * If the vnode that we hold is a snapshot, we must avoid
13592 * handling other resources that could cause deadlock.
13593 */
13594 if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp)))
13595 return (0);
13596
13597 if (resource == FLUSH_BLOCKS_WAIT)
13598 stat_cleanup_blkrequests += 1;
13599 else
13600 stat_cleanup_inorequests += 1;
13601
13602 mp = vp->v_mount;
13603 ump = VFSTOUFS(mp);
13604 mtx_assert(UFS_MTX(ump), MA_OWNED);
13605 UFS_UNLOCK(ump);
13606 error = ffs_update(vp, 1);
13607 if (error != 0 || MOUNTEDSOFTDEP(mp) == 0) {
13608 UFS_LOCK(ump);
13609 return (0);
13610 }
13611 /*
13612 * If we are in need of resources, start by cleaning up
13613 * any block removals associated with our inode.
13614 */
13615 ACQUIRE_LOCK(ump);
13616 process_removes(vp);
13617 process_truncates(vp);
13618 FREE_LOCK(ump);
13619 /*
13620 * Now clean up at least as many resources as we will need.
13621 *
13622 * When requested to clean up inodes, the number that are needed
13623 * is set by the number of simultaneous writers (mnt_writeopcount)
13624 * plus a bit of slop (2) in case some more writers show up while
13625 * we are cleaning.
13626 *
13627 * When requested to free up space, the amount of space that
13628 * we need is enough blocks to allocate a full-sized segment
13629 * (fs_contigsumsize). The number of such segments that will
13630 * be needed is set by the number of simultaneous writers
13631 * (mnt_writeopcount) plus a bit of slop (2) in case some more
13632 * writers show up while we are cleaning.
13633 *
* Additionally, if we are unprivileged and allocating space,
13635 * we need to ensure that we clean up enough blocks to get the
13636 * needed number of blocks over the threshold of the minimum
13637 * number of blocks required to be kept free by the filesystem
13638 * (fs_minfree).
13639 */
13640 if (resource == FLUSH_INODES_WAIT) {
13641 needed = vfs_mount_fetch_counter(vp->v_mount,
13642 MNT_COUNT_WRITEOPCOUNT) + 2;
13643 } else if (resource == FLUSH_BLOCKS_WAIT) {
13644 needed = (vfs_mount_fetch_counter(vp->v_mount,
13645 MNT_COUNT_WRITEOPCOUNT) + 2) * fs->fs_contigsumsize;
13646 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE))
13647 needed += fragstoblks(fs,
13648 roundup((fs->fs_dsize * fs->fs_minfree / 100) -
13649 fs->fs_cstotal.cs_nffree, fs->fs_frag));
13650 } else {
13651 printf("softdep_request_cleanup: Unknown resource type %d\n",
13652 resource);
13653 UFS_LOCK(ump);
13654 return (0);
13655 }
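/*
* For example, with six concurrent writers and an fs_contigsumsize of
* 16, a FLUSH_BLOCKS_WAIT request computes
* needed = (6 + 2) * 16 == 128 blocks; an unprivileged caller
* additionally needs enough extra blocks to restore the fs_minfree
* reserve.
*/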
13656 starttime = time_second;
13657 retry:
13658 if (resource == FLUSH_BLOCKS_WAIT &&
13659 fs->fs_cstotal.cs_nbfree <= needed)
13660 softdep_send_speedup(ump, needed * fs->fs_bsize,
13661 BIO_SPEEDUP_TRIM);
13662 if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 &&
13663 fs->fs_cstotal.cs_nbfree <= needed) ||
13664 (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13665 fs->fs_cstotal.cs_nifree <= needed)) {
13666 ACQUIRE_LOCK(ump);
13667 if (ump->softdep_on_worklist > 0 &&
13668 process_worklist_item(UFSTOVFS(ump),
13669 ump->softdep_on_worklist, LK_NOWAIT) != 0)
13670 stat_worklist_push += 1;
13671 FREE_LOCK(ump);
13672 }
13673
13674 /*
* Check for vnodes pending inactivation. As they
13676 * have been unlinked, inactivating them will free up their
13677 * inodes.
13678 */
13679 ACQUIRE_LOCK(ump);
13680 if (resource == FLUSH_INODES_WAIT &&
13681 fs->fs_cstotal.cs_nifree <= needed &&
13682 fs->fs_pendinginodes <= needed) {
13683 if ((ump->um_softdep->sd_flags & FLUSH_DI_ACTIVE) == 0) {
13684 ump->um_softdep->sd_flags |= FLUSH_DI_ACTIVE;
13685 FREE_LOCK(ump);
13686 softdep_request_cleanup_inactivate(mp);
13687 ACQUIRE_LOCK(ump);
13688 ump->um_softdep->sd_flags &= ~FLUSH_DI_ACTIVE;
13689 wakeup(&ump->um_softdep->sd_flags);
13690 } else {
13691 while ((ump->um_softdep->sd_flags &
13692 FLUSH_DI_ACTIVE) != 0) {
13693 msleep(&ump->um_softdep->sd_flags,
13694 LOCK_PTR(ump), PVM, "ffsvina", hz);
13695 }
13696 }
13697 }
13698 FREE_LOCK(ump);
13699
13700 /*
13701 * If we still need resources and there are no more worklist
13702 * entries to process to obtain them, we have to start flushing
13703 * the dirty vnodes to force the release of additional requests
* to the worklist that we can then process to reap additional
13705 * resources. We walk the vnodes associated with the mount point
13706 * until we get the needed worklist requests that we can reap.
13707 *
13708 * If there are several threads all needing to clean the same
13709 * mount point, only one is allowed to walk the mount list.
13710 * When several threads all try to walk the same mount list,
13711 * they end up competing with each other and often end up in
13712 * livelock. This approach ensures that forward progress is
* made at the cost of occasional ENOSPC errors being returned
13714 * that might otherwise have been avoided.
13715 */
13716 error = 1;
13717 if ((resource == FLUSH_BLOCKS_WAIT &&
13718 fs->fs_cstotal.cs_nbfree <= needed) ||
13719 (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13720 fs->fs_cstotal.cs_nifree <= needed)) {
13721 ACQUIRE_LOCK(ump);
13722 if ((ump->um_softdep->sd_flags & FLUSH_RC_ACTIVE) == 0) {
13723 ump->um_softdep->sd_flags |= FLUSH_RC_ACTIVE;
13724 FREE_LOCK(ump);
13725 failed_vnode = softdep_request_cleanup_flush(mp, ump);
13726 ACQUIRE_LOCK(ump);
13727 ump->um_softdep->sd_flags &= ~FLUSH_RC_ACTIVE;
13728 wakeup(&ump->um_softdep->sd_flags);
13729 FREE_LOCK(ump);
13730 if (ump->softdep_on_worklist > 0) {
13731 stat_cleanup_retries += 1;
13732 if (!failed_vnode)
13733 goto retry;
13734 }
13735 } else {
13736 while ((ump->um_softdep->sd_flags &
13737 FLUSH_RC_ACTIVE) != 0) {
13738 msleep(&ump->um_softdep->sd_flags,
13739 LOCK_PTR(ump), PVM, "ffsrca", hz);
13740 }
13741 FREE_LOCK(ump);
13742 error = 0;
13743 }
13744 stat_cleanup_failures += 1;
13745 }
13746 if (time_second - starttime > stat_cleanup_high_delay)
13747 stat_cleanup_high_delay = time_second - starttime;
13748 UFS_LOCK(ump);
13749 return (error);
13750 }
13751
13752 /*
13753 * Scan the vnodes for the specified mount point flushing out any
13754 * vnodes that can be locked without waiting. Finally, try to flush
13755 * the device associated with the mount point if it can be locked
13756 * without waiting.
13757 *
13758 * We return 0 if we were able to lock every vnode in our scan.
13759 * If we had to skip one or more vnodes, we return 1.
13760 */
13761 static int
13762 softdep_request_cleanup_flush(struct mount *mp, struct ufsmount *ump)
13763 {
13764 struct thread *td;
13765 struct vnode *lvp, *mvp;
13766 int failed_vnode;
13767
13768 failed_vnode = 0;
13769 td = curthread;
13770 MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
13771 if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == NULL) {
13772 VI_UNLOCK(lvp);
13773 continue;
13774 }
13775 if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT) != 0) {
13776 failed_vnode = 1;
13777 continue;
13778 }
13779 if (lvp->v_vflag & VV_NOSYNC) { /* unlinked */
13780 vput(lvp);
13781 continue;
13782 }
13783 (void) ffs_syncvnode(lvp, MNT_NOWAIT, 0);
13784 vput(lvp);
13785 }
13786 lvp = ump->um_devvp;
13787 if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
13788 VOP_FSYNC(lvp, MNT_NOWAIT, td);
13789 VOP_UNLOCK(lvp);
13790 }
13791 return (failed_vnode);
13792 }
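/*
 * A note on the iterator contract used above (a sketch of the rules,
 * not a definitive statement): MNT_VNODE_FOREACH_ALL hands back each
 * vnode with its interlock held, so every path through the loop body
 * must either VI_UNLOCK() the vnode or pass the interlock to vget()
 * with LK_INTERLOCK, as softdep_request_cleanup_flush() does. A
 * caller that needs to abort the scan early must instead use
 * MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp) so that the iteration marker
 * vnode is freed.
 */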
13793
13794 static bool
13795 softdep_excess_items(struct ufsmount *ump, int item)
13796 {
13797
13798 KASSERT(item >= 0 && item < D_LAST, ("item %d", item));
13799 return (dep_current[item] > max_softdeps &&
13800 ump->softdep_curdeps[item] > max_softdeps /
13801 stat_flush_threads);
13802 }
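/*
 * Worked example with assumed numbers: if max_softdeps is 8192 and
 * stat_flush_threads is 4, softdep_excess_items() reports an excess
 * for an item type only when the system-wide count exceeds 8192 and
 * this mount alone holds more than 8192 / 4 == 2048 of them, i.e.
 * more than an even per-flush-thread share.
 */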
13803
13804 static void
13805 schedule_cleanup(struct mount *mp)
13806 {
13807 struct ufsmount *ump;
13808 struct thread *td;
13809
13810 ump = VFSTOUFS(mp);
13811 LOCK_OWNED(ump);
13812 FREE_LOCK(ump);
13813 td = curthread;
13814 if ((td->td_pflags & TDP_KTHREAD) != 0 &&
13815 (td->td_proc->p_flag2 & P2_AST_SU) == 0) {
13816 /*
13817 * No AST is delivered to kernel threads, so nobody
13818 * would dereference the mp stored in td_su. Some
13819 * kernel threads explicitly check for ASTs, e.g.
13820 * the NFS daemon does so in its service loop.
13821 */
13822 return;
13823 }
13824 if (td->td_su != NULL)
13825 vfs_rel(td->td_su);
13826 vfs_ref(mp);
13827 td->td_su = mp;
13828 ast_sched(td, TDA_UFS);
13829 }
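/*
 * Hand-off sketch (illustrative, not compiled): schedule_cleanup()
 * pins the mount with a reference, parks it in td_su, and requests
 * an AST, so that softdep_ast_cleanup_proc() later runs the cleanup
 * in the thread's own context rather than under the softdep lock.
 * The ast_register() call below is an assumption about how the
 * handler is hooked up; it would be done once at initialization.
 */
#if 0
	vfs_ref(mp);		/* keep mp alive until the AST fires */
	td->td_su = mp;		/* stash it where the handler looks */
	ast_sched(td, TDA_UFS);	/* softdep_ast_cleanup_proc() runs later */

	/* Once, at initialization time (assumed form): */
	ast_register(TDA_UFS, ASTR_KCLEAR | ASTR_ASTF_REQUIRED, 0,
	    softdep_ast_cleanup_proc);
#endif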
13830
13831 static void
13832 softdep_ast_cleanup_proc(struct thread *td, int ast __unused)
13833 {
13834 struct mount *mp;
13835 struct ufsmount *ump;
13836 int error;
13837 bool req;
13838
13839 while ((mp = td->td_su) != NULL) {
13840 td->td_su = NULL;
13841 error = vfs_busy(mp, MBF_NOWAIT);
13842 vfs_rel(mp);
13843 if (error != 0)
13844 return;
13845 if (ffs_own_mount(mp) && MOUNTEDSOFTDEP(mp)) {
13846 ump = VFSTOUFS(mp);
13847 for (;;) {
13848 req = false;
13849 ACQUIRE_LOCK(ump);
13850 if (softdep_excess_items(ump, D_INODEDEP)) {
13851 req = true;
13852 request_cleanup(mp, FLUSH_INODES);
13853 }
13854 if (softdep_excess_items(ump, D_DIRREM)) {
13855 req = true;
13856 request_cleanup(mp, FLUSH_BLOCKS);
13857 }
13858 FREE_LOCK(ump);
13859 if (softdep_excess_items(ump, D_NEWBLK) ||
13860 softdep_excess_items(ump, D_ALLOCDIRECT) ||
13861 softdep_excess_items(ump, D_ALLOCINDIR)) {
13862 error = vn_start_write(NULL, &mp,
13863 V_WAIT);
13864 if (error == 0) {
13865 req = true;
13866 VFS_SYNC(mp, MNT_WAIT);
13867 vn_finished_write(mp);
13868 }
13869 }
13870 if ((td->td_pflags & TDP_KTHREAD) != 0 || !req)
13871 break;
13872 }
13873 }
13874 vfs_unbusy(mp);
13875 }
13876 if ((mp = td->td_su) != NULL) {
13877 td->td_su = NULL;
13878 vfs_rel(mp);
13879 }
13880 }
13881
13882 /*
13883 * If memory utilization has gotten too high, deliberately slow things
13884 * down and speed up the I/O processing.
13885 */
13886 static int
13887 request_cleanup(struct mount *mp, int resource)
13888 {
13889 struct thread *td = curthread;
13890 struct ufsmount *ump;
13891
13892 ump = VFSTOUFS(mp);
13893 LOCK_OWNED(ump);
13894 /*
13895 * We never hold up the filesystem syncer or buf daemon.
13896 */
13897 if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF))
13898 return (0);
13899 /*
13900 * First check to see if the work list has gotten backlogged.
13901 * If it has, co-opt this process to help clean up two entries.
13902 * Because this process may hold inodes locked, we cannot
13903 * handle any remove requests that might block on a locked
13904 * inode as that could lead to deadlock. We set TDP_SOFTDEP
13905 * to avoid recursively processing the worklist.
13906 */
13907 if (ump->softdep_on_worklist > max_softdeps / 10) {
13908 td->td_pflags |= TDP_SOFTDEP;
13909 process_worklist_item(mp, 2, LK_NOWAIT);
13910 td->td_pflags &= ~TDP_SOFTDEP;
13911 stat_worklist_push += 2;
13912 return (1);
13913 }
13914 /*
13915 * Next, we attempt to speed up the syncer process. If that
13916 * is successful, then we allow the process to continue.
13917 */
13918 if (softdep_speedup(ump) &&
13919 resource != FLUSH_BLOCKS_WAIT &&
13920 resource != FLUSH_INODES_WAIT)
13921 return (0);
13922 /*
13923 * If we are resource constrained on inode dependencies, try
13924 * flushing some dirty inodes. Otherwise, we are constrained
13925 * by file deletions, so try accelerating flushes of directories
13926 * with removal dependencies. We would like to do the cleanup
13927 * here, but we probably hold an inode locked at this point and
13928 * that might deadlock against one that we try to clean. So,
13929 * the best that we can do is request the syncer daemon to do
13930 * the cleanup for us.
13931 */
13932 switch (resource) {
13933 case FLUSH_INODES:
13934 case FLUSH_INODES_WAIT:
13935 ACQUIRE_GBLLOCK(&lk);
13936 stat_ino_limit_push += 1;
13937 req_clear_inodedeps += 1;
13938 FREE_GBLLOCK(&lk);
13939 stat_countp = &stat_ino_limit_hit;
13940 break;
13941
13942 case FLUSH_BLOCKS:
13943 case FLUSH_BLOCKS_WAIT:
13944 ACQUIRE_GBLLOCK(&lk);
13945 stat_blk_limit_push += 1;
13946 req_clear_remove += 1;
13947 FREE_GBLLOCK(&lk);
13948 stat_countp = &stat_blk_limit_hit;
13949 break;
13950
13951 default:
13952 panic("request_cleanup: unknown type");
13953 }
13954 /*
13955 * Hopefully the syncer daemon will catch up and awaken us.
13956 * We wait at most tickdelay before proceeding in any case.
13957 */
13958 ACQUIRE_GBLLOCK(&lk);
13959 FREE_LOCK(ump);
13960 proc_waiting += 1;
13961 if (callout_pending(&softdep_callout) == FALSE)
13962 callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2,
13963 pause_timer, 0);
13964
13965 if ((td->td_pflags & TDP_KTHREAD) == 0)
13966 msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0);
13967 proc_waiting -= 1;
13968 FREE_GBLLOCK(&lk);
13969 ACQUIRE_LOCK(ump);
13970 return (1);
13971 }
13972
13973 /*
13974 * Awaken processes pausing in request_cleanup and charge the
13975 * number of waiters to the active limit-hit statistic. The
13976 * pause_timer callout runs with the global softdep mutex (&lk) held.
13977 */
13978 static void
13979 pause_timer(void *arg)
13980 {
13981
13982 GBLLOCK_OWNED(&lk);
13983 /*
13984 * The callout(9) API has acquired the mutex and will hold it
13985 * across this function call.
13986 */
13987 *stat_countp += proc_waiting;
13988 wakeup(&proc_waiting);
13989 }
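/*
 * Self-contained sketch (assumed "my_*" names, not compiled) of the
 * bounded-pause pattern implemented by request_cleanup() and
 * pause_timer() above: sleepers accumulate on a counter, a shared
 * callout fires after at most the requested number of ticks, and its
 * handler wakes everyone, so a stalled syncer can delay a writer
 * only for a bounded time. Assumes callout_init(&my_callout, 1) was
 * done beforehand.
 */
#if 0
static struct callout my_callout;
static int my_waiting;

static void
my_timer(void *arg __unused)
{

	wakeup(&my_waiting);	/* release every paused thread */
}

static void
my_pause(struct mtx *gbl, int ticks)
{

	mtx_assert(gbl, MA_OWNED);
	my_waiting += 1;
	if (callout_pending(&my_callout) == FALSE)
		callout_reset(&my_callout, ticks > 2 ? ticks : 2,
		    my_timer, NULL);
	msleep(&my_waiting, gbl, PPAUSE, "mypause", 0);
	my_waiting -= 1;
}
#endif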
13990
13991 /*
13992 * If requested, try removing inode or removal dependencies.
13993 */
13994 static void
13995 check_clear_deps(struct mount *mp)
13996 {
13997 struct ufsmount *ump;
13998 bool suj_susp;
13999
14000 /*
14001 * Tell the lower layers that any TRIM or WRITE transactions that have
14002 * been delayed for performance reasons should proceed to help alleviate
14003 * the shortage faster. The race between checking req_* and the softdep
14004 * mutex (lk) is fine since this is an advisory operation that at most
14005 * causes deferred work to be done sooner.
14006 */
14007 ump = VFSTOUFS(mp);
14008 suj_susp = ump->um_softdep->sd_jblocks != NULL &&
14009 ump->softdep_jblocks->jb_suspended;
14010 if (req_clear_remove || req_clear_inodedeps || suj_susp) {
14011 FREE_LOCK(ump);
14012 softdep_send_speedup(ump, 0, BIO_SPEEDUP_TRIM | BIO_SPEEDUP_WRITE);
14013 ACQUIRE_LOCK(ump);
14014 }
14015
14016 /*
14017 * If we are suspended, it may be because we are using
14018 * too many inodedeps, so help clear them out.
14019 */
14020 if (suj_susp)
14021 clear_inodedeps(mp);
14022
14023 /*
14024 * General requests for cleanup of backed up dependencies
14025 */
14026 ACQUIRE_GBLLOCK(&lk);
14027 if (req_clear_inodedeps) {
14028 req_clear_inodedeps -= 1;
14029 FREE_GBLLOCK(&lk);
14030 clear_inodedeps(mp);
14031 ACQUIRE_GBLLOCK(&lk);
14032 wakeup(&proc_waiting);
14033 }
14034 if (req_clear_remove) {
14035 req_clear_remove -= 1;
14036 FREE_GBLLOCK(&lk);
14037 clear_remove(mp);
14038 ACQUIRE_GBLLOCK(&lk);
14039 wakeup(&proc_waiting);
14040 }
14041 FREE_GBLLOCK(&lk);
14042 }
14043
14044 /*
14045 * Flush out a directory with at least one removal dependency in an effort to
14046 * reduce the number of dirrem, freefile, and freeblks dependency structures.
14047 */
14048 static void
14049 clear_remove(struct mount *mp)
14050 {
14051 struct pagedep_hashhead *pagedephd;
14052 struct pagedep *pagedep;
14053 struct ufsmount *ump;
14054 struct vnode *vp;
14055 struct bufobj *bo;
14056 int error, cnt;
14057 ino_t ino;
14058
14059 ump = VFSTOUFS(mp);
14060 LOCK_OWNED(ump);
14061
14062 for (cnt = 0; cnt <= ump->pagedep_hash_size; cnt++) {
14063 pagedephd = &ump->pagedep_hashtbl[ump->pagedep_nextclean++];
14064 if (ump->pagedep_nextclean > ump->pagedep_hash_size)
14065 ump->pagedep_nextclean = 0;
14066 LIST_FOREACH(pagedep, pagedephd, pd_hash) {
14067 if (LIST_EMPTY(&pagedep->pd_dirremhd))
14068 continue;
14069 ino = pagedep->pd_ino;
14070 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
14071 continue;
14072 FREE_LOCK(ump);
14073
14074 /*
14075 * Let unmount clear deps
14076 */
14077 error = vfs_busy(mp, MBF_NOWAIT);
14078 if (error != 0)
14079 goto finish_write;
14080 error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
14081 FFSV_FORCEINSMQ | FFSV_FORCEINODEDEP);
14082 vfs_unbusy(mp);
14083 if (error != 0) {
14084 softdep_error("clear_remove: vget", error);
14085 goto finish_write;
14086 }
14087 MPASS(VTOI(vp)->i_mode != 0);
14088 if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
14089 softdep_error("clear_remove: fsync", error);
14090 bo = &vp->v_bufobj;
14091 BO_LOCK(bo);
14092 drain_output(vp);
14093 BO_UNLOCK(bo);
14094 vput(vp);
14095 finish_write:
14096 vn_finished_write(mp);
14097 ACQUIRE_LOCK(ump);
14098 return;
14099 }
14100 }
14101 }
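/*
 * Worked example of the round-robin scan above, with an assumed
 * table size: if pagedep_hash_size is 255 there are 256 chains
 * (indices 0..255); each call starts at pagedep_nextclean, advances
 * it by one, and wraps past 255 back to 0, so repeated calls spread
 * the flushing work across every chain instead of re-flushing the
 * same directory each time.
 */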
14102
14103 /*
14104 * Clear out a block of dirty inodes in an effort to reduce
14105 * the number of inodedep dependency structures.
14106 */
14107 static void
14108 clear_inodedeps(struct mount *mp)
14109 {
14110 struct inodedep_hashhead *inodedephd;
14111 struct inodedep *inodedep;
14112 struct ufsmount *ump;
14113 struct vnode *vp;
14114 struct fs *fs;
14115 int error, cnt;
14116 ino_t firstino, lastino, ino;
14117
14118 ump = VFSTOUFS(mp);
14119 fs = ump->um_fs;
14120 LOCK_OWNED(ump);
14121 /*
14122 * Pick an inode dependency to be cleared, rotating through
14123 * the hash chains round-robin. We then gather up all the
14124 * inodes in its block that have dependencies and flush them out.
14125 */
14126 for (cnt = 0; cnt <= ump->inodedep_hash_size; cnt++) {
14127 inodedephd = &ump->inodedep_hashtbl[ump->inodedep_nextclean++];
14128 if (ump->inodedep_nextclean > ump->inodedep_hash_size)
14129 ump->inodedep_nextclean = 0;
14130 if ((inodedep = LIST_FIRST(inodedephd)) != NULL)
14131 break;
14132 }
14133 if (inodedep == NULL)
14134 return;
14135 /*
14136 * Find the last inode in the block with dependencies.
14137 */
14138 firstino = rounddown2(inodedep->id_ino, INOPB(fs));
14139 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--)
14140 if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0)
14141 break;
14142 /*
14143 * Asynchronously push all but the last inode with dependencies.
14144 * Synchronously push the last inode with dependencies to ensure
14145 * that the inode block gets written to free up the inodedeps.
14146 */
14147 for (ino = firstino; ino <= lastino; ino++) {
14148 if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
14149 continue;
14150 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
14151 continue;
14152 FREE_LOCK(ump);
14153 error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */
14154 if (error != 0) {
14155 vn_finished_write(mp);
14156 ACQUIRE_LOCK(ump);
14157 return;
14158 }
14159 if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
14160 FFSV_FORCEINSMQ | FFSV_FORCEINODEDEP)) != 0) {
14161 softdep_error("clear_inodedeps: vget", error);
14162 vfs_unbusy(mp);
14163 vn_finished_write(mp);
14164 ACQUIRE_LOCK(ump);
14165 return;
14166 }
14167 vfs_unbusy(mp);
14168 if (VTOI(vp)->i_mode == 0) {
14169 vgone(vp);
14170 } else if (ino == lastino) {
14171 do {
14172 error = ffs_syncvnode(vp, MNT_WAIT, 0);
14173 } while (error == ERELOOKUP);
14174 if (error != 0)
14175 softdep_error("clear_inodedeps: fsync1", error);
14176 } else {
14177 if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
14178 softdep_error("clear_inodedeps: fsync2", error);
14179 BO_LOCK(&vp->v_bufobj);
14180 drain_output(vp);
14181 BO_UNLOCK(&vp->v_bufobj);
14182 }
14183 vput(vp);
14184 vn_finished_write(mp);
14185 ACQUIRE_LOCK(ump);
14186 }
14187 }
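/*
 * Worked example of the inode-block arithmetic above, under assumed
 * geometry: a 32768-byte block of 256-byte dinodes gives
 * INOPB(fs) == 128. For a picked inodedep with id_ino == 12345,
 * rounddown2(12345, 128) == 12288, so the scan covers inodes
 * 12288..12415, exactly the inodes sharing one inode block, and the
 * last of them holding dependencies is pushed synchronously so that
 * the shared inode block finally gets written.
 */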
14188
14189 void
14190 softdep_buf_append(struct buf *bp, struct workhead *wkhd)
14191 {
14192 struct worklist *wk;
14193 struct ufsmount *ump;
14194
14195 if ((wk = LIST_FIRST(wkhd)) == NULL)
14196 return;
14197 KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
14198 ("softdep_buf_append called on non-softdep filesystem"));
14199 ump = VFSTOUFS(wk->wk_mp);
14200 ACQUIRE_LOCK(ump);
14201 while ((wk = LIST_FIRST(wkhd)) != NULL) {
14202 WORKLIST_REMOVE(wk);
14203 WORKLIST_INSERT(&bp->b_dep, wk);
14204 }
14205 FREE_LOCK(ump);
14207 }
14208
14209 void
14210 softdep_inode_append(
14211 struct inode *ip,
14212 struct ucred *cred,
14213 struct workhead *wkhd)
14214 {
14215 struct buf *bp;
14216 struct fs *fs;
14217 struct ufsmount *ump;
14218 int error;
14219
14220 ump = ITOUMP(ip);
14221 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
14222 ("softdep_inode_append called on non-softdep filesystem"));
14223 fs = ump->um_fs;
14224 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
14225 (int)fs->fs_bsize, cred, &bp);
14226 if (error) {
14227 bqrelse(bp);
14228 softdep_freework(wkhd);
14229 return;
14230 }
14231 softdep_buf_append(bp, wkhd);
14232 bqrelse(bp);
14233 }
14234
14235 void
14236 softdep_freework(struct workhead *wkhd)
14237 {
14238 struct worklist *wk;
14239 struct ufsmount *ump;
14240
14241 if ((wk = LIST_FIRST(wkhd)) == NULL)
14242 return;
14243 KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
14244 ("softdep_freework called on non-softdep filesystem"));
14245 ump = VFSTOUFS(wk->wk_mp);
14246 ACQUIRE_LOCK(ump);
14247 handle_jwork(wkhd);
14248 FREE_LOCK(ump);
14249 }
14250
14251 static struct ufsmount *
14252 softdep_bp_to_mp(struct buf *bp)
14253 {
14254 struct mount *mp;
14255 struct vnode *vp;
14256
14257 if (LIST_EMPTY(&bp->b_dep))
14258 return (NULL);
14259 vp = bp->b_vp;
14260 KASSERT(vp != NULL,
14261 ("%s, buffer with dependencies lacks vnode", __func__));
14262
14263 /*
14264 * The ump mount point is stable after we get a correct
14265 * pointer, since bp is locked and this prevents unmount from
14266 * proceeding. But to get to it, we cannot dereference bp->b_dep
14267 * head wk_mp, because we do not yet own SU ump lock and
14268 * workitem might be freed while dereferenced.
14269 */
14270 retry:
14271 switch (vp->v_type) {
14272 case VCHR:
14273 VI_LOCK(vp);
14274 mp = vp->v_type == VCHR ? vp->v_rdev->si_mountpt : NULL;
14275 VI_UNLOCK(vp);
14276 if (mp == NULL)
14277 goto retry;
14278 break;
14279 case VREG:
14280 case VDIR:
14281 case VLNK:
14282 case VFIFO:
14283 case VSOCK:
14284 mp = vp->v_mount;
14285 break;
14286 case VBLK:
14287 vn_printf(vp, "softdep_bp_to_mp: unexpected block device\n");
14288 /* FALLTHROUGH */
14289 case VNON:
14290 case VBAD:
14291 case VMARKER:
14292 mp = NULL;
14293 break;
14294 default:
14295 vn_printf(vp, "unknown vnode type\n");
14296 mp = NULL;
14297 break;
14298 }
14299 return (mp != NULL ? VFSTOUFS(mp) : NULL);
14300 }
14301
14302 /*
14303 * Function to determine if the buffer has outstanding dependencies
14304 * that will cause a roll-back if the buffer is written. If wantcount
14305 * is set, return number of dependencies, otherwise just yes or no.
14306 */
14307 static int
14308 softdep_count_dependencies(struct buf *bp, int wantcount)
14309 {
14310 struct worklist *wk;
14311 struct ufsmount *ump;
14312 struct bmsafemap *bmsafemap;
14313 struct freework *freework;
14314 struct inodedep *inodedep;
14315 struct indirdep *indirdep;
14316 struct freeblks *freeblks;
14317 struct allocindir *aip;
14318 struct pagedep *pagedep;
14319 struct dirrem *dirrem;
14320 struct newblk *newblk;
14321 struct mkdir *mkdir;
14322 struct diradd *dap;
14323 int i, retval;
14324
14325 ump = softdep_bp_to_mp(bp);
14326 if (ump == NULL)
14327 return (0);
14328 retval = 0;
14329 ACQUIRE_LOCK(ump);
14330 LIST_FOREACH(wk, &bp->b_dep, wk_list) {
14331 switch (wk->wk_type) {
14332 case D_INODEDEP:
14333 inodedep = WK_INODEDEP(wk);
14334 if ((inodedep->id_state & DEPCOMPLETE) == 0) {
14335 /* bitmap allocation dependency */
14336 retval += 1;
14337 if (!wantcount)
14338 goto out;
14339 }
14340 if (TAILQ_FIRST(&inodedep->id_inoupdt)) {
14341 /* direct block pointer dependency */
14342 retval += 1;
14343 if (!wantcount)
14344 goto out;
14345 }
14346 if (TAILQ_FIRST(&inodedep->id_extupdt)) {
14347 /* ext attr block pointer dependency */
14348 retval += 1;
14349 if (!wantcount)
14350 goto out;
14351 }
14352 if (TAILQ_FIRST(&inodedep->id_inoreflst)) {
14353 /* Add reference dependency. */
14354 retval += 1;
14355 if (!wantcount)
14356 goto out;
14357 }
14358 continue;
14359
14360 case D_INDIRDEP:
14361 indirdep = WK_INDIRDEP(wk);
14362
14363 TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) {
14364 /* indirect truncation dependency */
14365 retval += 1;
14366 if (!wantcount)
14367 goto out;
14368 }
14369
14370 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
14371 /* indirect block pointer dependency */
14372 retval += 1;
14373 if (!wantcount)
14374 goto out;
14375 }
14376 continue;
14377
14378 case D_PAGEDEP:
14379 pagedep = WK_PAGEDEP(wk);
14380 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
14381 if (LIST_FIRST(&dirrem->dm_jremrefhd)) {
14382 /* Journal remove ref dependency. */
14383 retval += 1;
14384 if (!wantcount)
14385 goto out;
14386 }
14387 }
14388 for (i = 0; i < DAHASHSZ; i++) {
14389 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
14390 /* directory entry dependency */
14391 retval += 1;
14392 if (!wantcount)
14393 goto out;
14394 }
14395 }
14396 continue;
14397
14398 case D_BMSAFEMAP:
14399 bmsafemap = WK_BMSAFEMAP(wk);
14400 if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) {
14401 /* Add reference dependency. */
14402 retval += 1;
14403 if (!wantcount)
14404 goto out;
14405 }
14406 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) {
14407 /* Allocate block dependency. */
14408 retval += 1;
14409 if (!wantcount)
14410 goto out;
14411 }
14412 continue;
14413
14414 case D_FREEBLKS:
14415 freeblks = WK_FREEBLKS(wk);
14416 if (LIST_FIRST(&freeblks->fb_jblkdephd)) {
14417 /* Freeblk journal dependency. */
14418 retval += 1;
14419 if (!wantcount)
14420 goto out;
14421 }
14422 continue;
14423
14424 case D_ALLOCDIRECT:
14425 case D_ALLOCINDIR:
14426 newblk = WK_NEWBLK(wk);
14427 if (newblk->nb_jnewblk) {
14428 /* Journal allocate dependency. */
14429 retval += 1;
14430 if (!wantcount)
14431 goto out;
14432 }
14433 continue;
14434
14435 case D_MKDIR:
14436 mkdir = WK_MKDIR(wk);
14437 if (mkdir->md_jaddref) {
14438 /* Journal reference dependency. */
14439 retval += 1;
14440 if (!wantcount)
14441 goto out;
14442 }
14443 continue;
14444
14445 case D_FREEWORK:
14446 case D_FREEDEP:
14447 case D_JSEGDEP:
14448 case D_JSEG:
14449 case D_SBDEP:
14450 /* never a dependency on these blocks */
14451 continue;
14452
14453 default:
14454 panic("softdep_count_dependencies: Unexpected type %s",
14455 TYPENAME(wk->wk_type));
14456 /* NOTREACHED */
14457 }
14458 }
14459 out:
14460 FREE_LOCK(ump);
14461 return (retval);
14462 }
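/*
 * Usage sketch (hypothetical caller, not compiled): wantcount picks
 * between a cheap existence test, which stops at the first
 * dependency found, and a full tally of all of them.
 */
#if 0
	if (softdep_count_dependencies(bp, 0) != 0) {
		/* Writing bp now would trigger at least one rollback. */
	}
	ndeps = softdep_count_dependencies(bp, 1);	/* exact count */
#endif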
14463
14464 /*
14465 * Acquire exclusive access to a buffer.
14466 * Must be called with the lock parameter write-locked.
14467 * Return the acquired buffer or NULL on failure.
14468 */
14469 static struct buf *
14470 getdirtybuf(struct buf *bp,
14471 struct rwlock *lock,
14472 int waitfor)
14473 {
14474 int error;
14475
14476 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
14477 if (waitfor != MNT_WAIT)
14478 return (NULL);
14479 error = BUF_LOCK(bp,
14480 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, lock);
14481 /*
14482 * Even if we successfully acquire bp here, we have dropped
14483 * the lock, which may violate our guarantee.
14484 */
14485 if (error == 0)
14486 BUF_UNLOCK(bp);
14487 else if (error != ENOLCK)
14488 panic("getdirtybuf: inconsistent lock: %d", error);
14489 rw_wlock(lock);
14490 return (NULL);
14491 }
14492 if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
14493 if (lock != BO_LOCKPTR(bp->b_bufobj) && waitfor == MNT_WAIT) {
14494 rw_wunlock(lock);
14495 BO_LOCK(bp->b_bufobj);
14496 BUF_UNLOCK(bp);
14497 if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
14498 bp->b_vflags |= BV_BKGRDWAIT;
14499 msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj),
14500 PRIBIO | PDROP, "getbuf", 0);
14501 } else
14502 BO_UNLOCK(bp->b_bufobj);
14503 rw_wlock(lock);
14504 return (NULL);
14505 }
14506 BUF_UNLOCK(bp);
14507 if (waitfor != MNT_WAIT)
14508 return (NULL);
14509 #ifdef DEBUG_VFS_LOCKS
14510 if (bp->b_vp->v_type != VCHR)
14511 ASSERT_BO_WLOCKED(bp->b_bufobj);
14512 #endif
14513 bp->b_vflags |= BV_BKGRDWAIT;
14514 rw_sleep(&bp->b_xflags, lock, PRIBIO, "getbuf", 0);
14515 return (NULL);
14516 }
14517 if ((bp->b_flags & B_DELWRI) == 0) {
14518 BUF_UNLOCK(bp);
14519 return (NULL);
14520 }
14521 bremfree(bp);
14522 return (bp);
14523 }
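/*
 * Caller sketch (not compiled): when getdirtybuf() returns NULL in
 * the MNT_WAIT case it may have dropped and reacquired "lock", so a
 * scan of a dirty-buffer list cannot simply continue; it must
 * restart from the head because the list may have changed while the
 * lock was dropped. This mirrors the flushing loops elsewhere in
 * this file.
 */
#if 0
restart:
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		if (getdirtybuf(bp, lock, MNT_WAIT) == NULL)
			goto restart;	/* lock may have been dropped */
		/* bp is now locked, off its queue, and B_DELWRI. */
		/* ... write or requeue bp here ... */
	}
#endif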
14524
14525 /*
14526 * Check if it is safe to suspend the file system now. On entry,
14527 * the bufobj lock for devvp should be held. Return 0 with
14528 * the mount interlock held if the file system can be suspended now,
14529 * otherwise return EAGAIN with the mount interlock held.
14530 */
14531 int
14532 softdep_check_suspend(struct mount *mp,
14533 struct vnode *devvp,
14534 int softdep_depcnt,
14535 int softdep_accdepcnt,
14536 int secondary_writes,
14537 int secondary_accwrites)
14538 {
14539 struct buf *bp;
14540 struct bufobj *bo;
14541 struct ufsmount *ump;
14542 struct inodedep *inodedep;
14543 struct indirdep *indirdep;
14544 struct worklist *wk, *nextwk;
14545 int error, unlinked;
14546
14547 bo = &devvp->v_bufobj;
14548 ASSERT_BO_WLOCKED(bo);
14549
14550 /*
14551 * If we are not running with soft updates, then we need only
14552 * deal with secondary writes as we try to suspend.
14553 */
14554 if (MOUNTEDSOFTDEP(mp) == 0) {
14555 MNT_ILOCK(mp);
14556 while (mp->mnt_secondary_writes != 0) {
14557 BO_UNLOCK(bo);
14558 msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
14559 (PUSER - 1) | PDROP, "secwr", 0);
14560 BO_LOCK(bo);
14561 MNT_ILOCK(mp);
14562 }
14563
14564 /*
14565 * Reasons for needing more work before suspend:
14566 * - Dirty buffers on devvp.
14567 * - Secondary writes occurred after start of vnode sync loop.
14568 */
14569 error = 0;
14570 if (bo->bo_numoutput > 0 ||
14571 bo->bo_dirty.bv_cnt > 0 ||
14572 secondary_writes != 0 ||
14573 mp->mnt_secondary_writes != 0 ||
14574 secondary_accwrites != mp->mnt_secondary_accwrites)
14575 error = EAGAIN;
14576 BO_UNLOCK(bo);
14577 return (error);
14578 }
14579
14580 /*
14581 * If we are running with soft updates, then we need to coordinate
14582 * with them as we try to suspend.
14583 */
14584 ump = VFSTOUFS(mp);
14585 for (;;) {
14586 if (!TRY_ACQUIRE_LOCK(ump)) {
14587 BO_UNLOCK(bo);
14588 ACQUIRE_LOCK(ump);
14589 FREE_LOCK(ump);
14590 BO_LOCK(bo);
14591 continue;
14592 }
14593 MNT_ILOCK(mp);
14594 if (mp->mnt_secondary_writes != 0) {
14595 FREE_LOCK(ump);
14596 BO_UNLOCK(bo);
14597 msleep(&mp->mnt_secondary_writes,
14598 MNT_MTX(mp),
14599 (PUSER - 1) | PDROP, "secwr", 0);
14600 BO_LOCK(bo);
14601 continue;
14602 }
14603 break;
14604 }
14605
14606 unlinked = 0;
14607 if (MOUNTEDSUJ(mp)) {
14608 for (inodedep = TAILQ_FIRST(&ump->softdep_unlinked);
14609 inodedep != NULL;
14610 inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
14611 if ((inodedep->id_state & (UNLINKED | UNLINKLINKS |
14612 UNLINKONLIST)) != (UNLINKED | UNLINKLINKS |
14613 UNLINKONLIST) ||
14614 !check_inodedep_free(inodedep))
14615 continue;
14616 unlinked++;
14617 }
14618 }
14619
14620 /*
14621 * XXX Check for orphaned indirdep dependency structures.
14622 *
14623 * During forcible unmount after a disk failure there is a
14624 * bug that causes one or more indirdep dependency structures
14625 * to fail to be deallocated. We check for them here and clean
14626 * them up so that the unmount can succeed.
14627 */
14628 if ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0 && ump->softdep_deps > 0 &&
14629 ump->softdep_deps == ump->softdep_curdeps[D_INDIRDEP]) {
14630 LIST_FOREACH_SAFE(wk, &ump->softdep_alldeps[D_INDIRDEP],
14631 wk_all, nextwk) {
14632 indirdep = WK_INDIRDEP(wk);
14633 if ((indirdep->ir_state & (GOINGAWAY | DEPCOMPLETE)) !=
14634 (GOINGAWAY | DEPCOMPLETE) ||
14635 !TAILQ_EMPTY(&indirdep->ir_trunc) ||
14636 !LIST_EMPTY(&indirdep->ir_completehd) ||
14637 !LIST_EMPTY(&indirdep->ir_writehd) ||
14638 !LIST_EMPTY(&indirdep->ir_donehd) ||
14639 !LIST_EMPTY(&indirdep->ir_deplisthd) ||
14640 indirdep->ir_saveddata != NULL ||
14641 indirdep->ir_savebp == NULL) {
14642 printf("%s: skipping orphaned indirdep %p\n",
14643 __FUNCTION__, indirdep);
14644 continue;
14645 }
14646 printf("%s: freeing orphaned indirdep %p\n",
14647 __FUNCTION__, indirdep);
14648 bp = indirdep->ir_savebp;
14649 indirdep->ir_savebp = NULL;
14650 free_indirdep(indirdep);
14651 FREE_LOCK(ump);
14652 brelse(bp);
14653 while (!TRY_ACQUIRE_LOCK(ump)) {
14654 BO_UNLOCK(bo);
14655 ACQUIRE_LOCK(ump);
14656 FREE_LOCK(ump);
14657 BO_LOCK(bo);
14658 }
14659 }
14660 }
14661
14662 /*
14663 * Reasons for needing more work before suspend:
14664 * - Dirty buffers on devvp.
14665 * - Dependency structures still exist.
14666 * - Softdep activity occurred after start of vnode sync loop.
14667 * - Secondary writes occurred after start of vnode sync loop.
14668 */
14669 error = 0;
14670 if (bo->bo_numoutput > 0 ||
14671 bo->bo_dirty.bv_cnt > 0 ||
14672 softdep_depcnt != unlinked ||
14673 ump->softdep_deps != unlinked ||
14674 softdep_accdepcnt != ump->softdep_accdeps ||
14675 secondary_writes != 0 ||
14676 mp->mnt_secondary_writes != 0 ||
14677 secondary_accwrites != mp->mnt_secondary_accwrites)
14678 error = EAGAIN;
14679 FREE_LOCK(ump);
14680 BO_UNLOCK(bo);
14681 return (error);
14682 }
14683
14684 /*
14685 * Get the number of dependency structures for the file system, both
14686 * the current number and the total number allocated. These will
14687 * later be used to detect that softdep processing has occurred.
14688 */
14689 void
14690 softdep_get_depcounts(struct mount *mp,
14691 int *softdep_depsp,
14692 int *softdep_accdepsp)
14693 {
14694 struct ufsmount *ump;
14695
14696 if (MOUNTEDSOFTDEP(mp) == 0) {
14697 *softdep_depsp = 0;
14698 *softdep_accdepsp = 0;
14699 return;
14700 }
14701 ump = VFSTOUFS(mp);
14702 ACQUIRE_LOCK(ump);
14703 *softdep_depsp = ump->softdep_deps;
14704 *softdep_accdepsp = ump->softdep_accdeps;
14705 FREE_LOCK(ump);
14706 }
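/*
 * Protocol sketch (a simplified assumption of the ffs_sync()
 * suspension loop, not compiled; the sd_* and sec_* variables are
 * placeholders): sample the dependency counters, flush vnodes, then
 * ask softdep_check_suspend() whether any softdep or secondary-write
 * activity slipped in between; EAGAIN means try again.
 */
#if 0
	for (;;) {
		softdep_get_depcounts(mp, &sd_deps, &sd_accdeps);
		/* ... flush vnodes; sample secondary write counters ... */
		BO_LOCK(&devvp->v_bufobj);	/* required on entry */
		error = softdep_check_suspend(mp, devvp, sd_deps,
		    sd_accdeps, sec_writes, sec_accwrites);
		/* Returns with the mount interlock held either way. */
		if (error == 0)
			break;		/* quiescent; safe to suspend */
		MNT_IUNLOCK(mp);	/* EAGAIN: activity seen; retry */
	}
#endif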
14707
14708 /*
14709 * Wait for pending output on a vnode to complete.
14710 */
14711 static void
14712 drain_output(struct vnode *vp)
14713 {
14714
14715 ASSERT_VOP_LOCKED(vp, "drain_output");
14716 (void)bufobj_wwait(&vp->v_bufobj, 0, 0);
14717 }
14718
14719 /*
14720 * Called whenever a buffer that is being invalidated or reallocated
14721 * contains dependencies. This should only happen if an I/O error has
14722 * occurred. The routine is called with the buffer locked.
14723 */
14724 static void
14725 softdep_deallocate_dependencies(struct buf *bp)
14726 {
14727
14728 if ((bp->b_ioflags & BIO_ERROR) == 0)
14729 panic("softdep_deallocate_dependencies: dangling deps");
14730 if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL)
14731 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error);
14732 else
14733 printf("softdep_deallocate_dependencies: "
14734 "got error %d while accessing filesystem\n", bp->b_error);
14735 if (bp->b_error != ENXIO)
14736 panic("softdep_deallocate_dependencies: unrecovered I/O error");
14737 }
14738
14739 /*
14740 * Function to handle asynchronous write errors in the filesystem.
14741 */
14742 static void
14743 softdep_error(char *func, int error)
14744 {
14745
14746 /* XXX should do something better! */
14747 printf("%s: got error %d while accessing filesystem\n", func, error);
14748 }
14749
14750 #ifdef DDB
14751
14752 /* exported to ffs_vfsops.c */
14753 extern void db_print_ffs(struct ufsmount *ump);
14754 void
14755 db_print_ffs(struct ufsmount *ump)
14756 {
14757 db_printf("mp %p (%s) devvp %p\n", ump->um_mountp,
14758 ump->um_mountp->mnt_stat.f_mntonname, ump->um_devvp);
14759 db_printf(" fs %p ", ump->um_fs);
14760
14761 if (ump->um_softdep != NULL) {
14762 db_printf("su_wl %d su_deps %d su_req %d\n",
14763 ump->softdep_on_worklist, ump->softdep_deps,
14764 ump->softdep_req);
14765 } else {
14766 db_printf("su disabled\n");
14767 }
14768 }
14769
14770 static void
14771 worklist_print(struct worklist *wk, int verbose)
14772 {
14773
14774 if (!verbose) {
14775 db_printf("%s: %p state 0x%b\n", TYPENAME(wk->wk_type), wk,
14776 (u_int)wk->wk_state, PRINT_SOFTDEP_FLAGS);
14777 return;
14778 }
14779 db_printf("worklist: %p type %s state 0x%b next %p\n ", wk,
14780 TYPENAME(wk->wk_type), (u_int)wk->wk_state, PRINT_SOFTDEP_FLAGS,
14781 LIST_NEXT(wk, wk_list));
14782 db_print_ffs(VFSTOUFS(wk->wk_mp));
14783 }
14784
14785 static void
14786 inodedep_print(struct inodedep *inodedep, int verbose)
14787 {
14788
14789 worklist_print(&inodedep->id_list, 0);
14790 db_printf(" fs %p ino %jd inoblk %jd delta %jd nlink %jd\n",
14791 inodedep->id_fs,
14792 (intmax_t)inodedep->id_ino,
14793 (intmax_t)fsbtodb(inodedep->id_fs,
14794 ino_to_fsba(inodedep->id_fs, inodedep->id_ino)),
14795 (intmax_t)inodedep->id_nlinkdelta,
14796 (intmax_t)inodedep->id_savednlink);
14797
14798 if (verbose == 0)
14799 return;
14800
14801 db_printf(" bmsafemap %p, mkdiradd %p, inoreflst %p\n",
14802 inodedep->id_bmsafemap,
14803 inodedep->id_mkdiradd,
14804 TAILQ_FIRST(&inodedep->id_inoreflst));
14805 db_printf(" dirremhd %p, pendinghd %p, bufwait %p\n",
14806 LIST_FIRST(&inodedep->id_dirremhd),
14807 LIST_FIRST(&inodedep->id_pendinghd),
14808 LIST_FIRST(&inodedep->id_bufwait));
14809 db_printf(" inowait %p, inoupdt %p, newinoupdt %p\n",
14810 LIST_FIRST(&inodedep->id_inowait),
14811 TAILQ_FIRST(&inodedep->id_inoupdt),
14812 TAILQ_FIRST(&inodedep->id_newinoupdt));
14813 db_printf(" extupdt %p, newextupdt %p, freeblklst %p\n",
14814 TAILQ_FIRST(&inodedep->id_extupdt),
14815 TAILQ_FIRST(&inodedep->id_newextupdt),
14816 TAILQ_FIRST(&inodedep->id_freeblklst));
14817 db_printf(" saveino %p, savedsize %jd, savedextsize %jd\n",
14818 inodedep->id_savedino1,
14819 (intmax_t)inodedep->id_savedsize,
14820 (intmax_t)inodedep->id_savedextsize);
14821 }
14822
14823 static void
14824 newblk_print(struct newblk *nbp)
14825 {
14826
14827 worklist_print(&nbp->nb_list, 0);
14828 db_printf(" newblkno %jd\n", (intmax_t)nbp->nb_newblkno);
14829 db_printf(" jnewblk %p, bmsafemap %p, freefrag %p\n",
14830 &nbp->nb_jnewblk,
14831 &nbp->nb_bmsafemap,
14832 &nbp->nb_freefrag);
14833 db_printf(" indirdeps %p, newdirblk %p, jwork %p\n",
14834 LIST_FIRST(&nbp->nb_indirdeps),
14835 LIST_FIRST(&nbp->nb_newdirblk),
14836 LIST_FIRST(&nbp->nb_jwork));
14837 }
14838
14839 static void
14840 allocdirect_print(struct allocdirect *adp)
14841 {
14842
14843 newblk_print(&adp->ad_block);
14844 db_printf(" oldblkno %jd, oldsize %ld, newsize %ld\n",
14845 adp->ad_oldblkno, adp->ad_oldsize, adp->ad_newsize);
14846 db_printf(" offset %d, inodedep %p\n",
14847 adp->ad_offset, adp->ad_inodedep);
14848 }
14849
14850 static void
14851 allocindir_print(struct allocindir *aip)
14852 {
14853
14854 newblk_print(&aip->ai_block);
14855 db_printf(" oldblkno %jd, lbn %jd\n",
14856 (intmax_t)aip->ai_oldblkno, (intmax_t)aip->ai_lbn);
14857 db_printf(" offset %d, indirdep %p\n",
14858 aip->ai_offset, aip->ai_indirdep);
14859 }
14860
14861 static void
14862 mkdir_print(struct mkdir *mkdir)
14863 {
14864
14865 worklist_print(&mkdir->md_list, 0);
14866 db_printf(" diradd %p, jaddref %p, buf %p\n",
14867 mkdir->md_diradd, mkdir->md_jaddref, mkdir->md_buf);
14868 }
14869
14870 DB_SHOW_COMMAND(sd_inodedep, db_show_sd_inodedep)
14871 {
14872
14873 if (have_addr == 0) {
14874 db_printf("inodedep address required\n");
14875 return;
14876 }
14877 inodedep_print((struct inodedep *)addr, 1);
14878 }
14879
14880 DB_SHOW_COMMAND(sd_allinodedeps, db_show_sd_allinodedeps)
14881 {
14882 struct inodedep_hashhead *inodedephd;
14883 struct inodedep *inodedep;
14884 struct ufsmount *ump;
14885 int cnt;
14886
14887 if (have_addr == 0) {
14888 db_printf("ufsmount address required\n");
14889 return;
14890 }
14891 ump = (struct ufsmount *)addr;
14892 for (cnt = 0; cnt < ump->inodedep_hash_size; cnt++) {
14893 inodedephd = &ump->inodedep_hashtbl[cnt];
14894 LIST_FOREACH(inodedep, inodedephd, id_hash) {
14895 inodedep_print(inodedep, 0);
14896 }
14897 }
14898 }
14899
14900 DB_SHOW_COMMAND(sd_worklist, db_show_sd_worklist)
14901 {
14902
14903 if (have_addr == 0) {
14904 db_printf("worklist address required\n");
14905 return;
14906 }
14907 worklist_print((struct worklist *)addr, 1);
14908 }
14909
14910 DB_SHOW_COMMAND(sd_workhead, db_show_sd_workhead)
14911 {
14912 struct worklist *wk;
14913 struct workhead *wkhd;
14914
14915 if (have_addr == 0) {
14916 db_printf("worklist address required "
14917 "(for example value in bp->b_dep)\n");
14918 return;
14919 }
14920 /*
14921 * We often do not have the address of the worklist head but
14922 * instead a pointer to its first entry (e.g., we have the
14923 * contents of bp->b_dep rather than &bp->b_dep). But the back
14924 * pointer of bp->b_dep will point at the head of the list, so
14925 * we cheat and use that instead. If we are in the middle of
14926 * a list we will still get the same result, so nothing
14927 * unexpected will result.
14928 */
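	/*
	 * Concretely: a LIST entry's wk_list.le_prev points at the
	 * previous entry's le_next field, or at the head's lh_first for
	 * the first entry. Since a workhead is just a pointer to the
	 * first worklist entry, either address can be treated as a
	 * workhead whose first element is wk itself, so the loop below
	 * walks from wk to the end of the list.
	 */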
14929 wk = (struct worklist *)addr;
14930 if (wk == NULL)
14931 return;
14932 wkhd = (struct workhead *)wk->wk_list.le_prev;
14933 LIST_FOREACH(wk, wkhd, wk_list) {
14934 switch (wk->wk_type) {
14935 case D_INODEDEP:
14936 inodedep_print(WK_INODEDEP(wk), 0);
14937 continue;
14938 case D_ALLOCDIRECT:
14939 allocdirect_print(WK_ALLOCDIRECT(wk));
14940 continue;
14941 case D_ALLOCINDIR:
14942 allocindir_print(WK_ALLOCINDIR(wk));
14943 continue;
14944 case D_MKDIR:
14945 mkdir_print(WK_MKDIR(wk));
14946 continue;
14947 default:
14948 worklist_print(wk, 0);
14949 continue;
14950 }
14951 }
14952 }
14953
14954 DB_SHOW_COMMAND(sd_mkdir, db_show_sd_mkdir)
14955 {
14956 if (have_addr == 0) {
14957 db_printf("mkdir address required\n");
14958 return;
14959 }
14960 mkdir_print((struct mkdir *)addr);
14961 }
14962
14963 DB_SHOW_COMMAND(sd_mkdir_list, db_show_sd_mkdir_list)
14964 {
14965 struct mkdirlist *mkdirlisthd;
14966 struct mkdir *mkdir;
14967
14968 if (have_addr == 0) {
14969 db_printf("mkdir listhead address required\n");
14970 return;
14971 }
14972 mkdirlisthd = (struct mkdirlist *)addr;
14973 LIST_FOREACH(mkdir, mkdirlisthd, md_mkdirs) {
14974 mkdir_print(mkdir);
14975 if (mkdir->md_diradd != NULL) {
14976 db_printf(" ");
14977 worklist_print(&mkdir->md_diradd->da_list, 0);
14978 }
14979 if (mkdir->md_jaddref != NULL) {
14980 db_printf(" ");
14981 worklist_print(&mkdir->md_jaddref->ja_list, 0);
14982 }
14983 }
14984 }
14985
14986 DB_SHOW_COMMAND(sd_allocdirect, db_show_sd_allocdirect)
14987 {
14988 if (have_addr == 0) {
14989 db_printf("allocdirect address required\n");
14990 return;
14991 }
14992 allocdirect_print((struct allocdirect *)addr);
14993 }
14994
14995 DB_SHOW_COMMAND(sd_allocindir, db_show_sd_allocindir)
14996 {
14997 if (have_addr == 0) {
14998 db_printf("allocindir address required\n");
14999 return;
15000 }
15001 allocindir_print((struct allocindir *)addr);
15002 }
15003
15004 #endif /* DDB */
15005
15006 #endif /* SOFTUPDATES */