/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>

static int32_t zfs_pd_bytes_max = 50 * 1024 * 1024;	/* 50MB */
static int32_t send_holes_without_birth_time = 1;
static uint_t zfs_traverse_indirect_prefetch_limit = 32;
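
/*
 * Traversal prefetch tunables: zfs_pd_bytes_max caps how many data bytes
 * the prefetch thread may have fetched ahead of the main traversal,
 * zfs_traverse_indirect_prefetch_limit bounds how many children of an
 * indirect block are prefetched per batch, and
 * send_holes_without_birth_time controls whether holes with a birth time
 * of zero are visited (see the comment in traverse_visitbp()).
 *
 * prefetch_data_t (below) is the state shared between the main traversal
 * thread and the prefetch thread: pd_bytes_fetched counts bytes that have
 * been prefetched but not yet consumed by the traversal, and pd_mtx/pd_cv
 * implement the producer/consumer handshake that keeps the prefetcher at
 * most zfs_pd_bytes_max bytes ahead.
 */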

typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int32_t pd_bytes_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
	zbookmark_phys_t pd_resume;
} prefetch_data_t;

typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_phys_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	boolean_t td_paused;
	uint64_t td_hole_birth_enabled_txg;
	blkptr_cb_t *td_func;
	void *td_arg;
	boolean_t td_realloc_possible;
} traverse_data_t;

static int traverse_dnode(traverse_data_t *td, const blkptr_t *bp,
    const dnode_phys_t *dnp, uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);

static int
traverse_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
    uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp))
		return (0);

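	/*
	 * If this ZIL has not been claimed (claim_txg == 0), any block born
	 * at or after the pool's minimum claim txg may still be modified or
	 * freed by a future claim, so abort the ZIL walk rather than visit
	 * potentially unstable blocks.
	 */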
	if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(td->td_spa))
		return (-1);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}

static int
traverse_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
    uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp))
			return (0);

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}

static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus blocks that are already stable in read-only mode.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog_t *zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);
	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg, !(td->td_flags & TRAVERSE_NO_DECRYPT));
	zil_free(zilog);
}

typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
 * resume point. This indicates that this block should be visited but not its
 * children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		/*
		 * If we found the block we're trying to resume from, zero
		 * the bookmark out to indicate that we have resumed.
		 */
		if (memcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			memset(td->td_resume, 0, sizeof (*zb));
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}

/*
 * Returns B_TRUE if a prefetch read is issued, otherwise B_FALSE.
 */
static boolean_t
traverse_prefetch_metadata(traverse_data_t *td,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
	    ARC_FLAG_PRESCIENT_PREFETCH;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return (B_FALSE);
	/*
	 * If we are in the process of resuming, don't prefetch, because
	 * some children will not be needed (and in fact may have already
	 * been freed).
	 */
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
		return (B_FALSE);
	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
		return (B_FALSE);
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return (B_FALSE);
	ASSERT(!BP_IS_REDACTED(bp));

	if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
		zio_flags |= ZIO_FLAG_RAW;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
	return (B_TRUE);
}

static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
	ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG || BP_IS_REDACTED(bp))
		return (B_FALSE);
	return (B_TRUE);
}

static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	int err = 0;
	arc_buf_t *buf = NULL;
	prefetch_data_t *pd = td->td_pfd;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
	}

	if (bp->blk_birth == 0) {
		/*
		 * Since this block has a birth time of 0 it must be one of
		 * two things: a hole created before the
		 * SPA_FEATURE_HOLE_BIRTH feature was enabled, or a hole
		 * which has always been a hole in an object.
		 *
		 * If a file is written sparsely, then the unwritten parts of
		 * the file were "always holes" -- that is, they have been
		 * holes since this object was allocated. However, we (and
		 * our callers) cannot necessarily tell when an object was
		 * allocated. Therefore, if it's possible that this object
		 * was freed and then its object number reused, we need to
		 * visit all the holes with birth==0.
		 *
		 * If it isn't possible that the object number was reused,
		 * then if SPA_FEATURE_HOLE_BIRTH was enabled before we wrote
		 * all the blocks we will visit as part of this traversal,
		 * then this hole must have always existed, so we can skip
		 * it. We visit blocks born after (exclusive) td_min_txg.
		 *
		 * Note that the meta-dnode cannot be reallocated.
		 */
		if (!send_holes_without_birth_time &&
		    (!td->td_realloc_possible ||
		    zb->zb_object == DMU_META_DNODE_OBJECT) &&
		    td->td_hole_birth_enabled_txg <= td->td_min_txg)
			return (0);
	} else if (bp->blk_birth <= td->td_min_txg) {
		return (0);
	}

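	/*
	 * Flow control against the prefetch thread: wait until it has
	 * fetched at least this block's worth of data, then return those
	 * bytes to its budget (see traverse_prefetcher()).
	 */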
	if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
		uint64_t size = BP_GET_LSIZE(bp);
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_bytes_fetched >= 0);
		while (pd->pd_bytes_fetched < size && !pd->pd_exited)
			cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_bytes_fetched -= size;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}

	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err != 0)
			goto post;
		return (0);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			goto post;
	}

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_FLAG_WAIT;
		int32_t i, ptidx, pidx;
		uint32_t prefetchlimit;
		int32_t epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		zbookmark_phys_t *czb;

		ASSERT(!BP_IS_PROTECTED(bp));

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

		/*
		 * When performing a traversal it is beneficial to
		 * asynchronously read-ahead the upcoming indirect
		 * blocks since they will be needed shortly. However,
		 * since a 128k indirect (non-L0) block may contain up
		 * to 1024 128-byte block pointers, it's preferable to not
		 * prefetch them all at once. Issuing a large number of
		 * async reads may affect performance, and the earlier
		 * the indirect blocks are prefetched the less likely
		 * they are to still be resident in the ARC when needed.
		 * Therefore, prefetching indirect blocks is limited to
		 * zfs_traverse_indirect_prefetch_limit=32 blocks by
		 * default.
		 *
		 * pidx: Index of the next block pointer to be prefetched.
		 * ptidx: Index at which the next prefetch batch is triggered.
		 */
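		/*
		 * For example, with the default limit of 32, the first
		 * pass through the loop below prefetches entries 1-32.
		 * Once half the batch (16) has been issued, ptidx is set
		 * to the index of that 16th entry, so when the visitor
		 * reaches it the next batch of up to 32 is issued. This
		 * keeps the prefetcher roughly 16 to 48 blocks ahead of
		 * the visitor.
		 */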
		ptidx = 0;
		pidx = 1;
		prefetchlimit = zfs_traverse_indirect_prefetch_limit;
		for (i = 0; i < epb; i++) {
			if (prefetchlimit && i == ptidx) {
				ASSERT3S(ptidx, <=, pidx);
				for (uint32_t prefetched = 0; pidx < epb &&
				    prefetched < prefetchlimit; pidx++) {
					SET_BOOKMARK(czb, zb->zb_objset,
					    zb->zb_object, zb->zb_level - 1,
					    zb->zb_blkid * epb + pidx);
					if (traverse_prefetch_metadata(td,
					    &((blkptr_t *)buf->b_data)[pidx],
					    czb) == B_TRUE) {
						prefetched++;
						if (prefetched ==
						    MAX(prefetchlimit / 2, 1))
							ptidx = pidx;
					}
				}
			}

			/* recursively visitbp() blocks below this */
			SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp,
			    &((blkptr_t *)buf->b_data)[i], czb);
			if (err != 0)
				break;
		}

		kmem_free(czb, sizeof (zbookmark_phys_t));


	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_FLAG_WAIT;
		uint32_t zio_flags = ZIO_FLAG_CANFAIL;
		int32_t i;
		int32_t epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		dnode_phys_t *child_dnp;

		/*
		 * dnode blocks might have their bonus buffers encrypted, so
		 * we must be careful to honor TRAVERSE_NO_DECRYPT
		 */
		if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err != 0)
			goto post;

		child_dnp = buf->b_data;

		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			prefetch_dnode_metadata(td, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
			err = traverse_dnode(td, bp, &child_dnp[i],
			    zb->zb_objset, zb->zb_blkid * epb + i);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t zio_flags = ZIO_FLAG_CANFAIL;
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;

		if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err != 0)
			goto post;

		osp = buf->b_data;
		prefetch_dnode_metadata(td, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		/*
		 * See the block comment above for the goal of this variable.
		 * If the maxblkid of the meta-dnode is 0, then we know that
		 * we've never had more than DNODES_PER_BLOCK objects in the
		 * dataset, which means we can't have reused any object ids.
		 */
		if (osp->os_meta_dnode.dn_maxblkid == 0)
			td->td_realloc_possible = B_FALSE;

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				prefetch_dnode_metadata(td,
				    &osp->os_projectused_dnode,
				    zb->zb_objset, DMU_PROJECTUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, bp, &osp->os_meta_dnode, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err == 0 && OBJSET_BUF_HAS_USERUSED(buf)) {
			if (OBJSET_BUF_HAS_PROJECTUSED(buf))
				err = traverse_dnode(td, bp,
				    &osp->os_projectused_dnode, zb->zb_objset,
				    DMU_PROJECTUSED_OBJECT);
			if (err == 0)
				err = traverse_dnode(td, bp,
				    &osp->os_groupused_dnode, zb->zb_objset,
				    DMU_GROUPUSED_OBJECT);
			if (err == 0)
				err = traverse_dnode(td, bp,
				    &osp->os_userused_dnode, zb->zb_objset,
				    DMU_USERUSED_OBJECT);
		}
	}

	if (buf)
		arc_buf_destroy(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST))
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);

	if ((td->td_flags & TRAVERSE_HARD) && (err == EIO || err == ECKSUM)) {
		/*
		 * Ignore this disk error as requested by the HARD flag,
		 * and continue traversal.
		 */
		err = 0;
	}

	/*
	 * If we are stopping here, set td_resume.
	 */
	if (td->td_resume != NULL && err != 0 && !td->td_paused) {
		td->td_resume->zb_objset = zb->zb_objset;
		td->td_resume->zb_object = zb->zb_object;
		td->td_resume->zb_level = 0;
		/*
		 * If we have stopped on an indirect block (e.g. due to
		 * i/o error), we have not visited anything below it.
		 * Set the bookmark to the first level-0 block that we need
		 * to visit. This way, the resuming code does not need to
		 * deal with resuming from indirect blocks.
		 *
		 * Note, if zb_level <= 0, dnp may be NULL, so we don't want
		 * to dereference it.
		 */
		td->td_resume->zb_blkid = zb->zb_blkid;
		if (zb->zb_level > 0) {
			td->td_resume->zb_blkid <<= zb->zb_level *
			    (dnp->dn_indblkshift - SPA_BLKPTRSHIFT);
		}
		td->td_paused = B_TRUE;
	}

	return (err);
}

static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, DN_SPILL_BLKPTR(dnp), &czb);
	}
}

static int
traverse_dnode(traverse_data_t *td, const blkptr_t *bp, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0;
	zbookmark_phys_t czb;

	if (object != DMU_META_DNODE_OBJECT && td->td_resume != NULL &&
	    object < td->td_resume->zb_object)
		return (0);

	if (td->td_flags & TRAVERSE_PRE) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, bp, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0)
			break;
	}

	if (err == 0 && (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, DN_SPILL_BLKPTR(dnp), &czb);
	}

	if (err == 0 && (td->td_flags & TRAVERSE_POST)) {
		SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
		    ZB_DNODE_BLKID);
		err = td->td_func(td->td_spa, NULL, bp, &czb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			return (err);
	}
	return (err);
}

static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	(void) zilog, (void) dnp;
	prefetch_data_t *pfd = arg;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
	    ARC_FLAG_PRESCIENT_PREFETCH;

	ASSERT(pfd->pd_bytes_fetched >= 0);
	if (zb->zb_level == ZB_DNODE_LEVEL)
		return (0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (!prefetch_needed(pfd, bp))
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_bytes_fetched >= zfs_pd_bytes_max)
		cv_wait_sig(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_bytes_fetched += BP_GET_LSIZE(bp);
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	if ((pfd->pd_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
		zio_flags |= ZIO_FLAG_RAW;

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    zio_flags, &aflags, zb);

	return (0);
}

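/*
 * The prefetch thread runs the same traversal as the main thread, but with
 * traverse_prefetcher() as the callback, so it does nothing except issue
 * asynchronous reads for the blocks the main thread is about to visit.
 */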
static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_phys_t czb;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;
	td.td_resume = &td_main->td_pfd->pd_resume;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
	spl_fstrans_unmark(cookie);
}

/*
 * NB: dataset must not be changing on-disk (e.g., is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t *td;
	prefetch_data_t *pd;
	zbookmark_phys_t *czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	td = kmem_alloc(sizeof (traverse_data_t), KM_SLEEP);
	pd = kmem_zalloc(sizeof (prefetch_data_t), KM_SLEEP);
	czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);

	td->td_spa = spa;
	td->td_objset = objset;
	td->td_rootbp = rootbp;
	td->td_min_txg = txg_start;
	td->td_resume = resume;
	td->td_func = func;
	td->td_arg = arg;
	td->td_pfd = pd;
	td->td_flags = flags;
	td->td_paused = B_FALSE;
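	/*
	 * A traversal from txg 0 visits every block ever born in the
	 * dataset, so a freed-and-reallocated object number cannot cause
	 * holes to be skipped incorrectly; an incremental traversal has
	 * no such guarantee (see the hole-birth comment in
	 * traverse_visitbp()).
	 */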
	td->td_realloc_possible = (txg_start == 0 ? B_FALSE : B_TRUE);

	if (spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
		VERIFY(spa_feature_enabled_txg(spa,
		    SPA_FEATURE_HOLE_BIRTH, &td->td_hole_birth_enabled_txg));
	} else {
		td->td_hole_birth_enabled_txg = UINT64_MAX;
	}

	pd->pd_flags = flags;
	if (resume != NULL)
		pd->pd_resume = *resume;
	mutex_init(&pd->pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd->pd_cv, NULL, CV_DEFAULT, NULL);

	SET_BOOKMARK(czb, td->td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !ds->ds_is_snapshot && !BP_IS_HOLE(rootbp)) {
		zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
		uint32_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;
		ASSERT(!BP_IS_REDACTED(rootbp));

		if ((td->td_flags & TRAVERSE_NO_DECRYPT) &&
		    BP_IS_PROTECTED(rootbp))
			zio_flags |= ZIO_FLAG_RAW;

		err = arc_read(NULL, td->td_spa, rootbp, arc_getbuf_func,
		    &buf, ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, czb);
		if (err != 0) {
			/*
			 * If both TRAVERSE_HARD and TRAVERSE_PRE are set,
			 * continue to visitbp so that td_func can be called
			 * in the pre stage, and err will be reset to zero.
			 */
			if (!(td->td_flags & TRAVERSE_HARD) ||
			    !(td->td_flags & TRAVERSE_PRE))
				goto out;
		} else {
			osp = buf->b_data;
			traverse_zil(td, &osp->os_zil_header);
			arc_buf_destroy(buf, &buf);
		}
	}

	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    taskq_dispatch(spa->spa_prefetch_taskq, traverse_prefetch_thread,
	    td, TQ_NOQUEUE) == TASKQID_INVALID)
		pd->pd_exited = B_TRUE;

	err = traverse_visitbp(td, NULL, rootbp, czb);

	mutex_enter(&pd->pd_mtx);
	pd->pd_cancel = B_TRUE;
	cv_broadcast(&pd->pd_cv);
	while (!pd->pd_exited)
		cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
	mutex_exit(&pd->pd_mtx);
out:
	mutex_destroy(&pd->pd_mtx);
	cv_destroy(&pd->pd_cv);

	kmem_free(czb, sizeof (zbookmark_phys_t));
	kmem_free(pd, sizeof (struct prefetch_data));
	kmem_free(td, sizeof (struct traverse_data));

	return (err);
}

/*
 * NB: dataset must not be changing on-disk (e.g., is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset_resume(dsl_dataset_t *ds, uint64_t txg_start,
    zbookmark_phys_t *resume,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &dsl_dataset_phys(ds)->ds_bp, txg_start, resume, flags, func, arg));
}

int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start,
    int flags, blkptr_cb_t func, void *arg)
{
	return (traverse_dataset_resume(ds, txg_start, NULL, flags, func, arg));
}
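
/*
 * Illustrative sketch (not part of this interface): a minimal blkptr_cb_t
 * callback that counts the non-hole blocks in a dataset. The signature and
 * flags match the definitions above; the names count_blocks_cb and count
 * are hypothetical.
 *
 *	static int
 *	count_blocks_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 *	    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
 *	{
 *		uint64_t *count = arg;
 *
 *		if (bp != NULL && !BP_IS_HOLE(bp))
 *			(*count)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	int err = traverse_dataset(ds, 0,
 *	    TRAVERSE_PRE | TRAVERSE_PREFETCH_DATA, count_blocks_cb, &count);
 */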

int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}

/*
 * NB: pool must not be changing on-disk (e.g., from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (uint64_t obj = 1; err == 0;
	    err = dmu_object_next(mos, &obj, B_FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (hard)
				continue;
			break;
		}

		if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (hard)
					continue;
				break;
			}
			if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
				txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0)
				break;
		}
	}
	if (err == ESRCH)
		err = 0;
	return (err);
}

EXPORT_SYMBOL(traverse_dataset);
EXPORT_SYMBOL(traverse_pool);

ZFS_MODULE_PARAM(zfs, zfs_, pd_bytes_max, INT, ZMOD_RW,
	"Max number of bytes to prefetch");

ZFS_MODULE_PARAM(zfs, zfs_, traverse_indirect_prefetch_limit, UINT, ZMOD_RW,
	"Traverse prefetch number of blocks pointed to by an indirect block");

#if defined(_KERNEL)
module_param_named(ignore_hole_birth, send_holes_without_birth_time, int, 0644);
MODULE_PARM_DESC(ignore_hole_birth,
	"Alias for send_holes_without_birth_time");
#endif

/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , send_holes_without_birth_time, INT, ZMOD_RW,
	"Ignore hole_birth txg for zfs send");