FreeBSD/Linux Kernel Cross Reference
sys/contrib/openzfs/module/zfs/zio.c

    1 /*
    2  * CDDL HEADER START
    3  *
    4  * The contents of this file are subject to the terms of the
    5  * Common Development and Distribution License (the "License").
    6  * You may not use this file except in compliance with the License.
    7  *
    8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
    9  * or https://opensource.org/licenses/CDDL-1.0.
   10  * See the License for the specific language governing permissions
   11  * and limitations under the License.
   12  *
   13  * When distributing Covered Code, include this CDDL HEADER in each
   14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
   15  * If applicable, add the following below this CDDL HEADER, with the
   16  * fields enclosed by brackets "[]" replaced with your own identifying
   17  * information: Portions Copyright [yyyy] [name of copyright owner]
   18  *
   19  * CDDL HEADER END
   20  */
   21 /*
   22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
   23  * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
   24  * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
   25  * Copyright (c) 2017, Intel Corporation.
   26  * Copyright (c) 2019, Klara Inc.
   27  * Copyright (c) 2019, Allan Jude
   28  * Copyright (c) 2021, Datto, Inc.
   29  */
   30 
   31 #include <sys/sysmacros.h>
   32 #include <sys/zfs_context.h>
   33 #include <sys/fm/fs/zfs.h>
   34 #include <sys/spa.h>
   35 #include <sys/txg.h>
   36 #include <sys/spa_impl.h>
   37 #include <sys/vdev_impl.h>
   38 #include <sys/vdev_trim.h>
   39 #include <sys/zio_impl.h>
   40 #include <sys/zio_compress.h>
   41 #include <sys/zio_checksum.h>
   42 #include <sys/dmu_objset.h>
   43 #include <sys/arc.h>
   44 #include <sys/ddt.h>
   45 #include <sys/blkptr.h>
   46 #include <sys/zfeature.h>
   47 #include <sys/dsl_scan.h>
   48 #include <sys/metaslab_impl.h>
   49 #include <sys/time.h>
   50 #include <sys/trace_zfs.h>
   51 #include <sys/abd.h>
   52 #include <sys/dsl_crypt.h>
   53 #include <cityhash.h>
   54 
   55 /*
   56  * ==========================================================================
   57  * I/O type descriptions
   58  * ==========================================================================
   59  */
   60 const char *const zio_type_name[ZIO_TYPES] = {
   61         /*
   62          * Note: Linux kernel thread name length is limited
   63          * so these names will differ from upstream OpenZFS.
   64          */
   65         "z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl", "z_trim"
   66 };
   67 
   68 int zio_dva_throttle_enabled = B_TRUE;
   69 static int zio_deadman_log_all = B_FALSE;
   70 
   71 /*
   72  * ==========================================================================
   73  * I/O kmem caches
   74  * ==========================================================================
   75  */
   76 static kmem_cache_t *zio_cache;
   77 static kmem_cache_t *zio_link_cache;
   78 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
   79 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
   80 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
   81 static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
   82 static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
   83 #endif
   84 
   85 /* Mark IOs as "slow" if they take longer than 30 seconds */
   86 static uint_t zio_slow_io_ms = (30 * MILLISEC);
   87 
   88 #define BP_SPANB(indblkshift, level) \
   89         (((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
   90 #define COMPARE_META_LEVEL      0x80000000ul
   91 /*
   92  * The following actions directly affect the spa's sync-to-convergence logic.
   93  * The values below define the sync pass when we start performing the action.
   94  * Care should be taken when changing these values as they directly impact
   95  * spa_sync() performance. Tuning these values may introduce subtle performance
   96  * pathologies and should only be done in the context of performance analysis.
   97  * These tunables will eventually be removed and replaced with #defines once
   98  * enough analysis has been done to determine optimal values.
   99  *
  100  * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
  101  * regular blocks are not deferred.
  102  *
  103  * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
  104  * compression (including of metadata).  In practice, we don't have this
  105  * many sync passes, so this has no effect.
  106  *
  107  * The original intent was that disabling compression would help the sync
  108  * passes to converge. However, in practice disabling compression increases
  109  * the average number of sync passes, because when we turn compression off,
  110  * many blocks change size and thus we have to re-allocate (not
  111  * overwrite) them. It also increases the number of 128KB allocations (e.g.
  112  * for indirect blocks and spacemaps) because these will not be compressed.
  113  * The 128K allocations are especially detrimental to performance on highly
  114  * fragmented systems, which may have very few free segments of this size,
  115  * and may need to load new metaslabs to satisfy 128K allocations.
  116  */
  117 
  118 /* defer frees starting in this pass */
  119 uint_t zfs_sync_pass_deferred_free = 2;
  120 
  121 /* don't compress starting in this pass */
  122 static uint_t zfs_sync_pass_dont_compress = 8;
  123 
  124 /* rewrite new bps starting in this pass */
  125 static uint_t zfs_sync_pass_rewrite = 2;
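
/*
 * A sketch of how these pass thresholds are typically consulted (compare the
 * real checks in zio_free() below and in zio_write_compress() later in this
 * file; `compress' is assumed to be the caller's local):
 *
 *	uint_t pass = spa_sync_pass(spa);
 *
 *	if (pass >= zfs_sync_pass_dont_compress)
 *		compress = ZIO_COMPRESS_OFF;
 *	if (pass >= zfs_sync_pass_deferred_free)
 *		... defer the free to the next txg ...
 */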
  126 
  127 /*
  128  * An allocating zio is one that either currently has the DVA allocate
  129  * stage set or will have it later in its lifetime.
  130  */
  131 #define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
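
/*
 * Usage sketch: because the test is against io_orig_pipeline, it holds for
 * the whole lifetime of an allocating write, before and after the DVA
 * allocate stage actually runs, e.g. in the allocation throttle path:
 *
 *	ASSERT(IO_IS_ALLOCATING(zio));
 */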
  132 
  133 /*
  134  * Enable smaller cores by excluding metadata
  135  * allocations as well.
  136  */
  137 int zio_exclude_metadata = 0;
  138 static int zio_requeue_io_start_cut_in_line = 1;
  139 
  140 #ifdef ZFS_DEBUG
  141 static const int zio_buf_debug_limit = 16384;
  142 #else
  143 static const int zio_buf_debug_limit = 0;
  144 #endif
  145 
  146 static inline void __zio_execute(zio_t *zio);
  147 
  148 static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
  149 
  150 void
  151 zio_init(void)
  152 {
  153         size_t c;
  154 
  155         zio_cache = kmem_cache_create("zio_cache",
  156             sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
  157         zio_link_cache = kmem_cache_create("zio_link_cache",
  158             sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
  159 
  160         /*
  161          * For small buffers, we want a cache for each multiple of
  162          * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
  163          * for each quarter-power of 2.
  164          */
  165         for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
  166                 size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
  167                 size_t p2 = size;
  168                 size_t align = 0;
  169                 size_t data_cflags, cflags;
  170 
  171                 data_cflags = KMC_NODEBUG;
  172                 cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
  173                     KMC_NODEBUG : 0;
  174 
  175                 while (!ISP2(p2))
  176                         p2 &= p2 - 1;
  177 
  178 #ifndef _KERNEL
  179                 /*
  180                  * If we are using watchpoints, put each buffer on its own page,
  181                  * to eliminate the performance overhead of trapping to the
  182                  * kernel when modifying a non-watched buffer that shares the
  183                  * page with a watched buffer.
  184                  */
  185                 if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
  186                         continue;
  187                 /*
  188                  * Here's the problem: on 4K native devices in userland on
  189                  * Linux using O_DIRECT, buffers must be 4K aligned or I/O
  190                  * will fail with EINVAL, causing zdb (and others) to coredump.
  191                  * Since userland probably doesn't need optimized buffer caches,
  192                  * we just force 4K alignment on everything.
  193                  */
  194                 align = 8 * SPA_MINBLOCKSIZE;
  195 #else
  196                 if (size < PAGESIZE) {
  197                         align = SPA_MINBLOCKSIZE;
  198                 } else if (IS_P2ALIGNED(size, p2 >> 2)) {
  199                         align = PAGESIZE;
  200                 }
  201 #endif
  202 
  203                 if (align != 0) {
  204                         char name[36];
  205                         if (cflags == data_cflags) {
  206                                 /*
  207                                  * Resulting kmem caches would be identical.
  208                                  * Save memory by creating only one.
  209                                  */
  210                                 (void) snprintf(name, sizeof (name),
  211                                     "zio_buf_comb_%lu", (ulong_t)size);
  212                                 zio_buf_cache[c] = kmem_cache_create(name,
  213                                     size, align, NULL, NULL, NULL, NULL, NULL,
  214                                     cflags);
  215                                 zio_data_buf_cache[c] = zio_buf_cache[c];
  216                                 continue;
  217                         }
  218                         (void) snprintf(name, sizeof (name), "zio_buf_%lu",
  219                             (ulong_t)size);
  220                         zio_buf_cache[c] = kmem_cache_create(name, size,
  221                             align, NULL, NULL, NULL, NULL, NULL, cflags);
  222 
  223                         (void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
  224                             (ulong_t)size);
  225                         zio_data_buf_cache[c] = kmem_cache_create(name, size,
  226                             align, NULL, NULL, NULL, NULL, NULL, data_cflags);
  227                 }
  228         }
  229 
  230         while (--c != 0) {
  231                 ASSERT(zio_buf_cache[c] != NULL);
  232                 if (zio_buf_cache[c - 1] == NULL)
  233                         zio_buf_cache[c - 1] = zio_buf_cache[c];
  234 
  235                 ASSERT(zio_data_buf_cache[c] != NULL);
  236                 if (zio_data_buf_cache[c - 1] == NULL)
  237                         zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
  238         }
  239 
  240         zio_inject_init();
  241 
  242         lz4_init();
  243 }
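
/*
 * A sketch of the resulting mapping, assuming SPA_MINBLOCKSIZE == 512 and
 * PAGESIZE == 4096: an allocation of N bytes indexes cache
 * (N - 1) >> SPA_MINBLOCKSHIFT, and the fill loop above points every index
 * that did not get its own cache at the next larger one that did:
 *
 *	zio_buf_alloc(512)	-> zio_buf_cache[0]	(512-byte cache)
 *	zio_buf_alloc(4096)	-> zio_buf_cache[7]	(4K cache)
 *	zio_buf_alloc(5000)	-> zio_buf_cache[9]	(5K quarter-power cache)
 */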
  244 
  245 void
  246 zio_fini(void)
  247 {
  248         size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;
  249 
  250 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
  251         for (size_t i = 0; i < n; i++) {
  252                 if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
  253                         (void) printf("zio_fini: [%d] %llu != %llu\n",
  254                             (int)((i + 1) << SPA_MINBLOCKSHIFT),
  255                             (long long unsigned)zio_buf_cache_allocs[i],
  256                             (long long unsigned)zio_buf_cache_frees[i]);
  257         }
  258 #endif
  259 
  260         /*
  261          * The same kmem cache can show up multiple times in both zio_buf_cache
  262          * and zio_data_buf_cache. Do a wasteful but trivially correct scan to
  263          * sort it out.
  264          */
  265         for (size_t i = 0; i < n; i++) {
  266                 kmem_cache_t *cache = zio_buf_cache[i];
  267                 if (cache == NULL)
  268                         continue;
  269                 for (size_t j = i; j < n; j++) {
  270                         if (cache == zio_buf_cache[j])
  271                                 zio_buf_cache[j] = NULL;
  272                         if (cache == zio_data_buf_cache[j])
  273                                 zio_data_buf_cache[j] = NULL;
  274                 }
  275                 kmem_cache_destroy(cache);
  276         }
  277 
  278         for (size_t i = 0; i < n; i++) {
  279                 kmem_cache_t *cache = zio_data_buf_cache[i];
  280                 if (cache == NULL)
  281                         continue;
  282                 for (size_t j = i; j < n; j++) {
  283                         if (cache == zio_data_buf_cache[j])
  284                                 zio_data_buf_cache[j] = NULL;
  285                 }
  286                 kmem_cache_destroy(cache);
  287         }
  288 
  289         for (size_t i = 0; i < n; i++) {
  290                 VERIFY3P(zio_buf_cache[i], ==, NULL);
  291                 VERIFY3P(zio_data_buf_cache[i], ==, NULL);
  292         }
  293 
  294         kmem_cache_destroy(zio_link_cache);
  295         kmem_cache_destroy(zio_cache);
  296 
  297         zio_inject_fini();
  298 
  299         lz4_fini();
  300 }
  301 
  302 /*
  303  * ==========================================================================
  304  * Allocate and free I/O buffers
  305  * ==========================================================================
  306  */
  307 
  308 /*
  309  * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
  310  * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
  311  * useful to inspect ZFS metadata, but if possible, we should avoid keeping
  312  * excess / transient data in-core during a crashdump.
  313  */
  314 void *
  315 zio_buf_alloc(size_t size)
  316 {
  317         size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
  318 
  319         VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
  320 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
  321         atomic_add_64(&zio_buf_cache_allocs[c], 1);
  322 #endif
  323 
  324         return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
  325 }
  326 
  327 /*
  328  * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
  329  * crashdump if the kernel panics.  This exists to limit the amount of ZFS
  330  * data that shows up in a kernel crashdump, thus reducing the amount of
  331  * kernel heap dumped to disk when the kernel panics.
  332  */
  333 void *
  334 zio_data_buf_alloc(size_t size)
  335 {
  336         size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
  337 
  338         VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
  339 
  340         return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
  341 }
  342 
  343 void
  344 zio_buf_free(void *buf, size_t size)
  345 {
  346         size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
  347 
  348         VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
  349 #if defined(ZFS_DEBUG) && !defined(_KERNEL)
  350         atomic_add_64(&zio_buf_cache_frees[c], 1);
  351 #endif
  352 
  353         kmem_cache_free(zio_buf_cache[c], buf);
  354 }
  355 
  356 void
  357 zio_data_buf_free(void *buf, size_t size)
  358 {
  359         size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
  360 
  361         VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
  362 
  363         kmem_cache_free(zio_data_buf_cache[c], buf);
  364 }
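
/*
 * Usage sketch: a buffer must be returned through the matching free routine
 * with the same size that was used to allocate it, since the size is what
 * selects the kmem cache (a hypothetical caller):
 *
 *	void *mbuf = zio_buf_alloc(size);	(metadata; kept in crashdumps)
 *	void *dbuf = zio_data_buf_alloc(size);	(file data; excluded)
 *	...
 *	zio_buf_free(mbuf, size);
 *	zio_data_buf_free(dbuf, size);
 */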
  365 
  366 static void
  367 zio_abd_free(void *abd, size_t size)
  368 {
  369         (void) size;
  370         abd_free((abd_t *)abd);
  371 }
  372 
  373 /*
  374  * ==========================================================================
  375  * Push and pop I/O transform buffers
  376  * ==========================================================================
  377  */
  378 void
  379 zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
  380     zio_transform_func_t *transform)
  381 {
  382         zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
  383 
  384         zt->zt_orig_abd = zio->io_abd;
  385         zt->zt_orig_size = zio->io_size;
  386         zt->zt_bufsize = bufsize;
  387         zt->zt_transform = transform;
  388 
  389         zt->zt_next = zio->io_transform_stack;
  390         zio->io_transform_stack = zt;
  391 
  392         zio->io_abd = data;
  393         zio->io_size = size;
  394 }
  395 
  396 void
  397 zio_pop_transforms(zio_t *zio)
  398 {
  399         zio_transform_t *zt;
  400 
  401         while ((zt = zio->io_transform_stack) != NULL) {
  402                 if (zt->zt_transform != NULL)
  403                         zt->zt_transform(zio,
  404                             zt->zt_orig_abd, zt->zt_orig_size);
  405 
  406                 if (zt->zt_bufsize != 0)
  407                         abd_free(zio->io_abd);
  408 
  409                 zio->io_abd = zt->zt_orig_abd;
  410                 zio->io_size = zt->zt_orig_size;
  411                 zio->io_transform_stack = zt->zt_next;
  412 
  413                 kmem_free(zt, sizeof (zio_transform_t));
  414         }
  415 }
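
/*
 * A sketch of the usual pattern, modeled on zio_read_bp_init() later in this
 * file (`psize' is the physical size from the block pointer): the read path
 * pushes a temporary buffer plus a callback, and zio_pop_transforms() later
 * invokes the callback to fill the caller's original buffer before restoring
 * io_abd and io_size:
 *
 *	abd_t *cdata = abd_alloc_sametype(zio->io_abd, psize);
 *	zio_push_transform(zio, cdata, psize, psize, zio_decompress);
 *	(... the physical read fills cdata ...)
 *	zio_pop_transforms(zio);	(decompresses into the original io_abd)
 */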
  416 
  417 /*
  418  * ==========================================================================
  419  * I/O transform callbacks for subblocks, decompression, and decryption
  420  * ==========================================================================
  421  */
  422 static void
  423 zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
  424 {
  425         ASSERT(zio->io_size > size);
  426 
  427         if (zio->io_type == ZIO_TYPE_READ)
  428                 abd_copy(data, zio->io_abd, size);
  429 }
  430 
  431 static void
  432 zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
  433 {
  434         if (zio->io_error == 0) {
  435                 void *tmp = abd_borrow_buf(data, size);
  436                 int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
  437                     zio->io_abd, tmp, zio->io_size, size,
  438                     &zio->io_prop.zp_complevel);
  439                 abd_return_buf_copy(data, tmp, size);
  440 
  441                 if (zio_injection_enabled && ret == 0)
  442                         ret = zio_handle_fault_injection(zio, EINVAL);
  443 
  444                 if (ret != 0)
  445                         zio->io_error = SET_ERROR(EIO);
  446         }
  447 }
  448 
  449 static void
  450 zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
  451 {
  452         int ret;
  453         void *tmp;
  454         blkptr_t *bp = zio->io_bp;
  455         spa_t *spa = zio->io_spa;
  456         uint64_t dsobj = zio->io_bookmark.zb_objset;
  457         uint64_t lsize = BP_GET_LSIZE(bp);
  458         dmu_object_type_t ot = BP_GET_TYPE(bp);
  459         uint8_t salt[ZIO_DATA_SALT_LEN];
  460         uint8_t iv[ZIO_DATA_IV_LEN];
  461         uint8_t mac[ZIO_DATA_MAC_LEN];
  462         boolean_t no_crypt = B_FALSE;
  463 
  464         ASSERT(BP_USES_CRYPT(bp));
  465         ASSERT3U(size, !=, 0);
  466 
  467         if (zio->io_error != 0)
  468                 return;
  469 
  470         /*
  471          * Verify the cksum of MACs stored in an indirect bp. It will always
  472          * be possible to verify this since it does not require an encryption
  473          * key.
  474          */
  475         if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
  476                 zio_crypt_decode_mac_bp(bp, mac);
  477 
  478                 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
  479                         /*
  480                          * We haven't decompressed the data yet, but
  481                          * zio_crypt_do_indirect_mac_checksum() requires
  482                          * decompressed data to be able to parse out the MACs
  483                          * from the indirect block. We decompress it now and
  484                          * throw away the result after we are finished.
  485                          */
  486                         tmp = zio_buf_alloc(lsize);
  487                         ret = zio_decompress_data(BP_GET_COMPRESS(bp),
  488                             zio->io_abd, tmp, zio->io_size, lsize,
  489                             &zio->io_prop.zp_complevel);
  490                         if (ret != 0) {
  491                                 ret = SET_ERROR(EIO);
  492                                 goto error;
  493                         }
  494                         ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
  495                             tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
  496                         zio_buf_free(tmp, lsize);
  497                 } else {
  498                         ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
  499                             zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
  500                 }
  501                 abd_copy(data, zio->io_abd, size);
  502 
  503                 if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
  504                         ret = zio_handle_decrypt_injection(spa,
  505                             &zio->io_bookmark, ot, ECKSUM);
  506                 }
  507                 if (ret != 0)
  508                         goto error;
  509 
  510                 return;
  511         }
  512 
  513         /*
  514          * If this is an authenticated block, just check the MAC. It would be
  515          * nice to separate this out into its own flag, but when this was done,
  516          * we had run out of bits in what is now zio_flag_t. Future cleanup
  517          * could make this a flag bit.
  518          */
  519         if (BP_IS_AUTHENTICATED(bp)) {
  520                 if (ot == DMU_OT_OBJSET) {
  521                         ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
  522                             dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
  523                 } else {
  524                         zio_crypt_decode_mac_bp(bp, mac);
  525                         ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
  526                             zio->io_abd, size, mac);
  527                         if (zio_injection_enabled && ret == 0) {
  528                                 ret = zio_handle_decrypt_injection(spa,
  529                                     &zio->io_bookmark, ot, ECKSUM);
  530                         }
  531                 }
  532                 abd_copy(data, zio->io_abd, size);
  533 
  534                 if (ret != 0)
  535                         goto error;
  536 
  537                 return;
  538         }
  539 
  540         zio_crypt_decode_params_bp(bp, salt, iv);
  541 
  542         if (ot == DMU_OT_INTENT_LOG) {
  543                 tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
  544                 zio_crypt_decode_mac_zil(tmp, mac);
  545                 abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
  546         } else {
  547                 zio_crypt_decode_mac_bp(bp, mac);
  548         }
  549 
  550         ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
  551             BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
  552             zio->io_abd, &no_crypt);
  553         if (no_crypt)
  554                 abd_copy(data, zio->io_abd, size);
  555 
  556         if (ret != 0)
  557                 goto error;
  558 
  559         return;
  560 
  561 error:
  562         /* assert that the key was found unless this was speculative */
  563         ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
  564 
  565         /*
  566          * If there was a decryption / authentication error return EIO as
  567          * the io_error. If this was not a speculative zio, create an ereport.
  568          */
  569         if (ret == ECKSUM) {
  570                 zio->io_error = SET_ERROR(EIO);
  571                 if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
  572                         spa_log_error(spa, &zio->io_bookmark);
  573                         (void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
  574                             spa, NULL, &zio->io_bookmark, zio, 0);
  575                 }
  576         } else {
  577                 zio->io_error = ret;
  578         }
  579 }
  580 
  581 /*
  582  * ==========================================================================
  583  * I/O parent/child relationships and pipeline interlocks
  584  * ==========================================================================
  585  */
  586 zio_t *
  587 zio_walk_parents(zio_t *cio, zio_link_t **zl)
  588 {
  589         list_t *pl = &cio->io_parent_list;
  590 
  591         *zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
  592         if (*zl == NULL)
  593                 return (NULL);
  594 
  595         ASSERT((*zl)->zl_child == cio);
  596         return ((*zl)->zl_parent);
  597 }
  598 
  599 zio_t *
  600 zio_walk_children(zio_t *pio, zio_link_t **zl)
  601 {
  602         list_t *cl = &pio->io_child_list;
  603 
  604         ASSERT(MUTEX_HELD(&pio->io_lock));
  605 
  606         *zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
  607         if (*zl == NULL)
  608                 return (NULL);
  609 
  610         ASSERT((*zl)->zl_parent == pio);
  611         return ((*zl)->zl_child);
  612 }
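
/*
 * Iteration sketch: a zio_link_t pointer initialized to NULL serves as the
 * cursor for both walkers; zio_walk_children() additionally requires the
 * parent's io_lock (`do_something' is a hypothetical callback):
 *
 *	zio_link_t *zl = NULL;
 *	zio_t *cio;
 *
 *	mutex_enter(&pio->io_lock);
 *	while ((cio = zio_walk_children(pio, &zl)) != NULL)
 *		do_something(cio);
 *	mutex_exit(&pio->io_lock);
 */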
  613 
  614 zio_t *
  615 zio_unique_parent(zio_t *cio)
  616 {
  617         zio_link_t *zl = NULL;
  618         zio_t *pio = zio_walk_parents(cio, &zl);
  619 
  620         VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
  621         return (pio);
  622 }
  623 
  624 void
  625 zio_add_child(zio_t *pio, zio_t *cio)
  626 {
  627         zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
  628 
  629         /*
  630          * Logical I/Os can have logical, gang, or vdev children.
  631          * Gang I/Os can have gang or vdev children.
  632          * Vdev I/Os can only have vdev children.
  633          * The following ASSERT captures all of these constraints.
  634          */
  635         ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
  636 
  637         zl->zl_parent = pio;
  638         zl->zl_child = cio;
  639 
  640         mutex_enter(&pio->io_lock);
  641         mutex_enter(&cio->io_lock);
  642 
  643         ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
  644 
  645         for (int w = 0; w < ZIO_WAIT_TYPES; w++)
  646                 pio->io_children[cio->io_child_type][w] += !cio->io_state[w];
  647 
  648         list_insert_head(&pio->io_child_list, zl);
  649         list_insert_head(&cio->io_parent_list, zl);
  650 
  651         pio->io_child_count++;
  652         cio->io_parent_count++;
  653 
  654         mutex_exit(&cio->io_lock);
  655         mutex_exit(&pio->io_lock);
  656 }
  657 
  658 static void
  659 zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
  660 {
  661         ASSERT(zl->zl_parent == pio);
  662         ASSERT(zl->zl_child == cio);
  663 
  664         mutex_enter(&pio->io_lock);
  665         mutex_enter(&cio->io_lock);
  666 
  667         list_remove(&pio->io_child_list, zl);
  668         list_remove(&cio->io_parent_list, zl);
  669 
  670         pio->io_child_count--;
  671         cio->io_parent_count--;
  672 
  673         mutex_exit(&cio->io_lock);
  674         mutex_exit(&pio->io_lock);
  675         kmem_cache_free(zio_link_cache, zl);
  676 }
  677 
  678 static boolean_t
  679 zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
  680 {
  681         boolean_t waiting = B_FALSE;
  682 
  683         mutex_enter(&zio->io_lock);
  684         ASSERT(zio->io_stall == NULL);
  685         for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
  686                 if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
  687                         continue;
  688 
  689                 uint64_t *countp = &zio->io_children[c][wait];
  690                 if (*countp != 0) {
  691                         zio->io_stage >>= 1;
  692                         ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
  693                         zio->io_stall = countp;
  694                         waiting = B_TRUE;
  695                         break;
  696                 }
  697         }
  698         mutex_exit(&zio->io_lock);
  699         return (waiting);
  700 }
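
/*
 * A sketch of how pipeline stages use this (compare zio_done() and the vdev
 * I/O stages later in this file): returning NULL from a stage parks the zio
 * until zio_notify_parent() sees the child count reach zero and redispatches
 * it (`zio_example_stage' is hypothetical):
 *
 *	static zio_t *
 *	zio_example_stage(zio_t *zio)
 *	{
 *		if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT,
 *		    ZIO_WAIT_DONE))
 *			return (NULL);		(re-entered later)
 *		...
 *		return (zio);			(advance to the next stage)
 *	}
 */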
  701 
  702 __attribute__((always_inline))
  703 static inline void
  704 zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
  705     zio_t **next_to_executep)
  706 {
  707         uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
  708         int *errorp = &pio->io_child_error[zio->io_child_type];
  709 
  710         mutex_enter(&pio->io_lock);
  711         if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
  712                 *errorp = zio_worst_error(*errorp, zio->io_error);
  713         pio->io_reexecute |= zio->io_reexecute;
  714         ASSERT3U(*countp, >, 0);
  715 
  716         (*countp)--;
  717 
  718         if (*countp == 0 && pio->io_stall == countp) {
  719                 zio_taskq_type_t type =
  720                     pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
  721                     ZIO_TASKQ_INTERRUPT;
  722                 pio->io_stall = NULL;
  723                 mutex_exit(&pio->io_lock);
  724 
  725                 /*
  726                  * If we can tell the caller to execute this parent next, do
  727                  * so. We only do this if the parent's zio type matches the
  728                  * child's type. Otherwise dispatch the parent zio in its
  729                  * own taskq.
  730                  *
  731                  * Having the caller execute the parent when possible reduces
  732                  * locking on the zio taskqs, reduces context switch
  733                  * overhead, and has no recursion penalty.  Note that one
  734                  * read from disk typically causes at least 3 zios: a
  735                  * zio_null(), the logical zio_read(), and then a physical
  736                  * zio.  When the physical zio completes, we are able to call
  737                  * zio_done() on all 3 of these zios from one invocation of
  738                  * zio_execute() by returning the parent back to
  739                  * zio_execute().  Since the parent isn't executed until this
  740                  * thread returns back to zio_execute(), the caller should do
  741                  * so promptly.
  742                  *
  743                  * In other cases, dispatching the parent prevents
  744                  * overflowing the stack when we have deeply nested
  745                  * parent-child relationships, as we do with the "mega zio"
  746                  * of writes for spa_sync(), and the chain of ZIL blocks.
  747                  */
  748                 if (next_to_executep != NULL && *next_to_executep == NULL &&
  749                     pio->io_type == zio->io_type) {
  750                         *next_to_executep = pio;
  751                 } else {
  752                         zio_taskq_dispatch(pio, type, B_FALSE);
  753                 }
  754         } else {
  755                 mutex_exit(&pio->io_lock);
  756         }
  757 }
  758 
  759 static void
  760 zio_inherit_child_errors(zio_t *zio, enum zio_child c)
  761 {
  762         if (zio->io_child_error[c] != 0 && zio->io_error == 0)
  763                 zio->io_error = zio->io_child_error[c];
  764 }
  765 
  766 int
  767 zio_bookmark_compare(const void *x1, const void *x2)
  768 {
  769         const zio_t *z1 = x1;
  770         const zio_t *z2 = x2;
  771 
  772         if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
  773                 return (-1);
  774         if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
  775                 return (1);
  776 
  777         if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
  778                 return (-1);
  779         if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
  780                 return (1);
  781 
  782         if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
  783                 return (-1);
  784         if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
  785                 return (1);
  786 
  787         if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
  788                 return (-1);
  789         if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
  790                 return (1);
  791 
  792         if (z1 < z2)
  793                 return (-1);
  794         if (z1 > z2)
  795                 return (1);
  796 
  797         return (0);
  798 }
  799 
  800 /*
  801  * ==========================================================================
  802  * Create the various types of I/O (read, write, free, etc)
  803  * ==========================================================================
  804  */
  805 static zio_t *
  806 zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
  807     abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
  808     void *private, zio_type_t type, zio_priority_t priority,
  809     zio_flag_t flags, vdev_t *vd, uint64_t offset,
  810     const zbookmark_phys_t *zb, enum zio_stage stage,
  811     enum zio_stage pipeline)
  812 {
  813         zio_t *zio;
  814 
  815         IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
  816         ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
  817         ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
  818 
  819         ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
  820         ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
  821         ASSERT(vd || stage == ZIO_STAGE_OPEN);
  822 
  823         IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
  824 
  825         zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
  826         memset(zio, 0, sizeof (zio_t));
  827 
  828         mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
  829         cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
  830 
  831         list_create(&zio->io_parent_list, sizeof (zio_link_t),
  832             offsetof(zio_link_t, zl_parent_node));
  833         list_create(&zio->io_child_list, sizeof (zio_link_t),
  834             offsetof(zio_link_t, zl_child_node));
  835         metaslab_trace_init(&zio->io_alloc_list);
  836 
  837         if (vd != NULL)
  838                 zio->io_child_type = ZIO_CHILD_VDEV;
  839         else if (flags & ZIO_FLAG_GANG_CHILD)
  840                 zio->io_child_type = ZIO_CHILD_GANG;
  841         else if (flags & ZIO_FLAG_DDT_CHILD)
  842                 zio->io_child_type = ZIO_CHILD_DDT;
  843         else
  844                 zio->io_child_type = ZIO_CHILD_LOGICAL;
  845 
  846         if (bp != NULL) {
  847                 zio->io_bp = (blkptr_t *)bp;
  848                 zio->io_bp_copy = *bp;
  849                 zio->io_bp_orig = *bp;
  850                 if (type != ZIO_TYPE_WRITE ||
  851                     zio->io_child_type == ZIO_CHILD_DDT)
  852                         zio->io_bp = &zio->io_bp_copy;  /* so caller can free */
  853                 if (zio->io_child_type == ZIO_CHILD_LOGICAL)
  854                         zio->io_logical = zio;
  855                 if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
  856                         pipeline |= ZIO_GANG_STAGES;
  857         }
  858 
  859         zio->io_spa = spa;
  860         zio->io_txg = txg;
  861         zio->io_done = done;
  862         zio->io_private = private;
  863         zio->io_type = type;
  864         zio->io_priority = priority;
  865         zio->io_vd = vd;
  866         zio->io_offset = offset;
  867         zio->io_orig_abd = zio->io_abd = data;
  868         zio->io_orig_size = zio->io_size = psize;
  869         zio->io_lsize = lsize;
  870         zio->io_orig_flags = zio->io_flags = flags;
  871         zio->io_orig_stage = zio->io_stage = stage;
  872         zio->io_orig_pipeline = zio->io_pipeline = pipeline;
  873         zio->io_pipeline_trace = ZIO_STAGE_OPEN;
  874 
  875         zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
  876         zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
  877 
  878         if (zb != NULL)
  879                 zio->io_bookmark = *zb;
  880 
  881         if (pio != NULL) {
  882                 zio->io_metaslab_class = pio->io_metaslab_class;
  883                 if (zio->io_logical == NULL)
  884                         zio->io_logical = pio->io_logical;
  885                 if (zio->io_child_type == ZIO_CHILD_GANG)
  886                         zio->io_gang_leader = pio->io_gang_leader;
  887                 zio_add_child(pio, zio);
  888         }
  889 
  890         taskq_init_ent(&zio->io_tqent);
  891 
  892         return (zio);
  893 }
  894 
  895 void
  896 zio_destroy(zio_t *zio)
  897 {
  898         metaslab_trace_fini(&zio->io_alloc_list);
  899         list_destroy(&zio->io_parent_list);
  900         list_destroy(&zio->io_child_list);
  901         mutex_destroy(&zio->io_lock);
  902         cv_destroy(&zio->io_cv);
  903         kmem_cache_free(zio_cache, zio);
  904 }
  905 
  906 zio_t *
  907 zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
  908     void *private, zio_flag_t flags)
  909 {
  910         zio_t *zio;
  911 
  912         zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
  913             ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
  914             ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
  915 
  916         return (zio);
  917 }
  918 
  919 zio_t *
  920 zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
  921 {
  922         return (zio_null(NULL, spa, NULL, done, private, flags));
  923 }
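
/*
 * Usage sketch: zio_null()/zio_root() create interlock-only zios, commonly
 * used to gather a batch of children and wait for all of them; zio_wait()
 * and zio_nowait() are defined later in this file (`bps', `abds', `size',
 * `zb' and `my_done' are assumed):
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for (int i = 0; i < nbps; i++)
 *		zio_nowait(zio_read(rio, spa, &bps[i], abds[i], size,
 *		    my_done, NULL, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL,
 *		    &zb));
 *	int error = zio_wait(rio);	(collects the worst child error)
 */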
  924 
  925 static int
  926 zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
  927     enum blk_verify_flag blk_verify, const char *fmt, ...)
  928 {
  929         va_list adx;
  930         char buf[256];
  931 
  932         va_start(adx, fmt);
  933         (void) vsnprintf(buf, sizeof (buf), fmt, adx);
  934         va_end(adx);
  935 
  936         switch (blk_verify) {
  937         case BLK_VERIFY_HALT:
  938                 dprintf_bp(bp, "blkptr at %p dprintf_bp():", bp);
  939                 zfs_panic_recover("%s: %s", spa_name(spa), buf);
  940                 break;
  941         case BLK_VERIFY_LOG:
  942                 zfs_dbgmsg("%s: %s", spa_name(spa), buf);
  943                 break;
  944         case BLK_VERIFY_ONLY:
  945                 break;
  946         }
  947 
  948         return (1);
  949 }
  950 
  951 /*
  952  * Verify the block pointer fields contain reasonable values.  This means
  953  * it only contains known object types, checksum/compression identifiers,
  954  * block sizes within the maximum allowed limits, valid DVAs, etc.
  955  *
  956  * If everything checks out B_TRUE is returned.  The blk_verify
  957  * argument controls the behavior when an invalid field is detected.
  958  *
  959  * Modes for zfs_blkptr_verify:
  960  *   1) BLK_VERIFY_ONLY (evaluate the block)
  961  *   2) BLK_VERIFY_LOG (evaluate the block and log problems)
  962  *   3) BLK_VERIFY_HALT (call zfs_panic_recover on error)
  963  */
  964 boolean_t
  965 zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp, boolean_t config_held,
  966     enum blk_verify_flag blk_verify)
  967 {
  968         int errors = 0;
  969 
  970         if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
  971                 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
  972                     "blkptr at %p has invalid TYPE %llu",
  973                     bp, (longlong_t)BP_GET_TYPE(bp));
  974         }
  975         if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
  976                 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
  977                     "blkptr at %p has invalid CHECKSUM %llu",
  978                     bp, (longlong_t)BP_GET_CHECKSUM(bp));
  979         }
  980         if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
  981                 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
  982                     "blkptr at %p has invalid COMPRESS %llu",
  983                     bp, (longlong_t)BP_GET_COMPRESS(bp));
  984         }
  985         if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
  986                 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
  987                     "blkptr at %p has invalid LSIZE %llu",
  988                     bp, (longlong_t)BP_GET_LSIZE(bp));
  989         }
  990         if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
  991                 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
  992                     "blkptr at %p has invalid PSIZE %llu",
  993                     bp, (longlong_t)BP_GET_PSIZE(bp));
  994         }
  995 
  996         if (BP_IS_EMBEDDED(bp)) {
  997                 if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
  998                         errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
  999                             "blkptr at %p has invalid ETYPE %llu",
 1000                             bp, (longlong_t)BPE_GET_ETYPE(bp));
 1001                 }
 1002         }
 1003 
 1004         /*
 1005          * Do not verify individual DVAs if the config is not trusted. This
 1006          * will be done once the zio is executed in vdev_mirror_map_alloc.
 1007          */
 1008         if (!spa->spa_trust_config)
 1009                 return (errors == 0);
 1010 
 1011         if (!config_held)
 1012                 spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
 1013         else
 1014                 ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
 1015         /*
 1016          * Pool-specific checks.
 1017          *
 1018          * Note: it would be nice to verify that the blk_birth and
 1019          * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
 1020          * allows the birth time of log blocks (and dmu_sync()-ed blocks
 1021          * that are in the log) to be arbitrarily large.
 1022          */
 1023         for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
 1024                 const dva_t *dva = &bp->blk_dva[i];
 1025                 uint64_t vdevid = DVA_GET_VDEV(dva);
 1026 
 1027                 if (vdevid >= spa->spa_root_vdev->vdev_children) {
 1028                         errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 1029                             "blkptr at %p DVA %u has invalid VDEV %llu",
 1030                             bp, i, (longlong_t)vdevid);
 1031                         continue;
 1032                 }
 1033                 vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
 1034                 if (vd == NULL) {
 1035                         errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 1036                             "blkptr at %p DVA %u has invalid VDEV %llu",
 1037                             bp, i, (longlong_t)vdevid);
 1038                         continue;
 1039                 }
 1040                 if (vd->vdev_ops == &vdev_hole_ops) {
 1041                         errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 1042                             "blkptr at %p DVA %u has hole VDEV %llu",
 1043                             bp, i, (longlong_t)vdevid);
 1044                         continue;
 1045                 }
 1046                 if (vd->vdev_ops == &vdev_missing_ops) {
 1047                         /*
 1048                          * "missing" vdevs are valid during import, but we
 1049                          * don't have their detailed info (e.g. asize), so
 1050                          * we can't perform any more checks on them.
 1051                          */
 1052                         continue;
 1053                 }
 1054                 uint64_t offset = DVA_GET_OFFSET(dva);
 1055                 uint64_t asize = DVA_GET_ASIZE(dva);
 1056                 if (DVA_GET_GANG(dva))
 1057                         asize = vdev_gang_header_asize(vd);
 1058                 if (offset + asize > vd->vdev_asize) {
 1059                         errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 1060                             "blkptr at %p DVA %u has invalid OFFSET %llu",
 1061                             bp, i, (longlong_t)offset);
 1062                 }
 1063         }
 1064         if (errors > 0)
 1065                 dprintf_bp(bp, "blkptr at %p dprintf_bp():", bp);
 1066         if (!config_held)
 1067                 spa_config_exit(spa, SCL_VDEV, bp);
 1068 
 1069         return (errors == 0);
 1070 }
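
/*
 * Usage sketch: callers choose the mode based on how much damage they can
 * tolerate; zio_free() below uses BLK_VERIFY_HALT, while a best-effort
 * consumer might only log and bail out (hypothetical):
 *
 *	if (!zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_LOG))
 *		return (SET_ERROR(ECKSUM));
 */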
 1071 
 1072 boolean_t
 1073 zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
 1074 {
 1075         (void) bp;
 1076         uint64_t vdevid = DVA_GET_VDEV(dva);
 1077 
 1078         if (vdevid >= spa->spa_root_vdev->vdev_children)
 1079                 return (B_FALSE);
 1080 
 1081         vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
 1082         if (vd == NULL)
 1083                 return (B_FALSE);
 1084 
 1085         if (vd->vdev_ops == &vdev_hole_ops)
 1086                 return (B_FALSE);
 1087 
 1088         if (vd->vdev_ops == &vdev_missing_ops) {
 1089                 return (B_FALSE);
 1090         }
 1091 
 1092         uint64_t offset = DVA_GET_OFFSET(dva);
 1093         uint64_t asize = DVA_GET_ASIZE(dva);
 1094 
 1095         if (DVA_GET_GANG(dva))
 1096                 asize = vdev_gang_header_asize(vd);
 1097         if (offset + asize > vd->vdev_asize)
 1098                 return (B_FALSE);
 1099 
 1100         return (B_TRUE);
 1101 }
 1102 
 1103 zio_t *
 1104 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
 1105     abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
 1106     zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
 1107 {
 1108         zio_t *zio;
 1109 
 1110         zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
 1111             data, size, size, done, private,
 1112             ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
 1113             ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
 1114             ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
 1115 
 1116         return (zio);
 1117 }
 1118 
 1119 zio_t *
 1120 zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
 1121     abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
 1122     zio_done_func_t *ready, zio_done_func_t *children_ready,
 1123     zio_done_func_t *physdone, zio_done_func_t *done,
 1124     void *private, zio_priority_t priority, zio_flag_t flags,
 1125     const zbookmark_phys_t *zb)
 1126 {
 1127         zio_t *zio;
 1128 
 1129         ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
 1130             zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
 1131             zp->zp_compress >= ZIO_COMPRESS_OFF &&
 1132             zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
 1133             DMU_OT_IS_VALID(zp->zp_type) &&
 1134             zp->zp_level < 32 &&
 1135             zp->zp_copies > 0 &&
 1136             zp->zp_copies <= spa_max_replication(spa));
 1137 
 1138         zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
 1139             ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
 1140             ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
 1141             ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
 1142 
 1143         zio->io_ready = ready;
 1144         zio->io_children_ready = children_ready;
 1145         zio->io_physdone = physdone;
 1146         zio->io_prop = *zp;
 1147 
 1148         /*
 1149          * Data can be NULL if we are going to call zio_write_override() to
 1150          * provide the already-allocated BP.  But we may need the data to
 1151          * verify a dedup hit (if requested).  In this case, don't try to
 1152          * dedup (just take the already-allocated BP verbatim). Encrypted
 1153          * dedup blocks need data as well so we also disable dedup in this
 1154  * dedup blocks need data as well, so we also disable dedup in this
 1155          */
 1156         if (data == NULL &&
 1157             (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
 1158                 zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
 1159         }
 1160 
 1161         return (zio);
 1162 }
 1163 
 1164 zio_t *
 1165 zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
 1166     uint64_t size, zio_done_func_t *done, void *private,
 1167     zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
 1168 {
 1169         zio_t *zio;
 1170 
 1171         zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
 1172             ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
 1173             ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
 1174 
 1175         return (zio);
 1176 }
 1177 
 1178 void
 1179 zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
 1180 {
 1181         ASSERT(zio->io_type == ZIO_TYPE_WRITE);
 1182         ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
 1183         ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
 1184         ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
 1185 
 1186         /*
 1187          * We must reset the io_prop to match the values that existed
 1188          * when the bp was first written by dmu_sync() keeping in mind
 1189          * that nopwrite and dedup are mutually exclusive.
 1190          */
 1191         zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
 1192         zio->io_prop.zp_nopwrite = nopwrite;
 1193         zio->io_prop.zp_copies = copies;
 1194         zio->io_bp_override = bp;
 1195 }
 1196 
 1197 void
 1198 zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
 1199 {
 1200 
 1201         (void) zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_HALT);
 1202 
 1203         /*
 1204          * The check for EMBEDDED is a performance optimization.  We
 1205          * process the free here (by ignoring it) rather than
 1206          * putting it on the list and then processing it in zio_free_sync().
 1207          */
 1208         if (BP_IS_EMBEDDED(bp))
 1209                 return;
 1210 
 1211         /*
 1212          * Frees that are for the currently-syncing txg, are not going to be
 1213          * deferred, and which will not need to do a read (i.e. not GANG or
 1214          * DEDUP), can be processed immediately.  Otherwise, put them on the
 1215          * in-memory list for later processing.
 1216          *
 1217          * Note that we only defer frees after zfs_sync_pass_deferred_free
 1218          * when the log space map feature is disabled. [see relevant comment
 1219          * in spa_sync_iterate_to_convergence()]
 1220          */
 1221         if (BP_IS_GANG(bp) ||
 1222             BP_GET_DEDUP(bp) ||
 1223             txg != spa->spa_syncing_txg ||
 1224             (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
 1225             !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))) {
 1226                 metaslab_check_free(spa, bp);
 1227                 bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
 1228         } else {
 1229                 VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
 1230         }
 1231 }
 1232 
 1233 /*
 1234  * To improve performance, this function may return NULL if we were able
 1235  * to do the free immediately.  This avoids the cost of creating a zio
 1236  * (and linking it to the parent, etc).
 1237  */
 1238 zio_t *
 1239 zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
 1240     zio_flag_t flags)
 1241 {
 1242         ASSERT(!BP_IS_HOLE(bp));
 1243         ASSERT(spa_syncing_txg(spa) == txg);
 1244 
 1245         if (BP_IS_EMBEDDED(bp))
 1246                 return (NULL);
 1247 
 1248         metaslab_check_free(spa, bp);
 1249         arc_freed(spa, bp);
 1250         dsl_scan_freed(spa, bp);
 1251 
 1252         if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp)) {
 1253                 /*
 1254                  * GANG and DEDUP blocks can induce a read (for the gang block
 1255                  * header, or the DDT), so issue them asynchronously so that
 1256                  * this thread is not tied up.
 1257                  */
 1258                 enum zio_stage stage =
 1259                     ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;
 1260 
 1261                 return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
 1262                     BP_GET_PSIZE(bp), NULL, NULL,
 1263                     ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
 1264                     flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
 1265         } else {
 1266                 metaslab_free(spa, bp, txg, B_FALSE);
 1267                 return (NULL);
 1268         }
 1269 }
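
/*
 * Usage sketch: because the free may be completed inline, callers must treat
 * a NULL return as success rather than an error (a defensive caller):
 *
 *	zio_t *fio = zio_free_sync(pio, spa, txg, bp, 0);
 *	if (fio != NULL)
 *		zio_nowait(fio);
 */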
 1270 
 1271 zio_t *
 1272 zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
 1273     zio_done_func_t *done, void *private, zio_flag_t flags)
 1274 {
 1275         zio_t *zio;
 1276 
 1277         (void) zfs_blkptr_verify(spa, bp, flags & ZIO_FLAG_CONFIG_WRITER,
 1278             BLK_VERIFY_HALT);
 1279 
 1280         if (BP_IS_EMBEDDED(bp))
 1281                 return (zio_null(pio, spa, NULL, NULL, NULL, 0));
 1282 
 1283         /*
 1284          * A claim is an allocation of a specific block.  Claims are needed
 1285          * to support immediate writes in the intent log.  The issue is that
 1286          * immediate writes contain committed data, but in a txg that was
 1287          * *not* committed.  Upon opening the pool after an unclean shutdown,
 1288          * the intent log claims all blocks that contain immediate write data
 1289          * so that the SPA knows they're in use.
 1290          *
 1291          * All claims *must* be resolved in the first txg -- before the SPA
 1292          * starts allocating blocks -- so that nothing is allocated twice.
 1293          * If txg == 0 we just verify that the block is claimable.
 1294          */
 1295         ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
 1296             spa_min_claim_txg(spa));
 1297         ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
 1298         ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));       /* zdb(8) */
 1299 
 1300         zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
 1301             BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
 1302             flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
 1303         ASSERT0(zio->io_queued_timestamp);
 1304 
 1305         return (zio);
 1306 }
 1307 
 1308 zio_t *
 1309 zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
 1310     zio_done_func_t *done, void *private, zio_flag_t flags)
 1311 {
 1312         zio_t *zio;
 1313         int c;
 1314 
 1315         if (vd->vdev_children == 0) {
 1316                 zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
 1317                     ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
 1318                     ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);
 1319 
 1320                 zio->io_cmd = cmd;
 1321         } else {
 1322                 zio = zio_null(pio, spa, NULL, NULL, NULL, flags);
 1323 
 1324                 for (c = 0; c < vd->vdev_children; c++)
 1325                         zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
 1326                             done, private, flags));
 1327         }
 1328 
 1329         return (zio);
 1330 }
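
/*
 * Usage sketch: the main consumer is cache flushing, which fans the ioctl out
 * across a vdev's children and ignores individual failures (compare
 * zio_flush() later in this file):
 *
 *	zio_nowait(zio_ioctl(pio, spa, vd, DKIOCFLUSHWRITECACHE, NULL, NULL,
 *	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
 */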
 1331 
 1332 zio_t *
 1333 zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
 1334     zio_done_func_t *done, void *private, zio_priority_t priority,
 1335     zio_flag_t flags, enum trim_flag trim_flags)
 1336 {
 1337         zio_t *zio;
 1338 
 1339         ASSERT0(vd->vdev_children);
 1340         ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
 1341         ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
 1342         ASSERT3U(size, !=, 0);
 1343 
 1344         zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
 1345             private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
 1346             vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
 1347         zio->io_trim_flags = trim_flags;
 1348 
 1349         return (zio);
 1350 }
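
/*
 * Illustrative sketch (editor's addition, not part of zio.c): zio_trim()
 * asserts that offset and size are multiples of the leaf vdev's sector size
 * (1 << vdev_ashift).  A hypothetical caller can clip an arbitrary byte
 * range to whole sectors first; P2ROUNDUP()/P2ALIGN() come from
 * <sys/sysmacros.h>, already included above, and trim_flags of 0 is assumed
 * here to request an ordinary (non-secure) TRIM.
 */
static zio_t *
example_trim_range(zio_t *pio, vdev_t *vd, uint64_t start, uint64_t end,
    zio_flag_t flags)
{
        uint64_t align = 1ULL << vd->vdev_ashift;
        uint64_t offset = P2ROUNDUP(start, align);      /* round start up */
        uint64_t limit = P2ALIGN(end, align);           /* round end down */

        if (limit <= offset)
                return (NULL);          /* no whole sectors to trim */

        return (zio_trim(pio, vd, offset, limit - offset, NULL, NULL,
            ZIO_PRIORITY_TRIM, flags, 0));
}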
 1351 
 1352 zio_t *
 1353 zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
 1354     abd_t *data, int checksum, zio_done_func_t *done, void *private,
 1355     zio_priority_t priority, zio_flag_t flags, boolean_t labels)
 1356 {
 1357         zio_t *zio;
 1358 
 1359         ASSERT(vd->vdev_children == 0);
 1360         ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
 1361             offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
 1362         ASSERT3U(offset + size, <=, vd->vdev_psize);
 1363 
 1364         zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
 1365             private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
 1366             offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
 1367 
 1368         zio->io_prop.zp_checksum = checksum;
 1369 
 1370         return (zio);
 1371 }
 1372 
 1373 zio_t *
 1374 zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
 1375     abd_t *data, int checksum, zio_done_func_t *done, void *private,
 1376     zio_priority_t priority, zio_flag_t flags, boolean_t labels)
 1377 {
 1378         zio_t *zio;
 1379 
 1380         ASSERT(vd->vdev_children == 0);
 1381         ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
 1382             offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
 1383         ASSERT3U(offset + size, <=, vd->vdev_psize);
 1384 
 1385         zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
 1386             private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
 1387             offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
 1388 
 1389         zio->io_prop.zp_checksum = checksum;
 1390 
 1391         if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
 1392                 /*
 1393                  * zec checksums are necessarily destructive -- they modify
 1394                  * the end of the write buffer to hold the verifier/checksum.
 1395                  * Therefore, we must make a local copy in case the data is
 1396                  * being written to multiple places in parallel.
 1397                  */
 1398                 abd_t *wbuf = abd_alloc_sametype(data, size);
 1399                 abd_copy(wbuf, data, size);
 1400 
 1401                 zio_push_transform(zio, wbuf, size, size, NULL);
 1402         }
 1403 
 1404         return (zio);
 1405 }
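
/*
 * Illustrative sketch (editor's addition, not part of zio.c): physical I/O
 * is addressed by <vdev, offset> rather than by block pointer.  A
 * hypothetical synchronous read of a region of a leaf vdev, hung off a
 * private root zio, might look like this; ZIO_CHECKSUM_OFF skips
 * verification, and the final B_FALSE says the range is not limited to the
 * label areas.
 */
static int
example_read_phys(vdev_t *vd, uint64_t offset, uint64_t size, abd_t *abd)
{
        zio_t *root = zio_root(vd->vdev_spa, NULL, NULL, ZIO_FLAG_CANFAIL);

        zio_nowait(zio_read_phys(root, vd, offset, size, abd,
            ZIO_CHECKSUM_OFF, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
            ZIO_FLAG_CANFAIL, B_FALSE));

        return (zio_wait(root));
}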
 1406 
 1407 /*
 1408  * Create a child I/O to do some work for us.
 1409  */
 1410 zio_t *
 1411 zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
 1412     abd_t *data, uint64_t size, int type, zio_priority_t priority,
 1413     zio_flag_t flags, zio_done_func_t *done, void *private)
 1414 {
 1415         enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
 1416         zio_t *zio;
 1417 
 1418         /*
 1419          * vdev child I/Os do not propagate their error to the parent.
 1420          * Therefore, for correct operation the caller *must* check for
 1421          * and handle the error in the child I/O's done callback.
 1422          * The only exceptions are I/Os that we don't care about
 1423          * (OPTIONAL or REPAIR).
 1424          */
 1425         ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
 1426             done != NULL);
 1427 
 1428         if (type == ZIO_TYPE_READ && bp != NULL) {
 1429                 /*
 1430                  * If we have the bp, then the child should perform the
 1431                  * checksum and the parent need not.  This pushes error
 1432                  * detection as close to the leaves as possible and
 1433                  * eliminates redundant checksums in the interior nodes.
 1434                  */
 1435                 pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
 1436                 pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
 1437         }
 1438 
 1439         if (vd->vdev_ops->vdev_op_leaf) {
 1440                 ASSERT0(vd->vdev_children);
 1441                 offset += VDEV_LABEL_START_SIZE;
 1442         }
 1443 
 1444         flags |= ZIO_VDEV_CHILD_FLAGS(pio);
 1445 
 1446         /*
 1447          * If we've decided to do a repair, the write is not speculative --
 1448          * even if the original read was.
 1449          */
 1450         if (flags & ZIO_FLAG_IO_REPAIR)
 1451                 flags &= ~ZIO_FLAG_SPECULATIVE;
 1452 
 1453         /*
 1454          * If we're creating a child I/O that is not associated with a
 1455          * top-level vdev, then the child zio is not an allocating I/O.
 1456          * If this is a retried I/O then we ignore it since we will
 1457          * have already processed the original allocating I/O.
 1458          */
 1459         if (flags & ZIO_FLAG_IO_ALLOCATING &&
 1460             (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
 1461                 ASSERT(pio->io_metaslab_class != NULL);
 1462                 ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
 1463                 ASSERT(type == ZIO_TYPE_WRITE);
 1464                 ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
 1465                 ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
 1466                 ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
 1467                     pio->io_child_type == ZIO_CHILD_GANG);
 1468 
 1469                 flags &= ~ZIO_FLAG_IO_ALLOCATING;
 1470         }
 1471 
 1472 
 1473         zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
 1474             done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
 1475             ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
 1476         ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
 1477 
 1478         zio->io_physdone = pio->io_physdone;
 1479         if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
 1480                 zio->io_logical->io_phys_children++;
 1481 
 1482         return (zio);
 1483 }
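
/*
 * Illustrative sketch (editor's addition, not part of zio.c): as the comment
 * at the top of zio_vdev_child_io() notes, a child's error is not propagated
 * to the parent, so the caller's done callback must notice it.  This
 * hypothetical callback records the first error in a caller-supplied int
 * passed through io_private; a real caller issuing children concurrently
 * would need its own locking or atomics around the shared error.
 */
static void
example_child_done(zio_t *zio)
{
        int *errp = zio->io_private;

        if (zio->io_error != 0 && *errp == 0)
                *errp = zio->io_error;
}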
 1484 
 1485 zio_t *
 1486 zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
 1487     zio_type_t type, zio_priority_t priority, zio_flag_t flags,
 1488     zio_done_func_t *done, void *private)
 1489 {
 1490         zio_t *zio;
 1491 
 1492         ASSERT(vd->vdev_ops->vdev_op_leaf);
 1493 
 1494         zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
 1495             data, size, size, done, private, type, priority,
 1496             flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
 1497             vd, offset, NULL,
 1498             ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
 1499 
 1500         return (zio);
 1501 }
 1502 
 1503 void
 1504 zio_flush(zio_t *zio, vdev_t *vd)
 1505 {
 1506         zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
 1507             NULL, NULL,
 1508             ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
 1509 }
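
/*
 * Illustrative sketch (editor's addition, not part of zio.c): because
 * zio_ioctl() fans out to every child vdev, flushing the write caches of all
 * leaf vdevs in a pool reduces to one zio_flush() call on the root vdev
 * under a private root zio.  A real caller would hold the appropriate spa
 * config lock while touching the vdev tree.
 */
static void
example_flush_pool(spa_t *spa)
{
        zio_t *root = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

        zio_flush(root, spa->spa_root_vdev);
        (void) zio_wait(root);
}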
 1510 
 1511 void
 1512 zio_shrink(zio_t *zio, uint64_t size)
 1513 {
 1514         ASSERT3P(zio->io_executor, ==, NULL);
 1515         ASSERT3U(zio->io_orig_size, ==, zio->io_size);
 1516         ASSERT3U(size, <=, zio->io_size);
 1517 
 1518         /*
 1519          * We don't shrink for raidz because of problems with the
 1520          * reconstruction when reading back less than the block size.
 1521          * Note, BP_IS_RAIDZ() assumes no compression.
 1522          */
 1523         ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
 1524         if (!BP_IS_RAIDZ(zio->io_bp)) {
 1525                 /* we are not doing a raw write */
 1526                 ASSERT3U(zio->io_size, ==, zio->io_lsize);
 1527                 zio->io_orig_size = zio->io_size = zio->io_lsize = size;
 1528         }
 1529 }
 1530 
 1531 /*
 1532  * ==========================================================================
 1533  * Prepare to read and write logical blocks
 1534  * ==========================================================================
 1535  */
 1536 
 1537 static zio_t *
 1538 zio_read_bp_init(zio_t *zio)
 1539 {
 1540         blkptr_t *bp = zio->io_bp;
 1541         uint64_t psize =
 1542             BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
 1543 
 1544         ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
 1545 
 1546         if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
 1547             zio->io_child_type == ZIO_CHILD_LOGICAL &&
 1548             !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
 1549                 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
 1550                     psize, psize, zio_decompress);
 1551         }
 1552 
 1553         if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
 1554             BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
 1555             zio->io_child_type == ZIO_CHILD_LOGICAL) {
 1556                 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
 1557                     psize, psize, zio_decrypt);
 1558         }
 1559 
 1560         if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
 1561                 int psize = BPE_GET_PSIZE(bp);
 1562                 void *data = abd_borrow_buf(zio->io_abd, psize);
 1563 
 1564                 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
 1565                 decode_embedded_bp_compressed(bp, data);
 1566                 abd_return_buf_copy(zio->io_abd, data, psize);
 1567         } else {
 1568                 ASSERT(!BP_IS_EMBEDDED(bp));
 1569                 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
 1570         }
 1571 
 1572         if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
 1573                 zio->io_flags |= ZIO_FLAG_DONT_CACHE;
 1574 
 1575         if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
 1576                 zio->io_flags |= ZIO_FLAG_DONT_CACHE;
 1577 
 1578         if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
 1579                 zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
 1580 
 1581         return (zio);
 1582 }
 1583 
 1584 static zio_t *
 1585 zio_write_bp_init(zio_t *zio)
 1586 {
 1587         if (!IO_IS_ALLOCATING(zio))
 1588                 return (zio);
 1589 
 1590         ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
 1591 
 1592         if (zio->io_bp_override) {
 1593                 blkptr_t *bp = zio->io_bp;
 1594                 zio_prop_t *zp = &zio->io_prop;
 1595 
 1596                 ASSERT(bp->blk_birth != zio->io_txg);
 1597                 ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);
 1598 
 1599                 *bp = *zio->io_bp_override;
 1600                 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
 1601 
 1602                 if (BP_IS_EMBEDDED(bp))
 1603                         return (zio);
 1604 
 1605                 /*
 1606                  * If we've been overridden and nopwrite is set then
 1607                  * set the flag accordingly to indicate that a nopwrite
 1608                  * has already occurred.
 1609                  */
 1610                 if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
 1611                         ASSERT(!zp->zp_dedup);
 1612                         ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
 1613                         zio->io_flags |= ZIO_FLAG_NOPWRITE;
 1614                         return (zio);
 1615                 }
 1616 
 1617                 ASSERT(!zp->zp_nopwrite);
 1618 
 1619                 if (BP_IS_HOLE(bp) || !zp->zp_dedup)
 1620                         return (zio);
 1621 
 1622                 ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
 1623                     ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
 1624 
 1625                 if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
 1626                     !zp->zp_encrypt) {
 1627                         BP_SET_DEDUP(bp, 1);
 1628                         zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
 1629                         return (zio);
 1630                 }
 1631 
 1632                 /*
 1633                  * We were unable to handle this as an override bp, treat
 1634                  * it as a regular write I/O.
 1635                  */
 1636                 zio->io_bp_override = NULL;
 1637                 *bp = zio->io_bp_orig;
 1638                 zio->io_pipeline = zio->io_orig_pipeline;
 1639         }
 1640 
 1641         return (zio);
 1642 }
 1643 
 1644 static zio_t *
 1645 zio_write_compress(zio_t *zio)
 1646 {
 1647         spa_t *spa = zio->io_spa;
 1648         zio_prop_t *zp = &zio->io_prop;
 1649         enum zio_compress compress = zp->zp_compress;
 1650         blkptr_t *bp = zio->io_bp;
 1651         uint64_t lsize = zio->io_lsize;
 1652         uint64_t psize = zio->io_size;
 1653         uint32_t pass = 1;
 1654 
 1655         /*
 1656          * If our children haven't all reached the ready stage,
 1657          * wait for them and then repeat this pipeline stage.
 1658          */
 1659         if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
 1660             ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
 1661                 return (NULL);
 1662         }
 1663 
 1664         if (!IO_IS_ALLOCATING(zio))
 1665                 return (zio);
 1666 
 1667         if (zio->io_children_ready != NULL) {
 1668                 /*
 1669                  * Now that all our children are ready, run the callback
 1670                  * associated with this zio in case it wants to modify the
 1671                  * data to be written.
 1672                  */
 1673                 ASSERT3U(zp->zp_level, >, 0);
 1674                 zio->io_children_ready(zio);
 1675         }
 1676 
 1677         ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
 1678         ASSERT(zio->io_bp_override == NULL);
 1679 
 1680         if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
 1681                 /*
 1682                  * We're rewriting an existing block, which means we're
 1683                  * working on behalf of spa_sync().  For spa_sync() to
 1684                  * converge, it must eventually be the case that we don't
 1685                  * have to allocate new blocks.  But compression changes
 1686                  * the blocksize, which forces a reallocate, and makes
 1687                  * convergence take longer.  Therefore, after the first
 1688                  * few passes, stop compressing to ensure convergence.
 1689                  */
 1690                 pass = spa_sync_pass(spa);
 1691 
 1692                 ASSERT(zio->io_txg == spa_syncing_txg(spa));
 1693                 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
 1694                 ASSERT(!BP_GET_DEDUP(bp));
 1695 
 1696                 if (pass >= zfs_sync_pass_dont_compress)
 1697                         compress = ZIO_COMPRESS_OFF;
 1698 
 1699                 /* Make sure someone doesn't change their mind on overwrites */
 1700                 ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
 1701                     spa_max_replication(spa)) == BP_GET_NDVAS(bp));
 1702         }
 1703 
 1704         /* If it's a compressed write that is not raw, compress the buffer. */
 1705         if (compress != ZIO_COMPRESS_OFF &&
 1706             !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
 1707                 void *cbuf = zio_buf_alloc(lsize);
 1708                 psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize,
 1709                     zp->zp_complevel);
 1710                 if (psize == 0 || psize >= lsize) {
 1711                         compress = ZIO_COMPRESS_OFF;
 1712                         zio_buf_free(cbuf, lsize);
 1713                 } else if (!zp->zp_dedup && !zp->zp_encrypt &&
 1714                     psize <= BPE_PAYLOAD_SIZE &&
 1715                     zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
 1716                     spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
 1717                         encode_embedded_bp_compressed(bp,
 1718                             cbuf, compress, lsize, psize);
 1719                         BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
 1720                         BP_SET_TYPE(bp, zio->io_prop.zp_type);
 1721                         BP_SET_LEVEL(bp, zio->io_prop.zp_level);
 1722                         zio_buf_free(cbuf, lsize);
 1723                         bp->blk_birth = zio->io_txg;
 1724                         zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
 1725                         ASSERT(spa_feature_is_active(spa,
 1726                             SPA_FEATURE_EMBEDDED_DATA));
 1727                         return (zio);
 1728                 } else {
 1729                         /*
 1730                          * Round compressed size up to the minimum allocation
 1731                          * size of the smallest-ashift device, and zero the
 1732                          * tail. This ensures that the compressed size of the
 1733                          * BP (and thus compressratio property) are correct,
 1734                          * in that we charge for the padding used to fill out
 1735                          * the last sector.
 1736                          */
 1737                         ASSERT3U(spa->spa_min_alloc, >=, SPA_MINBLOCKSHIFT);
 1738                         size_t rounded = (size_t)roundup(psize,
 1739                             spa->spa_min_alloc);
 1740                         if (rounded >= lsize) {
 1741                                 compress = ZIO_COMPRESS_OFF;
 1742                                 zio_buf_free(cbuf, lsize);
 1743                                 psize = lsize;
 1744                         } else {
 1745                                 abd_t *cdata = abd_get_from_buf(cbuf, lsize);
 1746                                 abd_take_ownership_of_buf(cdata, B_TRUE);
 1747                                 abd_zero_off(cdata, psize, rounded - psize);
 1748                                 psize = rounded;
 1749                                 zio_push_transform(zio, cdata,
 1750                                     psize, lsize, NULL);
 1751                         }
 1752                 }
 1753 
 1754                 /*
 1755                  * We were unable to handle this as an override bp, treat
 1756                  * it as a regular write I/O.
 1757                  */
 1758                 zio->io_bp_override = NULL;
 1759                 *bp = zio->io_bp_orig;
 1760                 zio->io_pipeline = zio->io_orig_pipeline;
 1761 
 1762         } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
 1763             zp->zp_type == DMU_OT_DNODE) {
 1764                 /*
 1765                  * The DMU actually relies on the zio layer's compression
 1766                  * to free metadnode blocks that have had all contained
 1767                  * dnodes freed. As a result, even when doing a raw
 1768                  * receive, we must check whether the block can be compressed
 1769                  * to a hole.
 1770                  */
 1771                 psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
 1772                     zio->io_abd, NULL, lsize, zp->zp_complevel);
 1773                 if (psize == 0 || psize >= lsize)
 1774                         compress = ZIO_COMPRESS_OFF;
 1775         } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
 1776             !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
 1777                 /*
 1778                  * If we are raw receiving an encrypted dataset we should not
 1779                  * take this codepath because it will change the on-disk block
 1780                  * and decryption will fail.
 1781                  */
 1782                 size_t rounded = MIN((size_t)roundup(psize,
 1783                     spa->spa_min_alloc), lsize);
 1784 
 1785                 if (rounded != psize) {
 1786                         abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
 1787                         abd_zero_off(cdata, psize, rounded - psize);
 1788                         abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
 1789                         psize = rounded;
 1790                         zio_push_transform(zio, cdata,
 1791                             psize, rounded, NULL);
 1792                 }
 1793         } else {
 1794                 ASSERT3U(psize, !=, 0);
 1795         }
 1796 
 1797         /*
 1798          * The final pass of spa_sync() must be all rewrites, but the first
 1799          * few passes offer a trade-off: allocating blocks defers convergence,
 1800          * but newly allocated blocks are sequential, so they can be written
 1801          * to disk faster.  Therefore, we allow the first few passes of
 1802          * spa_sync() to allocate new blocks, but force rewrites after that.
 1803          * There should only be a handful of blocks after pass 1 in any case.
 1804          */
 1805         if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
 1806             BP_GET_PSIZE(bp) == psize &&
 1807             pass >= zfs_sync_pass_rewrite) {
 1808                 VERIFY3U(psize, !=, 0);
 1809                 enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
 1810 
 1811                 zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
 1812                 zio->io_flags |= ZIO_FLAG_IO_REWRITE;
 1813         } else {
 1814                 BP_ZERO(bp);
 1815                 zio->io_pipeline = ZIO_WRITE_PIPELINE;
 1816         }
 1817 
 1818         if (psize == 0) {
 1819                 if (zio->io_bp_orig.blk_birth != 0 &&
 1820                     spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
 1821                         BP_SET_LSIZE(bp, lsize);
 1822                         BP_SET_TYPE(bp, zp->zp_type);
 1823                         BP_SET_LEVEL(bp, zp->zp_level);
 1824                         BP_SET_BIRTH(bp, zio->io_txg, 0);
 1825                 }
 1826                 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
 1827         } else {
 1828                 ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
 1829                 BP_SET_LSIZE(bp, lsize);
 1830                 BP_SET_TYPE(bp, zp->zp_type);
 1831                 BP_SET_LEVEL(bp, zp->zp_level);
 1832                 BP_SET_PSIZE(bp, psize);
 1833                 BP_SET_COMPRESS(bp, compress);
 1834                 BP_SET_CHECKSUM(bp, zp->zp_checksum);
 1835                 BP_SET_DEDUP(bp, zp->zp_dedup);
 1836                 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
 1837                 if (zp->zp_dedup) {
 1838                         ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
 1839                         ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
 1840                         ASSERT(!zp->zp_encrypt ||
 1841                             DMU_OT_IS_ENCRYPTED(zp->zp_type));
 1842                         zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
 1843                 }
 1844                 if (zp->zp_nopwrite) {
 1845                         ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
 1846                         ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
 1847                         zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
 1848                 }
 1849         }
 1850         return (zio);
 1851 }
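
/*
 * Illustrative sketch (editor's addition, not part of zio.c): this helper
 * restates the size arithmetic above that decides whether a compressed
 * buffer is worth keeping.  The compressed length is charged at the next
 * multiple of spa_min_alloc (the smallest allocation any top-level vdev can
 * make); if the rounded size is not smaller than the logical size,
 * compression buys nothing and is dropped.
 */
static boolean_t
example_compression_worthwhile(spa_t *spa, uint64_t lsize, uint64_t psize)
{
        uint64_t rounded = roundup(psize, spa->spa_min_alloc);

        /* e.g. lsize 16K, psize 3100, min_alloc 4K: charged 4K, keep it */
        return (rounded < lsize);
}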
 1852 
 1853 static zio_t *
 1854 zio_free_bp_init(zio_t *zio)
 1855 {
 1856         blkptr_t *bp = zio->io_bp;
 1857 
 1858         if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
 1859                 if (BP_GET_DEDUP(bp))
 1860                         zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
 1861         }
 1862 
 1863         ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
 1864 
 1865         return (zio);
 1866 }
 1867 
 1868 /*
 1869  * ==========================================================================
 1870  * Execute the I/O pipeline
 1871  * ==========================================================================
 1872  */
 1873 
 1874 static void
 1875 zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
 1876 {
 1877         spa_t *spa = zio->io_spa;
 1878         zio_type_t t = zio->io_type;
 1879         int flags = (cutinline ? TQ_FRONT : 0);
 1880 
 1881         /*
 1882          * If we're a config writer or a probe, the normal issue and
 1883          * interrupt threads may all be blocked waiting for the config lock.
 1884          * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
 1885          */
 1886         if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
 1887                 t = ZIO_TYPE_NULL;
 1888 
 1889         /*
 1890          * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
 1891          */
 1892         if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
 1893                 t = ZIO_TYPE_NULL;
 1894 
 1895         /*
 1896          * If this is a high priority I/O, then use the high priority taskq if
 1897          * available.
 1898          */
 1899         if ((zio->io_priority == ZIO_PRIORITY_NOW ||
 1900             zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
 1901             spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
 1902                 q++;
 1903 
 1904         ASSERT3U(q, <, ZIO_TASKQ_TYPES);
 1905 
 1906         /*
 1907          * NB: We are assuming that the zio can only be dispatched
 1908          * to a single taskq at a time.  It would be a grievous error
 1909          * to dispatch the zio to another taskq at the same time.
 1910          */
 1911         ASSERT(taskq_empty_ent(&zio->io_tqent));
 1912         spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags,
 1913             &zio->io_tqent);
 1914 }
 1915 
 1916 static boolean_t
 1917 zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
 1918 {
 1919         spa_t *spa = zio->io_spa;
 1920 
 1921         taskq_t *tq = taskq_of_curthread();
 1922 
 1923         for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
 1924                 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
 1925                 uint_t i;
 1926                 for (i = 0; i < tqs->stqs_count; i++) {
 1927                         if (tqs->stqs_taskq[i] == tq)
 1928                                 return (B_TRUE);
 1929                 }
 1930         }
 1931 
 1932         return (B_FALSE);
 1933 }
 1934 
 1935 static zio_t *
 1936 zio_issue_async(zio_t *zio)
 1937 {
 1938         zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
 1939 
 1940         return (NULL);
 1941 }
 1942 
 1943 void
 1944 zio_interrupt(void *zio)
 1945 {
 1946         zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
 1947 }
 1948 
 1949 void
 1950 zio_delay_interrupt(zio_t *zio)
 1951 {
 1952         /*
 1953          * The timeout_generic() function isn't defined in userspace, so
 1954          * rather than trying to implement the function, the zio delay
 1955          * functionality has been disabled for userspace builds.
 1956          */
 1957 
 1958 #ifdef _KERNEL
 1959         /*
 1960          * If io_target_timestamp is zero, then no delay has been registered
 1961          * for this IO, so skip the delay and issue it directly to the zio
 1962          * layer.
 1963          */
 1964         if (zio->io_target_timestamp != 0) {
 1965                 hrtime_t now = gethrtime();
 1966 
 1967                 if (now >= zio->io_target_timestamp) {
 1968                         /*
 1969                          * This IO has already taken longer than the target
 1970                          * delay to complete, so we don't want to delay it
 1971                          * any longer; we "miss" the delay and issue it
 1972                          * directly to the zio layer. This is likely due to
 1973                          * the target latency being set to a value less than
 1974                          * the underlying hardware can satisfy (e.g. delay
 1975                          * set to 1ms, but the disks take 10ms to complete an
 1976                          * IO request).
 1977                          */
 1978 
 1979                         DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
 1980                             hrtime_t, now);
 1981 
 1982                         zio_interrupt(zio);
 1983                 } else {
 1984                         taskqid_t tid;
 1985                         hrtime_t diff = zio->io_target_timestamp - now;
 1986                         clock_t expire_at_tick = ddi_get_lbolt() +
 1987                             NSEC_TO_TICK(diff);
 1988 
 1989                         DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
 1990                             hrtime_t, now, hrtime_t, diff);
 1991 
 1992                         if (NSEC_TO_TICK(diff) == 0) {
 1993                                 /* Our delay is less than a jiffy - just spin */
 1994                                 zfs_sleep_until(zio->io_target_timestamp);
 1995                                 zio_interrupt(zio);
 1996                         } else {
 1997                                 /*
 1998                                  * Use taskq_dispatch_delay() in place of
 1999                                  * OpenZFS's timeout_generic().
 2000                                  */
 2001                                 tid = taskq_dispatch_delay(system_taskq,
 2002                                     zio_interrupt, zio, TQ_NOSLEEP,
 2003                                     expire_at_tick);
 2004                                 if (tid == TASKQID_INVALID) {
 2005                                         /*
 2006                                          * Couldn't allocate a task.  Just
 2007                                          * finish the zio without a delay.
 2008                                          */
 2009                                         zio_interrupt(zio);
 2010                                 }
 2011                         }
 2012                 }
 2013                 return;
 2014         }
 2015 #endif
 2016         DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
 2017         zio_interrupt(zio);
 2018 }
 2019 
 2020 static void
 2021 zio_deadman_impl(zio_t *pio, int ziodepth)
 2022 {
 2023         zio_t *cio, *cio_next;
 2024         zio_link_t *zl = NULL;
 2025         vdev_t *vd = pio->io_vd;
 2026 
 2027         if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
 2028                 vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
 2029                 zbookmark_phys_t *zb = &pio->io_bookmark;
 2030                 uint64_t delta = gethrtime() - pio->io_timestamp;
 2031                 uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
 2032 
 2033                 zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
 2034                     "delta=%llu queued=%llu io=%llu "
 2035                     "path=%s "
 2036                     "last=%llu type=%d "
 2037                     "priority=%d flags=0x%llx stage=0x%x "
 2038                     "pipeline=0x%x pipeline-trace=0x%x "
 2039                     "objset=%llu object=%llu "
 2040                     "level=%llu blkid=%llu "
 2041                     "offset=%llu size=%llu "
 2042                     "error=%d",
 2043                     ziodepth, pio, pio->io_timestamp,
 2044                     (u_longlong_t)delta, pio->io_delta, pio->io_delay,
 2045                     vd ? vd->vdev_path : "NULL",
 2046                     vq ? vq->vq_io_complete_ts : 0, pio->io_type,
 2047                     pio->io_priority, (u_longlong_t)pio->io_flags,
 2048                     pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
 2049                     (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
 2050                     (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
 2051                     (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
 2052                     pio->io_error);
 2053                 (void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
 2054                     pio->io_spa, vd, zb, pio, 0);
 2055 
 2056                 if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
 2057                     taskq_empty_ent(&pio->io_tqent)) {
 2058                         zio_interrupt(pio);
 2059                 }
 2060         }
 2061 
 2062         mutex_enter(&pio->io_lock);
 2063         for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
 2064                 cio_next = zio_walk_children(pio, &zl);
 2065                 zio_deadman_impl(cio, ziodepth + 1);
 2066         }
 2067         mutex_exit(&pio->io_lock);
 2068 }
 2069 
 2070 /*
 2071  * Log the critical information describing this zio and all of its children
 2072  * using the zfs_dbgmsg() interface, then post a deadman event for the ZED.
 2073  */
 2074 void
 2075 zio_deadman(zio_t *pio, const char *tag)
 2076 {
 2077         spa_t *spa = pio->io_spa;
 2078         char *name = spa_name(spa);
 2079 
 2080         if (!zfs_deadman_enabled || spa_suspended(spa))
 2081                 return;
 2082 
 2083         zio_deadman_impl(pio, 0);
 2084 
 2085         switch (spa_get_deadman_failmode(spa)) {
 2086         case ZIO_FAILURE_MODE_WAIT:
 2087                 zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
 2088                 break;
 2089 
 2090         case ZIO_FAILURE_MODE_CONTINUE:
 2091                 zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
 2092                 break;
 2093 
 2094         case ZIO_FAILURE_MODE_PANIC:
 2095                 fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
 2096                 break;
 2097         }
 2098 }
 2099 
 2100 /*
 2101  * Execute the I/O pipeline until one of the following occurs:
 2102  * (1) the I/O completes; (2) the pipeline stalls waiting for
 2103  * dependent child I/Os; (3) the I/O issues, so we're waiting
 2104  * for an I/O completion interrupt; (4) the I/O is delegated by
 2105  * vdev-level caching or aggregation; (5) the I/O is deferred
 2106  * due to vdev-level queueing; (6) the I/O is handed off to
 2107  * another thread.  In all cases, the pipeline stops whenever
 2108  * there's no CPU work; it never burns a thread in cv_wait_io().
 2109  *
 2110  * There's no locking on io_stage because there's no legitimate way
 2111  * for multiple threads to be attempting to process the same I/O.
 2112  */
 2113 static zio_pipe_stage_t *zio_pipeline[];
 2114 
 2115 /*
 2116  * zio_execute() is a wrapper around the static function
 2117  * __zio_execute() so that we can force __zio_execute() to be
 2118  * inlined.  This reduces stack overhead which is important
 2119  * because __zio_execute() is called recursively in several zio
 2120  * code paths.  zio_execute() itself cannot be inlined because
 2121  * it is externally visible.
 2122  */
 2123 void
 2124 zio_execute(void *zio)
 2125 {
 2126         fstrans_cookie_t cookie;
 2127 
 2128         cookie = spl_fstrans_mark();
 2129         __zio_execute(zio);
 2130         spl_fstrans_unmark(cookie);
 2131 }
 2132 
 2133 /*
 2134  * Used to determine if in the current context the stack is sized large
 2135  * Used to determine whether the stack in the current context is large
 2136  * enough to allow zio_execute() to be called recursively.  A minimum
 2137  */
 2138 static boolean_t
 2139 zio_execute_stack_check(zio_t *zio)
 2140 {
 2141 #if !defined(HAVE_LARGE_STACKS)
 2142         dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
 2143 
 2144         /* Executing in txg_sync_thread() context. */
 2145         if (dp && curthread == dp->dp_tx.tx_sync_thread)
 2146                 return (B_TRUE);
 2147 
 2148         /* Pool initialization outside of zio_taskq context. */
 2149         if (dp && spa_is_initializing(dp->dp_spa) &&
 2150             !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
 2151             !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
 2152                 return (B_TRUE);
 2153 #else
 2154         (void) zio;
 2155 #endif /* HAVE_LARGE_STACKS */
 2156 
 2157         return (B_FALSE);
 2158 }
 2159 
 2160 __attribute__((always_inline))
 2161 static inline void
 2162 __zio_execute(zio_t *zio)
 2163 {
 2164         ASSERT3U(zio->io_queued_timestamp, >, 0);
 2165 
 2166         while (zio->io_stage < ZIO_STAGE_DONE) {
 2167                 enum zio_stage pipeline = zio->io_pipeline;
 2168                 enum zio_stage stage = zio->io_stage;
 2169 
 2170                 zio->io_executor = curthread;
 2171 
 2172                 ASSERT(!MUTEX_HELD(&zio->io_lock));
 2173                 ASSERT(ISP2(stage));
 2174                 ASSERT(zio->io_stall == NULL);
 2175 
 2176                 do {
 2177                         stage <<= 1;
 2178                 } while ((stage & pipeline) == 0);
 2179 
 2180                 ASSERT(stage <= ZIO_STAGE_DONE);
 2181 
 2182                 /*
 2183                  * If we are in interrupt context and this pipeline stage
 2184                  * will grab a config lock that is held across I/O,
 2185                  * or may wait for an I/O that needs an interrupt thread
 2186                  * to complete, issue async to avoid deadlock.
 2187                  *
 2188                  * For VDEV_IO_START, we cut in line so that the io will
 2189                  * be sent to disk promptly.
 2190                  */
 2191                 if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
 2192                     zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
 2193                         boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
 2194                             zio_requeue_io_start_cut_in_line : B_FALSE;
 2195                         zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
 2196                         return;
 2197                 }
 2198 
 2199                 /*
 2200                  * If the current context doesn't have large enough stacks
 2201                  * the zio must be issued asynchronously to prevent overflow.
 2202                  */
 2203                 if (zio_execute_stack_check(zio)) {
 2204                         boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
 2205                             zio_requeue_io_start_cut_in_line : B_FALSE;
 2206                         zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
 2207                         return;
 2208                 }
 2209 
 2210                 zio->io_stage = stage;
 2211                 zio->io_pipeline_trace |= zio->io_stage;
 2212 
 2213                 /*
 2214                  * The zio pipeline stage returns the next zio to execute
 2215                  * (typically the same as this one), or NULL if we should
 2216                  * stop.
 2217                  */
 2218                 zio = zio_pipeline[highbit64(stage) - 1](zio);
 2219 
 2220                 if (zio == NULL)
 2221                         return;
 2222         }
 2223 }
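
/*
 * Illustrative sketch (editor's addition, not part of zio.c): the loop above
 * advances io_stage by shifting the current (power-of-two) stage bit left
 * until it reaches a bit that is set in the pipeline mask.  The same
 * computation as a standalone helper; termination is guaranteed because
 * every pipeline includes ZIO_STAGE_DONE.
 */
static inline enum zio_stage
example_next_stage(enum zio_stage stage, enum zio_stage pipeline)
{
        ASSERT(ISP2(stage));

        do {
                stage <<= 1;
        } while ((stage & pipeline) == 0);

        ASSERT(stage <= ZIO_STAGE_DONE);
        return (stage);
}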
 2224 
 2225 
 2226 /*
 2227  * ==========================================================================
 2228  * Initiate I/O, either sync or async
 2229  * ==========================================================================
 2230  */
 2231 int
 2232 zio_wait(zio_t *zio)
 2233 {
 2234         /*
 2235          * Some routines, like zio_free_sync(), may return a NULL zio
 2236          * to avoid the performance overhead of creating and then destroying
 2237          * an unneeded zio.  For the callers' simplicity, we accept a NULL
 2238          * zio and ignore it.
 2239          */
 2240         if (zio == NULL)
 2241                 return (0);
 2242 
 2243         long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
 2244         int error;
 2245 
 2246         ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
 2247         ASSERT3P(zio->io_executor, ==, NULL);
 2248 
 2249         zio->io_waiter = curthread;
 2250         ASSERT0(zio->io_queued_timestamp);
 2251         zio->io_queued_timestamp = gethrtime();
 2252 
 2253         __zio_execute(zio);
 2254 
 2255         mutex_enter(&zio->io_lock);
 2256         while (zio->io_executor != NULL) {
 2257                 error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
 2258                     ddi_get_lbolt() + timeout);
 2259 
 2260                 if (zfs_deadman_enabled && error == -1 &&
 2261                     gethrtime() - zio->io_queued_timestamp >
 2262                     spa_deadman_ziotime(zio->io_spa)) {
 2263                         mutex_exit(&zio->io_lock);
 2264                         timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
 2265                         zio_deadman(zio, FTAG);
 2266                         mutex_enter(&zio->io_lock);
 2267                 }
 2268         }
 2269         mutex_exit(&zio->io_lock);
 2270 
 2271         error = zio->io_error;
 2272         zio_destroy(zio);
 2273 
 2274         return (error);
 2275 }
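
/*
 * Illustrative sketch (editor's addition, not part of zio.c): the common
 * synchronous pattern is to hand a freshly created zio straight to
 * zio_wait().  A hypothetical blocking read of one block by block pointer,
 * with the result landing in a caller-supplied abd:
 */
static int
example_read_block_sync(spa_t *spa, const blkptr_t *bp, abd_t *abd,
    const zbookmark_phys_t *zb)
{
        return (zio_wait(zio_read(NULL, spa, bp, abd, BP_GET_PSIZE(bp),
            NULL, NULL, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb)));
}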
 2276 
 2277 void
 2278 zio_nowait(zio_t *zio)
 2279 {
 2280         /*
 2281          * See comment in zio_wait().
 2282          */
 2283         if (zio == NULL)
 2284                 return;
 2285 
 2286         ASSERT3P(zio->io_executor, ==, NULL);
 2287 
 2288         if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
 2289             zio_unique_parent(zio) == NULL) {
 2290                 zio_t *pio;
 2291 
 2292                 /*
 2293                  * This is a logical async I/O with no parent to wait for it.
 2294          * We add it to the spa_async_root_zio "Godfather" I/O, which
 2295          * will ensure it completes prior to unloading the pool.
 2296                  */
 2297                 spa_t *spa = zio->io_spa;
 2298                 pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
 2299 
 2300                 zio_add_child(pio, zio);
 2301         }
 2302 
 2303         ASSERT0(zio->io_queued_timestamp);
 2304         zio->io_queued_timestamp = gethrtime();
 2305         __zio_execute(zio);
 2306 }
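
/*
 * Illustrative sketch (editor's addition, not part of zio.c): to issue many
 * I/Os and wait for the whole batch, a hypothetical caller hangs the
 * children off a private root zio and waits on that parent; a child issued
 * with a NULL parent would instead be adopted by the pool's "Godfather" zio
 * as described above.  The root's error reflects its children's errors
 * because nothing here sets ZIO_FLAG_DONT_PROPAGATE.
 */
static int
example_read_batch(spa_t *spa, const blkptr_t *bps[], abd_t *abds[], int count,
    const zbookmark_phys_t *zb)
{
        zio_t *root = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

        for (int i = 0; i < count; i++) {
                zio_nowait(zio_read(root, spa, bps[i], abds[i],
                    BP_GET_PSIZE(bps[i]), NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
                    ZIO_FLAG_CANFAIL, zb));
        }

        return (zio_wait(root));
}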
 2307 
 2308 /*
 2309  * ==========================================================================
 2310  * Reexecute, cancel, or suspend/resume failed I/O
 2311  * ==========================================================================
 2312  */
 2313 
 2314 static void
 2315 zio_reexecute(void *arg)
 2316 {
 2317         zio_t *pio = arg;
 2318         zio_t *cio, *cio_next;
 2319 
 2320         ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
 2321         ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
 2322         ASSERT(pio->io_gang_leader == NULL);
 2323         ASSERT(pio->io_gang_tree == NULL);
 2324 
 2325         pio->io_flags = pio->io_orig_flags;
 2326         pio->io_stage = pio->io_orig_stage;
 2327         pio->io_pipeline = pio->io_orig_pipeline;
 2328         pio->io_reexecute = 0;
 2329         pio->io_flags |= ZIO_FLAG_REEXECUTED;
 2330         pio->io_pipeline_trace = 0;
 2331         pio->io_error = 0;
 2332         for (int w = 0; w < ZIO_WAIT_TYPES; w++)
 2333                 pio->io_state[w] = 0;
 2334         for (int c = 0; c < ZIO_CHILD_TYPES; c++)
 2335                 pio->io_child_error[c] = 0;
 2336 
 2337         if (IO_IS_ALLOCATING(pio))
 2338                 BP_ZERO(pio->io_bp);
 2339 
 2340         /*
 2341          * As we reexecute pio's children, new children could be created.
 2342          * New children go to the head of pio's io_child_list, however,
 2343          * so we will (correctly) not reexecute them.  The key is that
 2344          * the remainder of pio's io_child_list, from 'cio_next' onward,
 2345          * cannot be affected by any side effects of reexecuting 'cio'.
 2346          */
 2347         zio_link_t *zl = NULL;
 2348         mutex_enter(&pio->io_lock);
 2349         for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
 2350                 cio_next = zio_walk_children(pio, &zl);
 2351                 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
 2352                         pio->io_children[cio->io_child_type][w]++;
 2353                 mutex_exit(&pio->io_lock);
 2354                 zio_reexecute(cio);
 2355                 mutex_enter(&pio->io_lock);
 2356         }
 2357         mutex_exit(&pio->io_lock);
 2358 
 2359         /*
 2360          * Now that all children have been reexecuted, execute the parent.
 2361          * We don't reexecute "The Godfather" I/O here as it's the
 2362          * responsibility of the caller to wait on it.
 2363          */
 2364         if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
 2365                 pio->io_queued_timestamp = gethrtime();
 2366                 __zio_execute(pio);
 2367         }
 2368 }
 2369 
 2370 void
 2371 zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
 2372 {
 2373         if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
 2374                 fm_panic("Pool '%s' has encountered an uncorrectable I/O "
 2375                     "failure and the failure mode property for this pool "
 2376                     "is set to panic.", spa_name(spa));
 2377 
 2378         cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O "
 2379             "failure and has been suspended.\n", spa_name(spa));
 2380 
 2381         (void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
 2382             NULL, NULL, 0);
 2383 
 2384         mutex_enter(&spa->spa_suspend_lock);
 2385 
 2386         if (spa->spa_suspend_zio_root == NULL)
 2387                 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
 2388                     ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
 2389                     ZIO_FLAG_GODFATHER);
 2390 
 2391         spa->spa_suspended = reason;
 2392 
 2393         if (zio != NULL) {
 2394                 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
 2395                 ASSERT(zio != spa->spa_suspend_zio_root);
 2396                 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
 2397                 ASSERT(zio_unique_parent(zio) == NULL);
 2398                 ASSERT(zio->io_stage == ZIO_STAGE_DONE);
 2399                 zio_add_child(spa->spa_suspend_zio_root, zio);
 2400         }
 2401 
 2402         mutex_exit(&spa->spa_suspend_lock);
 2403 }
 2404 
 2405 int
 2406 zio_resume(spa_t *spa)
 2407 {
 2408         zio_t *pio;
 2409 
 2410         /*
 2411          * Reexecute all previously suspended i/o.
 2412          */
 2413         mutex_enter(&spa->spa_suspend_lock);
 2414         spa->spa_suspended = ZIO_SUSPEND_NONE;
 2415         cv_broadcast(&spa->spa_suspend_cv);
 2416         pio = spa->spa_suspend_zio_root;
 2417         spa->spa_suspend_zio_root = NULL;
 2418         mutex_exit(&spa->spa_suspend_lock);
 2419 
 2420         if (pio == NULL)
 2421                 return (0);
 2422 
 2423         zio_reexecute(pio);
 2424         return (zio_wait(pio));
 2425 }
 2426 
 2427 void
 2428 zio_resume_wait(spa_t *spa)
 2429 {
 2430         mutex_enter(&spa->spa_suspend_lock);
 2431         while (spa_suspended(spa))
 2432                 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
 2433         mutex_exit(&spa->spa_suspend_lock);
 2434 }
 2435 
 2436 /*
 2437  * ==========================================================================
 2438  * Gang blocks.
 2439  *
 2440  * A gang block is a collection of small blocks that looks to the DMU
 2441  * like one large block.  When zio_dva_allocate() cannot find a block
 2442  * of the requested size, due to either severe fragmentation or the pool
 2443  * being nearly full, it calls zio_write_gang_block() to construct the
 2444  * block from smaller fragments.
 2445  *
 2446  * A gang block consists of a gang header (zio_gbh_phys_t) and up to
 2447  * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
 2448  * an indirect block: it's an array of block pointers.  It consumes
 2449  * only one sector and hence is allocatable regardless of fragmentation.
 2450  * The gang header's bps point to its gang members, which hold the data.
 2451  *
 2452  * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
 2453  * as the verifier to ensure uniqueness of the SHA256 checksum.
 2454  * Critically, the gang block bp's blk_cksum is the checksum of the data,
 2455  * not the gang header.  This ensures that data block signatures (needed for
 2456  * deduplication) are independent of how the block is physically stored.
 2457  *
 2458  * Gang blocks can be nested: a gang member may itself be a gang block.
 2459  * Thus every gang block is a tree in which root and all interior nodes are
 2460  * gang headers, and the leaves are normal blocks that contain user data.
 2461  * The root of the gang tree is called the gang leader.
 2462  *
 2463  * To perform any operation (read, rewrite, free, claim) on a gang block,
 2464  * zio_gang_assemble() first assembles the gang tree (minus data leaves)
 2465  * in the io_gang_tree field of the original logical i/o by recursively
 2466  * reading the gang leader and all gang headers below it.  This yields
 2467  * an in-core tree containing the contents of every gang header and the
 2468  * bps for every constituent of the gang block.
 2469  *
 2470  * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
 2471  * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
 2472  * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
 2473  * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
 2474  * zio_read_gang() is a wrapper around zio_read() that omits reading gang
 2475  * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
 2476  * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
 2477  * of the gang header plus zio_checksum_compute() of the data to update the
 2478  * gang header's blk_cksum as described above.
 2479  *
 2480  * The two-phase assemble/issue model solves the problem of partial failure --
 2481  * what if you'd freed part of a gang block but then couldn't read the
 2482  * gang header for another part?  Assembling the entire gang tree first
 2483  * ensures that all the necessary gang header I/O has succeeded before
 2484  * starting the actual work of free, claim, or write.  Once the gang tree
 2485  * is assembled, free and claim are in-memory operations that cannot fail.
 2486  *
 2487  * In the event that a gang write fails, zio_dva_unallocate() walks the
 2488  * gang tree to immediately free (i.e. insert back into the space map)
 2489  * everything we've allocated.  This ensures that we don't get ENOSPC
 2490  * errors during repeated suspend/resume cycles due to a flaky device.
 2491  *
 2492  * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
 2493  * the gang tree, we won't modify the block, so we can safely defer the free
 2494  * (knowing that the block is still intact).  If we *can* assemble the gang
 2495  * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
 2496  * each constituent bp and we can allocate a new block on the next sync pass.
 2497  *
 2498  * In all cases, the gang tree allows complete recovery from partial failure.
 2499  * ==========================================================================
 2500  */
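
/*
 * Illustrative sketch (editor's addition, not part of zio.c): a gang header
 * is just a small array of block pointers plus a tail checksum.  Summing the
 * physical sizes of one header's non-hole members, the same accumulation
 * zio_gang_tree_issue() performs with its running offset, looks like this;
 * nested gang members would have to be walked recursively through their own
 * headers.
 */
static uint64_t
example_gang_header_psize(const zio_gbh_phys_t *gbh)
{
        uint64_t psize = 0;

        for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
                const blkptr_t *gbp = &gbh->zg_blkptr[g];

                if (!BP_IS_HOLE(gbp))
                        psize += BP_GET_PSIZE(gbp);
        }

        return (psize);
}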
 2501 
 2502 static void
 2503 zio_gang_issue_func_done(zio_t *zio)
 2504 {
 2505         abd_free(zio->io_abd);
 2506 }
 2507 
 2508 static zio_t *
 2509 zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
 2510     uint64_t offset)
 2511 {
 2512         if (gn != NULL)
 2513                 return (pio);
 2514 
 2515         return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
 2516             BP_GET_PSIZE(bp), zio_gang_issue_func_done,
 2517             NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
 2518             &pio->io_bookmark));
 2519 }
 2520 
 2521 static zio_t *
 2522 zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
 2523     uint64_t offset)
 2524 {
 2525         zio_t *zio;
 2526 
 2527         if (gn != NULL) {
 2528                 abd_t *gbh_abd =
 2529                     abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
 2530                 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
 2531                     gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
 2532                     pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
 2533                     &pio->io_bookmark);
 2534                 /*
 2535                  * As we rewrite each gang header, the pipeline will compute
 2536                  * a new gang block header checksum for it; but no one will
 2537                  * compute a new data checksum, so we do that here.  The one
 2538                  * exception is the gang leader: the pipeline already computed
 2539                  * its data checksum because that stage precedes gang assembly.
 2540                  * (Presently, nothing actually uses interior data checksums;
 2541                  * this is just good hygiene.)
 2542                  */
 2543                 if (gn != pio->io_gang_leader->io_gang_tree) {
 2544                         abd_t *buf = abd_get_offset(data, offset);
 2545 
 2546                         zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
 2547                             buf, BP_GET_PSIZE(bp));
 2548 
 2549                         abd_free(buf);
 2550                 }
 2551                 /*
 2552                  * If we are here to damage data for testing purposes,
 2553                  * leave the GBH alone so that we can detect the damage.
 2554                  */
 2555                 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
 2556                         zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
 2557         } else {
 2558                 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
 2559                     abd_get_offset(data, offset), BP_GET_PSIZE(bp),
 2560                     zio_gang_issue_func_done, NULL, pio->io_priority,
 2561                     ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
 2562         }
 2563 
 2564         return (zio);
 2565 }
 2566 
 2567 static zio_t *
 2568 zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
 2569     uint64_t offset)
 2570 {
 2571         (void) gn, (void) data, (void) offset;
 2572 
 2573         zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
 2574             ZIO_GANG_CHILD_FLAGS(pio));
 2575         if (zio == NULL) {
 2576                 zio = zio_null(pio, pio->io_spa,
 2577                     NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
 2578         }
 2579         return (zio);
 2580 }
 2581 
 2582 static zio_t *
 2583 zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
 2584     uint64_t offset)
 2585 {
 2586         (void) gn, (void) data, (void) offset;
 2587         return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
 2588             NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
 2589 }
 2590 
 2591 static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
 2592         NULL,
 2593         zio_read_gang,
 2594         zio_rewrite_gang,
 2595         zio_free_gang,
 2596         zio_claim_gang,
 2597         NULL
 2598 };
 2599 
 2600 static void zio_gang_tree_assemble_done(zio_t *zio);
 2601 
 2602 static zio_gang_node_t *
 2603 zio_gang_node_alloc(zio_gang_node_t **gnpp)
 2604 {
 2605         zio_gang_node_t *gn;
 2606 
 2607         ASSERT(*gnpp == NULL);
 2608 
 2609         gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
 2610         gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
 2611         *gnpp = gn;
 2612 
 2613         return (gn);
 2614 }
 2615 
 2616 static void
 2617 zio_gang_node_free(zio_gang_node_t **gnpp)
 2618 {
 2619         zio_gang_node_t *gn = *gnpp;
 2620 
 2621         for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
 2622                 ASSERT(gn->gn_child[g] == NULL);
 2623 
 2624         zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
 2625         kmem_free(gn, sizeof (*gn));
 2626         *gnpp = NULL;
 2627 }
 2628 
 2629 static void
 2630 zio_gang_tree_free(zio_gang_node_t **gnpp)
 2631 {
 2632         zio_gang_node_t *gn = *gnpp;
 2633 
 2634         if (gn == NULL)
 2635                 return;
 2636 
 2637         for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
 2638                 zio_gang_tree_free(&gn->gn_child[g]);
 2639 
 2640         zio_gang_node_free(gnpp);
 2641 }
 2642 
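/*
 * A minimal, standalone sketch (compiled separately from this file) of the
 * recursive teardown used by zio_gang_tree_free() above: every interior
 * node owns a fixed number of child slots, the subtrees are freed first,
 * and the caller's pointer is cleared on the way out, just as
 * zio_gang_node_free() clears *gnpp.  The names and the NCHILD value are
 * illustrative, not taken from zio.h.
 */
#include <stdlib.h>

#define NCHILD  3                       /* stands in for SPA_GBH_NBLKPTRS */

typedef struct node {
        struct node *child[NCHILD];     /* payload (the gang header) omitted */
} node_t;

static void
tree_free(node_t **npp)
{
        node_t *n = *npp;

        if (n == NULL)
                return;
        for (int c = 0; c < NCHILD; c++)        /* post-order: children first */
                tree_free(&n->child[c]);
        free(n);
        *npp = NULL;                            /* leave no dangling pointer */
}
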
 2643 static void
 2644 zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
 2645 {
 2646         zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
 2647         abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
 2648 
 2649         ASSERT(gio->io_gang_leader == gio);
 2650         ASSERT(BP_IS_GANG(bp));
 2651 
 2652         zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
 2653             zio_gang_tree_assemble_done, gn, gio->io_priority,
 2654             ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
 2655 }
 2656 
 2657 static void
 2658 zio_gang_tree_assemble_done(zio_t *zio)
 2659 {
 2660         zio_t *gio = zio->io_gang_leader;
 2661         zio_gang_node_t *gn = zio->io_private;
 2662         blkptr_t *bp = zio->io_bp;
 2663 
 2664         ASSERT(gio == zio_unique_parent(zio));
 2665         ASSERT(zio->io_child_count == 0);
 2666 
 2667         if (zio->io_error)
 2668                 return;
 2669 
 2670         /* this ABD was created from a linear buf in zio_gang_tree_assemble */
 2671         if (BP_SHOULD_BYTESWAP(bp))
 2672                 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
 2673 
 2674         ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
 2675         ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
 2676         ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
 2677 
 2678         abd_free(zio->io_abd);
 2679 
 2680         for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
 2681                 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
 2682                 if (!BP_IS_GANG(gbp))
 2683                         continue;
 2684                 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
 2685         }
 2686 }
 2687 
 2688 static void
 2689 zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
 2690     uint64_t offset)
 2691 {
 2692         zio_t *gio = pio->io_gang_leader;
 2693         zio_t *zio;
 2694 
 2695         ASSERT(BP_IS_GANG(bp) == !!gn);
 2696         ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
 2697         ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
 2698 
 2699         /*
 2700          * If you're a gang header, your data is in gn->gn_gbh.
 2701          * If you're a gang member, your data is in 'data' and gn == NULL.
 2702          */
 2703         zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
 2704 
 2705         if (gn != NULL) {
 2706                 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
 2707 
 2708                 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
 2709                         blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
 2710                         if (BP_IS_HOLE(gbp))
 2711                                 continue;
 2712                         zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
 2713                             offset);
 2714                         offset += BP_GET_PSIZE(gbp);
 2715                 }
 2716         }
 2717 
 2718         if (gn == gio->io_gang_tree)
 2719                 ASSERT3U(gio->io_size, ==, offset);
 2720 
 2721         if (zio != pio)
 2722                 zio_nowait(zio);
 2723 }
 2724 
 2725 static zio_t *
 2726 zio_gang_assemble(zio_t *zio)
 2727 {
 2728         blkptr_t *bp = zio->io_bp;
 2729 
 2730         ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
 2731         ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
 2732 
 2733         zio->io_gang_leader = zio;
 2734 
 2735         zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
 2736 
 2737         return (zio);
 2738 }
 2739 
 2740 static zio_t *
 2741 zio_gang_issue(zio_t *zio)
 2742 {
 2743         blkptr_t *bp = zio->io_bp;
 2744 
 2745         if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
 2746                 return (NULL);
 2747         }
 2748 
 2749         ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
 2750         ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
 2751 
 2752         if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
 2753                 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
 2754                     0);
 2755         else
 2756                 zio_gang_tree_free(&zio->io_gang_tree);
 2757 
 2758         zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
 2759 
 2760         return (zio);
 2761 }
 2762 
 2763 static void
 2764 zio_write_gang_member_ready(zio_t *zio)
 2765 {
 2766         zio_t *pio = zio_unique_parent(zio);
 2767         dva_t *cdva = zio->io_bp->blk_dva;
 2768         dva_t *pdva = pio->io_bp->blk_dva;
 2769         uint64_t asize;
 2770         zio_t *gio __maybe_unused = zio->io_gang_leader;
 2771 
 2772         if (BP_IS_HOLE(zio->io_bp))
 2773                 return;
 2774 
 2775         ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
 2776 
 2777         ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
 2778         ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
 2779         ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
 2780         ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
 2781         ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
 2782 
 2783         mutex_enter(&pio->io_lock);
 2784         for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
 2785                 ASSERT(DVA_GET_GANG(&pdva[d]));
 2786                 asize = DVA_GET_ASIZE(&pdva[d]);
 2787                 asize += DVA_GET_ASIZE(&cdva[d]);
 2788                 DVA_SET_ASIZE(&pdva[d], asize);
 2789         }
 2790         mutex_exit(&pio->io_lock);
 2791 }
 2792 
 2793 static void
 2794 zio_write_gang_done(zio_t *zio)
 2795 {
 2796         /*
 2797          * The io_abd field will be NULL for a zio with no data.  The io_flags
 2798          * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
 2799          * check for it here as it is cleared in zio_ready.
 2800          */
 2801         if (zio->io_abd != NULL)
 2802                 abd_free(zio->io_abd);
 2803 }
 2804 
 2805 static zio_t *
 2806 zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
 2807 {
 2808         spa_t *spa = pio->io_spa;
 2809         blkptr_t *bp = pio->io_bp;
 2810         zio_t *gio = pio->io_gang_leader;
 2811         zio_t *zio;
 2812         zio_gang_node_t *gn, **gnpp;
 2813         zio_gbh_phys_t *gbh;
 2814         abd_t *gbh_abd;
 2815         uint64_t txg = pio->io_txg;
 2816         uint64_t resid = pio->io_size;
 2817         uint64_t lsize;
 2818         int copies = gio->io_prop.zp_copies;
 2819         int gbh_copies;
 2820         zio_prop_t zp;
 2821         int error;
 2822         boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
 2823 
 2824         /*
 2825          * Encrypted blocks need DVA[2] left free (it carries the salt and
 2826          * IV), so encrypted gang headers can't have a third copy.
 2827          */
 2828         gbh_copies = MIN(copies + 1, spa_max_replication(spa));
 2829         if (BP_IS_ENCRYPTED(bp) && gbh_copies >= SPA_DVAS_PER_BP)
 2830                 gbh_copies = SPA_DVAS_PER_BP - 1;
 2831 
 2832         int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
 2833         if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
 2834                 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
 2835                 ASSERT(has_data);
 2836 
 2837                 flags |= METASLAB_ASYNC_ALLOC;
 2838                 VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
 2839                     mca_alloc_slots, pio));
 2840 
 2841                 /*
 2842                  * The logical zio has already placed a reservation for
 2843                  * 'copies' allocation slots but gang blocks may require
 2844                  * additional copies. These additional copies
 2845                  * (i.e. gbh_copies - copies) are guaranteed to succeed
 2846                  * since metaslab_class_throttle_reserve() always allows
 2847                  * additional reservations for gang blocks.
 2848                  */
 2849                 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
 2850                     pio->io_allocator, pio, flags));
 2851         }
 2852 
 2853         error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
 2854             bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
 2855             &pio->io_alloc_list, pio, pio->io_allocator);
 2856         if (error) {
 2857                 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
 2858                         ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
 2859                         ASSERT(has_data);
 2860 
 2861                         /*
 2862                          * If we failed to allocate the gang block header then
 2863                          * we remove any additional allocation reservations that
 2864                          * we placed here. The original reservation will
 2865                          * be removed when the logical I/O goes to the ready
 2866                          * stage.
 2867                          */
 2868                         metaslab_class_throttle_unreserve(mc,
 2869                             gbh_copies - copies, pio->io_allocator, pio);
 2870                 }
 2871 
 2872                 pio->io_error = error;
 2873                 return (pio);
 2874         }
 2875 
 2876         if (pio == gio) {
 2877                 gnpp = &gio->io_gang_tree;
 2878         } else {
 2879                 gnpp = pio->io_private;
 2880                 ASSERT(pio->io_ready == zio_write_gang_member_ready);
 2881         }
 2882 
 2883         gn = zio_gang_node_alloc(gnpp);
 2884         gbh = gn->gn_gbh;
 2885         memset(gbh, 0, SPA_GANGBLOCKSIZE);
 2886         gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
 2887 
 2888         /*
 2889          * Create the gang header.
 2890          */
 2891         zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
 2892             zio_write_gang_done, NULL, pio->io_priority,
 2893             ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
 2894 
 2895         /*
 2896          * Create and nowait the gang children.
 2897          */
 2898         for (int g = 0; resid != 0; resid -= lsize, g++) {
 2899                 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
 2900                     SPA_MINBLOCKSIZE);
 2901                 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
 2902 
 2903                 zp.zp_checksum = gio->io_prop.zp_checksum;
 2904                 zp.zp_compress = ZIO_COMPRESS_OFF;
 2905                 zp.zp_complevel = gio->io_prop.zp_complevel;
 2906                 zp.zp_type = DMU_OT_NONE;
 2907                 zp.zp_level = 0;
 2908                 zp.zp_copies = gio->io_prop.zp_copies;
 2909                 zp.zp_dedup = B_FALSE;
 2910                 zp.zp_dedup_verify = B_FALSE;
 2911                 zp.zp_nopwrite = B_FALSE;
 2912                 zp.zp_encrypt = gio->io_prop.zp_encrypt;
 2913                 zp.zp_byteorder = gio->io_prop.zp_byteorder;
 2914                 memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
 2915                 memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
 2916                 memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);
 2917 
 2918                 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
 2919                     has_data ? abd_get_offset(pio->io_abd, pio->io_size -
 2920                     resid) : NULL, lsize, lsize, &zp,
 2921                     zio_write_gang_member_ready, NULL, NULL,
 2922                     zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
 2923                     ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
 2924 
 2925                 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
 2926                         ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
 2927                         ASSERT(has_data);
 2928 
 2929                         /*
 2930                          * Gang children won't throttle but we should
 2931                          * account for their work, so reserve an allocation
 2932                          * slot for them here.
 2933                          */
 2934                         VERIFY(metaslab_class_throttle_reserve(mc,
 2935                             zp.zp_copies, cio->io_allocator, cio, flags));
 2936                 }
 2937                 zio_nowait(cio);
 2938         }
 2939 
 2940         /*
 2941          * Set pio's pipeline to just wait for zio to finish.
 2942          */
 2943         pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
 2944 
 2945         /*
 2946          * We didn't allocate this bp, so make sure it doesn't get unmarked.
 2947          */
 2948         pio->io_flags &= ~ZIO_FLAG_FASTWRITE;
 2949 
 2950         zio_nowait(zio);
 2951 
 2952         return (pio);
 2953 }
 2954 
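/*
 * A standalone sketch (not part of this file) of the child-sizing loop in
 * zio_write_gang_block() above: the residual bytes are divided evenly among
 * the gang block pointers still unused, and each child's size is rounded up
 * to the minimum block size, so later children shrink slightly; the real
 * code's ASSERT(lsize <= resid) is what keeps the loop within
 * SPA_GBH_NBLKPTRS iterations.  The macro mirrors the usual sysmacros.h
 * P2ROUNDUP bit trick, and the constants (3 pointers, 512-byte minimum,
 * 100 KiB write) are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_P2ROUNDUP(x, align)  (-(-(uint64_t)(x) & -(uint64_t)(align)))

int
main(void)
{
        const uint64_t nblkptrs = 3;    /* stands in for SPA_GBH_NBLKPTRS */
        const uint64_t minblock = 512;  /* stands in for SPA_MINBLOCKSIZE */
        uint64_t resid = 100 * 1024;    /* pretend pio->io_size */

        for (uint64_t g = 0; resid != 0; g++) {
                uint64_t lsize = EX_P2ROUNDUP(resid / (nblkptrs - g), minblock);

                printf("child %llu: %llu bytes (resid was %llu)\n",
                    (unsigned long long)g, (unsigned long long)lsize,
                    (unsigned long long)resid);
                resid -= lsize;
        }
        return (0);
}
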
 2955 /*
 2956  * The zio_nop_write stage in the pipeline determines if allocating a
 2957  * new bp is necessary.  The nopwrite feature can handle writes in
 2958  * either syncing or open context (i.e. zil writes) and as a result is
 2959  * mutually exclusive with dedup.
 2960  *
 2961  * By leveraging a cryptographically secure checksum, such as SHA256, we
 2962  * can compare the checksums of the new data and the old to determine if
 2963  * allocating a new block is required.  Note that our requirements for
 2964  * cryptographic strength are fairly weak: there can't be any accidental
 2965  * hash collisions, but we don't need to be secure against intentional
 2966  * (malicious) collisions.  To trigger a nopwrite, you have to be able
 2967  * to write the file to begin with, and triggering an incorrect (hash
 2968  * collision) nopwrite is no worse than simply writing to the file.
 2969  * That said, there are no known attacks against the checksum algorithms
 2970  * used for nopwrite, assuming that the salt and the checksums
 2971  * themselves remain secret.
 2972  */
 2973 static zio_t *
 2974 zio_nop_write(zio_t *zio)
 2975 {
 2976         blkptr_t *bp = zio->io_bp;
 2977         blkptr_t *bp_orig = &zio->io_bp_orig;
 2978         zio_prop_t *zp = &zio->io_prop;
 2979 
 2980         ASSERT(BP_IS_HOLE(bp));
 2981         ASSERT(BP_GET_LEVEL(bp) == 0);
 2982         ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
 2983         ASSERT(zp->zp_nopwrite);
 2984         ASSERT(!zp->zp_dedup);
 2985         ASSERT(zio->io_bp_override == NULL);
 2986         ASSERT(IO_IS_ALLOCATING(zio));
 2987 
 2988         /*
 2989          * Check to see if the original bp and the new bp have matching
 2990          * characteristics (i.e. same checksum, compression algorithms, etc).
 2991          * If they don't then just continue with the pipeline which will
 2992          * allocate a new bp.
 2993          */
 2994         if (BP_IS_HOLE(bp_orig) ||
 2995             !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
 2996             ZCHECKSUM_FLAG_NOPWRITE) ||
 2997             BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
 2998             BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
 2999             BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
 3000             BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
 3001             zp->zp_copies != BP_GET_NDVAS(bp_orig))
 3002                 return (zio);
 3003 
 3004         /*
 3005          * If the checksums match then reset the pipeline so that we
 3006          * avoid allocating a new bp and issuing any I/O.
 3007          */
 3008         if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
 3009                 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
 3010                     ZCHECKSUM_FLAG_NOPWRITE);
 3011                 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
 3012                 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
 3013                 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
 3014                 ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop);
 3015 
 3016                 /*
 3017                  * If we're overwriting a block that is currently on an
 3018                  * indirect vdev, then ignore the nopwrite request and
 3019                  * allow a new block to be allocated on a concrete vdev.
 3020                  */
 3021                 spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
 3022                 for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) {
 3023                         vdev_t *tvd = vdev_lookup_top(zio->io_spa,
 3024                             DVA_GET_VDEV(&bp_orig->blk_dva[d]));
 3025                         if (tvd->vdev_ops == &vdev_indirect_ops) {
 3026                                 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
 3027                                 return (zio);
 3028                         }
 3029                 }
 3030                 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
 3031 
 3032                 *bp = *bp_orig;
 3033                 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
 3034                 zio->io_flags |= ZIO_FLAG_NOPWRITE;
 3035         }
 3036 
 3037         return (zio);
 3038 }
 3039 
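/*
 * The nopwrite test above reduces to: the checksum algorithm must be
 * nopwrite-capable (collision-resistant), the old and new blocks must share
 * the same checksum and compression settings, and the checksums must match
 * bit for bit; only then is the old block pointer reused and the write
 * skipped.  A standalone sketch of that decision; every name below is
 * illustrative rather than a ZFS type.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef struct {
        uint64_t word[4];               /* 256-bit checksum, like zio_cksum_t */
} ex_cksum_t;

typedef struct {
        int checksum_alg;
        int compress_alg;
        bool alg_nopwrite_capable;      /* no accidental collisions expected */
        ex_cksum_t cksum;
} ex_blk_props_t;

static bool
ex_can_nopwrite(const ex_blk_props_t *oldp, const ex_blk_props_t *newp)
{
        if (!newp->alg_nopwrite_capable)
                return (false);
        if (oldp->checksum_alg != newp->checksum_alg ||
            oldp->compress_alg != newp->compress_alg)
                return (false);
        /* Checksums are bit-identical: keep the old block, skip the I/O. */
        return (memcmp(&oldp->cksum, &newp->cksum, sizeof (ex_cksum_t)) == 0);
}
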
 3040 /*
 3041  * ==========================================================================
 3042  * Dedup
 3043  * ==========================================================================
 3044  */
 3045 static void
 3046 zio_ddt_child_read_done(zio_t *zio)
 3047 {
 3048         blkptr_t *bp = zio->io_bp;
 3049         ddt_entry_t *dde = zio->io_private;
 3050         ddt_phys_t *ddp;
 3051         zio_t *pio = zio_unique_parent(zio);
 3052 
 3053         mutex_enter(&pio->io_lock);
 3054         ddp = ddt_phys_select(dde, bp);
 3055         if (zio->io_error == 0)
 3056                 ddt_phys_clear(ddp);    /* this ddp doesn't need repair */
 3057 
 3058         if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
 3059                 dde->dde_repair_abd = zio->io_abd;
 3060         else
 3061                 abd_free(zio->io_abd);
 3062         mutex_exit(&pio->io_lock);
 3063 }
 3064 
 3065 static zio_t *
 3066 zio_ddt_read_start(zio_t *zio)
 3067 {
 3068         blkptr_t *bp = zio->io_bp;
 3069 
 3070         ASSERT(BP_GET_DEDUP(bp));
 3071         ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
 3072         ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
 3073 
 3074         if (zio->io_child_error[ZIO_CHILD_DDT]) {
 3075                 ddt_t *ddt = ddt_select(zio->io_spa, bp);
 3076                 ddt_entry_t *dde = ddt_repair_start(ddt, bp);
 3077                 ddt_phys_t *ddp = dde->dde_phys;
 3078                 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
 3079                 blkptr_t blk;
 3080 
 3081                 ASSERT(zio->io_vsd == NULL);
 3082                 zio->io_vsd = dde;
 3083 
 3084                 if (ddp_self == NULL)
 3085                         return (zio);
 3086 
 3087                 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
 3088                         if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
 3089                                 continue;
 3090                         ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
 3091                             &blk);
 3092                         zio_nowait(zio_read(zio, zio->io_spa, &blk,
 3093                             abd_alloc_for_io(zio->io_size, B_TRUE),
 3094                             zio->io_size, zio_ddt_child_read_done, dde,
 3095                             zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
 3096                             ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
 3097                 }
 3098                 return (zio);
 3099         }
 3100 
 3101         zio_nowait(zio_read(zio, zio->io_spa, bp,
 3102             zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
 3103             ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
 3104 
 3105         return (zio);
 3106 }
 3107 
 3108 static zio_t *
 3109 zio_ddt_read_done(zio_t *zio)
 3110 {
 3111         blkptr_t *bp = zio->io_bp;
 3112 
 3113         if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
 3114                 return (NULL);
 3115         }
 3116 
 3117         ASSERT(BP_GET_DEDUP(bp));
 3118         ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
 3119         ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
 3120 
 3121         if (zio->io_child_error[ZIO_CHILD_DDT]) {
 3122                 ddt_t *ddt = ddt_select(zio->io_spa, bp);
 3123                 ddt_entry_t *dde = zio->io_vsd;
 3124                 if (ddt == NULL) {
 3125                         ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
 3126                         return (zio);
 3127                 }
 3128                 if (dde == NULL) {
 3129                         zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
 3130                         zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
 3131                         return (NULL);
 3132                 }
 3133                 if (dde->dde_repair_abd != NULL) {
 3134                         abd_copy(zio->io_abd, dde->dde_repair_abd,
 3135                             zio->io_size);
 3136                         zio->io_child_error[ZIO_CHILD_DDT] = 0;
 3137                 }
 3138                 ddt_repair_done(ddt, dde);
 3139                 zio->io_vsd = NULL;
 3140         }
 3141 
 3142         ASSERT(zio->io_vsd == NULL);
 3143 
 3144         return (zio);
 3145 }
 3146 
 3147 static boolean_t
 3148 zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
 3149 {
 3150         spa_t *spa = zio->io_spa;
 3151         boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
 3152 
 3153         ASSERT(!(zio->io_bp_override && do_raw));
 3154 
 3155         /*
 3156          * Note: we compare the original data, not the transformed data,
 3157          * because when zio->io_bp is an override bp, we will not have
 3158          * pushed the I/O transforms.  That's an important optimization
 3159          * because otherwise we'd compress/encrypt all dmu_sync() data twice.
 3160          * However, we should never get a raw override zio, so in those
 3161          * cases we can compare the io_abd directly. This is useful because
 3162          * it allows us to do dedup verification even if we don't have access
 3163          * to the original data (for instance, if the encryption keys aren't
 3164          * loaded).
 3165          */
 3166 
 3167         for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
 3168                 zio_t *lio = dde->dde_lead_zio[p];
 3169 
 3170                 if (lio != NULL && do_raw) {
 3171                         return (lio->io_size != zio->io_size ||
 3172                             abd_cmp(zio->io_abd, lio->io_abd) != 0);
 3173                 } else if (lio != NULL) {
 3174                         return (lio->io_orig_size != zio->io_orig_size ||
 3175                             abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
 3176                 }
 3177         }
 3178 
 3179         for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
 3180                 ddt_phys_t *ddp = &dde->dde_phys[p];
 3181 
 3182                 if (ddp->ddp_phys_birth != 0 && do_raw) {
 3183                         blkptr_t blk = *zio->io_bp;
 3184                         uint64_t psize;
 3185                         abd_t *tmpabd;
 3186                         int error;
 3187 
 3188                         ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
 3189                         psize = BP_GET_PSIZE(&blk);
 3190 
 3191                         if (psize != zio->io_size)
 3192                                 return (B_TRUE);
 3193 
 3194                         ddt_exit(ddt);
 3195 
 3196                         tmpabd = abd_alloc_for_io(psize, B_TRUE);
 3197 
 3198                         error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
 3199                             psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
 3200                             ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
 3201                             ZIO_FLAG_RAW, &zio->io_bookmark));
 3202 
 3203                         if (error == 0) {
 3204                                 if (abd_cmp(tmpabd, zio->io_abd) != 0)
 3205                                         error = SET_ERROR(ENOENT);
 3206                         }
 3207 
 3208                         abd_free(tmpabd);
 3209                         ddt_enter(ddt);
 3210                         return (error != 0);
 3211                 } else if (ddp->ddp_phys_birth != 0) {
 3212                         arc_buf_t *abuf = NULL;
 3213                         arc_flags_t aflags = ARC_FLAG_WAIT;
 3214                         blkptr_t blk = *zio->io_bp;
 3215                         int error;
 3216 
 3217                         ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
 3218 
 3219                         if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
 3220                                 return (B_TRUE);
 3221 
 3222                         ddt_exit(ddt);
 3223 
 3224                         error = arc_read(NULL, spa, &blk,
 3225                             arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
 3226                             ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
 3227                             &aflags, &zio->io_bookmark);
 3228 
 3229                         if (error == 0) {
 3230                                 if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
 3231                                     zio->io_orig_size) != 0)
 3232                                         error = SET_ERROR(ENOENT);
 3233                                 arc_buf_destroy(abuf, &abuf);
 3234                         }
 3235 
 3236                         ddt_enter(ddt);
 3237                         return (error != 0);
 3238                 }
 3239         }
 3240 
 3241         return (B_FALSE);
 3242 }
 3243 
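/*
 * Dedup-verify in miniature: as zio_ddt_collision() above shows, a matching
 * checksum only nominates a block for dedup.  Before the block is shared,
 * the existing copy is read back and compared byte for byte, and any size
 * or content mismatch is treated as a collision.  A standalone sketch with
 * illustrative names.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool
ex_ddt_collision(const uint8_t *new_data, uint64_t new_size,
    const uint8_t *existing_data, uint64_t existing_size)
{
        if (new_size != existing_size)
                return (true);  /* different sizes: cannot be the same block */
        return (memcmp(new_data, existing_data, new_size) != 0);
}
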
 3244 static void
 3245 zio_ddt_child_write_ready(zio_t *zio)
 3246 {
 3247         int p = zio->io_prop.zp_copies;
 3248         ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
 3249         ddt_entry_t *dde = zio->io_private;
 3250         ddt_phys_t *ddp = &dde->dde_phys[p];
 3251         zio_t *pio;
 3252 
 3253         if (zio->io_error)
 3254                 return;
 3255 
 3256         ddt_enter(ddt);
 3257 
 3258         ASSERT(dde->dde_lead_zio[p] == zio);
 3259 
 3260         ddt_phys_fill(ddp, zio->io_bp);
 3261 
 3262         zio_link_t *zl = NULL;
 3263         while ((pio = zio_walk_parents(zio, &zl)) != NULL)
 3264                 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
 3265 
 3266         ddt_exit(ddt);
 3267 }
 3268 
 3269 static void
 3270 zio_ddt_child_write_done(zio_t *zio)
 3271 {
 3272         int p = zio->io_prop.zp_copies;
 3273         ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
 3274         ddt_entry_t *dde = zio->io_private;
 3275         ddt_phys_t *ddp = &dde->dde_phys[p];
 3276 
 3277         ddt_enter(ddt);
 3278 
 3279         ASSERT(ddp->ddp_refcnt == 0);
 3280         ASSERT(dde->dde_lead_zio[p] == zio);
 3281         dde->dde_lead_zio[p] = NULL;
 3282 
 3283         if (zio->io_error == 0) {
 3284                 zio_link_t *zl = NULL;
 3285                 while (zio_walk_parents(zio, &zl) != NULL)
 3286                         ddt_phys_addref(ddp);
 3287         } else {
 3288                 ddt_phys_clear(ddp);
 3289         }
 3290 
 3291         ddt_exit(ddt);
 3292 }
 3293 
 3294 static zio_t *
 3295 zio_ddt_write(zio_t *zio)
 3296 {
 3297         spa_t *spa = zio->io_spa;
 3298         blkptr_t *bp = zio->io_bp;
 3299         uint64_t txg = zio->io_txg;
 3300         zio_prop_t *zp = &zio->io_prop;
 3301         int p = zp->zp_copies;
 3302         zio_t *cio = NULL;
 3303         ddt_t *ddt = ddt_select(spa, bp);
 3304         ddt_entry_t *dde;
 3305         ddt_phys_t *ddp;
 3306 
 3307         ASSERT(BP_GET_DEDUP(bp));
 3308         ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
 3309         ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
 3310         ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
 3311 
 3312         ddt_enter(ddt);
 3313         dde = ddt_lookup(ddt, bp, B_TRUE);
 3314         ddp = &dde->dde_phys[p];
 3315 
 3316         if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
 3317                 /*
 3318                  * If we're using a weak checksum, upgrade to a strong checksum
 3319                  * and try again.  If we're already using a strong checksum,
 3320                  * we can't resolve it, so just convert to an ordinary write.
 3321                  * (And automatically e-mail a paper to Nature?)
 3322                  */
 3323                 if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
 3324                     ZCHECKSUM_FLAG_DEDUP)) {
 3325                         zp->zp_checksum = spa_dedup_checksum(spa);
 3326                         zio_pop_transforms(zio);
 3327                         zio->io_stage = ZIO_STAGE_OPEN;
 3328                         BP_ZERO(bp);
 3329                 } else {
 3330                         zp->zp_dedup = B_FALSE;
 3331                         BP_SET_DEDUP(bp, B_FALSE);
 3332                 }
 3333                 ASSERT(!BP_GET_DEDUP(bp));
 3334                 zio->io_pipeline = ZIO_WRITE_PIPELINE;
 3335                 ddt_exit(ddt);
 3336                 return (zio);
 3337         }
 3338 
 3339         if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
 3340                 if (ddp->ddp_phys_birth != 0)
 3341                         ddt_bp_fill(ddp, bp, txg);
 3342                 if (dde->dde_lead_zio[p] != NULL)
 3343                         zio_add_child(zio, dde->dde_lead_zio[p]);
 3344                 else
 3345                         ddt_phys_addref(ddp);
 3346         } else if (zio->io_bp_override) {
 3347                 ASSERT(bp->blk_birth == txg);
 3348                 ASSERT(BP_EQUAL(bp, zio->io_bp_override));
 3349                 ddt_phys_fill(ddp, bp);
 3350                 ddt_phys_addref(ddp);
 3351         } else {
 3352                 cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
 3353                     zio->io_orig_size, zio->io_orig_size, zp,
 3354                     zio_ddt_child_write_ready, NULL, NULL,
 3355                     zio_ddt_child_write_done, dde, zio->io_priority,
 3356                     ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
 3357 
 3358                 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
 3359                 dde->dde_lead_zio[p] = cio;
 3360         }
 3361 
 3362         ddt_exit(ddt);
 3363 
 3364         zio_nowait(cio);
 3365 
 3366         return (zio);
 3367 }
 3368 
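/*
 * The heart of zio_ddt_write() above is lookup-then-decide: if the dedup
 * table already holds a physical copy for this checksum, the write becomes
 * a reference-count bump; otherwise the data really is written and a new
 * entry is installed.  A standalone, single-threaded sketch of that shape
 * (no lead zio tracking, no verify, no locking); all names are
 * illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef struct {
        uint64_t key[4];        /* the block checksum acts as the dedup key */
        uint64_t refcnt;
        bool in_use;
} ex_dde_t;

/* Returns true when the caller must actually write the block. */
static bool
ex_ddt_write(ex_dde_t *table, int nentries, const uint64_t key[4])
{
        for (int i = 0; i < nentries; i++) {
                if (table[i].in_use &&
                    memcmp(table[i].key, key, sizeof (table[i].key)) == 0) {
                        table[i].refcnt++;      /* duplicate: new reference only */
                        return (false);
                }
        }
        for (int i = 0; i < nentries; i++) {
                if (!table[i].in_use) {
                        memcpy(table[i].key, key, sizeof (table[i].key));
                        table[i].refcnt = 1;
                        table[i].in_use = true;
                        return (true);          /* first copy: do the write */
                }
        }
        return (true);          /* toy table is full: just write without dedup */
}
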
 3369 static ddt_entry_t *freedde; /* for debugging */
 3370 
 3371 static zio_t *
 3372 zio_ddt_free(zio_t *zio)
 3373 {
 3374         spa_t *spa = zio->io_spa;
 3375         blkptr_t *bp = zio->io_bp;
 3376         ddt_t *ddt = ddt_select(spa, bp);
 3377         ddt_entry_t *dde;
 3378         ddt_phys_t *ddp;
 3379 
 3380         ASSERT(BP_GET_DEDUP(bp));
 3381         ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
 3382 
 3383         ddt_enter(ddt);
 3384         freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
 3385         if (dde) {
 3386                 ddp = ddt_phys_select(dde, bp);
 3387                 if (ddp)
 3388                         ddt_phys_decref(ddp);
 3389         }
 3390         ddt_exit(ddt);
 3391 
 3392         return (zio);
 3393 }
 3394 
 3395 /*
 3396  * ==========================================================================
 3397  * Allocate and free blocks
 3398  * ==========================================================================
 3399  */
 3400 
 3401 static zio_t *
 3402 zio_io_to_allocate(spa_t *spa, int allocator)
 3403 {
 3404         zio_t *zio;
 3405 
 3406         ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
 3407 
 3408         zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
 3409         if (zio == NULL)
 3410                 return (NULL);
 3411 
 3412         ASSERT(IO_IS_ALLOCATING(zio));
 3413 
 3414         /*
 3415          * Try to place a reservation for this zio. If we're unable to
 3416          * reserve then we throttle.
 3417          */
 3418         ASSERT3U(zio->io_allocator, ==, allocator);
 3419         if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
 3420             zio->io_prop.zp_copies, allocator, zio, 0)) {
 3421                 return (NULL);
 3422         }
 3423 
 3424         avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
 3425         ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
 3426 
 3427         return (zio);
 3428 }
 3429 
 3430 static zio_t *
 3431 zio_dva_throttle(zio_t *zio)
 3432 {
 3433         spa_t *spa = zio->io_spa;
 3434         zio_t *nio;
 3435         metaslab_class_t *mc;
 3436 
 3437         /* locate an appropriate allocation class */
 3438         mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type,
 3439             zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk);
 3440 
 3441         if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
 3442             !mc->mc_alloc_throttle_enabled ||
 3443             zio->io_child_type == ZIO_CHILD_GANG ||
 3444             zio->io_flags & ZIO_FLAG_NODATA) {
 3445                 return (zio);
 3446         }
 3447 
 3448         ASSERT(zio->io_type == ZIO_TYPE_WRITE);
 3449         ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
 3450         ASSERT3U(zio->io_queued_timestamp, >, 0);
 3451         ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
 3452 
 3453         zbookmark_phys_t *bm = &zio->io_bookmark;
 3454         /*
 3455          * We want to try to use as many allocators as possible to help improve
 3456          * performance, but we also want logically adjacent IOs to be physically
 3457          * adjacent to improve sequential read performance. We chunk each object
 3458          * into 2^20 block regions, and then hash based on the objset, object,
 3459          * level, and region to accomplish both of these goals.
 3460          */
 3461         int allocator = (uint_t)cityhash4(bm->zb_objset, bm->zb_object,
 3462             bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count;
 3463         zio->io_allocator = allocator;
 3464         zio->io_metaslab_class = mc;
 3465         mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
 3466         avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
 3467         nio = zio_io_to_allocate(spa, allocator);
 3468         mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
 3469         return (nio);
 3470 }
 3471 
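/*
 * A standalone sketch of the allocator selection in zio_dva_throttle()
 * above: the bookmark is hashed together with the block ID shifted down by
 * 20 bits, so every block in the same 2^20-block region of an object maps
 * to the same allocator (keeping logically adjacent I/Os physically
 * adjacent) while different regions spread across allocators.  The real
 * code uses cityhash4(); the mixer below is only a stand-in, and all names
 * and constants are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
ex_hash4(uint64_t a, uint64_t b, uint64_t c, uint64_t d)
{
        uint64_t in[4] = { a, b, c, d };
        uint64_t h = 0x9e3779b97f4a7c15ULL;

        for (int i = 0; i < 4; i++) {
                h ^= in[i] + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
                h *= 0xff51afd7ed558ccdULL;
        }
        return (h);
}

int
main(void)
{
        const uint64_t alloc_count = 4;         /* pretend spa_alloc_count */
        const uint64_t objset = 21, object = 1234, level = 0;

        for (uint64_t blkid = 0; blkid < (3ULL << 20); blkid += (1ULL << 19)) {
                uint64_t allocator = ex_hash4(objset, object, level,
                    blkid >> 20) % alloc_count;

                printf("blkid %8llu -> allocator %llu\n",
                    (unsigned long long)blkid, (unsigned long long)allocator);
        }
        return (0);
}
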
 3472 static void
 3473 zio_allocate_dispatch(spa_t *spa, int allocator)
 3474 {
 3475         zio_t *zio;
 3476 
 3477         mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
 3478         zio = zio_io_to_allocate(spa, allocator);
 3479         mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
 3480         if (zio == NULL)
 3481                 return;
 3482 
 3483         ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
 3484         ASSERT0(zio->io_error);
 3485         zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
 3486 }
 3487 
 3488 static zio_t *
 3489 zio_dva_allocate(zio_t *zio)
 3490 {
 3491         spa_t *spa = zio->io_spa;
 3492         metaslab_class_t *mc;
 3493         blkptr_t *bp = zio->io_bp;
 3494         int error;
 3495         int flags = 0;
 3496 
 3497         if (zio->io_gang_leader == NULL) {
 3498                 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
 3499                 zio->io_gang_leader = zio;
 3500         }
 3501 
 3502         ASSERT(BP_IS_HOLE(bp));
 3503         ASSERT0(BP_GET_NDVAS(bp));
 3504         ASSERT3U(zio->io_prop.zp_copies, >, 0);
 3505         ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
 3506         ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
 3507 
 3508         flags |= (zio->io_flags & ZIO_FLAG_FASTWRITE) ? METASLAB_FASTWRITE : 0;
 3509         if (zio->io_flags & ZIO_FLAG_NODATA)
 3510                 flags |= METASLAB_DONT_THROTTLE;
 3511         if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
 3512                 flags |= METASLAB_GANG_CHILD;
 3513         if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
 3514                 flags |= METASLAB_ASYNC_ALLOC;
 3515 
 3516         /*
 3517          * if not already chosen, locate an appropriate allocation class
 3518          */
 3519         mc = zio->io_metaslab_class;
 3520         if (mc == NULL) {
 3521                 mc = spa_preferred_class(spa, zio->io_size,
 3522                     zio->io_prop.zp_type, zio->io_prop.zp_level,
 3523                     zio->io_prop.zp_zpl_smallblk);
 3524                 zio->io_metaslab_class = mc;
 3525         }
 3526 
 3527         /*
 3528          * Try allocating the block in the usual metaslab class.
 3529          * If that's full, allocate it in the normal class.
 3530          * If that's full, allocate as a gang block,
 3531          * and if all are full, the allocation fails (which shouldn't happen).
 3532          *
 3533          * Note that we do not fall back on embedded slog (ZIL) space, to
 3534          * preserve unfragmented slog space, which is critical for decent
 3535          * sync write performance.  If a log allocation fails, we will fall
 3536          * back to spa_sync() which is abysmal for performance.
 3537          */
 3538         error = metaslab_alloc(spa, mc, zio->io_size, bp,
 3539             zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
 3540             &zio->io_alloc_list, zio, zio->io_allocator);
 3541 
 3542         /*
 3543          * Fall back to the normal class when an alloc class is full
 3544          */
 3545         if (error == ENOSPC && mc != spa_normal_class(spa)) {
 3546                 /*
 3547                  * If throttling, transfer reservation over to normal class.
 3548                  * The io_allocator slot can remain the same even though we
 3549                  * are switching classes.
 3550                  */
 3551                 if (mc->mc_alloc_throttle_enabled &&
 3552                     (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
 3553                         metaslab_class_throttle_unreserve(mc,
 3554                             zio->io_prop.zp_copies, zio->io_allocator, zio);
 3555                         zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
 3556 
 3557                         VERIFY(metaslab_class_throttle_reserve(
 3558                             spa_normal_class(spa),
 3559                             zio->io_prop.zp_copies, zio->io_allocator, zio,
 3560                             flags | METASLAB_MUST_RESERVE));
 3561                 }
 3562                 zio->io_metaslab_class = mc = spa_normal_class(spa);
 3563                 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
 3564                         zfs_dbgmsg("%s: metaslab allocation failure, "
 3565                             "trying normal class: zio %px, size %llu, error %d",
 3566                             spa_name(spa), zio, (u_longlong_t)zio->io_size,
 3567                             error);
 3568                 }
 3569 
 3570                 error = metaslab_alloc(spa, mc, zio->io_size, bp,
 3571                     zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
 3572                     &zio->io_alloc_list, zio, zio->io_allocator);
 3573         }
 3574 
 3575         if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
 3576                 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
 3577                         zfs_dbgmsg("%s: metaslab allocation failure, "
 3578                             "trying ganging: zio %px, size %llu, error %d",
 3579                             spa_name(spa), zio, (u_longlong_t)zio->io_size,
 3580                             error);
 3581                 }
 3582                 return (zio_write_gang_block(zio, mc));
 3583         }
 3584         if (error != 0) {
 3585                 if (error != ENOSPC ||
 3586                     (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
 3587                         zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
 3588                             "size %llu, error %d",
 3589                             spa_name(spa), zio, (u_longlong_t)zio->io_size,
 3590                             error);
 3591                 }
 3592                 zio->io_error = error;
 3593         }
 3594 
 3595         return (zio);
 3596 }
 3597 
 3598 static zio_t *
 3599 zio_dva_free(zio_t *zio)
 3600 {
 3601         metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
 3602 
 3603         return (zio);
 3604 }
 3605 
 3606 static zio_t *
 3607 zio_dva_claim(zio_t *zio)
 3608 {
 3609         int error;
 3610 
 3611         error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
 3612         if (error)
 3613                 zio->io_error = error;
 3614 
 3615         return (zio);
 3616 }
 3617 
 3618 /*
 3619  * Undo an allocation.  This is used by zio_done() when an I/O fails
 3620  * and we want to give back the block we just allocated.
 3621  * This handles both normal blocks and gang blocks.
 3622  */
 3623 static void
 3624 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
 3625 {
 3626         ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
 3627         ASSERT(zio->io_bp_override == NULL);
 3628 
 3629         if (!BP_IS_HOLE(bp))
 3630                 metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);
 3631 
 3632         if (gn != NULL) {
 3633                 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
 3634                         zio_dva_unallocate(zio, gn->gn_child[g],
 3635                             &gn->gn_gbh->zg_blkptr[g]);
 3636                 }
 3637         }
 3638 }
 3639 
 3640 /*
 3641  * Try to allocate an intent log block.  Return 0 on success, errno on failure.
 3642  */
 3643 int
 3644 zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
 3645     uint64_t size, boolean_t *slog)
 3646 {
 3647         int error = 1;
 3648         zio_alloc_list_t io_alloc_list;
 3649 
 3650         ASSERT(txg > spa_syncing_txg(spa));
 3651 
 3652         metaslab_trace_init(&io_alloc_list);
 3653 
 3654         /*
 3655          * Block pointer fields are useful to metaslabs for stats and debugging.
 3656          * Fill in the obvious ones before calling into metaslab_alloc().
 3657          */
 3658         BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
 3659         BP_SET_PSIZE(new_bp, size);
 3660         BP_SET_LEVEL(new_bp, 0);
 3661 
 3662         /*
 3663          * When allocating a zil block, we don't have information about
 3664          * the final destination of the block except the objset it's part
 3665          * of, so we just hash the objset ID to pick the allocator to get
 3666          * some parallelism.
 3667          */
 3668         int flags = METASLAB_FASTWRITE | METASLAB_ZIL;
 3669         int allocator = (uint_t)cityhash4(0, 0, 0,
 3670             os->os_dsl_dataset->ds_object) % spa->spa_alloc_count;
 3671         error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
 3672             txg, NULL, flags, &io_alloc_list, NULL, allocator);
 3673         *slog = (error == 0);
 3674         if (error != 0) {
 3675                 error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
 3676                     new_bp, 1, txg, NULL, flags,
 3677                     &io_alloc_list, NULL, allocator);
 3678         }
 3679         if (error != 0) {
 3680                 error = metaslab_alloc(spa, spa_normal_class(spa), size,
 3681                     new_bp, 1, txg, NULL, flags,
 3682                     &io_alloc_list, NULL, allocator);
 3683         }
 3684         metaslab_trace_fini(&io_alloc_list);
 3685 
 3686         if (error == 0) {
 3687                 BP_SET_LSIZE(new_bp, size);
 3688                 BP_SET_PSIZE(new_bp, size);
 3689                 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
 3690                 BP_SET_CHECKSUM(new_bp,
 3691                     spa_version(spa) >= SPA_VERSION_SLIM_ZIL
 3692                     ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
 3693                 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
 3694                 BP_SET_LEVEL(new_bp, 0);
 3695                 BP_SET_DEDUP(new_bp, 0);
 3696                 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
 3697 
 3698                 /*
 3699                  * Encrypted blocks require an IV and salt. We generate
 3700                  * them now because the bp will not be modified again at
 3701                  * rewrite time.
 3702                  */
 3703                 if (os->os_encrypted) {
 3704                         uint8_t iv[ZIO_DATA_IV_LEN];
 3705                         uint8_t salt[ZIO_DATA_SALT_LEN];
 3706 
 3707                         BP_SET_CRYPT(new_bp, B_TRUE);
 3708                         VERIFY0(spa_crypt_get_salt(spa,
 3709                             dmu_objset_id(os), salt));
 3710                         VERIFY0(zio_crypt_generate_iv(iv));
 3711 
 3712                         zio_crypt_encode_params_bp(new_bp, salt, iv);
 3713                 }
 3714         } else {
 3715                 zfs_dbgmsg("%s: zil block allocation failure: "
 3716                     "size %llu, error %d", spa_name(spa), (u_longlong_t)size,
 3717                     error);
 3718         }
 3719 
 3720         return (error);
 3721 }
 3722 
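/*
 * zio_alloc_zil() above walks a fixed preference order (dedicated log
 * class, then embedded log class, then normal class) and keeps the first
 * allocation that succeeds.  A standalone sketch of that try-in-order
 * pattern; the callback type and names are illustrative, not ZFS
 * interfaces.
 */
#include <errno.h>
#include <stddef.h>

typedef int (*ex_alloc_fn_t)(void *arg);        /* returns 0 or ENOSPC */

static int
ex_alloc_with_fallback(ex_alloc_fn_t *classes, int nclasses, void *arg,
    int *class_used)
{
        int error = ENOSPC;

        for (int i = 0; i < nclasses && error != 0; i++) {
                error = classes[i](arg);
                if (error == 0 && class_used != NULL)
                        *class_used = i;        /* 0 == most preferred class */
        }
        return (error);
}
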
 3723 /*
 3724  * ==========================================================================
 3725  * Read and write to physical devices
 3726  * ==========================================================================
 3727  */
 3728 
 3729 /*
 3730  * Issue an I/O to the underlying vdev. Typically the issue pipeline
 3731  * stops after this stage and will resume upon I/O completion.
 3732  * However, there are instances where the vdev layer may need to
 3733  * continue the pipeline when an I/O was not issued. Since the I/O
 3734  * that was sent to the vdev layer might be different than the one
 3735  * currently active in the pipeline (see vdev_queue_io()), we explicitly
 3736  * force the underlying vdev layers to call either zio_execute() or
 3737  * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
 3738  */
 3739 static zio_t *
 3740 zio_vdev_io_start(zio_t *zio)
 3741 {
 3742         vdev_t *vd = zio->io_vd;
 3743         uint64_t align;
 3744         spa_t *spa = zio->io_spa;
 3745 
 3746         zio->io_delay = 0;
 3747 
 3748         ASSERT(zio->io_error == 0);
 3749         ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
 3750 
 3751         if (vd == NULL) {
 3752                 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
 3753                         spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
 3754 
 3755                 /*
 3756                  * The mirror_ops handle multiple DVAs in a single BP.
 3757                  */
 3758                 vdev_mirror_ops.vdev_op_io_start(zio);
 3759                 return (NULL);
 3760         }
 3761 
 3762         ASSERT3P(zio->io_logical, !=, zio);
 3763         if (zio->io_type == ZIO_TYPE_WRITE) {
 3764                 ASSERT(spa->spa_trust_config);
 3765 
 3766                 /*
 3767                  * Note: the code can handle other kinds of writes,
 3768                  * but we don't expect them.
 3769                  */
 3770                 if (zio->io_vd->vdev_noalloc) {
 3771                         ASSERT(zio->io_flags &
 3772                             (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
 3773                             ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
 3774                 }
 3775         }
 3776 
 3777         align = 1ULL << vd->vdev_top->vdev_ashift;
 3778 
 3779         if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
 3780             P2PHASE(zio->io_size, align) != 0) {
 3781                 /* Transform logical writes to be a full physical block size. */
 3782                 uint64_t asize = P2ROUNDUP(zio->io_size, align);
 3783                 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
 3784                 ASSERT(vd == vd->vdev_top);
 3785                 if (zio->io_type == ZIO_TYPE_WRITE) {
 3786                         abd_copy(abuf, zio->io_abd, zio->io_size);
 3787                         abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
 3788                 }
 3789                 zio_push_transform(zio, abuf, asize, asize, zio_subblock);
 3790         }
 3791 
 3792         /*
 3793          * If this is not a physical io, make sure that it is properly aligned
 3794          * before proceeding.
 3795          */
 3796         if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
 3797                 ASSERT0(P2PHASE(zio->io_offset, align));
 3798                 ASSERT0(P2PHASE(zio->io_size, align));
 3799         } else {
 3800                 /*
 3801                  * For physical writes, we allow 512b aligned writes and assume
 3802                  * the device will perform a read-modify-write as necessary.
 3803                  */
 3804                 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
 3805                 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
 3806         }
 3807 
 3808         VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
 3809 
 3810         /*
 3811          * If this is a repair I/O, and there's no self-healing involved --
 3812          * that is, we're just resilvering what we expect to resilver --
 3813          * then don't do the I/O unless zio's txg is actually in vd's DTL.
 3814          * This prevents spurious resilvering.
 3815          *
 3816          * There are a few ways that we can end up creating these spurious
 3817          * resilver i/os:
 3818          *
 3819          * 1. A resilver i/o will be issued if any DVA in the BP has a
 3820          * dirty DTL.  The mirror code will issue resilver writes to
 3821          * each DVA, including the one(s) that are not on vdevs with dirty
 3822          * DTLs.
 3823          *
 3824          * 2. With nested replication, which happens when we have a
 3825          * "replacing" or "spare" vdev that's a child of a mirror or raidz.
 3826          * For example, given mirror(replacing(A+B), C), it's likely that
 3827          * only A is out of date (it's the new device). In this case, we'll
 3828          * read from C, then use the data to resilver A+B -- but we don't
 3829          * actually want to resilver B, just A. The top-level mirror has no
 3830          * way to know this, so instead we just discard unnecessary repairs
 3831          * as we work our way down the vdev tree.
 3832          *
 3833          * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
 3834          * The same logic applies to any form of nested replication: ditto
 3835          * + mirror, RAID-Z + replacing, etc.
 3836          *
 3837          * However, indirect vdevs point off to other vdevs which may have
 3838          * DTL's, so we never bypass them.  The child i/os on concrete vdevs
 3839          * will be properly bypassed instead.
 3840          *
 3841          * Leaf DTL_PARTIAL can be empty when a legitimate write comes from
 3842          * a dRAID spare vdev. For example, when a dRAID spare is first
 3843          * used, its spare blocks need to be written to, but the leaf vdevs
 3844          * holding those blocks can have an empty DTL_PARTIAL.
 3845          *
 3846          * There seemed to be no clean way to allow such writes while
 3847          * bypassing only the spurious ones, so for correctness we simply
 3848          * avoid all bypassing for dRAID.
 3849          */
 3850         if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
 3851             !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
 3852             zio->io_txg != 0 && /* not a delegated i/o */
 3853             vd->vdev_ops != &vdev_indirect_ops &&
 3854             vd->vdev_top->vdev_ops != &vdev_draid_ops &&
 3855             !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
 3856                 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
 3857                 zio_vdev_io_bypass(zio);
 3858                 return (zio);
 3859         }
 3860 
 3861         /*
 3862          * Select the next best leaf I/O to process.  Distributed spares are
 3863          * excluded since they dispatch the I/O directly to a leaf vdev after
 3864          * applying the dRAID mapping.
 3865          */
 3866         if (vd->vdev_ops->vdev_op_leaf &&
 3867             vd->vdev_ops != &vdev_draid_spare_ops &&
 3868             (zio->io_type == ZIO_TYPE_READ ||
 3869             zio->io_type == ZIO_TYPE_WRITE ||
 3870             zio->io_type == ZIO_TYPE_TRIM)) {
 3871 
 3872                 if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio))
 3873                         return (zio);
 3874 
 3875                 if ((zio = vdev_queue_io(zio)) == NULL)
 3876                         return (NULL);
 3877 
 3878                 if (!vdev_accessible(vd, zio)) {
 3879                         zio->io_error = SET_ERROR(ENXIO);
 3880                         zio_interrupt(zio);
 3881                         return (NULL);
 3882                 }
 3883                 zio->io_delay = gethrtime();
 3884         }
 3885 
 3886         vd->vdev_ops->vdev_op_io_start(zio);
 3887         return (NULL);
 3888 }
 3889 
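/*
 * A standalone sketch of the alignment transform in zio_vdev_io_start()
 * above: P2PHASE() detects a logical I/O whose size is not a multiple of
 * the vdev's 1 << ashift block size, and P2ROUNDUP() picks the padded size
 * (writes additionally zero-fill the tail).  The macros mirror the usual
 * sysmacros.h bit tricks; the ashift and size values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_P2ROUNDUP(x, align)  (-(-(uint64_t)(x) & -(uint64_t)(align)))
#define EX_P2PHASE(x, align)    ((uint64_t)(x) & ((uint64_t)(align) - 1))

int
main(void)
{
        const uint64_t ashift = 12;             /* a 4 KiB-sector top-level vdev */
        const uint64_t align = 1ULL << ashift;
        const uint64_t io_size = 5120;          /* a 5 KiB logical I/O */

        if (EX_P2PHASE(io_size, align) != 0) {
                uint64_t asize = EX_P2ROUNDUP(io_size, align);

                printf("pad %llu-byte I/O out to %llu bytes\n",
                    (unsigned long long)io_size, (unsigned long long)asize);
        }
        return (0);
}
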
 3890 static zio_t *
 3891 zio_vdev_io_done(zio_t *zio)
 3892 {
 3893         vdev_t *vd = zio->io_vd;
 3894         vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
 3895         boolean_t unexpected_error = B_FALSE;
 3896 
 3897         if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
 3898                 return (NULL);
 3899         }
 3900 
 3901         ASSERT(zio->io_type == ZIO_TYPE_READ ||
 3902             zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM);
 3903 
 3904         if (zio->io_delay)
 3905                 zio->io_delay = gethrtime() - zio->io_delay;
 3906 
 3907         if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
 3908             vd->vdev_ops != &vdev_draid_spare_ops) {
 3909                 vdev_queue_io_done(zio);
 3910 
 3911                 if (zio->io_type == ZIO_TYPE_WRITE)
 3912                         vdev_cache_write(zio);
 3913 
 3914                 if (zio_injection_enabled && zio->io_error == 0)
 3915                         zio->io_error = zio_handle_device_injections(vd, zio,
 3916                             EIO, EILSEQ);
 3917 
 3918                 if (zio_injection_enabled && zio->io_error == 0)
 3919                         zio->io_error = zio_handle_label_injection(zio, EIO);
 3920 
 3921                 if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) {
 3922                         if (!vdev_accessible(vd, zio)) {
 3923                                 zio->io_error = SET_ERROR(ENXIO);
 3924                         } else {
 3925                                 unexpected_error = B_TRUE;
 3926                         }
 3927                 }
 3928         }
 3929 
 3930         ops->vdev_op_io_done(zio);
 3931 
 3932         if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
 3933                 VERIFY(vdev_probe(vd, zio) == NULL);
 3934 
 3935         return (zio);
 3936 }
 3937 
 3938 /*
 3939  * This function is used to change the priority of an existing zio that is
 3940  * currently in-flight. This is used by the arc to upgrade priority in the
 3941  * event that a demand read is made for a block that is currently queued
 3942  * as a scrub or async read IO. Otherwise, the high priority read request
 3943  * would end up having to wait for the lower priority IO.
 3944  */
 3945 void
 3946 zio_change_priority(zio_t *pio, zio_priority_t priority)
 3947 {
 3948         zio_t *cio, *cio_next;
 3949         zio_link_t *zl = NULL;
 3950 
 3951         ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
 3952 
 3953         if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
 3954                 vdev_queue_change_io_priority(pio, priority);
 3955         } else {
 3956                 pio->io_priority = priority;
 3957         }
 3958 
 3959         mutex_enter(&pio->io_lock);
 3960         for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
 3961                 cio_next = zio_walk_children(pio, &zl);
 3962                 zio_change_priority(cio, priority);
 3963         }
 3964         mutex_exit(&pio->io_lock);
 3965 }
 3966 
 3967 /*
 3968  * For non-raidz ZIOs, we can just copy aside the bad data read from the
 3969  * disk, and use that to finish the checksum ereport later.
 3970  */
 3971 static void
 3972 zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
 3973     const abd_t *good_buf)
 3974 {
 3975         /* no processing needed */
 3976         zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
 3977 }
 3978 
 3979 void
 3980 zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
 3981 {
 3982         void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
 3983 
 3984         abd_copy(abd, zio->io_abd, zio->io_size);
 3985 
 3986         zcr->zcr_cbinfo = zio->io_size;
 3987         zcr->zcr_cbdata = abd;
 3988         zcr->zcr_finish = zio_vsd_default_cksum_finish;
 3989         zcr->zcr_free = zio_abd_free;
 3990 }
 3991 
 3992 static zio_t *
 3993 zio_vdev_io_assess(zio_t *zio)
 3994 {
 3995         vdev_t *vd = zio->io_vd;
 3996 
 3997         if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
 3998                 return (NULL);
 3999         }
 4000 
 4001         if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
 4002                 spa_config_exit(zio->io_spa, SCL_ZIO, zio);
 4003 
 4004         if (zio->io_vsd != NULL) {
 4005                 zio->io_vsd_ops->vsd_free(zio);
 4006                 zio->io_vsd = NULL;
 4007         }
 4008 
 4009         if (zio_injection_enabled && zio->io_error == 0)
 4010                 zio->io_error = zio_handle_fault_injection(zio, EIO);
 4011 
 4012         /*
 4013          * If the I/O failed, determine whether we should attempt to retry it.
 4014          *
 4015          * On retry, we cut in line in the issue queue, since we don't want
 4016          * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
 4017          */
 4018         if (zio->io_error && vd == NULL &&
 4019             !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
 4020                 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
 4021                 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS));  /* not a leaf */
 4022                 zio->io_error = 0;
 4023                 zio->io_flags |= ZIO_FLAG_IO_RETRY |
 4024                     ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
 4025                 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
 4026                 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
 4027                     zio_requeue_io_start_cut_in_line);
 4028                 return (NULL);
 4029         }
 4030 
 4031         /*
 4032          * If we got an error on a leaf device, convert it to ENXIO
 4033          * if the device is not accessible at all.
 4034          */
 4035         if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
 4036             !vdev_accessible(vd, zio))
 4037                 zio->io_error = SET_ERROR(ENXIO);
 4038 
 4039         /*
 4040          * If we can't write to an interior vdev (mirror or RAID-Z),
 4041          * set vdev_cant_write so that we stop trying to allocate from it.
 4042          */
 4043         if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
 4044             vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
 4045                 vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
 4046                     "cant_write=TRUE due to write failure with ENXIO",
 4047                     zio);
 4048                 vd->vdev_cant_write = B_TRUE;
 4049         }
 4050 
 4051         /*
 4052          * If a cache flush returns ENOTSUP or ENOTTY, we know that no future
 4053          * attempts will ever succeed. In this case we set a persistent
 4054          * boolean flag so that we don't bother with it in the future.
 4055          */
 4056         if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
 4057             zio->io_type == ZIO_TYPE_IOCTL &&
 4058             zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL)
 4059                 vd->vdev_nowritecache = B_TRUE;
 4060 
 4061         if (zio->io_error)
 4062                 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
 4063 
 4064         if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
 4065             zio->io_physdone != NULL) {
 4066                 ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED));
 4067                 ASSERT(zio->io_child_type == ZIO_CHILD_VDEV);
 4068                 zio->io_physdone(zio->io_logical);
 4069         }
 4070 
 4071         return (zio);
 4072 }
 4073 
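      /*
       * Note on the "io_stage >>= 1" idiom used by the helpers below:
       * pipeline stages are one-hot bits, and zio_execute() advances to
       * the next stage bit in io_pipeline that is greater than io_stage.
       * Backing io_stage up to the preceding bit therefore re-runs the
       * stage named in the ASSERT (VDEV_IO_START or VDEV_IO_DONE), while
       * zio_vdev_io_bypass() jumps straight to VDEV_IO_ASSESS.
       */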
 4074 void
 4075 zio_vdev_io_reissue(zio_t *zio)
 4076 {
 4077         ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
 4078         ASSERT(zio->io_error == 0);
 4079 
 4080         zio->io_stage >>= 1;
 4081 }
 4082 
 4083 void
 4084 zio_vdev_io_redone(zio_t *zio)
 4085 {
 4086         ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
 4087 
 4088         zio->io_stage >>= 1;
 4089 }
 4090 
 4091 void
 4092 zio_vdev_io_bypass(zio_t *zio)
 4093 {
 4094         ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
 4095         ASSERT(zio->io_error == 0);
 4096 
 4097         zio->io_flags |= ZIO_FLAG_IO_BYPASS;
 4098         zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
 4099 }
 4100 
 4101 /*
 4102  * ==========================================================================
 4103  * Encrypt and store encryption parameters
 4104  * ==========================================================================
 4105  */
 4106 
 4107 
 4108 /*
 4109  * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
 4110  * managing the storage of encryption parameters and passing them to the
 4111  * lower-level encryption functions.
 4112  */
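      /*
       * In outline: gang children and non-allocating rewrites (other than
       * ZIL blocks) pass through untouched; unencrypted blocks just clear
       * the crypt bit; raw sends reuse the caller-supplied salt, IV and
       * MAC; indirect blocks and unencrypted-but-authenticated types only
       * store a MAC; objset blocks use the MACs embedded in objset_phys_t;
       * everything else is encrypted via spa_do_crypt_abd(), with ZIL
       * blocks reusing the parameters already encoded in the block pointer.
       */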
 4113 static zio_t *
 4114 zio_encrypt(zio_t *zio)
 4115 {
 4116         zio_prop_t *zp = &zio->io_prop;
 4117         spa_t *spa = zio->io_spa;
 4118         blkptr_t *bp = zio->io_bp;
 4119         uint64_t psize = BP_GET_PSIZE(bp);
 4120         uint64_t dsobj = zio->io_bookmark.zb_objset;
 4121         dmu_object_type_t ot = BP_GET_TYPE(bp);
 4122         void *enc_buf = NULL;
 4123         abd_t *eabd = NULL;
 4124         uint8_t salt[ZIO_DATA_SALT_LEN];
 4125         uint8_t iv[ZIO_DATA_IV_LEN];
 4126         uint8_t mac[ZIO_DATA_MAC_LEN];
 4127         boolean_t no_crypt = B_FALSE;
 4128 
 4129         /* the root zio already encrypted the data */
 4130         if (zio->io_child_type == ZIO_CHILD_GANG)
 4131                 return (zio);
 4132 
 4133         /* only ZIL blocks are re-encrypted on rewrite */
 4134         if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
 4135                 return (zio);
 4136 
 4137         if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
 4138                 BP_SET_CRYPT(bp, B_FALSE);
 4139                 return (zio);
 4140         }
 4141 
 4142         /* if we are doing raw encryption set the provided encryption params */
 4143         if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
 4144                 ASSERT0(BP_GET_LEVEL(bp));
 4145                 BP_SET_CRYPT(bp, B_TRUE);
 4146                 BP_SET_BYTEORDER(bp, zp->zp_byteorder);
 4147                 if (ot != DMU_OT_OBJSET)
 4148                         zio_crypt_encode_mac_bp(bp, zp->zp_mac);
 4149 
 4150                 /* dnode blocks must be written out in the provided byteorder */
 4151                 if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
 4152                     ot == DMU_OT_DNODE) {
 4153                         void *bswap_buf = zio_buf_alloc(psize);
 4154                         abd_t *babd = abd_get_from_buf(bswap_buf, psize);
 4155 
 4156                         ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
 4157                         abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
 4158                         dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
 4159                             psize);
 4160 
 4161                         abd_take_ownership_of_buf(babd, B_TRUE);
 4162                         zio_push_transform(zio, babd, psize, psize, NULL);
 4163                 }
 4164 
 4165                 if (DMU_OT_IS_ENCRYPTED(ot))
 4166                         zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
 4167                 return (zio);
 4168         }
 4169 
 4170         /* indirect blocks only maintain a cksum of the lower level MACs */
 4171         if (BP_GET_LEVEL(bp) > 0) {
 4172                 BP_SET_CRYPT(bp, B_TRUE);
 4173                 VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
 4174                     zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
 4175                     mac));
 4176                 zio_crypt_encode_mac_bp(bp, mac);
 4177                 return (zio);
 4178         }
 4179 
 4180         /*
 4181          * Objset blocks are a special case since they have two 256-bit MACs
 4182          * embedded within them.
 4183          */
 4184         if (ot == DMU_OT_OBJSET) {
 4185                 ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
 4186                 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
 4187                 BP_SET_CRYPT(bp, B_TRUE);
 4188                 VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
 4189                     zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
 4190                 return (zio);
 4191         }
 4192 
 4193         /* unencrypted object types are only authenticated with a MAC */
 4194         if (!DMU_OT_IS_ENCRYPTED(ot)) {
 4195                 BP_SET_CRYPT(bp, B_TRUE);
 4196                 VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
 4197                     zio->io_abd, psize, mac));
 4198                 zio_crypt_encode_mac_bp(bp, mac);
 4199                 return (zio);
 4200         }
 4201 
 4202         /*
 4203          * Later passes of sync-to-convergence may decide to rewrite data
 4204          * in place to avoid more disk reallocations. This presents a problem
 4205          * for encryption because this constitutes rewriting the new data with
 4206          * the same encryption key and IV. However, this only applies to blocks
 4207          * in the MOS (particularly the spacemaps) and we do not encrypt the
 4208          * MOS. We assert that the zio is allocating or an intent log write
 4209          * to enforce this.
 4210          */
 4211         ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
 4212         ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
 4213         ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
 4214         ASSERT3U(psize, !=, 0);
 4215 
 4216         enc_buf = zio_buf_alloc(psize);
 4217         eabd = abd_get_from_buf(enc_buf, psize);
 4218         abd_take_ownership_of_buf(eabd, B_TRUE);
 4219 
 4220         /*
 4221          * For an explanation of what encryption parameters are stored
 4222          * where, see the block comment in zio_crypt.c.
 4223          */
 4224         if (ot == DMU_OT_INTENT_LOG) {
 4225                 zio_crypt_decode_params_bp(bp, salt, iv);
 4226         } else {
 4227                 BP_SET_CRYPT(bp, B_TRUE);
 4228         }
 4229 
 4230         /* Perform the encryption. This should not fail */
 4231         VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
 4232             BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
 4233             salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
 4234 
 4235         /* encode encryption metadata into the bp */
 4236         if (ot == DMU_OT_INTENT_LOG) {
 4237                 /*
 4238                  * ZIL blocks store the MAC in the embedded checksum, so the
 4239                  * transform must always be applied.
 4240                  */
 4241                 zio_crypt_encode_mac_zil(enc_buf, mac);
 4242                 zio_push_transform(zio, eabd, psize, psize, NULL);
 4243         } else {
 4244                 BP_SET_CRYPT(bp, B_TRUE);
 4245                 zio_crypt_encode_params_bp(bp, salt, iv);
 4246                 zio_crypt_encode_mac_bp(bp, mac);
 4247 
 4248                 if (no_crypt) {
 4249                         ASSERT3U(ot, ==, DMU_OT_DNODE);
 4250                         abd_free(eabd);
 4251                 } else {
 4252                         zio_push_transform(zio, eabd, psize, psize, NULL);
 4253                 }
 4254         }
 4255 
 4256         return (zio);
 4257 }
 4258 
 4259 /*
 4260  * ==========================================================================
 4261  * Generate and verify checksums
 4262  * ==========================================================================
 4263  */
 4264 static zio_t *
 4265 zio_checksum_generate(zio_t *zio)
 4266 {
 4267         blkptr_t *bp = zio->io_bp;
 4268         enum zio_checksum checksum;
 4269 
 4270         if (bp == NULL) {
 4271                 /*
 4272                  * This is zio_write_phys().
 4273                  * We're either generating a label checksum, or none at all.
 4274                  */
 4275                 checksum = zio->io_prop.zp_checksum;
 4276 
 4277                 if (checksum == ZIO_CHECKSUM_OFF)
 4278                         return (zio);
 4279 
 4280                 ASSERT(checksum == ZIO_CHECKSUM_LABEL);
 4281         } else {
 4282                 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
 4283                         ASSERT(!IO_IS_ALLOCATING(zio));
 4284                         checksum = ZIO_CHECKSUM_GANG_HEADER;
 4285                 } else {
 4286                         checksum = BP_GET_CHECKSUM(bp);
 4287                 }
 4288         }
 4289 
 4290         zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
 4291 
 4292         return (zio);
 4293 }
 4294 
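      /*
       * Verify the checksum of the data we just read (or of the label, for
       * zio_read_phys()).  On failure the error is recorded in io_error;
       * for non-speculative I/Os we also bump the vdev's checksum error
       * count and start a checksum ereport, which is finished later in
       * zio_done().
       */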
 4295 static zio_t *
 4296 zio_checksum_verify(zio_t *zio)
 4297 {
 4298         zio_bad_cksum_t info;
 4299         blkptr_t *bp = zio->io_bp;
 4300         int error;
 4301 
 4302         ASSERT(zio->io_vd != NULL);
 4303 
 4304         if (bp == NULL) {
 4305                 /*
 4306                  * This is zio_read_phys().
 4307                  * We're either verifying a label checksum, or nothing at all.
 4308                  */
 4309                 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
 4310                         return (zio);
 4311 
 4312                 ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
 4313         }
 4314 
 4315         if ((error = zio_checksum_error(zio, &info)) != 0) {
 4316                 zio->io_error = error;
 4317                 if (error == ECKSUM &&
 4318                     !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
 4319                         mutex_enter(&zio->io_vd->vdev_stat_lock);
 4320                         zio->io_vd->vdev_stat.vs_checksum_errors++;
 4321                         mutex_exit(&zio->io_vd->vdev_stat_lock);
 4322                         (void) zfs_ereport_start_checksum(zio->io_spa,
 4323                             zio->io_vd, &zio->io_bookmark, zio,
 4324                             zio->io_offset, zio->io_size, &info);
 4325                 }
 4326         }
 4327 
 4328         return (zio);
 4329 }
 4330 
 4331 /*
 4332  * Called by RAID-Z to ensure we don't compute the checksum twice.
 4333  */
 4334 void
 4335 zio_checksum_verified(zio_t *zio)
 4336 {
 4337         zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
 4338 }
 4339 
 4340 /*
 4341  * ==========================================================================
 4342  * Error rank.  Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
 4343  * An error of 0 indicates success.  ENXIO indicates whole-device failure,
 4344  * which may be transient (e.g. unplugged) or permanent.  ECKSUM and EIO
 4345  * indicate errors that are specific to one I/O, and most likely permanent.
 4346  * Any other error is presumed to be worse because we weren't expecting it.
 4347  * ==========================================================================
 4348  */
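      /*
       * For example, zio_worst_error(ENXIO, ECKSUM) returns ECKSUM, and an
       * errno missing from the table below (say EINVAL) outranks them all,
       * so zio_worst_error(EIO, EINVAL) returns EINVAL.
       */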
 4349 int
 4350 zio_worst_error(int e1, int e2)
 4351 {
 4352         static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
 4353         int r1, r2;
 4354 
 4355         for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
 4356                 if (e1 == zio_error_rank[r1])
 4357                         break;
 4358 
 4359         for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
 4360                 if (e2 == zio_error_rank[r2])
 4361                         break;
 4362 
 4363         return (r1 > r2 ? e1 : e2);
 4364 }
 4365 
 4366 /*
 4367  * ==========================================================================
 4368  * I/O completion
 4369  * ==========================================================================
 4370  */
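      /*
       * The READY stage: runs once all gang and DDT children are ready.
       * It invokes the zio's io_ready callback, unwinds the allocation
       * throttle reservation if the allocation failed, and then notifies
       * any waiting parents that this child is ready.
       */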
 4371 static zio_t *
 4372 zio_ready(zio_t *zio)
 4373 {
 4374         blkptr_t *bp = zio->io_bp;
 4375         zio_t *pio, *pio_next;
 4376         zio_link_t *zl = NULL;
 4377 
 4378         if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT,
 4379             ZIO_WAIT_READY)) {
 4380                 return (NULL);
 4381         }
 4382 
 4383         if (zio->io_ready) {
 4384                 ASSERT(IO_IS_ALLOCATING(zio));
 4385                 ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
 4386                     (zio->io_flags & ZIO_FLAG_NOPWRITE));
 4387                 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
 4388 
 4389                 zio->io_ready(zio);
 4390         }
 4391 
 4392         if (bp != NULL && bp != &zio->io_bp_copy)
 4393                 zio->io_bp_copy = *bp;
 4394 
 4395         if (zio->io_error != 0) {
 4396                 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
 4397 
 4398                 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
 4399                         ASSERT(IO_IS_ALLOCATING(zio));
 4400                         ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
 4401                         ASSERT(zio->io_metaslab_class != NULL);
 4402 
 4403                         /*
 4404                          * We were unable to allocate anything, unreserve and
 4405                          * issue the next I/O to allocate.
 4406                          */
 4407                         metaslab_class_throttle_unreserve(
 4408                             zio->io_metaslab_class, zio->io_prop.zp_copies,
 4409                             zio->io_allocator, zio);
 4410                         zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
 4411                 }
 4412         }
 4413 
 4414         mutex_enter(&zio->io_lock);
 4415         zio->io_state[ZIO_WAIT_READY] = 1;
 4416         pio = zio_walk_parents(zio, &zl);
 4417         mutex_exit(&zio->io_lock);
 4418 
 4419         /*
 4420          * As we notify zio's parents, new parents could be added.
 4421          * New parents go to the head of zio's io_parent_list, however,
 4422          * so we will (correctly) not notify them.  The remainder of zio's
 4423          * io_parent_list, from 'pio_next' onward, cannot change because
 4424          * all parents must wait for us to be done before they can be done.
 4425          */
 4426         for (; pio != NULL; pio = pio_next) {
 4427                 pio_next = zio_walk_parents(zio, &zl);
 4428                 zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
 4429         }
 4430 
 4431         if (zio->io_flags & ZIO_FLAG_NODATA) {
 4432                 if (BP_IS_GANG(bp)) {
 4433                         zio->io_flags &= ~ZIO_FLAG_NODATA;
 4434                 } else {
 4435                         ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
 4436                         zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
 4437                 }
 4438         }
 4439 
 4440         if (zio_injection_enabled &&
 4441             zio->io_spa->spa_syncing_txg == zio->io_txg)
 4442                 zio_handle_ignored_writes(zio);
 4443 
 4444         return (zio);
 4445 }
 4446 
 4447 /*
 4448  * Update the allocation throttle accounting.
 4449  */
 4450 static void
 4451 zio_dva_throttle_done(zio_t *zio)
 4452 {
 4453         zio_t *lio __maybe_unused = zio->io_logical;
 4454         zio_t *pio = zio_unique_parent(zio);
 4455         vdev_t *vd = zio->io_vd;
 4456         int flags = METASLAB_ASYNC_ALLOC;
 4457 
 4458         ASSERT3P(zio->io_bp, !=, NULL);
 4459         ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
 4460         ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
 4461         ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
 4462         ASSERT(vd != NULL);
 4463         ASSERT3P(vd, ==, vd->vdev_top);
 4464         ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
 4465         ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
 4466         ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
 4467         ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
 4468         ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
 4469 
 4470         /*
 4471          * Parents of gang children can have two flavors -- ones that
 4472          * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
 4473          * and ones that allocated the constituent blocks. The allocation
 4474          * throttle needs to know the allocating parent zio so we must find
 4475          * it here.
 4476          */
 4477         if (pio->io_child_type == ZIO_CHILD_GANG) {
 4478                 /*
 4479                  * If our parent is a rewrite gang child then our grandparent
 4480                  * would have been the one that performed the allocation.
 4481                  */
 4482                 if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
 4483                         pio = zio_unique_parent(pio);
 4484                 flags |= METASLAB_GANG_CHILD;
 4485         }
 4486 
 4487         ASSERT(IO_IS_ALLOCATING(pio));
 4488         ASSERT3P(zio, !=, zio->io_logical);
 4489         ASSERT(zio->io_logical != NULL);
 4490         ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
 4491         ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
 4492         ASSERT(zio->io_metaslab_class != NULL);
 4493 
 4494         mutex_enter(&pio->io_lock);
 4495         metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
 4496             pio->io_allocator, B_TRUE);
 4497         mutex_exit(&pio->io_lock);
 4498 
 4499         metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
 4500             pio->io_allocator, pio);
 4501 
 4502         /*
 4503          * Call into the pipeline to see if there is more work that
 4504          * needs to be done. If there is work to be done it will be
 4505          * dispatched to another taskq thread.
 4506          */
 4507         zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
 4508 }
 4509 
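      /*
       * The DONE stage: runs once every child has completed.  It updates
       * allocation-throttle accounting and vdev stats, posts any ereports,
       * decides whether the zio should be reexecuted or suspended, and
       * finally notifies its parents and either wakes the waiter or
       * destroys the zio.
       */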
 4510 static zio_t *
 4511 zio_done(zio_t *zio)
 4512 {
 4513         /*
 4514          * Always attempt to keep stack usage minimal here since
 4515          * we can be called recursively up to 19 levels deep.
 4516          */
 4517         const uint64_t psize = zio->io_size;
 4518         zio_t *pio, *pio_next;
 4519         zio_link_t *zl = NULL;
 4520 
 4521         /*
 4522          * If our children haven't all completed,
 4523          * wait for them and then repeat this pipeline stage.
 4524          */
 4525         if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
 4526                 return (NULL);
 4527         }
 4528 
 4529         /*
 4530          * If the allocation throttle is enabled, then update the accounting.
 4531          * We only track child I/Os that are part of an allocating async
 4532          * write. We must do this since the allocation is performed
 4533          * by the logical I/O but the actual write is done by child I/Os.
 4534          */
 4535         if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
 4536             zio->io_child_type == ZIO_CHILD_VDEV) {
 4537                 ASSERT(zio->io_metaslab_class != NULL);
 4538                 ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
 4539                 zio_dva_throttle_done(zio);
 4540         }
 4541 
 4542         /*
 4543          * If the allocation throttle is enabled, verify that
 4544          * we have decremented the refcounts for every I/O that was throttled.
 4545          */
 4546         if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
 4547                 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
 4548                 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
 4549                 ASSERT(zio->io_bp != NULL);
 4550 
 4551                 metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
 4552                     zio->io_allocator);
 4553                 VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
 4554                     mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
 4555         }
 4556 
 4557 
 4558         for (int c = 0; c < ZIO_CHILD_TYPES; c++)
 4559                 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
 4560                         ASSERT(zio->io_children[c][w] == 0);
 4561 
 4562         if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
 4563                 ASSERT(zio->io_bp->blk_pad[0] == 0);
 4564                 ASSERT(zio->io_bp->blk_pad[1] == 0);
 4565                 ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
 4566                     sizeof (blkptr_t)) == 0 ||
 4567                     (zio->io_bp == zio_unique_parent(zio)->io_bp));
 4568                 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
 4569                     zio->io_bp_override == NULL &&
 4570                     !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
 4571                         ASSERT3U(zio->io_prop.zp_copies, <=,
 4572                             BP_GET_NDVAS(zio->io_bp));
 4573                         ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
 4574                             (BP_COUNT_GANG(zio->io_bp) ==
 4575                             BP_GET_NDVAS(zio->io_bp)));
 4576                 }
 4577                 if (zio->io_flags & ZIO_FLAG_NOPWRITE)
 4578                         VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
 4579         }
 4580 
 4581         /*
 4582          * If there were child vdev/gang/ddt errors, they apply to us now.
 4583          */
 4584         zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
 4585         zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
 4586         zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
 4587 
 4588         /*
 4589          * If the I/O on the transformed data was successful, generate any
 4590          * checksum reports now while we still have the transformed data.
 4591          */
 4592         if (zio->io_error == 0) {
 4593                 while (zio->io_cksum_report != NULL) {
 4594                         zio_cksum_report_t *zcr = zio->io_cksum_report;
 4595                         uint64_t align = zcr->zcr_align;
 4596                         uint64_t asize = P2ROUNDUP(psize, align);
 4597                         abd_t *adata = zio->io_abd;
 4598 
 4599                         if (adata != NULL && asize != psize) {
 4600                                 adata = abd_alloc(asize, B_TRUE);
 4601                                 abd_copy(adata, zio->io_abd, psize);
 4602                                 abd_zero_off(adata, psize, asize - psize);
 4603                         }
 4604 
 4605                         zio->io_cksum_report = zcr->zcr_next;
 4606                         zcr->zcr_next = NULL;
 4607                         zcr->zcr_finish(zcr, adata);
 4608                         zfs_ereport_free_checksum(zcr);
 4609 
 4610                         if (adata != NULL && asize != psize)
 4611                                 abd_free(adata);
 4612                 }
 4613         }
 4614 
 4615         zio_pop_transforms(zio);        /* note: may set zio->io_error */
 4616 
 4617         vdev_stat_update(zio, psize);
 4618 
 4619         /*
 4620          * If this I/O is attached to a particular vdev and is slow, exceeding
 4621          * zio_slow_io_ms to complete, post an error describing the I/O delay.
 4622          * We ignore these errors if the device is currently unavailable.
 4623          */
 4624         if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
 4625                 if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
 4626                         /*
 4627                          * We want to only increment our slow IO counters if
 4628                          * the IO is valid (i.e. not if the drive is removed).
 4629                          *
 4630                          * zfs_ereport_post() will also do these checks, but
 4631                          * it can also ratelimit and have other failures, so we
 4632                          * need to increment the slow_io counters independent
 4633                          * of it.
 4634                          */
 4635                         if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
 4636                             zio->io_spa, zio->io_vd, zio)) {
 4637                                 mutex_enter(&zio->io_vd->vdev_stat_lock);
 4638                                 zio->io_vd->vdev_stat.vs_slow_ios++;
 4639                                 mutex_exit(&zio->io_vd->vdev_stat_lock);
 4640 
 4641                                 (void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
 4642                                     zio->io_spa, zio->io_vd, &zio->io_bookmark,
 4643                                     zio, 0);
 4644                         }
 4645                 }
 4646         }
 4647 
 4648         if (zio->io_error) {
 4649                 /*
 4650                  * If this I/O is attached to a particular vdev,
 4651                  * generate an error message describing the I/O failure
 4652                  * at the block level.  We ignore these errors if the
 4653                  * device is currently unavailable.
 4654                  */
 4655                 if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
 4656                     !vdev_is_dead(zio->io_vd)) {
 4657                         int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
 4658                             zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
 4659                         if (ret != EALREADY) {
 4660                                 mutex_enter(&zio->io_vd->vdev_stat_lock);
 4661                                 if (zio->io_type == ZIO_TYPE_READ)
 4662                                         zio->io_vd->vdev_stat.vs_read_errors++;
 4663                                 else if (zio->io_type == ZIO_TYPE_WRITE)
 4664                                         zio->io_vd->vdev_stat.vs_write_errors++;
 4665                                 mutex_exit(&zio->io_vd->vdev_stat_lock);
 4666                         }
 4667                 }
 4668 
 4669                 if ((zio->io_error == EIO || !(zio->io_flags &
 4670                     (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
 4671                     zio == zio->io_logical) {
 4672                         /*
 4673                          * For logical I/O requests, tell the SPA to log the
 4674                          * error and generate a logical data ereport.
 4675                          */
 4676                         spa_log_error(zio->io_spa, &zio->io_bookmark);
 4677                         (void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
 4678                             zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
 4679                 }
 4680         }
 4681 
 4682         if (zio->io_error && zio == zio->io_logical) {
 4683                 /*
 4684                  * Determine whether zio should be reexecuted.  This will
 4685                  * propagate all the way to the root via zio_notify_parent().
 4686                  */
 4687                 ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
 4688                 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
 4689 
 4690                 if (IO_IS_ALLOCATING(zio) &&
 4691                     !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
 4692                         if (zio->io_error != ENOSPC)
 4693                                 zio->io_reexecute |= ZIO_REEXECUTE_NOW;
 4694                         else
 4695                                 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
 4696                 }
 4697 
 4698                 if ((zio->io_type == ZIO_TYPE_READ ||
 4699                     zio->io_type == ZIO_TYPE_FREE) &&
 4700                     !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
 4701                     zio->io_error == ENXIO &&
 4702                     spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
 4703                     spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
 4704                         zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
 4705 
 4706                 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
 4707                         zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
 4708 
 4709                 /*
 4710                  * Here is a possibly good place to attempt to do
 4711                  * either combinatorial reconstruction or error correction
 4712                  * based on checksums.  It also might be a good place
 4713                  * to send out preliminary ereports before we suspend
 4714                  * processing.
 4715                  */
 4716         }
 4717 
 4718         /*
 4719          * If there were logical child errors, they apply to us now.
 4720          * We defer this until now to avoid conflating logical child
 4721          * errors with errors that happened to the zio itself when
 4722          * updating vdev stats and reporting FMA events above.
 4723          */
 4724         zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
 4725 
 4726         if ((zio->io_error || zio->io_reexecute) &&
 4727             IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
 4728             !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
 4729                 zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
 4730 
 4731         zio_gang_tree_free(&zio->io_gang_tree);
 4732 
 4733         /*
 4734          * Godfather I/Os should never suspend.
 4735          */
 4736         if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
 4737             (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
 4738                 zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
 4739 
 4740         if (zio->io_reexecute) {
 4741                 /*
 4742                  * This is a logical I/O that wants to reexecute.
 4743                  *
 4744                  * Reexecute is top-down.  When an i/o fails, if it's not
 4745                  * the root, it simply notifies its parent and sticks around.
 4746                  * The parent, seeing that it still has children in zio_done(),
 4747                  * does the same.  This percolates all the way up to the root.
 4748                  * The root i/o will reexecute or suspend the entire tree.
 4749                  *
 4750                  * This approach ensures that zio_reexecute() honors
 4751                  * all the original i/o dependency relationships, e.g.
 4752                  * parents not executing until children are ready.
 4753                  */
 4754                 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
 4755 
 4756                 zio->io_gang_leader = NULL;
 4757 
 4758                 mutex_enter(&zio->io_lock);
 4759                 zio->io_state[ZIO_WAIT_DONE] = 1;
 4760                 mutex_exit(&zio->io_lock);
 4761 
 4762                 /*
 4763                  * "The Godfather" I/O monitors its children but is
 4764                  * not a true parent to them. It will track them through
 4765                  * the pipeline but severs its ties whenever they get into
 4766                  * trouble (e.g. suspended). This allows "The Godfather"
 4767                  * I/O to return status without blocking.
 4768                  */
 4769                 zl = NULL;
 4770                 for (pio = zio_walk_parents(zio, &zl); pio != NULL;
 4771                     pio = pio_next) {
 4772                         zio_link_t *remove_zl = zl;
 4773                         pio_next = zio_walk_parents(zio, &zl);
 4774 
 4775                         if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
 4776                             (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
 4777                                 zio_remove_child(pio, zio, remove_zl);
 4778                                 /*
 4779                                  * This is a rare code path, so we don't
 4780                                  * bother with "next_to_execute".
 4781                                  */
 4782                                 zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
 4783                                     NULL);
 4784                         }
 4785                 }
 4786 
 4787                 if ((pio = zio_unique_parent(zio)) != NULL) {
 4788                         /*
 4789                          * We're not a root i/o, so there's nothing to do
 4790                          * but notify our parent.  Don't propagate errors
 4791                          * upward since we haven't permanently failed yet.
 4792                          */
 4793                         ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
 4794                         zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
 4795                         /*
 4796                          * This is a rare code path, so we don't bother with
 4797                          * "next_to_execute".
 4798                          */
 4799                         zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
 4800                 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
 4801                         /*
 4802                          * We'd fail again if we reexecuted now, so suspend
 4803                          * until conditions improve (e.g. device comes online).
 4804                          */
 4805                         zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
 4806                 } else {
 4807                         /*
 4808                          * Reexecution is potentially a huge amount of work.
 4809                          * Hand it off to the otherwise-unused claim taskq.
 4810                          */
 4811                         ASSERT(taskq_empty_ent(&zio->io_tqent));
 4812                         spa_taskq_dispatch_ent(zio->io_spa,
 4813                             ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
 4814                             zio_reexecute, zio, 0, &zio->io_tqent);
 4815                 }
 4816                 return (NULL);
 4817         }
 4818 
 4819         ASSERT(zio->io_child_count == 0);
 4820         ASSERT(zio->io_reexecute == 0);
 4821         ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
 4822 
 4823         /*
 4824          * Report any checksum errors, since the I/O is complete.
 4825          */
 4826         while (zio->io_cksum_report != NULL) {
 4827                 zio_cksum_report_t *zcr = zio->io_cksum_report;
 4828                 zio->io_cksum_report = zcr->zcr_next;
 4829                 zcr->zcr_next = NULL;
 4830                 zcr->zcr_finish(zcr, NULL);
 4831                 zfs_ereport_free_checksum(zcr);
 4832         }
 4833 
 4834         if (zio->io_flags & ZIO_FLAG_FASTWRITE && zio->io_bp &&
 4835             !BP_IS_HOLE(zio->io_bp) && !BP_IS_EMBEDDED(zio->io_bp) &&
 4836             !(zio->io_flags & ZIO_FLAG_NOPWRITE)) {
 4837                 metaslab_fastwrite_unmark(zio->io_spa, zio->io_bp);
 4838         }
 4839 
 4840         /*
 4841          * It is the responsibility of the done callback to ensure that this
 4842          * particular zio is no longer discoverable for adoption, and as
 4843          * such, cannot acquire any new parents.
 4844          */
 4845         if (zio->io_done)
 4846                 zio->io_done(zio);
 4847 
 4848         mutex_enter(&zio->io_lock);
 4849         zio->io_state[ZIO_WAIT_DONE] = 1;
 4850         mutex_exit(&zio->io_lock);
 4851 
 4852         /*
 4853          * We are done executing this zio.  We may want to execute a parent
 4854          * next.  See the comment in zio_notify_parent().
 4855          */
 4856         zio_t *next_to_execute = NULL;
 4857         zl = NULL;
 4858         for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
 4859                 zio_link_t *remove_zl = zl;
 4860                 pio_next = zio_walk_parents(zio, &zl);
 4861                 zio_remove_child(pio, zio, remove_zl);
 4862                 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
 4863         }
 4864 
 4865         if (zio->io_waiter != NULL) {
 4866                 mutex_enter(&zio->io_lock);
 4867                 zio->io_executor = NULL;
 4868                 cv_broadcast(&zio->io_cv);
 4869                 mutex_exit(&zio->io_lock);
 4870         } else {
 4871                 zio_destroy(zio);
 4872         }
 4873 
 4874         return (next_to_execute);
 4875 }
 4876 
 4877 /*
 4878  * ==========================================================================
 4879  * I/O pipeline definition
 4880  * ==========================================================================
 4881  */
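      /*
       * The order of entries must match the ZIO_STAGE_* bit definitions in
       * zio_impl.h: zio_execute() indexes this table by the bit position of
       * the current stage, with the NULL entry standing in for
       * ZIO_STAGE_OPEN, which has no handler.
       */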
 4882 static zio_pipe_stage_t *zio_pipeline[] = {
 4883         NULL,
 4884         zio_read_bp_init,
 4885         zio_write_bp_init,
 4886         zio_free_bp_init,
 4887         zio_issue_async,
 4888         zio_write_compress,
 4889         zio_encrypt,
 4890         zio_checksum_generate,
 4891         zio_nop_write,
 4892         zio_ddt_read_start,
 4893         zio_ddt_read_done,
 4894         zio_ddt_write,
 4895         zio_ddt_free,
 4896         zio_gang_assemble,
 4897         zio_gang_issue,
 4898         zio_dva_throttle,
 4899         zio_dva_allocate,
 4900         zio_dva_free,
 4901         zio_dva_claim,
 4902         zio_ready,
 4903         zio_vdev_io_start,
 4904         zio_vdev_io_done,
 4905         zio_vdev_io_assess,
 4906         zio_checksum_verify,
 4907         zio_done
 4908 };
 4909 
 4910 
 4911 
 4912 
 4913 /*
 4914  * Compare two zbookmark_phys_t's to see which we would reach first in a
 4915  * pre-order traversal of the object tree.
 4916  *
 4917  * This is simple in every case aside from the meta-dnode object. For all other
 4918  * objects, we traverse them in order (object 1 before object 2, and so on).
 4919  * However, all of these objects are traversed while traversing object 0, since
 4920  * the data it points to is the list of objects.  Thus, we need to convert to a
 4921  * canonical representation so we can compare meta-dnode bookmarks to
 4922  * non-meta-dnode bookmarks.
 4923  *
 4924  * We do this by calculating "equivalents" for each field of the zbookmark.
 4925  * zbookmarks outside of the meta-dnode use their own object and level, and
 4926  * calculate the level 0 equivalent (the first L0 blkid that is contained in the
 4927  * blocks this bookmark refers to) by multiplying their blkid by their span
 4928  * (the number of L0 blocks contained within one block at their level).
 4929  * zbookmarks inside the meta-dnode calculate their object equivalent
 4930  * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
 4931  * level + 1<<31 (any value larger than a level could ever be) for their level.
 4932  * This causes them to always compare before a bookmark in their object
 4933  * equivalent, compare appropriately to bookmarks in other objects, and to
 4934  * compare appropriately to other bookmarks in the meta-dnode.
 4935  */
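      /*
       * For example, assuming legacy 512-byte dnodes and an illustrative
       * 16K dnode block size (dbss = 32, i.e. 32 dnodes per L0 block), the
       * meta-dnode bookmark <level 0, blkid 3> canonicalizes to object
       * equivalent 96, L0 equivalent 0 and level 0 + COMPARE_META_LEVEL.
       * It therefore sorts after any bookmark in objects 0-95 and before
       * every bookmark inside objects 96-127 (the dnodes stored in that
       * block), matching the pre-order traversal described above.
       */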
 4936 int
 4937 zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
 4938     const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
 4939 {
 4940         /*
 4941          * These variables represent the "equivalent" values for the zbookmark,
 4942          * after converting zbookmarks inside the meta dnode to their
 4943          * normal-object equivalents.
 4944          */
 4945         uint64_t zb1obj, zb2obj;
 4946         uint64_t zb1L0, zb2L0;
 4947         uint64_t zb1level, zb2level;
 4948 
 4949         if (zb1->zb_object == zb2->zb_object &&
 4950             zb1->zb_level == zb2->zb_level &&
 4951             zb1->zb_blkid == zb2->zb_blkid)
 4952                 return (0);
 4953 
 4954         IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
 4955         IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);
 4956 
 4957         /*
 4958          * BP_SPANB calculates the span in blocks.
 4959          */
 4960         zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
 4961         zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
 4962 
 4963         if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
 4964                 zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
 4965                 zb1L0 = 0;
 4966                 zb1level = zb1->zb_level + COMPARE_META_LEVEL;
 4967         } else {
 4968                 zb1obj = zb1->zb_object;
 4969                 zb1level = zb1->zb_level;
 4970         }
 4971 
 4972         if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
 4973                 zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
 4974                 zb2L0 = 0;
 4975                 zb2level = zb2->zb_level + COMPARE_META_LEVEL;
 4976         } else {
 4977                 zb2obj = zb2->zb_object;
 4978                 zb2level = zb2->zb_level;
 4979         }
 4980 
 4981         /* Now that we have a canonical representation, do the comparison. */
 4982         if (zb1obj != zb2obj)
 4983                 return (zb1obj < zb2obj ? -1 : 1);
 4984         else if (zb1L0 != zb2L0)
 4985                 return (zb1L0 < zb2L0 ? -1 : 1);
 4986         else if (zb1level != zb2level)
 4987                 return (zb1level > zb2level ? -1 : 1);
 4988         /*
 4989          * This can (theoretically) happen if the bookmarks have the same object
 4990          * and level, but different blkids, if the block sizes are not the same.
 4991          * There is presently no way to change the indirect block sizes.
 4992          */
 4993         return (0);
 4994 }
 4995 
 4996 /*
 4997  *  This function checks the following: given that last_block is the place that
 4998  *  our traversal stopped last time, does that guarantee that we've visited
 4999  *  every node under subtree_root?  To answer this, we can't just use the raw output
 5000  *  of zbookmark_compare.  We have to pass in a modified version of
 5001  *  subtree_root; by incrementing the block id, and then checking whether
 5002  *  last_block is before or equal to that, we can tell whether or not having
 5003  *  visited last_block implies that all of subtree_root's children have been
 5004  *  visited.
 5005  */
 5006 boolean_t
 5007 zbookmark_subtree_completed(const dnode_phys_t *dnp,
 5008     const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
 5009 {
 5010         zbookmark_phys_t mod_zb = *subtree_root;
 5011         mod_zb.zb_blkid++;
 5012         ASSERT0(last_block->zb_level);
 5013 
 5014         /* The objset_phys_t isn't before anything. */
 5015         if (dnp == NULL)
 5016                 return (B_FALSE);
 5017 
 5018         /*
 5019          * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
 5020          * data block size in sectors, because that variable is only used if
 5021          * the bookmark refers to a block in the meta-dnode.  Since we don't
 5022          * know without examining it what object it refers to, and there's no
 5023          * harm in passing in this value in other cases, we always pass it in.
 5024          *
 5025          * We pass in 0 for the indirect block size shift because zb2 must be
 5026          * level 0.  The indirect block size is only used to calculate the span
 5027          * of the bookmark, but since the bookmark must be level 0, the span is
 5028          * always 1, so the math works out.
 5029          *
 5030          * If you make changes to how the zbookmark_compare code works, be sure
 5031          * to make sure that this code still works afterwards.
 5032          */
 5033         return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
 5034             1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
 5035             last_block) <= 0);
 5036 }
 5037 
 5038 /*
 5039  * This function is similar to zbookmark_subtree_completed(), but returns true
 5040  * if subtree_root is equal or ahead of last_block, i.e. still to be done.
 5041  */
 5042 boolean_t
 5043 zbookmark_subtree_tbd(const dnode_phys_t *dnp,
 5044     const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
 5045 {
 5046         ASSERT0(last_block->zb_level);
 5047         if (dnp == NULL)
 5048                 return (B_FALSE);
 5049         return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
 5050             1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
 5051             last_block) >= 0);
 5052 }
 5053 
 5054 EXPORT_SYMBOL(zio_type_name);
 5055 EXPORT_SYMBOL(zio_buf_alloc);
 5056 EXPORT_SYMBOL(zio_data_buf_alloc);
 5057 EXPORT_SYMBOL(zio_buf_free);
 5058 EXPORT_SYMBOL(zio_data_buf_free);
 5059 
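      /*
       * The tunables below are exposed as 'zfs' module parameters on Linux
       * (e.g. /sys/module/zfs/parameters/zio_slow_io_ms) and typically as
       * vfs.zfs.* sysctls on FreeBSD (e.g. vfs.zfs.zio.slow_io_ms); exact
       * names may vary by platform and release.
       */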
 5060 ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
 5061         "Max I/O completion time (milliseconds) before marking it as slow");
 5062 
 5063 ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
 5064         "Prioritize requeued I/O");
 5065 
 5066 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free,  UINT, ZMOD_RW,
 5067         "Defer frees starting in this pass");
 5068 
 5069 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
 5070         "Don't compress starting in this pass");
 5071 
 5072 ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
 5073         "Rewrite new bps starting in this pass");
 5074 
 5075 ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
 5076         "Throttle block allocations in the ZIO pipeline");
 5077 
 5078 ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
 5079         "Log all slow ZIOs, not just those with vdevs");
