FreeBSD/Linux Kernel Cross Reference
sys/contrib/openzfs/module/zfs/spa_misc.c


    1 /*
    2  * CDDL HEADER START
    3  *
    4  * The contents of this file are subject to the terms of the
    5  * Common Development and Distribution License (the "License").
    6  * You may not use this file except in compliance with the License.
    7  *
    8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
    9  * or https://opensource.org/licenses/CDDL-1.0.
   10  * See the License for the specific language governing permissions
   11  * and limitations under the License.
   12  *
   13  * When distributing Covered Code, include this CDDL HEADER in each
   14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
   15  * If applicable, add the following below this CDDL HEADER, with the
   16  * fields enclosed by brackets "[]" replaced with your own identifying
   17  * information: Portions Copyright [yyyy] [name of copyright owner]
   18  *
   19  * CDDL HEADER END
   20  */
   21 /*
   22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
   23  * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
   24  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
   25  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
   26  * Copyright 2013 Saso Kiselkov. All rights reserved.
   27  * Copyright (c) 2017 Datto Inc.
   28  * Copyright (c) 2017, Intel Corporation.
   29  * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
   30  */
   31 
   32 #include <sys/zfs_context.h>
   33 #include <sys/zfs_chksum.h>
   34 #include <sys/spa_impl.h>
   35 #include <sys/zio.h>
   36 #include <sys/zio_checksum.h>
   37 #include <sys/zio_compress.h>
   38 #include <sys/dmu.h>
   39 #include <sys/dmu_tx.h>
   40 #include <sys/zap.h>
   41 #include <sys/zil.h>
   42 #include <sys/vdev_impl.h>
   43 #include <sys/vdev_initialize.h>
   44 #include <sys/vdev_trim.h>
   45 #include <sys/vdev_file.h>
   46 #include <sys/vdev_raidz.h>
   47 #include <sys/metaslab.h>
   48 #include <sys/uberblock_impl.h>
   49 #include <sys/txg.h>
   50 #include <sys/avl.h>
   51 #include <sys/unique.h>
   52 #include <sys/dsl_pool.h>
   53 #include <sys/dsl_dir.h>
   54 #include <sys/dsl_prop.h>
   55 #include <sys/fm/util.h>
   56 #include <sys/dsl_scan.h>
   57 #include <sys/fs/zfs.h>
   58 #include <sys/metaslab_impl.h>
   59 #include <sys/arc.h>
   60 #include <sys/ddt.h>
   61 #include <sys/kstat.h>
   62 #include "zfs_prop.h"
   63 #include <sys/btree.h>
   64 #include <sys/zfeature.h>
   65 #include <sys/qat.h>
   66 #include <sys/zstd/zstd.h>
   67 
   68 /*
   69  * SPA locking
   70  *
   71  * There are three basic locks for managing spa_t structures:
   72  *
   73  * spa_namespace_lock (global mutex)
   74  *
   75  *      This lock must be acquired to do any of the following:
   76  *
   77  *              - Lookup a spa_t by name
   78  *              - Add or remove a spa_t from the namespace
   79  *              - Increase spa_refcount from zero
   80  *              - Check if spa_refcount is zero
   81  *              - Rename a spa_t
   82  *              - add/remove/attach/detach devices
   83  *              - Held for the duration of create/destroy/import/export
   84  *
   85  *      It does not need to handle recursion.  A create or destroy may
   86  *      reference objects (files or zvols) in other pools, but by
   87  *      definition they must have an existing reference, and will never need
   88  *      to look up a spa_t by name.
   89  *
   90  * spa_refcount (per-spa zfs_refcount_t protected by mutex)
   91  *
   92  *      This reference count keeps track of any active users of the spa_t.  The
   93  *      spa_t cannot be destroyed or freed while this is non-zero.  Internally,
   94  *      the refcount is never really 'zero' - opening a pool implicitly keeps
   95  *      some references in the DMU.  Internally we check against spa_minref, but
   96  *      present the image of a zero/non-zero value to consumers.
   97  *
   98  * spa_config_lock[] (per-spa array of rwlocks)
   99  *
  100  *      This protects the spa_t from config changes, and must be held in
  101  *      the following circumstances:
  102  *
  103  *              - RW_READER to perform I/O to the spa
  104  *              - RW_WRITER to change the vdev config
  105  *
  106  * The locking order is fairly straightforward:
  107  *
  108  *              spa_namespace_lock      ->      spa_refcount
  109  *
  110  *      The namespace lock must be acquired to increase the refcount from 0
  111  *      or to check if it is zero.
  112  *
  113  *              spa_refcount            ->      spa_config_lock[]
  114  *
  115  *      There must be at least one valid reference on the spa_t to acquire
  116  *      the config lock.
  117  *
  118  *              spa_namespace_lock      ->      spa_config_lock[]
  119  *
  120  *      The namespace lock must always be taken before the config lock.
  121  *
  122  *
  123  * The spa_namespace_lock can be acquired directly and is globally visible.
  124  *
  125  * The namespace is manipulated using the following functions, all of which
  126  * require the spa_namespace_lock to be held.
  127  *
  128  *      spa_lookup()            Lookup a spa_t by name.
  129  *
  130  *      spa_add()               Create a new spa_t in the namespace.
  131  *
  132  *      spa_remove()            Remove a spa_t from the namespace.  This also
  133  *                              frees up any memory associated with the spa_t.
  134  *
  135  *      spa_next()              Returns the next spa_t in the system, or the
  136  *                              first if NULL is passed.
  137  *
  138  *      spa_evict_all()         Shutdown and remove all spa_t structures in
  139  *                              the system.
  140  *
  141  *      spa_guid_exists()       Determine whether a pool/device guid exists.
  142  *
  143  * The spa_refcount is manipulated using the following functions:
  144  *
  145  *      spa_open_ref()          Adds a reference to the given spa_t.  Must be
  146  *                              called with spa_namespace_lock held if the
  147  *                              refcount is currently zero.
  148  *
  149  *      spa_close()             Remove a reference from the spa_t.  This will
  150  *                              not free the spa_t or remove it from the
  151  *                              namespace.  No locking is required.
  152  *
  153  *      spa_refcount_zero()     Returns true if the refcount is currently
  154  *                              zero.  Must be called with spa_namespace_lock
  155  *                              held.
  156  *
  157  * The spa_config_lock[] is an array of rwlocks, ordered as follows:
  158  * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
  159  * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
  160  *
  161  * To read the configuration, it suffices to hold one of these locks as reader.
  162  * To modify the configuration, you must hold all locks as writer.  To modify
  163  * vdev state without altering the vdev tree's topology (e.g. online/offline),
  164  * you must hold SCL_STATE and SCL_ZIO as writer.
  165  *
  166  * We use these distinct config locks to avoid recursive lock entry.
  167  * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
  168  * block allocations (SCL_ALLOC), which may require reading space maps
  169  * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
  170  *
  171  * The spa config locks cannot be normal rwlocks because we need the
  172  * ability to hand off ownership.  For example, SCL_ZIO is acquired
  173  * by the issuing thread and later released by an interrupt thread.
  174  * They do, however, obey the usual write-wanted semantics to prevent
  175  * writer (i.e. system administrator) starvation.
  176  *
  177  * The lock acquisition rules are as follows:
  178  *
  179  * SCL_CONFIG
  180  *      Protects changes to the vdev tree topology, such as vdev
  181  *      add/remove/attach/detach.  Protects the dirty config list
  182  *      (spa_config_dirty_list) and the set of spares and l2arc devices.
  183  *
  184  * SCL_STATE
  185  *      Protects changes to pool state and vdev state, such as vdev
  186  *      online/offline/fault/degrade/clear.  Protects the dirty state list
  187  *      (spa_state_dirty_list) and global pool state (spa_state).
  188  *
  189  * SCL_ALLOC
  190  *      Protects changes to metaslab groups and classes.
  191  *      Held as reader by metaslab_alloc() and metaslab_claim().
  192  *
  193  * SCL_ZIO
  194  *      Held by bp-level zios (those which have no io_vd upon entry)
  195  *      to prevent changes to the vdev tree.  The bp-level zio implicitly
  196  *      protects all of its vdev child zios, which do not hold SCL_ZIO.
  197  *
  198  * SCL_FREE
  199  *      Protects changes to metaslab groups and classes.
  200  *      Held as reader by metaslab_free().  SCL_FREE is distinct from
  201  *      SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
  202  *      blocks in zio_done() while another i/o that holds either
  203  *      SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
  204  *
  205  * SCL_VDEV
  206  *      Held as reader to prevent changes to the vdev tree during trivial
  207  *      inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
  208  *      other locks, and lower than all of them, to ensure that it's safe
  209  *      to acquire regardless of caller context.
  210  *
  211  * In addition, the following rules apply:
  212  *
  213  * (a)  spa_props_lock protects pool properties, spa_config and spa_config_list.
  214  *      The lock ordering is SCL_CONFIG > spa_props_lock.
  215  *
  216  * (b)  I/O operations on leaf vdevs.  For any zio operation that takes
  217  *      an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
  218  *      or zio_write_phys() -- the caller must ensure that the config cannot
  219  *      change in the interim, and that the vdev cannot be reopened.
  220  *      SCL_STATE as reader suffices for both.
  221  *
  222  * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
  223  *
  224  *      spa_vdev_enter()        Acquire the namespace lock and the config lock
  225  *                              for writing.
  226  *
  227  *      spa_vdev_exit()         Release the config lock, wait for all I/O
  228  *                              to complete, sync the updated configs to the
  229  *                              cache, and release the namespace lock.
  230  *
  231  * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
  232  * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
  233  * locking is always based on spa_namespace_lock and spa_config_lock[].
  234  */
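
/*
 * To make the rules above concrete, here is a minimal sketch of a
 * hypothetical consumer that inspects the vdev tree (the pool name and
 * control flow are illustrative only):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank");
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *
 *	if (spa != NULL) {
 *		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *		... inspect spa->spa_root_vdev as reader ...
 *		spa_config_exit(spa, SCL_VDEV, FTAG);
 *		spa_close(spa, FTAG);
 *	}
 */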
  235 
  236 static avl_tree_t spa_namespace_avl;
  237 kmutex_t spa_namespace_lock;
  238 static kcondvar_t spa_namespace_cv;
  239 static const int spa_max_replication_override = SPA_DVAS_PER_BP;
  240 
  241 static kmutex_t spa_spare_lock;
  242 static avl_tree_t spa_spare_avl;
  243 static kmutex_t spa_l2cache_lock;
  244 static avl_tree_t spa_l2cache_avl;
  245 
  246 spa_mode_t spa_mode_global = SPA_MODE_UNINIT;
  247 
  248 #ifdef ZFS_DEBUG
  249 /*
   250  * Everything except dprintf, set_error, and indirect_remap is on
   251  * by default in debug builds.
  252  */
  253 int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
  254     ZFS_DEBUG_INDIRECT_REMAP);
  255 #else
  256 int zfs_flags = 0;
  257 #endif
  258 
  259 /*
  260  * zfs_recover can be set to nonzero to attempt to recover from
  261  * otherwise-fatal errors, typically caused by on-disk corruption.  When
  262  * set, calls to zfs_panic_recover() will turn into warning messages.
  263  * This should only be used as a last resort, as it typically results
  264  * in leaked space, or worse.
  265  */
  266 int zfs_recover = B_FALSE;
  267 
  268 /*
  269  * If destroy encounters an EIO while reading metadata (e.g. indirect
  270  * blocks), space referenced by the missing metadata can not be freed.
  271  * Normally this causes the background destroy to become "stalled", as
  272  * it is unable to make forward progress.  While in this stalled state,
  273  * all remaining space to free from the error-encountering filesystem is
  274  * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
  275  * permanently leak the space from indirect blocks that can not be read,
  276  * and continue to free everything else that it can.
  277  *
  278  * The default, "stalling" behavior is useful if the storage partially
  279  * fails (i.e. some but not all i/os fail), and then later recovers.  In
  280  * this case, we will be able to continue pool operations while it is
  281  * partially failed, and when it recovers, we can continue to free the
  282  * space, with no leaks.  However, note that this case is actually
  283  * fairly rare.
  284  *
  285  * Typically pools either (a) fail completely (but perhaps temporarily,
  286  * e.g. a top-level vdev going offline), or (b) have localized,
  287  * permanent errors (e.g. disk returns the wrong data due to bit flip or
  288  * firmware bug).  In case (a), this setting does not matter because the
  289  * pool will be suspended and the sync thread will not be able to make
  290  * forward progress regardless.  In case (b), because the error is
  291  * permanent, the best we can do is leak the minimum amount of space,
  292  * which is what setting this flag will do.  Therefore, it is reasonable
  293  * for this flag to normally be set, but we chose the more conservative
  294  * approach of not setting it, so that there is no possibility of
  295  * leaking space in the "partial temporary" failure case.
  296  */
  297 int zfs_free_leak_on_eio = B_FALSE;
  298 
  299 /*
  300  * Expiration time in milliseconds. This value has two meanings. First, it is
  301  * used to determine when the spa_deadman() logic should fire. By default the
  302  * spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
  303  * Second, it determines whether an I/O is considered "hung". Any I/O that
  304  * has not completed within zfs_deadman_synctime_ms is considered "hung",
  305  * resulting in one of three behaviors controlled by zfs_deadman_failmode.
  306  */
  307 uint64_t zfs_deadman_synctime_ms = 600000UL;  /* 10 min. */
  308 
  309 /*
  310  * This value controls the maximum amount of time zio_wait() will block for an
  311  * outstanding IO.  By default this is 300 seconds at which point the "hung"
  312  * behavior will be applied as described for zfs_deadman_synctime_ms.
  313  */
  314 uint64_t zfs_deadman_ziotime_ms = 300000UL;  /* 5 min. */
  315 
  316 /*
  317  * Check time in milliseconds. This defines the frequency at which we check
  318  * for hung I/O.
  319  */
  320 uint64_t zfs_deadman_checktime_ms = 60000UL;  /* 1 min. */
  321 
  322 /*
  323  * By default the deadman is enabled.
  324  */
  325 int zfs_deadman_enabled = B_TRUE;
  326 
  327 /*
  328  * Controls the behavior of the deadman when it detects a "hung" I/O.
  329  * Valid values are zfs_deadman_failmode=<wait|continue|panic>.
  330  *
  331  * wait     - Wait for the "hung" I/O (default)
  332  * continue - Attempt to recover from a "hung" I/O
  333  * panic    - Panic the system
  334  */
  335 const char *zfs_deadman_failmode = "wait";
  336 
  337 /*
  338  * The worst case is single-sector max-parity RAID-Z blocks, in which
  339  * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
  340  * times the size; so just assume that.  Add to this the fact that
  341  * we can have up to 3 DVAs per bp, and one more factor of 2 because
  342  * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
  343  * the worst case is:
  344  *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
  345  */
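
/*
 * With the current constants VDEV_RAIDZ_MAXPARITY == 3 and
 * SPA_DVAS_PER_BP == 3, this evaluates to (3 + 1) * 3 * 2 == 24,
 * which is where the default below comes from.
 */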
  346 uint_t spa_asize_inflation = 24;
  347 
  348 /*
  349  * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
  350  * the pool to be consumed (bounded by spa_max_slop).  This ensures that we
  351  * don't run the pool completely out of space, due to unaccounted changes (e.g.
  352  * to the MOS).  It also limits the worst-case time to allocate space.  If we
  353  * have less than this amount of free space, most ZPL operations (e.g.  write,
  354  * create) will return ENOSPC.  The ZIL metaslabs (spa_embedded_log_class) are
  355  * also part of this 3.2% of space which can't be consumed by normal writes;
  356  * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
  357  * log space.
  358  *
  359  * Certain operations (e.g. file removal, most administrative actions) can
  360  * use half the slop space.  They will only return ENOSPC if less than half
  361  * the slop space is free.  Typically, once the pool has less than the slop
  362  * space free, the user will use these operations to free up space in the pool.
  363  * These are the operations that call dsl_pool_adjustedsize() with the netfree
  364  * argument set to TRUE.
  365  *
  366  * Operations that are almost guaranteed to free up space in the absence of
  367  * a pool checkpoint can use up to three quarters of the slop space
  368  * (e.g. zfs destroy).
  369  *
  370  * A very restricted set of operations are always permitted, regardless of
  371  * the amount of free space.  These are the operations that call
  372  * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
  373  * increase in the amount of space used, it is possible to run the pool
  374  * completely out of space, causing it to be permanently read-only.
  375  *
  376  * Note that on very small pools, the slop space will be larger than
  377  * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
  378  * but we never allow it to be more than half the pool size.
  379  *
  380  * Further, on very large pools, the slop space will be smaller than
  381  * 3.2%, to avoid reserving much more space than we actually need; bounded
  382  * by spa_max_slop (128GB).
  383  *
  384  * See also the comments in zfs_space_check_t.
  385  */
  386 uint_t spa_slop_shift = 5;
  387 static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
  388 static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
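
/*
 * A rough sketch of how these bounds combine (the full calculation in
 * spa_get_slop_space() also deducts the embedded log space mentioned
 * above):
 *
 *	slop = MIN(pool_size >> spa_slop_shift, spa_max_slop);
 *	slop = MAX(slop, MIN(pool_size >> 1, spa_min_slop));
 *
 * e.g. a 1TB pool reserves 1TB/32 = 32GB of slop, a 1GB pool is lifted
 * to the 128MB floor, and a 16PB pool is capped at the 128GB ceiling.
 */
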
  389 static const int spa_allocators = 4;
  390 
  391 
  392 void
  393 spa_load_failed(spa_t *spa, const char *fmt, ...)
  394 {
  395         va_list adx;
  396         char buf[256];
  397 
  398         va_start(adx, fmt);
  399         (void) vsnprintf(buf, sizeof (buf), fmt, adx);
  400         va_end(adx);
  401 
  402         zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
  403             spa->spa_trust_config ? "trusted" : "untrusted", buf);
  404 }
  405 
  406 void
  407 spa_load_note(spa_t *spa, const char *fmt, ...)
  408 {
  409         va_list adx;
  410         char buf[256];
  411 
  412         va_start(adx, fmt);
  413         (void) vsnprintf(buf, sizeof (buf), fmt, adx);
  414         va_end(adx);
  415 
  416         zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
  417             spa->spa_trust_config ? "trusted" : "untrusted", buf);
  418 }
  419 
  420 /*
  421  * By default, dedup and user data indirect blocks land in the special class.
  422  */
  423 static int zfs_ddt_data_is_special = B_TRUE;
  424 static int zfs_user_indirect_is_special = B_TRUE;
  425 
  426 /*
  427  * The percentage of the special class's final space reserved for metadata only.
  428  * Once the class is more than (100 - zfs_special_class_metadata_reserve_pct)
  429  * percent full, only metadata is allowed into the class.
  430  */
  431 static uint_t zfs_special_class_metadata_reserve_pct = 25;
  432 
  433 /*
  434  * ==========================================================================
  435  * SPA config locking
  436  * ==========================================================================
  437  */
  438 static void
  439 spa_config_lock_init(spa_t *spa)
  440 {
  441         for (int i = 0; i < SCL_LOCKS; i++) {
  442                 spa_config_lock_t *scl = &spa->spa_config_lock[i];
  443                 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
  444                 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
  445                 scl->scl_writer = NULL;
  446                 scl->scl_write_wanted = 0;
  447                 scl->scl_count = 0;
  448         }
  449 }
  450 
  451 static void
  452 spa_config_lock_destroy(spa_t *spa)
  453 {
  454         for (int i = 0; i < SCL_LOCKS; i++) {
  455                 spa_config_lock_t *scl = &spa->spa_config_lock[i];
  456                 mutex_destroy(&scl->scl_lock);
  457                 cv_destroy(&scl->scl_cv);
  458                 ASSERT(scl->scl_writer == NULL);
  459                 ASSERT(scl->scl_write_wanted == 0);
  460                 ASSERT(scl->scl_count == 0);
  461         }
  462 }
  463 
  464 int
  465 spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
  466 {
  467         for (int i = 0; i < SCL_LOCKS; i++) {
  468                 spa_config_lock_t *scl = &spa->spa_config_lock[i];
  469                 if (!(locks & (1 << i)))
  470                         continue;
  471                 mutex_enter(&scl->scl_lock);
  472                 if (rw == RW_READER) {
  473                         if (scl->scl_writer || scl->scl_write_wanted) {
  474                                 mutex_exit(&scl->scl_lock);
  475                                 spa_config_exit(spa, locks & ((1 << i) - 1),
  476                                     tag);
  477                                 return (0);
  478                         }
  479                 } else {
  480                         ASSERT(scl->scl_writer != curthread);
  481                         if (scl->scl_count != 0) {
  482                                 mutex_exit(&scl->scl_lock);
  483                                 spa_config_exit(spa, locks & ((1 << i) - 1),
  484                                     tag);
  485                                 return (0);
  486                         }
  487                         scl->scl_writer = curthread;
  488                 }
  489                 scl->scl_count++;
  490                 mutex_exit(&scl->scl_lock);
  491         }
  492         return (1);
  493 }
  494 
  495 void
  496 spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
  497 {
  498         (void) tag;
  499         int wlocks_held = 0;
  500 
  501         ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
  502 
  503         for (int i = 0; i < SCL_LOCKS; i++) {
  504                 spa_config_lock_t *scl = &spa->spa_config_lock[i];
  505                 if (scl->scl_writer == curthread)
  506                         wlocks_held |= (1 << i);
  507                 if (!(locks & (1 << i)))
  508                         continue;
  509                 mutex_enter(&scl->scl_lock);
  510                 if (rw == RW_READER) {
  511                         while (scl->scl_writer || scl->scl_write_wanted) {
  512                                 cv_wait(&scl->scl_cv, &scl->scl_lock);
  513                         }
  514                 } else {
  515                         ASSERT(scl->scl_writer != curthread);
  516                         while (scl->scl_count != 0) {
  517                                 scl->scl_write_wanted++;
  518                                 cv_wait(&scl->scl_cv, &scl->scl_lock);
  519                                 scl->scl_write_wanted--;
  520                         }
  521                         scl->scl_writer = curthread;
  522                 }
  523                 scl->scl_count++;
  524                 mutex_exit(&scl->scl_lock);
  525         }
  526         ASSERT3U(wlocks_held, <=, locks);
  527 }
  528 
  529 void
  530 spa_config_exit(spa_t *spa, int locks, const void *tag)
  531 {
  532         (void) tag;
  533         for (int i = SCL_LOCKS - 1; i >= 0; i--) {
  534                 spa_config_lock_t *scl = &spa->spa_config_lock[i];
  535                 if (!(locks & (1 << i)))
  536                         continue;
  537                 mutex_enter(&scl->scl_lock);
  538                 ASSERT(scl->scl_count > 0);
  539                 if (--scl->scl_count == 0) {
  540                         ASSERT(scl->scl_writer == NULL ||
  541                             scl->scl_writer == curthread);
  542                         scl->scl_writer = NULL; /* OK in either case */
  543                         cv_broadcast(&scl->scl_cv);
  544                 }
  545                 mutex_exit(&scl->scl_lock);
  546         }
  547 }
  548 
  549 int
  550 spa_config_held(spa_t *spa, int locks, krw_t rw)
  551 {
  552         int locks_held = 0;
  553 
  554         for (int i = 0; i < SCL_LOCKS; i++) {
  555                 spa_config_lock_t *scl = &spa->spa_config_lock[i];
  556                 if (!(locks & (1 << i)))
  557                         continue;
  558                 if ((rw == RW_READER && scl->scl_count != 0) ||
  559                     (rw == RW_WRITER && scl->scl_writer == curthread))
  560                         locks_held |= 1 << i;
  561         }
  562 
  563         return (locks_held);
  564 }
  565 
  566 /*
  567  * ==========================================================================
  568  * SPA namespace functions
  569  * ==========================================================================
  570  */
  571 
  572 /*
  573  * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
  574  * Returns NULL if no matching spa_t is found.
  575  */
  576 spa_t *
  577 spa_lookup(const char *name)
  578 {
  579         static spa_t search;    /* spa_t is large; don't allocate on stack */
  580         spa_t *spa;
  581         avl_index_t where;
  582         char *cp;
  583 
  584         ASSERT(MUTEX_HELD(&spa_namespace_lock));
  585 
  586         (void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
  587 
  588         /*
  589          * If it's a full dataset name, figure out the pool name and
  590          * just use that.
  591          */
  592         cp = strpbrk(search.spa_name, "/@#");
  593         if (cp != NULL)
  594                 *cp = '\0';
  595 
  596         spa = avl_find(&spa_namespace_avl, &search, &where);
  597 
  598         return (spa);
  599 }
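
/*
 * For example, spa_lookup("tank/home@snap") truncates the search name at
 * the first '/', '@', or '#' and returns the spa_t for "tank", if any.
 */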
  600 
  601 /*
  602  * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
  603  * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
  604  * looking for potentially hung I/Os.
  605  */
  606 void
  607 spa_deadman(void *arg)
  608 {
  609         spa_t *spa = arg;
  610 
  611         /* Disable the deadman if the pool is suspended. */
  612         if (spa_suspended(spa))
  613                 return;
  614 
  615         zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
   616             (u_longlong_t)((gethrtime() - spa->spa_sync_starttime) / NANOSEC),
  617             (u_longlong_t)++spa->spa_deadman_calls);
  618         if (zfs_deadman_enabled)
  619                 vdev_deadman(spa->spa_root_vdev, FTAG);
  620 
  621         spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
  622             spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
  623             MSEC_TO_TICK(zfs_deadman_checktime_ms));
  624 }
  625 
  626 static int
  627 spa_log_sm_sort_by_txg(const void *va, const void *vb)
  628 {
  629         const spa_log_sm_t *a = va;
  630         const spa_log_sm_t *b = vb;
  631 
  632         return (TREE_CMP(a->sls_txg, b->sls_txg));
  633 }
  634 
  635 /*
  636  * Create an uninitialized spa_t with the given name.  Requires
  637  * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
  638  * exist by calling spa_lookup() first.
  639  */
  640 spa_t *
  641 spa_add(const char *name, nvlist_t *config, const char *altroot)
  642 {
  643         spa_t *spa;
  644         spa_config_dirent_t *dp;
  645 
  646         ASSERT(MUTEX_HELD(&spa_namespace_lock));
  647 
  648         spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
  649 
  650         mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
  651         mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
  652         mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
  653         mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
  654         mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
  655         mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
  656         mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
  657         mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
  658         mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
  659         mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
  660         mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
  661         mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
  662         mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
  663         mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);
  664 
  665         cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
  666         cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
  667         cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
  668         cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
  669         cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
  670         cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
  671         cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);
  672 
  673         for (int t = 0; t < TXG_SIZE; t++)
  674                 bplist_create(&spa->spa_free_bplist[t]);
  675 
  676         (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
  677         spa->spa_state = POOL_STATE_UNINITIALIZED;
  678         spa->spa_freeze_txg = UINT64_MAX;
  679         spa->spa_final_txg = UINT64_MAX;
  680         spa->spa_load_max_txg = UINT64_MAX;
  681         spa->spa_proc = &p0;
  682         spa->spa_proc_state = SPA_PROC_NONE;
  683         spa->spa_trust_config = B_TRUE;
  684         spa->spa_hostid = zone_get_hostid(NULL);
  685 
  686         spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
  687         spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
  688         spa_set_deadman_failmode(spa, zfs_deadman_failmode);
  689 
  690         zfs_refcount_create(&spa->spa_refcount);
  691         spa_config_lock_init(spa);
  692         spa_stats_init(spa);
  693 
  694         avl_add(&spa_namespace_avl, spa);
  695 
  696         /*
  697          * Set the alternate root, if there is one.
  698          */
  699         if (altroot)
  700                 spa->spa_root = spa_strdup(altroot);
  701 
  702         spa->spa_alloc_count = spa_allocators;
  703         spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
  704             sizeof (spa_alloc_t), KM_SLEEP);
  705         for (int i = 0; i < spa->spa_alloc_count; i++) {
  706                 mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
  707                     NULL);
  708                 avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
  709                     sizeof (zio_t), offsetof(zio_t, io_alloc_node));
  710         }
  711         avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
  712             sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
  713         avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
  714             sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
  715         list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
  716             offsetof(log_summary_entry_t, lse_node));
  717 
  718         /*
   719  * Every pool starts with the default cachefile.
  720          */
  721         list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
  722             offsetof(spa_config_dirent_t, scd_link));
  723 
  724         dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
  725         dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
  726         list_insert_head(&spa->spa_config_list, dp);
  727 
  728         VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
  729             KM_SLEEP) == 0);
  730 
  731         if (config != NULL) {
  732                 nvlist_t *features;
  733 
  734                 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
  735                     &features) == 0) {
  736                         VERIFY(nvlist_dup(features, &spa->spa_label_features,
  737                             0) == 0);
  738                 }
  739 
  740                 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
  741         }
  742 
  743         if (spa->spa_label_features == NULL) {
  744                 VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
  745                     KM_SLEEP) == 0);
  746         }
  747 
  748         spa->spa_min_ashift = INT_MAX;
  749         spa->spa_max_ashift = 0;
  750         spa->spa_min_alloc = INT_MAX;
  751 
  752         /* Reset cached value */
  753         spa->spa_dedup_dspace = ~0ULL;
  754 
  755         /*
  756          * As a pool is being created, treat all features as disabled by
  757          * setting SPA_FEATURE_DISABLED for all entries in the feature
  758          * refcount cache.
  759          */
  760         for (int i = 0; i < SPA_FEATURES; i++) {
  761                 spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
  762         }
  763 
  764         list_create(&spa->spa_leaf_list, sizeof (vdev_t),
  765             offsetof(vdev_t, vdev_leaf_node));
  766 
  767         return (spa);
  768 }
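
/*
 * Sketch of the create-time idiom described above (hypothetical caller,
 * loosely modeled on spa_create() in spa.c):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_lookup(pool) != NULL) {
 *		mutex_exit(&spa_namespace_lock);
 *		return (SET_ERROR(EEXIST));
 *	}
 *	spa = spa_add(pool, config, altroot);
 *	...
 *	mutex_exit(&spa_namespace_lock);
 */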
  769 
  770 /*
  771  * Removes a spa_t from the namespace, freeing up any memory used.  Requires
  772  * spa_namespace_lock.  This is called only after the spa_t has been closed and
  773  * deactivated.
  774  */
  775 void
  776 spa_remove(spa_t *spa)
  777 {
  778         spa_config_dirent_t *dp;
  779 
  780         ASSERT(MUTEX_HELD(&spa_namespace_lock));
  781         ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
  782         ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
  783         ASSERT0(spa->spa_waiters);
  784 
  785         nvlist_free(spa->spa_config_splitting);
  786 
  787         avl_remove(&spa_namespace_avl, spa);
  788         cv_broadcast(&spa_namespace_cv);
  789 
  790         if (spa->spa_root)
  791                 spa_strfree(spa->spa_root);
  792 
  793         while ((dp = list_head(&spa->spa_config_list)) != NULL) {
  794                 list_remove(&spa->spa_config_list, dp);
  795                 if (dp->scd_path != NULL)
  796                         spa_strfree(dp->scd_path);
  797                 kmem_free(dp, sizeof (spa_config_dirent_t));
  798         }
  799 
  800         for (int i = 0; i < spa->spa_alloc_count; i++) {
  801                 avl_destroy(&spa->spa_allocs[i].spaa_tree);
  802                 mutex_destroy(&spa->spa_allocs[i].spaa_lock);
  803         }
  804         kmem_free(spa->spa_allocs, spa->spa_alloc_count *
  805             sizeof (spa_alloc_t));
  806 
  807         avl_destroy(&spa->spa_metaslabs_by_flushed);
  808         avl_destroy(&spa->spa_sm_logs_by_txg);
  809         list_destroy(&spa->spa_log_summary);
  810         list_destroy(&spa->spa_config_list);
  811         list_destroy(&spa->spa_leaf_list);
  812 
  813         nvlist_free(spa->spa_label_features);
  814         nvlist_free(spa->spa_load_info);
  815         nvlist_free(spa->spa_feat_stats);
  816         spa_config_set(spa, NULL);
  817 
  818         zfs_refcount_destroy(&spa->spa_refcount);
  819 
  820         spa_stats_destroy(spa);
  821         spa_config_lock_destroy(spa);
  822 
  823         for (int t = 0; t < TXG_SIZE; t++)
  824                 bplist_destroy(&spa->spa_free_bplist[t]);
  825 
  826         zio_checksum_templates_free(spa);
  827 
  828         cv_destroy(&spa->spa_async_cv);
  829         cv_destroy(&spa->spa_evicting_os_cv);
  830         cv_destroy(&spa->spa_proc_cv);
  831         cv_destroy(&spa->spa_scrub_io_cv);
  832         cv_destroy(&spa->spa_suspend_cv);
  833         cv_destroy(&spa->spa_activities_cv);
  834         cv_destroy(&spa->spa_waiters_cv);
  835 
  836         mutex_destroy(&spa->spa_flushed_ms_lock);
  837         mutex_destroy(&spa->spa_async_lock);
  838         mutex_destroy(&spa->spa_errlist_lock);
  839         mutex_destroy(&spa->spa_errlog_lock);
  840         mutex_destroy(&spa->spa_evicting_os_lock);
  841         mutex_destroy(&spa->spa_history_lock);
  842         mutex_destroy(&spa->spa_proc_lock);
  843         mutex_destroy(&spa->spa_props_lock);
  844         mutex_destroy(&spa->spa_cksum_tmpls_lock);
  845         mutex_destroy(&spa->spa_scrub_lock);
  846         mutex_destroy(&spa->spa_suspend_lock);
  847         mutex_destroy(&spa->spa_vdev_top_lock);
  848         mutex_destroy(&spa->spa_feat_stats_lock);
  849         mutex_destroy(&spa->spa_activities_lock);
  850 
  851         kmem_free(spa, sizeof (spa_t));
  852 }
  853 
  854 /*
  855  * Given a pool, return the next pool in the namespace, or NULL if there is
  856  * none.  If 'prev' is NULL, return the first pool.
  857  */
  858 spa_t *
  859 spa_next(spa_t *prev)
  860 {
  861         ASSERT(MUTEX_HELD(&spa_namespace_lock));
  862 
  863         if (prev)
  864                 return (AVL_NEXT(&spa_namespace_avl, prev));
  865         else
  866                 return (avl_first(&spa_namespace_avl));
  867 }
  868 
  869 /*
  870  * ==========================================================================
  871  * SPA refcount functions
  872  * ==========================================================================
  873  */
  874 
  875 /*
  876  * Add a reference to the given spa_t.  Must have at least one reference, or
  877  * have the namespace lock held.
  878  */
  879 void
  880 spa_open_ref(spa_t *spa, const void *tag)
  881 {
  882         ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
  883             MUTEX_HELD(&spa_namespace_lock));
  884         (void) zfs_refcount_add(&spa->spa_refcount, tag);
  885 }
  886 
  887 /*
  888  * Remove a reference to the given spa_t.  Must have at least one reference, or
  889  * have the namespace lock held.
  890  */
  891 void
  892 spa_close(spa_t *spa, const void *tag)
  893 {
  894         ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
  895             MUTEX_HELD(&spa_namespace_lock));
  896         (void) zfs_refcount_remove(&spa->spa_refcount, tag);
  897 }
  898 
  899 /*
  900  * Remove a reference to the given spa_t held by a dsl dir that is
  901  * being asynchronously released.  Async releases occur from a taskq
  902  * performing eviction of dsl datasets and dirs.  The namespace lock
  903  * isn't held and the hold by the object being evicted may contribute to
  904  * spa_minref (e.g. dataset or directory released during pool export),
  905  * so the asserts in spa_close() do not apply.
  906  */
  907 void
  908 spa_async_close(spa_t *spa, const void *tag)
  909 {
  910         (void) zfs_refcount_remove(&spa->spa_refcount, tag);
  911 }
  912 
  913 /*
  914  * Check to see if the spa refcount is zero.  Must be called with
  915  * spa_namespace_lock held.  We really compare against spa_minref, which is the
  916  * number of references acquired when opening a pool.
  917  */
  918 boolean_t
  919 spa_refcount_zero(spa_t *spa)
  920 {
  921         ASSERT(MUTEX_HELD(&spa_namespace_lock));
  922 
  923         return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
  924 }
  925 
  926 /*
  927  * ==========================================================================
  928  * SPA spare and l2cache tracking
  929  * ==========================================================================
  930  */
  931 
  932 /*
  933  * Hot spares and cache devices are tracked using the same code below,
  934  * for 'auxiliary' devices.
  935  */
  936 
  937 typedef struct spa_aux {
  938         uint64_t        aux_guid;
  939         uint64_t        aux_pool;
  940         avl_node_t      aux_avl;
  941         int             aux_count;
  942 } spa_aux_t;
  943 
  944 static inline int
  945 spa_aux_compare(const void *a, const void *b)
  946 {
  947         const spa_aux_t *sa = (const spa_aux_t *)a;
  948         const spa_aux_t *sb = (const spa_aux_t *)b;
  949 
  950         return (TREE_CMP(sa->aux_guid, sb->aux_guid));
  951 }
  952 
  953 static void
  954 spa_aux_add(vdev_t *vd, avl_tree_t *avl)
  955 {
  956         avl_index_t where;
  957         spa_aux_t search;
  958         spa_aux_t *aux;
  959 
  960         search.aux_guid = vd->vdev_guid;
  961         if ((aux = avl_find(avl, &search, &where)) != NULL) {
  962                 aux->aux_count++;
  963         } else {
  964                 aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
  965                 aux->aux_guid = vd->vdev_guid;
  966                 aux->aux_count = 1;
  967                 avl_insert(avl, aux, where);
  968         }
  969 }
  970 
  971 static void
  972 spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
  973 {
  974         spa_aux_t search;
  975         spa_aux_t *aux;
  976         avl_index_t where;
  977 
  978         search.aux_guid = vd->vdev_guid;
  979         aux = avl_find(avl, &search, &where);
  980 
  981         ASSERT(aux != NULL);
  982 
  983         if (--aux->aux_count == 0) {
  984                 avl_remove(avl, aux);
  985                 kmem_free(aux, sizeof (spa_aux_t));
  986         } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
  987                 aux->aux_pool = 0ULL;
  988         }
  989 }
  990 
  991 static boolean_t
  992 spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
  993 {
  994         spa_aux_t search, *found;
  995 
  996         search.aux_guid = guid;
  997         found = avl_find(avl, &search, NULL);
  998 
  999         if (pool) {
 1000                 if (found)
 1001                         *pool = found->aux_pool;
 1002                 else
 1003                         *pool = 0ULL;
 1004         }
 1005 
 1006         if (refcnt) {
 1007                 if (found)
 1008                         *refcnt = found->aux_count;
 1009                 else
 1010                         *refcnt = 0;
 1011         }
 1012 
 1013         return (found != NULL);
 1014 }
 1015 
 1016 static void
 1017 spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
 1018 {
 1019         spa_aux_t search, *found;
 1020         avl_index_t where;
 1021 
 1022         search.aux_guid = vd->vdev_guid;
 1023         found = avl_find(avl, &search, &where);
 1024         ASSERT(found != NULL);
 1025         ASSERT(found->aux_pool == 0ULL);
 1026 
 1027         found->aux_pool = spa_guid(vd->vdev_spa);
 1028 }
 1029 
 1030 /*
 1031  * Spares are tracked globally due to the following constraints:
 1032  *
 1033  *      - A spare may be part of multiple pools.
 1034  *      - A spare may be added to a pool even if it's actively in use within
 1035  *        another pool.
 1036  *      - A spare in use in any pool can only be the source of a replacement if
 1037  *        the target is a spare in the same pool.
 1038  *
 1039  * We keep track of all spares on the system through the use of a reference
 1040  * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 1041  * spare, we bump the reference count in the AVL tree.  In addition, we set
 1042  * the 'vdev_isspare' member to indicate that the device is a spare (active or
 1043  * inactive).  When a spare is made active (used to replace a device in the
 1044  *      pool), we also keep track of which pool it's been made a part of.
 1045  *
 1046  * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 1047  * called under the spa_namespace lock as part of vdev reconfiguration.  The
 1048  * separate spare lock exists for the status query path, which does not need to
 1049  * be completely consistent with respect to other vdev configuration changes.
 1050  */
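
/*
 * A sketch of how this tracking is typically consumed (cf. the aux
 * device validation in spa.c); the guid and spa here are hypothetical:
 *
 *	uint64_t pool;
 *	if (spa_spare_exists(guid, &pool, NULL) &&
 *	    pool != 0ULL && pool != spa_guid(spa))
 *		... the device is an active spare in another pool ...
 */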
 1051 
 1052 static int
 1053 spa_spare_compare(const void *a, const void *b)
 1054 {
 1055         return (spa_aux_compare(a, b));
 1056 }
 1057 
 1058 void
 1059 spa_spare_add(vdev_t *vd)
 1060 {
 1061         mutex_enter(&spa_spare_lock);
 1062         ASSERT(!vd->vdev_isspare);
 1063         spa_aux_add(vd, &spa_spare_avl);
 1064         vd->vdev_isspare = B_TRUE;
 1065         mutex_exit(&spa_spare_lock);
 1066 }
 1067 
 1068 void
 1069 spa_spare_remove(vdev_t *vd)
 1070 {
 1071         mutex_enter(&spa_spare_lock);
 1072         ASSERT(vd->vdev_isspare);
 1073         spa_aux_remove(vd, &spa_spare_avl);
 1074         vd->vdev_isspare = B_FALSE;
 1075         mutex_exit(&spa_spare_lock);
 1076 }
 1077 
 1078 boolean_t
 1079 spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
 1080 {
 1081         boolean_t found;
 1082 
 1083         mutex_enter(&spa_spare_lock);
 1084         found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
 1085         mutex_exit(&spa_spare_lock);
 1086 
 1087         return (found);
 1088 }
 1089 
 1090 void
 1091 spa_spare_activate(vdev_t *vd)
 1092 {
 1093         mutex_enter(&spa_spare_lock);
 1094         ASSERT(vd->vdev_isspare);
 1095         spa_aux_activate(vd, &spa_spare_avl);
 1096         mutex_exit(&spa_spare_lock);
 1097 }
 1098 
 1099 /*
 1100  * Level 2 ARC devices are tracked globally for the same reasons as spares.
 1101  * Cache devices currently only support one pool per cache device, and so
 1102  * for these devices the aux reference count is currently unused beyond 1.
 1103  */
 1104 
 1105 static int
 1106 spa_l2cache_compare(const void *a, const void *b)
 1107 {
 1108         return (spa_aux_compare(a, b));
 1109 }
 1110 
 1111 void
 1112 spa_l2cache_add(vdev_t *vd)
 1113 {
 1114         mutex_enter(&spa_l2cache_lock);
 1115         ASSERT(!vd->vdev_isl2cache);
 1116         spa_aux_add(vd, &spa_l2cache_avl);
 1117         vd->vdev_isl2cache = B_TRUE;
 1118         mutex_exit(&spa_l2cache_lock);
 1119 }
 1120 
 1121 void
 1122 spa_l2cache_remove(vdev_t *vd)
 1123 {
 1124         mutex_enter(&spa_l2cache_lock);
 1125         ASSERT(vd->vdev_isl2cache);
 1126         spa_aux_remove(vd, &spa_l2cache_avl);
 1127         vd->vdev_isl2cache = B_FALSE;
 1128         mutex_exit(&spa_l2cache_lock);
 1129 }
 1130 
 1131 boolean_t
 1132 spa_l2cache_exists(uint64_t guid, uint64_t *pool)
 1133 {
 1134         boolean_t found;
 1135 
 1136         mutex_enter(&spa_l2cache_lock);
 1137         found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
 1138         mutex_exit(&spa_l2cache_lock);
 1139 
 1140         return (found);
 1141 }
 1142 
 1143 void
 1144 spa_l2cache_activate(vdev_t *vd)
 1145 {
 1146         mutex_enter(&spa_l2cache_lock);
 1147         ASSERT(vd->vdev_isl2cache);
 1148         spa_aux_activate(vd, &spa_l2cache_avl);
 1149         mutex_exit(&spa_l2cache_lock);
 1150 }
 1151 
 1152 /*
 1153  * ==========================================================================
 1154  * SPA vdev locking
 1155  * ==========================================================================
 1156  */
 1157 
 1158 /*
 1159  * Lock the given spa_t for the purpose of adding or removing a vdev.
 1160  * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 1161  * It returns the next transaction group for the spa_t.
 1162  */
 1163 uint64_t
 1164 spa_vdev_enter(spa_t *spa)
 1165 {
 1166         mutex_enter(&spa->spa_vdev_top_lock);
 1167         mutex_enter(&spa_namespace_lock);
 1168 
 1169         vdev_autotrim_stop_all(spa);
 1170 
 1171         return (spa_vdev_config_enter(spa));
 1172 }
 1173 
 1174 /*
 1175  * The same as spa_vdev_enter() above but additionally takes the guid of
 1176  * the vdev being detached.  When there is a rebuild in progress it will be
 1177  * suspended while the vdev tree is modified, then resumed by spa_vdev_exit().
 1178  * The rebuild is canceled if only a single child remains after the detach.
 1179  */
 1180 uint64_t
 1181 spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
 1182 {
 1183         mutex_enter(&spa->spa_vdev_top_lock);
 1184         mutex_enter(&spa_namespace_lock);
 1185 
 1186         vdev_autotrim_stop_all(spa);
 1187 
 1188         if (guid != 0) {
 1189                 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
 1190                 if (vd) {
 1191                         vdev_rebuild_stop_wait(vd->vdev_top);
 1192                 }
 1193         }
 1194 
 1195         return (spa_vdev_config_enter(spa));
 1196 }
 1197 
 1198 /*
 1199  * Internal implementation for spa_vdev_enter().  Used when a vdev
 1200  * operation requires multiple syncs (e.g. removing a device) while
 1201  * keeping the spa_namespace_lock held.
 1202  */
 1203 uint64_t
 1204 spa_vdev_config_enter(spa_t *spa)
 1205 {
 1206         ASSERT(MUTEX_HELD(&spa_namespace_lock));
 1207 
 1208         spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
 1209 
 1210         return (spa_last_synced_txg(spa) + 1);
 1211 }
 1212 
 1213 /*
 1214  * Used in combination with spa_vdev_config_enter() to allow the syncing
 1215  * of multiple transactions without releasing the spa_namespace_lock.
 1216  */
 1217 void
 1218 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
 1219     const char *tag)
 1220 {
 1221         ASSERT(MUTEX_HELD(&spa_namespace_lock));
 1222 
 1223         int config_changed = B_FALSE;
 1224 
 1225         ASSERT(txg > spa_last_synced_txg(spa));
 1226 
 1227         spa->spa_pending_vdev = NULL;
 1228 
 1229         /*
 1230          * Reassess the DTLs.
 1231          */
 1232         vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);
 1233 
 1234         if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
 1235                 config_changed = B_TRUE;
 1236                 spa->spa_config_generation++;
 1237         }
 1238 
 1239         /*
 1240          * Verify the metaslab classes.
 1241          */
 1242         ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
 1243         ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
 1244         ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
 1245         ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
 1246         ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);
 1247 
 1248         spa_config_exit(spa, SCL_ALL, spa);
 1249 
 1250         /*
 1251          * Panic the system if the specified tag requires it.  This
 1252          * is useful for ensuring that configurations are updated
 1253          * transactionally.
 1254          */
 1255         if (zio_injection_enabled)
 1256                 zio_handle_panic_injection(spa, tag, 0);
 1257 
 1258         /*
 1259          * Note: this txg_wait_synced() is important because it ensures
 1260          * that there won't be more than one config change per txg.
 1261          * This allows us to use the txg as the generation number.
 1262          */
 1263         if (error == 0)
 1264                 txg_wait_synced(spa->spa_dsl_pool, txg);
 1265 
 1266         if (vd != NULL) {
 1267                 ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
 1268                 if (vd->vdev_ops->vdev_op_leaf) {
 1269                         mutex_enter(&vd->vdev_initialize_lock);
 1270                         vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
 1271                             NULL);
 1272                         mutex_exit(&vd->vdev_initialize_lock);
 1273 
 1274                         mutex_enter(&vd->vdev_trim_lock);
 1275                         vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
 1276                         mutex_exit(&vd->vdev_trim_lock);
 1277                 }
 1278 
 1279                 /*
 1280                  * The vdev may be both a leaf and top-level device.
 1281                  */
 1282                 vdev_autotrim_stop_wait(vd);
 1283 
 1284                 spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
 1285                 vdev_free(vd);
 1286                 spa_config_exit(spa, SCL_STATE_ALL, spa);
 1287         }
 1288 
 1289         /*
 1290          * If the config changed, update the config cache.
 1291          */
 1292         if (config_changed)
 1293                 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
 1294 }
 1295 
 1296 /*
 1297  * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 1298  * locking of spa_vdev_enter(), we also want to make sure the transactions have
 1299  * synced to disk, and then update the global configuration cache with the new
 1300  * information.
 1301  */
 1302 int
 1303 spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
 1304 {
 1305         vdev_autotrim_restart(spa);
 1306         vdev_rebuild_restart(spa);
 1307 
 1308         spa_vdev_config_exit(spa, vd, txg, error, FTAG);
 1309         mutex_exit(&spa_namespace_lock);
 1310         mutex_exit(&spa->spa_vdev_top_lock);
 1311 
 1312         return (error);
 1313 }
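
/*
 * The enter/exit pair above is meant to bracket one logical vdev
 * change; a hypothetical caller follows this shape:
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... add, remove, attach, or detach vdevs ...
 *	return (spa_vdev_exit(spa, newvd, txg, error));
 */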
 1314 
 1315 /*
 1316  * Lock the given spa_t for the purpose of changing vdev state.
 1317  */
 1318 void
 1319 spa_vdev_state_enter(spa_t *spa, int oplocks)
 1320 {
 1321         int locks = SCL_STATE_ALL | oplocks;
 1322 
 1323         /*
 1324  * Root pools may need to read from the underlying devfs filesystem
 1325          * when opening up a vdev.  Unfortunately if we're holding the
 1326          * SCL_ZIO lock it will result in a deadlock when we try to issue
 1327          * the read from the root filesystem.  Instead we "prefetch"
 1328          * the associated vnodes that we need prior to opening the
 1329          * underlying devices and cache them so that we can prevent
 1330          * any I/O when we are doing the actual open.
 1331          */
 1332         if (spa_is_root(spa)) {
 1333                 int low = locks & ~(SCL_ZIO - 1);
 1334                 int high = locks & ~low;
 1335 
 1336                 spa_config_enter(spa, high, spa, RW_WRITER);
 1337                 vdev_hold(spa->spa_root_vdev);
 1338                 spa_config_enter(spa, low, spa, RW_WRITER);
 1339         } else {
 1340                 spa_config_enter(spa, locks, spa, RW_WRITER);
 1341         }
 1342         spa->spa_vdev_locks = locks;
 1343 }
 1344 
 1345 int
 1346 spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
 1347 {
 1348         boolean_t config_changed = B_FALSE;
 1349         vdev_t *vdev_top;
 1350 
 1351         if (vd == NULL || vd == spa->spa_root_vdev) {
 1352                 vdev_top = spa->spa_root_vdev;
 1353         } else {
 1354                 vdev_top = vd->vdev_top;
 1355         }
 1356 
 1357         if (vd != NULL || error == 0)
 1358                 vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);
 1359 
 1360         if (vd != NULL) {
 1361                 if (vd != spa->spa_root_vdev)
 1362                         vdev_state_dirty(vdev_top);
 1363 
 1364                 config_changed = B_TRUE;
 1365                 spa->spa_config_generation++;
 1366         }
 1367 
 1368         if (spa_is_root(spa))
 1369                 vdev_rele(spa->spa_root_vdev);
 1370 
 1371         ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
 1372         spa_config_exit(spa, spa->spa_vdev_locks, spa);
 1373 
 1374         /*
 1375          * If anything changed, wait for it to sync.  This ensures that,
 1376          * from the system administrator's perspective, zpool(8) commands
 1377          * are synchronous.  This is important for things like zpool offline:
 1378          * when the command completes, you expect no further I/O from ZFS.
 1379          */
 1380         if (vd != NULL)
 1381                 txg_wait_synced(spa->spa_dsl_pool, 0);
 1382 
 1383         /*
 1384          * If the config changed, update the config cache.
 1385          */
 1386         if (config_changed) {
 1387                 mutex_enter(&spa_namespace_lock);
 1388                 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
 1389                 mutex_exit(&spa_namespace_lock);
 1390         }
 1391 
 1392         return (error);
 1393 }
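
/*
 * [Editor's sketch -- not part of the original file.]  Callers bracket a
 * vdev state change with the enter/exit pair above.  The hypothetical
 * helper below mirrors the shape of real callers such as vdev_online()
 * and vdev_offline() without reproducing their logic:
 */
static int
example_vdev_state_change(spa_t *spa, uint64_t guid)
{
        vdev_t *vd;

        spa_vdev_state_enter(spa, SCL_NONE);

        if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
                return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));

        /* ... change vd's state here ... */

        /*
         * Passing vd back marks the config dirty and waits for the
         * change to sync, so the operation is synchronous to callers.
         */
        return (spa_vdev_state_exit(spa, vd, 0));
}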
 1394 
 1395 /*
 1396  * ==========================================================================
 1397  * Miscellaneous functions
 1398  * ==========================================================================
 1399  */
 1400 
 1401 void
 1402 spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
 1403 {
 1404         if (!nvlist_exists(spa->spa_label_features, feature)) {
 1405                 fnvlist_add_boolean(spa->spa_label_features, feature);
 1406                 /*
 1407                  * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
1408                  * dirty the vdev config because the SCL_CONFIG lock is not held.
 1409                  * Thankfully, in this case we don't need to dirty the config
 1410                  * because it will be written out anyway when we finish
 1411                  * creating the pool.
 1412                  */
 1413                 if (tx->tx_txg != TXG_INITIAL)
 1414                         vdev_config_dirty(spa->spa_root_vdev);
 1415         }
 1416 }
 1417 
 1418 void
 1419 spa_deactivate_mos_feature(spa_t *spa, const char *feature)
 1420 {
 1421         if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
 1422                 vdev_config_dirty(spa->spa_root_vdev);
 1423 }
 1424 
 1425 /*
 1426  * Return the spa_t associated with given pool_guid, if it exists.  If
 1427  * device_guid is non-zero, determine whether the pool exists *and* contains
 1428  * a device with the specified device_guid.
 1429  */
 1430 spa_t *
 1431 spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
 1432 {
 1433         spa_t *spa;
 1434         avl_tree_t *t = &spa_namespace_avl;
 1435 
 1436         ASSERT(MUTEX_HELD(&spa_namespace_lock));
 1437 
 1438         for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
 1439                 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
 1440                         continue;
 1441                 if (spa->spa_root_vdev == NULL)
 1442                         continue;
 1443                 if (spa_guid(spa) == pool_guid) {
 1444                         if (device_guid == 0)
 1445                                 break;
 1446 
 1447                         if (vdev_lookup_by_guid(spa->spa_root_vdev,
 1448                             device_guid) != NULL)
 1449                                 break;
 1450 
 1451                         /*
 1452                          * Check any devices we may be in the process of adding.
 1453                          */
 1454                         if (spa->spa_pending_vdev) {
 1455                                 if (vdev_lookup_by_guid(spa->spa_pending_vdev,
 1456                                     device_guid) != NULL)
 1457                                         break;
 1458                         }
 1459                 }
 1460         }
 1461 
 1462         return (spa);
 1463 }
 1464 
 1465 /*
 1466  * Determine whether a pool with the given pool_guid exists.
 1467  */
 1468 boolean_t
 1469 spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
 1470 {
 1471         return (spa_by_guid(pool_guid, device_guid) != NULL);
 1472 }
 1473 
 1474 char *
 1475 spa_strdup(const char *s)
 1476 {
 1477         size_t len;
 1478         char *new;
 1479 
 1480         len = strlen(s);
 1481         new = kmem_alloc(len + 1, KM_SLEEP);
 1482         memcpy(new, s, len + 1);
 1483 
 1484         return (new);
 1485 }
 1486 
 1487 void
 1488 spa_strfree(char *s)
 1489 {
 1490         kmem_free(s, strlen(s) + 1);
 1491 }
 1492 
 1493 uint64_t
 1494 spa_generate_guid(spa_t *spa)
 1495 {
 1496         uint64_t guid;
 1497 
 1498         if (spa != NULL) {
 1499                 do {
 1500                         (void) random_get_pseudo_bytes((void *)&guid,
 1501                             sizeof (guid));
 1502                 } while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
 1503         } else {
 1504                 do {
 1505                         (void) random_get_pseudo_bytes((void *)&guid,
 1506                             sizeof (guid));
 1507                 } while (guid == 0 || spa_guid_exists(guid, 0));
 1508         }
 1509 
 1510         return (guid);
 1511 }
 1512 
 1513 void
 1514 snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
 1515 {
 1516         char type[256];
 1517         const char *checksum = NULL;
 1518         const char *compress = NULL;
 1519 
 1520         if (bp != NULL) {
 1521                 if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
 1522                         dmu_object_byteswap_t bswap =
 1523                             DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
 1524                         (void) snprintf(type, sizeof (type), "bswap %s %s",
 1525                             DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
 1526                             "metadata" : "data",
 1527                             dmu_ot_byteswap[bswap].ob_name);
 1528                 } else {
 1529                         (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
 1530                             sizeof (type));
 1531                 }
 1532                 if (!BP_IS_EMBEDDED(bp)) {
 1533                         checksum =
 1534                             zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
 1535                 }
 1536                 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
 1537         }
 1538 
 1539         SNPRINTF_BLKPTR(kmem_scnprintf, ' ', buf, buflen, bp, type, checksum,
 1540             compress);
 1541 }
 1542 
 1543 void
 1544 spa_freeze(spa_t *spa)
 1545 {
 1546         uint64_t freeze_txg = 0;
 1547 
 1548         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
 1549         if (spa->spa_freeze_txg == UINT64_MAX) {
 1550                 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
 1551                 spa->spa_freeze_txg = freeze_txg;
 1552         }
 1553         spa_config_exit(spa, SCL_ALL, FTAG);
 1554         if (freeze_txg != 0)
 1555                 txg_wait_synced(spa_get_dsl(spa), freeze_txg);
 1556 }
 1557 
 1558 void
 1559 zfs_panic_recover(const char *fmt, ...)
 1560 {
 1561         va_list adx;
 1562 
 1563         va_start(adx, fmt);
 1564         vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
 1565         va_end(adx);
 1566 }
 1567 
 1568 /*
 1569  * This is a stripped-down version of strtoull, suitable only for converting
 1570  * lowercase hexadecimal numbers that don't overflow.
 1571  */
 1572 uint64_t
 1573 zfs_strtonum(const char *str, char **nptr)
 1574 {
 1575         uint64_t val = 0;
 1576         char c;
 1577         int digit;
 1578 
 1579         while ((c = *str) != '\0') {
1580                 if (c >= '0' && c <= '9')
1581                         digit = c - '0';
 1582                 else if (c >= 'a' && c <= 'f')
 1583                         digit = 10 + c - 'a';
 1584                 else
 1585                         break;
 1586 
 1587                 val *= 16;
 1588                 val += digit;
 1589 
 1590                 str++;
 1591         }
 1592 
 1593         if (nptr)
 1594                 *nptr = (char *)str;
 1595 
 1596         return (val);
 1597 }
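
/*
 * [Editor's sketch -- not part of the original file.]  A userland copy of
 * the routine above, showing that it accepts only lowercase hex digits,
 * stops at the first non-hex character, and reports where it stopped:
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t
demo_strtonum(const char *str, char **nptr)
{
        uint64_t val = 0;
        char c;
        int digit;

        while ((c = *str) != '\0') {
                if (c >= '0' && c <= '9')
                        digit = c - '0';
                else if (c >= 'a' && c <= 'f')
                        digit = 10 + c - 'a';
                else
                        break;
                val = val * 16 + digit;
                str++;
        }
        if (nptr)
                *nptr = (char *)str;
        return (val);
}

int
main(void)
{
        char *end;
        uint64_t v = demo_strtonum("1a2b/rest", &end);

        /* Prints: v=6699 end="/rest" (0x1a2b == 6699) */
        printf("v=%llu end=\"%s\"\n", (unsigned long long)v, end);
        return (0);
}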
 1598 
 1599 void
 1600 spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
 1601 {
 1602         /*
 1603          * We bump the feature refcount for each special vdev added to the pool
 1604          */
 1605         ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
 1606         spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
 1607 }
 1608 
 1609 /*
 1610  * ==========================================================================
 1611  * Accessor functions
 1612  * ==========================================================================
 1613  */
 1614 
 1615 boolean_t
 1616 spa_shutting_down(spa_t *spa)
 1617 {
 1618         return (spa->spa_async_suspended);
 1619 }
 1620 
 1621 dsl_pool_t *
 1622 spa_get_dsl(spa_t *spa)
 1623 {
 1624         return (spa->spa_dsl_pool);
 1625 }
 1626 
 1627 boolean_t
 1628 spa_is_initializing(spa_t *spa)
 1629 {
 1630         return (spa->spa_is_initializing);
 1631 }
 1632 
 1633 boolean_t
 1634 spa_indirect_vdevs_loaded(spa_t *spa)
 1635 {
 1636         return (spa->spa_indirect_vdevs_loaded);
 1637 }
 1638 
 1639 blkptr_t *
 1640 spa_get_rootblkptr(spa_t *spa)
 1641 {
 1642         return (&spa->spa_ubsync.ub_rootbp);
 1643 }
 1644 
 1645 void
 1646 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
 1647 {
 1648         spa->spa_uberblock.ub_rootbp = *bp;
 1649 }
 1650 
 1651 void
 1652 spa_altroot(spa_t *spa, char *buf, size_t buflen)
 1653 {
 1654         if (spa->spa_root == NULL)
 1655                 buf[0] = '\0';
 1656         else
 1657                 (void) strlcpy(buf, spa->spa_root, buflen);
 1658 }
 1659 
 1660 uint32_t
 1661 spa_sync_pass(spa_t *spa)
 1662 {
 1663         return (spa->spa_sync_pass);
 1664 }
 1665 
 1666 char *
 1667 spa_name(spa_t *spa)
 1668 {
 1669         return (spa->spa_name);
 1670 }
 1671 
 1672 uint64_t
 1673 spa_guid(spa_t *spa)
 1674 {
 1675         dsl_pool_t *dp = spa_get_dsl(spa);
 1676         uint64_t guid;
 1677 
 1678         /*
 1679          * If we fail to parse the config during spa_load(), we can go through
 1680          * the error path (which posts an ereport) and end up here with no root
 1681          * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
 1682          * this case.
 1683          */
 1684         if (spa->spa_root_vdev == NULL)
 1685                 return (spa->spa_config_guid);
 1686 
 1687         guid = spa->spa_last_synced_guid != 0 ?
 1688             spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
 1689 
 1690         /*
 1691          * Return the most recently synced out guid unless we're
 1692          * in syncing context.
 1693          */
 1694         if (dp && dsl_pool_sync_context(dp))
 1695                 return (spa->spa_root_vdev->vdev_guid);
 1696         else
 1697                 return (guid);
 1698 }
 1699 
 1700 uint64_t
 1701 spa_load_guid(spa_t *spa)
 1702 {
 1703         /*
 1704          * This is a GUID that exists solely as a reference for the
 1705          * purposes of the arc.  It is generated at load time, and
 1706          * is never written to persistent storage.
 1707          */
 1708         return (spa->spa_load_guid);
 1709 }
 1710 
 1711 uint64_t
 1712 spa_last_synced_txg(spa_t *spa)
 1713 {
 1714         return (spa->spa_ubsync.ub_txg);
 1715 }
 1716 
 1717 uint64_t
 1718 spa_first_txg(spa_t *spa)
 1719 {
 1720         return (spa->spa_first_txg);
 1721 }
 1722 
 1723 uint64_t
 1724 spa_syncing_txg(spa_t *spa)
 1725 {
 1726         return (spa->spa_syncing_txg);
 1727 }
 1728 
 1729 /*
1730  * Return the last txg in which data can be dirtied. The final txgs
1731  * are used only to clear out any deferred frees that remain.
 1732  */
 1733 uint64_t
 1734 spa_final_dirty_txg(spa_t *spa)
 1735 {
 1736         return (spa->spa_final_txg - TXG_DEFER_SIZE);
 1737 }
 1738 
 1739 pool_state_t
 1740 spa_state(spa_t *spa)
 1741 {
 1742         return (spa->spa_state);
 1743 }
 1744 
 1745 spa_load_state_t
 1746 spa_load_state(spa_t *spa)
 1747 {
 1748         return (spa->spa_load_state);
 1749 }
 1750 
 1751 uint64_t
 1752 spa_freeze_txg(spa_t *spa)
 1753 {
 1754         return (spa->spa_freeze_txg);
 1755 }
 1756 
 1757 /*
 1758  * Return the inflated asize for a logical write in bytes. This is used by the
 1759  * DMU to calculate the space a logical write will require on disk.
1760  * If lsize is smaller than the largest physical block size allocatable on
1761  * this pool, we use that block size instead, since the write will end up
1762  * using the whole block anyway.
 1763  */
 1764 uint64_t
 1765 spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
 1766 {
 1767         if (lsize == 0)
 1768                 return (0);     /* No inflation needed */
 1769         return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
 1770 }
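
/*
 * [Editor's worked example -- not part of the original file.]  Assuming
 * the default spa_asize_inflation of 24 and spa_max_ashift = 12 (4 KiB
 * sectors), a 512-byte logical write is charged
 * MAX(512, 1 << 12) * 24 = 4096 * 24 = 98304 bytes (96 KiB) of
 * worst-case on-disk space.
 */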
 1771 
 1772 /*
 1773  * Return the amount of slop space in bytes.  It is typically 1/32 of the pool
 1774  * (3.2%), minus the embedded log space.  On very small pools, it may be
 1775  * slightly larger than this.  On very large pools, it will be capped to
 1776  * the value of spa_max_slop.  The embedded log space is not included in
 1777  * spa_dspace.  By subtracting it, the usable space (per "zfs list") is a
 1778  * constant 97% of the total space, regardless of metaslab size (assuming the
 1779  * default spa_slop_shift=5 and a non-tiny pool).
 1780  *
 1781  * See the comment above spa_slop_shift for more details.
 1782  */
 1783 uint64_t
 1784 spa_get_slop_space(spa_t *spa)
 1785 {
 1786         uint64_t space = 0;
 1787         uint64_t slop = 0;
 1788 
 1789         /*
 1790          * Make sure spa_dedup_dspace has been set.
 1791          */
 1792         if (spa->spa_dedup_dspace == ~0ULL)
 1793                 spa_update_dspace(spa);
 1794 
 1795         /*
1796          * spa_get_dspace() includes space that is only logically "used" by
1797          * deduplicated data; since reserving more space as deduplicated
1798          * data grows is not useful, we subtract that amount out here.
 1799          */
 1800         space = spa_get_dspace(spa) - spa->spa_dedup_dspace;
 1801         slop = MIN(space >> spa_slop_shift, spa_max_slop);
 1802 
 1803         /*
 1804          * Subtract the embedded log space, but no more than half the (3.2%)
 1805          * unusable space.  Note, the "no more than half" is only relevant if
 1806          * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
 1807          * default.
 1808          */
 1809         uint64_t embedded_log =
 1810             metaslab_class_get_dspace(spa_embedded_log_class(spa));
 1811         slop -= MIN(embedded_log, slop >> 1);
 1812 
 1813         /*
 1814          * Slop space should be at least spa_min_slop, but no more than half
 1815          * the entire pool.
 1816          */
 1817         slop = MAX(slop, MIN(space >> 1, spa_min_slop));
 1818         return (slop);
 1819 }
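
/*
 * [Editor's worked example -- not part of the original file.]  Assuming
 * the defaults spa_slop_shift = 5, spa_min_slop = 128 MiB and
 * spa_max_slop = 128 GiB, and ignoring dedup and embedded-log space:
 * a 1 TiB pool reserves 1 TiB >> 5 = 32 GiB of slop; a 100 TiB pool
 * would compute 3.125 TiB but is capped at 128 GiB; and a 1 GiB pool
 * computes 32 MiB, which the final MAX() raises to
 * MIN(512 MiB, 128 MiB) = 128 MiB.
 */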
 1820 
 1821 uint64_t
 1822 spa_get_dspace(spa_t *spa)
 1823 {
 1824         return (spa->spa_dspace);
 1825 }
 1826 
 1827 uint64_t
 1828 spa_get_checkpoint_space(spa_t *spa)
 1829 {
 1830         return (spa->spa_checkpoint_info.sci_dspace);
 1831 }
 1832 
 1833 void
 1834 spa_update_dspace(spa_t *spa)
 1835 {
 1836         spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
 1837             ddt_get_dedup_dspace(spa);
 1838         if (spa->spa_nonallocating_dspace > 0) {
 1839                 /*
 1840                  * Subtract the space provided by all non-allocating vdevs that
 1841                  * contribute to dspace.  If a file is overwritten, its old
 1842                  * blocks are freed and new blocks are allocated.  If there are
 1843                  * no snapshots of the file, the available space should remain
 1844                  * the same.  The old blocks could be freed from the
 1845                  * non-allocating vdev, but the new blocks must be allocated on
 1846                  * other (allocating) vdevs.  By reserving the entire size of
 1847                  * the non-allocating vdevs (including allocated space), we
 1848                  * ensure that there will be enough space on the allocating
 1849                  * vdevs for this file overwrite to succeed.
 1850                  *
 1851                  * Note that the DMU/DSL doesn't actually know or care
 1852                  * how much space is allocated (it does its own tracking
 1853                  * of how much space has been logically used).  So it
 1854                  * doesn't matter that the data we are moving may be
 1855                  * allocated twice (on the old device and the new device).
 1856                  */
 1857                 ASSERT3U(spa->spa_dspace, >=, spa->spa_nonallocating_dspace);
 1858                 spa->spa_dspace -= spa->spa_nonallocating_dspace;
 1859         }
 1860 }
 1861 
 1862 /*
1863  * Return the failure mode that has been set for this pool. The default
1864  * behavior is to block all I/Os when a complete failure occurs.
 1865  */
 1866 uint64_t
 1867 spa_get_failmode(spa_t *spa)
 1868 {
 1869         return (spa->spa_failmode);
 1870 }
 1871 
 1872 boolean_t
 1873 spa_suspended(spa_t *spa)
 1874 {
 1875         return (spa->spa_suspended != ZIO_SUSPEND_NONE);
 1876 }
 1877 
 1878 uint64_t
 1879 spa_version(spa_t *spa)
 1880 {
 1881         return (spa->spa_ubsync.ub_version);
 1882 }
 1883 
 1884 boolean_t
 1885 spa_deflate(spa_t *spa)
 1886 {
 1887         return (spa->spa_deflate);
 1888 }
 1889 
 1890 metaslab_class_t *
 1891 spa_normal_class(spa_t *spa)
 1892 {
 1893         return (spa->spa_normal_class);
 1894 }
 1895 
 1896 metaslab_class_t *
 1897 spa_log_class(spa_t *spa)
 1898 {
 1899         return (spa->spa_log_class);
 1900 }
 1901 
 1902 metaslab_class_t *
 1903 spa_embedded_log_class(spa_t *spa)
 1904 {
 1905         return (spa->spa_embedded_log_class);
 1906 }
 1907 
 1908 metaslab_class_t *
 1909 spa_special_class(spa_t *spa)
 1910 {
 1911         return (spa->spa_special_class);
 1912 }
 1913 
 1914 metaslab_class_t *
 1915 spa_dedup_class(spa_t *spa)
 1916 {
 1917         return (spa->spa_dedup_class);
 1918 }
 1919 
 1920 /*
 1921  * Locate an appropriate allocation class
 1922  */
 1923 metaslab_class_t *
 1924 spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
 1925     uint_t level, uint_t special_smallblk)
 1926 {
 1927         /*
 1928          * ZIL allocations determine their class in zio_alloc_zil().
 1929          */
 1930         ASSERT(objtype != DMU_OT_INTENT_LOG);
 1931 
 1932         boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
 1933 
 1934         if (DMU_OT_IS_DDT(objtype)) {
 1935                 if (spa->spa_dedup_class->mc_groups != 0)
 1936                         return (spa_dedup_class(spa));
 1937                 else if (has_special_class && zfs_ddt_data_is_special)
 1938                         return (spa_special_class(spa));
 1939                 else
 1940                         return (spa_normal_class(spa));
 1941         }
 1942 
 1943         /* Indirect blocks for user data can land in special if allowed */
 1944         if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
 1945                 if (has_special_class && zfs_user_indirect_is_special)
 1946                         return (spa_special_class(spa));
 1947                 else
 1948                         return (spa_normal_class(spa));
 1949         }
 1950 
 1951         if (DMU_OT_IS_METADATA(objtype) || level > 0) {
 1952                 if (has_special_class)
 1953                         return (spa_special_class(spa));
 1954                 else
 1955                         return (spa_normal_class(spa));
 1956         }
 1957 
 1958         /*
 1959          * Allow small file blocks in special class in some cases (like
 1960          * for the dRAID vdev feature). But always leave a reserve of
 1961          * zfs_special_class_metadata_reserve_pct exclusively for metadata.
 1962          */
 1963         if (DMU_OT_IS_FILE(objtype) &&
 1964             has_special_class && size <= special_smallblk) {
 1965                 metaslab_class_t *special = spa_special_class(spa);
 1966                 uint64_t alloc = metaslab_class_get_alloc(special);
 1967                 uint64_t space = metaslab_class_get_space(special);
 1968                 uint64_t limit =
 1969                     (space * (100 - zfs_special_class_metadata_reserve_pct))
 1970                     / 100;
 1971 
 1972                 if (alloc < limit)
 1973                         return (special);
 1974         }
 1975 
 1976         return (spa_normal_class(spa));
 1977 }
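
/*
 * [Editor's summary -- not part of the original file.]  The routine above
 * resolves classes in priority order:
 *
 *   DDT objects         -> dedup class; else special (if allowed); else normal
 *   indirect user data  -> special (if allowed); else normal
 *   metadata / level>0  -> special (if present); else normal
 *   small file blocks   -> special, when size <= special_smallblk and the
 *                          class is under its metadata-reserve limit
 *   everything else     -> normal class
 */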
 1978 
 1979 void
 1980 spa_evicting_os_register(spa_t *spa, objset_t *os)
 1981 {
 1982         mutex_enter(&spa->spa_evicting_os_lock);
 1983         list_insert_head(&spa->spa_evicting_os_list, os);
 1984         mutex_exit(&spa->spa_evicting_os_lock);
 1985 }
 1986 
 1987 void
 1988 spa_evicting_os_deregister(spa_t *spa, objset_t *os)
 1989 {
 1990         mutex_enter(&spa->spa_evicting_os_lock);
 1991         list_remove(&spa->spa_evicting_os_list, os);
 1992         cv_broadcast(&spa->spa_evicting_os_cv);
 1993         mutex_exit(&spa->spa_evicting_os_lock);
 1994 }
 1995 
 1996 void
 1997 spa_evicting_os_wait(spa_t *spa)
 1998 {
 1999         mutex_enter(&spa->spa_evicting_os_lock);
 2000         while (!list_is_empty(&spa->spa_evicting_os_list))
 2001                 cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
 2002         mutex_exit(&spa->spa_evicting_os_lock);
 2003 
 2004         dmu_buf_user_evict_wait();
 2005 }
 2006 
 2007 int
 2008 spa_max_replication(spa_t *spa)
 2009 {
 2010         /*
 2011          * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
 2012          * handle BPs with more than one DVA allocated.  Set our max
 2013          * replication level accordingly.
 2014          */
 2015         if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
 2016                 return (1);
 2017         return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
 2018 }
 2019 
 2020 int
 2021 spa_prev_software_version(spa_t *spa)
 2022 {
 2023         return (spa->spa_prev_software_version);
 2024 }
 2025 
 2026 uint64_t
 2027 spa_deadman_synctime(spa_t *spa)
 2028 {
 2029         return (spa->spa_deadman_synctime);
 2030 }
 2031 
 2032 spa_autotrim_t
 2033 spa_get_autotrim(spa_t *spa)
 2034 {
 2035         return (spa->spa_autotrim);
 2036 }
 2037 
 2038 uint64_t
 2039 spa_deadman_ziotime(spa_t *spa)
 2040 {
 2041         return (spa->spa_deadman_ziotime);
 2042 }
 2043 
 2044 uint64_t
 2045 spa_get_deadman_failmode(spa_t *spa)
 2046 {
 2047         return (spa->spa_deadman_failmode);
 2048 }
 2049 
 2050 void
 2051 spa_set_deadman_failmode(spa_t *spa, const char *failmode)
 2052 {
 2053         if (strcmp(failmode, "wait") == 0)
 2054                 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
 2055         else if (strcmp(failmode, "continue") == 0)
 2056                 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
 2057         else if (strcmp(failmode, "panic") == 0)
 2058                 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
 2059         else
 2060                 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
 2061 }
 2062 
 2063 void
 2064 spa_set_deadman_ziotime(hrtime_t ns)
 2065 {
 2066         spa_t *spa = NULL;
 2067 
 2068         if (spa_mode_global != SPA_MODE_UNINIT) {
 2069                 mutex_enter(&spa_namespace_lock);
 2070                 while ((spa = spa_next(spa)) != NULL)
 2071                         spa->spa_deadman_ziotime = ns;
 2072                 mutex_exit(&spa_namespace_lock);
 2073         }
 2074 }
 2075 
 2076 void
 2077 spa_set_deadman_synctime(hrtime_t ns)
 2078 {
 2079         spa_t *spa = NULL;
 2080 
 2081         if (spa_mode_global != SPA_MODE_UNINIT) {
 2082                 mutex_enter(&spa_namespace_lock);
 2083                 while ((spa = spa_next(spa)) != NULL)
 2084                         spa->spa_deadman_synctime = ns;
 2085                 mutex_exit(&spa_namespace_lock);
 2086         }
 2087 }
 2088 
 2089 uint64_t
 2090 dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
 2091 {
 2092         uint64_t asize = DVA_GET_ASIZE(dva);
 2093         uint64_t dsize = asize;
 2094 
 2095         ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
 2096 
 2097         if (asize != 0 && spa->spa_deflate) {
 2098                 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
 2099                 if (vd != NULL)
 2100                         dsize = (asize >> SPA_MINBLOCKSHIFT) *
 2101                             vd->vdev_deflate_ratio;
 2102         }
 2103 
 2104         return (dsize);
 2105 }
 2106 
 2107 uint64_t
 2108 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
 2109 {
 2110         uint64_t dsize = 0;
 2111 
 2112         for (int d = 0; d < BP_GET_NDVAS(bp); d++)
 2113                 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
 2114 
 2115         return (dsize);
 2116 }
 2117 
 2118 uint64_t
 2119 bp_get_dsize(spa_t *spa, const blkptr_t *bp)
 2120 {
 2121         uint64_t dsize = 0;
 2122 
 2123         spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 2124 
 2125         for (int d = 0; d < BP_GET_NDVAS(bp); d++)
 2126                 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
 2127 
 2128         spa_config_exit(spa, SCL_VDEV, FTAG);
 2129 
 2130         return (dsize);
 2131 }
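
/*
 * [Editor's worked example -- not part of the original file.]  The deflate
 * ratio rescales allocated size so that parity/replication overhead does
 * not show up as used space.  If, say, a raidz vdev allocates 160 KiB of
 * raw space per 128 KiB of data, its deflate ratio is roughly
 * 131072 / (163840 >> SPA_MINBLOCKSHIFT) = 409, and a DVA with
 * asize = 163840 reports dsize = (163840 >> 9) * 409 = 130880 bytes,
 * i.e. close to the original 128 KiB.
 */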
 2132 
 2133 uint64_t
 2134 spa_dirty_data(spa_t *spa)
 2135 {
 2136         return (spa->spa_dsl_pool->dp_dirty_total);
 2137 }
 2138 
 2139 /*
 2140  * ==========================================================================
 2141  * SPA Import Progress Routines
 2142  * ==========================================================================
 2143  */
 2144 
 2145 typedef struct spa_import_progress {
 2146         uint64_t                pool_guid;      /* unique id for updates */
 2147         char                    *pool_name;
 2148         spa_load_state_t        spa_load_state;
 2149         uint64_t                mmp_sec_remaining;      /* MMP activity check */
 2150         uint64_t                spa_load_max_txg;       /* rewind txg */
 2151         procfs_list_node_t      smh_node;
 2152 } spa_import_progress_t;
 2153 
 2154 spa_history_list_t *spa_import_progress_list = NULL;
 2155 
 2156 static int
 2157 spa_import_progress_show_header(struct seq_file *f)
 2158 {
 2159         seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid",
 2160             "load_state", "multihost_secs", "max_txg",
 2161             "pool_name");
 2162         return (0);
 2163 }
 2164 
 2165 static int
 2166 spa_import_progress_show(struct seq_file *f, void *data)
 2167 {
 2168         spa_import_progress_t *sip = (spa_import_progress_t *)data;
 2169 
 2170         seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n",
 2171             (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
 2172             (u_longlong_t)sip->mmp_sec_remaining,
 2173             (u_longlong_t)sip->spa_load_max_txg,
 2174             (sip->pool_name ? sip->pool_name : "-"));
 2175 
 2176         return (0);
 2177 }
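
/*
 * [Editor's sketch -- not part of the original file.]  Given the two
 * format strings above, the procfs file (on Linux, typically
 * /proc/spl/kstat/zfs/import_progress) would read roughly:
 *
 *   pool_guid            load_state     multihost_secs max_txg      pool_name
 *   16294195904495122481 3              0              0            tank
 *
 * where load_state is the numeric spa_load_state_t value.
 */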
 2178 
 2179 /* Remove oldest elements from list until there are no more than 'size' left */
 2180 static void
 2181 spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size)
 2182 {
 2183         spa_import_progress_t *sip;
 2184         while (shl->size > size) {
 2185                 sip = list_remove_head(&shl->procfs_list.pl_list);
 2186                 if (sip->pool_name)
 2187                         spa_strfree(sip->pool_name);
 2188                 kmem_free(sip, sizeof (spa_import_progress_t));
 2189                 shl->size--;
 2190         }
 2191 
 2192         IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
 2193 }
 2194 
 2195 static void
 2196 spa_import_progress_init(void)
 2197 {
 2198         spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t),
 2199             KM_SLEEP);
 2200 
 2201         spa_import_progress_list->size = 0;
 2202 
 2203         spa_import_progress_list->procfs_list.pl_private =
 2204             spa_import_progress_list;
 2205 
 2206         procfs_list_install("zfs",
 2207             NULL,
 2208             "import_progress",
 2209             0644,
 2210             &spa_import_progress_list->procfs_list,
 2211             spa_import_progress_show,
 2212             spa_import_progress_show_header,
 2213             NULL,
 2214             offsetof(spa_import_progress_t, smh_node));
 2215 }
 2216 
 2217 static void
 2218 spa_import_progress_destroy(void)
 2219 {
 2220         spa_history_list_t *shl = spa_import_progress_list;
 2221         procfs_list_uninstall(&shl->procfs_list);
 2222         spa_import_progress_truncate(shl, 0);
 2223         procfs_list_destroy(&shl->procfs_list);
 2224         kmem_free(shl, sizeof (spa_history_list_t));
 2225 }
 2226 
 2227 int
 2228 spa_import_progress_set_state(uint64_t pool_guid,
 2229     spa_load_state_t load_state)
 2230 {
 2231         spa_history_list_t *shl = spa_import_progress_list;
 2232         spa_import_progress_t *sip;
 2233         int error = ENOENT;
 2234 
 2235         if (shl->size == 0)
 2236                 return (0);
 2237 
 2238         mutex_enter(&shl->procfs_list.pl_lock);
 2239         for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
 2240             sip = list_prev(&shl->procfs_list.pl_list, sip)) {
 2241                 if (sip->pool_guid == pool_guid) {
 2242                         sip->spa_load_state = load_state;
 2243                         error = 0;
 2244                         break;
 2245                 }
 2246         }
 2247         mutex_exit(&shl->procfs_list.pl_lock);
 2248 
 2249         return (error);
 2250 }
 2251 
 2252 int
 2253 spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg)
 2254 {
 2255         spa_history_list_t *shl = spa_import_progress_list;
 2256         spa_import_progress_t *sip;
 2257         int error = ENOENT;
 2258 
 2259         if (shl->size == 0)
 2260                 return (0);
 2261 
 2262         mutex_enter(&shl->procfs_list.pl_lock);
 2263         for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
 2264             sip = list_prev(&shl->procfs_list.pl_list, sip)) {
 2265                 if (sip->pool_guid == pool_guid) {
 2266                         sip->spa_load_max_txg = load_max_txg;
 2267                         error = 0;
 2268                         break;
 2269                 }
 2270         }
 2271         mutex_exit(&shl->procfs_list.pl_lock);
 2272 
 2273         return (error);
 2274 }
 2275 
 2276 int
 2277 spa_import_progress_set_mmp_check(uint64_t pool_guid,
 2278     uint64_t mmp_sec_remaining)
 2279 {
 2280         spa_history_list_t *shl = spa_import_progress_list;
 2281         spa_import_progress_t *sip;
 2282         int error = ENOENT;
 2283 
 2284         if (shl->size == 0)
 2285                 return (0);
 2286 
 2287         mutex_enter(&shl->procfs_list.pl_lock);
 2288         for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
 2289             sip = list_prev(&shl->procfs_list.pl_list, sip)) {
 2290                 if (sip->pool_guid == pool_guid) {
 2291                         sip->mmp_sec_remaining = mmp_sec_remaining;
 2292                         error = 0;
 2293                         break;
 2294                 }
 2295         }
 2296         mutex_exit(&shl->procfs_list.pl_lock);
 2297 
 2298         return (error);
 2299 }
 2300 
 2301 /*
2302  * A new import is in progress; add an entry.
 2303  */
 2304 void
 2305 spa_import_progress_add(spa_t *spa)
 2306 {
 2307         spa_history_list_t *shl = spa_import_progress_list;
 2308         spa_import_progress_t *sip;
 2309         char *poolname = NULL;
 2310 
 2311         sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP);
 2312         sip->pool_guid = spa_guid(spa);
 2313 
 2314         (void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
 2315             &poolname);
 2316         if (poolname == NULL)
 2317                 poolname = spa_name(spa);
 2318         sip->pool_name = spa_strdup(poolname);
 2319         sip->spa_load_state = spa_load_state(spa);
 2320 
 2321         mutex_enter(&shl->procfs_list.pl_lock);
 2322         procfs_list_add(&shl->procfs_list, sip);
 2323         shl->size++;
 2324         mutex_exit(&shl->procfs_list.pl_lock);
 2325 }
 2326 
 2327 void
 2328 spa_import_progress_remove(uint64_t pool_guid)
 2329 {
 2330         spa_history_list_t *shl = spa_import_progress_list;
 2331         spa_import_progress_t *sip;
 2332 
 2333         mutex_enter(&shl->procfs_list.pl_lock);
 2334         for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
 2335             sip = list_prev(&shl->procfs_list.pl_list, sip)) {
 2336                 if (sip->pool_guid == pool_guid) {
 2337                         if (sip->pool_name)
 2338                                 spa_strfree(sip->pool_name);
 2339                         list_remove(&shl->procfs_list.pl_list, sip);
 2340                         shl->size--;
 2341                         kmem_free(sip, sizeof (spa_import_progress_t));
 2342                         break;
 2343                 }
 2344         }
 2345         mutex_exit(&shl->procfs_list.pl_lock);
 2346 }
 2347 
 2348 /*
 2349  * ==========================================================================
 2350  * Initialization and Termination
 2351  * ==========================================================================
 2352  */
 2353 
 2354 static int
 2355 spa_name_compare(const void *a1, const void *a2)
 2356 {
 2357         const spa_t *s1 = a1;
 2358         const spa_t *s2 = a2;
 2359         int s;
 2360 
 2361         s = strcmp(s1->spa_name, s2->spa_name);
 2362 
 2363         return (TREE_ISIGN(s));
 2364 }
 2365 
 2366 void
 2367 spa_boot_init(void)
 2368 {
 2369         spa_config_load();
 2370 }
 2371 
 2372 void
 2373 spa_init(spa_mode_t mode)
 2374 {
 2375         mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
 2376         mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
 2377         mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
 2378         cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
 2379 
 2380         avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
 2381             offsetof(spa_t, spa_avl));
 2382 
 2383         avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
 2384             offsetof(spa_aux_t, aux_avl));
 2385 
 2386         avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
 2387             offsetof(spa_aux_t, aux_avl));
 2388 
 2389         spa_mode_global = mode;
 2390 
 2391 #ifndef _KERNEL
 2392         if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) {
 2393                 struct sigaction sa;
 2394 
 2395                 sa.sa_flags = SA_SIGINFO;
 2396                 sigemptyset(&sa.sa_mask);
 2397                 sa.sa_sigaction = arc_buf_sigsegv;
 2398 
 2399                 if (sigaction(SIGSEGV, &sa, NULL) == -1) {
 2400                         perror("could not enable watchpoints: "
 2401                             "sigaction(SIGSEGV, ...) = ");
 2402                 } else {
 2403                         arc_watch = B_TRUE;
 2404                 }
 2405         }
 2406 #endif
 2407 
 2408         fm_init();
 2409         zfs_refcount_init();
 2410         unique_init();
 2411         zfs_btree_init();
 2412         metaslab_stat_init();
 2413         ddt_init();
 2414         zio_init();
 2415         dmu_init();
 2416         zil_init();
 2417         vdev_cache_stat_init();
 2418         vdev_mirror_stat_init();
 2419         vdev_raidz_math_init();
 2420         vdev_file_init();
 2421         zfs_prop_init();
 2422         chksum_init();
 2423         zpool_prop_init();
 2424         zpool_feature_init();
 2425         spa_config_load();
 2426         vdev_prop_init();
 2427         l2arc_start();
 2428         scan_init();
 2429         qat_init();
 2430         spa_import_progress_init();
 2431 }
 2432 
 2433 void
 2434 spa_fini(void)
 2435 {
 2436         l2arc_stop();
 2437 
 2438         spa_evict_all();
 2439 
 2440         vdev_file_fini();
 2441         vdev_cache_stat_fini();
 2442         vdev_mirror_stat_fini();
 2443         vdev_raidz_math_fini();
 2444         chksum_fini();
 2445         zil_fini();
 2446         dmu_fini();
 2447         zio_fini();
 2448         ddt_fini();
 2449         metaslab_stat_fini();
 2450         zfs_btree_fini();
 2451         unique_fini();
 2452         zfs_refcount_fini();
 2453         fm_fini();
 2454         scan_fini();
 2455         qat_fini();
 2456         spa_import_progress_destroy();
 2457 
 2458         avl_destroy(&spa_namespace_avl);
 2459         avl_destroy(&spa_spare_avl);
 2460         avl_destroy(&spa_l2cache_avl);
 2461 
 2462         cv_destroy(&spa_namespace_cv);
 2463         mutex_destroy(&spa_namespace_lock);
 2464         mutex_destroy(&spa_spare_lock);
 2465         mutex_destroy(&spa_l2cache_lock);
 2466 }
 2467 
 2468 /*
 2469  * Return whether this pool has a dedicated slog device. No locking needed.
 2470  * It's not a problem if the wrong answer is returned as it's only for
 2471  * performance and not correctness.
 2472  */
 2473 boolean_t
 2474 spa_has_slogs(spa_t *spa)
 2475 {
 2476         return (spa->spa_log_class->mc_groups != 0);
 2477 }
 2478 
 2479 spa_log_state_t
 2480 spa_get_log_state(spa_t *spa)
 2481 {
 2482         return (spa->spa_log_state);
 2483 }
 2484 
 2485 void
 2486 spa_set_log_state(spa_t *spa, spa_log_state_t state)
 2487 {
 2488         spa->spa_log_state = state;
 2489 }
 2490 
 2491 boolean_t
 2492 spa_is_root(spa_t *spa)
 2493 {
 2494         return (spa->spa_is_root);
 2495 }
 2496 
 2497 boolean_t
 2498 spa_writeable(spa_t *spa)
 2499 {
 2500         return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
 2501 }
 2502 
 2503 /*
 2504  * Returns true if there is a pending sync task in any of the current
 2505  * syncing txg, the current quiescing txg, or the current open txg.
 2506  */
 2507 boolean_t
 2508 spa_has_pending_synctask(spa_t *spa)
 2509 {
 2510         return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
 2511             !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
 2512 }
 2513 
 2514 spa_mode_t
 2515 spa_mode(spa_t *spa)
 2516 {
 2517         return (spa->spa_mode);
 2518 }
 2519 
 2520 uint64_t
 2521 spa_bootfs(spa_t *spa)
 2522 {
 2523         return (spa->spa_bootfs);
 2524 }
 2525 
 2526 uint64_t
 2527 spa_delegation(spa_t *spa)
 2528 {
 2529         return (spa->spa_delegation);
 2530 }
 2531 
 2532 objset_t *
 2533 spa_meta_objset(spa_t *spa)
 2534 {
 2535         return (spa->spa_meta_objset);
 2536 }
 2537 
 2538 enum zio_checksum
 2539 spa_dedup_checksum(spa_t *spa)
 2540 {
 2541         return (spa->spa_dedup_checksum);
 2542 }
 2543 
 2544 /*
2545  * Reset pool scan stats per scan pass (or reboot).
 2546  */
 2547 void
 2548 spa_scan_stat_init(spa_t *spa)
 2549 {
 2550         /* data not stored on disk */
 2551         spa->spa_scan_pass_start = gethrestime_sec();
 2552         if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
 2553                 spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
 2554         else
 2555                 spa->spa_scan_pass_scrub_pause = 0;
 2556         spa->spa_scan_pass_scrub_spent_paused = 0;
 2557         spa->spa_scan_pass_exam = 0;
 2558         spa->spa_scan_pass_issued = 0;
 2559         vdev_scan_stat_init(spa->spa_root_vdev);
 2560 }
 2561 
 2562 /*
 2563  * Get scan stats for zpool status reports
 2564  */
 2565 int
 2566 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
 2567 {
 2568         dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
 2569 
 2570         if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
 2571                 return (SET_ERROR(ENOENT));
 2572         memset(ps, 0, sizeof (pool_scan_stat_t));
 2573 
 2574         /* data stored on disk */
 2575         ps->pss_func = scn->scn_phys.scn_func;
 2576         ps->pss_state = scn->scn_phys.scn_state;
 2577         ps->pss_start_time = scn->scn_phys.scn_start_time;
 2578         ps->pss_end_time = scn->scn_phys.scn_end_time;
 2579         ps->pss_to_examine = scn->scn_phys.scn_to_examine;
 2580         ps->pss_examined = scn->scn_phys.scn_examined;
 2581         ps->pss_to_process = scn->scn_phys.scn_to_process;
 2582         ps->pss_processed = scn->scn_phys.scn_processed;
 2583         ps->pss_errors = scn->scn_phys.scn_errors;
 2584 
 2585         /* data not stored on disk */
 2586         ps->pss_pass_exam = spa->spa_scan_pass_exam;
 2587         ps->pss_pass_start = spa->spa_scan_pass_start;
 2588         ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
 2589         ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
 2590         ps->pss_pass_issued = spa->spa_scan_pass_issued;
 2591         ps->pss_issued =
 2592             scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
 2593 
 2594         return (0);
 2595 }
 2596 
 2597 int
 2598 spa_maxblocksize(spa_t *spa)
 2599 {
 2600         if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
 2601                 return (SPA_MAXBLOCKSIZE);
 2602         else
 2603                 return (SPA_OLD_MAXBLOCKSIZE);
 2604 }
 2605 
 2606 
 2607 /*
2608  * Returns the txg in which the last device removal completed. No indirect mappings
 2609  * have been added since this txg.
 2610  */
 2611 uint64_t
 2612 spa_get_last_removal_txg(spa_t *spa)
 2613 {
 2614         uint64_t vdevid;
 2615         uint64_t ret = -1ULL;
 2616 
 2617         spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 2618         /*
 2619          * sr_prev_indirect_vdev is only modified while holding all the
 2620          * config locks, so it is sufficient to hold SCL_VDEV as reader when
 2621          * examining it.
 2622          */
 2623         vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
 2624 
 2625         while (vdevid != -1ULL) {
 2626                 vdev_t *vd = vdev_lookup_top(spa, vdevid);
 2627                 vdev_indirect_births_t *vib = vd->vdev_indirect_births;
 2628 
 2629                 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
 2630 
 2631                 /*
 2632                  * If the removal did not remap any data, we don't care.
 2633                  */
 2634                 if (vdev_indirect_births_count(vib) != 0) {
 2635                         ret = vdev_indirect_births_last_entry_txg(vib);
 2636                         break;
 2637                 }
 2638 
 2639                 vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
 2640         }
 2641         spa_config_exit(spa, SCL_VDEV, FTAG);
 2642 
 2643         IMPLY(ret != -1ULL,
 2644             spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
 2645 
 2646         return (ret);
 2647 }
 2648 
 2649 int
 2650 spa_maxdnodesize(spa_t *spa)
 2651 {
 2652         if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
 2653                 return (DNODE_MAX_SIZE);
 2654         else
 2655                 return (DNODE_MIN_SIZE);
 2656 }
 2657 
 2658 boolean_t
 2659 spa_multihost(spa_t *spa)
 2660 {
 2661         return (spa->spa_multihost ? B_TRUE : B_FALSE);
 2662 }
 2663 
 2664 uint32_t
 2665 spa_get_hostid(spa_t *spa)
 2666 {
 2667         return (spa->spa_hostid);
 2668 }
 2669 
 2670 boolean_t
 2671 spa_trust_config(spa_t *spa)
 2672 {
 2673         return (spa->spa_trust_config);
 2674 }
 2675 
 2676 uint64_t
 2677 spa_missing_tvds_allowed(spa_t *spa)
 2678 {
 2679         return (spa->spa_missing_tvds_allowed);
 2680 }
 2681 
 2682 space_map_t *
 2683 spa_syncing_log_sm(spa_t *spa)
 2684 {
 2685         return (spa->spa_syncing_log_sm);
 2686 }
 2687 
 2688 void
 2689 spa_set_missing_tvds(spa_t *spa, uint64_t missing)
 2690 {
 2691         spa->spa_missing_tvds = missing;
 2692 }
 2693 
 2694 /*
2695  * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc.).
 2696  */
 2697 const char *
 2698 spa_state_to_name(spa_t *spa)
 2699 {
 2700         ASSERT3P(spa, !=, NULL);
 2701 
 2702         /*
2703          * It is possible for the spa to exist without a root vdev
2704          * while it transitions during import/export.
 2705          */
 2706         vdev_t *rvd = spa->spa_root_vdev;
 2707         if (rvd == NULL) {
 2708                 return ("TRANSITIONING");
 2709         }
 2710         vdev_state_t state = rvd->vdev_state;
 2711         vdev_aux_t aux = rvd->vdev_stat.vs_aux;
 2712 
 2713         if (spa_suspended(spa) &&
 2714             (spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE))
 2715                 return ("SUSPENDED");
 2716 
 2717         switch (state) {
 2718         case VDEV_STATE_CLOSED:
 2719         case VDEV_STATE_OFFLINE:
 2720                 return ("OFFLINE");
 2721         case VDEV_STATE_REMOVED:
 2722                 return ("REMOVED");
 2723         case VDEV_STATE_CANT_OPEN:
 2724                 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
 2725                         return ("FAULTED");
 2726                 else if (aux == VDEV_AUX_SPLIT_POOL)
 2727                         return ("SPLIT");
 2728                 else
 2729                         return ("UNAVAIL");
 2730         case VDEV_STATE_FAULTED:
 2731                 return ("FAULTED");
 2732         case VDEV_STATE_DEGRADED:
 2733                 return ("DEGRADED");
 2734         case VDEV_STATE_HEALTHY:
 2735                 return ("ONLINE");
 2736         default:
 2737                 break;
 2738         }
 2739 
 2740         return ("UNKNOWN");
 2741 }
 2742 
 2743 boolean_t
 2744 spa_top_vdevs_spacemap_addressable(spa_t *spa)
 2745 {
 2746         vdev_t *rvd = spa->spa_root_vdev;
 2747         for (uint64_t c = 0; c < rvd->vdev_children; c++) {
 2748                 if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
 2749                         return (B_FALSE);
 2750         }
 2751         return (B_TRUE);
 2752 }
 2753 
 2754 boolean_t
 2755 spa_has_checkpoint(spa_t *spa)
 2756 {
 2757         return (spa->spa_checkpoint_txg != 0);
 2758 }
 2759 
 2760 boolean_t
 2761 spa_importing_readonly_checkpoint(spa_t *spa)
 2762 {
 2763         return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
 2764             spa->spa_mode == SPA_MODE_READ);
 2765 }
 2766 
 2767 uint64_t
 2768 spa_min_claim_txg(spa_t *spa)
 2769 {
 2770         uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
 2771 
 2772         if (checkpoint_txg != 0)
 2773                 return (checkpoint_txg + 1);
 2774 
 2775         return (spa->spa_first_txg);
 2776 }
 2777 
 2778 /*
 2779  * If there is a checkpoint, async destroys may consume more space from
 2780  * the pool instead of freeing it. In an attempt to save the pool from
 2781  * getting suspended when it is about to run out of space, we stop
 2782  * processing async destroys.
 2783  */
 2784 boolean_t
 2785 spa_suspend_async_destroy(spa_t *spa)
 2786 {
 2787         dsl_pool_t *dp = spa_get_dsl(spa);
 2788 
 2789         uint64_t unreserved = dsl_pool_unreserved_space(dp,
 2790             ZFS_SPACE_CHECK_EXTRA_RESERVED);
 2791         uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
 2792         uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
 2793 
 2794         if (spa_has_checkpoint(spa) && avail == 0)
 2795                 return (B_TRUE);
 2796 
 2797         return (B_FALSE);
 2798 }
 2799 
 2800 #if defined(_KERNEL)
 2801 
 2802 int
 2803 param_set_deadman_failmode_common(const char *val)
 2804 {
 2805         spa_t *spa = NULL;
 2806         char *p;
 2807 
 2808         if (val == NULL)
 2809                 return (SET_ERROR(EINVAL));
 2810 
 2811         if ((p = strchr(val, '\n')) != NULL)
 2812                 *p = '\0';
 2813 
 2814         if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
2815             strcmp(val, "panic") != 0)
 2816                 return (SET_ERROR(EINVAL));
 2817 
 2818         if (spa_mode_global != SPA_MODE_UNINIT) {
 2819                 mutex_enter(&spa_namespace_lock);
 2820                 while ((spa = spa_next(spa)) != NULL)
 2821                         spa_set_deadman_failmode(spa, val);
 2822                 mutex_exit(&spa_namespace_lock);
 2823         }
 2824 
 2825         return (0);
 2826 }
 2827 #endif
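
/*
 * [Editor's usage note -- not part of the original file.]  On Linux this
 * validator backs the zfs_deadman_failmode module parameter, so the
 * failmode of all imported pools can be switched at runtime, e.g.:
 *
 *   echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 *
 * Anything other than "wait", "continue", or "panic" is rejected with
 * EINVAL by the check above.
 */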
 2828 
 2829 /* Namespace manipulation */
 2830 EXPORT_SYMBOL(spa_lookup);
 2831 EXPORT_SYMBOL(spa_add);
 2832 EXPORT_SYMBOL(spa_remove);
 2833 EXPORT_SYMBOL(spa_next);
 2834 
 2835 /* Refcount functions */
 2836 EXPORT_SYMBOL(spa_open_ref);
 2837 EXPORT_SYMBOL(spa_close);
 2838 EXPORT_SYMBOL(spa_refcount_zero);
 2839 
 2840 /* Pool configuration lock */
 2841 EXPORT_SYMBOL(spa_config_tryenter);
 2842 EXPORT_SYMBOL(spa_config_enter);
 2843 EXPORT_SYMBOL(spa_config_exit);
 2844 EXPORT_SYMBOL(spa_config_held);
 2845 
 2846 /* Pool vdev add/remove lock */
 2847 EXPORT_SYMBOL(spa_vdev_enter);
 2848 EXPORT_SYMBOL(spa_vdev_exit);
 2849 
 2850 /* Pool vdev state change lock */
 2851 EXPORT_SYMBOL(spa_vdev_state_enter);
 2852 EXPORT_SYMBOL(spa_vdev_state_exit);
 2853 
 2854 /* Accessor functions */
 2855 EXPORT_SYMBOL(spa_shutting_down);
 2856 EXPORT_SYMBOL(spa_get_dsl);
 2857 EXPORT_SYMBOL(spa_get_rootblkptr);
 2858 EXPORT_SYMBOL(spa_set_rootblkptr);
 2859 EXPORT_SYMBOL(spa_altroot);
 2860 EXPORT_SYMBOL(spa_sync_pass);
 2861 EXPORT_SYMBOL(spa_name);
 2862 EXPORT_SYMBOL(spa_guid);
 2863 EXPORT_SYMBOL(spa_last_synced_txg);
 2864 EXPORT_SYMBOL(spa_first_txg);
 2865 EXPORT_SYMBOL(spa_syncing_txg);
 2866 EXPORT_SYMBOL(spa_version);
 2867 EXPORT_SYMBOL(spa_state);
 2868 EXPORT_SYMBOL(spa_load_state);
 2869 EXPORT_SYMBOL(spa_freeze_txg);
 2870 EXPORT_SYMBOL(spa_get_dspace);
 2871 EXPORT_SYMBOL(spa_update_dspace);
 2872 EXPORT_SYMBOL(spa_deflate);
 2873 EXPORT_SYMBOL(spa_normal_class);
 2874 EXPORT_SYMBOL(spa_log_class);
 2875 EXPORT_SYMBOL(spa_special_class);
 2876 EXPORT_SYMBOL(spa_preferred_class);
 2877 EXPORT_SYMBOL(spa_max_replication);
 2878 EXPORT_SYMBOL(spa_prev_software_version);
 2879 EXPORT_SYMBOL(spa_get_failmode);
 2880 EXPORT_SYMBOL(spa_suspended);
 2881 EXPORT_SYMBOL(spa_bootfs);
 2882 EXPORT_SYMBOL(spa_delegation);
 2883 EXPORT_SYMBOL(spa_meta_objset);
 2884 EXPORT_SYMBOL(spa_maxblocksize);
 2885 EXPORT_SYMBOL(spa_maxdnodesize);
 2886 
 2887 /* Miscellaneous support routines */
 2888 EXPORT_SYMBOL(spa_guid_exists);
 2889 EXPORT_SYMBOL(spa_strdup);
 2890 EXPORT_SYMBOL(spa_strfree);
 2891 EXPORT_SYMBOL(spa_generate_guid);
 2892 EXPORT_SYMBOL(snprintf_blkptr);
 2893 EXPORT_SYMBOL(spa_freeze);
 2894 EXPORT_SYMBOL(spa_upgrade);
 2895 EXPORT_SYMBOL(spa_evict_all);
 2896 EXPORT_SYMBOL(spa_lookup_by_guid);
 2897 EXPORT_SYMBOL(spa_has_spare);
 2898 EXPORT_SYMBOL(dva_get_dsize_sync);
 2899 EXPORT_SYMBOL(bp_get_dsize_sync);
 2900 EXPORT_SYMBOL(bp_get_dsize);
 2901 EXPORT_SYMBOL(spa_has_slogs);
 2902 EXPORT_SYMBOL(spa_is_root);
 2903 EXPORT_SYMBOL(spa_writeable);
 2904 EXPORT_SYMBOL(spa_mode);
 2905 EXPORT_SYMBOL(spa_namespace_lock);
 2906 EXPORT_SYMBOL(spa_trust_config);
 2907 EXPORT_SYMBOL(spa_missing_tvds_allowed);
 2908 EXPORT_SYMBOL(spa_set_missing_tvds);
 2909 EXPORT_SYMBOL(spa_state_to_name);
 2910 EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
 2911 EXPORT_SYMBOL(spa_min_claim_txg);
 2912 EXPORT_SYMBOL(spa_suspend_async_destroy);
 2913 EXPORT_SYMBOL(spa_has_checkpoint);
 2914 EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);
 2915 
 2916 ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
 2917         "Set additional debugging flags");
 2918 
 2919 ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
 2920         "Set to attempt to recover from fatal errors");
 2921 
 2922 ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
 2923         "Set to ignore IO errors during free and permanently leak the space");
 2924 
 2925 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW,
 2926         "Dead I/O check interval in milliseconds");
 2927 
 2928 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
 2929         "Enable deadman timer");
 2930 
 2931 ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW,
 2932         "SPA size estimate multiplication factor");
 2933 
 2934 ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
 2935         "Place DDT data into the special class");
 2936 
 2937 ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
 2938         "Place user data indirect blocks into the special class");
 2939 
 2940 /* BEGIN CSTYLED */
 2941 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
 2942         param_set_deadman_failmode, param_get_charp, ZMOD_RW,
 2943         "Failmode for deadman timer");
 2944 
 2945 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
 2946         param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW,
 2947         "Pool sync expiration time in milliseconds");
 2948 
 2949 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
 2950         param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW,
 2951         "IO expiration time in milliseconds");
 2952 
 2953 ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW,
2954         "Small file blocks in special vdevs depend on this much "
2955         "free space being available");
 2956 /* END CSTYLED */
 2957 
 2958 ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
 2959         param_get_uint, ZMOD_RW, "Reserved free space in pool");
