1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
26 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
27 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
28 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
29 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
30 * Copyright (c) 2017 Datto Inc.
31 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
32 * Copyright (c) 2017, Intel Corporation.
33 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
34 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
35 * Copyright (c) 2021, Klara Inc.
36 * Copyright [2021] Hewlett Packard Enterprise Development LP
37 */
38
39 #include <assert.h>
40 #include <ctype.h>
41 #include <dirent.h>
42 #include <errno.h>
43 #include <fcntl.h>
44 #include <getopt.h>
45 #include <libgen.h>
46 #include <libintl.h>
47 #include <libuutil.h>
48 #include <locale.h>
49 #include <pthread.h>
50 #include <stdio.h>
51 #include <stdlib.h>
52 #include <string.h>
53 #include <time.h>
54 #include <unistd.h>
55 #include <pwd.h>
56 #include <zone.h>
57 #include <sys/wait.h>
58 #include <zfs_prop.h>
59 #include <sys/fs/zfs.h>
60 #include <sys/stat.h>
61 #include <sys/systeminfo.h>
62 #include <sys/fm/fs/zfs.h>
63 #include <sys/fm/util.h>
64 #include <sys/fm/protocol.h>
65 #include <sys/zfs_ioctl.h>
66 #include <sys/mount.h>
67 #include <sys/sysmacros.h>
68
69 #include <math.h>
70
71 #include <libzfs.h>
72 #include <libzutil.h>
73
74 #include "zpool_util.h"
75 #include "zfs_comutil.h"
76 #include "zfeature_common.h"
77
78 #include "statcommon.h"
79
80 libzfs_handle_t *g_zfs;
81
82 static int zpool_do_create(int, char **);
83 static int zpool_do_destroy(int, char **);
84
85 static int zpool_do_add(int, char **);
86 static int zpool_do_remove(int, char **);
87 static int zpool_do_labelclear(int, char **);
88
89 static int zpool_do_checkpoint(int, char **);
90
91 static int zpool_do_list(int, char **);
92 static int zpool_do_iostat(int, char **);
93 static int zpool_do_status(int, char **);
94
95 static int zpool_do_online(int, char **);
96 static int zpool_do_offline(int, char **);
97 static int zpool_do_clear(int, char **);
98 static int zpool_do_reopen(int, char **);
99
100 static int zpool_do_reguid(int, char **);
101
102 static int zpool_do_attach(int, char **);
103 static int zpool_do_detach(int, char **);
104 static int zpool_do_replace(int, char **);
105 static int zpool_do_split(int, char **);
106
107 static int zpool_do_initialize(int, char **);
108 static int zpool_do_scrub(int, char **);
109 static int zpool_do_resilver(int, char **);
110 static int zpool_do_trim(int, char **);
111
112 static int zpool_do_import(int, char **);
113 static int zpool_do_export(int, char **);
114
115 static int zpool_do_upgrade(int, char **);
116
117 static int zpool_do_history(int, char **);
118 static int zpool_do_events(int, char **);
119
120 static int zpool_do_get(int, char **);
121 static int zpool_do_set(int, char **);
122
123 static int zpool_do_sync(int, char **);
124
125 static int zpool_do_version(int, char **);
126
127 static int zpool_do_wait(int, char **);
128
129 static zpool_compat_status_t zpool_do_load_compat(
130 const char *, boolean_t *);
131
132 /*
133 * These libumem hooks provide a reasonable set of defaults for the allocator's
134 * debugging facilities.
135 */
136
137 #ifdef DEBUG
138 const char *
139 _umem_debug_init(void)
140 {
141 return ("default,verbose"); /* $UMEM_DEBUG setting */
142 }
143
144 const char *
145 _umem_logging_init(void)
146 {
147 return ("fail,contents"); /* $UMEM_LOGGING setting */
148 }
149 #endif
150
151 typedef enum {
152 HELP_ADD,
153 HELP_ATTACH,
154 HELP_CLEAR,
155 HELP_CREATE,
156 HELP_CHECKPOINT,
157 HELP_DESTROY,
158 HELP_DETACH,
159 HELP_EXPORT,
160 HELP_HISTORY,
161 HELP_IMPORT,
162 HELP_IOSTAT,
163 HELP_LABELCLEAR,
164 HELP_LIST,
165 HELP_OFFLINE,
166 HELP_ONLINE,
167 HELP_REPLACE,
168 HELP_REMOVE,
169 HELP_INITIALIZE,
170 HELP_SCRUB,
171 HELP_RESILVER,
172 HELP_TRIM,
173 HELP_STATUS,
174 HELP_UPGRADE,
175 HELP_EVENTS,
176 HELP_GET,
177 HELP_SET,
178 HELP_SPLIT,
179 HELP_SYNC,
180 HELP_REGUID,
181 HELP_REOPEN,
182 HELP_VERSION,
183 HELP_WAIT
184 } zpool_help_t;
185
186
187 /*
 * Flags for stats to display with "zpool iostat"
189 */
190 enum iostat_type {
191 IOS_DEFAULT = 0,
192 IOS_LATENCY = 1,
193 IOS_QUEUES = 2,
194 IOS_L_HISTO = 3,
195 IOS_RQ_HISTO = 4,
196 IOS_COUNT, /* always last element */
197 };
198
199 /* iostat_type entries as bitmasks */
200 #define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
201 #define IOS_LATENCY_M (1ULL << IOS_LATENCY)
202 #define IOS_QUEUES_M (1ULL << IOS_QUEUES)
203 #define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
204 #define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)
205
206 /* Mask of all the histo bits */
207 #define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
208
209 /*
210 * Lookup table for iostat flags to nvlist names. Basically a list
211 * of all the nvlists a flag requires. Also specifies the order in
212 * which data gets printed in zpool iostat.
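 * Each row is NULL-terminated; the inner dimension (15) is sized for the
 * longest row, IOS_RQ_HISTO (14 nvlist names plus the terminating NULL).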
213 */
214 static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
215 [IOS_L_HISTO] = {
216 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
217 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
218 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
219 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
220 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
221 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
222 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
223 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
224 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
225 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
226 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
227 NULL},
228 [IOS_LATENCY] = {
229 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
230 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
231 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
232 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
233 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
234 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
235 NULL},
236 [IOS_QUEUES] = {
237 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
238 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
239 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
240 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
241 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
242 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
243 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
244 NULL},
245 [IOS_RQ_HISTO] = {
246 ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
247 ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
248 ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
249 ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
250 ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
251 ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
252 ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
253 ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
254 ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
255 ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
256 ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
257 ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
258 ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
259 ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
260 NULL},
261 };
262
263
264 /*
265 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
 * Right now, only one histo bit is ever set at a time, so we can
 * just use highbit64(a).
268 */
269 #define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
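
/*
 * For example, if only IOS_L_HISTO_M (1ULL << 3) is set in cb_flags, then
 * highbit64() (the 1-based index of the highest set bit) returns 4 and
 * IOS_HISTO_IDX() yields IOS_L_HISTO (3), which indexes vsx_type_to_nvlist[].
 */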
270
271 typedef struct zpool_command {
272 const char *name;
273 int (*func)(int, char **);
274 zpool_help_t usage;
275 } zpool_command_t;
276
277 /*
278 * Master command table. Each ZFS command has a name, associated function, and
279 * usage message. The usage messages need to be internationalized, so we have
280 * to have a function to return the usage message based on a command index.
281 *
282 * These commands are organized according to how they are displayed in the usage
283 * message. An empty command (one with a NULL name) indicates an empty line in
284 * the generic usage message.
285 */
286 static zpool_command_t command_table[] = {
287 { "version", zpool_do_version, HELP_VERSION },
288 { NULL },
289 { "create", zpool_do_create, HELP_CREATE },
290 { "destroy", zpool_do_destroy, HELP_DESTROY },
291 { NULL },
292 { "add", zpool_do_add, HELP_ADD },
293 { "remove", zpool_do_remove, HELP_REMOVE },
294 { NULL },
295 { "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
296 { NULL },
297 { "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
298 { NULL },
299 { "list", zpool_do_list, HELP_LIST },
300 { "iostat", zpool_do_iostat, HELP_IOSTAT },
301 { "status", zpool_do_status, HELP_STATUS },
302 { NULL },
303 { "online", zpool_do_online, HELP_ONLINE },
304 { "offline", zpool_do_offline, HELP_OFFLINE },
305 { "clear", zpool_do_clear, HELP_CLEAR },
306 { "reopen", zpool_do_reopen, HELP_REOPEN },
307 { NULL },
308 { "attach", zpool_do_attach, HELP_ATTACH },
309 { "detach", zpool_do_detach, HELP_DETACH },
310 { "replace", zpool_do_replace, HELP_REPLACE },
311 { "split", zpool_do_split, HELP_SPLIT },
312 { NULL },
313 { "initialize", zpool_do_initialize, HELP_INITIALIZE },
314 { "resilver", zpool_do_resilver, HELP_RESILVER },
315 { "scrub", zpool_do_scrub, HELP_SCRUB },
316 { "trim", zpool_do_trim, HELP_TRIM },
317 { NULL },
318 { "import", zpool_do_import, HELP_IMPORT },
319 { "export", zpool_do_export, HELP_EXPORT },
320 { "upgrade", zpool_do_upgrade, HELP_UPGRADE },
321 { "reguid", zpool_do_reguid, HELP_REGUID },
322 { NULL },
323 { "history", zpool_do_history, HELP_HISTORY },
324 { "events", zpool_do_events, HELP_EVENTS },
325 { NULL },
326 { "get", zpool_do_get, HELP_GET },
327 { "set", zpool_do_set, HELP_SET },
328 { "sync", zpool_do_sync, HELP_SYNC },
329 { NULL },
330 { "wait", zpool_do_wait, HELP_WAIT },
331 };
332
333 #define NCOMMAND (ARRAY_SIZE(command_table))
334
335 #define VDEV_ALLOC_CLASS_LOGS "logs"
336
337 static zpool_command_t *current_command;
338 static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
339 static char history_str[HIS_MAX_RECORD_LEN];
340 static boolean_t log_history = B_TRUE;
341 static uint_t timestamp_fmt = NODATE;
342
343 static const char *
344 get_usage(zpool_help_t idx)
345 {
346 switch (idx) {
347 case HELP_ADD:
348 return (gettext("\tadd [-fgLnP] [-o property=value] "
349 "<pool> <vdev> ...\n"));
350 case HELP_ATTACH:
351 return (gettext("\tattach [-fsw] [-o property=value] "
352 "<pool> <device> <new-device>\n"));
353 case HELP_CLEAR:
354 return (gettext("\tclear [-nF] <pool> [device]\n"));
355 case HELP_CREATE:
356 return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
357 "\t [-O file-system-property=value] ... \n"
358 "\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
359 case HELP_CHECKPOINT:
360 return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
361 case HELP_DESTROY:
362 return (gettext("\tdestroy [-f] <pool>\n"));
363 case HELP_DETACH:
364 return (gettext("\tdetach <pool> <device>\n"));
365 case HELP_EXPORT:
366 return (gettext("\texport [-af] <pool> ...\n"));
367 case HELP_HISTORY:
368 return (gettext("\thistory [-il] [<pool>] ...\n"));
369 case HELP_IMPORT:
370 return (gettext("\timport [-d dir] [-D]\n"
371 "\timport [-o mntopts] [-o property=value] ... \n"
372 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
373 "[-R root] [-F [-n]] -a\n"
374 "\timport [-o mntopts] [-o property=value] ... \n"
375 "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
376 "[-R root] [-F [-n]]\n"
377 "\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
378 case HELP_IOSTAT:
379 return (gettext("\tiostat [[[-c [script1,script2,...]"
380 "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
381 "\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
382 " [[-n] interval [count]]\n"));
383 case HELP_LABELCLEAR:
384 return (gettext("\tlabelclear [-f] <vdev>\n"));
385 case HELP_LIST:
386 return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
387 "[-T d|u] [pool] ... \n"
388 "\t [interval [count]]\n"));
389 case HELP_OFFLINE:
390 return (gettext("\toffline [-f] [-t] <pool> <device> ...\n"));
391 case HELP_ONLINE:
392 return (gettext("\tonline [-e] <pool> <device> ...\n"));
393 case HELP_REPLACE:
394 return (gettext("\treplace [-fsw] [-o property=value] "
395 "<pool> <device> [new-device]\n"));
396 case HELP_REMOVE:
397 return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
398 case HELP_REOPEN:
399 return (gettext("\treopen [-n] <pool>\n"));
400 case HELP_INITIALIZE:
401 return (gettext("\tinitialize [-c | -s] [-w] <pool> "
402 "[<device> ...]\n"));
403 case HELP_SCRUB:
404 return (gettext("\tscrub [-s | -p] [-w] <pool> ...\n"));
405 case HELP_RESILVER:
406 return (gettext("\tresilver <pool> ...\n"));
407 case HELP_TRIM:
408 return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
409 "[<device> ...]\n"));
410 case HELP_STATUS:
411 return (gettext("\tstatus [-c [script1,script2,...]] "
412 "[-igLpPstvxD] [-T d|u] [pool] ... \n"
413 "\t [interval [count]]\n"));
414 case HELP_UPGRADE:
415 return (gettext("\tupgrade\n"
416 "\tupgrade -v\n"
417 "\tupgrade [-V version] <-a | pool ...>\n"));
418 case HELP_EVENTS:
419 return (gettext("\tevents [-vHf [pool] | -c]\n"));
420 case HELP_GET:
421 return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
422 "<\"all\" | property[,...]> <pool> ...\n"));
423 case HELP_SET:
424 return (gettext("\tset <property=value> <pool>\n"
425 "\tset <vdev_property=value> <pool> <vdev>\n"));
426 case HELP_SPLIT:
427 return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
428 "\t [-o property=value] <pool> <newpool> "
429 "[<device> ...]\n"));
430 case HELP_REGUID:
431 return (gettext("\treguid <pool>\n"));
432 case HELP_SYNC:
433 return (gettext("\tsync [pool] ...\n"));
434 case HELP_VERSION:
435 return (gettext("\tversion\n"));
436 case HELP_WAIT:
437 return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
438 "<pool> [interval]\n"));
439 default:
440 __builtin_unreachable();
441 }
442 }
443
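/*
 * Recursively walk a vdev tree, adding the path of every leaf vdev to 'res'
 * (as a boolean flag) and skipping indirect and hole vdevs. Used, for
 * example, by 'zpool initialize' to expand a bare pool name into all of its
 * leaf vdevs.
 */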
444 static void
445 zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
446 {
447 uint_t children = 0;
448 nvlist_t **child;
449 uint_t i;
450
451 (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
452 &child, &children);
453
454 if (children == 0) {
455 char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
456 VDEV_NAME_PATH);
457
458 if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
459 strcmp(path, VDEV_TYPE_HOLE) != 0)
460 fnvlist_add_boolean(res, path);
461
462 free(path);
463 return;
464 }
465
466 for (i = 0; i < children; i++) {
467 zpool_collect_leaves(zhp, child[i], res);
468 }
469 }
470
471 /*
472 * Callback routine that will print out a pool property value.
473 */
474 static int
475 print_pool_prop_cb(int prop, void *cb)
476 {
477 FILE *fp = cb;
478
479 (void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));
480
481 if (zpool_prop_readonly(prop))
482 (void) fprintf(fp, " NO ");
483 else
484 (void) fprintf(fp, " YES ");
485
486 if (zpool_prop_values(prop) == NULL)
487 (void) fprintf(fp, "-\n");
488 else
489 (void) fprintf(fp, "%s\n", zpool_prop_values(prop));
490
491 return (ZPROP_CONT);
492 }
493
494 /*
495 * Callback routine that will print out a vdev property value.
496 */
497 static int
498 print_vdev_prop_cb(int prop, void *cb)
499 {
500 FILE *fp = cb;
501
502 (void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));
503
504 if (vdev_prop_readonly(prop))
505 (void) fprintf(fp, " NO ");
506 else
507 (void) fprintf(fp, " YES ");
508
509 if (vdev_prop_values(prop) == NULL)
510 (void) fprintf(fp, "-\n");
511 else
512 (void) fprintf(fp, "%s\n", vdev_prop_values(prop));
513
514 return (ZPROP_CONT);
515 }
516
517 /*
518 * Display usage message. If we're inside a command, display only the usage for
519 * that command. Otherwise, iterate over the entire command table and display
520 * a complete usage message.
521 */
522 static __attribute__((noreturn)) void
523 usage(boolean_t requested)
524 {
525 FILE *fp = requested ? stdout : stderr;
526
527 if (current_command == NULL) {
528 int i;
529
530 (void) fprintf(fp, gettext("usage: zpool command args ...\n"));
531 (void) fprintf(fp,
532 gettext("where 'command' is one of the following:\n\n"));
533
534 for (i = 0; i < NCOMMAND; i++) {
535 if (command_table[i].name == NULL)
536 (void) fprintf(fp, "\n");
537 else
538 (void) fprintf(fp, "%s",
539 get_usage(command_table[i].usage));
540 }
541 } else {
542 (void) fprintf(fp, gettext("usage:\n"));
543 (void) fprintf(fp, "%s", get_usage(current_command->usage));
544 }
545
546 if (current_command != NULL &&
547 current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
548 ((strcmp(current_command->name, "set") == 0) ||
549 (strcmp(current_command->name, "get") == 0) ||
550 (strcmp(current_command->name, "list") == 0))) {
551
552 (void) fprintf(fp, "%s",
553 gettext("\nthe following properties are supported:\n"));
554
555 (void) fprintf(fp, "\n\t%-19s %s %s\n\n",
556 "PROPERTY", "EDIT", "VALUES");
557
558 /* Iterate over all properties */
559 if (current_prop_type == ZFS_TYPE_POOL) {
560 (void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
561 B_TRUE, current_prop_type);
562
563 (void) fprintf(fp, "\t%-19s ", "feature@...");
564 (void) fprintf(fp, "YES "
565 "disabled | enabled | active\n");
566
567 (void) fprintf(fp, gettext("\nThe feature@ properties "
568 "must be appended with a feature name.\n"
569 "See zpool-features(7).\n"));
570 } else if (current_prop_type == ZFS_TYPE_VDEV) {
571 (void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
572 B_TRUE, current_prop_type);
573 }
574 }
575
576 /*
577 * See comments at end of main().
578 */
579 if (getenv("ZFS_ABORT") != NULL) {
580 (void) printf("dumping core by request\n");
581 abort();
582 }
583
584 exit(requested ? 0 : 2);
585 }
586
587 /*
588 * zpool initialize [-c | -s] [-w] <pool> [<vdev> ...]
589 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
590 * if none specified.
591 *
592 * -c Cancel. Ends active initializing.
593 * -s Suspend. Initializing can then be restarted with no flags.
594 * -w Wait. Blocks until initializing has completed.
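 *
 * Example: 'zpool initialize -w tank' starts initializing every leaf vdev
 * in 'tank' and blocks until initialization completes.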
595 */
596 int
597 zpool_do_initialize(int argc, char **argv)
598 {
599 int c;
600 char *poolname;
601 zpool_handle_t *zhp;
602 nvlist_t *vdevs;
603 int err = 0;
604 boolean_t wait = B_FALSE;
605
606 struct option long_options[] = {
607 {"cancel", no_argument, NULL, 'c'},
608 {"suspend", no_argument, NULL, 's'},
609 {"wait", no_argument, NULL, 'w'},
610 {0, 0, 0, 0}
611 };
612
613 pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
614 while ((c = getopt_long(argc, argv, "csw", long_options, NULL)) != -1) {
615 switch (c) {
616 case 'c':
617 if (cmd_type != POOL_INITIALIZE_START &&
618 cmd_type != POOL_INITIALIZE_CANCEL) {
619 (void) fprintf(stderr, gettext("-c cannot be "
620 "combined with other options\n"));
621 usage(B_FALSE);
622 }
623 cmd_type = POOL_INITIALIZE_CANCEL;
624 break;
625 case 's':
626 if (cmd_type != POOL_INITIALIZE_START &&
627 cmd_type != POOL_INITIALIZE_SUSPEND) {
628 (void) fprintf(stderr, gettext("-s cannot be "
629 "combined with other options\n"));
630 usage(B_FALSE);
631 }
632 cmd_type = POOL_INITIALIZE_SUSPEND;
633 break;
634 case 'w':
635 wait = B_TRUE;
636 break;
637 case '?':
638 if (optopt != 0) {
639 (void) fprintf(stderr,
640 gettext("invalid option '%c'\n"), optopt);
641 } else {
642 (void) fprintf(stderr,
643 gettext("invalid option '%s'\n"),
644 argv[optind - 1]);
645 }
646 usage(B_FALSE);
647 }
648 }
649
650 argc -= optind;
651 argv += optind;
652
653 if (argc < 1) {
654 (void) fprintf(stderr, gettext("missing pool name argument\n"));
655 usage(B_FALSE);
656 return (-1);
657 }
658
659 if (wait && (cmd_type != POOL_INITIALIZE_START)) {
660 (void) fprintf(stderr, gettext("-w cannot be used with -c or "
661 "-s\n"));
662 usage(B_FALSE);
663 }
664
665 poolname = argv[0];
666 zhp = zpool_open(g_zfs, poolname);
667 if (zhp == NULL)
668 return (-1);
669
670 vdevs = fnvlist_alloc();
671 if (argc == 1) {
672 /* no individual leaf vdevs specified, so add them all */
673 nvlist_t *config = zpool_get_config(zhp, NULL);
674 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
675 ZPOOL_CONFIG_VDEV_TREE);
676 zpool_collect_leaves(zhp, nvroot, vdevs);
677 } else {
678 for (int i = 1; i < argc; i++) {
679 fnvlist_add_boolean(vdevs, argv[i]);
680 }
681 }
682
683 if (wait)
684 err = zpool_initialize_wait(zhp, cmd_type, vdevs);
685 else
686 err = zpool_initialize(zhp, cmd_type, vdevs);
687
688 fnvlist_free(vdevs);
689 zpool_close(zhp);
690
691 return (err);
692 }
693
694 /*
 * Print a pool vdev configuration for dry runs.
696 */
697 static void
698 print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
699 const char *match, int name_flags)
700 {
701 nvlist_t **child;
702 uint_t c, children;
703 char *vname;
704 boolean_t printed = B_FALSE;
705
706 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
707 &child, &children) != 0) {
708 if (name != NULL)
709 (void) printf("\t%*s%s\n", indent, "", name);
710 return;
711 }
712
713 for (c = 0; c < children; c++) {
714 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
715 char *class = (char *)"";
716
717 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
718 &is_hole);
719
720 if (is_hole == B_TRUE) {
721 continue;
722 }
723
724 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
725 &is_log);
726 if (is_log)
727 class = (char *)VDEV_ALLOC_BIAS_LOG;
728 (void) nvlist_lookup_string(child[c],
729 ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
730 if (strcmp(match, class) != 0)
731 continue;
732
733 if (!printed && name != NULL) {
734 (void) printf("\t%*s%s\n", indent, "", name);
735 printed = B_TRUE;
736 }
737 vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
738 print_vdev_tree(zhp, vname, child[c], indent + 2, "",
739 name_flags);
740 free(vname);
741 }
742 }
743
744 /*
745 * Print the list of l2cache devices for dry runs.
746 */
747 static void
748 print_cache_list(nvlist_t *nv, int indent)
749 {
750 nvlist_t **child;
751 uint_t c, children;
752
753 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
754 &child, &children) == 0 && children > 0) {
755 (void) printf("\t%*s%s\n", indent, "", "cache");
756 } else {
757 return;
758 }
759 for (c = 0; c < children; c++) {
760 char *vname;
761
762 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
763 (void) printf("\t%*s%s\n", indent + 2, "", vname);
764 free(vname);
765 }
766 }
767
768 /*
769 * Print the list of spares for dry runs.
770 */
771 static void
772 print_spare_list(nvlist_t *nv, int indent)
773 {
774 nvlist_t **child;
775 uint_t c, children;
776
777 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
778 &child, &children) == 0 && children > 0) {
779 (void) printf("\t%*s%s\n", indent, "", "spares");
780 } else {
781 return;
782 }
783 for (c = 0; c < children; c++) {
784 char *vname;
785
786 vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
787 (void) printf("\t%*s%s\n", indent + 2, "", vname);
788 free(vname);
789 }
790 }
791
792 static boolean_t
793 prop_list_contains_feature(nvlist_t *proplist)
794 {
795 nvpair_t *nvp;
796 for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
797 nvp = nvlist_next_nvpair(proplist, nvp)) {
798 if (zpool_prop_feature(nvpair_name(nvp)))
799 return (B_TRUE);
800 }
801 return (B_FALSE);
802 }
803
804 /*
805 * Add a property pair (name, string-value) into a property nvlist.
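 *
 * Returns 0 on success, 1 on an out-of-memory error, and 2 if the property
 * is invalid, specified more than once, or conflicts with another property.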
806 */
807 static int
808 add_prop_list(const char *propname, const char *propval, nvlist_t **props,
809 boolean_t poolprop)
810 {
811 zpool_prop_t prop = ZPOOL_PROP_INVAL;
812 nvlist_t *proplist;
813 const char *normnm;
814 char *strval;
815
816 if (*props == NULL &&
817 nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
818 (void) fprintf(stderr,
819 gettext("internal error: out of memory\n"));
820 return (1);
821 }
822
823 proplist = *props;
824
825 if (poolprop) {
826 const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
827 const char *cname =
828 zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);
829
830 if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
831 (!zpool_prop_feature(propname) &&
832 !zpool_prop_vdev(propname))) {
833 (void) fprintf(stderr, gettext("property '%s' is "
834 "not a valid pool or vdev property\n"), propname);
835 return (2);
836 }
837
838 /*
839 * feature@ properties and version should not be specified
840 * at the same time.
841 */
842 if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
843 nvlist_exists(proplist, vname)) ||
844 (prop == ZPOOL_PROP_VERSION &&
845 prop_list_contains_feature(proplist))) {
846 (void) fprintf(stderr, gettext("'feature@' and "
847 "'version' properties cannot be specified "
848 "together\n"));
849 return (2);
850 }
851
852 /*
853 * if version is specified, only "legacy" compatibility
854 * may be requested
855 */
856 if ((prop == ZPOOL_PROP_COMPATIBILITY &&
857 strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
858 nvlist_exists(proplist, vname)) ||
859 (prop == ZPOOL_PROP_VERSION &&
860 nvlist_exists(proplist, cname) &&
861 strcmp(fnvlist_lookup_string(proplist, cname),
862 ZPOOL_COMPAT_LEGACY) != 0)) {
863 (void) fprintf(stderr, gettext("when 'version' is "
864 "specified, the 'compatibility' feature may only "
865 "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
866 return (2);
867 }
868
869 if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
870 normnm = propname;
871 else
872 normnm = zpool_prop_to_name(prop);
873 } else {
874 zfs_prop_t fsprop = zfs_name_to_prop(propname);
875
876 if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
877 B_FALSE)) {
878 normnm = zfs_prop_to_name(fsprop);
879 } else if (zfs_prop_user(propname) ||
880 zfs_prop_userquota(propname)) {
881 normnm = propname;
882 } else {
883 (void) fprintf(stderr, gettext("property '%s' is "
884 "not a valid filesystem property\n"), propname);
885 return (2);
886 }
887 }
888
889 if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
890 prop != ZPOOL_PROP_CACHEFILE) {
891 (void) fprintf(stderr, gettext("property '%s' "
892 "specified multiple times\n"), propname);
893 return (2);
894 }
895
896 if (nvlist_add_string(proplist, normnm, propval) != 0) {
897 (void) fprintf(stderr, gettext("internal "
898 "error: out of memory\n"));
899 return (1);
900 }
901
902 return (0);
903 }
904
905 /*
 * Set a default property pair (name, string-value) in a property nvlist.
907 */
908 static int
909 add_prop_list_default(const char *propname, const char *propval,
910 nvlist_t **props)
911 {
912 char *pval;
913
914 if (nvlist_lookup_string(*props, propname, &pval) == 0)
915 return (0);
916
917 return (add_prop_list(propname, propval, props, B_TRUE));
918 }
919
920 /*
921 * zpool add [-fgLnP] [-o property=value] <pool> <vdev> ...
922 *
923 * -f Force addition of devices, even if they appear in use
924 * -g Display guid for individual vdev name.
925 * -L Follow links when resolving vdev path name.
926 * -n Do not add the devices, but display the resulting layout if
927 * they were to be added.
928 * -o Set property=value.
929 * -P Display full path for vdev name.
930 *
931 * Adds the given vdevs to 'pool'. As with create, the bulk of this work is
932 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
933 * libzfs.
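 *
 * For example, 'zpool add -n tank mirror sdc sdd' prints the layout that
 * would result, without modifying the pool.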
934 */
935 int
936 zpool_do_add(int argc, char **argv)
937 {
938 boolean_t force = B_FALSE;
939 boolean_t dryrun = B_FALSE;
940 int name_flags = 0;
941 int c;
942 nvlist_t *nvroot;
943 char *poolname;
944 int ret;
945 zpool_handle_t *zhp;
946 nvlist_t *config;
947 nvlist_t *props = NULL;
948 char *propval;
949
950 /* check options */
951 while ((c = getopt(argc, argv, "fgLno:P")) != -1) {
952 switch (c) {
953 case 'f':
954 force = B_TRUE;
955 break;
956 case 'g':
957 name_flags |= VDEV_NAME_GUID;
958 break;
959 case 'L':
960 name_flags |= VDEV_NAME_FOLLOW_LINKS;
961 break;
962 case 'n':
963 dryrun = B_TRUE;
964 break;
965 case 'o':
966 if ((propval = strchr(optarg, '=')) == NULL) {
967 (void) fprintf(stderr, gettext("missing "
968 "'=' for -o option\n"));
969 usage(B_FALSE);
970 }
971 *propval = '\0';
972 propval++;
973
974 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
975 (add_prop_list(optarg, propval, &props, B_TRUE)))
976 usage(B_FALSE);
977 break;
978 case 'P':
979 name_flags |= VDEV_NAME_PATH;
980 break;
981 case '?':
982 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
983 optopt);
984 usage(B_FALSE);
985 }
986 }
987
988 argc -= optind;
989 argv += optind;
990
991 /* get pool name and check number of arguments */
992 if (argc < 1) {
993 (void) fprintf(stderr, gettext("missing pool name argument\n"));
994 usage(B_FALSE);
995 }
996 if (argc < 2) {
997 (void) fprintf(stderr, gettext("missing vdev specification\n"));
998 usage(B_FALSE);
999 }
1000
1001 poolname = argv[0];
1002
1003 argc--;
1004 argv++;
1005
1006 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1007 return (1);
1008
1009 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
1010 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
1011 poolname);
1012 zpool_close(zhp);
1013 return (1);
1014 }
1015
        /* unless manually specified, use the "ashift" pool property (if set) */
1017 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
1018 int intval;
1019 zprop_source_t src;
1020 char strval[ZPOOL_MAXPROPLEN];
1021
1022 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
1023 if (src != ZPROP_SRC_DEFAULT) {
1024 (void) sprintf(strval, "%" PRId32, intval);
1025 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
1026 &props, B_TRUE) == 0);
1027 }
1028 }
1029
1030 /* pass off to make_root_vdev for processing */
1031 nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun,
1032 argc, argv);
1033 if (nvroot == NULL) {
1034 zpool_close(zhp);
1035 return (1);
1036 }
1037
1038 if (dryrun) {
1039 nvlist_t *poolnvroot;
1040 nvlist_t **l2child, **sparechild;
1041 uint_t l2children, sparechildren, c;
1042 char *vname;
1043 boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
1044
1045 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1046 &poolnvroot) == 0);
1047
1048 (void) printf(gettext("would update '%s' to the following "
1049 "configuration:\n\n"), zpool_get_name(zhp));
1050
1051 /* print original main pool and new tree */
1052 print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
1053 name_flags | VDEV_NAME_TYPE_ID);
1054 print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);
1055
1056 /* print other classes: 'dedup', 'special', and 'log' */
1057 if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1058 print_vdev_tree(zhp, "dedup", poolnvroot, 0,
1059 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1060 print_vdev_tree(zhp, NULL, nvroot, 0,
1061 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1062 } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
1063 print_vdev_tree(zhp, "dedup", nvroot, 0,
1064 VDEV_ALLOC_BIAS_DEDUP, name_flags);
1065 }
1066
1067 if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1068 print_vdev_tree(zhp, "special", poolnvroot, 0,
1069 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1070 print_vdev_tree(zhp, NULL, nvroot, 0,
1071 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1072 } else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
1073 print_vdev_tree(zhp, "special", nvroot, 0,
1074 VDEV_ALLOC_BIAS_SPECIAL, name_flags);
1075 }
1076
1077 if (num_logs(poolnvroot) > 0) {
1078 print_vdev_tree(zhp, "logs", poolnvroot, 0,
1079 VDEV_ALLOC_BIAS_LOG, name_flags);
1080 print_vdev_tree(zhp, NULL, nvroot, 0,
1081 VDEV_ALLOC_BIAS_LOG, name_flags);
1082 } else if (num_logs(nvroot) > 0) {
1083 print_vdev_tree(zhp, "logs", nvroot, 0,
1084 VDEV_ALLOC_BIAS_LOG, name_flags);
1085 }
1086
1087 /* Do the same for the caches */
1088 if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
1089 &l2child, &l2children) == 0 && l2children) {
1090 hadcache = B_TRUE;
1091 (void) printf(gettext("\tcache\n"));
1092 for (c = 0; c < l2children; c++) {
1093 vname = zpool_vdev_name(g_zfs, NULL,
1094 l2child[c], name_flags);
1095 (void) printf("\t %s\n", vname);
1096 free(vname);
1097 }
1098 }
1099 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1100 &l2child, &l2children) == 0 && l2children) {
1101 if (!hadcache)
1102 (void) printf(gettext("\tcache\n"));
1103 for (c = 0; c < l2children; c++) {
1104 vname = zpool_vdev_name(g_zfs, NULL,
1105 l2child[c], name_flags);
1106 (void) printf("\t %s\n", vname);
1107 free(vname);
1108 }
1109 }
1110 /* And finally the spares */
1111 if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
1112 &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1113 hadspare = B_TRUE;
1114 (void) printf(gettext("\tspares\n"));
1115 for (c = 0; c < sparechildren; c++) {
1116 vname = zpool_vdev_name(g_zfs, NULL,
1117 sparechild[c], name_flags);
1118 (void) printf("\t %s\n", vname);
1119 free(vname);
1120 }
1121 }
1122 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1123 &sparechild, &sparechildren) == 0 && sparechildren > 0) {
1124 if (!hadspare)
1125 (void) printf(gettext("\tspares\n"));
1126 for (c = 0; c < sparechildren; c++) {
1127 vname = zpool_vdev_name(g_zfs, NULL,
1128 sparechild[c], name_flags);
1129 (void) printf("\t %s\n", vname);
1130 free(vname);
1131 }
1132 }
1133
1134 ret = 0;
1135 } else {
1136 ret = (zpool_add(zhp, nvroot) != 0);
1137 }
1138
1139 nvlist_free(props);
1140 nvlist_free(nvroot);
1141 zpool_close(zhp);
1142
1143 return (ret);
1144 }
1145
1146 /*
1147 * zpool remove [-npsw] <pool> <vdev> ...
1148 *
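 * -n    Do not actually remove; print the amount of memory that will be
 *       used to track the removed device.
 * -p    Used with -n: display numbers in exact (parsable) values.
 * -s    Stop (cancel) an in-progress removal.
 * -w    Wait until the removal completes before returning.
 *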
 * Removes the given vdevs from the pool.
1150 */
1151 int
1152 zpool_do_remove(int argc, char **argv)
1153 {
1154 char *poolname;
1155 int i, ret = 0;
1156 zpool_handle_t *zhp = NULL;
1157 boolean_t stop = B_FALSE;
1158 int c;
1159 boolean_t noop = B_FALSE;
1160 boolean_t parsable = B_FALSE;
1161 boolean_t wait = B_FALSE;
1162
1163 /* check options */
1164 while ((c = getopt(argc, argv, "npsw")) != -1) {
1165 switch (c) {
1166 case 'n':
1167 noop = B_TRUE;
1168 break;
1169 case 'p':
1170 parsable = B_TRUE;
1171 break;
1172 case 's':
1173 stop = B_TRUE;
1174 break;
1175 case 'w':
1176 wait = B_TRUE;
1177 break;
1178 case '?':
1179 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1180 optopt);
1181 usage(B_FALSE);
1182 }
1183 }
1184
1185 argc -= optind;
1186 argv += optind;
1187
1188 /* get pool name and check number of arguments */
1189 if (argc < 1) {
1190 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1191 usage(B_FALSE);
1192 }
1193
1194 poolname = argv[0];
1195
1196 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
1197 return (1);
1198
1199 if (stop && noop) {
1200 zpool_close(zhp);
1201 (void) fprintf(stderr, gettext("stop request ignored\n"));
1202 return (0);
1203 }
1204
1205 if (stop) {
1206 if (argc > 1) {
1207 (void) fprintf(stderr, gettext("too many arguments\n"));
1208 usage(B_FALSE);
1209 }
1210 if (zpool_vdev_remove_cancel(zhp) != 0)
1211 ret = 1;
1212 if (wait) {
1213 (void) fprintf(stderr, gettext("invalid option "
1214 "combination: -w cannot be used with -s\n"));
1215 usage(B_FALSE);
1216 }
1217 } else {
1218 if (argc < 2) {
1219 (void) fprintf(stderr, gettext("missing device\n"));
1220 usage(B_FALSE);
1221 }
1222
1223 for (i = 1; i < argc; i++) {
1224 if (noop) {
1225 uint64_t size;
1226
1227 if (zpool_vdev_indirect_size(zhp, argv[i],
1228 &size) != 0) {
1229 ret = 1;
1230 break;
1231 }
1232 if (parsable) {
1233 (void) printf("%s %llu\n",
1234 argv[i], (unsigned long long)size);
1235 } else {
1236 char valstr[32];
1237 zfs_nicenum(size, valstr,
1238 sizeof (valstr));
1239 (void) printf("Memory that will be "
1240 "used after removing %s: %s\n",
1241 argv[i], valstr);
1242 }
1243 } else {
1244 if (zpool_vdev_remove(zhp, argv[i]) != 0)
1245 ret = 1;
1246 }
1247 }
1248
1249 if (ret == 0 && wait)
1250 ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
1251 }
1252 zpool_close(zhp);
1253
1254 return (ret);
1255 }
1256
1257 /*
 * Return 1 if a vdev is active (being used in a pool).
 * Return 0 if a vdev is inactive (offlined, faulted, or not in an active
 * pool).
1260 *
1261 * This is useful for checking if a disk in an active pool is offlined or
1262 * faulted.
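 *
 * This relies on online vdevs being held open exclusively by ZFS (as they
 * are on Linux), so an O_EXCL open from user space fails while the disk is
 * in use.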
1263 */
1264 static int
1265 vdev_is_active(char *vdev_path)
1266 {
1267 int fd;
1268 fd = open(vdev_path, O_EXCL);
1269 if (fd < 0) {
                return (1); /* can't open O_EXCL - disk is active */
1271 }
1272
1273 close(fd);
1274 return (0); /* disk is inactive in the pool */
1275 }
1276
1277 /*
1278 * zpool labelclear [-f] <vdev>
1279 *
 * -f    Force clearing the label for vdevs that are members of exported
 *       or foreign pools.
1282 *
1283 * Verifies that the vdev is not active and zeros out the label information
1284 * on the device.
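 *
 * For example, 'zpool labelclear -f /dev/sdb1' (an illustrative device path)
 * also clears labels left over from an exported or foreign pool.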
1285 */
1286 int
1287 zpool_do_labelclear(int argc, char **argv)
1288 {
1289 char vdev[MAXPATHLEN];
1290 char *name = NULL;
1291 struct stat st;
1292 int c, fd = -1, ret = 0;
1293 nvlist_t *config;
1294 pool_state_t state;
1295 boolean_t inuse = B_FALSE;
1296 boolean_t force = B_FALSE;
1297
1298 /* check options */
1299 while ((c = getopt(argc, argv, "f")) != -1) {
1300 switch (c) {
1301 case 'f':
1302 force = B_TRUE;
1303 break;
1304 default:
1305 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1306 optopt);
1307 usage(B_FALSE);
1308 }
1309 }
1310
1311 argc -= optind;
1312 argv += optind;
1313
1314 /* get vdev name */
1315 if (argc < 1) {
1316 (void) fprintf(stderr, gettext("missing vdev name\n"));
1317 usage(B_FALSE);
1318 }
1319 if (argc > 1) {
1320 (void) fprintf(stderr, gettext("too many arguments\n"));
1321 usage(B_FALSE);
1322 }
1323
1324 /*
         * Check if we were given an absolute path and use it as is.
         * Otherwise, if the provided vdev name doesn't point to a file,
1327 * try prepending expected disk paths and partition numbers.
1328 */
1329 (void) strlcpy(vdev, argv[0], sizeof (vdev));
1330 if (vdev[0] != '/' && stat(vdev, &st) != 0) {
1331 int error;
1332
1333 error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
1334 if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
1335 if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
1336 error = ENOENT;
1337 }
1338
1339 if (error || (stat(vdev, &st) != 0)) {
1340 (void) fprintf(stderr, gettext(
1341 "failed to find device %s, try specifying absolute "
1342 "path instead\n"), argv[0]);
1343 return (1);
1344 }
1345 }
1346
1347 if ((fd = open(vdev, O_RDWR)) < 0) {
1348 (void) fprintf(stderr, gettext("failed to open %s: %s\n"),
1349 vdev, strerror(errno));
1350 return (1);
1351 }
1352
1353 /*
1354 * Flush all dirty pages for the block device. This should not be
1355 * fatal when the device does not support BLKFLSBUF as would be the
1356 * case for a file vdev.
1357 */
1358 if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
1359 (void) fprintf(stderr, gettext("failed to invalidate "
1360 "cache for %s: %s\n"), vdev, strerror(errno));
1361
1362 if (zpool_read_label(fd, &config, NULL) != 0) {
1363 (void) fprintf(stderr,
1364 gettext("failed to read label from %s\n"), vdev);
1365 ret = 1;
1366 goto errout;
1367 }
1368 nvlist_free(config);
1369
1370 ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
1371 if (ret != 0) {
1372 (void) fprintf(stderr,
1373 gettext("failed to check state for %s\n"), vdev);
1374 ret = 1;
1375 goto errout;
1376 }
1377
1378 if (!inuse)
1379 goto wipe_label;
1380
1381 switch (state) {
1382 default:
1383 case POOL_STATE_ACTIVE:
1384 case POOL_STATE_SPARE:
1385 case POOL_STATE_L2CACHE:
1386 /*
                 * We allow the user to call 'zpool labelclear -f'
1388 * on an offlined disk in an active pool. We can check if
1389 * the disk is online by calling vdev_is_active().
1390 */
1391 if (force && !vdev_is_active(vdev))
1392 break;
1393
1394 (void) fprintf(stderr, gettext(
1395 "%s is a member (%s) of pool \"%s\""),
1396 vdev, zpool_pool_state_to_name(state), name);
1397
1398 if (force) {
1399 (void) fprintf(stderr, gettext(
1400 ". Offline the disk first to clear its label."));
1401 }
1402 printf("\n");
1403 ret = 1;
1404 goto errout;
1405
1406 case POOL_STATE_EXPORTED:
1407 if (force)
1408 break;
1409 (void) fprintf(stderr, gettext(
1410 "use '-f' to override the following error:\n"
1411 "%s is a member of exported pool \"%s\"\n"),
1412 vdev, name);
1413 ret = 1;
1414 goto errout;
1415
1416 case POOL_STATE_POTENTIALLY_ACTIVE:
1417 if (force)
1418 break;
1419 (void) fprintf(stderr, gettext(
1420 "use '-f' to override the following error:\n"
1421 "%s is a member of potentially active pool \"%s\"\n"),
1422 vdev, name);
1423 ret = 1;
1424 goto errout;
1425
1426 case POOL_STATE_DESTROYED:
1427 /* inuse should never be set for a destroyed pool */
1428 assert(0);
1429 break;
1430 }
1431
1432 wipe_label:
1433 ret = zpool_clear_label(fd);
1434 if (ret != 0) {
1435 (void) fprintf(stderr,
1436 gettext("failed to clear label for %s\n"), vdev);
1437 }
1438
1439 errout:
1440 free(name);
1441 (void) close(fd);
1442
1443 return (ret);
1444 }
1445
1446 /*
1447 * zpool create [-fnd] [-o property=value] ...
1448 * [-O file-system-property=value] ...
1449 * [-R root] [-m mountpoint] <pool> <dev> ...
1450 *
1451 * -f Force creation, even if devices appear in use
1452 * -n Do not create the pool, but display the resulting layout if it
1453 * were to be created.
1454 * -R Create a pool under an alternate root
1455 * -m Set default mountpoint for the root dataset. By default it's
1456 * '/<pool>'
1457 * -o Set property=value.
1458 * -o Set feature@feature=enabled|disabled.
1459 * -d Don't automatically enable all supported pool features
1460 * (individual features can be enabled with -o).
1461 * -O Set fsproperty=value in the pool's root file system
1462 *
1463 * Creates the named pool according to the given vdev specification. The
1464 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
1465 * Once we get the nvlist back from make_root_vdev(), we either print out the
1466 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
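 *
 * For example, 'zpool create -n -o ashift=12 tank raidz sda sdb sdc' only
 * prints the raidz layout that would be created.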
1467 */
1468 int
1469 zpool_do_create(int argc, char **argv)
1470 {
1471 boolean_t force = B_FALSE;
1472 boolean_t dryrun = B_FALSE;
1473 boolean_t enable_pool_features = B_TRUE;
1474
1475 int c;
1476 nvlist_t *nvroot = NULL;
1477 char *poolname;
1478 char *tname = NULL;
1479 int ret = 1;
1480 char *altroot = NULL;
1481 char *compat = NULL;
1482 char *mountpoint = NULL;
1483 nvlist_t *fsprops = NULL;
1484 nvlist_t *props = NULL;
1485 char *propval;
1486
1487 /* check options */
1488 while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
1489 switch (c) {
1490 case 'f':
1491 force = B_TRUE;
1492 break;
1493 case 'n':
1494 dryrun = B_TRUE;
1495 break;
1496 case 'd':
1497 enable_pool_features = B_FALSE;
1498 break;
1499 case 'R':
1500 altroot = optarg;
1501 if (add_prop_list(zpool_prop_to_name(
1502 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
1503 goto errout;
1504 if (add_prop_list_default(zpool_prop_to_name(
1505 ZPOOL_PROP_CACHEFILE), "none", &props))
1506 goto errout;
1507 break;
1508 case 'm':
1509 /* Equivalent to -O mountpoint=optarg */
1510 mountpoint = optarg;
1511 break;
1512 case 'o':
1513 if ((propval = strchr(optarg, '=')) == NULL) {
1514 (void) fprintf(stderr, gettext("missing "
1515 "'=' for -o option\n"));
1516 goto errout;
1517 }
1518 *propval = '\0';
1519 propval++;
1520
1521 if (add_prop_list(optarg, propval, &props, B_TRUE))
1522 goto errout;
1523
1524 /*
1525 * If the user is creating a pool that doesn't support
1526 * feature flags, don't enable any features.
1527 */
1528 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
1529 char *end;
1530 u_longlong_t ver;
1531
1532 ver = strtoull(propval, &end, 10);
1533 if (*end == '\0' &&
1534 ver < SPA_VERSION_FEATURES) {
1535 enable_pool_features = B_FALSE;
1536 }
1537 }
1538 if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
1539 altroot = propval;
1540 if (zpool_name_to_prop(optarg) ==
1541 ZPOOL_PROP_COMPATIBILITY)
1542 compat = propval;
1543 break;
1544 case 'O':
1545 if ((propval = strchr(optarg, '=')) == NULL) {
1546 (void) fprintf(stderr, gettext("missing "
1547 "'=' for -O option\n"));
1548 goto errout;
1549 }
1550 *propval = '\0';
1551 propval++;
1552
1553 /*
1554 * Mountpoints are checked and then added later.
1555 * Uniquely among properties, they can be specified
1556 * more than once, to avoid conflict with -m.
1557 */
1558 if (0 == strcmp(optarg,
1559 zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
1560 mountpoint = propval;
1561 } else if (add_prop_list(optarg, propval, &fsprops,
1562 B_FALSE)) {
1563 goto errout;
1564 }
1565 break;
1566 case 't':
1567 /*
1568 * Sanity check temporary pool name.
1569 */
1570 if (strchr(optarg, '/') != NULL) {
1571 (void) fprintf(stderr, gettext("cannot create "
1572 "'%s': invalid character '/' in temporary "
1573 "name\n"), optarg);
1574 (void) fprintf(stderr, gettext("use 'zfs "
1575 "create' to create a dataset\n"));
1576 goto errout;
1577 }
1578
1579 if (add_prop_list(zpool_prop_to_name(
1580 ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
1581 goto errout;
1582 if (add_prop_list_default(zpool_prop_to_name(
1583 ZPOOL_PROP_CACHEFILE), "none", &props))
1584 goto errout;
1585 tname = optarg;
1586 break;
1587 case ':':
1588 (void) fprintf(stderr, gettext("missing argument for "
1589 "'%c' option\n"), optopt);
1590 goto badusage;
1591 case '?':
1592 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1593 optopt);
1594 goto badusage;
1595 }
1596 }
1597
1598 argc -= optind;
1599 argv += optind;
1600
1601 /* get pool name and check number of arguments */
1602 if (argc < 1) {
1603 (void) fprintf(stderr, gettext("missing pool name argument\n"));
1604 goto badusage;
1605 }
1606 if (argc < 2) {
1607 (void) fprintf(stderr, gettext("missing vdev specification\n"));
1608 goto badusage;
1609 }
1610
1611 poolname = argv[0];
1612
1613 /*
1614 * As a special case, check for use of '/' in the name, and direct the
1615 * user to use 'zfs create' instead.
1616 */
1617 if (strchr(poolname, '/') != NULL) {
1618 (void) fprintf(stderr, gettext("cannot create '%s': invalid "
1619 "character '/' in pool name\n"), poolname);
1620 (void) fprintf(stderr, gettext("use 'zfs create' to "
1621 "create a dataset\n"));
1622 goto errout;
1623 }
1624
1625 /* pass off to make_root_vdev for bulk processing */
1626 nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
1627 argc - 1, argv + 1);
1628 if (nvroot == NULL)
1629 goto errout;
1630
1631 /* make_root_vdev() allows 0 toplevel children if there are spares */
1632 if (!zfs_allocatable_devs(nvroot)) {
1633 (void) fprintf(stderr, gettext("invalid vdev "
1634 "specification: at least one toplevel vdev must be "
1635 "specified\n"));
1636 goto errout;
1637 }
1638
1639 if (altroot != NULL && altroot[0] != '/') {
1640 (void) fprintf(stderr, gettext("invalid alternate root '%s': "
1641 "must be an absolute path\n"), altroot);
1642 goto errout;
1643 }
1644
1645 /*
1646 * Check the validity of the mountpoint and direct the user to use the
         * '-m' mountpoint option if it looks like it's in use.
1648 */
1649 if (mountpoint == NULL ||
1650 (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
1651 strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
1652 char buf[MAXPATHLEN];
1653 DIR *dirp;
1654
1655 if (mountpoint && mountpoint[0] != '/') {
1656 (void) fprintf(stderr, gettext("invalid mountpoint "
1657 "'%s': must be an absolute path, 'legacy', or "
1658 "'none'\n"), mountpoint);
1659 goto errout;
1660 }
1661
1662 if (mountpoint == NULL) {
1663 if (altroot != NULL)
1664 (void) snprintf(buf, sizeof (buf), "%s/%s",
1665 altroot, poolname);
1666 else
1667 (void) snprintf(buf, sizeof (buf), "/%s",
1668 poolname);
1669 } else {
1670 if (altroot != NULL)
1671 (void) snprintf(buf, sizeof (buf), "%s%s",
1672 altroot, mountpoint);
1673 else
1674 (void) snprintf(buf, sizeof (buf), "%s",
1675 mountpoint);
1676 }
1677
1678 if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
1679 (void) fprintf(stderr, gettext("mountpoint '%s' : "
1680 "%s\n"), buf, strerror(errno));
1681 (void) fprintf(stderr, gettext("use '-m' "
1682 "option to provide a different default\n"));
1683 goto errout;
1684 } else if (dirp) {
1685 int count = 0;
1686
1687 while (count < 3 && readdir(dirp) != NULL)
1688 count++;
1689 (void) closedir(dirp);
1690
1691 if (count > 2) {
1692 (void) fprintf(stderr, gettext("mountpoint "
1693 "'%s' exists and is not empty\n"), buf);
1694 (void) fprintf(stderr, gettext("use '-m' "
1695 "option to provide a "
1696 "different default\n"));
1697 goto errout;
1698 }
1699 }
1700 }
1701
1702 /*
1703 * Now that the mountpoint's validity has been checked, ensure that
1704 * the property is set appropriately prior to creating the pool.
1705 */
1706 if (mountpoint != NULL) {
1707 ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1708 mountpoint, &fsprops, B_FALSE);
1709 if (ret != 0)
1710 goto errout;
1711 }
1712
1713 ret = 1;
1714 if (dryrun) {
1715 /*
1716 * For a dry run invocation, print out a basic message and run
1717 * through all the vdevs in the list and print out in an
1718 * appropriate hierarchy.
1719 */
1720 (void) printf(gettext("would create '%s' with the "
1721 "following layout:\n\n"), poolname);
1722
1723 print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
1724 print_vdev_tree(NULL, "dedup", nvroot, 0,
1725 VDEV_ALLOC_BIAS_DEDUP, 0);
1726 print_vdev_tree(NULL, "special", nvroot, 0,
1727 VDEV_ALLOC_BIAS_SPECIAL, 0);
1728 print_vdev_tree(NULL, "logs", nvroot, 0,
1729 VDEV_ALLOC_BIAS_LOG, 0);
1730 print_cache_list(nvroot, 0);
1731 print_spare_list(nvroot, 0);
1732
1733 ret = 0;
1734 } else {
1735 /*
1736 * Load in feature set.
                 * Note: if the compatibility property is not given, compat is
                 * NULL, which means 'all features'.
1739 */
1740 boolean_t requested_features[SPA_FEATURES];
1741 if (zpool_do_load_compat(compat, requested_features) !=
1742 ZPOOL_COMPATIBILITY_OK)
1743 goto errout;
1744
1745 /*
1746 * props contains list of features to enable.
1747 * For each feature:
1748 * - remove it if feature@name=disabled
1749 * - leave it there if feature@name=enabled
1750 * - add it if:
1751 * - enable_pool_features (ie: no '-d' or '-o version')
1752 * - it's supported by the kernel module
1753 * - it's in the requested feature set
1754 * - warn if it's enabled but not in compat
1755 */
1756 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
1757 char propname[MAXPATHLEN];
1758 char *propval;
1759 zfeature_info_t *feat = &spa_feature_table[i];
1760
1761 (void) snprintf(propname, sizeof (propname),
1762 "feature@%s", feat->fi_uname);
1763
1764 if (!nvlist_lookup_string(props, propname, &propval)) {
1765 if (strcmp(propval,
1766 ZFS_FEATURE_DISABLED) == 0) {
1767 (void) nvlist_remove_all(props,
1768 propname);
1769 } else if (strcmp(propval,
1770 ZFS_FEATURE_ENABLED) == 0 &&
1771 !requested_features[i]) {
1772 (void) fprintf(stderr, gettext(
1773 "Warning: feature \"%s\" enabled "
1774 "but is not in specified "
1775 "'compatibility' feature set.\n"),
1776 feat->fi_uname);
1777 }
1778 } else if (
1779 enable_pool_features &&
1780 feat->fi_zfs_mod_supported &&
1781 requested_features[i]) {
1782 ret = add_prop_list(propname,
1783 ZFS_FEATURE_ENABLED, &props, B_TRUE);
1784 if (ret != 0)
1785 goto errout;
1786 }
1787 }
1788
1789 ret = 1;
1790 if (zpool_create(g_zfs, poolname,
1791 nvroot, props, fsprops) == 0) {
1792 zfs_handle_t *pool = zfs_open(g_zfs,
1793 tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
1794 if (pool != NULL) {
1795 if (zfs_mount(pool, NULL, 0) == 0) {
1796 ret = zfs_share(pool, NULL);
1797 zfs_commit_shares(NULL);
1798 }
1799 zfs_close(pool);
1800 }
1801 } else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
1802 (void) fprintf(stderr, gettext("pool name may have "
1803 "been omitted\n"));
1804 }
1805 }
1806
1807 errout:
1808 nvlist_free(nvroot);
1809 nvlist_free(fsprops);
1810 nvlist_free(props);
1811 return (ret);
1812 badusage:
1813 nvlist_free(fsprops);
1814 nvlist_free(props);
1815 usage(B_FALSE);
1816 return (2);
1817 }
1818
1819 /*
 * zpool destroy [-f] <pool>
1821 *
1822 * -f Forcefully unmount any datasets
1823 *
1824 * Destroy the given pool. Automatically unmounts any datasets in the pool.
1825 */
1826 int
1827 zpool_do_destroy(int argc, char **argv)
1828 {
1829 boolean_t force = B_FALSE;
1830 int c;
1831 char *pool;
1832 zpool_handle_t *zhp;
1833 int ret;
1834
1835 /* check options */
1836 while ((c = getopt(argc, argv, "f")) != -1) {
1837 switch (c) {
1838 case 'f':
1839 force = B_TRUE;
1840 break;
1841 case '?':
1842 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1843 optopt);
1844 usage(B_FALSE);
1845 }
1846 }
1847
1848 argc -= optind;
1849 argv += optind;
1850
1851 /* check arguments */
1852 if (argc < 1) {
1853 (void) fprintf(stderr, gettext("missing pool argument\n"));
1854 usage(B_FALSE);
1855 }
1856 if (argc > 1) {
1857 (void) fprintf(stderr, gettext("too many arguments\n"));
1858 usage(B_FALSE);
1859 }
1860
1861 pool = argv[0];
1862
1863 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
1864 /*
1865 * As a special case, check for use of '/' in the name, and
1866 * direct the user to use 'zfs destroy' instead.
1867 */
1868 if (strchr(pool, '/') != NULL)
1869 (void) fprintf(stderr, gettext("use 'zfs destroy' to "
1870 "destroy a dataset\n"));
1871 return (1);
1872 }
1873
1874 if (zpool_disable_datasets(zhp, force) != 0) {
1875 (void) fprintf(stderr, gettext("could not destroy '%s': "
1876 "could not unmount datasets\n"), zpool_get_name(zhp));
1877 zpool_close(zhp);
1878 return (1);
1879 }
1880
1881 /* The history must be logged as part of the export */
1882 log_history = B_FALSE;
1883
1884 ret = (zpool_destroy(zhp, history_str) != 0);
1885
1886 zpool_close(zhp);
1887
1888 return (ret);
1889 }
1890
1891 typedef struct export_cbdata {
1892 boolean_t force;
1893 boolean_t hardforce;
1894 } export_cbdata_t;
1895
1896 /*
1897 * Export one pool
1898 */
1899 static int
1900 zpool_export_one(zpool_handle_t *zhp, void *data)
1901 {
1902 export_cbdata_t *cb = data;
1903
1904 if (zpool_disable_datasets(zhp, cb->force) != 0)
1905 return (1);
1906
1907 /* The history must be logged as part of the export */
1908 log_history = B_FALSE;
1909
1910 if (cb->hardforce) {
1911 if (zpool_export_force(zhp, history_str) != 0)
1912 return (1);
1913 } else if (zpool_export(zhp, cb->force, history_str) != 0) {
1914 return (1);
1915 }
1916
1917 return (0);
1918 }
1919
1920 /*
 * zpool export [-afF] <pool> ...
1922 *
1923 * -a Export all pools
1924 * -f Forcefully unmount datasets
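 * -F    Hard-force the export (not listed in the usage text); handled by
 *       zpool_export_force() in libzfs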
1925 *
1926 * Export the given pools. By default, the command will attempt to cleanly
1927 * unmount any active datasets within the pool. If the '-f' flag is specified,
1928 * then the datasets will be forcefully unmounted.
1929 */
1930 int
1931 zpool_do_export(int argc, char **argv)
1932 {
1933 export_cbdata_t cb;
1934 boolean_t do_all = B_FALSE;
1935 boolean_t force = B_FALSE;
1936 boolean_t hardforce = B_FALSE;
1937 int c, ret;
1938
1939 /* check options */
1940 while ((c = getopt(argc, argv, "afF")) != -1) {
1941 switch (c) {
1942 case 'a':
1943 do_all = B_TRUE;
1944 break;
1945 case 'f':
1946 force = B_TRUE;
1947 break;
1948 case 'F':
1949 hardforce = B_TRUE;
1950 break;
1951 case '?':
1952 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
1953 optopt);
1954 usage(B_FALSE);
1955 }
1956 }
1957
1958 cb.force = force;
1959 cb.hardforce = hardforce;
1960 argc -= optind;
1961 argv += optind;
1962
1963 if (do_all) {
1964 if (argc != 0) {
1965 (void) fprintf(stderr, gettext("too many arguments\n"));
1966 usage(B_FALSE);
1967 }
1968
1969 return (for_each_pool(argc, argv, B_TRUE, NULL,
1970 ZFS_TYPE_POOL, B_FALSE, zpool_export_one, &cb));
1971 }
1972
1973 /* check arguments */
1974 if (argc < 1) {
1975 (void) fprintf(stderr, gettext("missing pool argument\n"));
1976 usage(B_FALSE);
1977 }
1978
1979 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
1980 B_FALSE, zpool_export_one, &cb);
1981
1982 return (ret);
1983 }
1984
1985 /*
1986 * Given a vdev configuration, determine the maximum width needed for the device
1987 * name column.
1988 */
1989 static int
1990 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
1991 int name_flags)
1992 {
1993 static const char *const subtypes[] =
1994 {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN};
1995
1996 char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
1997 max = MAX(strlen(name) + depth, max);
1998 free(name);
1999
2000 nvlist_t **child;
2001 uint_t children;
2002 for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i)
2003 if (nvlist_lookup_nvlist_array(nv, subtypes[i],
2004 &child, &children) == 0)
2005 for (uint_t c = 0; c < children; ++c)
2006 max = MAX(max_width(zhp, child[c], depth + 2,
2007 max, name_flags), max);
2008
2009 return (max);
2010 }
2011
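/* Used by find_spare() to record which pool owns a given spare vdev */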
2012 typedef struct spare_cbdata {
2013 uint64_t cb_guid;
2014 zpool_handle_t *cb_zhp;
2015 } spare_cbdata_t;
2016
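/*
 * Recursively search the vdev tree rooted at 'nv' for a vdev whose guid
 * matches 'search'.
 */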
2017 static boolean_t
2018 find_vdev(nvlist_t *nv, uint64_t search)
2019 {
2020 uint64_t guid;
2021 nvlist_t **child;
2022 uint_t c, children;
2023
2024 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
2025 search == guid)
2026 return (B_TRUE);
2027
2028 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2029 &child, &children) == 0) {
2030 for (c = 0; c < children; c++)
2031 if (find_vdev(child[c], search))
2032 return (B_TRUE);
2033 }
2034
2035 return (B_FALSE);
2036 }
2037
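/*
 * zpool_iter() callback: locate the pool whose vdev tree contains the spare
 * with guid cb_guid. On a match the handle is kept open in cb_zhp and
 * iteration stops; otherwise the handle is closed.
 */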
2038 static int
2039 find_spare(zpool_handle_t *zhp, void *data)
2040 {
2041 spare_cbdata_t *cbp = data;
2042 nvlist_t *config, *nvroot;
2043
2044 config = zpool_get_config(zhp, NULL);
2045 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2046 &nvroot) == 0);
2047
2048 if (find_vdev(nvroot, cbp->cb_guid)) {
2049 cbp->cb_zhp = zhp;
2050 return (1);
2051 }
2052
2053 zpool_close(zhp);
2054 return (0);
2055 }
2056
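/* Callback data used when printing pool status and import configurations */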
2057 typedef struct status_cbdata {
2058 int cb_count;
2059 int cb_name_flags;
2060 int cb_namewidth;
2061 boolean_t cb_allpools;
2062 boolean_t cb_verbose;
2063 boolean_t cb_literal;
2064 boolean_t cb_explain;
2065 boolean_t cb_first;
2066 boolean_t cb_dedup_stats;
2067 boolean_t cb_print_status;
2068 boolean_t cb_print_slow_ios;
2069 boolean_t cb_print_vdev_init;
2070 boolean_t cb_print_vdev_trim;
2071 vdev_cmd_data_list_t *vcdl;
2072 } status_cbdata_t;
2073
2074 /* Return B_TRUE if string is NULL, empty, or whitespace; B_FALSE otherwise. */
2075 static boolean_t
2076 is_blank_str(const char *str)
2077 {
2078 for (; str != NULL && *str != '\0'; ++str)
2079 if (!isblank(*str))
2080 return (B_FALSE);
2081 return (B_TRUE);
2082 }
2083
2084 /* Print command output lines for a specific vdev in a specific pool */
2085 static void
2086 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, char *path)
2087 {
2088 vdev_cmd_data_t *data;
2089 int i, j;
2090 const char *val;
2091
2092 for (i = 0; i < vcdl->count; i++) {
2093 if ((strcmp(vcdl->data[i].path, path) != 0) ||
2094 (strcmp(vcdl->data[i].pool, pool) != 0)) {
2095 /* Not the vdev we're looking for */
2096 continue;
2097 }
2098
2099 data = &vcdl->data[i];
2100 /* Print out all the output values for this vdev */
2101 for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
2102 val = NULL;
2103 /* Does this vdev have values for this column? */
2104 for (int k = 0; k < data->cols_cnt; k++) {
2105 if (strcmp(data->cols[k],
2106 vcdl->uniq_cols[j]) == 0) {
2107 /* yes it does, record the value */
2108 val = data->lines[k];
2109 break;
2110 }
2111 }
2112 /*
2113 * Mark empty values with dashes to make output
2114 * awk-able.
2115 */
2116 if (val == NULL || is_blank_str(val))
2117 val = "-";
2118
2119 printf("%*s", vcdl->uniq_cols_width[j], val);
2120 if (j < vcdl->uniq_cols_cnt - 1)
2121 fputs(" ", stdout);
2122 }
2123
2124 /* Print out any values that aren't in a column at the end */
2125 for (j = data->cols_cnt; j < data->lines_cnt; j++) {
2126 /* Did we have any columns? If so print a spacer. */
2127 if (vcdl->uniq_cols_cnt > 0)
2128 fputs(" ", stdout);
2129
2130 val = data->lines[j];
2131 fputs(val ?: "", stdout);
2132 }
2133 break;
2134 }
2135 }
2136
2137 /*
2138 * Print vdev initialization status for leaves
2139 */
2140 static void
2141 print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
2142 {
2143 if (verbose) {
2144 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
2145 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
2146 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
2147 !vs->vs_scan_removing) {
2148 char zbuf[1024];
2149 char tbuf[256];
2150 struct tm zaction_ts;
2151
2152 time_t t = vs->vs_initialize_action_time;
2153 int initialize_pct = 100;
2154 if (vs->vs_initialize_state !=
2155 VDEV_INITIALIZE_COMPLETE) {
2156 initialize_pct = (vs->vs_initialize_bytes_done *
2157 100 / (vs->vs_initialize_bytes_est + 1));
2158 }
2159
2160 (void) localtime_r(&t, &zaction_ts);
2161 (void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
2162
2163 switch (vs->vs_initialize_state) {
2164 case VDEV_INITIALIZE_SUSPENDED:
2165 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2166 gettext("suspended, started at"), tbuf);
2167 break;
2168 case VDEV_INITIALIZE_ACTIVE:
2169 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2170 gettext("started at"), tbuf);
2171 break;
2172 case VDEV_INITIALIZE_COMPLETE:
2173 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2174 gettext("completed at"), tbuf);
2175 break;
2176 }
2177
2178 (void) printf(gettext(" (%d%% initialized%s)"),
2179 initialize_pct, zbuf);
2180 } else {
2181 (void) printf(gettext(" (uninitialized)"));
2182 }
2183 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
2184 (void) printf(gettext(" (initializing)"));
2185 }
2186 }
2187
2188 /*
2189 * Print vdev TRIM status for leaves
2190 */
2191 static void
2192 print_status_trim(vdev_stat_t *vs, boolean_t verbose)
2193 {
2194 if (verbose) {
2195 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
2196 vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
2197 vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
2198 !vs->vs_scan_removing) {
2199 char zbuf[1024];
2200 char tbuf[256];
2201 struct tm zaction_ts;
2202
2203 time_t t = vs->vs_trim_action_time;
2204 int trim_pct = 100;
2205 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
2206 trim_pct = (vs->vs_trim_bytes_done *
2207 100 / (vs->vs_trim_bytes_est + 1));
2208 }
2209
2210 (void) localtime_r(&t, &zaction_ts);
2211 (void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
2212
2213 switch (vs->vs_trim_state) {
2214 case VDEV_TRIM_SUSPENDED:
2215 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2216 gettext("suspended, started at"), tbuf);
2217 break;
2218 case VDEV_TRIM_ACTIVE:
2219 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2220 gettext("started at"), tbuf);
2221 break;
2222 case VDEV_TRIM_COMPLETE:
2223 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
2224 gettext("completed at"), tbuf);
2225 break;
2226 }
2227
2228 (void) printf(gettext(" (%d%% trimmed%s)"),
2229 trim_pct, zbuf);
2230 } else if (vs->vs_trim_notsup) {
2231 (void) printf(gettext(" (trim unsupported)"));
2232 } else {
2233 (void) printf(gettext(" (untrimmed)"));
2234 }
2235 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
2236 (void) printf(gettext(" (trimming)"));
2237 }
2238 }
2239
2240 /*
2241 * Return the color associated with a health string. This includes returning
2242 * NULL for no color change.
2243 */
2244 static const char *
2245 health_str_to_color(const char *health)
2246 {
2247 if (strcmp(health, gettext("FAULTED")) == 0 ||
2248 strcmp(health, gettext("SUSPENDED")) == 0 ||
2249 strcmp(health, gettext("UNAVAIL")) == 0) {
2250 return (ANSI_RED);
2251 }
2252
2253 if (strcmp(health, gettext("OFFLINE")) == 0 ||
2254 strcmp(health, gettext("DEGRADED")) == 0 ||
2255 strcmp(health, gettext("REMOVED")) == 0) {
2256 return (ANSI_YELLOW);
2257 }
2258
2259 return (NULL);
2260 }
2261
2262 /*
2263 * Print out configuration state as requested by status_callback.
2264 */
2265 static void
2266 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
2267 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
2268 {
2269 nvlist_t **child, *root;
2270 uint_t c, i, vsc, children;
2271 pool_scan_stat_t *ps = NULL;
2272 vdev_stat_t *vs;
2273 char rbuf[6], wbuf[6], cbuf[6];
2274 char *vname;
2275 uint64_t notpresent;
2276 spare_cbdata_t spare_cb;
2277 const char *state;
2278 char *type;
2279 char *path = NULL;
2280 const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL;
2281
2282 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2283 &child, &children) != 0)
2284 children = 0;
2285
2286 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2287 (uint64_t **)&vs, &vsc) == 0);
2288
2289 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2290
2291 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2292 return;
2293
2294 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2295
2296 if (isspare) {
2297 /*
2298 * For hot spares, we use the terms 'INUSE' and 'AVAIL' for
2299 * online drives.
2300 */
2301 if (vs->vs_aux == VDEV_AUX_SPARED)
2302 state = gettext("INUSE");
2303 else if (vs->vs_state == VDEV_STATE_HEALTHY)
2304 state = gettext("AVAIL");
2305 }
2306
2307 printf_color(health_str_to_color(state),
2308 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
2309 name, state);
2310
2311 if (!isspare) {
2312 if (vs->vs_read_errors)
2313 rcolor = ANSI_RED;
2314
2315 if (vs->vs_write_errors)
2316 wcolor = ANSI_RED;
2317
2318 if (vs->vs_checksum_errors)
2319 ccolor = ANSI_RED;
2320
2321 if (cb->cb_literal) {
2322 fputc(' ', stdout);
2323 printf_color(rcolor, "%5llu",
2324 (u_longlong_t)vs->vs_read_errors);
2325 fputc(' ', stdout);
2326 printf_color(wcolor, "%5llu",
2327 (u_longlong_t)vs->vs_write_errors);
2328 fputc(' ', stdout);
2329 printf_color(ccolor, "%5llu",
2330 (u_longlong_t)vs->vs_checksum_errors);
2331 } else {
2332 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
2333 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
2334 zfs_nicenum(vs->vs_checksum_errors, cbuf,
2335 sizeof (cbuf));
2336 fputc(' ', stdout);
2337 printf_color(rcolor, "%5s", rbuf);
2338 fputc(' ', stdout);
2339 printf_color(wcolor, "%5s", wbuf);
2340 fputc(' ', stdout);
2341 printf_color(ccolor, "%5s", cbuf);
2342 }
2343 if (cb->cb_print_slow_ios) {
2344 if (children == 0) {
2345 /* Only leaf vdevs have slow I/Os */
2346 zfs_nicenum(vs->vs_slow_ios, rbuf,
2347 sizeof (rbuf));
2348 } else {
2349 snprintf(rbuf, sizeof (rbuf), "-");
2350 }
2351
2352 if (cb->cb_literal)
2353 printf(" %5llu", (u_longlong_t)vs->vs_slow_ios);
2354 else
2355 printf(" %5s", rbuf);
2356 }
2357 }
2358
2359 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2360 &notpresent) == 0) {
2361 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
2362 (void) printf(" %s %s", gettext("was"), path);
2363 } else if (vs->vs_aux != 0) {
2364 (void) printf(" ");
2365 color_start(ANSI_RED);
2366 switch (vs->vs_aux) {
2367 case VDEV_AUX_OPEN_FAILED:
2368 (void) printf(gettext("cannot open"));
2369 break;
2370
2371 case VDEV_AUX_BAD_GUID_SUM:
2372 (void) printf(gettext("missing device"));
2373 break;
2374
2375 case VDEV_AUX_NO_REPLICAS:
2376 (void) printf(gettext("insufficient replicas"));
2377 break;
2378
2379 case VDEV_AUX_VERSION_NEWER:
2380 (void) printf(gettext("newer version"));
2381 break;
2382
2383 case VDEV_AUX_UNSUP_FEAT:
2384 (void) printf(gettext("unsupported feature(s)"));
2385 break;
2386
2387 case VDEV_AUX_ASHIFT_TOO_BIG:
2388 (void) printf(gettext("unsupported minimum blocksize"));
2389 break;
2390
2391 case VDEV_AUX_SPARED:
2392 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2393 &spare_cb.cb_guid) == 0);
2394 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
2395 if (strcmp(zpool_get_name(spare_cb.cb_zhp),
2396 zpool_get_name(zhp)) == 0)
2397 (void) printf(gettext("currently in "
2398 "use"));
2399 else
2400 (void) printf(gettext("in use by "
2401 "pool '%s'"),
2402 zpool_get_name(spare_cb.cb_zhp));
2403 zpool_close(spare_cb.cb_zhp);
2404 } else {
2405 (void) printf(gettext("currently in use"));
2406 }
2407 break;
2408
2409 case VDEV_AUX_ERR_EXCEEDED:
2410 (void) printf(gettext("too many errors"));
2411 break;
2412
2413 case VDEV_AUX_IO_FAILURE:
2414 (void) printf(gettext("experienced I/O failures"));
2415 break;
2416
2417 case VDEV_AUX_BAD_LOG:
2418 (void) printf(gettext("bad intent log"));
2419 break;
2420
2421 case VDEV_AUX_EXTERNAL:
2422 (void) printf(gettext("external device fault"));
2423 break;
2424
2425 case VDEV_AUX_SPLIT_POOL:
2426 (void) printf(gettext("split into new pool"));
2427 break;
2428
2429 case VDEV_AUX_ACTIVE:
2430 (void) printf(gettext("currently in use"));
2431 break;
2432
2433 case VDEV_AUX_CHILDREN_OFFLINE:
2434 (void) printf(gettext("all children offline"));
2435 break;
2436
2437 case VDEV_AUX_BAD_LABEL:
2438 (void) printf(gettext("invalid label"));
2439 break;
2440
2441 default:
2442 (void) printf(gettext("corrupted data"));
2443 break;
2444 }
2445 color_end();
2446 } else if (children == 0 && !isspare &&
2447 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
2448 VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
2449 vs->vs_configured_ashift < vs->vs_physical_ashift) {
2450 (void) printf(
2451 gettext(" block size: %dB configured, %dB native"),
2452 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
2453 }
2454
2455 if (vs->vs_scan_removing != 0) {
2456 (void) printf(gettext(" (removing)"));
2457 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) {
2458 (void) printf(gettext(" (non-allocating)"));
2459 }
2460
2461 /* The root vdev has the scrub/resilver stats */
2462 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2463 ZPOOL_CONFIG_VDEV_TREE);
2464 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
2465 (uint64_t **)&ps, &c);
2466
2467 /*
2468 * If you force fault a drive that's resilvering, its scan stats can
2469 * get frozen in time, giving the false impression that it's
2470 * being resilvered. That's why we check the state to see if the vdev
2471 * is healthy before reporting "resilvering" or "repairing".
2472 */
2473 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 &&
2474 vs->vs_state == VDEV_STATE_HEALTHY) {
2475 if (vs->vs_scan_processed != 0) {
2476 (void) printf(gettext(" (%s)"),
2477 (ps->pss_func == POOL_SCAN_RESILVER) ?
2478 "resilvering" : "repairing");
2479 } else if (vs->vs_resilver_deferred) {
2480 (void) printf(gettext(" (awaiting resilver)"));
2481 }
2482 }
2483
2484 /* The top-level vdevs have the rebuild stats */
2485 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
2486 children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) {
2487 if (vs->vs_rebuild_processed != 0) {
2488 (void) printf(gettext(" (resilvering)"));
2489 }
2490 }
2491
2492 if (cb->vcdl != NULL) {
2493 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2494 printf(" ");
2495 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
2496 }
2497 }
2498
2499 /* Display vdev initialization and trim status for leaves. */
2500 if (children == 0) {
2501 print_status_initialize(vs, cb->cb_print_vdev_init);
2502 print_status_trim(vs, cb->cb_print_vdev_trim);
2503 }
2504
2505 (void) printf("\n");
2506
2507 for (c = 0; c < children; c++) {
2508 uint64_t islog = B_FALSE, ishole = B_FALSE;
2509
2510 /* Don't print logs or holes here */
2511 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2512 &islog);
2513 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2514 &ishole);
2515 if (islog || ishole)
2516 continue;
2517 /* Only print normal classes here */
2518 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
2519 continue;
2520
2521 /* Provide vdev_rebuild_stats to children if available */
2522 if (vrs == NULL) {
2523 (void) nvlist_lookup_uint64_array(nv,
2524 ZPOOL_CONFIG_REBUILD_STATS,
2525 (uint64_t **)&vrs, &i);
2526 }
2527
2528 vname = zpool_vdev_name(g_zfs, zhp, child[c],
2529 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2530 print_status_config(zhp, cb, vname, child[c], depth + 2,
2531 isspare, vrs);
2532 free(vname);
2533 }
2534 }
2535
2536 /*
2537 * Print the configuration of an exported pool. Iterate over all vdevs in the
2538 * pool, printing out the name and status for each one.
2539 */
2540 static void
2541 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
2542 int depth)
2543 {
2544 nvlist_t **child;
2545 uint_t c, children;
2546 vdev_stat_t *vs;
2547 char *type, *vname;
2548
2549 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
2550 if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
2551 strcmp(type, VDEV_TYPE_HOLE) == 0)
2552 return;
2553
2554 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
2555 (uint64_t **)&vs, &c) == 0);
2556
2557 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
2558 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
2559
2560 if (vs->vs_aux != 0) {
2561 (void) printf(" ");
2562
2563 switch (vs->vs_aux) {
2564 case VDEV_AUX_OPEN_FAILED:
2565 (void) printf(gettext("cannot open"));
2566 break;
2567
2568 case VDEV_AUX_BAD_GUID_SUM:
2569 (void) printf(gettext("missing device"));
2570 break;
2571
2572 case VDEV_AUX_NO_REPLICAS:
2573 (void) printf(gettext("insufficient replicas"));
2574 break;
2575
2576 case VDEV_AUX_VERSION_NEWER:
2577 (void) printf(gettext("newer version"));
2578 break;
2579
2580 case VDEV_AUX_UNSUP_FEAT:
2581 (void) printf(gettext("unsupported feature(s)"));
2582 break;
2583
2584 case VDEV_AUX_ERR_EXCEEDED:
2585 (void) printf(gettext("too many errors"));
2586 break;
2587
2588 case VDEV_AUX_ACTIVE:
2589 (void) printf(gettext("currently in use"));
2590 break;
2591
2592 case VDEV_AUX_CHILDREN_OFFLINE:
2593 (void) printf(gettext("all children offline"));
2594 break;
2595
2596 case VDEV_AUX_BAD_LABEL:
2597 (void) printf(gettext("invalid label"));
2598 break;
2599
2600 default:
2601 (void) printf(gettext("corrupted data"));
2602 break;
2603 }
2604 }
2605 (void) printf("\n");
2606
2607 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2608 &child, &children) != 0)
2609 return;
2610
2611 for (c = 0; c < children; c++) {
2612 uint64_t is_log = B_FALSE;
2613
2614 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2615 &is_log);
2616 if (is_log)
2617 continue;
2618 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
2619 continue;
2620
2621 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2622 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2623 print_import_config(cb, vname, child[c], depth + 2);
2624 free(vname);
2625 }
2626
2627 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2628 &child, &children) == 0) {
2629 (void) printf(gettext("\tcache\n"));
2630 for (c = 0; c < children; c++) {
2631 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2632 cb->cb_name_flags);
2633 (void) printf("\t %s\n", vname);
2634 free(vname);
2635 }
2636 }
2637
2638 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2639 &child, &children) == 0) {
2640 (void) printf(gettext("\tspares\n"));
2641 for (c = 0; c < children; c++) {
2642 vname = zpool_vdev_name(g_zfs, NULL, child[c],
2643 cb->cb_name_flags);
2644 (void) printf("\t %s\n", vname);
2645 free(vname);
2646 }
2647 }
2648 }
2649
2650 /*
2651 * Print specialized class vdevs.
2652 *
2653 * These are recorded as top level vdevs in the main pool child array
2654 * but with "is_log" set to 1 or an "alloc_bias" string. We use either
2655 * print_status_config() or print_import_config() to print the top level
2656 * class vdevs; then any of their children (e.g. mirrored slogs) are printed
2657 * recursively, which works because only the top-level vdev is marked.
2658 */
2659 static void
2660 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
2661 const char *class)
2662 {
2663 uint_t c, children;
2664 nvlist_t **child;
2665 boolean_t printed = B_FALSE;
2666
2667 assert(zhp != NULL || !cb->cb_verbose);
2668
2669 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
2670 &children) != 0)
2671 return;
2672
2673 for (c = 0; c < children; c++) {
2674 uint64_t is_log = B_FALSE;
2675 char *bias = NULL;
2676 char *type = NULL;
2677
2678 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2679 &is_log);
2680
2681 if (is_log) {
2682 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
2683 } else {
2684 (void) nvlist_lookup_string(child[c],
2685 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
2686 (void) nvlist_lookup_string(child[c],
2687 ZPOOL_CONFIG_TYPE, &type);
2688 }
2689
2690 if (bias == NULL || strcmp(bias, class) != 0)
2691 continue;
2692 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
2693 continue;
2694
2695 if (!printed) {
2696 (void) printf("\t%s\t\n", gettext(class));
2697 printed = B_TRUE;
2698 }
2699
2700 char *name = zpool_vdev_name(g_zfs, zhp, child[c],
2701 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
2702 if (cb->cb_print_status)
2703 print_status_config(zhp, cb, name, child[c], 2,
2704 B_FALSE, NULL);
2705 else
2706 print_import_config(cb, name, child[c], 2);
2707 free(name);
2708 }
2709 }
2710
2711 /*
2712 * Display the import status for the given pool configuration.
2713 */
2714 static int
2715 show_import(nvlist_t *config, boolean_t report_error)
2716 {
2717 uint64_t pool_state;
2718 vdev_stat_t *vs;
2719 char *name;
2720 uint64_t guid;
2721 uint64_t hostid = 0;
2722 const char *msgid;
2723 const char *hostname = "unknown";
2724 nvlist_t *nvroot, *nvinfo;
2725 zpool_status_t reason;
2726 zpool_errata_t errata;
2727 const char *health;
2728 uint_t vsc;
2729 char *comment;
2730 status_cbdata_t cb = { 0 };
2731
2732 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
2733 &name) == 0);
2734 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
2735 &guid) == 0);
2736 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
2737 &pool_state) == 0);
2738 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2739 &nvroot) == 0);
2740
2741 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
2742 (uint64_t **)&vs, &vsc) == 0);
2743 health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
2744
2745 reason = zpool_import_status(config, &msgid, &errata);
2746
2747 /*
2748 * If we're importing using a cachefile, then we won't report any
2749 * errors unless we are in the scan phase of the import.
2750 */
2751 if (reason != ZPOOL_STATUS_OK && !report_error)
2752 return (reason);
2753
2754 (void) printf(gettext(" pool: %s\n"), name);
2755 (void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid);
2756 (void) printf(gettext(" state: %s"), health);
2757 if (pool_state == POOL_STATE_DESTROYED)
2758 (void) printf(gettext(" (DESTROYED)"));
2759 (void) printf("\n");
2760
2761 switch (reason) {
2762 case ZPOOL_STATUS_MISSING_DEV_R:
2763 case ZPOOL_STATUS_MISSING_DEV_NR:
2764 case ZPOOL_STATUS_BAD_GUID_SUM:
2765 printf_color(ANSI_BOLD, gettext("status: "));
2766 printf_color(ANSI_YELLOW, gettext("One or more devices are "
2767 "missing from the system.\n"));
2768 break;
2769
2770 case ZPOOL_STATUS_CORRUPT_LABEL_R:
2771 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
2772 printf_color(ANSI_BOLD, gettext("status: "));
2773 printf_color(ANSI_YELLOW, gettext("One or more devices contain"
2774 " corrupted data.\n"));
2775 break;
2776
2777 case ZPOOL_STATUS_CORRUPT_DATA:
2778 (void) printf(
2779 gettext(" status: The pool data is corrupted.\n"));
2780 break;
2781
2782 case ZPOOL_STATUS_OFFLINE_DEV:
2783 printf_color(ANSI_BOLD, gettext("status: "));
2784 printf_color(ANSI_YELLOW, gettext("One or more devices "
2785 "are offlined.\n"));
2786 break;
2787
2788 case ZPOOL_STATUS_CORRUPT_POOL:
2789 printf_color(ANSI_BOLD, gettext("status: "));
2790 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
2791 "corrupted.\n"));
2792 break;
2793
2794 case ZPOOL_STATUS_VERSION_OLDER:
2795 printf_color(ANSI_BOLD, gettext("status: "));
2796 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
2797 "a legacy on-disk version.\n"));
2798 break;
2799
2800 case ZPOOL_STATUS_VERSION_NEWER:
2801 printf_color(ANSI_BOLD, gettext("status: "));
2802 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
2803 "an incompatible version.\n"));
2804 break;
2805
2806 case ZPOOL_STATUS_FEAT_DISABLED:
2807 printf_color(ANSI_BOLD, gettext("status: "));
2808 printf_color(ANSI_YELLOW, gettext("Some supported "
2809 "features are not enabled on the pool.\n\t"
2810 "(Note that they may be intentionally disabled "
2811 "if the\n\t'compatibility' property is set.)\n"));
2812 break;
2813
2814 case ZPOOL_STATUS_COMPATIBILITY_ERR:
2815 printf_color(ANSI_BOLD, gettext("status: "));
2816 printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
2817 "the file(s) indicated by the 'compatibility'\n"
2818 "property.\n"));
2819 break;
2820
2821 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
2822 printf_color(ANSI_BOLD, gettext("status: "));
2823 printf_color(ANSI_YELLOW, gettext("One or more features "
2824 "are enabled on the pool despite not being\n"
2825 "requested by the 'compatibility' property.\n"));
2826 break;
2827
2828 case ZPOOL_STATUS_UNSUP_FEAT_READ:
2829 printf_color(ANSI_BOLD, gettext("status: "));
2830 printf_color(ANSI_YELLOW, gettext("The pool uses the following "
2831 "feature(s) not supported on this system:\n"));
2832 color_start(ANSI_YELLOW);
2833 zpool_print_unsup_feat(config);
2834 color_end();
2835 break;
2836
2837 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
2838 printf_color(ANSI_BOLD, gettext("status: "));
2839 printf_color(ANSI_YELLOW, gettext("The pool can only be "
2840 "accessed in read-only mode on this system. It\n\tcannot be"
2841 " accessed in read-write mode because it uses the "
2842 "following\n\tfeature(s) not supported on this system:\n"));
2843 color_start(ANSI_YELLOW);
2844 zpool_print_unsup_feat(config);
2845 color_end();
2846 break;
2847
2848 case ZPOOL_STATUS_HOSTID_ACTIVE:
2849 printf_color(ANSI_BOLD, gettext("status: "));
2850 printf_color(ANSI_YELLOW, gettext("The pool is currently "
2851 "imported by another system.\n"));
2852 break;
2853
2854 case ZPOOL_STATUS_HOSTID_REQUIRED:
2855 printf_color(ANSI_BOLD, gettext("status: "));
2856 printf_color(ANSI_YELLOW, gettext("The pool has the "
2857 "multihost property on. It cannot\n\tbe safely imported "
2858 "when the system hostid is not set.\n"));
2859 break;
2860
2861 case ZPOOL_STATUS_HOSTID_MISMATCH:
2862 printf_color(ANSI_BOLD, gettext("status: "));
2863 printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
2864 "by another system.\n"));
2865 break;
2866
2867 case ZPOOL_STATUS_FAULTED_DEV_R:
2868 case ZPOOL_STATUS_FAULTED_DEV_NR:
2869 printf_color(ANSI_BOLD, gettext("status: "));
2870 printf_color(ANSI_YELLOW, gettext("One or more devices are "
2871 "faulted.\n"));
2872 break;
2873
2874 case ZPOOL_STATUS_BAD_LOG:
2875 printf_color(ANSI_BOLD, gettext("status: "));
2876 printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
2877 "be read.\n"));
2878 break;
2879
2880 case ZPOOL_STATUS_RESILVERING:
2881 case ZPOOL_STATUS_REBUILDING:
2882 printf_color(ANSI_BOLD, gettext("status: "));
2883 printf_color(ANSI_YELLOW, gettext("One or more devices were "
2884 "being resilvered.\n"));
2885 break;
2886
2887 case ZPOOL_STATUS_ERRATA:
2888 printf_color(ANSI_BOLD, gettext("status: "));
2889 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
2890 errata);
2891 break;
2892
2893 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
2894 printf_color(ANSI_BOLD, gettext("status: "));
2895 printf_color(ANSI_YELLOW, gettext("One or more devices are "
2896 "configured to use a non-native block size.\n"
2897 "\tExpect reduced performance.\n"));
2898 break;
2899
2900 default:
2901 /*
2902 * No other status can be seen when importing pools.
2903 */
2904 assert(reason == ZPOOL_STATUS_OK);
2905 }
2906
2907 /*
2908 * Print out an action according to the overall state of the pool.
2909 */
2910 if (vs->vs_state == VDEV_STATE_HEALTHY) {
2911 if (reason == ZPOOL_STATUS_VERSION_OLDER ||
2912 reason == ZPOOL_STATUS_FEAT_DISABLED) {
2913 (void) printf(gettext(" action: The pool can be "
2914 "imported using its name or numeric identifier, "
2915 "though\n\tsome features will not be available "
2916 "without an explicit 'zpool upgrade'.\n"));
2917 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
2918 (void) printf(gettext(" action: The pool can be "
2919 "imported using its name or numeric\n\tidentifier, "
2920 "though the file(s) indicated by its "
2921 "'compatibility'\n\tproperty cannot be parsed at "
2922 "this time.\n"));
2923 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
2924 (void) printf(gettext(" action: The pool can be "
2925 "imported using its name or numeric "
2926 "identifier and\n\tthe '-f' flag.\n"));
2927 } else if (reason == ZPOOL_STATUS_ERRATA) {
2928 switch (errata) {
2929 case ZPOOL_ERRATA_NONE:
2930 break;
2931
2932 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
2933 (void) printf(gettext(" action: The pool can "
2934 "be imported using its name or numeric "
2935 "identifier,\n\thowever there is a compat"
2936 "ibility issue which should be corrected"
2937 "\n\tby running 'zpool scrub'\n"));
2938 break;
2939
2940 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
2941 (void) printf(gettext(" action: The pool can"
2942 "not be imported with this version of ZFS "
2943 "due to\n\tan active asynchronous destroy. "
2944 "Revert to an earlier version\n\tand "
2945 "allow the destroy to complete before "
2946 "updating.\n"));
2947 break;
2948
2949 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
2950 (void) printf(gettext(" action: Existing "
2951 "encrypted datasets contain an on-disk "
2952 "incompatibility, which\n\tneeds to be "
2953 "corrected. Backup these datasets to new "
2954 "encrypted datasets\n\tand destroy the "
2955 "old ones.\n"));
2956 break;
2957
2958 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
2959 (void) printf(gettext(" action: Existing "
2960 "encrypted snapshots and bookmarks contain "
2961 "an on-disk\n\tincompatibility. This may "
2962 "cause on-disk corruption if they are used"
2963 "\n\twith 'zfs recv'. To correct the "
2964 "issue, enable the bookmark_v2 feature.\n\t"
2965 "No additional action is needed if there "
2966 "are no encrypted snapshots or\n\t"
2967 "bookmarks. If preserving the encrypted "
2968 "snapshots and bookmarks is\n\trequired, "
2969 "use a non-raw send to backup and restore "
2970 "them. Alternately,\n\tthey may be removed"
2971 " to resolve the incompatibility.\n"));
2972 break;
2973 default:
2974 /*
2975 * All errata must contain an action message.
2976 */
2977 assert(0);
2978 }
2979 } else {
2980 (void) printf(gettext(" action: The pool can be "
2981 "imported using its name or numeric "
2982 "identifier.\n"));
2983 }
2984 } else if (vs->vs_state == VDEV_STATE_DEGRADED) {
2985 (void) printf(gettext(" action: The pool can be imported "
2986 "despite missing or damaged devices. The\n\tfault "
2987 "tolerance of the pool may be compromised if imported.\n"));
2988 } else {
2989 switch (reason) {
2990 case ZPOOL_STATUS_VERSION_NEWER:
2991 (void) printf(gettext(" action: The pool cannot be "
2992 "imported. Access the pool on a system running "
2993 "newer\n\tsoftware, or recreate the pool from "
2994 "backup.\n"));
2995 break;
2996 case ZPOOL_STATUS_UNSUP_FEAT_READ:
2997 printf_color(ANSI_BOLD, gettext("action: "));
2998 printf_color(ANSI_YELLOW, gettext("The pool cannot be "
2999 "imported. Access the pool on a system that "
3000 "supports\n\tthe required feature(s), or recreate "
3001 "the pool from backup.\n"));
3002 break;
3003 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
3004 printf_color(ANSI_BOLD, gettext("action: "));
3005 printf_color(ANSI_YELLOW, gettext("The pool cannot be "
3006 "imported in read-write mode. Import the pool "
3007 "with\n"
3008 "\t\"-o readonly=on\", access the pool on a system "
3009 "that supports the\n\trequired feature(s), or "
3010 "recreate the pool from backup.\n"));
3011 break;
3012 case ZPOOL_STATUS_MISSING_DEV_R:
3013 case ZPOOL_STATUS_MISSING_DEV_NR:
3014 case ZPOOL_STATUS_BAD_GUID_SUM:
3015 (void) printf(gettext(" action: The pool cannot be "
3016 "imported. Attach the missing\n\tdevices and try "
3017 "again.\n"));
3018 break;
3019 case ZPOOL_STATUS_HOSTID_ACTIVE:
3020 VERIFY0(nvlist_lookup_nvlist(config,
3021 ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
3022
3023 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3024 hostname = fnvlist_lookup_string(nvinfo,
3025 ZPOOL_CONFIG_MMP_HOSTNAME);
3026
3027 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3028 hostid = fnvlist_lookup_uint64(nvinfo,
3029 ZPOOL_CONFIG_MMP_HOSTID);
3030
3031 (void) printf(gettext(" action: The pool must be "
3032 "exported from %s (hostid=%"PRIx64")\n\tbefore it "
3033 "can be safely imported.\n"), hostname, hostid);
3034 break;
3035 case ZPOOL_STATUS_HOSTID_REQUIRED:
3036 (void) printf(gettext(" action: Set a unique system "
3037 "hostid with the zgenhostid(8) command.\n"));
3038 break;
3039 default:
3040 (void) printf(gettext(" action: The pool cannot be "
3041 "imported due to damaged devices or data.\n"));
3042 }
3043 }
3044
3045 /* Print the comment attached to the pool. */
3046 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
3047 (void) printf(gettext("comment: %s\n"), comment);
3048
3049 /*
3050 * If the state is "closed" or "can't open", and the aux state
3051 * is "corrupt data":
3052 */
3053 if (((vs->vs_state == VDEV_STATE_CLOSED) ||
3054 (vs->vs_state == VDEV_STATE_CANT_OPEN)) &&
3055 (vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) {
3056 if (pool_state == POOL_STATE_DESTROYED)
3057 (void) printf(gettext("\tThe pool was destroyed, "
3058 "but can be imported using the '-Df' flags.\n"));
3059 else if (pool_state != POOL_STATE_EXPORTED)
3060 (void) printf(gettext("\tThe pool may be active on "
3061 "another system, but can be imported using\n\t"
3062 "the '-f' flag.\n"));
3063 }
3064
3065 if (msgid != NULL) {
3066 (void) printf(gettext(
3067 " see: https://openzfs.github.io/openzfs-docs/msg/%s\n"),
3068 msgid);
3069 }
3070
3071 (void) printf(gettext(" config:\n\n"));
3072
3073 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
3074 VDEV_NAME_TYPE_ID);
3075 if (cb.cb_namewidth < 10)
3076 cb.cb_namewidth = 10;
3077
3078 print_import_config(&cb, name, nvroot, 0);
3079
3080 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
3081 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
3082 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
3083
3084 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
3085 (void) printf(gettext("\n\tAdditional devices are known to "
3086 "be part of this pool, though their\n\texact "
3087 "configuration cannot be determined.\n"));
3088 }
3089 return (0);
3090 }
3091
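/*
 * Return B_TRUE if importing this pool would require the force (-f) flag:
 * the pool is not marked exported and was last used by a different host, or
 * the MMP state indicates it may still be active on another system.
 */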
3092 static boolean_t
3093 zfs_force_import_required(nvlist_t *config)
3094 {
3095 uint64_t state;
3096 uint64_t hostid = 0;
3097 nvlist_t *nvinfo;
3098
3099 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
3100 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
3101
3102 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
3103 return (B_TRUE);
3104
3105 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3106 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
3107 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
3108 ZPOOL_CONFIG_MMP_STATE);
3109
3110 if (mmp_state != MMP_STATE_INACTIVE)
3111 return (B_TRUE);
3112 }
3113
3114 return (B_FALSE);
3115 }
3116
3117 /*
3118 * Perform the import for the given configuration. This passes the heavy
3119 * lifting off to zpool_import_props(), and then mounts the datasets contained
3120 * within the pool.
3121 */
3122 static int
3123 do_import(nvlist_t *config, const char *newname, const char *mntopts,
3124 nvlist_t *props, int flags)
3125 {
3126 int ret = 0;
3127 zpool_handle_t *zhp;
3128 const char *name;
3129 uint64_t version;
3130
3131 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
3132 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
3133
3134 if (!SPA_VERSION_IS_SUPPORTED(version)) {
3135 (void) fprintf(stderr, gettext("cannot import '%s': pool "
3136 "is formatted using an unsupported ZFS version\n"), name);
3137 return (1);
3138 } else if (zfs_force_import_required(config) &&
3139 !(flags & ZFS_IMPORT_ANY_HOST)) {
3140 mmp_state_t mmp_state = MMP_STATE_INACTIVE;
3141 nvlist_t *nvinfo;
3142
3143 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
3144 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
3145 mmp_state = fnvlist_lookup_uint64(nvinfo,
3146 ZPOOL_CONFIG_MMP_STATE);
3147
3148 if (mmp_state == MMP_STATE_ACTIVE) {
3149 const char *hostname = "<unknown>";
3150 uint64_t hostid = 0;
3151
3152 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
3153 hostname = fnvlist_lookup_string(nvinfo,
3154 ZPOOL_CONFIG_MMP_HOSTNAME);
3155
3156 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
3157 hostid = fnvlist_lookup_uint64(nvinfo,
3158 ZPOOL_CONFIG_MMP_HOSTID);
3159
3160 (void) fprintf(stderr, gettext("cannot import '%s': "
3161 "pool is imported on %s (hostid: "
3162 "0x%"PRIx64")\nExport the pool on the other "
3163 "system, then run 'zpool import'.\n"),
3164 name, hostname, hostid);
3165 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
3166 (void) fprintf(stderr, gettext("Cannot import '%s': "
3167 "pool has the multihost property on and the\n"
3168 "system's hostid is not set. Set a unique hostid "
3169 "with the zgenhostid(8) command.\n"), name);
3170 } else {
3171 const char *hostname = "<unknown>";
3172 time_t timestamp = 0;
3173 uint64_t hostid = 0;
3174
3175 if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
3176 hostname = fnvlist_lookup_string(config,
3177 ZPOOL_CONFIG_HOSTNAME);
3178
3179 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
3180 timestamp = fnvlist_lookup_uint64(config,
3181 ZPOOL_CONFIG_TIMESTAMP);
3182
3183 if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
3184 hostid = fnvlist_lookup_uint64(config,
3185 ZPOOL_CONFIG_HOSTID);
3186
3187 (void) fprintf(stderr, gettext("cannot import '%s': "
3188 "pool was previously in use from another system.\n"
3189 "Last accessed by %s (hostid=%"PRIx64") at %s"
3190 "The pool can be imported, use 'zpool import -f' "
3191 "to import the pool.\n"), name, hostname,
3192 hostid, ctime(&timestamp));
3193 }
3194
3195 return (1);
3196 }
3197
3198 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
3199 return (1);
3200
3201 if (newname != NULL)
3202 name = newname;
3203
3204 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
3205 return (1);
3206
3207 /*
3208 * Loading keys is best effort. We don't want to return immediately
3209 * if it fails but we do want to give the error to the caller.
3210 */
3211 if (flags & ZFS_IMPORT_LOAD_KEYS &&
3212 zfs_crypto_attempt_load_keys(g_zfs, name) != 0)
3213 ret = 1;
3214
3215 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
3216 !(flags & ZFS_IMPORT_ONLY) &&
3217 zpool_enable_datasets(zhp, mntopts, 0) != 0) {
3218 zpool_close(zhp);
3219 return (1);
3220 }
3221
3222 zpool_close(zhp);
3223 return (ret);
3224 }
3225
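/*
 * Post-process the list of import candidates in 'pools' and perform the
 * requested action: display or import every candidate when no pool was
 * specified, or find and import the single pool matching the given name
 * or guid.
 */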
3226 static int
3227 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
3228 char *orig_name, char *new_name,
3229 boolean_t do_destroyed, boolean_t pool_specified, boolean_t do_all,
3230 importargs_t *import)
3231 {
3232 nvlist_t *config = NULL;
3233 nvlist_t *found_config = NULL;
3234 uint64_t pool_state;
3235
3236 /*
3237 * At this point we have a list of import candidate configs. Even if
3238 * we were searching by pool name or guid, we still need to
3239 * post-process the list to deal with pool state and possible
3240 * duplicate names.
3241 */
3242 int err = 0;
3243 nvpair_t *elem = NULL;
3244 boolean_t first = B_TRUE;
3245 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
3246
3247 verify(nvpair_value_nvlist(elem, &config) == 0);
3248
3249 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
3250 &pool_state) == 0);
3251 if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
3252 continue;
3253 if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
3254 continue;
3255
3256 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
3257 import->policy) == 0);
3258
3259 if (!pool_specified) {
3260 if (first)
3261 first = B_FALSE;
3262 else if (!do_all)
3263 (void) fputc('\n', stdout);
3264
3265 if (do_all) {
3266 err |= do_import(config, NULL, mntopts,
3267 props, flags);
3268 } else {
3269 /*
3270 * If we're importing from cachefile, then
3271 * we don't want to report errors until we
3272 * are in the scan phase of the import. If
3273 * we get an error, then we return that error
3274 * to invoke the scan phase.
3275 */
3276 if (import->cachefile && !import->scan)
3277 err = show_import(config, B_FALSE);
3278 else
3279 (void) show_import(config, B_TRUE);
3280 }
3281 } else if (import->poolname != NULL) {
3282 char *name;
3283
3284 /*
3285 * We are searching for a pool based on name.
3286 */
3287 verify(nvlist_lookup_string(config,
3288 ZPOOL_CONFIG_POOL_NAME, &name) == 0);
3289
3290 if (strcmp(name, import->poolname) == 0) {
3291 if (found_config != NULL) {
3292 (void) fprintf(stderr, gettext(
3293 "cannot import '%s': more than "
3294 "one matching pool\n"),
3295 import->poolname);
3296 (void) fprintf(stderr, gettext(
3297 "import by numeric ID instead\n"));
3298 err = B_TRUE;
3299 }
3300 found_config = config;
3301 }
3302 } else {
3303 uint64_t guid;
3304
3305 /*
3306 * Search for a pool by guid.
3307 */
3308 verify(nvlist_lookup_uint64(config,
3309 ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
3310
3311 if (guid == import->guid)
3312 found_config = config;
3313 }
3314 }
3315
3316 /*
3317 * If we were searching for a specific pool, verify that we found a
3318 * pool, and then do the import.
3319 */
3320 if (pool_specified && err == 0) {
3321 if (found_config == NULL) {
3322 (void) fprintf(stderr, gettext("cannot import '%s': "
3323 "no such pool available\n"), orig_name);
3324 err = B_TRUE;
3325 } else {
3326 err |= do_import(found_config, new_name,
3327 mntopts, props, flags);
3328 }
3329 }
3330
3331 /*
3332 * If we were just looking for pools, report an error if none were
3333 * found.
3334 */
3335 if (!pool_specified && first)
3336 (void) fprintf(stderr,
3337 gettext("no pools available to import\n"));
3338 return (err);
3339 }
3340
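/* Arguments for the name_or_guid_exists() zpool_iter() callback */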
3341 typedef struct target_exists_args {
3342 const char *poolname;
3343 uint64_t poolguid;
3344 } target_exists_args_t;
3345
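/*
 * zpool_iter() callback: return nonzero if an already-imported pool matches
 * the name or guid passed in via target_exists_args_t.
 */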
3346 static int
3347 name_or_guid_exists(zpool_handle_t *zhp, void *data)
3348 {
3349 target_exists_args_t *args = data;
3350 nvlist_t *config = zpool_get_config(zhp, NULL);
3351 int found = 0;
3352
3353 if (config == NULL)
3354 return (0);
3355
3356 if (args->poolname != NULL) {
3357 char *pool_name;
3358
3359 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
3360 &pool_name) == 0);
3361 if (strcmp(pool_name, args->poolname) == 0)
3362 found = 1;
3363 } else {
3364 uint64_t pool_guid;
3365
3366 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3367 &pool_guid) == 0);
3368 if (pool_guid == args->poolguid)
3369 found = 1;
3370 }
3371 zpool_close(zhp);
3372
3373 return (found);
3374 }
3375 /*
3376 * zpool checkpoint <pool>
3377 * checkpoint --discard <pool>
3378 *
3379 * -d Discard the checkpoint from a checkpointed
3380 * --discard pool.
3381 *
3382 * -w Wait for discarding a checkpoint to complete.
3383 * --wait
3384 *
3385 * Checkpoints the specified pool by taking a "snapshot" of its
3386 * current state. A pool can only have one checkpoint at a time.
3387 */
3388 int
3389 zpool_do_checkpoint(int argc, char **argv)
3390 {
3391 boolean_t discard, wait;
3392 char *pool;
3393 zpool_handle_t *zhp;
3394 int c, err;
3395
3396 struct option long_options[] = {
3397 {"discard", no_argument, NULL, 'd'},
3398 {"wait", no_argument, NULL, 'w'},
3399 {0, 0, 0, 0}
3400 };
3401
3402 discard = B_FALSE;
3403 wait = B_FALSE;
3404 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
3405 switch (c) {
3406 case 'd':
3407 discard = B_TRUE;
3408 break;
3409 case 'w':
3410 wait = B_TRUE;
3411 break;
3412 case '?':
3413 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
3414 optopt);
3415 usage(B_FALSE);
3416 }
3417 }
3418
3419 if (wait && !discard) {
3420 (void) fprintf(stderr, gettext("--wait only valid when "
3421 "--discard also specified\n"));
3422 usage(B_FALSE);
3423 }
3424
3425 argc -= optind;
3426 argv += optind;
3427
3428 if (argc < 1) {
3429 (void) fprintf(stderr, gettext("missing pool argument\n"));
3430 usage(B_FALSE);
3431 }
3432
3433 if (argc > 1) {
3434 (void) fprintf(stderr, gettext("too many arguments\n"));
3435 usage(B_FALSE);
3436 }
3437
3438 pool = argv[0];
3439
3440 if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
3441 /* As a special case, check for use of '/' in the name */
3442 if (strchr(pool, '/') != NULL)
3443 (void) fprintf(stderr, gettext("'zpool checkpoint' "
3444 "doesn't work on datasets. To save the state "
3445 "of a dataset from a specific point in time "
3446 "please use 'zfs snapshot'\n"));
3447 return (1);
3448 }
3449
3450 if (discard) {
3451 err = (zpool_discard_checkpoint(zhp) != 0);
3452 if (err == 0 && wait)
3453 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
3454 } else {
3455 err = (zpool_checkpoint(zhp) != 0);
3456 }
3457
3458 zpool_close(zhp);
3459
3460 return (err);
3461 }
3462
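/* getopt_long() value for the long-only '--rewind-to-checkpoint' option */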
3463 #define CHECKPOINT_OPT 1024
3464
3465 /*
3466 * zpool import [-d dir] [-D]
3467 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
3468 * [-d dir | -c cachefile | -s] [-f] -a
3469 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
3470 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
3471 * [newpool]
3472 *
3473 * -c Read pool information from a cachefile instead of searching
3474 * devices. If importing from a cachefile config fails, then
3475 * fall back to searching for devices only in the directories that
3476 * exist in the cachefile.
3477 *
3478 * -d Scan in a specific directory, other than /dev/. More than
3479 * one directory can be specified using multiple '-d' options.
3480 *
3481 * -D Scan for previously destroyed pools or import all or only
3482 * specified destroyed pools.
3483 *
3484 * -R Temporarily import the pool, with all mountpoints relative to
3485 * the given root. The pool will remain exported when the machine
3486 * is rebooted.
3487 *
3488 * -V Import even in the presence of faulted vdevs. This is an
3489 * intentionally undocumented option for testing purposes, and
3490 * treats the pool configuration as complete, leaving any bad
3491 * vdevs in the FAULTED state. In other words, it does verbatim
3492 * import.
3493 *
3494 * -f Force import, even if it appears that the pool is active.
3495 *
3496 * -F Attempt rewind if necessary.
3497 *
3498 * -n See if rewind would work, but don't actually rewind.
3499 *
3500 * -N Import the pool but don't mount datasets.
3501 *
3502 * -T Specify a starting txg to use for import. This option is
3503 * intentionally undocumented and is used only for testing purposes.
3504 *
3505 * -a Import all pools found.
3506 *
3507 * -l Load encryption keys while importing.
3508 *
3509 * -o Set property=value and/or temporary mount options (without '=').
3510 *
3511 * -s Scan using the default search path, the libblkid cache will
3512 * not be consulted.
3513 *
3514 * --rewind-to-checkpoint
3515 * Import the pool and revert back to the checkpoint.
3516 *
3517 * The import command scans for pools to import, and imports pools based on pool
3518 * name and GUID. The pool can also be renamed as part of the import process.
3519 */
3520 int
3521 zpool_do_import(int argc, char **argv)
3522 {
3523 char **searchdirs = NULL;
3524 char *env, *envdup = NULL;
3525 int nsearch = 0;
3526 int c;
3527 int err = 0;
3528 nvlist_t *pools = NULL;
3529 boolean_t do_all = B_FALSE;
3530 boolean_t do_destroyed = B_FALSE;
3531 char *mntopts = NULL;
3532 uint64_t searchguid = 0;
3533 char *searchname = NULL;
3534 char *propval;
3535 nvlist_t *policy = NULL;
3536 nvlist_t *props = NULL;
3537 int flags = ZFS_IMPORT_NORMAL;
3538 uint32_t rewind_policy = ZPOOL_NO_REWIND;
3539 boolean_t dryrun = B_FALSE;
3540 boolean_t do_rewind = B_FALSE;
3541 boolean_t xtreme_rewind = B_FALSE;
3542 boolean_t do_scan = B_FALSE;
3543 boolean_t pool_exists = B_FALSE;
3544 boolean_t pool_specified = B_FALSE;
3545 uint64_t txg = -1ULL;
3546 char *cachefile = NULL;
3547 importargs_t idata = { 0 };
3548 char *endptr;
3549
3550 struct option long_options[] = {
3551 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
3552 {0, 0, 0, 0}
3553 };
3554
3555 /* check options */
3556 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
3557 long_options, NULL)) != -1) {
3558 switch (c) {
3559 case 'a':
3560 do_all = B_TRUE;
3561 break;
3562 case 'c':
3563 cachefile = optarg;
3564 break;
3565 case 'd':
3566 searchdirs = safe_realloc(searchdirs,
3567 (nsearch + 1) * sizeof (char *));
3568 searchdirs[nsearch++] = optarg;
3569 break;
3570 case 'D':
3571 do_destroyed = B_TRUE;
3572 break;
3573 case 'f':
3574 flags |= ZFS_IMPORT_ANY_HOST;
3575 break;
3576 case 'F':
3577 do_rewind = B_TRUE;
3578 break;
3579 case 'l':
3580 flags |= ZFS_IMPORT_LOAD_KEYS;
3581 break;
3582 case 'm':
3583 flags |= ZFS_IMPORT_MISSING_LOG;
3584 break;
3585 case 'n':
3586 dryrun = B_TRUE;
3587 break;
3588 case 'N':
3589 flags |= ZFS_IMPORT_ONLY;
3590 break;
3591 case 'o':
3592 if ((propval = strchr(optarg, '=')) != NULL) {
3593 *propval = '\0';
3594 propval++;
3595 if (add_prop_list(optarg, propval,
3596 &props, B_TRUE))
3597 goto error;
3598 } else {
3599 mntopts = optarg;
3600 }
3601 break;
3602 case 'R':
3603 if (add_prop_list(zpool_prop_to_name(
3604 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
3605 goto error;
3606 if (add_prop_list_default(zpool_prop_to_name(
3607 ZPOOL_PROP_CACHEFILE), "none", &props))
3608 goto error;
3609 break;
3610 case 's':
3611 do_scan = B_TRUE;
3612 break;
3613 case 't':
3614 flags |= ZFS_IMPORT_TEMP_NAME;
3615 if (add_prop_list_default(zpool_prop_to_name(
3616 ZPOOL_PROP_CACHEFILE), "none", &props))
3617 goto error;
3618 break;
3619
3620 case 'T':
3621 errno = 0;
3622 txg = strtoull(optarg, &endptr, 0);
3623 if (errno != 0 || *endptr != '\0') {
3624 (void) fprintf(stderr,
3625 gettext("invalid txg value\n"));
3626 usage(B_FALSE);
3627 }
3628 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
3629 break;
3630 case 'V':
3631 flags |= ZFS_IMPORT_VERBATIM;
3632 break;
3633 case 'X':
3634 xtreme_rewind = B_TRUE;
3635 break;
3636 case CHECKPOINT_OPT:
3637 flags |= ZFS_IMPORT_CHECKPOINT;
3638 break;
3639 case ':':
3640 (void) fprintf(stderr, gettext("missing argument for "
3641 "'%c' option\n"), optopt);
3642 usage(B_FALSE);
3643 break;
3644 case '?':
3645 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
3646 optopt);
3647 usage(B_FALSE);
3648 }
3649 }
3650
3651 argc -= optind;
3652 argv += optind;
3653
3654 if (cachefile && nsearch != 0) {
3655 (void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
3656 usage(B_FALSE);
3657 }
3658
3659 if (cachefile && do_scan) {
3660 (void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
3661 usage(B_FALSE);
3662 }
3663
3664 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
3665 (void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
3666 usage(B_FALSE);
3667 }
3668
3669 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
3670 (void) fprintf(stderr, gettext("-l is only meaningful during "
3671 "an import\n"));
3672 usage(B_FALSE);
3673 }
3674
3675 if ((dryrun || xtreme_rewind) && !do_rewind) {
3676 (void) fprintf(stderr,
3677 gettext("-n or -X only meaningful with -F\n"));
3678 usage(B_FALSE);
3679 }
3680 if (dryrun)
3681 rewind_policy = ZPOOL_TRY_REWIND;
3682 else if (do_rewind)
3683 rewind_policy = ZPOOL_DO_REWIND;
3684 if (xtreme_rewind)
3685 rewind_policy |= ZPOOL_EXTREME_REWIND;
3686
3687 /* In the future, we can capture further policy and include it here */
3688 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
3689 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
3690 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
3691 rewind_policy) != 0)
3692 goto error;
3693
3694 /* check argument count */
3695 if (do_all) {
3696 if (argc != 0) {
3697 (void) fprintf(stderr, gettext("too many arguments\n"));
3698 usage(B_FALSE);
3699 }
3700 } else {
3701 if (argc > 2) {
3702 (void) fprintf(stderr, gettext("too many arguments\n"));
3703 usage(B_FALSE);
3704 }
3705 }
3706
3707 /*
3708 * Check for the effective uid. We do this explicitly here because
3709 * otherwise any attempt to discover pools will silently fail.
3710 */
3711 if (argc == 0 && geteuid() != 0) {
3712 (void) fprintf(stderr, gettext("cannot "
3713 "discover pools: permission denied\n"));
3714
3715 free(searchdirs);
3716 nvlist_free(props);
3717 nvlist_free(policy);
3718 return (1);
3719 }
3720
3721 /*
3722 * Depending on the arguments given, we do one of the following:
3723 *
3724 * <none> Iterate through all pools and display information about
3725 * each one.
3726 *
3727 * -a Iterate through all pools and try to import each one.
3728 *
3729 * <id> Find the pool that corresponds to the given GUID/pool
3730 * name and import that one.
3731 *
3732 * -D The above options apply only to destroyed pools.
3733 */
3734 if (argc != 0) {
3735 char *endptr;
3736
3737 errno = 0;
3738 searchguid = strtoull(argv[0], &endptr, 10);
3739 if (errno != 0 || *endptr != '\0') {
3740 searchname = argv[0];
3741 searchguid = 0;
3742 }
3743 pool_specified = B_TRUE;
3744
3745 /*
3746 * User specified a name or guid. Ensure it's unique.
3747 */
3748 target_exists_args_t search = {searchname, searchguid};
3749 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
3750 }
3751
3752 /*
3753 * Check the environment for the preferred search path.
3754 */
3755 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
3756 char *dir, *tmp = NULL;
3757
3758 envdup = strdup(env);
3759
3760 for (dir = strtok_r(envdup, ":", &tmp);
3761 dir != NULL;
3762 dir = strtok_r(NULL, ":", &tmp)) {
3763 searchdirs = safe_realloc(searchdirs,
3764 (nsearch + 1) * sizeof (char *));
3765 searchdirs[nsearch++] = dir;
3766 }
3767 }
3768
3769 idata.path = searchdirs;
3770 idata.paths = nsearch;
3771 idata.poolname = searchname;
3772 idata.guid = searchguid;
3773 idata.cachefile = cachefile;
3774 idata.scan = do_scan;
3775 idata.policy = policy;
3776
3777 libpc_handle_t lpch = {
3778 .lpc_lib_handle = g_zfs,
3779 .lpc_ops = &libzfs_config_ops,
3780 .lpc_printerr = B_TRUE
3781 };
3782 pools = zpool_search_import(&lpch, &idata);
3783
3784 if (pools != NULL && pool_exists &&
3785 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
3786 (void) fprintf(stderr, gettext("cannot import '%s': "
3787 "a pool with that name already exists\n"),
3788 argv[0]);
3789 (void) fprintf(stderr, gettext("use the form '%s "
3790 "<pool | id> <newpool>' to give it a new name\n"),
3791 "zpool import");
3792 err = 1;
3793 } else if (pools == NULL && pool_exists) {
3794 (void) fprintf(stderr, gettext("cannot import '%s': "
3795 "a pool with that name is already created/imported,\n"),
3796 argv[0]);
3797 (void) fprintf(stderr, gettext("and no additional pools "
3798 "with that name were found\n"));
3799 err = 1;
3800 } else if (pools == NULL) {
3801 if (argc != 0) {
3802 (void) fprintf(stderr, gettext("cannot import '%s': "
3803 "no such pool available\n"), argv[0]);
3804 }
3805 err = 1;
3806 }
3807
3808 if (err == 1) {
3809 free(searchdirs);
3810 free(envdup);
3811 nvlist_free(policy);
3812 nvlist_free(pools);
3813 nvlist_free(props);
3814 return (1);
3815 }
3816
3817 err = import_pools(pools, props, mntopts, flags,
3818 argc >= 1 ? argv[0] : NULL,
3819 argc >= 2 ? argv[1] : NULL,
3820 do_destroyed, pool_specified, do_all, &idata);
3821
3822 /*
3823 * If we're using the cachefile and we failed to import, then
3824 * fall back to scanning the directory for pools that match
3825 * those in the cachefile.
3826 */
3827 if (err != 0 && cachefile != NULL) {
3828 (void) printf(gettext("cachefile import failed, retrying\n"));
3829
3830 /*
3831 * We use the scan flag to gather the directories that exist
3832 * in the cachefile. If we need to fall back to searching for
3833 * the pool config, we will only search devices in these
3834 * directories.
3835 */
3836 idata.scan = B_TRUE;
3837 nvlist_free(pools);
3838 pools = zpool_search_import(&lpch, &idata);
3839
3840 err = import_pools(pools, props, mntopts, flags,
3841 argc >= 1 ? argv[0] : NULL,
3842 argc >= 2 ? argv[1] : NULL,
3843 do_destroyed, pool_specified, do_all, &idata);
3844 }
3845
3846 error:
3847 nvlist_free(props);
3848 nvlist_free(pools);
3849 nvlist_free(policy);
3850 free(searchdirs);
3851 free(envdup);
3852
3853 return (err ? 1 : 0);
3854 }
3855
3856 /*
3857 * zpool sync [-f] [pool] ...
3858 *
3859 * -f (undocumented) Force an uberblock update (and a config update, including
3860 * the zpool cache file).
3861 *
3862 * Sync the specified pool(s).
3863 * Without arguments "zpool sync" will sync all pools.
3864 * This command initiates TXG sync(s) and will return after the TXG(s) commit.
3866 */
3867 static int
3868 zpool_do_sync(int argc, char **argv)
3869 {
3870 int ret;
3871 boolean_t force = B_FALSE;
3872
3873 /* check options */
3874 while ((ret = getopt(argc, argv, "f")) != -1) {
3875 switch (ret) {
3876 case 'f':
3877 force = B_TRUE;
3878 break;
3879 case '?':
3880 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
3881 optopt);
3882 usage(B_FALSE);
3883 }
3884 }
3885
3886 argc -= optind;
3887 argv += optind;
3888
3889 /* if argc == 0 we will execute zpool_sync_one on all pools */
3890 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
3891 B_FALSE, zpool_sync_one, &force);
3892
3893 return (ret);
3894 }
3895
3896 typedef struct iostat_cbdata {
3897 uint64_t cb_flags;
3898 int cb_namewidth;
3899 int cb_iteration;
3900 boolean_t cb_verbose;
3901 boolean_t cb_literal;
3902 boolean_t cb_scripted;
3903 zpool_list_t *cb_list;
3904 vdev_cmd_data_list_t *vcdl;
3905 vdev_cbdata_t cb_vdevs;
3906 } iostat_cbdata_t;
3907
3908 /* iostat labels */
3909 typedef struct name_and_columns {
3910 const char *name; /* Column name */
3911 unsigned int columns; /* Center name to this number of columns */
3912 } name_and_columns_t;
3913
3914 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */
3915
3916 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
3917 {
3918 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
3919 {NULL}},
3920 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
3921 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
3922 {NULL}},
3923 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
3924 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
3925 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
3926 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
3927 {"asyncq_wait", 2}, {NULL}},
3928 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
3929 {"async_read", 2}, {"async_write", 2}, {"scrub", 2},
3930 {"trim", 2}, {"rebuild", 2}, {NULL}},
3931 };
3932
3933 /* Shorthand - if "columns" field not set, default to 1 column */
3934 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
3935 {
3936 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
3937 {"write"}, {NULL}},
3938 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
3939 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
3940 {NULL}},
3941 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
3942 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
3943 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
3944 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
3945 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
3946 {NULL}},
3947 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
3948 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
3949 {"ind"}, {"agg"}, {NULL}},
3950 };
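
/*
 * Taken together, the top and bottom label tables produce headers such as
 * the following for IOS_DEFAULT (spacing is computed at runtime from the
 * column widths; shown here for illustration only):
 *
 *              capacity     operations     bandwidth
 *            alloc   free   read  write   read  write
 */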
3951
3952 static const char *histo_to_title[] = {
3953 [IOS_L_HISTO] = "latency",
3954 [IOS_RQ_HISTO] = "req_size",
3955 };
3956
3957 /*
3958 * Return the number of labels in a null-terminated name_and_columns_t
3959 * array.
3960 *
3961 */
3962 static unsigned int
3963 label_array_len(const name_and_columns_t *labels)
3964 {
3965 int i = 0;
3966
3967 while (labels[i].name)
3968 i++;
3969
3970 return (i);
3971 }
3972
3973 /*
3974 * Return the number of strings in a null-terminated string array.
3975 * For example:
3976 *
3977 * const char *foo[] = {"bar", "baz", NULL}
3978 *
3979 * returns 2
3980 */
3981 static uint64_t
3982 str_array_len(const char *array[])
3983 {
3984 uint64_t i = 0;
3985 while (array[i])
3986 i++;
3987
3988 return (i);
3989 }
3990
3991
3992 /*
3993 * Return a default column width for default/latency/queue columns. This does
3994 * not include histograms, which have their columns autosized.
3995 */
3996 static unsigned int
3997 default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
3998 {
3999 unsigned long column_width = 5; /* Normal niceprint */
4000 static unsigned long widths[] = {
4001 /*
4002 * Choose some sane default column sizes for printing the
4003 * raw numbers.
4004 */
4005 [IOS_DEFAULT] = 15, /* 1PB capacity */
4006 [IOS_LATENCY] = 10, /* 10 digits; ~10 sec in ns */
4007 [IOS_QUEUES] = 6, /* 1M queue entries */
4008 [IOS_L_HISTO] = 10, /* 10 digits; ~10 sec in ns */
4009 [IOS_RQ_HISTO] = 6, /* 1M queue entries */
4010 };
4011
4012 if (cb->cb_literal)
4013 column_width = widths[type];
4014
4015 return (column_width);
4016 }
4017
4018 /*
4019 * Print the column labels, e.g.:
4020 *
4021 * capacity operations bandwidth
4022 * alloc free read write read write ...
4023 *
4024 * If force_column_width is set, use it for the column width. If not set, use
4025 * the default column width.
4026 */
4027 static void
4028 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
4029 const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
4030 {
4031 int i, idx, s;
4032 int text_start, rw_column_width, spaces_to_end;
4033 uint64_t flags = cb->cb_flags;
4034 uint64_t f;
4035 unsigned int column_width = force_column_width;
4036
4037 /* For each bit set in flags */
4038 for (f = flags; f; f &= ~(1ULL << idx)) {
4039 idx = lowbit64(f) - 1;
4040 if (!force_column_width)
4041 column_width = default_column_width(cb, idx);
4042 /* Print our top labels centered over "read write" label. */
4043 for (i = 0; i < label_array_len(labels[idx]); i++) {
4044 const char *name = labels[idx][i].name;
4045 /*
4046 * We treat labels[][].columns == 0 as shorthand
4047 * for one column. It makes writing out the label
4048 * tables more concise.
4049 */
4050 unsigned int columns = MAX(1, labels[idx][i].columns);
4051 unsigned int slen = strlen(name);
4052
4053 rw_column_width = (column_width * columns) +
4054 (2 * (columns - 1));
4055
4056 text_start = (int)((rw_column_width) / columns -
4057 slen / columns);
4058 if (text_start < 0)
4059 text_start = 0;
4060
4061 printf(" "); /* Two spaces between columns */
4062
4063 /* Space from beginning of column to label */
4064 for (s = 0; s < text_start; s++)
4065 printf(" ");
4066
4067 printf("%s", name);
4068
4069 /* Print space after label to end of column */
4070 spaces_to_end = rw_column_width - text_start - slen;
4071 if (spaces_to_end < 0)
4072 spaces_to_end = 0;
4073
4074 for (s = 0; s < spaces_to_end; s++)
4075 printf(" ");
4076 }
4077 }
4078 }
4079
4080
4081 /*
4082 * print_cmd_columns - Print custom column titles from -c
4083 *
4084 * If the user specified "zpool status|iostat -c", then print their custom
4085 * column titles in the header. For example, print_cmd_columns() would print
4086 * the " col1 col2" part of this:
4087 *
4088 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
4089 * ...
4090 * capacity operations bandwidth
4091 * pool alloc free read write read write col1 col2
4092 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4093 * mypool 269K 1008M 0 0 107 946
4094 * mirror 269K 1008M 0 0 107 946
4095 * sdb - - 0 0 102 473 val1 val2
4096 * sdc - - 0 0 5 473 val1 val2
4097 * ---------- ----- ----- ----- ----- ----- ----- ---- ----
4098 */
4099 static void
4100 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
4101 {
4102 int i, j;
4103 vdev_cmd_data_t *data = &vcdl->data[0];
4104
4105 if (vcdl->count == 0 || data == NULL)
4106 return;
4107
4108 /*
4109 * Each vdev cmd should have the same column names unless the user did
4110 * something weird with their cmd. Just take the column names from the
4111 * first vdev and assume it works for all of them.
4112 */
4113 for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
4114 printf(" ");
4115 if (use_dashes) {
4116 for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
4117 printf("-");
4118 } else {
4119 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
4120 vcdl->uniq_cols[i]);
4121 }
4122 }
4123 }
4124
4125
4126 /*
4127 * Utility function to print out a line of dashes like:
4128 *
4129 * -------------------------------- ----- ----- ----- ----- -----
4130 *
4131 * ...or a dashed named-row line like:
4132 *
4133 * logs - - - - -
4134 *
4135 * @cb: iostat data
4136 *
4137 * @force_column_width If non-zero, use the value as the column width.
4138 * Otherwise use the default column widths.
4139 *
4140 * @name: Print a dashed named-row line starting
4141 * with @name. Otherwise, print a regular
4142 * dashed line.
4143 */
4144 static void
4145 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
4146 const char *name)
4147 {
4148 int i;
4149 unsigned int namewidth;
4150 uint64_t flags = cb->cb_flags;
4151 uint64_t f;
4152 int idx;
4153 const name_and_columns_t *labels;
4154 const char *title;
4155
4156
4157 if (cb->cb_flags & IOS_ANYHISTO_M) {
4158 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4159 } else if (cb->cb_vdevs.cb_names_count) {
4160 title = "vdev";
4161 } else {
4162 title = "pool";
4163 }
4164
4165 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4166 name ? strlen(name) : 0);
4167
4168
4169 if (name) {
4170 printf("%-*s", namewidth, name);
4171 } else {
4172 for (i = 0; i < namewidth; i++)
4173 (void) printf("-");
4174 }
4175
4176 /* For each bit in flags */
4177 for (f = flags; f; f &= ~(1ULL << idx)) {
4178 unsigned int column_width;
4179 idx = lowbit64(f) - 1;
4180 if (force_column_width)
4181 column_width = force_column_width;
4182 else
4183 column_width = default_column_width(cb, idx);
4184
4185 labels = iostat_bottom_labels[idx];
4186 for (i = 0; i < label_array_len(labels); i++) {
4187 if (name)
4188 printf(" %*s-", column_width - 1, " ");
4189 else
4190 printf(" %.*s", column_width,
4191 "--------------------");
4192 }
4193 }
4194 }
4195
4196
4197 static void
4198 print_iostat_separator_impl(iostat_cbdata_t *cb,
4199 unsigned int force_column_width)
4200 {
4201 print_iostat_dashes(cb, force_column_width, NULL);
4202 }
4203
4204 static void
4205 print_iostat_separator(iostat_cbdata_t *cb)
4206 {
4207 print_iostat_separator_impl(cb, 0);
4208 }
4209
4210 static void
4211 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
4212 const char *histo_vdev_name)
4213 {
4214 unsigned int namewidth;
4215 const char *title;
4216
4217 if (cb->cb_flags & IOS_ANYHISTO_M) {
4218 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
4219 } else if (cb->cb_vdevs.cb_names_count) {
4220 title = "vdev";
4221 } else {
4222 title = "pool";
4223 }
4224
4225 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
4226 histo_vdev_name ? strlen(histo_vdev_name) : 0);
4227
4228 if (histo_vdev_name)
4229 printf("%-*s", namewidth, histo_vdev_name);
4230 else
4231 printf("%*s", namewidth, "");
4232
4233
4234 print_iostat_labels(cb, force_column_width, iostat_top_labels);
4235 printf("\n");
4236
4237 printf("%-*s", namewidth, title);
4238
4239 print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
4240 if (cb->vcdl != NULL)
4241 print_cmd_columns(cb->vcdl, 0);
4242
4243 printf("\n");
4244
4245 print_iostat_separator_impl(cb, force_column_width);
4246
4247 if (cb->vcdl != NULL)
4248 print_cmd_columns(cb->vcdl, 1);
4249
4250 printf("\n");
4251 }
4252
4253 static void
4254 print_iostat_header(iostat_cbdata_t *cb)
4255 {
4256 print_iostat_header_impl(cb, 0, NULL);
4257 }
4258
4259
4260 /*
4261 * Display a single statistic.
4262 */
4263 static void
4264 print_one_stat(uint64_t value, enum zfs_nicenum_format format,
4265 unsigned int column_size, boolean_t scripted)
4266 {
4267 char buf[64];
4268
4269 zfs_nicenum_format(value, buf, sizeof (buf), format);
4270
4271 if (scripted)
4272 printf("\t%s", buf);
4273 else
4274 printf(" %*s", column_size, buf);
4275 }
4276
4277 /*
4278 * Calculate the default vdev stats
4279 *
4280 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting
4281 * stats into calcvs.
4282 */
4283 static void
4284 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
4285 vdev_stat_t *calcvs)
4286 {
4287 int i;
4288
4289 memcpy(calcvs, newvs, sizeof (*calcvs));
4290 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
4291 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
4292
4293 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
4294 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
4295 }
4296
4297 /*
4298 * Internal representation of the extended iostats data.
4299 *
4300 * The extended iostat stats are exported in nvlists as either uint64_t arrays
4301 * or single uint64_t's. We make both look like arrays to make them easier
4302 * to process. In order to make single uint64_t's look like arrays, we set
4303 * __data to the stat data, and then set *data = &__data with count = 1. Then,
4304 * we can just use *data and count.
4305 */
4306 struct stat_array {
4307 uint64_t *data;
4308 uint_t count; /* Number of entries in data[] */
4309 uint64_t __data; /* Only used when data is a single uint64_t */
4310 };
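
/*
 * For example (values are illustrative), wrapping a scalar of 42 looks like:
 *
 *	sa.__data = 42;
 *	sa.data = &sa.__data;
 *	sa.count = 1;
 *
 * so callers can always iterate over sa.data[0 .. sa.count - 1].
 */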
4311
4312 static uint64_t
4313 stat_histo_max(struct stat_array *nva, unsigned int len)
4314 {
4315 uint64_t max = 0;
4316 int i;
4317 for (i = 0; i < len; i++)
4318 max = MAX(max, array64_max(nva[i].data, nva[i].count));
4319
4320 return (max);
4321 }
4322
4323 /*
4324 * Helper function to look up a uint64_t array or uint64_t value and store its
4325 * data as a stat_array. If the nvpair is a single uint64_t value, then we make
4326 * it look like a one-element array to make it easier to process.
4327 */
4328 static int
4329 nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
4330 struct stat_array *nva)
4331 {
4332 nvpair_t *tmp;
4333 int ret;
4334
4335 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
4336 switch (nvpair_type(tmp)) {
4337 case DATA_TYPE_UINT64_ARRAY:
4338 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
4339 break;
4340 case DATA_TYPE_UINT64:
4341 ret = nvpair_value_uint64(tmp, &nva->__data);
4342 nva->data = &nva->__data;
4343 nva->count = 1;
4344 break;
4345 default:
4346 /* Not a uint64_t */
4347 ret = EINVAL;
4348 break;
4349 }
4350
4351 return (ret);
4352 }
4353
4354 /*
4355 * Given a list of nvlist names, look up the extended stats in newnv and oldnv,
4356 * subtract them, and return the results in a newly allocated stat_array.
4357 * You must free the returned array with free_calc_stats() when you are
4358 * done with it.
4359 *
4360 * Additionally, you can set "oldnv" to NULL if you simply want the newnv
4361 * values.
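 *
 * For example (values are illustrative), if a histogram bucket is 100 in
 * newnv and 60 in oldnv, the corresponding entry in the returned
 * stat_array is 40.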
4362 */
4363 static struct stat_array *
4364 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
4365 nvlist_t *newnv)
4366 {
4367 nvlist_t *oldnvx = NULL, *newnvx;
4368 struct stat_array *oldnva, *newnva, *calcnva;
4369 int i, j;
4370 unsigned int alloc_size = (sizeof (struct stat_array)) * len;
4371
4372 /* Extract our extended stats nvlist from the main list */
4373 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
4374 &newnvx) == 0);
4375 if (oldnv) {
4376 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
4377 &oldnvx) == 0);
4378 }
4379
4380 newnva = safe_malloc(alloc_size);
4381 oldnva = safe_malloc(alloc_size);
4382 calcnva = safe_malloc(alloc_size);
4383
4384 for (j = 0; j < len; j++) {
4385 verify(nvpair64_to_stat_array(newnvx, names[j],
4386 &newnva[j]) == 0);
4387 calcnva[j].count = newnva[j].count;
4388 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
4389 calcnva[j].data = safe_malloc(alloc_size);
4390 memcpy(calcnva[j].data, newnva[j].data, alloc_size);
4391
4392 if (oldnvx) {
4393 verify(nvpair64_to_stat_array(oldnvx, names[j],
4394 &oldnva[j]) == 0);
4395 for (i = 0; i < oldnva[j].count; i++)
4396 calcnva[j].data[i] -= oldnva[j].data[i];
4397 }
4398 }
4399 free(newnva);
4400 free(oldnva);
4401 return (calcnva);
4402 }
4403
4404 static void
4405 free_calc_stats(struct stat_array *nva, unsigned int len)
4406 {
4407 int i;
4408 for (i = 0; i < len; i++)
4409 free(nva[i].data);
4410
4411 free(nva);
4412 }
4413
4414 static void
4415 print_iostat_histo(struct stat_array *nva, unsigned int len,
4416 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
4417 double scale)
4418 {
4419 int i, j;
4420 char buf[6];
4421 uint64_t val;
4422 enum zfs_nicenum_format format;
4423 unsigned int buckets;
4424 unsigned int start_bucket;
4425
4426 if (cb->cb_literal)
4427 format = ZFS_NICENUM_RAW;
4428 else
4429 format = ZFS_NICENUM_1024;
4430
4431 /* All these histos are the same size, so just use nva[0].count */
4432 buckets = nva[0].count;
4433
4434 if (cb->cb_flags & IOS_RQ_HISTO_M) {
4435 /* Start at 512 - req size should never be lower than this */
4436 start_bucket = 9;
4437 } else {
4438 start_bucket = 0;
4439 }
4440
4441 for (j = start_bucket; j < buckets; j++) {
4442 /* Print histogram bucket label */
4443 if (cb->cb_flags & IOS_L_HISTO_M) {
4444 /* Ending range of this bucket */
4445 val = (1UL << (j + 1)) - 1;
4446 zfs_nicetime(val, buf, sizeof (buf));
4447 } else {
4448 /* Request size (starting range of bucket) */
4449 val = (1UL << j);
4450 zfs_nicenum(val, buf, sizeof (buf));
4451 }
4452
4453 if (cb->cb_scripted)
4454 printf("%llu", (u_longlong_t)val);
4455 else
4456 printf("%-*s", namewidth, buf);
4457
4458 /* Print the values on the line */
4459 for (i = 0; i < len; i++) {
4460 print_one_stat(nva[i].data[j] * scale, format,
4461 column_width, cb->cb_scripted);
4462 }
4463 printf("\n");
4464 }
4465 }
4466
4467 static void
4468 print_solid_separator(unsigned int length)
4469 {
4470 while (length--)
4471 printf("-");
4472 printf("\n");
4473 }
4474
4475 static void
4476 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
4477 nvlist_t *newnv, double scale, const char *name)
4478 {
4479 unsigned int column_width;
4480 unsigned int namewidth;
4481 unsigned int entire_width;
4482 enum iostat_type type;
4483 struct stat_array *nva;
4484 const char **names;
4485 unsigned int names_len;
4486
4487 /* What type of histo are we? */
4488 type = IOS_HISTO_IDX(cb->cb_flags);
4489
4490 /* Get NULL-terminated array of nvlist names for our histo */
4491 names = vsx_type_to_nvlist[type];
4492 names_len = str_array_len(names); /* num of names */
4493
4494 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
4495
4496 if (cb->cb_literal) {
4497 column_width = MAX(5,
4498 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
4499 } else {
4500 column_width = 5;
4501 }
4502
4503 namewidth = MAX(cb->cb_namewidth,
4504 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
4505
4506 /*
4507 * Calculate the entire line width of what we're printing. The
4508 * +2 is for the two spaces between columns:
4509 */
4510 /* read write */
4511 /* ----- ----- */
4512 /* |___| <---------- column_width */
4513 /* */
4514 /* |__________| <--- entire_width */
4515 /* */
4516 entire_width = namewidth + (column_width + 2) *
4517 label_array_len(iostat_bottom_labels[type]);
4518
4519 if (cb->cb_scripted)
4520 printf("%s\n", name);
4521 else
4522 print_iostat_header_impl(cb, column_width, name);
4523
4524 print_iostat_histo(nva, names_len, cb, column_width,
4525 namewidth, scale);
4526
4527 free_calc_stats(nva, names_len);
4528 if (!cb->cb_scripted)
4529 print_solid_separator(entire_width);
4530 }
4531
4532 /*
4533 * Calculate the average latency of a power-of-two latency histogram
4534 */
4535 static uint64_t
4536 single_histo_average(uint64_t *histo, unsigned int buckets)
4537 {
4538 int i;
4539 uint64_t count = 0, total = 0;
4540
4541 for (i = 0; i < buckets; i++) {
4542 /*
4543 * Our buckets are power-of-two latency ranges. Use the
4544 * midpoint latency of each bucket to calculate the average.
4545 * For example:
4546 *
4547 * Bucket Midpoint
4548 * 8ns-15ns: 12ns
4549 * 16ns-31ns: 24ns
4550 * ...
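 *
 * The midpoint of bucket i is 1.5 * 2^i, which is what the
 * expression below computes. As an illustration, 10 I/Os in
 * the 8ns-15ns bucket and 10 I/Os in the 16ns-31ns bucket
 * average to (10*12 + 10*24) / 20 = 18ns.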
4551 */
4552 if (histo[i] != 0) {
4553 total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
4554 count += histo[i];
4555 }
4556 }
4557
4558 /* Prevent divide by zero */
4559 return (count == 0 ? 0 : total / count);
4560 }
4561
4562 static void
4563 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv)
4564 {
4565 const char *names[] = {
4566 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
4567 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
4568 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
4569 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
4570 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
4571 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
4572 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
4573 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
4574 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
4575 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
4576 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
4577 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
4578 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
4579 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
4580 };
4581
4582 struct stat_array *nva;
4583
4584 unsigned int column_width = default_column_width(cb, IOS_QUEUES);
4585 enum zfs_nicenum_format format;
4586
4587 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
4588
4589 if (cb->cb_literal)
4590 format = ZFS_NICENUM_RAW;
4591 else
4592 format = ZFS_NICENUM_1024;
4593
4594 for (int i = 0; i < ARRAY_SIZE(names); i++) {
4595 uint64_t val = nva[i].data[0];
4596 print_one_stat(val, format, column_width, cb->cb_scripted);
4597 }
4598
4599 free_calc_stats(nva, ARRAY_SIZE(names));
4600 }
4601
4602 static void
4603 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
4604 nvlist_t *newnv)
4605 {
4606 int i;
4607 uint64_t val;
4608 const char *names[] = {
4609 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
4610 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
4611 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
4612 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
4613 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
4614 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
4615 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
4616 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
4617 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
4618 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
4619 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
4620 };
4621 struct stat_array *nva;
4622
4623 unsigned int column_width = default_column_width(cb, IOS_LATENCY);
4624 enum zfs_nicenum_format format;
4625
4626 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
4627
4628 if (cb->cb_literal)
4629 format = ZFS_NICENUM_RAWTIME;
4630 else
4631 format = ZFS_NICENUM_TIME;
4632
4633 /* Print our avg latencies on the line */
4634 for (i = 0; i < ARRAY_SIZE(names); i++) {
4635 /* Compute average latency for a latency histo */
4636 val = single_histo_average(nva[i].data, nva[i].count);
4637 print_one_stat(val, format, column_width, cb->cb_scripted);
4638 }
4639 free_calc_stats(nva, ARRAY_SIZE(names));
4640 }
4641
4642 /*
4643 * Print default statistics (capacity/operations/bandwidth)
4644 */
4645 static void
4646 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
4647 {
4648 unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
4649 enum zfs_nicenum_format format;
4650 char na; /* char to print for "not applicable" values */
4651
4652 if (cb->cb_literal) {
4653 format = ZFS_NICENUM_RAW;
4654 na = '0';
4655 } else {
4656 format = ZFS_NICENUM_1024;
4657 na = '-';
4658 }
4659
4660 /* only toplevel vdevs have capacity stats */
4661 if (vs->vs_space == 0) {
4662 if (cb->cb_scripted)
4663 printf("\t%c\t%c", na, na);
4664 else
4665 printf(" %*c %*c", column_width, na, column_width,
4666 na);
4667 } else {
4668 print_one_stat(vs->vs_alloc, format, column_width,
4669 cb->cb_scripted);
4670 print_one_stat(vs->vs_space - vs->vs_alloc, format,
4671 column_width, cb->cb_scripted);
4672 }
4673
4674 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
4675 format, column_width, cb->cb_scripted);
4676 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
4677 format, column_width, cb->cb_scripted);
4678 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
4679 format, column_width, cb->cb_scripted);
4680 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
4681 format, column_width, cb->cb_scripted);
4682 }
4683
4684 static const char *const class_name[] = {
4685 VDEV_ALLOC_BIAS_DEDUP,
4686 VDEV_ALLOC_BIAS_SPECIAL,
4687 VDEV_ALLOC_CLASS_LOGS
4688 };
4689
4690 /*
4691 * Print out all the statistics for the given vdev. This can either be the
4692 * toplevel configuration, or called recursively. If 'name' is NULL, then this
4693 * is a verbose output, and we don't want to display the toplevel pool stats.
4694 *
4695 * Returns the number of stat lines printed.
4696 */
4697 static unsigned int
4698 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
4699 nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
4700 {
4701 nvlist_t **oldchild, **newchild;
4702 uint_t c, children, oldchildren;
4703 vdev_stat_t *oldvs, *newvs, *calcvs;
4704 vdev_stat_t zerovs = { 0 };
4705 char *vname;
4706 int i;
4707 int ret = 0;
4708 uint64_t tdelta;
4709 double scale;
4710
4711 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
4712 return (ret);
4713
4714 calcvs = safe_malloc(sizeof (*calcvs));
4715
4716 if (oldnv != NULL) {
4717 verify(nvlist_lookup_uint64_array(oldnv,
4718 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
4719 } else {
4720 oldvs = &zerovs;
4721 }
4722
4723 /* Do we only want to see a specific vdev? */
4724 for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) {
4725 /* Yes we do. Is this the vdev? */
4726 if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) {
4727 /*
4728 * This is our vdev. Since it is the only vdev we
4729 * will be displaying, make depth = 0 so that it
4730 * doesn't get indented.
4731 */
4732 depth = 0;
4733 break;
4734 }
4735 }
4736
4737 if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) {
4738 /* Couldn't match the name */
4739 goto children;
4740 }
4741
4742
4743 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
4744 (uint64_t **)&newvs, &c) == 0);
4745
4746 /*
4747 * Print the vdev name unless it's a histogram. Histograms
4748 * display the vdev name in the header itself.
4749 */
4750 if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
4751 if (cb->cb_scripted) {
4752 printf("%s", name);
4753 } else {
4754 if (strlen(name) + depth > cb->cb_namewidth)
4755 (void) printf("%*s%s", depth, "", name);
4756 else
4757 (void) printf("%*s%s%*s", depth, "", name,
4758 (int)(cb->cb_namewidth - strlen(name) -
4759 depth), "");
4760 }
4761 }
4762
4763 /* Calculate our scaling factor */
4764 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
4765 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
4766 /*
4767 * If we specify printing histograms with no time interval, then
4768 * print the histogram numbers over the entire lifetime of the
4769 * vdev.
4770 */
4771 scale = 1;
4772 } else {
4773 if (tdelta == 0)
4774 scale = 1.0;
4775 else
4776 scale = (double)NANOSEC / tdelta;
4777 }
4778
4779 if (cb->cb_flags & IOS_DEFAULT_M) {
4780 calc_default_iostats(oldvs, newvs, calcvs);
4781 print_iostat_default(calcvs, cb, scale);
4782 }
4783 if (cb->cb_flags & IOS_LATENCY_M)
4784 print_iostat_latency(cb, oldnv, newnv);
4785 if (cb->cb_flags & IOS_QUEUES_M)
4786 print_iostat_queues(cb, newnv);
4787 if (cb->cb_flags & IOS_ANYHISTO_M) {
4788 printf("\n");
4789 print_iostat_histos(cb, oldnv, newnv, scale, name);
4790 }
4791
4792 if (cb->vcdl != NULL) {
4793 char *path;
4794 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
4795 &path) == 0) {
4796 printf(" ");
4797 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
4798 }
4799 }
4800
4801 if (!(cb->cb_flags & IOS_ANYHISTO_M))
4802 printf("\n");
4803
4804 ret++;
4805
4806 children:
4807
4808 free(calcvs);
4809
4810 if (!cb->cb_verbose)
4811 return (ret);
4812
4813 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
4814 &newchild, &children) != 0)
4815 return (ret);
4816
4817 if (oldnv) {
4818 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
4819 &oldchild, &oldchildren) != 0)
4820 return (ret);
4821
4822 children = MIN(oldchildren, children);
4823 }
4824
4825 /*
4826 * print normal top-level devices
4827 */
4828 for (c = 0; c < children; c++) {
4829 uint64_t ishole = B_FALSE, islog = B_FALSE;
4830
4831 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
4832 &ishole);
4833
4834 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
4835 &islog);
4836
4837 if (ishole || islog)
4838 continue;
4839
4840 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
4841 continue;
4842
4843 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
4844 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
4845 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
4846 newchild[c], cb, depth + 2);
4847 free(vname);
4848 }
4849
4850 /*
4851 * print all other top-level devices
4852 */
4853 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
4854 boolean_t printed = B_FALSE;
4855
4856 for (c = 0; c < children; c++) {
4857 uint64_t islog = B_FALSE;
4858 char *bias = NULL;
4859 char *type = NULL;
4860
4861 (void) nvlist_lookup_uint64(newchild[c],
4862 ZPOOL_CONFIG_IS_LOG, &islog);
4863 if (islog) {
4864 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
4865 } else {
4866 (void) nvlist_lookup_string(newchild[c],
4867 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
4868 (void) nvlist_lookup_string(newchild[c],
4869 ZPOOL_CONFIG_TYPE, &type);
4870 }
4871 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
4872 continue;
4873 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
4874 continue;
4875
4876 if (!printed) {
4877 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
4878 !cb->cb_scripted &&
4879 !cb->cb_vdevs.cb_names) {
4880 print_iostat_dashes(cb, 0,
4881 class_name[n]);
4882 }
4883 printf("\n");
4884 printed = B_TRUE;
4885 }
4886
4887 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
4888 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID);
4889 ret += print_vdev_stats(zhp, vname, oldnv ?
4890 oldchild[c] : NULL, newchild[c], cb, depth + 2);
4891 free(vname);
4892 }
4893 }
4894
4895 /*
4896 * Include level 2 ARC devices in iostat output
4897 */
4898 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
4899 &newchild, &children) != 0)
4900 return (ret);
4901
4902 if (oldnv) {
4903 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
4904 &oldchild, &oldchildren) != 0)
4905 return (ret);
4906
4907 children = MIN(oldchildren, children);
4908 }
4909
4910 if (children > 0) {
4911 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
4912 !cb->cb_vdevs.cb_names) {
4913 print_iostat_dashes(cb, 0, "cache");
4914 }
4915 printf("\n");
4916
4917 for (c = 0; c < children; c++) {
4918 vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
4919 cb->cb_vdevs.cb_name_flags);
4920 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
4921 : NULL, newchild[c], cb, depth + 2);
4922 free(vname);
4923 }
4924 }
4925
4926 return (ret);
4927 }
4928
4929 static int
4930 refresh_iostat(zpool_handle_t *zhp, void *data)
4931 {
4932 iostat_cbdata_t *cb = data;
4933 boolean_t missing;
4934
4935 /*
4936 * If the pool has disappeared, remove it from the list and continue.
4937 */
4938 if (zpool_refresh_stats(zhp, &missing) != 0)
4939 return (-1);
4940
4941 if (missing)
4942 pool_list_remove(cb->cb_list, zhp);
4943
4944 return (0);
4945 }
4946
4947 /*
4948 * Callback to print out the iostats for the given pool.
4949 */
4950 static int
4951 print_iostat(zpool_handle_t *zhp, void *data)
4952 {
4953 iostat_cbdata_t *cb = data;
4954 nvlist_t *oldconfig, *newconfig;
4955 nvlist_t *oldnvroot, *newnvroot;
4956 int ret;
4957
4958 newconfig = zpool_get_config(zhp, &oldconfig);
4959
4960 if (cb->cb_iteration == 1)
4961 oldconfig = NULL;
4962
4963 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
4964 &newnvroot) == 0);
4965
4966 if (oldconfig == NULL)
4967 oldnvroot = NULL;
4968 else
4969 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
4970 &oldnvroot) == 0);
4971
4972 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
4973 cb, 0);
4974 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
4975 !cb->cb_scripted && cb->cb_verbose &&
4976 !cb->cb_vdevs.cb_names_count) {
4977 print_iostat_separator(cb);
4978 if (cb->vcdl != NULL) {
4979 print_cmd_columns(cb->vcdl, 1);
4980 }
4981 printf("\n");
4982 }
4983
4984 return (ret);
4985 }
4986
4987 static int
4988 get_columns(void)
4989 {
4990 struct winsize ws;
4991 int columns = 80;
4992 int error;
4993
4994 if (isatty(STDOUT_FILENO)) {
4995 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
4996 if (error == 0)
4997 columns = ws.ws_col;
4998 } else {
4999 columns = 999;
5000 }
5001
5002 return (columns);
5003 }
5004
5005 /*
5006 * Return the required length of the pool/vdev name column. The minimum
5007 * allowed width and output formatting flags must be provided.
5008 */
5009 static int
5010 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
5011 {
5012 nvlist_t *config, *nvroot;
5013 int width = min_width;
5014
5015 if ((config = zpool_get_config(zhp, NULL)) != NULL) {
5016 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5017 &nvroot) == 0);
5018 size_t poolname_len = strlen(zpool_get_name(zhp));
5019 if (verbose == B_FALSE) {
5020 width = MAX(poolname_len, min_width);
5021 } else {
5022 width = MAX(poolname_len,
5023 max_width(zhp, nvroot, 0, min_width, flags));
5024 }
5025 }
5026
5027 return (width);
5028 }
5029
5030 /*
5031 * Parse the input string and get the 'interval' and 'count' values, if present.
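 *
 * For example (arguments are illustrative), "zpool iostat tank 2 5" yields
 * interval = 2 and count = 5, while "zpool iostat tank 2" yields
 * interval = 2 and count = 0, i.e. repeat forever.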
5032 */
5033 static void
5034 get_interval_count(int *argcp, char **argv, float *iv,
5035 unsigned long *cnt)
5036 {
5037 float interval = 0;
5038 unsigned long count = 0;
5039 int argc = *argcp;
5040
5041 /*
5042 * Determine if the last argument is a number or a pool name
5043 */
5044 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5045 char *end;
5046
5047 errno = 0;
5048 interval = strtof(argv[argc - 1], &end);
5049
5050 if (*end == '\0' && errno == 0) {
5051 if (interval == 0) {
5052 (void) fprintf(stderr, gettext(
5053 "interval cannot be zero\n"));
5054 usage(B_FALSE);
5055 }
5056 /*
5057 * Ignore the last parameter
5058 */
5059 argc--;
5060 } else {
5061 /*
5062 * If this is not a valid number, just plow on. The
5063 * user will get a more informative error message later
5064 * on.
5065 */
5066 interval = 0;
5067 }
5068 }
5069
5070 /*
5071 * If the last argument is also a number, then we have both a count
5072 * and an interval.
5073 */
5074 if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
5075 char *end;
5076
5077 errno = 0;
5078 count = interval;
5079 interval = strtof(argv[argc - 1], &end);
5080
5081 if (*end == '\0' && errno == 0) {
5082 if (interval == 0) {
5083 (void) fprintf(stderr, gettext(
5084 "interval cannot be zero\n"));
5085 usage(B_FALSE);
5086 }
5087
5088 /*
5089 * Ignore the last parameter
5090 */
5091 argc--;
5092 } else {
5093 interval = 0;
5094 }
5095 }
5096
5097 *iv = interval;
5098 *cnt = count;
5099 *argcp = argc;
5100 }
5101
5102 static void
5103 get_timestamp_arg(char c)
5104 {
5105 if (c == 'u')
5106 timestamp_fmt = UDATE;
5107 else if (c == 'd')
5108 timestamp_fmt = DDATE;
5109 else
5110 usage(B_FALSE);
5111 }
5112
5113 /*
5114 * Return stat flags that are supported by all pools by both the module and
5115 * zpool iostat. "*data" should be initialized to all 0xFFs before running.
5116 * It will get ANDed down until only the flags that are supported on all pools
5117 * remain.
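 *
 * For example (flags are illustrative), if one pool supports
 * IOS_DEFAULT_M | IOS_LATENCY_M and another supports only IOS_DEFAULT_M,
 * "*data" is left as IOS_DEFAULT_M after both pools have been visited.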
5118 */
5119 static int
5120 get_stat_flags_cb(zpool_handle_t *zhp, void *data)
5121 {
5122 uint64_t *mask = data;
5123 nvlist_t *config, *nvroot, *nvx;
5124 uint64_t flags = 0;
5125 int i, j;
5126
5127 config = zpool_get_config(zhp, NULL);
5128 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
5129 &nvroot) == 0);
5130
5131 /* Default stats are always supported, but for completeness... */
5132 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
5133 flags |= IOS_DEFAULT_M;
5134
5135 /* Get our extended stats nvlist from the main list */
5136 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
5137 &nvx) != 0) {
5138 /*
5139 * No extended stats; they're probably running an older
5140 * module. No big deal, we support that too.
5141 */
5142 goto end;
5143 }
5144
5145 /* For each extended stat, make sure all its nvpairs are supported */
5146 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
5147 if (!vsx_type_to_nvlist[j][0])
5148 continue;
5149
5150 /* Start off by assuming the flag is supported, then check */
5151 flags |= (1ULL << j);
5152 for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
5153 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
5154 /* flag isn't supported */
5155 flags = flags & ~(1ULL << j);
5156 break;
5157 }
5158 }
5159 }
5160 end:
5161 *mask = *mask & flags;
5162 return (0);
5163 }
5164
5165 /*
5166 * Return a bitmask of stats that are supported on all pools by both the module
5167 * and zpool iostat.
5168 */
5169 static uint64_t
5170 get_stat_flags(zpool_list_t *list)
5171 {
5172 uint64_t mask = -1;
5173
5174 /*
5175 * get_stat_flags_cb() will lop off bits from "mask" until only the
5176 * flags that are supported on all pools remain.
5177 */
5178 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
5179 return (mask);
5180 }
5181
5182 /*
5183 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise.
5184 */
5185 static int
5186 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
5187 {
5188 uint64_t guid;
5189 vdev_cbdata_t *cb = cb_data;
5190 zpool_handle_t *zhp = zhp_data;
5191
5192 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
5193 return (0);
5194
5195 return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0]));
5196 }
5197
5198 /*
5199 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise.
5200 */
5201 static int
5202 is_vdev(zpool_handle_t *zhp, void *cb_data)
5203 {
5204 return (for_each_vdev(zhp, is_vdev_cb, cb_data));
5205 }
5206
5207 /*
5208 * Check if vdevs are in a pool
5209 *
5210 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
5211 * return 0. If pool_name is NULL, then search all pools.
5212 */
5213 static int
5214 are_vdevs_in_pool(int argc, char **argv, char *pool_name,
5215 vdev_cbdata_t *cb)
5216 {
5217 char **tmp_name;
5218 int ret = 0;
5219 int i;
5220 int pool_count = 0;
5221
5222 if ((argc == 0) || !*argv)
5223 return (0);
5224
5225 if (pool_name)
5226 pool_count = 1;
5227
5228 /* Temporarily hijack cb_names for a second... */
5229 tmp_name = cb->cb_names;
5230
5231 /* Go through our list of prospective vdev names */
5232 for (i = 0; i < argc; i++) {
5233 cb->cb_names = argv + i;
5234
5235 /* Is this name a vdev in our pools? */
5236 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
5237 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb);
5238 if (!ret) {
5239 /* No match */
5240 break;
5241 }
5242 }
5243
5244 cb->cb_names = tmp_name;
5245
5246 return (ret);
5247 }
5248
5249 static int
5250 is_pool_cb(zpool_handle_t *zhp, void *data)
5251 {
5252 char *name = data;
5253 if (strcmp(name, zpool_get_name(zhp)) == 0)
5254 return (1);
5255
5256 return (0);
5257 }
5258
5259 /*
5260 * Do we have a pool named *name? If so, return 1, otherwise 0.
5261 */
5262 static int
5263 is_pool(char *name)
5264 {
5265 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE,
5266 is_pool_cb, name));
5267 }
5268
5269 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
5270 static int
5271 are_all_pools(int argc, char **argv)
5272 {
5273 if ((argc == 0) || !*argv)
5274 return (0);
5275
5276 while (--argc >= 0)
5277 if (!is_pool(argv[argc]))
5278 return (0);
5279
5280 return (1);
5281 }
5282
5283 /*
5284 * Helper function to print out vdev/pool names we can't resolve. Used for an
5285 * error message.
5286 */
5287 static void
5288 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
5289 vdev_cbdata_t *cb)
5290 {
5291 int i;
5292 char *name;
5293 char *str;
5294 for (i = 0; i < argc; i++) {
5295 name = argv[i];
5296
5297 if (is_pool(name))
5298 str = gettext("pool");
5299 else if (are_vdevs_in_pool(1, &name, pool_name, cb))
5300 str = gettext("vdev in this pool");
5301 else if (are_vdevs_in_pool(1, &name, NULL, cb))
5302 str = gettext("vdev in another pool");
5303 else
5304 str = gettext("unknown");
5305
5306 fprintf(stderr, "\t%s (%s)\n", name, str);
5307 }
5308 }
5309
5310 /*
5311 * Same as get_interval_count(), but with additional checks to not misinterpret
5312 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in
5313 * cb.cb_vdevs.cb_name_flags.
5314 */
5315 static void
5316 get_interval_count_filter_guids(int *argc, char **argv, float *interval,
5317 unsigned long *count, iostat_cbdata_t *cb)
5318 {
5319 char **tmpargv = argv;
5320 int argc_for_interval = 0;
5321
5322 /* Is the last arg an interval value? Or a guid? */
5323 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL,
5324 &cb->cb_vdevs)) {
5325 /*
5326 * The last arg is not a guid, so it's probably an
5327 * interval value.
5328 */
5329 argc_for_interval++;
5330
5331 if (*argc >= 2 &&
5332 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL,
5333 &cb->cb_vdevs)) {
5334 /*
5335 * The 2nd to last arg is not a guid, so it's probably
5336 * an interval value.
5337 */
5338 argc_for_interval++;
5339 }
5340 }
5341
5342 /* Point to our list of possible intervals */
5343 tmpargv = &argv[*argc - argc_for_interval];
5344
5345 *argc = *argc - argc_for_interval;
5346 get_interval_count(&argc_for_interval, tmpargv,
5347 interval, count);
5348 }
5349
5350 /*
5351 * Floating point sleep(). Allows you to pass in a floating point value for
5352 * seconds.
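 *
 * For example, fsleep(1.5) sleeps for one second plus 500000000 nanoseconds
 * (the value is illustrative).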
5353 */
5354 static void
5355 fsleep(float sec)
5356 {
5357 struct timespec req;
5358 req.tv_sec = floor(sec);
5359 req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC;
5360 nanosleep(&req, NULL);
5361 }
5362
5363 /*
5364 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
5365 * if we were unable to determine its size.
5366 */
5367 static int
5368 terminal_height(void)
5369 {
5370 struct winsize win;
5371
5372 if (isatty(STDOUT_FILENO) == 0)
5373 return (-1);
5374
5375 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
5376 return (win.ws_row);
5377
5378 return (-1);
5379 }
5380
5381 /*
5382 * Run one of the zpool status/iostat -c scripts with the help (-h) option and
5383 * print the result.
5384 *
5385 * name: Short name of the script ('iostat').
5386 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat').
5387 */
5388 static void
5389 print_zpool_script_help(char *name, char *path)
5390 {
5391 char *argv[] = {path, (char *)"-h", NULL};
5392 char **lines = NULL;
5393 int lines_cnt = 0;
5394 int rc;
5395
5396 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
5397 &lines_cnt);
5398 if (rc != 0 || lines == NULL || lines_cnt <= 0) {
5399 if (lines != NULL)
5400 libzfs_free_str_array(lines, lines_cnt);
5401 return;
5402 }
5403
5404 for (int i = 0; i < lines_cnt; i++)
5405 if (!is_blank_str(lines[i]))
5406 printf(" %-14s %s\n", name, lines[i]);
5407
5408 libzfs_free_str_array(lines, lines_cnt);
5409 }
5410
5411 /*
5412 * Go through the zpool status/iostat -c scripts in the user's path, run their
5413 * help option (-h), and print out the results.
5414 */
5415 static void
5416 print_zpool_dir_scripts(char *dirpath)
5417 {
5418 DIR *dir;
5419 struct dirent *ent;
5420 char fullpath[MAXPATHLEN];
5421 struct stat dir_stat;
5422
5423 if ((dir = opendir(dirpath)) != NULL) {
5424 /* print help for all the scripts within the directory */
5425 while ((ent = readdir(dir)) != NULL) {
5426 if (snprintf(fullpath, sizeof (fullpath), "%s/%s",
5427 dirpath, ent->d_name) >= sizeof (fullpath)) {
5428 (void) fprintf(stderr,
5429 gettext("internal error: "
5430 "ZPOOL_SCRIPTS_PATH too large.\n"));
5431 exit(1);
5432 }
5433
5434 /* Print the scripts */
5435 if (stat(fullpath, &dir_stat) == 0)
5436 if (dir_stat.st_mode & S_IXUSR &&
5437 S_ISREG(dir_stat.st_mode))
5438 print_zpool_script_help(ent->d_name,
5439 fullpath);
5440 }
5441 closedir(dir);
5442 }
5443 }
5444
5445 /*
5446 * Print out help text for all zpool status/iostat -c scripts.
5447 */
5448 static void
5449 print_zpool_script_list(const char *subcommand)
5450 {
5451 char *dir, *sp, *tmp;
5452
5453 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
5454
5455 sp = zpool_get_cmd_search_path();
5456 if (sp == NULL)
5457 return;
5458
5459 for (dir = strtok_r(sp, ":", &tmp);
5460 dir != NULL;
5461 dir = strtok_r(NULL, ":", &tmp))
5462 print_zpool_dir_scripts(dir);
5463
5464 free(sp);
5465 }
5466
5467 /*
5468 * Set the minimum pool/vdev name column width. The width must be at least 10,
5469 * but may be as large as the terminal width - 42 so it still fits on one line.
5470 * NOTE: 42 is the width of the default capacity/operations/bandwidth output
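 *
 * For example, on an 80-column terminal the name column is padded to at
 * most 80 - 42 = 38 characters (terminal width chosen for illustration).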
5471 */
5472 static int
5473 get_namewidth_iostat(zpool_handle_t *zhp, void *data)
5474 {
5475 iostat_cbdata_t *cb = data;
5476 int width, available_width;
5477
5478 /*
5479 * get_namewidth() returns the maximum width of any name in that column
5480 * for any pool/vdev/device line that will be output.
5481 */
5482 width = get_namewidth(zhp, cb->cb_namewidth,
5483 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
5484
5485 /*
5486 * The width we are calculating is the width of the header and also the
5487 * padding width for names that are less than maximum width. The stats
5488 * take up 42 characters, so the width available for names is:
5489 */
5490 available_width = get_columns() - 42;
5491
5492 /*
5493 * If the maximum width fits on a screen, then great! Make everything
5494 * line up by justifying all lines to the same width. If that max
5495 * width is larger than what's available, the name plus stats won't fit
5496 * on one line, and justifying to that width would cause every line to
5497 * wrap on the screen. We only want lines with long names to wrap.
5498 * Limit the padding to what won't wrap.
5499 */
5500 if (width > available_width)
5501 width = available_width;
5502
5503 /*
5504 * And regardless of whatever the screen width is (get_columns can
5505 * return 0 if the width is not known or less than 42 for a narrow
5506 * terminal) have the width be a minimum of 10.
5507 */
5508 if (width < 10)
5509 width = 10;
5510
5511 /* Save the calculated width */
5512 cb->cb_namewidth = width;
5513
5514 return (0);
5515 }
5516
5517 /*
5518 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
5519 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
5520 * [interval [count]]
5521 *
5522 * -c CMD For each vdev, run command CMD
5523 * -g Display guid for individual vdev name.
5524 * -L Follow links when resolving vdev path name.
5525 * -P Display full path for vdev name.
5526 * -v Display statistics for individual vdevs
5527 * -h Display help
5528 * -p Display values in parsable (exact) format.
5529 * -H Scripted mode. Don't display headers, and separate properties
5530 * by a single tab.
5531 * -l Display average latency
5532 * -q Display queue depths
5533 * -w Display latency histograms
5534 * -r Display request size histogram
5535 * -T Display a timestamp in date(1) or Unix format
5536 * -n Only print headers once
5537 *
5538 * This command can be tricky because we want to be able to deal with pool
5539 * creation/destruction as well as vdev configuration changes. The bulk of this
5540 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely
5541 * on pool_list_update() to detect the addition of new pools. Configuration
5542 * changes are all handled within libzfs.
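 *
 * Example invocations (pool name and values are illustrative):
 *
 *	zpool iostat -v tank 5		# per-vdev stats every 5 seconds
 *	zpool iostat -l tank 1 10	# average latencies, ten 1-second samples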
5543 */
5544 int
5545 zpool_do_iostat(int argc, char **argv)
5546 {
5547 int c;
5548 int ret;
5549 int npools;
5550 float interval = 0;
5551 unsigned long count = 0;
5552 int winheight = 24;
5553 zpool_list_t *list;
5554 boolean_t verbose = B_FALSE;
5555 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
5556 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
5557 boolean_t omit_since_boot = B_FALSE;
5558 boolean_t guid = B_FALSE;
5559 boolean_t follow_links = B_FALSE;
5560 boolean_t full_name = B_FALSE;
5561 boolean_t headers_once = B_FALSE;
5562 iostat_cbdata_t cb = { 0 };
5563 char *cmd = NULL;
5564
5565 /* Used for printing an error message */
5566 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
5567 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
5568
5569 uint64_t unsupported_flags;
5570
5571 /* check options */
5572 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
5573 switch (c) {
5574 case 'c':
5575 if (cmd != NULL) {
5576 fprintf(stderr,
5577 gettext("Can't set -c flag twice\n"));
5578 exit(1);
5579 }
5580
5581 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
5582 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
5583 fprintf(stderr, gettext(
5584 "Can't run -c, disabled by "
5585 "ZPOOL_SCRIPTS_ENABLED.\n"));
5586 exit(1);
5587 }
5588
5589 if ((getuid() <= 0 || geteuid() <= 0) &&
5590 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
5591 fprintf(stderr, gettext(
5592 "Can't run -c with root privileges "
5593 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
5594 exit(1);
5595 }
5596 cmd = optarg;
5597 verbose = B_TRUE;
5598 break;
5599 case 'g':
5600 guid = B_TRUE;
5601 break;
5602 case 'L':
5603 follow_links = B_TRUE;
5604 break;
5605 case 'P':
5606 full_name = B_TRUE;
5607 break;
5608 case 'T':
5609 get_timestamp_arg(*optarg);
5610 break;
5611 case 'v':
5612 verbose = B_TRUE;
5613 break;
5614 case 'p':
5615 parsable = B_TRUE;
5616 break;
5617 case 'l':
5618 latency = B_TRUE;
5619 break;
5620 case 'q':
5621 queues = B_TRUE;
5622 break;
5623 case 'H':
5624 scripted = B_TRUE;
5625 break;
5626 case 'w':
5627 l_histo = B_TRUE;
5628 break;
5629 case 'r':
5630 rq_histo = B_TRUE;
5631 break;
5632 case 'y':
5633 omit_since_boot = B_TRUE;
5634 break;
5635 case 'n':
5636 headers_once = B_TRUE;
5637 break;
5638 case 'h':
5639 usage(B_FALSE);
5640 break;
5641 case '?':
5642 if (optopt == 'c') {
5643 print_zpool_script_list("iostat");
5644 exit(0);
5645 } else {
5646 fprintf(stderr,
5647 gettext("invalid option '%c'\n"), optopt);
5648 }
5649 usage(B_FALSE);
5650 }
5651 }
5652
5653 argc -= optind;
5654 argv += optind;
5655
5656 cb.cb_literal = parsable;
5657 cb.cb_scripted = scripted;
5658
5659 if (guid)
5660 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID;
5661 if (follow_links)
5662 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
5663 if (full_name)
5664 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH;
5665 cb.cb_iteration = 0;
5666 cb.cb_namewidth = 0;
5667 cb.cb_verbose = verbose;
5668
5669 /* Get our interval and count values (if any) */
5670 if (guid) {
5671 get_interval_count_filter_guids(&argc, argv, &interval,
5672 &count, &cb);
5673 } else {
5674 get_interval_count(&argc, argv, &interval, &count);
5675 }
5676
5677 if (argc == 0) {
5678 /* No args, so just print the defaults. */
5679 } else if (are_all_pools(argc, argv)) {
5680 /* All the args are pool names */
5681 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) {
5682 /* All the args are vdevs */
5683 cb.cb_vdevs.cb_names = argv;
5684 cb.cb_vdevs.cb_names_count = argc;
5685 argc = 0; /* No pools to process */
5686 } else if (are_all_pools(1, argv)) {
5687 /* The first arg is a pool name */
5688 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
5689 &cb.cb_vdevs)) {
5690 /* ...and the rest are vdev names */
5691 cb.cb_vdevs.cb_names = argv + 1;
5692 cb.cb_vdevs.cb_names_count = argc - 1;
5693 argc = 1; /* One pool to process */
5694 } else {
5695 fprintf(stderr, gettext("Expected either a list of "));
5696 fprintf(stderr, gettext("pools, or a list of vdevs in"));
5697 fprintf(stderr, " \"%s\", ", argv[0]);
5698 fprintf(stderr, gettext("but got:\n"));
5699 error_list_unresolved_vdevs(argc - 1, argv + 1,
5700 argv[0], &cb.cb_vdevs);
5701 fprintf(stderr, "\n");
5702 usage(B_FALSE);
5703 return (1);
5704 }
5705 } else {
5706 /*
5707 * The args don't make sense. The first arg isn't a pool name,
5708 * nor are all the args vdevs.
5709 */
5710 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
5711 fprintf(stderr, "\n");
5712 return (1);
5713 }
5714
5715 if (cb.cb_vdevs.cb_names_count != 0) {
5716 /*
5717 * If the user specified vdevs, it implies verbose.
5718 */
5719 cb.cb_verbose = B_TRUE;
5720 }
5721
5722 /*
5723 * Construct the list of all interesting pools.
5724 */
5725 ret = 0;
5726 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable,
5727 &ret)) == NULL)
5728 return (1);
5729
5730 if (pool_list_count(list) == 0 && argc != 0) {
5731 pool_list_free(list);
5732 return (1);
5733 }
5734
5735 if (pool_list_count(list) == 0 && interval == 0) {
5736 pool_list_free(list);
5737 (void) fprintf(stderr, gettext("no pools available\n"));
5738 return (1);
5739 }
5740
5741 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
5742 pool_list_free(list);
5743 (void) fprintf(stderr,
5744 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
5745 usage(B_FALSE);
5746 return (1);
5747 }
5748
5749 if (l_histo && rq_histo) {
5750 pool_list_free(list);
5751 (void) fprintf(stderr,
5752 gettext("Only one of [-r|-w] can be passed at a time\n"));
5753 usage(B_FALSE);
5754 return (1);
5755 }
5756
5757 /*
5758 * Enter the main iostat loop.
5759 */
5760 cb.cb_list = list;
5761
5762 if (l_histo) {
5763 /*
5764 * Histogram tables look out of place when you try to display
5765 * them with the other stats, so make a rule that you can only
5766 * print histograms by themselves.
5767 */
5768 cb.cb_flags = IOS_L_HISTO_M;
5769 } else if (rq_histo) {
5770 cb.cb_flags = IOS_RQ_HISTO_M;
5771 } else {
5772 cb.cb_flags = IOS_DEFAULT_M;
5773 if (latency)
5774 cb.cb_flags |= IOS_LATENCY_M;
5775 if (queues)
5776 cb.cb_flags |= IOS_QUEUES_M;
5777 }
5778
5779 /*
5780 * See if the module supports all the stats we want to display.
5781 */
5782 unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
5783 if (unsupported_flags) {
5784 uint64_t f;
5785 int idx;
5786 fprintf(stderr,
5787 gettext("The loaded zfs module doesn't support:"));
5788
5789 /* for each bit set in unsupported_flags */
5790 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
5791 idx = lowbit64(f) - 1;
5792 fprintf(stderr, " -%c", flag_to_arg[idx]);
5793 }
5794
5795 fprintf(stderr, ". Try running a newer module.\n");
5796 pool_list_free(list);
5797
5798 return (1);
5799 }
5800
5801 for (;;) {
5802 if ((npools = pool_list_count(list)) == 0)
5803 (void) fprintf(stderr, gettext("no pools available\n"));
5804 else {
5805 /*
5806 * If this is the first iteration and -y was supplied
5807 * we skip any printing.
5808 */
5809 boolean_t skip = (omit_since_boot &&
5810 cb.cb_iteration == 0);
5811
5812 /*
5813 * Refresh all statistics. This is done as an
5814 * explicit step before calculating the maximum name
5815 * width, so that any configuration changes are
5816 * properly accounted for.
5817 */
5818 (void) pool_list_iter(list, B_FALSE, refresh_iostat,
5819 &cb);
5820
5821 /*
5822 * Iterate over all pools to determine the maximum width
5823 * for the pool / device name column across all pools.
5824 */
5825 cb.cb_namewidth = 0;
5826 (void) pool_list_iter(list, B_FALSE,
5827 get_namewidth_iostat, &cb);
5828
5829 if (timestamp_fmt != NODATE)
5830 print_timestamp(timestamp_fmt);
5831
5832 if (cmd != NULL && cb.cb_verbose &&
5833 !(cb.cb_flags & IOS_ANYHISTO_M)) {
5834 cb.vcdl = all_pools_for_each_vdev_run(argc,
5835 argv, cmd, g_zfs, cb.cb_vdevs.cb_names,
5836 cb.cb_vdevs.cb_names_count,
5837 cb.cb_vdevs.cb_name_flags);
5838 } else {
5839 cb.vcdl = NULL;
5840 }
5841
5842
5843 /*
5844 * Check terminal size so we can print headers
5845 * even when terminal window has its height
5846 * changed.
5847 */
5848 winheight = terminal_height();
5849 /*
5850 * Are we connected to a TTY? If not, headers_once
5851 * should be true, to avoid breaking scripts.
5852 */
5853 if (winheight < 0)
5854 headers_once = B_TRUE;
5855
5856 /*
5857 * If it's the first iteration and we're not skipping it,
5858 * or exactly one of skip and verbose is set, print the header.
5859 *
5860 * The histogram code explicitly prints its header on
5861 * every vdev, so skip this for histograms.
5862 */
5863 if (((++cb.cb_iteration == 1 && !skip) ||
5864 (skip != verbose) ||
5865 (!headers_once &&
5866 (cb.cb_iteration % winheight) == 0)) &&
5867 (!(cb.cb_flags & IOS_ANYHISTO_M)) &&
5868 !cb.cb_scripted)
5869 print_iostat_header(&cb);
5870
5871 if (skip) {
5872 (void) fsleep(interval);
5873 continue;
5874 }
5875
5876 pool_list_iter(list, B_FALSE, print_iostat, &cb);
5877
5878 /*
5879 * If there's more than one pool, and we're not in
5880 * verbose mode (which prints a separator for us),
5881 * then print a separator.
5882 *
5883 * In addition, if we're printing specific vdevs then
5884 * we also want an ending separator.
5885 */
5886 if (((npools > 1 && !verbose &&
5887 !(cb.cb_flags & IOS_ANYHISTO_M)) ||
5888 (!(cb.cb_flags & IOS_ANYHISTO_M) &&
5889 cb.cb_vdevs.cb_names_count)) &&
5890 !cb.cb_scripted) {
5891 print_iostat_separator(&cb);
5892 if (cb.vcdl != NULL)
5893 print_cmd_columns(cb.vcdl, 1);
5894 printf("\n");
5895 }
5896
5897 if (cb.vcdl != NULL)
5898 free_vdev_cmd_data_list(cb.vcdl);
5899
5900 }
5901
5902 /*
5903 * Flush the output so that redirection to a file isn't buffered
5904 * indefinitely.
5905 */
5906 (void) fflush(stdout);
5907
5908 if (interval == 0)
5909 break;
5910
5911 if (count != 0 && --count == 0)
5912 break;
5913
5914 (void) fsleep(interval);
5915 }
5916
5917 pool_list_free(list);
5918
5919 return (ret);
5920 }
5921
5922 typedef struct list_cbdata {
5923 boolean_t cb_verbose;
5924 int cb_name_flags;
5925 int cb_namewidth;
5926 boolean_t cb_scripted;
5927 zprop_list_t *cb_proplist;
5928 boolean_t cb_literal;
5929 } list_cbdata_t;
5930
5931
5932 /*
5933 * Given a list of columns to display, output appropriate headers for each one.
5934 */
5935 static void
5936 print_header(list_cbdata_t *cb)
5937 {
5938 zprop_list_t *pl = cb->cb_proplist;
5939 char headerbuf[ZPOOL_MAXPROPLEN];
5940 const char *header;
5941 boolean_t first = B_TRUE;
5942 boolean_t right_justify;
5943 size_t width = 0;
5944
5945 for (; pl != NULL; pl = pl->pl_next) {
5946 width = pl->pl_width;
5947 if (first && cb->cb_verbose) {
5948 /*
5949 * Reset the width to accommodate the verbose listing
5950 * of devices.
5951 */
5952 width = cb->cb_namewidth;
5953 }
5954
5955 if (!first)
5956 (void) fputs(" ", stdout);
5957 else
5958 first = B_FALSE;
5959
5960 right_justify = B_FALSE;
5961 if (pl->pl_prop != ZPROP_USERPROP) {
5962 header = zpool_prop_column_name(pl->pl_prop);
5963 right_justify = zpool_prop_align_right(pl->pl_prop);
5964 } else {
5965 int i;
5966
5967 for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
5968 headerbuf[i] = toupper(pl->pl_user_prop[i]);
5969 headerbuf[i] = '\0';
5970 header = headerbuf;
5971 }
5972
5973 if (pl->pl_next == NULL && !right_justify)
5974 (void) fputs(header, stdout);
5975 else if (right_justify)
5976 (void) printf("%*s", (int)width, header);
5977 else
5978 (void) printf("%-*s", (int)width, header);
5979 }
5980
5981 (void) fputc('\n', stdout);
5982 }
5983
5984 /*
5985 * Given a pool and a list of properties, print out all the properties according
5986 * to the described layout. Used by zpool_do_list().
5987 */
5988 static void
5989 print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
5990 {
5991 zprop_list_t *pl = cb->cb_proplist;
5992 boolean_t first = B_TRUE;
5993 char property[ZPOOL_MAXPROPLEN];
5994 const char *propstr;
5995 boolean_t right_justify;
5996 size_t width;
5997
5998 for (; pl != NULL; pl = pl->pl_next) {
5999
6000 width = pl->pl_width;
6001 if (first && cb->cb_verbose) {
6002 /*
6003 * Reset the width to accommodate the verbose listing
6004 * of devices.
6005 */
6006 width = cb->cb_namewidth;
6007 }
6008
6009 if (!first) {
6010 if (cb->cb_scripted)
6011 (void) fputc('\t', stdout);
6012 else
6013 (void) fputs(" ", stdout);
6014 } else {
6015 first = B_FALSE;
6016 }
6017
6018 right_justify = B_FALSE;
6019 if (pl->pl_prop != ZPROP_USERPROP) {
6020 if (zpool_get_prop(zhp, pl->pl_prop, property,
6021 sizeof (property), NULL, cb->cb_literal) != 0)
6022 propstr = "-";
6023 else
6024 propstr = property;
6025
6026 right_justify = zpool_prop_align_right(pl->pl_prop);
6027 } else if ((zpool_prop_feature(pl->pl_user_prop) ||
6028 zpool_prop_unsupported(pl->pl_user_prop)) &&
6029 zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
6030 sizeof (property)) == 0) {
6031 propstr = property;
6032 } else {
6033 propstr = "-";
6034 }
6035
6036
6037 /*
6038 * If this is being called in scripted mode, or if this is the
6039 * last column and it is left-justified, don't include a width
6040 * format specifier.
6041 */
6042 if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
6043 (void) fputs(propstr, stdout);
6044 else if (right_justify)
6045 (void) printf("%*s", (int)width, propstr);
6046 else
6047 (void) printf("%-*s", (int)width, propstr);
6048 }
6049
6050 (void) fputc('\n', stdout);
6051 }
6052
6053 static void
6054 print_one_column(zpool_prop_t prop, uint64_t value, const char *str,
6055 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format)
6056 {
6057 char propval[64];
6058 boolean_t fixed;
6059 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
6060
6061 switch (prop) {
6062 case ZPOOL_PROP_SIZE:
6063 case ZPOOL_PROP_EXPANDSZ:
6064 case ZPOOL_PROP_CHECKPOINT:
6065 case ZPOOL_PROP_DEDUPRATIO:
6066 if (value == 0)
6067 (void) strlcpy(propval, "-", sizeof (propval));
6068 else
6069 zfs_nicenum_format(value, propval, sizeof (propval),
6070 format);
6071 break;
6072 case ZPOOL_PROP_FRAGMENTATION:
6073 if (value == ZFS_FRAG_INVALID) {
6074 (void) strlcpy(propval, "-", sizeof (propval));
6075 } else if (format == ZFS_NICENUM_RAW) {
6076 (void) snprintf(propval, sizeof (propval), "%llu",
6077 (unsigned long long)value);
6078 } else {
6079 (void) snprintf(propval, sizeof (propval), "%llu%%",
6080 (unsigned long long)value);
6081 }
6082 break;
6083 case ZPOOL_PROP_CAPACITY:
6084 /* capacity value is in parts-per-10,000 (aka permyriad) */
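		/*
		 * For example (illustrative values only): a stored value of
		 * 4250 permyriad prints as "42" in raw (-p) mode and as
		 * "42.5%" otherwise.
		 */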
6085 if (format == ZFS_NICENUM_RAW)
6086 (void) snprintf(propval, sizeof (propval), "%llu",
6087 (unsigned long long)value / 100);
6088 else
6089 (void) snprintf(propval, sizeof (propval),
6090 value < 1000 ? "%1.2f%%" : value < 10000 ?
6091 "%2.1f%%" : "%3.0f%%", value / 100.0);
6092 break;
6093 case ZPOOL_PROP_HEALTH:
6094 width = 8;
6095 (void) strlcpy(propval, str, sizeof (propval));
6096 break;
6097 default:
6098 zfs_nicenum_format(value, propval, sizeof (propval), format);
6099 }
6100
6101 if (!valid)
6102 (void) strlcpy(propval, "-", sizeof (propval));
6103
6104 if (scripted)
6105 (void) printf("\t%s", propval);
6106 else
6107 (void) printf(" %*s", (int)width, propval);
6108 }
6109
6110 /*
 * Print the static default line per vdev.  Not compatible with the
 * '-o' <proplist> option.
6113 */
6114 static void
6115 print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
6116 list_cbdata_t *cb, int depth, boolean_t isspare)
6117 {
6118 nvlist_t **child;
6119 vdev_stat_t *vs;
6120 uint_t c, children;
6121 char *vname;
6122 boolean_t scripted = cb->cb_scripted;
6123 uint64_t islog = B_FALSE;
6124 const char *dashes = "%-*s - - - - "
6125 "- - - - -\n";
6126
6127 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
6128 (uint64_t **)&vs, &c) == 0);
6129
6130 if (name != NULL) {
6131 boolean_t toplevel = (vs->vs_space != 0);
6132 uint64_t cap;
6133 enum zfs_nicenum_format format;
6134 const char *state;
6135
6136 if (cb->cb_literal)
6137 format = ZFS_NICENUM_RAW;
6138 else
6139 format = ZFS_NICENUM_1024;
6140
6141 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
6142 return;
6143
6144 if (scripted)
6145 (void) printf("\t%s", name);
6146 else if (strlen(name) + depth > cb->cb_namewidth)
6147 (void) printf("%*s%s", depth, "", name);
6148 else
6149 (void) printf("%*s%s%*s", depth, "", name,
6150 (int)(cb->cb_namewidth - strlen(name) - depth), "");
6151
6152 /*
6153 * Print the properties for the individual vdevs. Some
		 * properties are only applicable to toplevel vdevs. The
		 * 'toplevel' boolean value is passed to print_one_column()
		 * to indicate whether the value is valid.
6157 */
6158 if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace)
6159 print_one_column(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
6160 scripted, B_TRUE, format);
6161 else
6162 print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
6163 scripted, toplevel, format);
6164 print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
6165 scripted, toplevel, format);
6166 print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
6167 NULL, scripted, toplevel, format);
6168 print_one_column(ZPOOL_PROP_CHECKPOINT,
6169 vs->vs_checkpoint_space, NULL, scripted, toplevel, format);
6170 print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
6171 scripted, B_TRUE, format);
6172 print_one_column(ZPOOL_PROP_FRAGMENTATION,
6173 vs->vs_fragmentation, NULL, scripted,
6174 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
6175 format);
6176 cap = (vs->vs_space == 0) ? 0 :
6177 (vs->vs_alloc * 10000 / vs->vs_space);
6178 print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL,
6179 scripted, toplevel, format);
6180 print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
6181 scripted, toplevel, format);
6182 state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
6183 if (isspare) {
6184 if (vs->vs_aux == VDEV_AUX_SPARED)
6185 state = "INUSE";
6186 else if (vs->vs_state == VDEV_STATE_HEALTHY)
6187 state = "AVAIL";
6188 }
6189 print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted,
6190 B_TRUE, format);
6191 (void) fputc('\n', stdout);
6192 }
6193
6194 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
6195 &child, &children) != 0)
6196 return;
6197
6198 /* list the normal vdevs first */
6199 for (c = 0; c < children; c++) {
6200 uint64_t ishole = B_FALSE;
6201
6202 if (nvlist_lookup_uint64(child[c],
6203 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
6204 continue;
6205
6206 if (nvlist_lookup_uint64(child[c],
6207 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
6208 continue;
6209
6210 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
6211 continue;
6212
6213 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6214 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
6215 print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE);
6216 free(vname);
6217 }
6218
6219 /* list the classes: 'logs', 'dedup', and 'special' */
6220 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) {
6221 boolean_t printed = B_FALSE;
6222
6223 for (c = 0; c < children; c++) {
6224 char *bias = NULL;
6225 char *type = NULL;
6226
6227 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
6228 &islog) == 0 && islog) {
6229 bias = (char *)VDEV_ALLOC_CLASS_LOGS;
6230 } else {
6231 (void) nvlist_lookup_string(child[c],
6232 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
6233 (void) nvlist_lookup_string(child[c],
6234 ZPOOL_CONFIG_TYPE, &type);
6235 }
6236 if (bias == NULL || strcmp(bias, class_name[n]) != 0)
6237 continue;
6238 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
6239 continue;
6240
6241 if (!printed) {
6242 /* LINTED E_SEC_PRINTF_VAR_FMT */
6243 (void) printf(dashes, cb->cb_namewidth,
6244 class_name[n]);
6245 printed = B_TRUE;
6246 }
6247 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6248 cb->cb_name_flags | VDEV_NAME_TYPE_ID);
6249 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6250 B_FALSE);
6251 free(vname);
6252 }
6253 }
6254
6255 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
6256 &child, &children) == 0 && children > 0) {
6257 /* LINTED E_SEC_PRINTF_VAR_FMT */
6258 (void) printf(dashes, cb->cb_namewidth, "cache");
6259 for (c = 0; c < children; c++) {
6260 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6261 cb->cb_name_flags);
6262 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6263 B_FALSE);
6264 free(vname);
6265 }
6266 }
6267
6268 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
6269 &children) == 0 && children > 0) {
6270 /* LINTED E_SEC_PRINTF_VAR_FMT */
6271 (void) printf(dashes, cb->cb_namewidth, "spare");
6272 for (c = 0; c < children; c++) {
6273 vname = zpool_vdev_name(g_zfs, zhp, child[c],
6274 cb->cb_name_flags);
6275 print_list_stats(zhp, vname, child[c], cb, depth + 2,
6276 B_TRUE);
6277 free(vname);
6278 }
6279 }
6280 }
6281
6282 /*
6283 * Generic callback function to list a pool.
6284 */
6285 static int
6286 list_callback(zpool_handle_t *zhp, void *data)
6287 {
6288 list_cbdata_t *cbp = data;
6289
6290 print_pool(zhp, cbp);
6291
6292 if (cbp->cb_verbose) {
6293 nvlist_t *config, *nvroot;
6294
6295 config = zpool_get_config(zhp, NULL);
6296 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
6297 &nvroot) == 0);
6298 print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE);
6299 }
6300
6301 return (0);
6302 }
6303
6304 /*
6305 * Set the minimum pool/vdev name column width. The width must be at least 9,
6306 * but may be as large as needed.
6307 */
6308 static int
6309 get_namewidth_list(zpool_handle_t *zhp, void *data)
6310 {
6311 list_cbdata_t *cb = data;
6312 int width;
6313
6314 width = get_namewidth(zhp, cb->cb_namewidth,
6315 cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose);
6316
6317 if (width < 9)
6318 width = 9;
6319
6320 cb->cb_namewidth = width;
6321
6322 return (0);
6323 }
6324
6325 /*
 * zpool list [-gHLpPv] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
6327 *
6328 * -g Display guid for individual vdev name.
6329 * -H Scripted mode. Don't display headers, and separate properties
6330 * by a single tab.
6331 * -L Follow links when resolving vdev path name.
6332 * -o List of properties to display. Defaults to
6333 * "name,size,allocated,free,expandsize,fragmentation,capacity,"
6334 * "dedupratio,health,altroot"
6335 * -p Display values in parsable (exact) format.
6336 * -P Display full path for vdev name.
 *	-T	Display a timestamp in date(1) or Unix format
 *	-v	Display verbose per-vdev space statistics in addition to the
 *		pool-wide statistics.
6338 *
6339 * List all pools in the system, whether or not they're healthy. Output space
6340 * statistics for each one, as well as health status summary.
6341 */
6342 int
6343 zpool_do_list(int argc, char **argv)
6344 {
6345 int c;
6346 int ret = 0;
6347 list_cbdata_t cb = { 0 };
6348 static char default_props[] =
6349 "name,size,allocated,free,checkpoint,expandsize,fragmentation,"
6350 "capacity,dedupratio,health,altroot";
6351 char *props = default_props;
6352 float interval = 0;
6353 unsigned long count = 0;
6354 zpool_list_t *list;
6355 boolean_t first = B_TRUE;
6356 current_prop_type = ZFS_TYPE_POOL;
6357
6358 /* check options */
6359 while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) {
6360 switch (c) {
6361 case 'g':
6362 cb.cb_name_flags |= VDEV_NAME_GUID;
6363 break;
6364 case 'H':
6365 cb.cb_scripted = B_TRUE;
6366 break;
6367 case 'L':
6368 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
6369 break;
6370 case 'o':
6371 props = optarg;
6372 break;
6373 case 'P':
6374 cb.cb_name_flags |= VDEV_NAME_PATH;
6375 break;
6376 case 'p':
6377 cb.cb_literal = B_TRUE;
6378 break;
6379 case 'T':
6380 get_timestamp_arg(*optarg);
6381 break;
6382 case 'v':
6383 cb.cb_verbose = B_TRUE;
6384 cb.cb_namewidth = 8; /* 8 until precalc is avail */
6385 break;
6386 case ':':
6387 (void) fprintf(stderr, gettext("missing argument for "
6388 "'%c' option\n"), optopt);
6389 usage(B_FALSE);
6390 break;
6391 case '?':
6392 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6393 optopt);
6394 usage(B_FALSE);
6395 }
6396 }
6397
6398 argc -= optind;
6399 argv += optind;
6400
6401 get_interval_count(&argc, argv, &interval, &count);
6402
6403 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
6404 usage(B_FALSE);
6405
6406 for (;;) {
6407 if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
6408 ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL)
6409 return (1);
6410
6411 if (pool_list_count(list) == 0)
6412 break;
6413
6414 cb.cb_namewidth = 0;
6415 (void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
6416
6417 if (timestamp_fmt != NODATE)
6418 print_timestamp(timestamp_fmt);
6419
6420 if (!cb.cb_scripted && (first || cb.cb_verbose)) {
6421 print_header(&cb);
6422 first = B_FALSE;
6423 }
6424 ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
6425
6426 if (interval == 0)
6427 break;
6428
6429 if (count != 0 && --count == 0)
6430 break;
6431
6432 pool_list_free(list);
6433 (void) fsleep(interval);
6434 }
6435
6436 if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) {
6437 (void) printf(gettext("no pools available\n"));
6438 ret = 0;
6439 }
6440
6441 pool_list_free(list);
6442 zprop_free_list(cb.cb_proplist);
6443 return (ret);
6444 }
6445
6446 static int
6447 zpool_do_attach_or_replace(int argc, char **argv, int replacing)
6448 {
6449 boolean_t force = B_FALSE;
6450 boolean_t rebuild = B_FALSE;
6451 boolean_t wait = B_FALSE;
6452 int c;
6453 nvlist_t *nvroot;
6454 char *poolname, *old_disk, *new_disk;
6455 zpool_handle_t *zhp;
6456 nvlist_t *props = NULL;
6457 char *propval;
6458 int ret;
6459
6460 /* check options */
6461 while ((c = getopt(argc, argv, "fo:sw")) != -1) {
6462 switch (c) {
6463 case 'f':
6464 force = B_TRUE;
6465 break;
6466 case 'o':
6467 if ((propval = strchr(optarg, '=')) == NULL) {
6468 (void) fprintf(stderr, gettext("missing "
6469 "'=' for -o option\n"));
6470 usage(B_FALSE);
6471 }
6472 *propval = '\0';
6473 propval++;
6474
6475 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
6476 (add_prop_list(optarg, propval, &props, B_TRUE)))
6477 usage(B_FALSE);
6478 break;
6479 case 's':
6480 rebuild = B_TRUE;
6481 break;
6482 case 'w':
6483 wait = B_TRUE;
6484 break;
6485 case '?':
6486 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6487 optopt);
6488 usage(B_FALSE);
6489 }
6490 }
6491
6492 argc -= optind;
6493 argv += optind;
6494
6495 /* get pool name and check number of arguments */
6496 if (argc < 1) {
6497 (void) fprintf(stderr, gettext("missing pool name argument\n"));
6498 usage(B_FALSE);
6499 }
6500
6501 poolname = argv[0];
6502
6503 if (argc < 2) {
6504 (void) fprintf(stderr,
6505 gettext("missing <device> specification\n"));
6506 usage(B_FALSE);
6507 }
6508
6509 old_disk = argv[1];
6510
6511 if (argc < 3) {
6512 if (!replacing) {
6513 (void) fprintf(stderr,
6514 gettext("missing <new_device> specification\n"));
6515 usage(B_FALSE);
6516 }
6517 new_disk = old_disk;
6518 argc -= 1;
6519 argv += 1;
6520 } else {
6521 new_disk = argv[2];
6522 argc -= 2;
6523 argv += 2;
6524 }
6525
6526 if (argc > 1) {
6527 (void) fprintf(stderr, gettext("too many arguments\n"));
6528 usage(B_FALSE);
6529 }
6530
6531 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
6532 nvlist_free(props);
6533 return (1);
6534 }
6535
6536 if (zpool_get_config(zhp, NULL) == NULL) {
6537 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
6538 poolname);
6539 zpool_close(zhp);
6540 nvlist_free(props);
6541 return (1);
6542 }
6543
6544 /* unless manually specified use "ashift" pool property (if set) */
6545 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
6546 int intval;
6547 zprop_source_t src;
6548 char strval[ZPOOL_MAXPROPLEN];
6549
6550 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
6551 if (src != ZPROP_SRC_DEFAULT) {
6552 (void) sprintf(strval, "%" PRId32, intval);
6553 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
6554 &props, B_TRUE) == 0);
6555 }
6556 }
6557
6558 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
6559 argc, argv);
6560 if (nvroot == NULL) {
6561 zpool_close(zhp);
6562 nvlist_free(props);
6563 return (1);
6564 }
6565
6566 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
6567 rebuild);
6568
6569 if (ret == 0 && wait)
6570 ret = zpool_wait(zhp,
6571 replacing ? ZPOOL_WAIT_REPLACE : ZPOOL_WAIT_RESILVER);
6572
6573 nvlist_free(props);
6574 nvlist_free(nvroot);
6575 zpool_close(zhp);
6576
6577 return (ret);
6578 }
6579
6580 /*
6581 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
6582 *
6583 * -f Force attach, even if <new_device> appears to be in use.
6584 * -s Use sequential instead of healing reconstruction for resilver.
6585 * -o Set property=value.
6586 * -w Wait for replacing to complete before returning
6587 *
6588 * Replace <device> with <new_device>.
6589 */
6590 int
6591 zpool_do_replace(int argc, char **argv)
6592 {
6593 return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
6594 }
6595
6596 /*
6597 * zpool attach [-fsw] [-o property=value] <pool> <device> <new_device>
6598 *
6599 * -f Force attach, even if <new_device> appears to be in use.
6600 * -s Use sequential instead of healing reconstruction for resilver.
6601 * -o Set property=value.
6602 * -w Wait for resilvering to complete before returning
6603 *
6604 * Attach <new_device> to the mirror containing <device>. If <device> is not
6605 * part of a mirror, then <device> will be transformed into a mirror of
6606 * <device> and <new_device>. In either case, <new_device> will begin life
6607 * with a DTL of [0, now], and will immediately begin to resilver itself.
6608 */
6609 int
6610 zpool_do_attach(int argc, char **argv)
6611 {
6612 return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
6613 }
6614
6615 /*
6616 * zpool detach [-f] <pool> <device>
6617 *
6618 * -f Force detach of <device>, even if DTLs argue against it
6619 * (not supported yet)
6620 *
6621 * Detach a device from a mirror. The operation will be refused if <device>
6622 * is the last device in the mirror, or if the DTLs indicate that this device
6623 * has the only valid copy of some data.
6624 */
6625 int
6626 zpool_do_detach(int argc, char **argv)
6627 {
6628 int c;
6629 char *poolname, *path;
6630 zpool_handle_t *zhp;
6631 int ret;
6632
6633 /* check options */
6634 while ((c = getopt(argc, argv, "")) != -1) {
6635 switch (c) {
6636 case '?':
6637 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6638 optopt);
6639 usage(B_FALSE);
6640 }
6641 }
6642
6643 argc -= optind;
6644 argv += optind;
6645
6646 /* get pool name and check number of arguments */
6647 if (argc < 1) {
6648 (void) fprintf(stderr, gettext("missing pool name argument\n"));
6649 usage(B_FALSE);
6650 }
6651
6652 if (argc < 2) {
6653 (void) fprintf(stderr,
6654 gettext("missing <device> specification\n"));
6655 usage(B_FALSE);
6656 }
6657
6658 poolname = argv[0];
6659 path = argv[1];
6660
6661 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
6662 return (1);
6663
6664 ret = zpool_vdev_detach(zhp, path);
6665
6666 zpool_close(zhp);
6667
6668 return (ret);
6669 }
6670
6671 /*
 * zpool split [-gLlnP] [-o prop=val] ...
6673 * [-o mntopt] ...
6674 * [-R altroot] <pool> <newpool> [<device> ...]
6675 *
6676 * -g Display guid for individual vdev name.
6677 * -L Follow links when resolving vdev path name.
6678 * -n Do not split the pool, but display the resulting layout if
6679 * it were to be split.
6680 * -o Set property=value, or set mount options.
6681 * -P Display full path for vdev name.
6682 * -R Mount the split-off pool under an alternate root.
6683 * -l Load encryption keys while importing.
6684 *
6685 * Splits the named pool and gives it the new pool name. Devices to be split
6686 * off may be listed, provided that no more than one device is specified
6687 * per top-level vdev mirror. The newly split pool is left in an exported
6688 * state unless -R is specified.
6689 *
 * Restrictions: the top-level of the pool must only be made up of
6691 * mirrors; all devices in the pool must be healthy; no device may be
6692 * undergoing a resilvering operation.
6693 */
6694 int
6695 zpool_do_split(int argc, char **argv)
6696 {
6697 char *srcpool, *newpool, *propval;
6698 char *mntopts = NULL;
6699 splitflags_t flags;
6700 int c, ret = 0;
6701 boolean_t loadkeys = B_FALSE;
6702 zpool_handle_t *zhp;
6703 nvlist_t *config, *props = NULL;
6704
6705 flags.dryrun = B_FALSE;
6706 flags.import = B_FALSE;
6707 flags.name_flags = 0;
6708
6709 /* check options */
6710 while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
6711 switch (c) {
6712 case 'g':
6713 flags.name_flags |= VDEV_NAME_GUID;
6714 break;
6715 case 'L':
6716 flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
6717 break;
6718 case 'R':
6719 flags.import = B_TRUE;
6720 if (add_prop_list(
6721 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
6722 &props, B_TRUE) != 0) {
6723 nvlist_free(props);
6724 usage(B_FALSE);
6725 }
6726 break;
6727 case 'l':
6728 loadkeys = B_TRUE;
6729 break;
6730 case 'n':
6731 flags.dryrun = B_TRUE;
6732 break;
6733 case 'o':
6734 if ((propval = strchr(optarg, '=')) != NULL) {
6735 *propval = '\0';
6736 propval++;
6737 if (add_prop_list(optarg, propval,
6738 &props, B_TRUE) != 0) {
6739 nvlist_free(props);
6740 usage(B_FALSE);
6741 }
6742 } else {
6743 mntopts = optarg;
6744 }
6745 break;
6746 case 'P':
6747 flags.name_flags |= VDEV_NAME_PATH;
6748 break;
6749 case ':':
6750 (void) fprintf(stderr, gettext("missing argument for "
6751 "'%c' option\n"), optopt);
6752 usage(B_FALSE);
6753 break;
6754 case '?':
6755 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6756 optopt);
6757 usage(B_FALSE);
6758 break;
6759 }
6760 }
6761
6762 if (!flags.import && mntopts != NULL) {
6763 (void) fprintf(stderr, gettext("setting mntopts is only "
6764 "valid when importing the pool\n"));
6765 usage(B_FALSE);
6766 }
6767
6768 if (!flags.import && loadkeys) {
6769 (void) fprintf(stderr, gettext("loading keys is only "
6770 "valid when importing the pool\n"));
6771 usage(B_FALSE);
6772 }
6773
6774 argc -= optind;
6775 argv += optind;
6776
6777 if (argc < 1) {
6778 (void) fprintf(stderr, gettext("Missing pool name\n"));
6779 usage(B_FALSE);
6780 }
6781 if (argc < 2) {
6782 (void) fprintf(stderr, gettext("Missing new pool name\n"));
6783 usage(B_FALSE);
6784 }
6785
6786 srcpool = argv[0];
6787 newpool = argv[1];
6788
6789 argc -= 2;
6790 argv += 2;
6791
6792 if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
6793 nvlist_free(props);
6794 return (1);
6795 }
6796
6797 config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
6798 if (config == NULL) {
6799 ret = 1;
6800 } else {
6801 if (flags.dryrun) {
6802 (void) printf(gettext("would create '%s' with the "
6803 "following layout:\n\n"), newpool);
6804 print_vdev_tree(NULL, newpool, config, 0, "",
6805 flags.name_flags);
6806 print_vdev_tree(NULL, "dedup", config, 0,
6807 VDEV_ALLOC_BIAS_DEDUP, 0);
6808 print_vdev_tree(NULL, "special", config, 0,
6809 VDEV_ALLOC_BIAS_SPECIAL, 0);
6810 }
6811 }
6812
6813 zpool_close(zhp);
6814
6815 if (ret != 0 || flags.dryrun || !flags.import) {
6816 nvlist_free(config);
6817 nvlist_free(props);
6818 return (ret);
6819 }
6820
6821 /*
6822 * The split was successful. Now we need to open the new
6823 * pool and import it.
6824 */
6825 if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
6826 nvlist_free(config);
6827 nvlist_free(props);
6828 return (1);
6829 }
6830
6831 if (loadkeys) {
6832 ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
6833 if (ret != 0)
6834 ret = 1;
6835 }
6836
6837 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
6838 zpool_enable_datasets(zhp, mntopts, 0) != 0) {
6839 ret = 1;
6840 (void) fprintf(stderr, gettext("Split was successful, but "
6841 "the datasets could not all be mounted\n"));
6842 (void) fprintf(stderr, gettext("Try doing '%s' with a "
6843 "different altroot\n"), "zpool import");
6844 }
6845 zpool_close(zhp);
6846 nvlist_free(config);
6847 nvlist_free(props);
6848
6849 return (ret);
6850 }
6851
6852
6853
6854 /*
6855 * zpool online <pool> <device> ...
6856 */
6857 int
6858 zpool_do_online(int argc, char **argv)
6859 {
6860 int c, i;
6861 char *poolname;
6862 zpool_handle_t *zhp;
6863 int ret = 0;
6864 vdev_state_t newstate;
6865 int flags = 0;
6866
6867 /* check options */
6868 while ((c = getopt(argc, argv, "e")) != -1) {
6869 switch (c) {
6870 case 'e':
6871 flags |= ZFS_ONLINE_EXPAND;
6872 break;
6873 case '?':
6874 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6875 optopt);
6876 usage(B_FALSE);
6877 }
6878 }
6879
6880 argc -= optind;
6881 argv += optind;
6882
6883 /* get pool name and check number of arguments */
6884 if (argc < 1) {
6885 (void) fprintf(stderr, gettext("missing pool name\n"));
6886 usage(B_FALSE);
6887 }
6888 if (argc < 2) {
6889 (void) fprintf(stderr, gettext("missing device name\n"));
6890 usage(B_FALSE);
6891 }
6892
6893 poolname = argv[0];
6894
6895 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
6896 return (1);
6897
6898 for (i = 1; i < argc; i++) {
6899 if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
6900 if (newstate != VDEV_STATE_HEALTHY) {
6901 (void) printf(gettext("warning: device '%s' "
6902 "onlined, but remains in faulted state\n"),
6903 argv[i]);
6904 if (newstate == VDEV_STATE_FAULTED)
6905 (void) printf(gettext("use 'zpool "
6906 "clear' to restore a faulted "
6907 "device\n"));
6908 else
6909 (void) printf(gettext("use 'zpool "
6910 "replace' to replace devices "
6911 "that are no longer present\n"));
6912 }
6913 } else {
6914 ret = 1;
6915 }
6916 }
6917
6918 zpool_close(zhp);
6919
6920 return (ret);
6921 }
6922
6923 /*
6924 * zpool offline [-ft] <pool> <device> ...
6925 *
6926 * -f Force the device into a faulted state.
6927 *
6928 * -t Only take the device off-line temporarily. The offline/faulted
6929 * state will not be persistent across reboots.
6930 */
6931 int
6932 zpool_do_offline(int argc, char **argv)
6933 {
6934 int c, i;
6935 char *poolname;
6936 zpool_handle_t *zhp;
6937 int ret = 0;
6938 boolean_t istmp = B_FALSE;
6939 boolean_t fault = B_FALSE;
6940
6941 /* check options */
6942 while ((c = getopt(argc, argv, "ft")) != -1) {
6943 switch (c) {
6944 case 'f':
6945 fault = B_TRUE;
6946 break;
6947 case 't':
6948 istmp = B_TRUE;
6949 break;
6950 case '?':
6951 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
6952 optopt);
6953 usage(B_FALSE);
6954 }
6955 }
6956
6957 argc -= optind;
6958 argv += optind;
6959
6960 /* get pool name and check number of arguments */
6961 if (argc < 1) {
6962 (void) fprintf(stderr, gettext("missing pool name\n"));
6963 usage(B_FALSE);
6964 }
6965 if (argc < 2) {
6966 (void) fprintf(stderr, gettext("missing device name\n"));
6967 usage(B_FALSE);
6968 }
6969
6970 poolname = argv[0];
6971
6972 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
6973 return (1);
6974
6975 for (i = 1; i < argc; i++) {
6976 if (fault) {
6977 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
6978 vdev_aux_t aux;
6979 if (istmp == B_FALSE) {
6980 /* Force the fault to persist across imports */
6981 aux = VDEV_AUX_EXTERNAL_PERSIST;
6982 } else {
6983 aux = VDEV_AUX_EXTERNAL;
6984 }
6985
6986 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
6987 ret = 1;
6988 } else {
6989 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
6990 ret = 1;
6991 }
6992 }
6993
6994 zpool_close(zhp);
6995
6996 return (ret);
6997 }
6998
6999 /*
7000 * zpool clear <pool> [device]
7001 *
7002 * Clear all errors associated with a pool or a particular device.
7003 */
7004 int
7005 zpool_do_clear(int argc, char **argv)
7006 {
7007 int c;
7008 int ret = 0;
7009 boolean_t dryrun = B_FALSE;
7010 boolean_t do_rewind = B_FALSE;
7011 boolean_t xtreme_rewind = B_FALSE;
7012 uint32_t rewind_policy = ZPOOL_NO_REWIND;
7013 nvlist_t *policy = NULL;
7014 zpool_handle_t *zhp;
7015 char *pool, *device;
7016
7017 /* check options */
7018 while ((c = getopt(argc, argv, "FnX")) != -1) {
7019 switch (c) {
7020 case 'F':
7021 do_rewind = B_TRUE;
7022 break;
7023 case 'n':
7024 dryrun = B_TRUE;
7025 break;
7026 case 'X':
7027 xtreme_rewind = B_TRUE;
7028 break;
7029 case '?':
7030 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7031 optopt);
7032 usage(B_FALSE);
7033 }
7034 }
7035
7036 argc -= optind;
7037 argv += optind;
7038
7039 if (argc < 1) {
7040 (void) fprintf(stderr, gettext("missing pool name\n"));
7041 usage(B_FALSE);
7042 }
7043
7044 if (argc > 2) {
7045 (void) fprintf(stderr, gettext("too many arguments\n"));
7046 usage(B_FALSE);
7047 }
7048
7049 if ((dryrun || xtreme_rewind) && !do_rewind) {
7050 (void) fprintf(stderr,
7051 gettext("-n or -X only meaningful with -F\n"));
7052 usage(B_FALSE);
7053 }
7054 if (dryrun)
7055 rewind_policy = ZPOOL_TRY_REWIND;
7056 else if (do_rewind)
7057 rewind_policy = ZPOOL_DO_REWIND;
7058 if (xtreme_rewind)
7059 rewind_policy |= ZPOOL_EXTREME_REWIND;
7060
7061 /* In future, further rewind policy choices can be passed along here */
7062 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
7063 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
7064 rewind_policy) != 0) {
7065 return (1);
7066 }
7067
7068 pool = argv[0];
7069 device = argc == 2 ? argv[1] : NULL;
7070
7071 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
7072 nvlist_free(policy);
7073 return (1);
7074 }
7075
7076 if (zpool_clear(zhp, device, policy) != 0)
7077 ret = 1;
7078
7079 zpool_close(zhp);
7080
7081 nvlist_free(policy);
7082
7083 return (ret);
7084 }
7085
7086 /*
7087 * zpool reguid <pool>
7088 */
7089 int
7090 zpool_do_reguid(int argc, char **argv)
7091 {
7092 int c;
7093 char *poolname;
7094 zpool_handle_t *zhp;
7095 int ret = 0;
7096
7097 /* check options */
7098 while ((c = getopt(argc, argv, "")) != -1) {
7099 switch (c) {
7100 case '?':
7101 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7102 optopt);
7103 usage(B_FALSE);
7104 }
7105 }
7106
7107 argc -= optind;
7108 argv += optind;
7109
7110 /* get pool name and check number of arguments */
7111 if (argc < 1) {
7112 (void) fprintf(stderr, gettext("missing pool name\n"));
7113 usage(B_FALSE);
7114 }
7115
7116 if (argc > 1) {
7117 (void) fprintf(stderr, gettext("too many arguments\n"));
7118 usage(B_FALSE);
7119 }
7120
7121 poolname = argv[0];
7122 if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
7123 return (1);
7124
7125 ret = zpool_reguid(zhp);
7126
7127 zpool_close(zhp);
7128 return (ret);
7129 }
7130
7131
7132 /*
 * zpool reopen [-n] [<pool> ...]
 *
 *	-n	Do not restart an in-progress scrub operation.
 *
 * Reopen the pool so that the kernel can update the sizes of all vdevs.
 * If no pool is specified, every imported pool is reopened.
7136 */
7137 int
7138 zpool_do_reopen(int argc, char **argv)
7139 {
7140 int c;
7141 int ret = 0;
7142 boolean_t scrub_restart = B_TRUE;
7143
7144 /* check options */
7145 while ((c = getopt(argc, argv, "n")) != -1) {
7146 switch (c) {
7147 case 'n':
7148 scrub_restart = B_FALSE;
7149 break;
7150 case '?':
7151 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7152 optopt);
7153 usage(B_FALSE);
7154 }
7155 }
7156
7157 argc -= optind;
7158 argv += optind;
7159
7160 /* if argc == 0 we will execute zpool_reopen_one on all pools */
7161 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7162 B_FALSE, zpool_reopen_one, &scrub_restart);
7163
7164 return (ret);
7165 }
7166
7167 typedef struct scrub_cbdata {
7168 int cb_type;
7169 pool_scrub_cmd_t cb_scrub_cmd;
7170 } scrub_cbdata_t;
7171
7172 static boolean_t
7173 zpool_has_checkpoint(zpool_handle_t *zhp)
7174 {
7175 nvlist_t *config, *nvroot;
7176
7177 config = zpool_get_config(zhp, NULL);
7178
7179 if (config != NULL) {
7180 pool_checkpoint_stat_t *pcs = NULL;
7181 uint_t c;
7182
7183 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
7184 (void) nvlist_lookup_uint64_array(nvroot,
7185 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
7186
7187 if (pcs == NULL || pcs->pcs_state == CS_NONE)
7188 return (B_FALSE);
7189
7190 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
7191 pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
7192 return (B_TRUE);
7193 }
7194
7195 return (B_FALSE);
7196 }
7197
7198 static int
7199 scrub_callback(zpool_handle_t *zhp, void *data)
7200 {
7201 scrub_cbdata_t *cb = data;
7202 int err;
7203
7204 /*
7205 * Ignore faulted pools.
7206 */
7207 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
7208 (void) fprintf(stderr, gettext("cannot scan '%s': pool is "
7209 "currently unavailable\n"), zpool_get_name(zhp));
7210 return (1);
7211 }
7212
7213 err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
7214
7215 if (err == 0 && zpool_has_checkpoint(zhp) &&
7216 cb->cb_type == POOL_SCAN_SCRUB) {
7217 (void) printf(gettext("warning: will not scrub state that "
7218 "belongs to the checkpoint of pool '%s'\n"),
7219 zpool_get_name(zhp));
7220 }
7221
7222 return (err != 0);
7223 }
7224
7225 static int
7226 wait_callback(zpool_handle_t *zhp, void *data)
7227 {
7228 zpool_wait_activity_t *act = data;
7229 return (zpool_wait(zhp, *act));
7230 }
7231
7232 /*
7233 * zpool scrub [-s | -p] [-w] <pool> ...
7234 *
7235 * -s Stop. Stops any in-progress scrub.
 *	-p	Pause. Pauses any in-progress scrub.
7237 * -w Wait. Blocks until scrub has completed.
7238 */
7239 int
7240 zpool_do_scrub(int argc, char **argv)
7241 {
7242 int c;
7243 scrub_cbdata_t cb;
7244 boolean_t wait = B_FALSE;
7245 int error;
7246
7247 cb.cb_type = POOL_SCAN_SCRUB;
7248 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7249
7250 /* check options */
7251 while ((c = getopt(argc, argv, "spw")) != -1) {
7252 switch (c) {
7253 case 's':
7254 cb.cb_type = POOL_SCAN_NONE;
7255 break;
7256 case 'p':
7257 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
7258 break;
7259 case 'w':
7260 wait = B_TRUE;
7261 break;
7262 case '?':
7263 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7264 optopt);
7265 usage(B_FALSE);
7266 }
7267 }
7268
7269 if (cb.cb_type == POOL_SCAN_NONE &&
7270 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE) {
7271 (void) fprintf(stderr, gettext("invalid option combination: "
7272 "-s and -p are mutually exclusive\n"));
7273 usage(B_FALSE);
7274 }
7275
7276 if (wait && (cb.cb_type == POOL_SCAN_NONE ||
7277 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
7278 (void) fprintf(stderr, gettext("invalid option combination: "
7279 "-w cannot be used with -p or -s\n"));
7280 usage(B_FALSE);
7281 }
7282
7283 argc -= optind;
7284 argv += optind;
7285
7286 if (argc < 1) {
7287 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7288 usage(B_FALSE);
7289 }
7290
7291 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7292 B_FALSE, scrub_callback, &cb);
7293
7294 if (wait && !error) {
7295 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
7296 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7297 B_FALSE, wait_callback, &act);
7298 }
7299
7300 return (error);
7301 }
7302
7303 /*
7304 * zpool resilver <pool> ...
7305 *
 * Restarts any in-progress resilver.
7307 */
7308 int
7309 zpool_do_resilver(int argc, char **argv)
7310 {
7311 int c;
7312 scrub_cbdata_t cb;
7313
7314 cb.cb_type = POOL_SCAN_RESILVER;
7315 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
7316
7317 /* check options */
7318 while ((c = getopt(argc, argv, "")) != -1) {
7319 switch (c) {
7320 case '?':
7321 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
7322 optopt);
7323 usage(B_FALSE);
7324 }
7325 }
7326
7327 argc -= optind;
7328 argv += optind;
7329
7330 if (argc < 1) {
7331 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7332 usage(B_FALSE);
7333 }
7334
7335 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
7336 B_FALSE, scrub_callback, &cb));
7337 }
7338
7339 /*
7340 * zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...]
7341 *
7342 * -c Cancel. Ends any in-progress trim.
7343 * -d Secure trim. Requires kernel and device support.
7344 * -r <rate> Sets the TRIM rate in bytes (per second). Supports
7345 * adding a multiplier suffix such as 'k' or 'm'.
7346 * -s Suspend. TRIM can then be restarted with no flags.
7347 * -w Wait. Blocks until trimming has completed.
7348 */
7349 int
7350 zpool_do_trim(int argc, char **argv)
7351 {
7352 struct option long_options[] = {
7353 {"cancel", no_argument, NULL, 'c'},
7354 {"secure", no_argument, NULL, 'd'},
7355 {"rate", required_argument, NULL, 'r'},
7356 {"suspend", no_argument, NULL, 's'},
7357 {"wait", no_argument, NULL, 'w'},
7358 {0, 0, 0, 0}
7359 };
7360
7361 pool_trim_func_t cmd_type = POOL_TRIM_START;
7362 uint64_t rate = 0;
7363 boolean_t secure = B_FALSE;
7364 boolean_t wait = B_FALSE;
7365
7366 int c;
7367 while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
7368 != -1) {
7369 switch (c) {
7370 case 'c':
7371 if (cmd_type != POOL_TRIM_START &&
7372 cmd_type != POOL_TRIM_CANCEL) {
7373 (void) fprintf(stderr, gettext("-c cannot be "
7374 "combined with other options\n"));
7375 usage(B_FALSE);
7376 }
7377 cmd_type = POOL_TRIM_CANCEL;
7378 break;
7379 case 'd':
7380 if (cmd_type != POOL_TRIM_START) {
7381 (void) fprintf(stderr, gettext("-d cannot be "
7382 "combined with the -c or -s options\n"));
7383 usage(B_FALSE);
7384 }
7385 secure = B_TRUE;
7386 break;
7387 case 'r':
7388 if (cmd_type != POOL_TRIM_START) {
7389 (void) fprintf(stderr, gettext("-r cannot be "
7390 "combined with the -c or -s options\n"));
7391 usage(B_FALSE);
7392 }
7393 if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) {
7394 (void) fprintf(stderr, "%s: %s\n",
7395 gettext("invalid value for rate"),
7396 libzfs_error_description(g_zfs));
7397 usage(B_FALSE);
7398 }
7399 break;
7400 case 's':
7401 if (cmd_type != POOL_TRIM_START &&
7402 cmd_type != POOL_TRIM_SUSPEND) {
7403 (void) fprintf(stderr, gettext("-s cannot be "
7404 "combined with other options\n"));
7405 usage(B_FALSE);
7406 }
7407 cmd_type = POOL_TRIM_SUSPEND;
7408 break;
7409 case 'w':
7410 wait = B_TRUE;
7411 break;
7412 case '?':
7413 if (optopt != 0) {
7414 (void) fprintf(stderr,
7415 gettext("invalid option '%c'\n"), optopt);
7416 } else {
7417 (void) fprintf(stderr,
7418 gettext("invalid option '%s'\n"),
7419 argv[optind - 1]);
7420 }
7421 usage(B_FALSE);
7422 }
7423 }
7424
7425 argc -= optind;
7426 argv += optind;
7427
7428 if (argc < 1) {
7429 (void) fprintf(stderr, gettext("missing pool name argument\n"));
7430 usage(B_FALSE);
7431 return (-1);
7432 }
7433
7434 if (wait && (cmd_type != POOL_TRIM_START)) {
7435 (void) fprintf(stderr, gettext("-w cannot be used with -c or "
7436 "-s\n"));
7437 usage(B_FALSE);
7438 }
7439
7440 char *poolname = argv[0];
7441 zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
7442 if (zhp == NULL)
7443 return (-1);
7444
7445 trimflags_t trim_flags = {
7446 .secure = secure,
7447 .rate = rate,
7448 .wait = wait,
7449 };
7450
7451 nvlist_t *vdevs = fnvlist_alloc();
7452 if (argc == 1) {
7453 /* no individual leaf vdevs specified, so add them all */
7454 nvlist_t *config = zpool_get_config(zhp, NULL);
7455 nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
7456 ZPOOL_CONFIG_VDEV_TREE);
7457 zpool_collect_leaves(zhp, nvroot, vdevs);
7458 trim_flags.fullpool = B_TRUE;
7459 } else {
7460 trim_flags.fullpool = B_FALSE;
7461 for (int i = 1; i < argc; i++) {
7462 fnvlist_add_boolean(vdevs, argv[i]);
7463 }
7464 }
7465
7466 int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
7467
7468 fnvlist_free(vdevs);
7469 zpool_close(zhp);
7470
7471 return (error);
7472 }
7473
7474 /*
 * Converts a total number of seconds to a human-readable string broken
 * down into days/hours/minutes/seconds.
7477 */
7478 static void
7479 secs_to_dhms(uint64_t total, char *buf)
7480 {
7481 uint64_t days = total / 60 / 60 / 24;
7482 uint64_t hours = (total / 60 / 60) % 24;
7483 uint64_t mins = (total / 60) % 60;
7484 uint64_t secs = (total % 60);
7485
7486 if (days > 0) {
7487 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
7488 (u_longlong_t)days, (u_longlong_t)hours,
7489 (u_longlong_t)mins, (u_longlong_t)secs);
7490 } else {
7491 (void) sprintf(buf, "%02llu:%02llu:%02llu",
7492 (u_longlong_t)hours, (u_longlong_t)mins,
7493 (u_longlong_t)secs);
7494 }
7495 }
7496
7497 /*
7498 * Print out detailed scrub status.
7499 */
7500 static void
7501 print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
7502 {
7503 time_t start, end, pause;
7504 uint64_t pass_scanned, scanned, pass_issued, issued, total;
7505 uint64_t elapsed, scan_rate, issue_rate;
7506 double fraction_done;
7507 char processed_buf[7], scanned_buf[7], issued_buf[7], total_buf[7];
7508 char srate_buf[7], irate_buf[7], time_buf[32];
7509
7510 printf(" ");
7511 printf_color(ANSI_BOLD, gettext("scan:"));
7512 printf(" ");
7513
7514 /* If there's never been a scan, there's not much to say. */
7515 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
7516 ps->pss_func >= POOL_SCAN_FUNCS) {
7517 (void) printf(gettext("none requested\n"));
7518 return;
7519 }
7520
7521 start = ps->pss_start_time;
7522 end = ps->pss_end_time;
7523 pause = ps->pss_pass_scrub_pause;
7524
7525 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
7526
7527 assert(ps->pss_func == POOL_SCAN_SCRUB ||
7528 ps->pss_func == POOL_SCAN_RESILVER);
7529
7530 /* Scan is finished or canceled. */
7531 if (ps->pss_state == DSS_FINISHED) {
7532 secs_to_dhms(end - start, time_buf);
7533
7534 if (ps->pss_func == POOL_SCAN_SCRUB) {
7535 (void) printf(gettext("scrub repaired %s "
7536 "in %s with %llu errors on %s"), processed_buf,
7537 time_buf, (u_longlong_t)ps->pss_errors,
7538 ctime(&end));
7539 } else if (ps->pss_func == POOL_SCAN_RESILVER) {
7540 (void) printf(gettext("resilvered %s "
7541 "in %s with %llu errors on %s"), processed_buf,
7542 time_buf, (u_longlong_t)ps->pss_errors,
7543 ctime(&end));
7544 }
7545 return;
7546 } else if (ps->pss_state == DSS_CANCELED) {
7547 if (ps->pss_func == POOL_SCAN_SCRUB) {
7548 (void) printf(gettext("scrub canceled on %s"),
7549 ctime(&end));
7550 } else if (ps->pss_func == POOL_SCAN_RESILVER) {
7551 (void) printf(gettext("resilver canceled on %s"),
7552 ctime(&end));
7553 }
7554 return;
7555 }
7556
7557 assert(ps->pss_state == DSS_SCANNING);
7558
7559 /* Scan is in progress. Resilvers can't be paused. */
7560 if (ps->pss_func == POOL_SCAN_SCRUB) {
7561 if (pause == 0) {
7562 (void) printf(gettext("scrub in progress since %s"),
7563 ctime(&start));
7564 } else {
7565 (void) printf(gettext("scrub paused since %s"),
7566 ctime(&pause));
7567 (void) printf(gettext("\tscrub started on %s"),
7568 ctime(&start));
7569 }
7570 } else if (ps->pss_func == POOL_SCAN_RESILVER) {
7571 (void) printf(gettext("resilver in progress since %s"),
7572 ctime(&start));
7573 }
7574
7575 scanned = ps->pss_examined;
7576 pass_scanned = ps->pss_pass_exam;
7577 issued = ps->pss_issued;
7578 pass_issued = ps->pss_pass_issued;
7579 total = ps->pss_to_examine;
7580
7581 /* we are only done with a block once we have issued the IO for it */
7582 fraction_done = (double)issued / total;
7583
7584 /* elapsed time for this pass, rounding up to 1 if it's 0 */
7585 elapsed = time(NULL) - ps->pss_pass_start;
7586 elapsed -= ps->pss_pass_scrub_spent_paused;
7587 elapsed = (elapsed != 0) ? elapsed : 1;
7588
7589 scan_rate = pass_scanned / elapsed;
7590 issue_rate = pass_issued / elapsed;
7591 uint64_t total_secs_left = (issue_rate != 0 && total >= issued) ?
7592 ((total - issued) / issue_rate) : UINT64_MAX;
7593 secs_to_dhms(total_secs_left, time_buf);
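	/*
	 * For example (illustrative numbers): with 60 GiB still to issue at
	 * an issue rate of 100 MiB/s, roughly 614 seconds remain.
	 */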
7594
7595 /* format all of the numbers we will be reporting */
7596 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
7597 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
7598 zfs_nicebytes(total, total_buf, sizeof (total_buf));
7599 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
7600 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
7601
7602 /* do not print estimated time if we have a paused scrub */
7603 if (pause == 0) {
7604 (void) printf(gettext("\t%s scanned at %s/s, "
7605 "%s issued at %s/s, %s total\n"),
7606 scanned_buf, srate_buf, issued_buf, irate_buf, total_buf);
7607 } else {
7608 (void) printf(gettext("\t%s scanned, %s issued, %s total\n"),
7609 scanned_buf, issued_buf, total_buf);
7610 }
7611
7612 if (ps->pss_func == POOL_SCAN_RESILVER) {
7613 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
7614 processed_buf, 100 * fraction_done);
7615 } else if (ps->pss_func == POOL_SCAN_SCRUB) {
7616 (void) printf(gettext("\t%s repaired, %.2f%% done"),
7617 processed_buf, 100 * fraction_done);
7618 }
7619
7620 if (pause == 0) {
7621 if (total_secs_left != UINT64_MAX &&
7622 issue_rate >= 10 * 1024 * 1024) {
7623 (void) printf(gettext(", %s to go\n"), time_buf);
7624 } else {
7625 (void) printf(gettext(", no estimated "
7626 "completion time\n"));
7627 }
7628 } else {
7629 (void) printf(gettext("\n"));
7630 }
7631 }
7632
7633 static void
7634 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, char *vdev_name)
7635 {
7636 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
7637 return;
7638
7639 printf(" ");
7640 printf_color(ANSI_BOLD, gettext("scan:"));
7641 printf(" ");
7642
7643 uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
7644 uint64_t bytes_issued = vrs->vrs_bytes_issued;
7645 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
7646 uint64_t bytes_est = vrs->vrs_bytes_est;
7647 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
7648 (vrs->vrs_pass_time_ms + 1)) * 1000;
7649 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
7650 (vrs->vrs_pass_time_ms + 1)) * 1000;
7651 double scan_pct = MIN((double)bytes_scanned * 100 /
7652 (bytes_est + 1), 100);
7653
7654 /* Format all of the numbers we will be reporting */
7655 char bytes_scanned_buf[7], bytes_issued_buf[7];
7656 char bytes_rebuilt_buf[7], bytes_est_buf[7];
7657 char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
7658 zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
7659 sizeof (bytes_scanned_buf));
7660 zfs_nicebytes(bytes_issued, bytes_issued_buf,
7661 sizeof (bytes_issued_buf));
7662 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
7663 sizeof (bytes_rebuilt_buf));
7664 zfs_nicebytes(bytes_est, bytes_est_buf, sizeof (bytes_est_buf));
7665 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
7666 zfs_nicebytes(issue_rate, issue_rate_buf, sizeof (issue_rate_buf));
7667
7668 time_t start = vrs->vrs_start_time;
7669 time_t end = vrs->vrs_end_time;
7670
7671 /* Rebuild is finished or canceled. */
7672 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
7673 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
7674 (void) printf(gettext("resilvered (%s) %s in %s "
7675 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
7676 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
7677 return;
7678 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
7679 (void) printf(gettext("resilver (%s) canceled on %s"),
7680 vdev_name, ctime(&end));
7681 return;
7682 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
7683 (void) printf(gettext("resilver (%s) in progress since %s"),
7684 vdev_name, ctime(&start));
7685 }
7686
7687 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
7688
7689 secs_to_dhms(MAX((int64_t)bytes_est - (int64_t)bytes_scanned, 0) /
7690 MAX(scan_rate, 1), time_buf);
7691
7692 (void) printf(gettext("\t%s scanned at %s/s, %s issued %s/s, "
7693 "%s total\n"), bytes_scanned_buf, scan_rate_buf,
7694 bytes_issued_buf, issue_rate_buf, bytes_est_buf);
7695 (void) printf(gettext("\t%s resilvered, %.2f%% done"),
7696 bytes_rebuilt_buf, scan_pct);
7697
7698 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
7699 if (scan_rate >= 10 * 1024 * 1024) {
7700 (void) printf(gettext(", %s to go\n"), time_buf);
7701 } else {
7702 (void) printf(gettext(", no estimated "
7703 "completion time\n"));
7704 }
7705 } else {
7706 (void) printf(gettext("\n"));
7707 }
7708 }
7709
7710 /*
7711 * Print rebuild status for top-level vdevs.
7712 */
7713 static void
7714 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
7715 {
7716 nvlist_t **child;
7717 uint_t children;
7718
7719 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
7720 &child, &children) != 0)
7721 children = 0;
7722
7723 for (uint_t c = 0; c < children; c++) {
7724 vdev_rebuild_stat_t *vrs;
7725 uint_t i;
7726
7727 if (nvlist_lookup_uint64_array(child[c],
7728 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
7729 char *name = zpool_vdev_name(g_zfs, zhp,
7730 child[c], VDEV_NAME_TYPE_ID);
7731 print_rebuild_status_impl(vrs, name);
7732 free(name);
7733 }
7734 }
7735 }
7736
7737 /*
7738 * As we don't scrub checkpointed blocks, we want to warn the user that we
7739 * skipped scanning some blocks if a checkpoint exists or existed at any
 * time during the scan. If a sequential reconstruction (rebuild) was
 * performed instead of a healing one, the blocks were reconstructed but
 * their checksums have not been verified, so we still print the warning.
7743 */
7744 static void
7745 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
7746 {
7747 if (ps == NULL || pcs == NULL)
7748 return;
7749
7750 if (pcs->pcs_state == CS_NONE ||
7751 pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
7752 return;
7753
7754 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
7755
7756 if (ps->pss_state == DSS_NONE)
7757 return;
7758
7759 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
7760 ps->pss_end_time < pcs->pcs_start_time)
7761 return;
7762
7763 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
7764 (void) printf(gettext(" scan warning: skipped blocks "
7765 "that are only referenced by the checkpoint.\n"));
7766 } else {
7767 assert(ps->pss_state == DSS_SCANNING);
7768 (void) printf(gettext(" scan warning: skipping blocks "
7769 "that are only referenced by the checkpoint.\n"));
7770 }
7771 }
7772
7773 /*
7774 * Returns B_TRUE if there is an active rebuild in progress. Otherwise,
7775 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for
 * the last completed (or canceled) rebuild.
7777 */
7778 static boolean_t
7779 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
7780 {
7781 nvlist_t **child;
7782 uint_t children;
7783 boolean_t rebuilding = B_FALSE;
7784 uint64_t end_time = 0;
7785
7786 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
7787 &child, &children) != 0)
7788 children = 0;
7789
7790 for (uint_t c = 0; c < children; c++) {
7791 vdev_rebuild_stat_t *vrs;
7792 uint_t i;
7793
7794 if (nvlist_lookup_uint64_array(child[c],
7795 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
7796
7797 if (vrs->vrs_end_time > end_time)
7798 end_time = vrs->vrs_end_time;
7799
7800 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
7801 rebuilding = B_TRUE;
7802 end_time = 0;
7803 break;
7804 }
7805 }
7806 }
7807
7808 if (rebuild_end_time != NULL)
7809 *rebuild_end_time = end_time;
7810
7811 return (rebuilding);
7812 }
7813
7814 /*
7815 * Print the scan status.
7816 */
7817 static void
7818 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
7819 {
7820 uint64_t rebuild_end_time = 0, resilver_end_time = 0;
7821 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
7822 boolean_t active_resilver = B_FALSE;
7823 pool_checkpoint_stat_t *pcs = NULL;
7824 pool_scan_stat_t *ps = NULL;
7825 uint_t c;
7826
7827 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
7828 (uint64_t **)&ps, &c) == 0) {
7829 if (ps->pss_func == POOL_SCAN_RESILVER) {
7830 resilver_end_time = ps->pss_end_time;
7831 active_resilver = (ps->pss_state == DSS_SCANNING);
7832 }
7833
7834 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
7835 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
7836 }
7837
7838 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
7839 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
7840
7841 /* Always print the scrub status when available. */
7842 if (have_scrub)
7843 print_scan_scrub_resilver_status(ps);
7844
7845 /*
7846 * When there is an active resilver or rebuild print its status.
7847 * Otherwise print the status of the last resilver or rebuild.
7848 */
7849 if (active_resilver || (!active_rebuild && have_resilver &&
7850 resilver_end_time && resilver_end_time > rebuild_end_time)) {
7851 print_scan_scrub_resilver_status(ps);
7852 } else if (active_rebuild || (!active_resilver && have_rebuild &&
7853 rebuild_end_time && rebuild_end_time > resilver_end_time)) {
7854 print_rebuild_status(zhp, nvroot);
7855 }
7856
7857 (void) nvlist_lookup_uint64_array(nvroot,
7858 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
7859 print_checkpoint_scan_warning(ps, pcs);
7860 }
7861
7862 /*
7863 * Print out detailed removal status.
7864 */
7865 static void
7866 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
7867 {
7868 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
7869 time_t start, end;
7870 nvlist_t *config, *nvroot;
7871 nvlist_t **child;
7872 uint_t children;
7873 char *vdev_name;
7874
7875 if (prs == NULL || prs->prs_state == DSS_NONE)
7876 return;
7877
7878 /*
7879 * Determine name of vdev.
7880 */
7881 config = zpool_get_config(zhp, NULL);
7882 nvroot = fnvlist_lookup_nvlist(config,
7883 ZPOOL_CONFIG_VDEV_TREE);
7884 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
7885 &child, &children) == 0);
7886 assert(prs->prs_removing_vdev < children);
7887 vdev_name = zpool_vdev_name(g_zfs, zhp,
7888 child[prs->prs_removing_vdev], B_TRUE);
7889
7890 printf_color(ANSI_BOLD, gettext("remove: "));
7891
7892 start = prs->prs_start_time;
7893 end = prs->prs_end_time;
7894 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
7895
7896 /*
7897 * Removal is finished or canceled.
7898 */
7899 if (prs->prs_state == DSS_FINISHED) {
7900 uint64_t minutes_taken = (end - start) / 60;
7901
7902 (void) printf(gettext("Removal of vdev %llu copied %s "
7903 "in %lluh%um, completed on %s"),
7904 (longlong_t)prs->prs_removing_vdev,
7905 copied_buf,
7906 (u_longlong_t)(minutes_taken / 60),
7907 (uint_t)(minutes_taken % 60),
7908 ctime((time_t *)&end));
7909 } else if (prs->prs_state == DSS_CANCELED) {
7910 (void) printf(gettext("Removal of %s canceled on %s"),
7911 vdev_name, ctime(&end));
7912 } else {
7913 uint64_t copied, total, elapsed, mins_left, hours_left;
7914 double fraction_done;
7915 uint_t rate;
7916
7917 assert(prs->prs_state == DSS_SCANNING);
7918
7919 /*
7920 * Removal is in progress.
7921 */
7922 (void) printf(gettext(
7923 "Evacuation of %s in progress since %s"),
7924 vdev_name, ctime(&start));
7925
7926 copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
7927 total = prs->prs_to_copy;
7928 fraction_done = (double)copied / total;
7929
7930 /* elapsed time for this pass */
7931 elapsed = time(NULL) - prs->prs_start_time;
7932 elapsed = elapsed > 0 ? elapsed : 1;
7933 rate = copied / elapsed;
7934 rate = rate > 0 ? rate : 1;
7935 mins_left = ((total - copied) / rate) / 60;
7936 hours_left = mins_left / 60;
7937
7938 zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
7939 zfs_nicenum(total, total_buf, sizeof (total_buf));
7940 zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
7941
7942 /*
		 * Do not print the estimated time if more than 30 days
		 * remain.
7945 */
7946 (void) printf(gettext(
7947 "\t%s copied out of %s at %s/s, %.2f%% done"),
7948 examined_buf, total_buf, rate_buf, 100 * fraction_done);
7949 if (hours_left < (30 * 24)) {
7950 (void) printf(gettext(", %lluh%um to go\n"),
7951 (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
7952 } else {
7953 (void) printf(gettext(
7954 ", (copy is slow, no estimated time)\n"));
7955 }
7956 }
7957 free(vdev_name);
7958
7959 if (prs->prs_mapping_memory > 0) {
7960 char mem_buf[7];
7961 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
7962 (void) printf(gettext(
7963 "\t%s memory used for removed device mappings\n"),
7964 mem_buf);
7965 }
7966 }
7967
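/*
 * Print pool checkpoint status: when the checkpoint was created and how much
 * space it consumes, or, while discarding, how much space remains.
 */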
7968 static void
7969 print_checkpoint_status(pool_checkpoint_stat_t *pcs)
7970 {
7971 time_t start;
7972 char space_buf[7];
7973
7974 if (pcs == NULL || pcs->pcs_state == CS_NONE)
7975 return;
7976
7977 (void) printf(gettext("checkpoint: "));
7978
7979 start = pcs->pcs_start_time;
7980 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
7981
7982 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
7983 char *date = ctime(&start);
7984
7985 /*
7986 * ctime() adds a newline at the end of the generated
7987 * string, thus the weird format specifier and the
7988 * strlen() call used to chop it off from the output.
7989 */
7990 (void) printf(gettext("created %.*s, consumes %s\n"),
7991 (int)(strlen(date) - 1), date, space_buf);
7992 return;
7993 }
7994
7995 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
7996
7997 (void) printf(gettext("discarding, %s remaining.\n"),
7998 space_buf);
7999 }
8000
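/*
 * Print the permanent errors recorded in the pool's persistent error log,
 * resolving each <dataset, object> pair to a path name where possible.
 */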
8001 static void
8002 print_error_log(zpool_handle_t *zhp)
8003 {
8004 nvlist_t *nverrlist = NULL;
8005 nvpair_t *elem;
8006 char *pathname;
8007 size_t len = MAXPATHLEN * 2;
8008
8009 if (zpool_get_errlog(zhp, &nverrlist) != 0)
8010 return;
8011
8012 (void) printf("errors: Permanent errors have been "
8013 "detected in the following files:\n\n");
8014
8015 pathname = safe_malloc(len);
8016 elem = NULL;
8017 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
8018 nvlist_t *nv;
8019 uint64_t dsobj, obj;
8020
8021 verify(nvpair_value_nvlist(elem, &nv) == 0);
8022 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
8023 &dsobj) == 0);
8024 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
8025 &obj) == 0);
8026 zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
8027 (void) printf("%7s %s\n", "", pathname);
8028 }
8029 free(pathname);
8030 nvlist_free(nverrlist);
8031 }
8032
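/*
 * Print the status of each configured hot spare.
 */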
8033 static void
8034 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
8035 uint_t nspares)
8036 {
8037 uint_t i;
8038 char *name;
8039
8040 if (nspares == 0)
8041 return;
8042
8043 (void) printf(gettext("\tspares\n"));
8044
8045 for (i = 0; i < nspares; i++) {
8046 name = zpool_vdev_name(g_zfs, zhp, spares[i],
8047 cb->cb_name_flags);
8048 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
8049 free(name);
8050 }
8051 }
8052
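/*
 * Print the status of each configured cache (L2ARC) device.
 */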
8053 static void
8054 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
8055 uint_t nl2cache)
8056 {
8057 uint_t i;
8058 char *name;
8059
8060 if (nl2cache == 0)
8061 return;
8062
8063 (void) printf(gettext("\tcache\n"));
8064
8065 for (i = 0; i < nl2cache; i++) {
8066 name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
8067 cb->cb_name_flags);
8068 print_status_config(zhp, cb, name, l2cache[i], 2,
8069 B_FALSE, NULL);
8070 free(name);
8071 }
8072 }
8073
8074 static void
8075 print_dedup_stats(nvlist_t *config)
8076 {
8077 ddt_histogram_t *ddh;
8078 ddt_stat_t *dds;
8079 ddt_object_t *ddo;
8080 uint_t c;
8081 char dspace[6], mspace[6];
8082
8083 /*
	 * If the pool was faulted then we may not have been able to
	 * obtain the config. Otherwise, if we have anything in the dedup
	 * table, continue processing the stats.
8087 */
8088 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
8089 (uint64_t **)&ddo, &c) != 0)
8090 return;
8091
8092 (void) printf("\n");
8093 (void) printf(gettext(" dedup: "));
8094 if (ddo->ddo_count == 0) {
8095 (void) printf(gettext("no DDT entries\n"));
8096 return;
8097 }
8098
8099 zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace));
8100 zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace));
8101 (void) printf("DDT entries %llu, size %s on disk, %s in core\n",
8102 (u_longlong_t)ddo->ddo_count,
8103 dspace,
8104 mspace);
8105
8106 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
8107 (uint64_t **)&dds, &c) == 0);
8108 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
8109 (uint64_t **)&ddh, &c) == 0);
8110 zpool_dump_ddt(dds, ddh);
8111 }
8112
8113 /*
8114 * Display a summary of pool status. Displays a summary such as:
8115 *
8116 * pool: tank
 *	 state: DEGRADED
 *	status: One or more devices ...
8119 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
8120 * config:
8121 * mirror DEGRADED
8122 * c1t0d0 OK
8123 * c2t0d0 UNAVAIL
8124 *
 * When given the '-v' option, we print out the complete config and the
 * list of files with permanent errors.
8127 */
8128 static int
8129 status_callback(zpool_handle_t *zhp, void *data)
8130 {
8131 status_cbdata_t *cbp = data;
8132 nvlist_t *config, *nvroot;
8133 const char *msgid;
8134 zpool_status_t reason;
8135 zpool_errata_t errata;
8136 const char *health;
8137 uint_t c;
8138 vdev_stat_t *vs;
8139
8140 config = zpool_get_config(zhp, NULL);
8141 reason = zpool_get_status(zhp, &msgid, &errata);
8142
8143 cbp->cb_count++;
8144
8145 /*
8146 * If we were given 'zpool status -x', only report those pools with
8147 * problems.
8148 */
8149 if (cbp->cb_explain &&
8150 (reason == ZPOOL_STATUS_OK ||
8151 reason == ZPOOL_STATUS_VERSION_OLDER ||
8152 reason == ZPOOL_STATUS_FEAT_DISABLED ||
8153 reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
8154 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
8155 if (!cbp->cb_allpools) {
8156 (void) printf(gettext("pool '%s' is healthy\n"),
8157 zpool_get_name(zhp));
8158 if (cbp->cb_first)
8159 cbp->cb_first = B_FALSE;
8160 }
8161 return (0);
8162 }
8163
8164 if (cbp->cb_first)
8165 cbp->cb_first = B_FALSE;
8166 else
8167 (void) printf("\n");
8168
8169 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
8170 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
8171 (uint64_t **)&vs, &c) == 0);
8172
8173 health = zpool_get_state_str(zhp);
8174
8175 printf(" ");
8176 printf_color(ANSI_BOLD, gettext("pool:"));
8177 printf(" %s\n", zpool_get_name(zhp));
8178 fputc(' ', stdout);
8179 printf_color(ANSI_BOLD, gettext("state: "));
8180
8181 printf_color(health_str_to_color(health), "%s", health);
8182
8183 fputc('\n', stdout);
8184
8185 switch (reason) {
8186 case ZPOOL_STATUS_MISSING_DEV_R:
8187 printf_color(ANSI_BOLD, gettext("status: "));
8188 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8189 "not be opened. Sufficient replicas exist for\n\tthe pool "
8190 "to continue functioning in a degraded state.\n"));
8191 printf_color(ANSI_BOLD, gettext("action: "));
8192 printf_color(ANSI_YELLOW, gettext("Attach the missing device "
8193 "and online it using 'zpool online'.\n"));
8194 break;
8195
8196 case ZPOOL_STATUS_MISSING_DEV_NR:
8197 printf_color(ANSI_BOLD, gettext("status: "));
8198 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8199 "not be opened. There are insufficient\n\treplicas for the"
8200 " pool to continue functioning.\n"));
8201 printf_color(ANSI_BOLD, gettext("action: "));
8202 printf_color(ANSI_YELLOW, gettext("Attach the missing device "
8203 "and online it using 'zpool online'.\n"));
8204 break;
8205
8206 case ZPOOL_STATUS_CORRUPT_LABEL_R:
8207 printf_color(ANSI_BOLD, gettext("status: "));
8208 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8209 "not be used because the label is missing or\n\tinvalid. "
8210 "Sufficient replicas exist for the pool to continue\n\t"
8211 "functioning in a degraded state.\n"));
8212 printf_color(ANSI_BOLD, gettext("action: "));
8213 printf_color(ANSI_YELLOW, gettext("Replace the device using "
8214 "'zpool replace'.\n"));
8215 break;
8216
8217 case ZPOOL_STATUS_CORRUPT_LABEL_NR:
8218 printf_color(ANSI_BOLD, gettext("status: "));
8219 printf_color(ANSI_YELLOW, gettext("One or more devices could "
8220 "not be used because the label is missing \n\tor invalid. "
8221 "There are insufficient replicas for the pool to "
8222 "continue\n\tfunctioning.\n"));
8223 zpool_explain_recover(zpool_get_handle(zhp),
8224 zpool_get_name(zhp), reason, config);
8225 break;
8226
8227 case ZPOOL_STATUS_FAILING_DEV:
8228 printf_color(ANSI_BOLD, gettext("status: "));
8229 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8230 "experienced an unrecoverable error. An\n\tattempt was "
8231 "made to correct the error. Applications are "
8232 "unaffected.\n"));
8233 printf_color(ANSI_BOLD, gettext("action: "));
8234 printf_color(ANSI_YELLOW, gettext("Determine if the "
8235 "device needs to be replaced, and clear the errors\n\tusing"
8236 " 'zpool clear' or replace the device with 'zpool "
8237 "replace'.\n"));
8238 break;
8239
8240 case ZPOOL_STATUS_OFFLINE_DEV:
8241 printf_color(ANSI_BOLD, gettext("status: "));
8242 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8243 "been taken offline by the administrator.\n\tSufficient "
8244 "replicas exist for the pool to continue functioning in "
8245 "a\n\tdegraded state.\n"));
8246 printf_color(ANSI_BOLD, gettext("action: "));
8247 printf_color(ANSI_YELLOW, gettext("Online the device "
8248 "using 'zpool online' or replace the device with\n\t'zpool "
8249 "replace'.\n"));
8250 break;
8251
8252 case ZPOOL_STATUS_REMOVED_DEV:
8253 printf_color(ANSI_BOLD, gettext("status: "));
8254 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8255 "been removed by the administrator.\n\tSufficient "
8256 "replicas exist for the pool to continue functioning in "
8257 "a\n\tdegraded state.\n"));
8258 printf_color(ANSI_BOLD, gettext("action: "));
8259 printf_color(ANSI_YELLOW, gettext("Online the device "
		    "using 'zpool online' or replace the device with\n\t'zpool "
8261 "replace'.\n"));
8262 break;
8263
8264 case ZPOOL_STATUS_RESILVERING:
8265 case ZPOOL_STATUS_REBUILDING:
8266 printf_color(ANSI_BOLD, gettext("status: "));
8267 printf_color(ANSI_YELLOW, gettext("One or more devices is "
8268 "currently being resilvered. The pool will\n\tcontinue "
8269 "to function, possibly in a degraded state.\n"));
8270 printf_color(ANSI_BOLD, gettext("action: "));
8271 printf_color(ANSI_YELLOW, gettext("Wait for the resilver to "
8272 "complete.\n"));
8273 break;
8274
8275 case ZPOOL_STATUS_REBUILD_SCRUB:
8276 printf_color(ANSI_BOLD, gettext("status: "));
8277 printf_color(ANSI_YELLOW, gettext("One or more devices have "
8278 "been sequentially resilvered, scrubbing\n\tthe pool "
8279 "is recommended.\n"));
8280 printf_color(ANSI_BOLD, gettext("action: "));
8281 printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to "
8282 "verify all data checksums.\n"));
8283 break;
8284
8285 case ZPOOL_STATUS_CORRUPT_DATA:
8286 printf_color(ANSI_BOLD, gettext("status: "));
8287 printf_color(ANSI_YELLOW, gettext("One or more devices has "
8288 "experienced an error resulting in data\n\tcorruption. "
8289 "Applications may be affected.\n"));
8290 printf_color(ANSI_BOLD, gettext("action: "));
8291 printf_color(ANSI_YELLOW, gettext("Restore the file in question"
8292 " if possible. Otherwise restore the\n\tentire pool from "
8293 "backup.\n"));
8294 break;
8295
8296 case ZPOOL_STATUS_CORRUPT_POOL:
8297 printf_color(ANSI_BOLD, gettext("status: "));
8298 printf_color(ANSI_YELLOW, gettext("The pool metadata is "
8299 "corrupted and the pool cannot be opened.\n"));
8300 zpool_explain_recover(zpool_get_handle(zhp),
8301 zpool_get_name(zhp), reason, config);
8302 break;
8303
8304 case ZPOOL_STATUS_VERSION_OLDER:
8305 printf_color(ANSI_BOLD, gettext("status: "));
8306 printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
8307 "a legacy on-disk format. The pool can\n\tstill be used, "
8308 "but some features are unavailable.\n"));
8309 printf_color(ANSI_BOLD, gettext("action: "));
8310 printf_color(ANSI_YELLOW, gettext("Upgrade the pool using "
8311 "'zpool upgrade'. Once this is done, the\n\tpool will no "
8312 "longer be accessible on software that does not support\n\t"
8313 "feature flags.\n"));
8314 break;
8315
8316 case ZPOOL_STATUS_VERSION_NEWER:
8317 printf_color(ANSI_BOLD, gettext("status: "));
8318 printf_color(ANSI_YELLOW, gettext("The pool has been upgraded "
8319 "to a newer, incompatible on-disk version.\n\tThe pool "
8320 "cannot be accessed on this system.\n"));
8321 printf_color(ANSI_BOLD, gettext("action: "));
8322 printf_color(ANSI_YELLOW, gettext("Access the pool from a "
8323 "system running more recent software, or\n\trestore the "
8324 "pool from backup.\n"));
8325 break;
8326
8327 case ZPOOL_STATUS_FEAT_DISABLED:
8328 printf_color(ANSI_BOLD, gettext("status: "));
8329 printf_color(ANSI_YELLOW, gettext("Some supported and "
8330 "requested features are not enabled on the pool.\n\t"
8331 "The pool can still be used, but some features are "
8332 "unavailable.\n"));
8333 printf_color(ANSI_BOLD, gettext("action: "));
8334 printf_color(ANSI_YELLOW, gettext("Enable all features using "
8335 "'zpool upgrade'. Once this is done,\n\tthe pool may no "
8336 "longer be accessible by software that does not support\n\t"
8337 "the features. See zpool-features(7) for details.\n"));
8338 break;
8339
8340 case ZPOOL_STATUS_COMPATIBILITY_ERR:
8341 printf_color(ANSI_BOLD, gettext("status: "));
8342 printf_color(ANSI_YELLOW, gettext("This pool has a "
8343 "compatibility list specified, but it could not be\n\t"
8344 "read/parsed at this time. The pool can still be used, "
8345 "but this\n\tshould be investigated.\n"));
8346 printf_color(ANSI_BOLD, gettext("action: "));
8347 printf_color(ANSI_YELLOW, gettext("Check the value of the "
8348 "'compatibility' property against the\n\t"
8349 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
8350 ZPOOL_DATA_COMPAT_D ".\n"));
8351 break;
8352
8353 case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
8354 printf_color(ANSI_BOLD, gettext("status: "));
8355 printf_color(ANSI_YELLOW, gettext("One or more features "
8356 "are enabled on the pool despite not being\n\t"
8357 "requested by the 'compatibility' property.\n"));
8358 printf_color(ANSI_BOLD, gettext("action: "));
8359 printf_color(ANSI_YELLOW, gettext("Consider setting "
8360 "'compatibility' to an appropriate value, or\n\t"
8361 "adding needed features to the relevant file in\n\t"
8362 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
8363 break;
8364
8365 case ZPOOL_STATUS_UNSUP_FEAT_READ:
8366 printf_color(ANSI_BOLD, gettext("status: "));
8367 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
8368 "on this system because it uses the\n\tfollowing feature(s)"
8369 " not supported on this system:\n"));
8370 zpool_print_unsup_feat(config);
8371 (void) printf("\n");
8372 printf_color(ANSI_BOLD, gettext("action: "));
8373 printf_color(ANSI_YELLOW, gettext("Access the pool from a "
8374 "system that supports the required feature(s),\n\tor "
8375 "restore the pool from backup.\n"));
8376 break;
8377
8378 case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
8379 printf_color(ANSI_BOLD, gettext("status: "));
8380 printf_color(ANSI_YELLOW, gettext("The pool can only be "
8381 "accessed in read-only mode on this system. It\n\tcannot be"
8382 " accessed in read-write mode because it uses the "
8383 "following\n\tfeature(s) not supported on this system:\n"));
8384 zpool_print_unsup_feat(config);
8385 (void) printf("\n");
8386 printf_color(ANSI_BOLD, gettext("action: "));
8387 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
8388 "in read-write mode. Import the pool with\n"
8389 "\t\"-o readonly=on\", access the pool from a system that "
8390 "supports the\n\trequired feature(s), or restore the "
8391 "pool from backup.\n"));
8392 break;
8393
8394 case ZPOOL_STATUS_FAULTED_DEV_R:
8395 printf_color(ANSI_BOLD, gettext("status: "));
8396 printf_color(ANSI_YELLOW, gettext("One or more devices are "
8397 "faulted in response to persistent errors.\n\tSufficient "
8398 "replicas exist for the pool to continue functioning "
8399 "in a\n\tdegraded state.\n"));
8400 printf_color(ANSI_BOLD, gettext("action: "));
8401 printf_color(ANSI_YELLOW, gettext("Replace the faulted device, "
8402 "or use 'zpool clear' to mark the device\n\trepaired.\n"));
8403 break;
8404
8405 case ZPOOL_STATUS_FAULTED_DEV_NR:
8406 printf_color(ANSI_BOLD, gettext("status: "));
8407 printf_color(ANSI_YELLOW, gettext("One or more devices are "
8408 "faulted in response to persistent errors. There are "
8409 "insufficient replicas for the pool to\n\tcontinue "
8410 "functioning.\n"));
8411 printf_color(ANSI_BOLD, gettext("action: "));
8412 printf_color(ANSI_YELLOW, gettext("Destroy and re-create the "
8413 "pool from a backup source. Manually marking the device\n"
8414 "\trepaired using 'zpool clear' may allow some data "
8415 "to be recovered.\n"));
8416 break;
8417
8418 case ZPOOL_STATUS_IO_FAILURE_MMP:
8419 printf_color(ANSI_BOLD, gettext("status: "));
8420 printf_color(ANSI_YELLOW, gettext("The pool is suspended "
8421 "because multihost writes failed or were delayed;\n\t"
8422 "another system could import the pool undetected.\n"));
8423 printf_color(ANSI_BOLD, gettext("action: "));
8424 printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices"
8425 " are connected, then reboot your system and\n\timport the "
8426 "pool.\n"));
8427 break;
8428
8429 case ZPOOL_STATUS_IO_FAILURE_WAIT:
8430 case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
8431 printf_color(ANSI_BOLD, gettext("status: "));
8432 printf_color(ANSI_YELLOW, gettext("One or more devices are "
8433 "faulted in response to IO failures.\n"));
8434 printf_color(ANSI_BOLD, gettext("action: "));
8435 printf_color(ANSI_YELLOW, gettext("Make sure the affected "
8436 "devices are connected, then run 'zpool clear'.\n"));
8437 break;
8438
8439 case ZPOOL_STATUS_BAD_LOG:
8440 printf_color(ANSI_BOLD, gettext("status: "));
8441 printf_color(ANSI_YELLOW, gettext("An intent log record "
8442 "could not be read.\n"
8443 "\tWaiting for administrator intervention to fix the "
8444 "faulted pool.\n"));
8445 printf_color(ANSI_BOLD, gettext("action: "));
8446 printf_color(ANSI_YELLOW, gettext("Either restore the affected "
8447 "device(s) and run 'zpool online',\n"
8448 "\tor ignore the intent log records by running "
8449 "'zpool clear'.\n"));
8450 break;
8451
8452 case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
8453 (void) printf(gettext("status: One or more devices are "
8454 "configured to use a non-native block size.\n"
8455 "\tExpect reduced performance.\n"));
8456 (void) printf(gettext("action: Replace affected devices with "
8457 "devices that support the\n\tconfigured block size, or "
8458 "migrate data to a properly configured\n\tpool.\n"));
8459 break;
8460
8461 case ZPOOL_STATUS_HOSTID_MISMATCH:
8462 printf_color(ANSI_BOLD, gettext("status: "));
8463 printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid"
8464 " and system hostid on imported pool.\n\tThis pool was "
8465 "previously imported into a system with a different "
8466 "hostid,\n\tand then was verbatim imported into this "
8467 "system.\n"));
8468 printf_color(ANSI_BOLD, gettext("action: "));
8469 printf_color(ANSI_YELLOW, gettext("Export this pool on all "
8470 "systems on which it is imported.\n"
8471 "\tThen import it to correct the mismatch.\n"));
8472 break;
8473
8474 case ZPOOL_STATUS_ERRATA:
8475 printf_color(ANSI_BOLD, gettext("status: "));
8476 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
8477 errata);
8478
8479 switch (errata) {
8480 case ZPOOL_ERRATA_NONE:
8481 break;
8482
8483 case ZPOOL_ERRATA_ZOL_2094_SCRUB:
8484 printf_color(ANSI_BOLD, gettext("action: "));
8485 printf_color(ANSI_YELLOW, gettext("To correct the issue"
8486 " run 'zpool scrub'.\n"));
8487 break;
8488
8489 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
8490 (void) printf(gettext("\tExisting encrypted datasets "
8491 "contain an on-disk incompatibility\n\twhich "
8492 "needs to be corrected.\n"));
8493 printf_color(ANSI_BOLD, gettext("action: "));
8494 printf_color(ANSI_YELLOW, gettext("To correct the issue"
8495 " backup existing encrypted datasets to new\n\t"
8496 "encrypted datasets and destroy the old ones. "
8497 "'zfs mount -o ro' can\n\tbe used to temporarily "
8498 "mount existing encrypted datasets readonly.\n"));
8499 break;
8500
8501 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
8502 (void) printf(gettext("\tExisting encrypted snapshots "
8503 "and bookmarks contain an on-disk\n\tincompat"
8504 "ibility. This may cause on-disk corruption if "
8505 "they are used\n\twith 'zfs recv'.\n"));
8506 printf_color(ANSI_BOLD, gettext("action: "));
			printf_color(ANSI_YELLOW, gettext("To correct the "
8508 "issue, enable the bookmark_v2 feature. No "
8509 "additional\n\taction is needed if there are no "
			    "encrypted snapshots or bookmarks.\n\tIf preserving "
8511 "the encrypted snapshots and bookmarks is required,"
8512 " use\n\ta non-raw send to backup and restore them."
8513 " Alternately, they may be\n\tremoved to resolve "
8514 "the incompatibility.\n"));
8515 break;
8516
8517 default:
8518 /*
8519 * All errata which allow the pool to be imported
8520 * must contain an action message.
8521 */
8522 assert(0);
8523 }
8524 break;
8525
8526 default:
8527 /*
8528 * The remaining errors can't actually be generated, yet.
8529 */
8530 assert(reason == ZPOOL_STATUS_OK);
8531 }
8532
8533 if (msgid != NULL) {
8534 printf(" ");
8535 printf_color(ANSI_BOLD, gettext("see:"));
8536 printf(gettext(
8537 " https://openzfs.github.io/openzfs-docs/msg/%s\n"),
8538 msgid);
8539 }
8540
8541 if (config != NULL) {
8542 uint64_t nerr;
8543 nvlist_t **spares, **l2cache;
8544 uint_t nspares, nl2cache;
8545 pool_checkpoint_stat_t *pcs = NULL;
8546 pool_removal_stat_t *prs = NULL;
8547
8548 print_scan_status(zhp, nvroot);
8549
8550 (void) nvlist_lookup_uint64_array(nvroot,
8551 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
8552 print_removal_status(zhp, prs);
8553
8554 (void) nvlist_lookup_uint64_array(nvroot,
8555 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
8556 print_checkpoint_status(pcs);
8557
8558 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
8559 cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
8560 if (cbp->cb_namewidth < 10)
8561 cbp->cb_namewidth = 10;
8562
8563 color_start(ANSI_BOLD);
8564 (void) printf(gettext("config:\n\n"));
8565 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
8566 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
8567 "CKSUM");
8568 color_end();
8569
8570 if (cbp->cb_print_slow_ios) {
8571 printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
8572 }
8573
8574 if (cbp->vcdl != NULL)
8575 print_cmd_columns(cbp->vcdl, 0);
8576
8577 printf("\n");
8578
8579 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
8580 B_FALSE, NULL);
8581
8582 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
8583 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
8584 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
8585
8586 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
8587 &l2cache, &nl2cache) == 0)
8588 print_l2cache(zhp, cbp, l2cache, nl2cache);
8589
8590 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
8591 &spares, &nspares) == 0)
8592 print_spares(zhp, cbp, spares, nspares);
8593
8594 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
8595 &nerr) == 0) {
8596 (void) printf("\n");
8597 if (nerr == 0) {
8598 (void) printf(gettext(
8599 "errors: No known data errors\n"));
8600 } else if (!cbp->cb_verbose) {
8601 (void) printf(gettext("errors: %llu data "
8602 "errors, use '-v' for a list\n"),
8603 (u_longlong_t)nerr);
8604 } else {
8605 print_error_log(zhp);
8606 }
8607 }
8608
8609 if (cbp->cb_dedup_stats)
8610 print_dedup_stats(config);
8611 } else {
8612 (void) printf(gettext("config: The configuration cannot be "
8613 "determined.\n"));
8614 }
8615
8616 return (0);
8617 }
8618
8619 /*
8620 * zpool status [-c [script1,script2,...]] [-igLpPstvx] [-T d|u] [pool] ...
8621 * [interval [count]]
8622 *
8623 * -c CMD For each vdev, run command CMD
8624 * -i Display vdev initialization status.
8625 * -g Display guid for individual vdev name.
8626 * -L Follow links when resolving vdev path name.
8627 * -p Display values in parsable (exact) format.
8628 * -P Display full path for vdev name.
8629 * -s Display slow IOs column.
8630 * -v Display complete error logs
8631 * -x Display only pools with potential problems
8632 * -D Display dedup status (undocumented)
8633 * -t Display vdev TRIM status.
8634 * -T Display a timestamp in date(1) or Unix format
8635 *
8636 * Describes the health status of all pools or some subset.
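 *
 * Example invocations (the pool name 'tank' is illustrative):
 *
 *	zpool status -v tank		# full config plus the error list
 *	zpool status -x			# only pools with potential problems
 *	zpool status tank 5 3		# repeat every 5 seconds, 3 times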
8637 */
8638 int
8639 zpool_do_status(int argc, char **argv)
8640 {
8641 int c;
8642 int ret;
8643 float interval = 0;
8644 unsigned long count = 0;
8645 status_cbdata_t cb = { 0 };
8646 char *cmd = NULL;
8647
8648 /* check options */
8649 while ((c = getopt(argc, argv, "c:igLpPsvxDtT:")) != -1) {
8650 switch (c) {
8651 case 'c':
8652 if (cmd != NULL) {
8653 fprintf(stderr,
8654 gettext("Can't set -c flag twice\n"));
8655 exit(1);
8656 }
8657
8658 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
8659 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
8660 fprintf(stderr, gettext(
8661 "Can't run -c, disabled by "
8662 "ZPOOL_SCRIPTS_ENABLED.\n"));
8663 exit(1);
8664 }
8665
8666 if ((getuid() <= 0 || geteuid() <= 0) &&
8667 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
8668 fprintf(stderr, gettext(
8669 "Can't run -c with root privileges "
8670 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
8671 exit(1);
8672 }
8673 cmd = optarg;
8674 break;
8675 case 'i':
8676 cb.cb_print_vdev_init = B_TRUE;
8677 break;
8678 case 'g':
8679 cb.cb_name_flags |= VDEV_NAME_GUID;
8680 break;
8681 case 'L':
8682 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
8683 break;
8684 case 'p':
8685 cb.cb_literal = B_TRUE;
8686 break;
8687 case 'P':
8688 cb.cb_name_flags |= VDEV_NAME_PATH;
8689 break;
8690 case 's':
8691 cb.cb_print_slow_ios = B_TRUE;
8692 break;
8693 case 'v':
8694 cb.cb_verbose = B_TRUE;
8695 break;
8696 case 'x':
8697 cb.cb_explain = B_TRUE;
8698 break;
8699 case 'D':
8700 cb.cb_dedup_stats = B_TRUE;
8701 break;
8702 case 't':
8703 cb.cb_print_vdev_trim = B_TRUE;
8704 break;
8705 case 'T':
8706 get_timestamp_arg(*optarg);
8707 break;
8708 case '?':
8709 if (optopt == 'c') {
8710 print_zpool_script_list("status");
8711 exit(0);
8712 } else {
8713 fprintf(stderr,
8714 gettext("invalid option '%c'\n"), optopt);
8715 }
8716 usage(B_FALSE);
8717 }
8718 }
8719
8720 argc -= optind;
8721 argv += optind;
8722
8723 get_interval_count(&argc, argv, &interval, &count);
8724
8725 if (argc == 0)
8726 cb.cb_allpools = B_TRUE;
8727
8728 cb.cb_first = B_TRUE;
8729 cb.cb_print_status = B_TRUE;
8730
8731 for (;;) {
8732 if (timestamp_fmt != NODATE)
8733 print_timestamp(timestamp_fmt);
8734
8735 if (cmd != NULL)
8736 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
8737 NULL, NULL, 0, 0);
8738
8739 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
8740 cb.cb_literal, status_callback, &cb);
8741
8742 if (cb.vcdl != NULL)
8743 free_vdev_cmd_data_list(cb.vcdl);
8744
8745 if (argc == 0 && cb.cb_count == 0)
8746 (void) fprintf(stderr, gettext("no pools available\n"));
8747 else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
8748 (void) printf(gettext("all pools are healthy\n"));
8749
8750 if (ret != 0)
8751 return (ret);
8752
8753 if (interval == 0)
8754 break;
8755
8756 if (count != 0 && --count == 0)
8757 break;
8758
8759 (void) fsleep(interval);
8760 }
8761
8762 return (0);
8763 }
8764
8765 typedef struct upgrade_cbdata {
8766 int cb_first;
8767 int cb_argc;
8768 uint64_t cb_version;
8769 char **cb_argv;
8770 } upgrade_cbdata_t;
8771
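/*
 * zfs_iter_root()/zfs_iter_filesystems() callback: count filesystems whose
 * ZPL version is newer than this implementation supports.
 */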
8772 static int
8773 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
8774 {
8775 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
8776 int *count = (int *)unsupp_fs;
8777
8778 if (zfs_version > ZPL_VERSION) {
8779 (void) printf(gettext("%s (v%d) is not supported by this "
8780 "implementation of ZFS.\n"),
8781 zfs_get_name(zhp), zfs_version);
8782 (*count)++;
8783 }
8784
8785 zfs_iter_filesystems(zhp, 0, check_unsupp_fs, unsupp_fs);
8786
8787 zfs_close(zhp);
8788
8789 return (0);
8790 }
8791
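/*
 * Upgrade a pool's on-disk version to 'version'.  The upgrade is refused if
 * any filesystem in the pool uses an unsupported ZPL version or if the pool's
 * 'compatibility' property is set to 'legacy'.
 */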
8792 static int
8793 upgrade_version(zpool_handle_t *zhp, uint64_t version)
8794 {
8795 int ret;
8796 nvlist_t *config;
8797 uint64_t oldversion;
8798 int unsupp_fs = 0;
8799
8800 config = zpool_get_config(zhp, NULL);
8801 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
8802 &oldversion) == 0);
8803
8804 char compat[ZFS_MAXPROPLEN];
8805 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
8806 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
8807 compat[0] = '\0';
8808
8809 assert(SPA_VERSION_IS_SUPPORTED(oldversion));
8810 assert(oldversion < version);
8811
8812 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
8813 if (ret != 0)
8814 return (ret);
8815
8816 if (unsupp_fs) {
8817 (void) fprintf(stderr, gettext("Upgrade not performed due "
8818 "to %d unsupported filesystems (max v%d).\n"),
8819 unsupp_fs, (int)ZPL_VERSION);
8820 return (1);
8821 }
8822
8823 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
8824 (void) fprintf(stderr, gettext("Upgrade not performed because "
8825 "'compatibility' property set to '"
8826 ZPOOL_COMPAT_LEGACY "'.\n"));
8827 return (1);
8828 }
8829
8830 ret = zpool_upgrade(zhp, version);
8831 if (ret != 0)
8832 return (ret);
8833
8834 if (version >= SPA_VERSION_FEATURES) {
8835 (void) printf(gettext("Successfully upgraded "
8836 "'%s' from version %llu to feature flags.\n"),
8837 zpool_get_name(zhp), (u_longlong_t)oldversion);
8838 } else {
8839 (void) printf(gettext("Successfully upgraded "
8840 "'%s' from version %llu to version %llu.\n"),
8841 zpool_get_name(zhp), (u_longlong_t)oldversion,
8842 (u_longlong_t)version);
8843 }
8844
8845 return (0);
8846 }
8847
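/*
 * Enable every supported feature that the pool's 'compatibility' property
 * allows but which is not yet enabled.  The number of newly enabled features
 * is returned through countp.
 */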
8848 static int
8849 upgrade_enable_all(zpool_handle_t *zhp, int *countp)
8850 {
8851 int i, ret, count;
8852 boolean_t firstff = B_TRUE;
8853 nvlist_t *enabled = zpool_get_features(zhp);
8854
8855 char compat[ZFS_MAXPROPLEN];
8856 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
8857 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
8858 compat[0] = '\0';
8859
8860 boolean_t requested_features[SPA_FEATURES];
8861 if (zpool_do_load_compat(compat, requested_features) !=
8862 ZPOOL_COMPATIBILITY_OK)
8863 return (-1);
8864
8865 count = 0;
8866 for (i = 0; i < SPA_FEATURES; i++) {
8867 const char *fname = spa_feature_table[i].fi_uname;
8868 const char *fguid = spa_feature_table[i].fi_guid;
8869
8870 if (!spa_feature_table[i].fi_zfs_mod_supported)
8871 continue;
8872
8873 if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
8874 char *propname;
8875 verify(-1 != asprintf(&propname, "feature@%s", fname));
8876 ret = zpool_set_prop(zhp, propname,
8877 ZFS_FEATURE_ENABLED);
8878 if (ret != 0) {
8879 free(propname);
8880 return (ret);
8881 }
8882 count++;
8883
8884 if (firstff) {
8885 (void) printf(gettext("Enabled the "
8886 "following features on '%s':\n"),
8887 zpool_get_name(zhp));
8888 firstff = B_FALSE;
8889 }
8890 (void) printf(gettext(" %s\n"), fname);
8891 free(propname);
8892 }
8893 }
8894
8895 if (countp != NULL)
8896 *countp = count;
8897 return (0);
8898 }
8899
8900 static int
8901 upgrade_cb(zpool_handle_t *zhp, void *arg)
8902 {
8903 upgrade_cbdata_t *cbp = arg;
8904 nvlist_t *config;
8905 uint64_t version;
8906 boolean_t modified_pool = B_FALSE;
8907 int ret;
8908
8909 config = zpool_get_config(zhp, NULL);
8910 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
8911 &version) == 0);
8912
8913 assert(SPA_VERSION_IS_SUPPORTED(version));
8914
8915 if (version < cbp->cb_version) {
8916 cbp->cb_first = B_FALSE;
8917 ret = upgrade_version(zhp, cbp->cb_version);
8918 if (ret != 0)
8919 return (ret);
8920 modified_pool = B_TRUE;
8921
8922 /*
8923 * If they did "zpool upgrade -a", then we could
8924 * be doing ioctls to different pools. We need
8925 * to log this history once to each pool, and bypass
8926 * the normal history logging that happens in main().
8927 */
8928 (void) zpool_log_history(g_zfs, history_str);
8929 log_history = B_FALSE;
8930 }
8931
8932 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
8933 int count;
8934 ret = upgrade_enable_all(zhp, &count);
8935 if (ret != 0)
8936 return (ret);
8937
8938 if (count > 0) {
8939 cbp->cb_first = B_FALSE;
8940 modified_pool = B_TRUE;
8941 }
8942 }
8943
8944 if (modified_pool) {
8945 (void) printf("\n");
8946 (void) after_zpool_upgrade(zhp);
8947 }
8948
8949 return (0);
8950 }
8951
8952 static int
8953 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
8954 {
8955 upgrade_cbdata_t *cbp = arg;
8956 nvlist_t *config;
8957 uint64_t version;
8958
8959 config = zpool_get_config(zhp, NULL);
8960 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
8961 &version) == 0);
8962
8963 assert(SPA_VERSION_IS_SUPPORTED(version));
8964
8965 if (version < SPA_VERSION_FEATURES) {
8966 if (cbp->cb_first) {
8967 (void) printf(gettext("The following pools are "
8968 "formatted with legacy version numbers and can\n"
8969 "be upgraded to use feature flags. After "
8970 "being upgraded, these pools\nwill no "
8971 "longer be accessible by software that does not "
8972 "support feature\nflags.\n\n"
8973 "Note that setting a pool's 'compatibility' "
			    "property to '" ZPOOL_COMPAT_LEGACY "' will\n"
8975 "inhibit upgrades.\n\n"));
8976 (void) printf(gettext("VER POOL\n"));
8977 (void) printf(gettext("--- ------------\n"));
8978 cbp->cb_first = B_FALSE;
8979 }
8980
8981 (void) printf("%2llu %s\n", (u_longlong_t)version,
8982 zpool_get_name(zhp));
8983 }
8984
8985 return (0);
8986 }
8987
8988 static int
8989 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
8990 {
8991 upgrade_cbdata_t *cbp = arg;
8992 nvlist_t *config;
8993 uint64_t version;
8994
8995 config = zpool_get_config(zhp, NULL);
8996 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
8997 &version) == 0);
8998
8999 if (version >= SPA_VERSION_FEATURES) {
9000 int i;
9001 boolean_t poolfirst = B_TRUE;
9002 nvlist_t *enabled = zpool_get_features(zhp);
9003
9004 for (i = 0; i < SPA_FEATURES; i++) {
9005 const char *fguid = spa_feature_table[i].fi_guid;
9006 const char *fname = spa_feature_table[i].fi_uname;
9007
9008 if (!spa_feature_table[i].fi_zfs_mod_supported)
9009 continue;
9010
9011 if (!nvlist_exists(enabled, fguid)) {
9012 if (cbp->cb_first) {
9013 (void) printf(gettext("\nSome "
9014 "supported features are not "
9015 "enabled on the following pools. "
9016 "Once a\nfeature is enabled the "
9017 "pool may become incompatible with "
9018 "software\nthat does not support "
9019 "the feature. See "
9020 "zpool-features(7) for "
9021 "details.\n\n"
9022 "Note that the pool "
					    "'compatibility' property can be "
9024 "used to inhibit\nfeature "
9025 "upgrades.\n\n"));
9026 (void) printf(gettext("POOL "
9027 "FEATURE\n"));
9028 (void) printf(gettext("------"
9029 "---------\n"));
9030 cbp->cb_first = B_FALSE;
9031 }
9032
9033 if (poolfirst) {
9034 (void) printf(gettext("%s\n"),
9035 zpool_get_name(zhp));
9036 poolfirst = B_FALSE;
9037 }
9038
9039 (void) printf(gettext(" %s\n"), fname);
9040 }
9041 /*
9042 * If they did "zpool upgrade -a", then we could
9043 * be doing ioctls to different pools. We need
9044 * to log this history once to each pool, and bypass
9045 * the normal history logging that happens in main().
9046 */
9047 (void) zpool_log_history(g_zfs, history_str);
9048 log_history = B_FALSE;
9049 }
9050 }
9051
9052 return (0);
9053 }
9054
9055 static int
9056 upgrade_one(zpool_handle_t *zhp, void *data)
9057 {
9058 boolean_t modified_pool = B_FALSE;
9059 upgrade_cbdata_t *cbp = data;
9060 uint64_t cur_version;
9061 int ret;
9062
9063 if (strcmp("log", zpool_get_name(zhp)) == 0) {
9064 (void) fprintf(stderr, gettext("'log' is now a reserved word\n"
9065 "Pool 'log' must be renamed using export and import"
9066 " to upgrade.\n"));
9067 return (1);
9068 }
9069
9070 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
9071 if (cur_version > cbp->cb_version) {
9072 (void) printf(gettext("Pool '%s' is already formatted "
9073 "using more current version '%llu'.\n\n"),
9074 zpool_get_name(zhp), (u_longlong_t)cur_version);
9075 return (0);
9076 }
9077
9078 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
9079 (void) printf(gettext("Pool '%s' is already formatted "
9080 "using version %llu.\n\n"), zpool_get_name(zhp),
9081 (u_longlong_t)cbp->cb_version);
9082 return (0);
9083 }
9084
9085 if (cur_version != cbp->cb_version) {
9086 modified_pool = B_TRUE;
9087 ret = upgrade_version(zhp, cbp->cb_version);
9088 if (ret != 0)
9089 return (ret);
9090 }
9091
9092 if (cbp->cb_version >= SPA_VERSION_FEATURES) {
9093 int count = 0;
9094 ret = upgrade_enable_all(zhp, &count);
9095 if (ret != 0)
9096 return (ret);
9097
9098 if (count != 0) {
9099 modified_pool = B_TRUE;
9100 } else if (cur_version == SPA_VERSION) {
9101 (void) printf(gettext("Pool '%s' already has all "
9102 "supported and requested features enabled.\n"),
9103 zpool_get_name(zhp));
9104 }
9105 }
9106
9107 if (modified_pool) {
9108 (void) printf("\n");
9109 (void) after_zpool_upgrade(zhp);
9110 }
9111
9112 return (0);
9113 }
9114
9115 /*
9116 * zpool upgrade
9117 * zpool upgrade -v
9118 * zpool upgrade [-V version] <-a | pool ...>
9119 *
 * With no arguments, display downrev'd ZFS pools available for upgrade.
9121 * Individual pools can be upgraded by specifying the pool, and '-a' will
9122 * upgrade all pools.
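 *
 * Example invocations (the pool name 'tank' is illustrative):
 *
 *	zpool upgrade -v		# list supported features and versions
 *	zpool upgrade tank		# upgrade a single pool
 *	zpool upgrade -a		# upgrade every imported pool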
9123 */
9124 int
9125 zpool_do_upgrade(int argc, char **argv)
9126 {
9127 int c;
9128 upgrade_cbdata_t cb = { 0 };
9129 int ret = 0;
9130 boolean_t showversions = B_FALSE;
9131 boolean_t upgradeall = B_FALSE;
9132 char *end;
9133
9134
9135 /* check options */
9136 while ((c = getopt(argc, argv, ":avV:")) != -1) {
9137 switch (c) {
9138 case 'a':
9139 upgradeall = B_TRUE;
9140 break;
9141 case 'v':
9142 showversions = B_TRUE;
9143 break;
9144 case 'V':
9145 cb.cb_version = strtoll(optarg, &end, 10);
9146 if (*end != '\0' ||
9147 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
9148 (void) fprintf(stderr,
9149 gettext("invalid version '%s'\n"), optarg);
9150 usage(B_FALSE);
9151 }
9152 break;
9153 case ':':
9154 (void) fprintf(stderr, gettext("missing argument for "
9155 "'%c' option\n"), optopt);
9156 usage(B_FALSE);
9157 break;
9158 case '?':
9159 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
9160 optopt);
9161 usage(B_FALSE);
9162 }
9163 }
9164
9165 cb.cb_argc = argc;
9166 cb.cb_argv = argv;
9167 argc -= optind;
9168 argv += optind;
9169
9170 if (cb.cb_version == 0) {
9171 cb.cb_version = SPA_VERSION;
9172 } else if (!upgradeall && argc == 0) {
9173 (void) fprintf(stderr, gettext("-V option is "
9174 "incompatible with other arguments\n"));
9175 usage(B_FALSE);
9176 }
9177
9178 if (showversions) {
9179 if (upgradeall || argc != 0) {
9180 (void) fprintf(stderr, gettext("-v option is "
9181 "incompatible with other arguments\n"));
9182 usage(B_FALSE);
9183 }
9184 } else if (upgradeall) {
9185 if (argc != 0) {
9186 (void) fprintf(stderr, gettext("-a option should not "
9187 "be used along with a pool name\n"));
9188 usage(B_FALSE);
9189 }
9190 }
9191
9192 (void) printf("%s", gettext("This system supports ZFS pool feature "
9193 "flags.\n\n"));
9194 if (showversions) {
9195 int i;
9196
9197 (void) printf(gettext("The following features are "
9198 "supported:\n\n"));
9199 (void) printf(gettext("FEAT DESCRIPTION\n"));
9200 (void) printf("----------------------------------------------"
9201 "---------------\n");
9202 for (i = 0; i < SPA_FEATURES; i++) {
9203 zfeature_info_t *fi = &spa_feature_table[i];
9204 if (!fi->fi_zfs_mod_supported)
9205 continue;
9206 const char *ro =
9207 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
9208 " (read-only compatible)" : "";
9209
9210 (void) printf("%-37s%s\n", fi->fi_uname, ro);
9211 (void) printf(" %s\n", fi->fi_desc);
9212 }
9213 (void) printf("\n");
9214
9215 (void) printf(gettext("The following legacy versions are also "
9216 "supported:\n\n"));
9217 (void) printf(gettext("VER DESCRIPTION\n"));
9218 (void) printf("--- -----------------------------------------"
9219 "---------------\n");
9220 (void) printf(gettext(" 1 Initial ZFS version\n"));
9221 (void) printf(gettext(" 2 Ditto blocks "
9222 "(replicated metadata)\n"));
9223 (void) printf(gettext(" 3 Hot spares and double parity "
9224 "RAID-Z\n"));
9225 (void) printf(gettext(" 4 zpool history\n"));
9226 (void) printf(gettext(" 5 Compression using the gzip "
9227 "algorithm\n"));
9228 (void) printf(gettext(" 6 bootfs pool property\n"));
9229 (void) printf(gettext(" 7 Separate intent log devices\n"));
9230 (void) printf(gettext(" 8 Delegated administration\n"));
9231 (void) printf(gettext(" 9 refquota and refreservation "
9232 "properties\n"));
9233 (void) printf(gettext(" 10 Cache devices\n"));
9234 (void) printf(gettext(" 11 Improved scrub performance\n"));
9235 (void) printf(gettext(" 12 Snapshot properties\n"));
9236 (void) printf(gettext(" 13 snapused property\n"));
9237 (void) printf(gettext(" 14 passthrough-x aclinherit\n"));
9238 (void) printf(gettext(" 15 user/group space accounting\n"));
9239 (void) printf(gettext(" 16 stmf property support\n"));
9240 (void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
9241 (void) printf(gettext(" 18 Snapshot user holds\n"));
9242 (void) printf(gettext(" 19 Log device removal\n"));
9243 (void) printf(gettext(" 20 Compression using zle "
9244 "(zero-length encoding)\n"));
9245 (void) printf(gettext(" 21 Deduplication\n"));
9246 (void) printf(gettext(" 22 Received properties\n"));
9247 (void) printf(gettext(" 23 Slim ZIL\n"));
9248 (void) printf(gettext(" 24 System attributes\n"));
9249 (void) printf(gettext(" 25 Improved scrub stats\n"));
9250 (void) printf(gettext(" 26 Improved snapshot deletion "
9251 "performance\n"));
9252 (void) printf(gettext(" 27 Improved snapshot creation "
9253 "performance\n"));
9254 (void) printf(gettext(" 28 Multiple vdev replacements\n"));
9255 (void) printf(gettext("\nFor more information on a particular "
9256 "version, including supported releases,\n"));
9257 (void) printf(gettext("see the ZFS Administration Guide.\n\n"));
9258 } else if (argc == 0 && upgradeall) {
9259 cb.cb_first = B_TRUE;
9260 ret = zpool_iter(g_zfs, upgrade_cb, &cb);
9261 if (ret == 0 && cb.cb_first) {
9262 if (cb.cb_version == SPA_VERSION) {
9263 (void) printf(gettext("All pools are already "
9264 "formatted using feature flags.\n\n"));
9265 (void) printf(gettext("Every feature flags "
9266 "pool already has all supported and "
9267 "requested features enabled.\n"));
9268 } else {
9269 (void) printf(gettext("All pools are already "
9270 "formatted with version %llu or higher.\n"),
9271 (u_longlong_t)cb.cb_version);
9272 }
9273 }
9274 } else if (argc == 0) {
9275 cb.cb_first = B_TRUE;
9276 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
9277 assert(ret == 0);
9278
9279 if (cb.cb_first) {
9280 (void) printf(gettext("All pools are formatted "
9281 "using feature flags.\n\n"));
9282 } else {
9283 (void) printf(gettext("\nUse 'zpool upgrade -v' "
9284 "for a list of available legacy versions.\n"));
9285 }
9286
9287 cb.cb_first = B_TRUE;
9288 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
9289 assert(ret == 0);
9290
9291 if (cb.cb_first) {
9292 (void) printf(gettext("Every feature flags pool has "
9293 "all supported and requested features enabled.\n"));
9294 } else {
9295 (void) printf(gettext("\n"));
9296 }
9297 } else {
9298 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
9299 B_FALSE, upgrade_one, &cb);
9300 }
9301
9302 return (ret);
9303 }
9304
9305 typedef struct hist_cbdata {
9306 boolean_t first;
9307 boolean_t longfmt;
9308 boolean_t internal;
9309 } hist_cbdata_t;
9310
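/*
 * Print each record in one chunk of pool history, honoring the -l (long
 * format) and -i (internal events) options captured in the cbdata.
 */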
9311 static void
9312 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
9313 {
9314 nvlist_t **records;
9315 uint_t numrecords;
9316 int i;
9317
9318 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
9319 &records, &numrecords) == 0);
9320 for (i = 0; i < numrecords; i++) {
9321 nvlist_t *rec = records[i];
9322 char tbuf[64] = "";
9323
9324 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
9325 time_t tsec;
9326 struct tm t;
9327
9328 tsec = fnvlist_lookup_uint64(records[i],
9329 ZPOOL_HIST_TIME);
9330 (void) localtime_r(&tsec, &t);
9331 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
9332 }
9333
9334 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
9335 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
9336 ZPOOL_HIST_ELAPSED_NS);
9337 (void) snprintf(tbuf + strlen(tbuf),
9338 sizeof (tbuf) - strlen(tbuf),
9339 " (%lldms)", (long long)elapsed_ns / 1000 / 1000);
9340 }
9341
9342 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
9343 (void) printf("%s %s", tbuf,
9344 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
9345 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
9346 int ievent =
9347 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
9348 if (!cb->internal)
9349 continue;
9350 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
9351 (void) printf("%s unrecognized record:\n",
9352 tbuf);
9353 dump_nvlist(rec, 4);
9354 continue;
9355 }
9356 (void) printf("%s [internal %s txg:%lld] %s", tbuf,
9357 zfs_history_event_names[ievent],
9358 (longlong_t)fnvlist_lookup_uint64(
9359 rec, ZPOOL_HIST_TXG),
9360 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
9361 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
9362 if (!cb->internal)
9363 continue;
9364 (void) printf("%s [txg:%lld] %s", tbuf,
9365 (longlong_t)fnvlist_lookup_uint64(
9366 rec, ZPOOL_HIST_TXG),
9367 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
9368 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
9369 (void) printf(" %s (%llu)",
9370 fnvlist_lookup_string(rec,
9371 ZPOOL_HIST_DSNAME),
9372 (u_longlong_t)fnvlist_lookup_uint64(rec,
9373 ZPOOL_HIST_DSID));
9374 }
9375 (void) printf(" %s", fnvlist_lookup_string(rec,
9376 ZPOOL_HIST_INT_STR));
9377 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
9378 if (!cb->internal)
9379 continue;
9380 (void) printf("%s ioctl %s\n", tbuf,
9381 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
9382 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
9383 (void) printf(" input:\n");
9384 dump_nvlist(fnvlist_lookup_nvlist(rec,
9385 ZPOOL_HIST_INPUT_NVL), 8);
9386 }
9387 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
9388 (void) printf(" output:\n");
9389 dump_nvlist(fnvlist_lookup_nvlist(rec,
9390 ZPOOL_HIST_OUTPUT_NVL), 8);
9391 }
9392 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
9393 (void) printf(" output nvlist omitted; "
9394 "original size: %lldKB\n",
9395 (longlong_t)fnvlist_lookup_int64(rec,
9396 ZPOOL_HIST_OUTPUT_SIZE) / 1024);
9397 }
9398 if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
9399 (void) printf(" errno: %lld\n",
9400 (longlong_t)fnvlist_lookup_int64(rec,
9401 ZPOOL_HIST_ERRNO));
9402 }
9403 } else {
9404 if (!cb->internal)
9405 continue;
9406 (void) printf("%s unrecognized record:\n", tbuf);
9407 dump_nvlist(rec, 4);
9408 }
9409
9410 if (!cb->longfmt) {
9411 (void) printf("\n");
9412 continue;
9413 }
9414 (void) printf(" [");
9415 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
9416 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
9417 struct passwd *pwd = getpwuid(who);
9418 (void) printf("user %d ", (int)who);
9419 if (pwd != NULL)
9420 (void) printf("(%s) ", pwd->pw_name);
9421 }
9422 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
9423 (void) printf("on %s",
9424 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
9425 }
9426 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
9427 (void) printf(":%s",
9428 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
9429 }
9430
9431 (void) printf("]");
9432 (void) printf("\n");
9433 }
9434 }
9435
9436 /*
9437 * Print out the command history for a specific pool.
9438 */
9439 static int
9440 get_history_one(zpool_handle_t *zhp, void *data)
9441 {
9442 nvlist_t *nvhis;
9443 int ret;
9444 hist_cbdata_t *cb = (hist_cbdata_t *)data;
9445 uint64_t off = 0;
9446 boolean_t eof = B_FALSE;
9447
9448 cb->first = B_FALSE;
9449
9450 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
9451
9452 while (!eof) {
9453 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
9454 return (ret);
9455
9456 print_history_records(nvhis, cb);
9457 nvlist_free(nvhis);
9458 }
9459 (void) printf("\n");
9460
9461 return (ret);
9462 }
9463
9464 /*
9465 * zpool history <pool>
9466 *
9467 * Displays the history of commands that modified pools.
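 *
 * Example invocations (the pool name 'tank' is illustrative):
 *
 *	zpool history tank		# user-initiated commands only
 *	zpool history -il tank		# long format, including internal events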
9468 */
9469 int
9470 zpool_do_history(int argc, char **argv)
9471 {
9472 hist_cbdata_t cbdata = { 0 };
9473 int ret;
9474 int c;
9475
9476 cbdata.first = B_TRUE;
9477 /* check options */
9478 while ((c = getopt(argc, argv, "li")) != -1) {
9479 switch (c) {
9480 case 'l':
9481 cbdata.longfmt = B_TRUE;
9482 break;
9483 case 'i':
9484 cbdata.internal = B_TRUE;
9485 break;
9486 case '?':
9487 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
9488 optopt);
9489 usage(B_FALSE);
9490 }
9491 }
9492 argc -= optind;
9493 argv += optind;
9494
9495 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL,
9496 B_FALSE, get_history_one, &cbdata);
9497
9498 if (argc == 0 && cbdata.first == B_TRUE) {
9499 (void) fprintf(stderr, gettext("no pools available\n"));
9500 return (0);
9501 }
9502
9503 return (ret);
9504 }
9505
9506 typedef struct ev_opts {
9507 int verbose;
9508 int scripted;
9509 int follow;
9510 int clear;
9511 char poolname[ZFS_MAX_DATASET_NAME_LEN];
9512 } ev_opts_t;
9513
9514 static void
9515 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
9516 {
9517 char ctime_str[26], str[32], *ptr;
9518 int64_t *tv;
9519 uint_t n;
9520
9521 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
9522 memset(str, ' ', 32);
9523 (void) ctime_r((const time_t *)&tv[0], ctime_str);
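	/*
	 * ctime_r() yields e.g. "Wed Jun 30 21:49:08 1993\n"; the copies
	 * below rearrange that into "Jun 30 1993 21:49:08" before the
	 * nanosecond fraction is appended.
	 */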
9524 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
9525 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
9526 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
9527 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
9528 if (opts->scripted)
9529 (void) printf(gettext("%s\t"), str);
9530 else
9531 (void) printf(gettext("%s "), str);
9532
9533 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
9534 (void) printf(gettext("%s\n"), ptr);
9535 }
9536
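/*
 * Recursively print every nvpair in an event nvlist at indentation 'depth';
 * embedded nvlists and nvlist arrays are printed a further 8 spaces in.
 */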
9537 static void
9538 zpool_do_events_nvprint(nvlist_t *nvl, int depth)
9539 {
9540 nvpair_t *nvp;
9541
9542 for (nvp = nvlist_next_nvpair(nvl, NULL);
9543 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
9544
9545 data_type_t type = nvpair_type(nvp);
9546 const char *name = nvpair_name(nvp);
9547
9548 boolean_t b;
9549 uint8_t i8;
9550 uint16_t i16;
9551 uint32_t i32;
9552 uint64_t i64;
9553 char *str;
9554 nvlist_t *cnv;
9555
9556 printf(gettext("%*s%s = "), depth, "", name);
9557
9558 switch (type) {
9559 case DATA_TYPE_BOOLEAN:
9560 printf(gettext("%s"), "1");
9561 break;
9562
9563 case DATA_TYPE_BOOLEAN_VALUE:
9564 (void) nvpair_value_boolean_value(nvp, &b);
9565 printf(gettext("%s"), b ? "1" : "");
9566 break;
9567
9568 case DATA_TYPE_BYTE:
9569 (void) nvpair_value_byte(nvp, &i8);
9570 printf(gettext("0x%x"), i8);
9571 break;
9572
9573 case DATA_TYPE_INT8:
9574 (void) nvpair_value_int8(nvp, (void *)&i8);
9575 printf(gettext("0x%x"), i8);
9576 break;
9577
9578 case DATA_TYPE_UINT8:
9579 (void) nvpair_value_uint8(nvp, &i8);
9580 printf(gettext("0x%x"), i8);
9581 break;
9582
9583 case DATA_TYPE_INT16:
9584 (void) nvpair_value_int16(nvp, (void *)&i16);
9585 printf(gettext("0x%x"), i16);
9586 break;
9587
9588 case DATA_TYPE_UINT16:
9589 (void) nvpair_value_uint16(nvp, &i16);
9590 printf(gettext("0x%x"), i16);
9591 break;
9592
9593 case DATA_TYPE_INT32:
9594 (void) nvpair_value_int32(nvp, (void *)&i32);
9595 printf(gettext("0x%x"), i32);
9596 break;
9597
9598 case DATA_TYPE_UINT32:
9599 (void) nvpair_value_uint32(nvp, &i32);
9600 printf(gettext("0x%x"), i32);
9601 break;
9602
9603 case DATA_TYPE_INT64:
9604 (void) nvpair_value_int64(nvp, (void *)&i64);
9605 printf(gettext("0x%llx"), (u_longlong_t)i64);
9606 break;
9607
9608 case DATA_TYPE_UINT64:
9609 (void) nvpair_value_uint64(nvp, &i64);
9610 /*
9611 * translate vdev state values to readable
			 * strings to aid zpool events consumers
9613 */
9614 if (strcmp(name,
9615 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
9616 strcmp(name,
9617 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
9618 printf(gettext("\"%s\" (0x%llx)"),
9619 zpool_state_to_name(i64, VDEV_AUX_NONE),
9620 (u_longlong_t)i64);
9621 } else {
9622 printf(gettext("0x%llx"), (u_longlong_t)i64);
9623 }
9624 break;
9625
9626 case DATA_TYPE_HRTIME:
9627 (void) nvpair_value_hrtime(nvp, (void *)&i64);
9628 printf(gettext("0x%llx"), (u_longlong_t)i64);
9629 break;
9630
9631 case DATA_TYPE_STRING:
9632 (void) nvpair_value_string(nvp, &str);
9633 printf(gettext("\"%s\""), str ? str : "<NULL>");
9634 break;
9635
9636 case DATA_TYPE_NVLIST:
9637 printf(gettext("(embedded nvlist)\n"));
9638 (void) nvpair_value_nvlist(nvp, &cnv);
9639 zpool_do_events_nvprint(cnv, depth + 8);
9640 printf(gettext("%*s(end %s)"), depth, "", name);
9641 break;
9642
9643 case DATA_TYPE_NVLIST_ARRAY: {
9644 nvlist_t **val;
9645 uint_t i, nelem;
9646
9647 (void) nvpair_value_nvlist_array(nvp, &val, &nelem);
9648 printf(gettext("(%d embedded nvlists)\n"), nelem);
9649 for (i = 0; i < nelem; i++) {
9650 printf(gettext("%*s%s[%d] = %s\n"),
9651 depth, "", name, i, "(embedded nvlist)");
9652 zpool_do_events_nvprint(val[i], depth + 8);
9653 printf(gettext("%*s(end %s[%i])\n"),
9654 depth, "", name, i);
9655 }
9656 printf(gettext("%*s(end %s)\n"), depth, "", name);
9657 }
9658 break;
9659
9660 case DATA_TYPE_INT8_ARRAY: {
9661 int8_t *val;
9662 uint_t i, nelem;
9663
9664 (void) nvpair_value_int8_array(nvp, &val, &nelem);
9665 for (i = 0; i < nelem; i++)
9666 printf(gettext("0x%x "), val[i]);
9667
9668 break;
9669 }
9670
9671 case DATA_TYPE_UINT8_ARRAY: {
9672 uint8_t *val;
9673 uint_t i, nelem;
9674
9675 (void) nvpair_value_uint8_array(nvp, &val, &nelem);
9676 for (i = 0; i < nelem; i++)
9677 printf(gettext("0x%x "), val[i]);
9678
9679 break;
9680 }
9681
9682 case DATA_TYPE_INT16_ARRAY: {
9683 int16_t *val;
9684 uint_t i, nelem;
9685
9686 (void) nvpair_value_int16_array(nvp, &val, &nelem);
9687 for (i = 0; i < nelem; i++)
9688 printf(gettext("0x%x "), val[i]);
9689
9690 break;
9691 }
9692
9693 case DATA_TYPE_UINT16_ARRAY: {
9694 uint16_t *val;
9695 uint_t i, nelem;
9696
9697 (void) nvpair_value_uint16_array(nvp, &val, &nelem);
9698 for (i = 0; i < nelem; i++)
9699 printf(gettext("0x%x "), val[i]);
9700
9701 break;
9702 }
9703
9704 case DATA_TYPE_INT32_ARRAY: {
9705 int32_t *val;
9706 uint_t i, nelem;
9707
9708 (void) nvpair_value_int32_array(nvp, &val, &nelem);
9709 for (i = 0; i < nelem; i++)
9710 printf(gettext("0x%x "), val[i]);
9711
9712 break;
9713 }
9714
9715 case DATA_TYPE_UINT32_ARRAY: {
9716 uint32_t *val;
9717 uint_t i, nelem;
9718
9719 (void) nvpair_value_uint32_array(nvp, &val, &nelem);
9720 for (i = 0; i < nelem; i++)
9721 printf(gettext("0x%x "), val[i]);
9722
9723 break;
9724 }
9725
9726 case DATA_TYPE_INT64_ARRAY: {
9727 int64_t *val;
9728 uint_t i, nelem;
9729
9730 (void) nvpair_value_int64_array(nvp, &val, &nelem);
9731 for (i = 0; i < nelem; i++)
9732 printf(gettext("0x%llx "),
9733 (u_longlong_t)val[i]);
9734
9735 break;
9736 }
9737
9738 case DATA_TYPE_UINT64_ARRAY: {
9739 uint64_t *val;
9740 uint_t i, nelem;
9741
9742 (void) nvpair_value_uint64_array(nvp, &val, &nelem);
9743 for (i = 0; i < nelem; i++)
9744 printf(gettext("0x%llx "),
9745 (u_longlong_t)val[i]);
9746
9747 break;
9748 }
9749
9750 case DATA_TYPE_STRING_ARRAY: {
9751 char **str;
9752 uint_t i, nelem;
9753
9754 (void) nvpair_value_string_array(nvp, &str, &nelem);
9755 for (i = 0; i < nelem; i++)
9756 printf(gettext("\"%s\" "),
9757 str[i] ? str[i] : "<NULL>");
9758
9759 break;
9760 }
9761
9762 case DATA_TYPE_BOOLEAN_ARRAY:
9763 case DATA_TYPE_BYTE_ARRAY:
9764 case DATA_TYPE_DOUBLE:
9765 case DATA_TYPE_DONTCARE:
9766 case DATA_TYPE_UNKNOWN:
9767 printf(gettext("<unknown>"));
9768 break;
9769 }
9770
9771 printf(gettext("\n"));
9772 }
9773 }
9774
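/*
 * Read events from the kernel zevent queue and print them; with the -f
 * (follow) option, block and keep printing as new events arrive.
 */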
9775 static int
9776 zpool_do_events_next(ev_opts_t *opts)
9777 {
9778 nvlist_t *nvl;
9779 int zevent_fd, ret, dropped;
9780 char *pool;
9781
9782 zevent_fd = open(ZFS_DEV, O_RDWR);
9783 VERIFY(zevent_fd >= 0);
9784
9785 if (!opts->scripted)
9786 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
9787
9788 while (1) {
9789 ret = zpool_events_next(g_zfs, &nvl, &dropped,
9790 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
9791 if (ret || nvl == NULL)
9792 break;
9793
9794 if (dropped > 0)
9795 (void) printf(gettext("dropped %d events\n"), dropped);
9796
9797 if (strlen(opts->poolname) > 0 &&
9798 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
9799 strcmp(opts->poolname, pool) != 0)
9800 continue;
9801
9802 zpool_do_events_short(nvl, opts);
9803
9804 if (opts->verbose) {
9805 zpool_do_events_nvprint(nvl, 8);
9806 printf(gettext("\n"));
9807 }
9808 (void) fflush(stdout);
9809
9810 nvlist_free(nvl);
9811 }
9812
9813 VERIFY(0 == close(zevent_fd));
9814
9815 return (ret);
9816 }
9817
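/*
 * Clear the kernel event log and report how many events were discarded.
 */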
9818 static int
9819 zpool_do_events_clear(void)
9820 {
9821 int count, ret;
9822
9823 ret = zpool_events_clear(g_zfs, &count);
9824 if (!ret)
9825 (void) printf(gettext("cleared %d events\n"), count);
9826
9827 return (ret);
9828 }
9829
9830 /*
9831 * zpool events [-vHf [pool] | -c]
9832 *
9833 * Displays events logged by ZFS.
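 *
 * -v Print the full nvlist payload of each event.
 * -H Scripted mode: do not print the TIME/CLASS header line.
 * -f Follow mode: block and keep printing new events as they arrive.
 * -c Clear the in-kernel event log instead of displaying events.
 *
 * Example (pool name is illustrative):
 *   zpool events -v tank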
9834 */
9835 int
9836 zpool_do_events(int argc, char **argv)
9837 {
9838 ev_opts_t opts = { 0 };
9839 int ret;
9840 int c;
9841
9842 /* check options */
9843 while ((c = getopt(argc, argv, "vHfc")) != -1) {
9844 switch (c) {
9845 case 'v':
9846 opts.verbose = 1;
9847 break;
9848 case 'H':
9849 opts.scripted = 1;
9850 break;
9851 case 'f':
9852 opts.follow = 1;
9853 break;
9854 case 'c':
9855 opts.clear = 1;
9856 break;
9857 case '?':
9858 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
9859 optopt);
9860 usage(B_FALSE);
9861 }
9862 }
9863 argc -= optind;
9864 argv += optind;
9865
9866 if (argc > 1) {
9867 (void) fprintf(stderr, gettext("too many arguments\n"));
9868 usage(B_FALSE);
9869 } else if (argc == 1) {
9870 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
9871 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
9872 (void) fprintf(stderr,
9873 gettext("invalid pool name '%s'\n"), opts.poolname);
9874 usage(B_FALSE);
9875 }
9876 }
9877
9878 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
9879 opts.clear) {
9880 (void) fprintf(stderr,
9881 gettext("invalid options combined with -c\n"));
9882 usage(B_FALSE);
9883 }
9884
9885 if (opts.clear)
9886 ret = zpool_do_events_clear();
9887 else
9888 ret = zpool_do_events_next(&opts);
9889
9890 return (ret);
9891 }
9892
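/*
 * Print the requested properties of a single named vdev in the given pool.
 */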
9893 static int
9894 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
9895 {
9896 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
9897 char value[ZFS_MAXPROPLEN];
9898 zprop_source_t srctype;
9899
9900 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL;
9901 pl = pl->pl_next) {
9902 char *prop_name;
9903 /*
9904 * If the first property is the pool name, it is a special
9905 * placeholder that we can skip. This will also skip
9906 * over the name property when 'all' is specified.
9907 */
9908 if (pl->pl_prop == ZPOOL_PROP_NAME &&
9909 pl == cbp->cb_proplist)
9910 continue;
9911
9912 if (pl->pl_prop == ZPROP_INVAL) {
9913 prop_name = pl->pl_user_prop;
9914 } else {
9915 prop_name = (char *)vdev_prop_to_name(pl->pl_prop);
9916 }
9917 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
9918 prop_name, value, sizeof (value), &srctype,
9919 cbp->cb_literal) == 0) {
9920 zprop_print_one_property(vdevname, cbp, prop_name,
9921 value, srctype, NULL, NULL);
9922 }
9923 }
9924
9925 return (0);
9926 }
9927
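/*
 * for_each_vdev() callback used in a first pass over the vdev tree to size
 * the property list column widths before any output is produced.
 */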
9928 static int
9929 get_callback_vdev_width_cb(void *zhp_data, nvlist_t *nv, void *data)
9930 {
9931 zpool_handle_t *zhp = zhp_data;
9932 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
9933 char *vdevname = zpool_vdev_name(g_zfs, zhp, nv,
9934 cbp->cb_vdevs.cb_name_flags);
9935 int ret;
9936
9937 /* Adjust the column widths for the vdev properties */
9938 ret = vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
9939
9940 return (ret);
9941 }
9942
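/*
 * for_each_vdev() callback used in a second pass to print the properties of
 * each vdev.
 */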
9943 static int
9944 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
9945 {
9946 zpool_handle_t *zhp = zhp_data;
9947 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
9948 char *vdevname = zpool_vdev_name(g_zfs, zhp, nv,
9949 cbp->cb_vdevs.cb_name_flags);
9950 int ret;
9951
9952 /* Display the properties */
9953 ret = get_callback_vdev(zhp, vdevname, data);
9954
9955 return (ret);
9956 }
9957
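/*
 * for_each_pool() callback: print the requested properties for one pool, or
 * for the selected vdevs of that pool when cb_type is ZFS_TYPE_VDEV.
 */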
9958 static int
9959 get_callback(zpool_handle_t *zhp, void *data)
9960 {
9961 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
9962 char value[MAXNAMELEN];
9963 zprop_source_t srctype;
9964 zprop_list_t *pl;
9965 int vid;
9966
9967 if (cbp->cb_type == ZFS_TYPE_VDEV) {
9968 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
9969 for_each_vdev(zhp, get_callback_vdev_width_cb, data);
9970 for_each_vdev(zhp, get_callback_vdev_cb, data);
9971 } else {
9972 /* Adjust column widths for vdev properties */
9973 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
9974 vid++) {
9975 vdev_expand_proplist(zhp,
9976 cbp->cb_vdevs.cb_names[vid],
9977 &cbp->cb_proplist);
9978 }
9979 /* Display the properties */
9980 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
9981 vid++) {
9982 get_callback_vdev(zhp,
9983 cbp->cb_vdevs.cb_names[vid], data);
9984 }
9985 }
9986 } else {
9987 assert(cbp->cb_type == ZFS_TYPE_POOL);
9988 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
9989 /*
9990 * Skip the special fake placeholder. This will also
9991 * skip over the name property when 'all' is specified.
9992 */
9993 if (pl->pl_prop == ZPOOL_PROP_NAME &&
9994 pl == cbp->cb_proplist)
9995 continue;
9996
9997 if (pl->pl_prop == ZPROP_INVAL &&
9998 (zpool_prop_feature(pl->pl_user_prop) ||
9999 zpool_prop_unsupported(pl->pl_user_prop))) {
10000 srctype = ZPROP_SRC_LOCAL;
10001
10002 if (zpool_prop_get_feature(zhp,
10003 pl->pl_user_prop, value,
10004 sizeof (value)) == 0) {
10005 zprop_print_one_property(
10006 zpool_get_name(zhp), cbp,
10007 pl->pl_user_prop, value, srctype,
10008 NULL, NULL);
10009 }
10010 } else {
10011 if (zpool_get_prop(zhp, pl->pl_prop, value,
10012 sizeof (value), &srctype,
10013 cbp->cb_literal) != 0)
10014 continue;
10015
10016 zprop_print_one_property(zpool_get_name(zhp),
10017 cbp, zpool_prop_to_name(pl->pl_prop),
10018 value, srctype, NULL, NULL);
10019 }
10020 }
10021 }
10022
10023 return (0);
10024 }
10025
10026 /*
10027 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
10028 *
10029 * -H Scripted mode. Don't display headers, and separate properties
10030 * by a single tab.
10031 * -o List of columns to display. Defaults to
10032 * "name,property,value,source".
10033 * -p Display values in parsable (exact) format.
10034 *
10035 * Retrieves the given list of properties (or all properties if 'all' is
10036 * specified) for the requested pools, or for specific vdevs of a pool.
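 *
 * Examples (pool and vdev names are illustrative):
 *   zpool get all tank
 *   zpool get -Hp size,capacity tank
 *   zpool get all tank mirror-0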
10037 */
10038 int
10039 zpool_do_get(int argc, char **argv)
10040 {
10041 zprop_get_cbdata_t cb = { 0 };
10042 zprop_list_t fake_name = { 0 };
10043 int ret;
10044 int c, i;
10045 char *propstr = NULL;
10046
10047 cb.cb_first = B_TRUE;
10048
10049 /*
10050 * Set up default columns and sources.
10051 */
10052 cb.cb_sources = ZPROP_SRC_ALL;
10053 cb.cb_columns[0] = GET_COL_NAME;
10054 cb.cb_columns[1] = GET_COL_PROPERTY;
10055 cb.cb_columns[2] = GET_COL_VALUE;
10056 cb.cb_columns[3] = GET_COL_SOURCE;
10057 cb.cb_type = ZFS_TYPE_POOL;
10058 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
10059 current_prop_type = cb.cb_type;
10060
10061 /* check options */
10062 while ((c = getopt(argc, argv, ":Hpo:")) != -1) {
10063 switch (c) {
10064 case 'p':
10065 cb.cb_literal = B_TRUE;
10066 break;
10067 case 'H':
10068 cb.cb_scripted = B_TRUE;
10069 break;
10070 case 'o':
10071 memset(&cb.cb_columns, 0, sizeof (cb.cb_columns));
10072 i = 0;
10073
10074 for (char *tok; (tok = strsep(&optarg, ",")); ) {
10075 static const char *const col_opts[] =
10076 { "name", "property", "value", "source",
10077 "all" };
10078 static const zfs_get_column_t col_cols[] =
10079 { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE,
10080 GET_COL_SOURCE };
10081
10082 if (i == ZFS_GET_NCOLS - 1) {
10083 (void) fprintf(stderr, gettext("too "
10084 "many fields given to -o "
10085 "option\n"));
10086 usage(B_FALSE);
10087 }
10088
10089 for (c = 0; c < ARRAY_SIZE(col_opts); ++c)
10090 if (strcmp(tok, col_opts[c]) == 0)
10091 goto found;
10092
10093 (void) fprintf(stderr,
10094 gettext("invalid column name '%s'\n"), tok);
10095 usage(B_FALSE);
10096
10097 found:
10098 if (c >= 4) {
10099 if (i > 0) {
10100 (void) fprintf(stderr,
10101 gettext("\"all\" conflicts "
10102 "with specific fields "
10103 "given to -o option\n"));
10104 usage(B_FALSE);
10105 }
10106
10107 memcpy(cb.cb_columns, col_cols,
10108 sizeof (col_cols));
10109 i = ZFS_GET_NCOLS - 1;
10110 } else
10111 cb.cb_columns[i++] = col_cols[c];
10112 }
10113 break;
10114 case '?':
10115 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10116 optopt);
10117 usage(B_FALSE);
10118 }
10119 }
10120
10121 argc -= optind;
10122 argv += optind;
10123
10124 if (argc < 1) {
10125 (void) fprintf(stderr, gettext("missing property "
10126 "argument\n"));
10127 usage(B_FALSE);
10128 }
10129
10130 /* Properties list is needed later by zprop_get_list() */
10131 propstr = argv[0];
10132
10133 argc--;
10134 argv++;
10135
10136 if (argc == 0) {
10137 /* No args, so just print the defaults. */
10138 } else if (are_all_pools(argc, argv)) {
10139 /* All the args are pool names */
10140 } else if (are_all_pools(1, argv)) {
10141 /* The first arg is a pool name */
10142 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
10143 are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
10144 &cb.cb_vdevs)) {
10145 /* ... and the rest are vdev names */
10146 cb.cb_vdevs.cb_names = argv + 1;
10147 cb.cb_vdevs.cb_names_count = argc - 1;
10148 cb.cb_type = ZFS_TYPE_VDEV;
10149 argc = 1; /* One pool to process */
10150 } else {
10151 fprintf(stderr, gettext("Expected a list of vdevs in"
10152 " \"%s\", but got:\n"), argv[0]);
10153 error_list_unresolved_vdevs(argc - 1, argv + 1,
10154 argv[0], &cb.cb_vdevs);
10155 fprintf(stderr, "\n");
10156 usage(B_FALSE);
10157 return (1);
10158 }
10159 } else {
10160 /*
10161 * The first arg isn't a pool name.
10162 */
10163 fprintf(stderr, gettext("missing pool name.\n"));
10164 fprintf(stderr, "\n");
10165 usage(B_FALSE);
10166 return (1);
10167 }
10168
10169 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist,
10170 cb.cb_type) != 0) {
10171 /* Use correct list of valid properties (pool or vdev) */
10172 current_prop_type = cb.cb_type;
10173 usage(B_FALSE);
10174 }
10175
10176 if (cb.cb_proplist != NULL) {
10177 fake_name.pl_prop = ZPOOL_PROP_NAME;
10178 fake_name.pl_width = strlen(gettext("NAME"));
10179 fake_name.pl_next = cb.cb_proplist;
10180 cb.cb_proplist = &fake_name;
10181 }
10182
10183 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type,
10184 cb.cb_literal, get_callback, &cb);
10185
10186 if (cb.cb_proplist == &fake_name)
10187 zprop_free_list(fake_name.pl_next);
10188 else
10189 zprop_free_list(cb.cb_proplist);
10190
10191 return (ret);
10192 }
10193
10194 typedef struct set_cbdata {
10195 char *cb_propname;
10196 char *cb_value;
10197 zfs_type_t cb_type;
10198 vdev_cbdata_t cb_vdevs;
10199 boolean_t cb_any_successful;
10200 } set_cbdata_t;
10201
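/*
 * Set a single pool property.  Setting 'compatibility' warns if features
 * already enabled on the pool are absent from the new set; enabling a
 * feature@ property fails if the feature is not allowed by the pool's
 * current compatibility setting.
 */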
10202 static int
10203 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb)
10204 {
10205 int error;
10206
10207 /* Check if we have out-of-bounds features */
10208 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
10209 boolean_t features[SPA_FEATURES];
10210 if (zpool_do_load_compat(cb->cb_value, features) !=
10211 ZPOOL_COMPATIBILITY_OK)
10212 return (-1);
10213
10214 nvlist_t *enabled = zpool_get_features(zhp);
10215 spa_feature_t i;
10216 for (i = 0; i < SPA_FEATURES; i++) {
10217 const char *fguid = spa_feature_table[i].fi_guid;
10218 if (nvlist_exists(enabled, fguid) && !features[i])
10219 break;
10220 }
10221 if (i < SPA_FEATURES)
10222 (void) fprintf(stderr, gettext("Warning: one or "
10223 "more features already enabled on pool '%s'\n"
10224 "are not present in this compatibility set.\n"),
10225 zpool_get_name(zhp));
10226 }
10227
10228 /* if we're setting a feature, check that it's in the compatibility set */
10229 if (zpool_prop_feature(cb->cb_propname) &&
10230 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
10231 char *fname = strchr(cb->cb_propname, '@') + 1;
10232 spa_feature_t f;
10233
10234 if (zfeature_lookup_name(fname, &f) == 0) {
10235 char compat[ZFS_MAXPROPLEN];
10236 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
10237 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
10238 compat[0] = '\0';
10239
10240 boolean_t features[SPA_FEATURES];
10241 if (zpool_do_load_compat(compat, features) !=
10242 ZPOOL_COMPATIBILITY_OK) {
10243 (void) fprintf(stderr, gettext("Error: "
10244 "cannot enable feature '%s' on pool '%s'\n"
10245 "because the pool's 'compatibility' "
10246 "property cannot be parsed.\n"),
10247 fname, zpool_get_name(zhp));
10248 return (-1);
10249 }
10250
10251 if (!features[f]) {
10252 (void) fprintf(stderr, gettext("Error: "
10253 "cannot enable feature '%s' on pool '%s'\n"
10254 "as it is not specified in this pool's "
10255 "current compatibility set.\n"
10256 "Consider setting 'compatibility' to a "
10257 "less restrictive set, or to 'off'.\n"),
10258 fname, zpool_get_name(zhp));
10259 return (-1);
10260 }
10261 }
10262 }
10263
10264 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
10265
10266 return (error);
10267 }
10268
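/*
 * for_each_pool() callback: set a vdev property or a pool property,
 * depending on cb_type.
 */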
10269 static int
10270 set_callback(zpool_handle_t *zhp, void *data)
10271 {
10272 int error;
10273 set_cbdata_t *cb = (set_cbdata_t *)data;
10274
10275 if (cb->cb_type == ZFS_TYPE_VDEV) {
10276 error = zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names,
10277 cb->cb_propname, cb->cb_value);
10278 } else {
10279 assert(cb->cb_type == ZFS_TYPE_POOL);
10280 error = set_pool_callback(zhp, cb);
10281 }
10282
10283 cb->cb_any_successful = !error;
10284 return (error);
10285 }
10286
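/*
 * zpool set property=value <pool>
 * zpool set property=value <pool> <vdev>
 *
 * Set a pool property, or a vdev property when a vdev name is also given.
 */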
10287 int
10288 zpool_do_set(int argc, char **argv)
10289 {
10290 set_cbdata_t cb = { 0 };
10291 int error;
10292
10293 current_prop_type = ZFS_TYPE_POOL;
10294 if (argc > 1 && argv[1][0] == '-') {
10295 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10296 argv[1][1]);
10297 usage(B_FALSE);
10298 }
10299
10300 if (argc < 2) {
10301 (void) fprintf(stderr, gettext("missing property=value "
10302 "argument\n"));
10303 usage(B_FALSE);
10304 }
10305
10306 if (argc < 3) {
10307 (void) fprintf(stderr, gettext("missing pool name\n"));
10308 usage(B_FALSE);
10309 }
10310
10311 if (argc > 4) {
10312 (void) fprintf(stderr, gettext("too many pool names\n"));
10313 usage(B_FALSE);
10314 }
10315
10316 cb.cb_propname = argv[1];
10317 cb.cb_type = ZFS_TYPE_POOL;
10318 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID;
10319 cb.cb_value = strchr(cb.cb_propname, '=');
10320 if (cb.cb_value == NULL) {
10321 (void) fprintf(stderr, gettext("missing value in "
10322 "property=value argument\n"));
10323 usage(B_FALSE);
10324 }
10325
10326 *(cb.cb_value) = '\0';
10327 cb.cb_value++;
10328 argc -= 2;
10329 argv += 2;
10330
10331 /* argv[0] is pool name */
10332 if (!is_pool(argv[0])) {
10333 (void) fprintf(stderr,
10334 gettext("cannot open '%s': is not a pool\n"), argv[0]);
10335 return (EINVAL);
10336 }
10337
10338 /* argv[1], when supplied, is vdev name */
10339 if (argc == 2) {
10340 if (!are_vdevs_in_pool(1, argv + 1, argv[0], &cb.cb_vdevs)) {
10341 (void) fprintf(stderr, gettext(
10342 "cannot find '%s' in '%s': device not in pool\n"),
10343 argv[1], argv[0]);
10344 return (EINVAL);
10345 }
10346 cb.cb_vdevs.cb_names = argv + 1;
10347 cb.cb_vdevs.cb_names_count = 1;
10348 cb.cb_type = ZFS_TYPE_VDEV;
10349 }
10350
10351 error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
10352 B_FALSE, set_callback, &cb);
10353
10354 return (error);
10355 }
10356
10357 /* Add up the total number of bytes left to initialize/trim across all vdevs */
10358 static uint64_t
10359 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
10360 {
10361 uint64_t bytes_remaining;
10362 nvlist_t **child;
10363 uint_t c, children;
10364 vdev_stat_t *vs;
10365
10366 assert(activity == ZPOOL_WAIT_INITIALIZE ||
10367 activity == ZPOOL_WAIT_TRIM);
10368
10369 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
10370 (uint64_t **)&vs, &c) == 0);
10371
10372 if (activity == ZPOOL_WAIT_INITIALIZE &&
10373 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
10374 bytes_remaining = vs->vs_initialize_bytes_est -
10375 vs->vs_initialize_bytes_done;
10376 else if (activity == ZPOOL_WAIT_TRIM &&
10377 vs->vs_trim_state == VDEV_TRIM_ACTIVE)
10378 bytes_remaining = vs->vs_trim_bytes_est -
10379 vs->vs_trim_bytes_done;
10380 else
10381 bytes_remaining = 0;
10382
10383 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
10384 &child, &children) != 0)
10385 children = 0;
10386
10387 for (c = 0; c < children; c++)
10388 bytes_remaining += vdev_activity_remaining(child[c], activity);
10389
10390 return (bytes_remaining);
10391 }
10392
10393 /* Add up the total number of bytes left to rebuild across top-level vdevs */
10394 static uint64_t
10395 vdev_activity_top_remaining(nvlist_t *nv)
10396 {
10397 uint64_t bytes_remaining = 0;
10398 nvlist_t **child;
10399 uint_t children;
10400 int error;
10401
10402 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
10403 &child, &children) != 0)
10404 children = 0;
10405
10406 for (uint_t c = 0; c < children; c++) {
10407 vdev_rebuild_stat_t *vrs;
10408 uint_t i;
10409
10410 error = nvlist_lookup_uint64_array(child[c],
10411 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
10412 if (error == 0) {
10413 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
10414 bytes_remaining += (vrs->vrs_bytes_est -
10415 vrs->vrs_bytes_rebuilt);
10416 }
10417 }
10418 }
10419
10420 return (bytes_remaining);
10421 }
10422
10423 /* Whether any vdevs are 'spare' or 'replacing' vdevs */
10424 static boolean_t
10425 vdev_any_spare_replacing(nvlist_t *nv)
10426 {
10427 nvlist_t **child;
10428 uint_t c, children;
10429 char *vdev_type;
10430
10431 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
10432
10433 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
10434 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
10435 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
10436 return (B_TRUE);
10437 }
10438
10439 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
10440 &child, &children) != 0)
10441 children = 0;
10442
10443 for (c = 0; c < children; c++) {
10444 if (vdev_any_spare_replacing(child[c]))
10445 return (B_TRUE);
10446 }
10447
10448 return (B_FALSE);
10449 }
10450
10451 typedef struct wait_data {
10452 char *wd_poolname;
10453 boolean_t wd_scripted;
10454 boolean_t wd_exact;
10455 boolean_t wd_headers_once;
10456 boolean_t wd_should_exit;
10457 /* Which activities to wait for */
10458 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
10459 float wd_interval;
10460 pthread_cond_t wd_cv;
10461 pthread_mutex_t wd_mutex;
10462 } wait_data_t;
10463
10464 /*
10465 * Print a single line to stdout, containing one column for each activity we
10466 * are waiting for, specifying how many bytes of work are left for that
10467 * activity.
10468 */
10469 static void
10470 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
10471 {
10472 nvlist_t *config, *nvroot;
10473 uint_t c;
10474 int i;
10475 pool_checkpoint_stat_t *pcs = NULL;
10476 pool_scan_stat_t *pss = NULL;
10477 pool_removal_stat_t *prs = NULL;
10478 const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
10479 "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM"};
10480 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
10481
10482 /* Calculate the width of each column */
10483 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
10484 /*
10485 * Make sure we have enough space in the col for pretty-printed
10486 * numbers and for the column header, and then leave a couple
10487 * spaces between cols for readability.
10488 */
10489 col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
10490 }
10491
10492 /* Print header if appropriate */
10493 int term_height = terminal_height();
10494 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
10495 row % (term_height-1) == 0);
10496 if (!wd->wd_scripted && (row == 0 || reprint_header)) {
10497 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
10498 if (wd->wd_enabled[i])
10499 (void) printf("%*s", col_widths[i], headers[i]);
10500 }
10501 (void) fputc('\n', stdout);
10502 }
10503
10504 /* Bytes of work remaining in each activity */
10505 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
10506
10507 bytes_rem[ZPOOL_WAIT_FREE] =
10508 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
10509
10510 config = zpool_get_config(zhp, NULL);
10511 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
10512
10513 (void) nvlist_lookup_uint64_array(nvroot,
10514 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
10515 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
10516 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
10517
10518 (void) nvlist_lookup_uint64_array(nvroot,
10519 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
10520 if (prs != NULL && prs->prs_state == DSS_SCANNING)
10521 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
10522 prs->prs_copied;
10523
10524 (void) nvlist_lookup_uint64_array(nvroot,
10525 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
10526 if (pss != NULL && pss->pss_state == DSS_SCANNING &&
10527 pss->pss_pass_scrub_pause == 0) {
10528 int64_t rem = pss->pss_to_examine - pss->pss_issued;
10529 if (pss->pss_func == POOL_SCAN_SCRUB)
10530 bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
10531 else
10532 bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
10533 } else if (check_rebuilding(nvroot, NULL)) {
10534 bytes_rem[ZPOOL_WAIT_RESILVER] =
10535 vdev_activity_top_remaining(nvroot);
10536 }
10537
10538 bytes_rem[ZPOOL_WAIT_INITIALIZE] =
10539 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
10540 bytes_rem[ZPOOL_WAIT_TRIM] =
10541 vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
10542
10543 /*
10544 * A replace finishes after resilvering finishes, so the amount of work
10545 * left for a replace is the same as for resilvering.
10546 *
10547 * It isn't quite correct to say that if we have any 'spare' or
10548 * 'replacing' vdevs and a resilver is happening, then a replace is in
10549 * progress, like we do here. When a hot spare is used, the faulted vdev
10550 * is not removed after the hot spare is resilvered, so the parent 'spare'
10551 * vdev is not removed either. So we could have a 'spare' vdev, but be
10552 * resilvering for a different reason. However, we use it as a heuristic
10553 * because we don't have access to the DTLs, which could tell us whether
10554 * or not we have really finished resilvering a hot spare.
10555 */
10556 if (vdev_any_spare_replacing(nvroot))
10557 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];
10558
10559 if (timestamp_fmt != NODATE)
10560 print_timestamp(timestamp_fmt);
10561
10562 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
10563 char buf[64];
10564 if (!wd->wd_enabled[i])
10565 continue;
10566
10567 if (wd->wd_exact)
10568 (void) snprintf(buf, sizeof (buf), "%" PRIi64,
10569 bytes_rem[i]);
10570 else
10571 zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
10572
10573 if (wd->wd_scripted)
10574 (void) printf(i == 0 ? "%s" : "\t%s", buf);
10575 else
10576 (void) printf(" %*s", col_widths[i] - 1, buf);
10577 }
10578 (void) printf("\n");
10579 (void) fflush(stdout);
10580 }
10581
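/*
 * Thread body used by 'zpool wait' to print periodic progress rows while the
 * main thread blocks in zpool_wait_status().  Prints a row every wd_interval
 * seconds and exits when signaled on wd_cv or on error.
 */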
10582 static void *
10583 wait_status_thread(void *arg)
10584 {
10585 wait_data_t *wd = (wait_data_t *)arg;
10586 zpool_handle_t *zhp;
10587
10588 if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
10589 return (void *)(1);
10590
10591 for (int row = 0; ; row++) {
10592 boolean_t missing;
10593 struct timespec timeout;
10594 int ret = 0;
10595 (void) clock_gettime(CLOCK_REALTIME, &timeout);
10596
10597 if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
10598 zpool_props_refresh(zhp) != 0) {
10599 zpool_close(zhp);
10600 return (void *)(uintptr_t)(missing ? 0 : 1);
10601 }
10602
10603 print_wait_status_row(wd, zhp, row);
10604
10605 timeout.tv_sec += floor(wd->wd_interval);
10606 long nanos = timeout.tv_nsec +
10607 (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
10608 if (nanos >= NANOSEC) {
10609 timeout.tv_sec++;
10610 timeout.tv_nsec = nanos - NANOSEC;
10611 } else {
10612 timeout.tv_nsec = nanos;
10613 }
10614 pthread_mutex_lock(&wd->wd_mutex);
10615 if (!wd->wd_should_exit)
10616 ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
10617 &timeout);
10618 pthread_mutex_unlock(&wd->wd_mutex);
10619 if (ret == 0) {
10620 break; /* signaled by main thread */
10621 } else if (ret != ETIMEDOUT) {
10622 (void) fprintf(stderr, gettext("pthread_cond_timedwait "
10623 "failed: %s\n"), strerror(ret));
10624 zpool_close(zhp);
10625 return (void *)(uintptr_t)(1);
10626 }
10627 }
10628
10629 zpool_close(zhp);
10630 return (void *)(0);
10631 }
10632
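/*
 * zpool wait [-Hp] [-T d|u] [-t <activity>[,...]] <pool> [interval]
 *
 * Block until none of the requested activities (all of them by default) are
 * in progress on the pool.  When an interval is given, print the remaining
 * work for each activity every <interval> seconds while waiting.
 */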
10633 int
10634 zpool_do_wait(int argc, char **argv)
10635 {
10636 boolean_t verbose = B_FALSE;
10637 int c, i;
10638 unsigned long count;
10639 pthread_t status_thr;
10640 int error = 0;
10641 zpool_handle_t *zhp;
10642
10643 wait_data_t wd;
10644 wd.wd_scripted = B_FALSE;
10645 wd.wd_exact = B_FALSE;
10646 wd.wd_headers_once = B_FALSE;
10647 wd.wd_should_exit = B_FALSE;
10648
10649 pthread_mutex_init(&wd.wd_mutex, NULL);
10650 pthread_cond_init(&wd.wd_cv, NULL);
10651
10652 /* By default, wait for all types of activity. */
10653 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
10654 wd.wd_enabled[i] = B_TRUE;
10655
10656 while ((c = getopt(argc, argv, "HnpT:t:")) != -1) {
10657 switch (c) {
10658 case 'H':
10659 wd.wd_scripted = B_TRUE;
10660 break;
10661 case 'n':
10662 wd.wd_headers_once = B_TRUE;
10663 break;
10664 case 'p':
10665 wd.wd_exact = B_TRUE;
10666 break;
10667 case 'T':
10668 get_timestamp_arg(*optarg);
10669 break;
10670 case 't':
10671 /* Reset activities array */
10672 memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled));
10673
10674 for (char *tok; (tok = strsep(&optarg, ",")); ) {
10675 static const char *const col_opts[] = {
10676 "discard", "free", "initialize", "replace",
10677 "remove", "resilver", "scrub", "trim" };
10678
10679 for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
10680 if (strcmp(tok, col_opts[i]) == 0) {
10681 wd.wd_enabled[i] = B_TRUE;
10682 goto found;
10683 }
10684
10685 (void) fprintf(stderr,
10686 gettext("invalid activity '%s'\n"), tok);
10687 usage(B_FALSE);
10688 found:;
10689 }
10690 break;
10691 case '?':
10692 (void) fprintf(stderr, gettext("invalid option '%c'\n"),
10693 optopt);
10694 usage(B_FALSE);
10695 }
10696 }
10697
10698 argc -= optind;
10699 argv += optind;
10700
10701 get_interval_count(&argc, argv, &wd.wd_interval, &count);
10702 if (count != 0) {
10703 /* This subcmd only accepts an interval, not a count */
10704 (void) fprintf(stderr, gettext("too many arguments\n"));
10705 usage(B_FALSE);
10706 }
10707
10708 if (wd.wd_interval != 0)
10709 verbose = B_TRUE;
10710
10711 if (argc < 1) {
10712 (void) fprintf(stderr, gettext("missing 'pool' argument\n"));
10713 usage(B_FALSE);
10714 }
10715 if (argc > 1) {
10716 (void) fprintf(stderr, gettext("too many arguments\n"));
10717 usage(B_FALSE);
10718 }
10719
10720 wd.wd_poolname = argv[0];
10721
10722 if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
10723 return (1);
10724
10725 if (verbose) {
10726 /*
10727 * We use a separate thread for printing status updates because
10728 * the main thread will call lzc_wait(), which blocks as long
10729 * as an activity is in progress, which can be a long time.
10730 */
10731 if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
10732 != 0) {
10733 (void) fprintf(stderr, gettext("failed to create status "
10734 "thread: %s\n"), strerror(errno));
10735 zpool_close(zhp);
10736 return (1);
10737 }
10738 }
10739
10740 /*
10741 * Loop over all activities that we are supposed to wait for until none
10742 * of them are in progress. Note that this means we can end up waiting
10743 * for more activities to complete than just those that were in progress
10744 * when we began waiting; if an activity we are interested in begins
10745 * while we are waiting for another activity, we will wait for both to
10746 * complete before exiting.
10747 */
10748 for (;;) {
10749 boolean_t missing = B_FALSE;
10750 boolean_t any_waited = B_FALSE;
10751
10752 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
10753 boolean_t waited;
10754
10755 if (!wd.wd_enabled[i])
10756 continue;
10757
10758 error = zpool_wait_status(zhp, i, &missing, &waited);
10759 if (error != 0 || missing)
10760 break;
10761
10762 any_waited = (any_waited || waited);
10763 }
10764
10765 if (error != 0 || missing || !any_waited)
10766 break;
10767 }
10768
10769 zpool_close(zhp);
10770
10771 if (verbose) {
10772 uintptr_t status;
10773 pthread_mutex_lock(&wd.wd_mutex);
10774 wd.wd_should_exit = B_TRUE;
10775 pthread_cond_signal(&wd.wd_cv);
10776 pthread_mutex_unlock(&wd.wd_mutex);
10777 (void) pthread_join(status_thr, (void *)&status);
10778 if (status != 0)
10779 error = status;
10780 }
10781
10782 pthread_mutex_destroy(&wd.wd_mutex);
10783 pthread_cond_destroy(&wd.wd_cv);
10784 return (error);
10785 }
10786
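/*
 * Look up 'command' in command_table; on success store its index in *idx
 * and return 0, otherwise return 1.
 */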
10787 static int
10788 find_command_idx(const char *command, int *idx)
10789 {
10790 for (int i = 0; i < NCOMMAND; ++i) {
10791 if (command_table[i].name == NULL)
10792 continue;
10793
10794 if (strcmp(command, command_table[i].name) == 0) {
10795 *idx = i;
10796 return (0);
10797 }
10798 }
10799 return (1);
10800 }
10801
10802 /*
10803 * Display version message
10804 */
10805 static int
10806 zpool_do_version(int argc, char **argv)
10807 {
10808 (void) argc, (void) argv;
10809 return (zfs_version_print() != 0);
10810 }
10811
10812 /*
10813 * Do zpool_load_compat() and print error message on failure
10814 */
10815 static zpool_compat_status_t
10816 zpool_do_load_compat(const char *compat, boolean_t *list)
10817 {
10818 char report[1024];
10819
10820 zpool_compat_status_t ret;
10821
10822 ret = zpool_load_compat(compat, list, report, 1024);
10823 switch (ret) {
10824
10825 case ZPOOL_COMPATIBILITY_OK:
10826 break;
10827
10828 case ZPOOL_COMPATIBILITY_NOFILES:
10829 case ZPOOL_COMPATIBILITY_BADFILE:
10830 case ZPOOL_COMPATIBILITY_BADTOKEN:
10831 (void) fprintf(stderr, "Error: %s\n", report);
10832 break;
10833
10834 case ZPOOL_COMPATIBILITY_WARNTOKEN:
10835 (void) fprintf(stderr, "Warning: %s\n", report);
10836 ret = ZPOOL_COMPATIBILITY_OK;
10837 break;
10838 }
10839 return (ret);
10840 }
10841
10842 int
10843 main(int argc, char **argv)
10844 {
10845 int ret = 0;
10846 int i = 0;
10847 char *cmdname;
10848 char **newargv;
10849
10850 (void) setlocale(LC_ALL, "");
10851 (void) setlocale(LC_NUMERIC, "C");
10852 (void) textdomain(TEXT_DOMAIN);
10853 srand(time(NULL));
10854
10855 opterr = 0;
10856
10857 /*
10858 * Make sure the user has specified some command.
10859 */
10860 if (argc < 2) {
10861 (void) fprintf(stderr, gettext("missing command\n"));
10862 usage(B_FALSE);
10863 }
10864
10865 cmdname = argv[1];
10866
10867 /*
10868 * Special case '-?'
10869 */
10870 if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
10871 usage(B_TRUE);
10872
10873 /*
10874 * Special case '-V|--version'
10875 */
10876 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
10877 return (zpool_do_version(argc, argv));
10878
10879 if ((g_zfs = libzfs_init()) == NULL) {
10880 (void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
10881 return (1);
10882 }
10883
10884 libzfs_print_on_error(g_zfs, B_TRUE);
10885
10886 zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
10887
10888 /*
10889 * Many commands modify input strings for string parsing reasons.
10890 * We create a copy to protect the original argv.
10891 */
10892 newargv = safe_malloc((argc + 1) * sizeof (newargv[0]));
10893 for (i = 0; i < argc; i++)
10894 newargv[i] = strdup(argv[i]);
10895 newargv[argc] = NULL;
10896
10897 /*
10898 * Run the appropriate command.
10899 */
10900 if (find_command_idx(cmdname, &i) == 0) {
10901 current_command = &command_table[i];
10902 ret = command_table[i].func(argc - 1, newargv + 1);
10903 } else if (strchr(cmdname, '=')) {
10904 verify(find_command_idx("set", &i) == 0);
10905 current_command = &command_table[i];
10906 ret = command_table[i].func(argc, newargv);
10907 } else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
10908 /*
10909 * 'freeze' is a vile debugging abomination, so we treat
10910 * it as such.
10911 */
10912 zfs_cmd_t zc = {"\0"};
10913
10914 (void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
10915 ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
10916 if (ret != 0) {
10917 (void) fprintf(stderr,
10918 gettext("failed to freeze pool: %d\n"), errno);
10919 ret = 1;
10920 }
10921
10922 log_history = 0;
10923 } else {
10924 (void) fprintf(stderr, gettext("unrecognized "
10925 "command '%s'\n"), cmdname);
10926 usage(B_FALSE);
10927 ret = 1;
10928 }
10929
10930 for (i = 0; i < argc; i++)
10931 free(newargv[i]);
10932 free(newargv);
10933
10934 if (ret == 0 && log_history)
10935 (void) zpool_log_history(g_zfs, history_str);
10936
10937 libzfs_fini(g_zfs);
10938
10939 /*
10940 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
10941 * for the purposes of running ::findleaks.
10942 */
10943 if (getenv("ZFS_ABORT") != NULL) {
10944 (void) printf("dumping core by request\n");
10945 abort();
10946 }
10947
10948 return (ret);
10949 }