FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_shutdown.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-3-Clause
    3  *
    4  * Copyright (c) 1986, 1988, 1991, 1993
    5  *      The Regents of the University of California.  All rights reserved.
    6  * (c) UNIX System Laboratories, Inc.
    7  * All or some portions of this file are derived from material licensed
    8  * to the University of California by American Telephone and Telegraph
    9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
   10  * the permission of UNIX System Laboratories, Inc.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. Neither the name of the University nor the names of its contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  *      @(#)kern_shutdown.c     8.3 (Berkeley) 1/21/94
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __FBSDID("$FreeBSD$");
   41 
   42 #include "opt_ddb.h"
   43 #include "opt_ekcd.h"
   44 #include "opt_kdb.h"
   45 #include "opt_panic.h"
   46 #include "opt_printf.h"
   47 #include "opt_sched.h"
   48 #include "opt_watchdog.h"
   49 
   50 #include <sys/param.h>
   51 #include <sys/systm.h>
   52 #include <sys/bio.h>
   53 #include <sys/boottrace.h>
   54 #include <sys/buf.h>
   55 #include <sys/conf.h>
   56 #include <sys/compressor.h>
   57 #include <sys/cons.h>
   58 #include <sys/disk.h>
   59 #include <sys/eventhandler.h>
   60 #include <sys/filedesc.h>
   61 #include <sys/jail.h>
   62 #include <sys/kdb.h>
   63 #include <sys/kernel.h>
   64 #include <sys/kerneldump.h>
   65 #include <sys/kthread.h>
   66 #include <sys/ktr.h>
   67 #include <sys/malloc.h>
   68 #include <sys/mbuf.h>
   69 #include <sys/mount.h>
   70 #include <sys/priv.h>
   71 #include <sys/proc.h>
   72 #include <sys/reboot.h>
   73 #include <sys/resourcevar.h>
   74 #include <sys/rwlock.h>
   75 #include <sys/sbuf.h>
   76 #include <sys/sched.h>
   77 #include <sys/smp.h>
   78 #include <sys/sysctl.h>
   79 #include <sys/sysproto.h>
   80 #include <sys/taskqueue.h>
   81 #include <sys/vnode.h>
   82 #include <sys/watchdog.h>
   83 
   84 #include <crypto/chacha20/chacha.h>
   85 #include <crypto/rijndael/rijndael-api-fst.h>
   86 #include <crypto/sha2/sha256.h>
   87 
   88 #include <ddb/ddb.h>
   89 
   90 #include <machine/cpu.h>
   91 #include <machine/dump.h>
   92 #include <machine/pcb.h>
   93 #include <machine/smp.h>
   94 
   95 #include <security/mac/mac_framework.h>
   96 
   97 #include <vm/vm.h>
   98 #include <vm/vm_object.h>
   99 #include <vm/vm_page.h>
  100 #include <vm/vm_pager.h>
  101 #include <vm/swap_pager.h>
  102 
  103 #include <sys/signalvar.h>
  104 
  105 static MALLOC_DEFINE(M_DUMPER, "dumper", "dumper block buffer");
  106 
  107 #ifndef PANIC_REBOOT_WAIT_TIME
  108 #define PANIC_REBOOT_WAIT_TIME 15 /* default to 15 seconds */
  109 #endif
  110 static int panic_reboot_wait_time = PANIC_REBOOT_WAIT_TIME;
  111 SYSCTL_INT(_kern, OID_AUTO, panic_reboot_wait_time, CTLFLAG_RWTUN,
  112     &panic_reboot_wait_time, 0,
  113     "Seconds to wait before rebooting after a panic");
  114 static int reboot_wait_time = 0;
  115 SYSCTL_INT(_kern, OID_AUTO, reboot_wait_time, CTLFLAG_RWTUN,
  116     &reboot_wait_time, 0,
  117     "Seconds to wait before rebooting");
  118 
  119 /*
   120  * Note that stdarg.h and the ANSI-style va_start macro are used for both
  121  * ANSI and traditional C compilers.
  122  */
  123 #include <machine/stdarg.h>
  124 
  125 #ifdef KDB
  126 #ifdef KDB_UNATTENDED
  127 int debugger_on_panic = 0;
  128 #else
  129 int debugger_on_panic = 1;
  130 #endif
  131 SYSCTL_INT(_debug, OID_AUTO, debugger_on_panic,
  132     CTLFLAG_RWTUN | CTLFLAG_SECURE,
  133     &debugger_on_panic, 0, "Run debugger on kernel panic");
  134 
  135 static bool debugger_on_recursive_panic = false;
  136 SYSCTL_BOOL(_debug, OID_AUTO, debugger_on_recursive_panic,
  137     CTLFLAG_RWTUN | CTLFLAG_SECURE,
  138     &debugger_on_recursive_panic, 0, "Run debugger on recursive kernel panic");
  139 
  140 int debugger_on_trap = 0;
  141 SYSCTL_INT(_debug, OID_AUTO, debugger_on_trap,
  142     CTLFLAG_RWTUN | CTLFLAG_SECURE,
  143     &debugger_on_trap, 0, "Run debugger on kernel trap before panic");
  144 
  145 #ifdef KDB_TRACE
  146 static int trace_on_panic = 1;
  147 static bool trace_all_panics = true;
  148 #else
  149 static int trace_on_panic = 0;
  150 static bool trace_all_panics = false;
  151 #endif
  152 SYSCTL_INT(_debug, OID_AUTO, trace_on_panic,
  153     CTLFLAG_RWTUN | CTLFLAG_SECURE,
  154     &trace_on_panic, 0, "Print stack trace on kernel panic");
  155 SYSCTL_BOOL(_debug, OID_AUTO, trace_all_panics, CTLFLAG_RWTUN,
  156     &trace_all_panics, 0, "Print stack traces on secondary kernel panics");
  157 #endif /* KDB */
  158 
  159 static int sync_on_panic = 0;
  160 SYSCTL_INT(_kern, OID_AUTO, sync_on_panic, CTLFLAG_RWTUN,
  161         &sync_on_panic, 0, "Do a sync before rebooting from a panic");
  162 
   163 static bool poweroff_on_panic = false;
  164 SYSCTL_BOOL(_kern, OID_AUTO, poweroff_on_panic, CTLFLAG_RWTUN,
  165         &poweroff_on_panic, 0, "Do a power off instead of a reboot on a panic");
  166 
   167 static bool powercycle_on_panic = false;
  168 SYSCTL_BOOL(_kern, OID_AUTO, powercycle_on_panic, CTLFLAG_RWTUN,
  169         &powercycle_on_panic, 0, "Do a power cycle instead of a reboot on a panic");
  170 
  171 static SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
  172     "Shutdown environment");
  173 
  174 #ifndef DIAGNOSTIC
  175 static int show_busybufs;
  176 #else
  177 static int show_busybufs = 1;
  178 #endif
  179 SYSCTL_INT(_kern_shutdown, OID_AUTO, show_busybufs, CTLFLAG_RW,
  180     &show_busybufs, 0,
  181     "Show busy buffers during shutdown");
  182 
  183 int suspend_blocked = 0;
  184 SYSCTL_INT(_kern, OID_AUTO, suspend_blocked, CTLFLAG_RW,
  185         &suspend_blocked, 0, "Block suspend due to a pending shutdown");
  186 
  187 #ifdef EKCD
  188 FEATURE(ekcd, "Encrypted kernel crash dumps support");
  189 
  190 MALLOC_DEFINE(M_EKCD, "ekcd", "Encrypted kernel crash dumps data");
  191 
  192 struct kerneldumpcrypto {
  193         uint8_t                 kdc_encryption;
  194         uint8_t                 kdc_iv[KERNELDUMP_IV_MAX_SIZE];
  195         union {
  196                 struct {
  197                         keyInstance     aes_ki;
  198                         cipherInstance  aes_ci;
  199                 } u_aes;
  200                 struct chacha_ctx       u_chacha;
  201         } u;
  202 #define kdc_ki  u.u_aes.aes_ki
  203 #define kdc_ci  u.u_aes.aes_ci
  204 #define kdc_chacha      u.u_chacha
  205         uint32_t                kdc_dumpkeysize;
  206         struct kerneldumpkey    kdc_dumpkey[];
  207 };
  208 #endif
  209 
  210 struct kerneldumpcomp {
  211         uint8_t                 kdc_format;
  212         struct compressor       *kdc_stream;
  213         uint8_t                 *kdc_buf;
  214         size_t                  kdc_resid;
  215 };
  216 
  217 static struct kerneldumpcomp *kerneldumpcomp_create(struct dumperinfo *di,
  218                     uint8_t compression);
  219 static void     kerneldumpcomp_destroy(struct dumperinfo *di);
  220 static int      kerneldumpcomp_write_cb(void *base, size_t len, off_t off, void *arg);
  221 
  222 static int kerneldump_gzlevel = 6;
  223 SYSCTL_INT(_kern, OID_AUTO, kerneldump_gzlevel, CTLFLAG_RWTUN,
  224     &kerneldump_gzlevel, 0,
  225     "Kernel crash dump compression level");
  226 
  227 /*
   228  * The variable panicstr contains the argument to the first call to panic;
   229  * it is used as a flag to indicate that the kernel has already called panic.
  230  */
  231 const char *panicstr;
  232 bool __read_frequently panicked;
  233 
  234 int __read_mostly dumping;              /* system is dumping */
  235 int rebooting;                          /* system is rebooting */
  236 /*
  237  * Used to serialize between sysctl kern.shutdown.dumpdevname and list
  238  * modifications via ioctl.
  239  */
  240 static struct mtx dumpconf_list_lk;
  241 MTX_SYSINIT(dumper_configs, &dumpconf_list_lk, "dumper config list", MTX_DEF);
  242 
  243 /* Our selected dumper(s). */
  244 static TAILQ_HEAD(dumpconflist, dumperinfo) dumper_configs =
  245     TAILQ_HEAD_INITIALIZER(dumper_configs);
  246 
  247 /* Context information for dump-debuggers. */
  248 static struct pcb dumppcb;              /* Registers. */
  249 lwpid_t dumptid;                        /* Thread ID. */
  250 
  251 static struct cdevsw reroot_cdevsw = {
  252      .d_version = D_VERSION,
  253      .d_name    = "reroot",
  254 };
  255 
  256 static void poweroff_wait(void *, int);
  257 static void shutdown_halt(void *junk, int howto);
  258 static void shutdown_panic(void *junk, int howto);
  259 static void shutdown_reset(void *junk, int howto);
  260 static int kern_reroot(void);
  261 
  262 /* register various local shutdown events */
  263 static void
  264 shutdown_conf(void *unused)
  265 {
  266 
  267         EVENTHANDLER_REGISTER(shutdown_final, poweroff_wait, NULL,
  268             SHUTDOWN_PRI_FIRST);
  269         EVENTHANDLER_REGISTER(shutdown_final, shutdown_halt, NULL,
  270             SHUTDOWN_PRI_LAST + 100);
  271         EVENTHANDLER_REGISTER(shutdown_final, shutdown_panic, NULL,
  272             SHUTDOWN_PRI_LAST + 100);
  273 }
  274 
  275 SYSINIT(shutdown_conf, SI_SUB_INTRINSIC, SI_ORDER_ANY, shutdown_conf, NULL);
  276 
  277 /*
  278  * The only reason this exists is to create the /dev/reroot/ directory,
   279  * used by the reroot code in init(8) as a mountpoint for a tmpfs.
  280  */
  281 static void
  282 reroot_conf(void *unused)
  283 {
  284         int error;
  285         struct cdev *cdev;
  286 
  287         error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &cdev,
  288             &reroot_cdevsw, NULL, UID_ROOT, GID_WHEEL, 0600, "reroot/reroot");
  289         if (error != 0) {
  290                 printf("%s: failed to create device node, error %d",
  291                     __func__, error);
  292         }
  293 }
  294 
  295 SYSINIT(reroot_conf, SI_SUB_DEVFS, SI_ORDER_ANY, reroot_conf, NULL);
  296 
  297 /*
  298  * The system call that results in a reboot.
  299  */
  300 /* ARGSUSED */
  301 int
  302 sys_reboot(struct thread *td, struct reboot_args *uap)
  303 {
  304         int error;
  305 
  306         error = 0;
  307 #ifdef MAC
  308         error = mac_system_check_reboot(td->td_ucred, uap->opt);
  309 #endif
  310         if (error == 0)
  311                 error = priv_check(td, PRIV_REBOOT);
  312         if (error == 0) {
  313                 if (uap->opt & RB_REROOT)
  314                         error = kern_reroot();
  315                 else
  316                         kern_reboot(uap->opt);
  317         }
  318         return (error);
  319 }
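
A minimal userland sketch, not part of kern_shutdown.c, of how this path is normally reached: a privileged process passes RB_* flags from <sys/reboot.h> to reboot(2), which enters sys_reboot() above once the MAC and priv_check(9) gates pass.

#include <sys/reboot.h>

#include <err.h>
#include <unistd.h>

int
main(void)
{
        /* Ask the kernel for an orderly power-off; needs PRIV_REBOOT (root). */
        if (reboot(RB_POWEROFF) == -1)
                err(1, "reboot");
        return (0);             /* Not reached when the call succeeds. */
}
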
  320 
  321 static void
  322 shutdown_nice_task_fn(void *arg, int pending __unused)
  323 {
  324         int howto;
  325 
  326         howto = (uintptr_t)arg;
   327         /* Send a signal to init(8) and have it shut down the world. */
  328         PROC_LOCK(initproc);
  329         if ((howto & RB_POWEROFF) != 0) {
  330                 BOOTTRACE("SIGUSR2 to init(8)");
  331                 kern_psignal(initproc, SIGUSR2);
  332         } else if ((howto & RB_POWERCYCLE) != 0) {
  333                 BOOTTRACE("SIGWINCH to init(8)");
  334                 kern_psignal(initproc, SIGWINCH);
  335         } else if ((howto & RB_HALT) != 0) {
  336                 BOOTTRACE("SIGUSR1 to init(8)");
  337                 kern_psignal(initproc, SIGUSR1);
  338         } else {
  339                 BOOTTRACE("SIGINT to init(8)");
  340                 kern_psignal(initproc, SIGINT);
  341         }
  342         PROC_UNLOCK(initproc);
  343 }
  344 
  345 static struct task shutdown_nice_task = TASK_INITIALIZER(0,
  346     &shutdown_nice_task_fn, NULL);
  347 
  348 /*
   349  * Called by events that want to shut down, e.g. <CTL><ALT><DEL> on a PC.
  350  */
  351 void
  352 shutdown_nice(int howto)
  353 {
  354 
  355         if (initproc != NULL && !SCHEDULER_STOPPED()) {
  356                 BOOTTRACE("shutdown initiated");
  357                 shutdown_nice_task.ta_context = (void *)(uintptr_t)howto;
  358                 taskqueue_enqueue(taskqueue_fast, &shutdown_nice_task);
  359         } else {
  360                 /*
  361                  * No init(8) running, or scheduler would not allow it
  362                  * to run, so simply reboot.
  363                  */
  364                 kern_reboot(howto | RB_NOSYNC);
  365         }
  366 }
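
As a hedged illustration of the intended callers (the handler name below is hypothetical): a hardware event such as a power button simply hands the desired RB_* flags to shutdown_nice() and lets init(8) perform the orderly shutdown.

/*
 * Hypothetical event handler sketch; only shutdown_nice() and RB_POWEROFF
 * come from the code above.
 */
static void
example_power_button_handler(void *arg __unused)
{
        /* Queues SIGUSR2 to init(8), or reboots directly if init is gone. */
        shutdown_nice(RB_POWEROFF);
}
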
  367 
  368 static void
  369 print_uptime(void)
  370 {
  371         int f;
  372         struct timespec ts;
  373 
  374         getnanouptime(&ts);
  375         printf("Uptime: ");
  376         f = 0;
  377         if (ts.tv_sec >= 86400) {
  378                 printf("%ldd", (long)ts.tv_sec / 86400);
  379                 ts.tv_sec %= 86400;
  380                 f = 1;
  381         }
  382         if (f || ts.tv_sec >= 3600) {
  383                 printf("%ldh", (long)ts.tv_sec / 3600);
  384                 ts.tv_sec %= 3600;
  385                 f = 1;
  386         }
  387         if (f || ts.tv_sec >= 60) {
  388                 printf("%ldm", (long)ts.tv_sec / 60);
  389                 ts.tv_sec %= 60;
  390                 f = 1;
  391         }
  392         printf("%lds\n", (long)ts.tv_sec);
  393 }
  394 
  395 /*
  396  * Set up a context that can be extracted from the dump.
  397  */
  398 void
  399 dump_savectx(void)
  400 {
  401 
  402         savectx(&dumppcb);
  403         dumptid = curthread->td_tid;
  404 }
  405 
  406 int
  407 doadump(boolean_t textdump)
  408 {
  409         boolean_t coredump;
  410         int error;
  411 
  412         error = 0;
  413         if (dumping)
  414                 return (EBUSY);
  415         if (TAILQ_EMPTY(&dumper_configs))
  416                 return (ENXIO);
  417 
  418         dump_savectx();
  419         dumping++;
  420 
  421         coredump = TRUE;
  422 #ifdef DDB
  423         if (textdump && textdump_pending) {
  424                 coredump = FALSE;
  425                 textdump_dumpsys(TAILQ_FIRST(&dumper_configs));
  426         }
  427 #endif
  428         if (coredump) {
  429                 struct dumperinfo *di;
  430 
  431                 TAILQ_FOREACH(di, &dumper_configs, di_next) {
  432                         error = dumpsys(di);
  433                         if (error == 0)
  434                                 break;
  435                 }
  436         }
  437 
  438         dumping--;
  439         return (error);
  440 }
  441 
  442 /*
  443  * Trace the shutdown reason.
  444  */
  445 static void
  446 reboottrace(int howto)
  447 {
  448         if ((howto & RB_DUMP) != 0) {
  449                 if ((howto & RB_HALT) != 0)
  450                         BOOTTRACE("system panic: halting...");
  451                 if ((howto & RB_POWEROFF) != 0)
  452                         BOOTTRACE("system panic: powering off...");
  453                 if ((howto & (RB_HALT|RB_POWEROFF)) == 0)
  454                         BOOTTRACE("system panic: rebooting...");
  455         } else {
  456                 if ((howto & RB_HALT) != 0)
  457                         BOOTTRACE("system halting...");
  458                 if ((howto & RB_POWEROFF) != 0)
  459                         BOOTTRACE("system powering off...");
  460                 if ((howto & (RB_HALT|RB_POWEROFF)) == 0)
  461                         BOOTTRACE("system rebooting...");
  462         }
  463 }
  464 
  465 /*
  466  * kern_reboot(9): Shut down the system cleanly to prepare for reboot, halt, or
  467  * power off.
  468  */
  469 void
  470 kern_reboot(int howto)
  471 {
  472         static int once = 0;
  473 
  474         if (initproc != NULL && curproc != initproc)
  475                 BOOTTRACE("kernel shutdown (dirty) started");
  476         else
  477                 BOOTTRACE("kernel shutdown (clean) started");
  478 
  479         /*
  480          * Normal paths here don't hold Giant, but we can wind up here
  481          * unexpectedly with it held.  Drop it now so we don't have to
   482          * drop and pick it up elsewhere.  The paths it protects will
   483          * never be returned to, and precluding a deadlock is
   484          * preferable to holding the lock against code that will
   485          * never continue.
  486          */
  487         while (mtx_owned(&Giant))
  488                 mtx_unlock(&Giant);
  489 
  490 #if defined(SMP)
  491         /*
  492          * Bind us to the first CPU so that all shutdown code runs there.  Some
   493          * systems don't shut down properly (e.g., ACPI power off) if we
  494          * run on another processor.
  495          */
  496         if (!SCHEDULER_STOPPED()) {
  497                 thread_lock(curthread);
  498                 sched_bind(curthread, CPU_FIRST());
  499                 thread_unlock(curthread);
  500                 KASSERT(PCPU_GET(cpuid) == CPU_FIRST(),
  501                     ("%s: not running on cpu 0", __func__));
  502         }
  503 #endif
  504         /* We're in the process of rebooting. */
  505         rebooting = 1;
  506         reboottrace(howto);
  507 
  508         /* We are out of the debugger now. */
  509         kdb_active = 0;
  510 
  511         /*
  512          * Do any callouts that should be done BEFORE syncing the filesystems.
  513          */
  514         EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);
  515         BOOTTRACE("shutdown pre sync complete");
  516 
  517         /* 
  518          * Now sync filesystems
  519          */
  520         if (!cold && (howto & RB_NOSYNC) == 0 && once == 0) {
  521                 once = 1;
  522                 BOOTTRACE("bufshutdown begin");
  523                 bufshutdown(show_busybufs);
  524                 BOOTTRACE("bufshutdown end");
  525         }
  526 
  527         print_uptime();
  528 
  529         cngrab();
  530 
  531         /*
  532          * Ok, now do things that assume all filesystem activity has
  533          * been completed.
  534          */
  535         EVENTHANDLER_INVOKE(shutdown_post_sync, howto);
  536         BOOTTRACE("shutdown post sync complete");
  537 
  538         if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold && !dumping) 
  539                 doadump(TRUE);
  540 
  541         /* Now that we're going to really halt the system... */
  542         BOOTTRACE("shutdown final begin");
  543 
  544         if (shutdown_trace)
  545                 boottrace_dump_console();
  546 
  547         EVENTHANDLER_INVOKE(shutdown_final, howto);
  548 
  549         /*
  550          * Call this directly so that reset is attempted even if shutdown
  551          * handlers are not yet registered.
  552          */
  553         shutdown_reset(NULL, howto);
  554 
  555         for(;;) ;       /* safety against shutdown_reset not working */
  556         /* NOTREACHED */
  557 }
  558 
  559 /*
  560  * The system call that results in changing the rootfs.
  561  */
  562 static int
  563 kern_reroot(void)
  564 {
  565         struct vnode *oldrootvnode, *vp;
  566         struct mount *mp, *devmp;
  567         int error;
  568 
  569         if (curproc != initproc)
  570                 return (EPERM);
  571 
  572         /*
   573          * Mark the filesystem containing the currently-running executable
  574          * (the temporary copy of init(8)) busy.
  575          */
  576         vp = curproc->p_textvp;
  577         error = vn_lock(vp, LK_SHARED);
  578         if (error != 0)
  579                 return (error);
  580         mp = vp->v_mount;
  581         error = vfs_busy(mp, MBF_NOWAIT);
  582         if (error != 0) {
  583                 vfs_ref(mp);
  584                 VOP_UNLOCK(vp);
  585                 error = vfs_busy(mp, 0);
  586                 vn_lock(vp, LK_SHARED | LK_RETRY);
  587                 vfs_rel(mp);
  588                 if (error != 0) {
  589                         VOP_UNLOCK(vp);
  590                         return (ENOENT);
  591                 }
  592                 if (VN_IS_DOOMED(vp)) {
  593                         VOP_UNLOCK(vp);
  594                         vfs_unbusy(mp);
  595                         return (ENOENT);
  596                 }
  597         }
  598         VOP_UNLOCK(vp);
  599 
  600         /*
   601          * Remove the filesystem containing the currently-running executable
  602          * from the mount list, to prevent it from being unmounted
  603          * by vfs_unmountall(), and to avoid confusing vfs_mountroot().
  604          *
  605          * Also preserve /dev - forcibly unmounting it could cause driver
  606          * reinitialization.
  607          */
  608 
  609         vfs_ref(rootdevmp);
  610         devmp = rootdevmp;
  611         rootdevmp = NULL;
  612 
  613         mtx_lock(&mountlist_mtx);
  614         TAILQ_REMOVE(&mountlist, mp, mnt_list);
  615         TAILQ_REMOVE(&mountlist, devmp, mnt_list);
  616         mtx_unlock(&mountlist_mtx);
  617 
  618         oldrootvnode = rootvnode;
  619 
  620         /*
  621          * Unmount everything except for the two filesystems preserved above.
  622          */
  623         vfs_unmountall();
  624 
  625         /*
  626          * Add /dev back; vfs_mountroot() will move it into its new place.
  627          */
  628         mtx_lock(&mountlist_mtx);
  629         TAILQ_INSERT_HEAD(&mountlist, devmp, mnt_list);
  630         mtx_unlock(&mountlist_mtx);
  631         rootdevmp = devmp;
  632         vfs_rel(rootdevmp);
  633 
  634         /*
  635          * Mount the new rootfs.
  636          */
  637         vfs_mountroot();
  638 
  639         /*
  640          * Update all references to the old rootvnode.
  641          */
  642         mountcheckdirs(oldrootvnode, rootvnode);
  643 
  644         /*
  645          * Add the temporary filesystem back and unbusy it.
  646          */
  647         mtx_lock(&mountlist_mtx);
  648         TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
  649         mtx_unlock(&mountlist_mtx);
  650         vfs_unbusy(mp);
  651 
  652         return (0);
  653 }
  654 
  655 /*
  656  * If the shutdown was a clean halt, behave accordingly.
  657  */
  658 static void
  659 shutdown_halt(void *junk, int howto)
  660 {
  661 
  662         if (howto & RB_HALT) {
  663                 printf("\n");
  664                 printf("The operating system has halted.\n");
  665                 printf("Please press any key to reboot.\n\n");
  666 
  667                 wdog_kern_pat(WD_TO_NEVER);
  668 
  669                 switch (cngetc()) {
  670                 case -1:                /* No console, just die */
  671                         cpu_halt();
  672                         /* NOTREACHED */
  673                 default:
  674                         break;
  675                 }
  676         }
  677 }
  678 
  679 /*
   680  * Check to see if the system panicked; if so, pause and then reboot
   681  * according to the specified delay.
  682  */
  683 static void
  684 shutdown_panic(void *junk, int howto)
  685 {
  686         int loop;
  687 
  688         if (howto & RB_DUMP) {
  689                 if (panic_reboot_wait_time != 0) {
  690                         if (panic_reboot_wait_time != -1) {
  691                                 printf("Automatic reboot in %d seconds - "
  692                                        "press a key on the console to abort\n",
  693                                         panic_reboot_wait_time);
  694                                 for (loop = panic_reboot_wait_time * 10;
  695                                      loop > 0; --loop) {
  696                                         DELAY(1000 * 100); /* 1/10th second */
  697                                         /* Did user type a key? */
  698                                         if (cncheckc() != -1)
  699                                                 break;
  700                                 }
  701                                 if (!loop)
  702                                         return;
  703                         }
  704                 } else { /* zero time specified - reboot NOW */
  705                         return;
  706                 }
  707                 printf("--> Press a key on the console to reboot,\n");
  708                 printf("--> or switch off the system now.\n");
  709                 cngetc();
  710         }
  711 }
  712 
  713 /*
  714  * Everything done, now reset
  715  */
  716 static void
  717 shutdown_reset(void *junk, int howto)
  718 {
  719 
  720         printf("Rebooting...\n");
  721         DELAY(reboot_wait_time * 1000000);
  722 
  723         /*
   724          * Acquiring smp_ipi_mtx here has two effects:
   725          * - it disables interrupts, preventing CPU0 from being preempted
   726          *   by fast handlers (which could deadlock against other CPUs)
   727          * - it avoids deadlocks against smp_rendezvous() or, more
   728          *   generally, threads busy-waiting, with this spinlock held,
   729          *   and waiting for responses from threads on other CPUs
   730          *   (e.g. smp_tlb_shootdown()).
  731          *
  732          * For the !SMP case it just needs to handle the former problem.
  733          */
  734 #ifdef SMP
  735         mtx_lock_spin(&smp_ipi_mtx);
  736 #else
  737         spinlock_enter();
  738 #endif
  739 
  740         cpu_reset();
  741         /* NOTREACHED */ /* assuming reset worked */
  742 }
  743 
  744 #if defined(WITNESS) || defined(INVARIANT_SUPPORT)
  745 static int kassert_warn_only = 0;
  746 #ifdef KDB
  747 static int kassert_do_kdb = 0;
  748 #endif
  749 #ifdef KTR
  750 static int kassert_do_ktr = 0;
  751 #endif
  752 static int kassert_do_log = 1;
  753 static int kassert_log_pps_limit = 4;
  754 static int kassert_log_mute_at = 0;
  755 static int kassert_log_panic_at = 0;
  756 static int kassert_suppress_in_panic = 0;
  757 static int kassert_warnings = 0;
  758 
  759 SYSCTL_NODE(_debug, OID_AUTO, kassert, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
  760     "kassert options");
  761 
  762 #ifdef KASSERT_PANIC_OPTIONAL
  763 #define KASSERT_RWTUN   CTLFLAG_RWTUN
  764 #else
  765 #define KASSERT_RWTUN   CTLFLAG_RDTUN
  766 #endif
  767 
  768 SYSCTL_INT(_debug_kassert, OID_AUTO, warn_only, KASSERT_RWTUN,
  769     &kassert_warn_only, 0,
  770     "KASSERT triggers a panic (0) or just a warning (1)");
  771 
  772 #ifdef KDB
  773 SYSCTL_INT(_debug_kassert, OID_AUTO, do_kdb, KASSERT_RWTUN,
  774     &kassert_do_kdb, 0, "KASSERT will enter the debugger");
  775 #endif
  776 
  777 #ifdef KTR
  778 SYSCTL_UINT(_debug_kassert, OID_AUTO, do_ktr, KASSERT_RWTUN,
  779     &kassert_do_ktr, 0,
  780     "KASSERT does a KTR, set this to the KTRMASK you want");
  781 #endif
  782 
  783 SYSCTL_INT(_debug_kassert, OID_AUTO, do_log, KASSERT_RWTUN,
  784     &kassert_do_log, 0,
  785     "If warn_only is enabled, log (1) or do not log (0) assertion violations");
  786 
  787 SYSCTL_INT(_debug_kassert, OID_AUTO, warnings, CTLFLAG_RD | CTLFLAG_STATS,
  788     &kassert_warnings, 0, "number of KASSERTs that have been triggered");
  789 
  790 SYSCTL_INT(_debug_kassert, OID_AUTO, log_panic_at, KASSERT_RWTUN,
  791     &kassert_log_panic_at, 0, "max number of KASSERTS before we will panic");
  792 
  793 SYSCTL_INT(_debug_kassert, OID_AUTO, log_pps_limit, KASSERT_RWTUN,
  794     &kassert_log_pps_limit, 0, "limit number of log messages per second");
  795 
  796 SYSCTL_INT(_debug_kassert, OID_AUTO, log_mute_at, KASSERT_RWTUN,
  797     &kassert_log_mute_at, 0, "max number of KASSERTS to log");
  798 
  799 SYSCTL_INT(_debug_kassert, OID_AUTO, suppress_in_panic, KASSERT_RWTUN,
  800     &kassert_suppress_in_panic, 0,
  801     "KASSERTs will be suppressed while handling a panic");
  802 #undef KASSERT_RWTUN
  803 
  804 static int kassert_sysctl_kassert(SYSCTL_HANDLER_ARGS);
  805 
  806 SYSCTL_PROC(_debug_kassert, OID_AUTO, kassert,
  807     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE | CTLFLAG_MPSAFE, NULL, 0,
  808     kassert_sysctl_kassert, "I",
  809     "set to trigger a test kassert");
  810 
  811 static int
  812 kassert_sysctl_kassert(SYSCTL_HANDLER_ARGS)
  813 {
  814         int error, i;
  815 
  816         error = sysctl_wire_old_buffer(req, sizeof(int));
  817         if (error == 0) {
  818                 i = 0;
  819                 error = sysctl_handle_int(oidp, &i, 0, req);
  820         }
  821         if (error != 0 || req->newptr == NULL)
  822                 return (error);
  823         KASSERT(0, ("kassert_sysctl_kassert triggered kassert %d", i));
  824         return (0);
  825 }
  826 
  827 #ifdef KASSERT_PANIC_OPTIONAL
  828 /*
  829  * Called by KASSERT, this decides if we will panic
  830  * or if we will log via printf and/or ktr.
  831  */
  832 void
  833 kassert_panic(const char *fmt, ...)
  834 {
  835         static char buf[256];
  836         va_list ap;
  837 
  838         va_start(ap, fmt);
  839         (void)vsnprintf(buf, sizeof(buf), fmt, ap);
  840         va_end(ap);
  841 
  842         /*
  843          * If we are suppressing secondary panics, log the warning but do not
  844          * re-enter panic/kdb.
  845          */
  846         if (KERNEL_PANICKED() && kassert_suppress_in_panic) {
  847                 if (kassert_do_log) {
  848                         printf("KASSERT failed: %s\n", buf);
  849 #ifdef KDB
  850                         if (trace_all_panics && trace_on_panic)
  851                                 kdb_backtrace();
  852 #endif
  853                 }
  854                 return;
  855         }
  856 
  857         /*
   858          * Panic if we're not just warning, or if we've exceeded
  859          * kassert_log_panic_at warnings.
  860          */
  861         if (!kassert_warn_only ||
  862             (kassert_log_panic_at > 0 &&
  863              kassert_warnings >= kassert_log_panic_at)) {
  864                 va_start(ap, fmt);
  865                 vpanic(fmt, ap);
  866                 /* NORETURN */
  867         }
  868 #ifdef KTR
  869         if (kassert_do_ktr)
  870                 CTR0(ktr_mask, buf);
  871 #endif /* KTR */
  872         /*
   873          * Log if we've not yet reached the mute limit.
  874          */
  875         if (kassert_do_log &&
  876             (kassert_log_mute_at == 0 ||
  877              kassert_warnings < kassert_log_mute_at)) {
  878                 static  struct timeval lasterr;
  879                 static  int curerr;
  880 
  881                 if (ppsratecheck(&lasterr, &curerr, kassert_log_pps_limit)) {
  882                         printf("KASSERT failed: %s\n", buf);
  883                         kdb_backtrace();
  884                 }
  885         }
  886 #ifdef KDB
  887         if (kassert_do_kdb) {
  888                 kdb_enter(KDB_WHY_KASSERT, buf);
  889         }
  890 #endif
  891         atomic_add_int(&kassert_warnings, 1);
  892 }
  893 #endif /* KASSERT_PANIC_OPTIONAL */
  894 #endif
  895 
  896 /*
  897  * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
  898  * and then reboots.  If we are called twice, then we avoid trying to sync
  899  * the disks as this often leads to recursive panics.
  900  */
  901 void
  902 panic(const char *fmt, ...)
  903 {
  904         va_list ap;
  905 
  906         va_start(ap, fmt);
  907         vpanic(fmt, ap);
  908 }
  909 
  910 void
  911 vpanic(const char *fmt, va_list ap)
  912 {
  913 #ifdef SMP
  914         cpuset_t other_cpus;
  915 #endif
  916         struct thread *td = curthread;
  917         int bootopt, newpanic;
  918         static char buf[256];
  919 
  920         spinlock_enter();
  921 
  922 #ifdef SMP
  923         /*
  924          * stop_cpus_hard(other_cpus) should prevent multiple CPUs from
  925          * concurrently entering panic.  Only the winner will proceed
  926          * further.
  927          */
  928         if (panicstr == NULL && !kdb_active) {
  929                 other_cpus = all_cpus;
  930                 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
  931                 stop_cpus_hard(other_cpus);
  932         }
  933 #endif
  934 
  935         /*
  936          * Ensure that the scheduler is stopped while panicking, even if panic
  937          * has been entered from kdb.
  938          */
  939         td->td_stopsched = 1;
  940 
  941         bootopt = RB_AUTOBOOT;
  942         newpanic = 0;
  943         if (KERNEL_PANICKED())
  944                 bootopt |= RB_NOSYNC;
  945         else {
  946                 bootopt |= RB_DUMP;
  947                 panicstr = fmt;
  948                 panicked = true;
  949                 newpanic = 1;
  950         }
  951 
  952         if (newpanic) {
  953                 (void)vsnprintf(buf, sizeof(buf), fmt, ap);
  954                 panicstr = buf;
  955                 cngrab();
  956                 printf("panic: %s\n", buf);
  957         } else {
  958                 printf("panic: ");
  959                 vprintf(fmt, ap);
  960                 printf("\n");
  961         }
  962 #ifdef SMP
  963         printf("cpuid = %d\n", PCPU_GET(cpuid));
  964 #endif
   965         printf("time = %jd\n", (intmax_t)time_second);
  966 #ifdef KDB
  967         if ((newpanic || trace_all_panics) && trace_on_panic)
  968                 kdb_backtrace();
  969         if (debugger_on_panic)
  970                 kdb_enter(KDB_WHY_PANIC, "panic");
  971         else if (!newpanic && debugger_on_recursive_panic)
  972                 kdb_enter(KDB_WHY_PANIC, "re-panic");
  973 #endif
  974         /*thread_lock(td); */
  975         td->td_flags |= TDF_INPANIC;
  976         /* thread_unlock(td); */
  977         if (!sync_on_panic)
  978                 bootopt |= RB_NOSYNC;
  979         if (poweroff_on_panic)
  980                 bootopt |= RB_POWEROFF;
  981         if (powercycle_on_panic)
  982                 bootopt |= RB_POWERCYCLE;
  983         kern_reboot(bootopt);
  984 }
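
For context, a hedged fragment (not from this file; the softc type and state value are illustrative) showing the two usual ways kernel code lands in the panic path above: an explicit panic(9) call, and a failed KASSERT(9), which with INVARIANTS funnels through kassert_panic() or panic().

struct example_softc {
        int     unit;
        int     flags;
        int     state;
};
#define EXAMPLE_STATE_CORRUPT   1

static void
example_validate(struct example_softc *sc)
{
        /* With INVARIANTS, a failed KASSERT ends up in (kassert_)panic. */
        KASSERT(sc != NULL, ("%s: NULL softc", __func__));

        if (sc->state == EXAMPLE_STATE_CORRUPT)
                panic("example%d: state corrupted (flags %#x)",
                    sc->unit, sc->flags);
}
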
  985 
  986 /*
  987  * Support for poweroff delay.
  988  *
  989  * Please note that setting this delay too short might power off your machine
  990  * before the write cache on your hard disk has been flushed, leading to
  991  * soft-updates inconsistencies.
  992  */
  993 #ifndef POWEROFF_DELAY
  994 # define POWEROFF_DELAY 5000
  995 #endif
  996 static int poweroff_delay = POWEROFF_DELAY;
  997 
  998 SYSCTL_INT(_kern_shutdown, OID_AUTO, poweroff_delay, CTLFLAG_RW,
  999     &poweroff_delay, 0, "Delay before poweroff to write disk caches (msec)");
 1000 
 1001 static void
 1002 poweroff_wait(void *junk, int howto)
 1003 {
 1004 
 1005         if ((howto & (RB_POWEROFF | RB_POWERCYCLE)) == 0 || poweroff_delay <= 0)
 1006                 return;
 1007         DELAY(poweroff_delay * 1000);
 1008 }
 1009 
 1010 /*
 1011  * Some system processes (e.g. syncer) need to be stopped at appropriate
 1012  * points in their main loops prior to a system shutdown, so that they
 1013  * won't interfere with the shutdown process (e.g. by holding a disk buf
 1014  * to cause sync to fail).  For each of these system processes, register
  1015  * kproc_shutdown() as a handler for a shutdown event, as sketched below.
 1016  */
 1017 static int kproc_shutdown_wait = 60;
 1018 SYSCTL_INT(_kern_shutdown, OID_AUTO, kproc_shutdown_wait, CTLFLAG_RW,
 1019     &kproc_shutdown_wait, 0, "Max wait time (sec) to stop for each process");
 1020 
 1021 void
 1022 kproc_shutdown(void *arg, int howto)
 1023 {
 1024         struct proc *p;
 1025         int error;
 1026 
 1027         if (KERNEL_PANICKED())
 1028                 return;
 1029 
 1030         p = (struct proc *)arg;
 1031         printf("Waiting (max %d seconds) for system process `%s' to stop... ",
 1032             kproc_shutdown_wait, p->p_comm);
 1033         error = kproc_suspend(p, kproc_shutdown_wait * hz);
 1034 
 1035         if (error == EWOULDBLOCK)
 1036                 printf("timed out\n");
 1037         else
 1038                 printf("done\n");
 1039 }
 1040 
 1041 void
 1042 kthread_shutdown(void *arg, int howto)
 1043 {
 1044         struct thread *td;
 1045         int error;
 1046 
 1047         if (KERNEL_PANICKED())
 1048                 return;
 1049 
 1050         td = (struct thread *)arg;
 1051         printf("Waiting (max %d seconds) for system thread `%s' to stop... ",
 1052             kproc_shutdown_wait, td->td_name);
 1053         error = kthread_suspend(td, kproc_shutdown_wait * hz);
 1054 
 1055         if (error == EWOULDBLOCK)
 1056                 printf("timed out\n");
 1057         else
 1058                 printf("done\n");
 1059 }
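
A usage sketch of the registration pattern described above (the daemon, its SYSINIT placement, and its priority are hypothetical; EVENTHANDLER_REGISTER(9), kproc_create(9), and kproc_suspend_check(9) are the standard interfaces): the subsystem that owns a kernel process hooks kproc_shutdown() into shutdown_pre_sync when it creates the process, so kern_reboot() suspends the daemon before filesystems are synced.

static struct proc *exampled_proc;      /* hypothetical daemon */

static void
exampled_main(void *arg __unused)
{
        for (;;) {
                /* ... periodic work goes here ... */
                kproc_suspend_check(exampled_proc);
                pause("exmpld", hz);
        }
}

static void
exampled_init(void *arg __unused)
{
        if (kproc_create(exampled_main, NULL, &exampled_proc, 0, 0,
            "exampled") != 0)
                return;
        /* Have kern_reboot() suspend the daemon before syncing disks. */
        EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown,
            exampled_proc, SHUTDOWN_PRI_DEFAULT);
}
SYSINIT(exampled, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, exampled_init, NULL);
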
 1060 
 1061 static int
 1062 dumpdevname_sysctl_handler(SYSCTL_HANDLER_ARGS)
 1063 {
 1064         char buf[256];
 1065         struct dumperinfo *di;
 1066         struct sbuf sb;
 1067         int error;
 1068 
 1069         error = sysctl_wire_old_buffer(req, 0);
 1070         if (error != 0)
 1071                 return (error);
 1072 
 1073         sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req);
 1074 
 1075         mtx_lock(&dumpconf_list_lk);
 1076         TAILQ_FOREACH(di, &dumper_configs, di_next) {
 1077                 if (di != TAILQ_FIRST(&dumper_configs))
 1078                         sbuf_putc(&sb, ',');
 1079                 sbuf_cat(&sb, di->di_devname);
 1080         }
 1081         mtx_unlock(&dumpconf_list_lk);
 1082 
 1083         error = sbuf_finish(&sb);
 1084         sbuf_delete(&sb);
 1085         return (error);
 1086 }
 1087 SYSCTL_PROC(_kern_shutdown, OID_AUTO, dumpdevname,
 1088     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, &dumper_configs, 0,
 1089     dumpdevname_sysctl_handler, "A",
 1090     "Device(s) for kernel dumps");
 1091 
 1092 static int _dump_append(struct dumperinfo *di, void *virtual, size_t length);
 1093 
 1094 #ifdef EKCD
 1095 static struct kerneldumpcrypto *
 1096 kerneldumpcrypto_create(size_t blocksize, uint8_t encryption,
 1097     const uint8_t *key, uint32_t encryptedkeysize, const uint8_t *encryptedkey)
 1098 {
 1099         struct kerneldumpcrypto *kdc;
 1100         struct kerneldumpkey *kdk;
 1101         uint32_t dumpkeysize;
 1102 
 1103         dumpkeysize = roundup2(sizeof(*kdk) + encryptedkeysize, blocksize);
 1104         kdc = malloc(sizeof(*kdc) + dumpkeysize, M_EKCD, M_WAITOK | M_ZERO);
 1105 
 1106         arc4rand(kdc->kdc_iv, sizeof(kdc->kdc_iv), 0);
 1107 
 1108         kdc->kdc_encryption = encryption;
 1109         switch (kdc->kdc_encryption) {
 1110         case KERNELDUMP_ENC_AES_256_CBC:
 1111                 if (rijndael_makeKey(&kdc->kdc_ki, DIR_ENCRYPT, 256, key) <= 0)
 1112                         goto failed;
 1113                 break;
 1114         case KERNELDUMP_ENC_CHACHA20:
 1115                 chacha_keysetup(&kdc->kdc_chacha, key, 256);
 1116                 break;
 1117         default:
 1118                 goto failed;
 1119         }
 1120 
 1121         kdc->kdc_dumpkeysize = dumpkeysize;
 1122         kdk = kdc->kdc_dumpkey;
 1123         kdk->kdk_encryption = kdc->kdc_encryption;
 1124         memcpy(kdk->kdk_iv, kdc->kdc_iv, sizeof(kdk->kdk_iv));
 1125         kdk->kdk_encryptedkeysize = htod32(encryptedkeysize);
 1126         memcpy(kdk->kdk_encryptedkey, encryptedkey, encryptedkeysize);
 1127 
 1128         return (kdc);
 1129 failed:
 1130         zfree(kdc, M_EKCD);
 1131         return (NULL);
 1132 }
 1133 
 1134 static int
 1135 kerneldumpcrypto_init(struct kerneldumpcrypto *kdc)
 1136 {
 1137         uint8_t hash[SHA256_DIGEST_LENGTH];
 1138         SHA256_CTX ctx;
 1139         struct kerneldumpkey *kdk;
 1140         int error;
 1141 
 1142         error = 0;
 1143 
 1144         if (kdc == NULL)
 1145                 return (0);
 1146 
 1147         /*
  1148          * When a user enters ddb, a crash dump can be written multiple times.
 1149          * Each time it should be encrypted using a different IV.
 1150          */
 1151         SHA256_Init(&ctx);
 1152         SHA256_Update(&ctx, kdc->kdc_iv, sizeof(kdc->kdc_iv));
 1153         SHA256_Final(hash, &ctx);
 1154         bcopy(hash, kdc->kdc_iv, sizeof(kdc->kdc_iv));
 1155 
 1156         switch (kdc->kdc_encryption) {
 1157         case KERNELDUMP_ENC_AES_256_CBC:
 1158                 if (rijndael_cipherInit(&kdc->kdc_ci, MODE_CBC,
 1159                     kdc->kdc_iv) <= 0) {
 1160                         error = EINVAL;
 1161                         goto out;
 1162                 }
 1163                 break;
 1164         case KERNELDUMP_ENC_CHACHA20:
 1165                 chacha_ivsetup(&kdc->kdc_chacha, kdc->kdc_iv, NULL);
 1166                 break;
 1167         default:
 1168                 error = EINVAL;
 1169                 goto out;
 1170         }
 1171 
 1172         kdk = kdc->kdc_dumpkey;
 1173         memcpy(kdk->kdk_iv, kdc->kdc_iv, sizeof(kdk->kdk_iv));
 1174 out:
 1175         explicit_bzero(hash, sizeof(hash));
 1176         return (error);
 1177 }
 1178 
 1179 static uint32_t
 1180 kerneldumpcrypto_dumpkeysize(const struct kerneldumpcrypto *kdc)
 1181 {
 1182 
 1183         if (kdc == NULL)
 1184                 return (0);
 1185         return (kdc->kdc_dumpkeysize);
 1186 }
 1187 #endif /* EKCD */
 1188 
 1189 static struct kerneldumpcomp *
 1190 kerneldumpcomp_create(struct dumperinfo *di, uint8_t compression)
 1191 {
 1192         struct kerneldumpcomp *kdcomp;
 1193         int format;
 1194 
 1195         switch (compression) {
 1196         case KERNELDUMP_COMP_GZIP:
 1197                 format = COMPRESS_GZIP;
 1198                 break;
 1199         case KERNELDUMP_COMP_ZSTD:
 1200                 format = COMPRESS_ZSTD;
 1201                 break;
 1202         default:
 1203                 return (NULL);
 1204         }
 1205 
 1206         kdcomp = malloc(sizeof(*kdcomp), M_DUMPER, M_WAITOK | M_ZERO);
 1207         kdcomp->kdc_format = compression;
 1208         kdcomp->kdc_stream = compressor_init(kerneldumpcomp_write_cb,
 1209             format, di->maxiosize, kerneldump_gzlevel, di);
 1210         if (kdcomp->kdc_stream == NULL) {
 1211                 free(kdcomp, M_DUMPER);
 1212                 return (NULL);
 1213         }
 1214         kdcomp->kdc_buf = malloc(di->maxiosize, M_DUMPER, M_WAITOK | M_NODUMP);
 1215         return (kdcomp);
 1216 }
 1217 
 1218 static void
 1219 kerneldumpcomp_destroy(struct dumperinfo *di)
 1220 {
 1221         struct kerneldumpcomp *kdcomp;
 1222 
 1223         kdcomp = di->kdcomp;
 1224         if (kdcomp == NULL)
 1225                 return;
 1226         compressor_fini(kdcomp->kdc_stream);
 1227         zfree(kdcomp->kdc_buf, M_DUMPER);
 1228         free(kdcomp, M_DUMPER);
 1229 }
 1230 
 1231 /*
  1232  * Free a dumper.  It must not be present on the global list.
 1233  */
 1234 void
 1235 dumper_destroy(struct dumperinfo *di)
 1236 {
 1237 
 1238         if (di == NULL)
 1239                 return;
 1240 
 1241         zfree(di->blockbuf, M_DUMPER);
 1242         kerneldumpcomp_destroy(di);
 1243 #ifdef EKCD
 1244         zfree(di->kdcrypto, M_EKCD);
 1245 #endif
 1246         zfree(di, M_DUMPER);
 1247 }
 1248 
 1249 /*
 1250  * Allocate and set up a new dumper from the provided template.
 1251  */
 1252 int
 1253 dumper_create(const struct dumperinfo *di_template, const char *devname,
 1254     const struct diocskerneldump_arg *kda, struct dumperinfo **dip)
 1255 {
 1256         struct dumperinfo *newdi;
 1257         int error = 0;
 1258 
 1259         if (dip == NULL)
 1260                 return (EINVAL);
 1261 
 1262         /* Allocate a new dumper */
 1263         newdi = malloc(sizeof(*newdi) + strlen(devname) + 1, M_DUMPER,
 1264             M_WAITOK | M_ZERO);
 1265         memcpy(newdi, di_template, sizeof(*newdi));
 1266         newdi->blockbuf = NULL;
 1267         newdi->kdcrypto = NULL;
 1268         newdi->kdcomp = NULL;
 1269         strcpy(newdi->di_devname, devname);
 1270 
 1271         if (kda->kda_encryption != KERNELDUMP_ENC_NONE) {
 1272 #ifdef EKCD
 1273                 newdi->kdcrypto = kerneldumpcrypto_create(newdi->blocksize,
 1274                     kda->kda_encryption, kda->kda_key,
 1275                     kda->kda_encryptedkeysize, kda->kda_encryptedkey);
 1276                 if (newdi->kdcrypto == NULL) {
 1277                         error = EINVAL;
 1278                         goto cleanup;
 1279                 }
 1280 #else
 1281                 error = EOPNOTSUPP;
 1282                 goto cleanup;
 1283 #endif
 1284         }
 1285         if (kda->kda_compression != KERNELDUMP_COMP_NONE) {
 1286 #ifdef EKCD
 1287                 /*
 1288                  * We can't support simultaneous unpadded block cipher
 1289                  * encryption and compression because there is no guarantee the
 1290                  * length of the compressed result is exactly a multiple of the
 1291                  * cipher block size.
 1292                  */
 1293                 if (kda->kda_encryption == KERNELDUMP_ENC_AES_256_CBC) {
 1294                         error = EOPNOTSUPP;
 1295                         goto cleanup;
 1296                 }
 1297 #endif
 1298                 newdi->kdcomp = kerneldumpcomp_create(newdi,
 1299                     kda->kda_compression);
 1300                 if (newdi->kdcomp == NULL) {
 1301                         error = EINVAL;
 1302                         goto cleanup;
 1303                 }
 1304         }
 1305         newdi->blockbuf = malloc(newdi->blocksize, M_DUMPER, M_WAITOK | M_ZERO);
 1306 
 1307         *dip = newdi;
 1308         return (0);
 1309 cleanup:
 1310         dumper_destroy(newdi);
 1311         return (error);
 1312 }
 1313 
 1314 /*
 1315  * Create a new dumper and register it in the global list.
 1316  */
 1317 int
 1318 dumper_insert(const struct dumperinfo *di_template, const char *devname,
 1319     const struct diocskerneldump_arg *kda)
 1320 {
 1321         struct dumperinfo *newdi, *listdi;
 1322         bool inserted;
 1323         uint8_t index;
 1324         int error;
 1325 
 1326         index = kda->kda_index;
 1327         MPASS(index != KDA_REMOVE && index != KDA_REMOVE_DEV &&
 1328             index != KDA_REMOVE_ALL);
 1329 
 1330         error = priv_check(curthread, PRIV_SETDUMPER);
 1331         if (error != 0)
 1332                 return (error);
 1333 
 1334         error = dumper_create(di_template, devname, kda, &newdi);
 1335         if (error != 0)
 1336                 return (error);
 1337 
 1338         /* Add the new configuration to the queue */
 1339         mtx_lock(&dumpconf_list_lk);
 1340         inserted = false;
 1341         TAILQ_FOREACH(listdi, &dumper_configs, di_next) {
 1342                 if (index == 0) {
 1343                         TAILQ_INSERT_BEFORE(listdi, newdi, di_next);
 1344                         inserted = true;
 1345                         break;
 1346                 }
 1347                 index--;
 1348         }
 1349         if (!inserted)
 1350                 TAILQ_INSERT_TAIL(&dumper_configs, newdi, di_next);
 1351         mtx_unlock(&dumpconf_list_lk);
 1352 
 1353         return (0);
 1354 }
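
A hedged sketch of how a caller (for instance a disk driver's DIOCSKERNELDUMP ioctl path) might feed this API; the helper and its choices are illustrative, and only structure fields already referenced elsewhere in this file are set.

/*
 * Illustrative only: register an uncompressed, unencrypted dump target
 * at index 0 (the first dumper tried by doadump()).
 */
static int
example_set_dumper(struct dumperinfo *di_template, const char *devname)
{
        struct diocskerneldump_arg kda;

        bzero(&kda, sizeof(kda));
        kda.kda_index = 0;
        kda.kda_compression = KERNELDUMP_COMP_NONE;
        kda.kda_encryption = KERNELDUMP_ENC_NONE;

        /* Allocates the dumper and appends it to dumper_configs. */
        return (dumper_insert(di_template, devname, &kda));
}
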
 1355 
 1356 #ifdef DDB
 1357 void
 1358 dumper_ddb_insert(struct dumperinfo *newdi)
 1359 {
 1360         TAILQ_INSERT_HEAD(&dumper_configs, newdi, di_next);
 1361 }
 1362 
 1363 void
 1364 dumper_ddb_remove(struct dumperinfo *di)
 1365 {
 1366         TAILQ_REMOVE(&dumper_configs, di, di_next);
 1367 }
 1368 #endif
 1369 
 1370 static bool
 1371 dumper_config_match(const struct dumperinfo *di, const char *devname,
 1372     const struct diocskerneldump_arg *kda)
 1373 {
 1374         if (kda->kda_index == KDA_REMOVE_ALL)
 1375                 return (true);
 1376 
 1377         if (strcmp(di->di_devname, devname) != 0)
 1378                 return (false);
 1379 
 1380         /*
 1381          * Allow wildcard removal of configs matching a device on g_dev_orphan.
 1382          */
 1383         if (kda->kda_index == KDA_REMOVE_DEV)
 1384                 return (true);
 1385 
 1386         if (di->kdcomp != NULL) {
 1387                 if (di->kdcomp->kdc_format != kda->kda_compression)
 1388                         return (false);
 1389         } else if (kda->kda_compression != KERNELDUMP_COMP_NONE)
 1390                 return (false);
 1391 #ifdef EKCD
 1392         if (di->kdcrypto != NULL) {
 1393                 if (di->kdcrypto->kdc_encryption != kda->kda_encryption)
 1394                         return (false);
 1395                 /*
 1396                  * Do we care to verify keys match to delete?  It seems weird
 1397                  * to expect multiple fallback dump configurations on the same
 1398                  * device that only differ in crypto key.
 1399                  */
 1400         } else
 1401 #endif
 1402                 if (kda->kda_encryption != KERNELDUMP_ENC_NONE)
 1403                         return (false);
 1404 
 1405         return (true);
 1406 }
 1407 
 1408 /*
 1409  * Remove and free the requested dumper(s) from the global list.
 1410  */
 1411 int
 1412 dumper_remove(const char *devname, const struct diocskerneldump_arg *kda)
 1413 {
 1414         struct dumperinfo *di, *sdi;
 1415         bool found;
 1416         int error;
 1417 
 1418         error = priv_check(curthread, PRIV_SETDUMPER);
 1419         if (error != 0)
 1420                 return (error);
 1421 
 1422         /*
 1423          * Try to find a matching configuration, and kill it.
 1424          *
 1425          * NULL 'kda' indicates remove any configuration matching 'devname',
  1426  * which may remove multiple configurations in atypical setups.
 1427          */
 1428         found = false;
 1429         mtx_lock(&dumpconf_list_lk);
 1430         TAILQ_FOREACH_SAFE(di, &dumper_configs, di_next, sdi) {
 1431                 if (dumper_config_match(di, devname, kda)) {
 1432                         found = true;
 1433                         TAILQ_REMOVE(&dumper_configs, di, di_next);
 1434                         dumper_destroy(di);
 1435                 }
 1436         }
 1437         mtx_unlock(&dumpconf_list_lk);
 1438 
  1439         /* Only produce ENOENT if a targeted removal request found no match. */
 1440         if (!found && kda->kda_index == KDA_REMOVE)
 1441                 return (ENOENT);
 1442         return (0);
 1443 }
 1444 
 1445 static int
 1446 dump_check_bounds(struct dumperinfo *di, off_t offset, size_t length)
 1447 {
 1448 
 1449         if (di->mediasize > 0 && length != 0 && (offset < di->mediaoffset ||
 1450             offset - di->mediaoffset + length > di->mediasize)) {
 1451                 if (di->kdcomp != NULL && offset >= di->mediaoffset) {
 1452                         printf(
 1453                     "Compressed dump failed to fit in device boundaries.\n");
 1454                         return (E2BIG);
 1455                 }
 1456 
 1457                 printf("Attempt to write outside dump device boundaries.\n"
 1458             "offset(%jd), mediaoffset(%jd), length(%ju), mediasize(%jd).\n",
 1459                     (intmax_t)offset, (intmax_t)di->mediaoffset,
 1460                     (uintmax_t)length, (intmax_t)di->mediasize);
 1461                 return (ENOSPC);
 1462         }
 1463         if (length % di->blocksize != 0) {
 1464                 printf("Attempt to write partial block of length %ju.\n",
 1465                     (uintmax_t)length);
 1466                 return (EINVAL);
 1467         }
 1468         if (offset % di->blocksize != 0) {
 1469                 printf("Attempt to write at unaligned offset %jd.\n",
 1470                     (intmax_t)offset);
 1471                 return (EINVAL);
 1472         }
 1473 
 1474         return (0);
 1475 }
 1476 
 1477 #ifdef EKCD
 1478 static int
 1479 dump_encrypt(struct kerneldumpcrypto *kdc, uint8_t *buf, size_t size)
 1480 {
 1481 
 1482         switch (kdc->kdc_encryption) {
 1483         case KERNELDUMP_ENC_AES_256_CBC:
 1484                 if (rijndael_blockEncrypt(&kdc->kdc_ci, &kdc->kdc_ki, buf,
 1485                     8 * size, buf) <= 0) {
 1486                         return (EIO);
 1487                 }
 1488                 if (rijndael_cipherInit(&kdc->kdc_ci, MODE_CBC,
 1489                     buf + size - 16 /* IV size for AES-256-CBC */) <= 0) {
 1490                         return (EIO);
 1491                 }
 1492                 break;
 1493         case KERNELDUMP_ENC_CHACHA20:
 1494                 chacha_encrypt_bytes(&kdc->kdc_chacha, buf, buf, size);
 1495                 break;
 1496         default:
 1497                 return (EINVAL);
 1498         }
 1499 
 1500         return (0);
 1501 }
 1502 
 1503 /* Encrypt data and call dumper. */
 1504 static int
 1505 dump_encrypted_write(struct dumperinfo *di, void *virtual, off_t offset,
 1506     size_t length)
 1507 {
 1508         static uint8_t buf[KERNELDUMP_BUFFER_SIZE];
 1509         struct kerneldumpcrypto *kdc;
 1510         int error;
 1511         size_t nbytes;
 1512 
 1513         kdc = di->kdcrypto;
 1514 
 1515         while (length > 0) {
 1516                 nbytes = MIN(length, sizeof(buf));
 1517                 bcopy(virtual, buf, nbytes);
 1518 
 1519                 if (dump_encrypt(kdc, buf, nbytes) != 0)
 1520                         return (EIO);
 1521 
 1522                 error = dump_write(di, buf, offset, nbytes);
 1523                 if (error != 0)
 1524                         return (error);
 1525 
 1526                 offset += nbytes;
 1527                 virtual = (void *)((uint8_t *)virtual + nbytes);
 1528                 length -= nbytes;
 1529         }
 1530 
 1531         return (0);
 1532 }
 1533 #endif /* EKCD */
 1534 
 1535 static int
 1536 kerneldumpcomp_write_cb(void *base, size_t length, off_t offset, void *arg)
 1537 {
 1538         struct dumperinfo *di;
 1539         size_t resid, rlength;
 1540         int error;
 1541 
 1542         di = arg;
 1543 
 1544         if (length % di->blocksize != 0) {
 1545                 /*
 1546                  * This must be the final write after flushing the compression
 1547                  * stream. Write as many full blocks as possible and stash the
 1548                  * residual data in the dumper's block buffer. It will be
 1549                  * padded and written in dump_finish().
 1550                  */
 1551                 rlength = rounddown(length, di->blocksize);
 1552                 if (rlength != 0) {
 1553                         error = _dump_append(di, base, rlength);
 1554                         if (error != 0)
 1555                                 return (error);
 1556                 }
 1557                 resid = length - rlength;
 1558                 memmove(di->blockbuf, (uint8_t *)base + rlength, resid);
 1559                 bzero((uint8_t *)di->blockbuf + resid, di->blocksize - resid);
 1560                 di->kdcomp->kdc_resid = resid;
 1561                 return (EAGAIN);
 1562         }
 1563         return (_dump_append(di, base, length));
 1564 }
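
As a concrete illustration of the residual handling above (hypothetical numbers,
standalone sketch): with 512-byte blocks, a final flush of 1300 bytes appends two
full blocks immediately and stashes 276 bytes in di->blockbuf for dump_finish()
to pad and write.

        #include <stdio.h>

        #define BLOCKSIZE 512                           /* hypothetical di->blocksize */
        #define ROUNDDOWN(x, y) (((x) / (y)) * (y))     /* same effect as rounddown() */

        int
        main(void)
        {
                size_t length = 1300;                   /* final, partial-length flush */
                size_t rlength = ROUNDDOWN(length, BLOCKSIZE);
                size_t resid = length - rlength;

                printf("appended now: %zu bytes\n", rlength);            /* 1024 */
                printf("stashed for dump_finish(): %zu bytes\n", resid); /* 276  */
                return (0);
        }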
 1565 
 1566 /*
 1567  * Write kernel dump headers at the beginning and end of the dump extent.
 1568  * Write the kernel dump encryption key after the leading header if we were
 1569  * configured to do so.
 1570  */
 1571 static int
 1572 dump_write_headers(struct dumperinfo *di, struct kerneldumpheader *kdh)
 1573 {
 1574 #ifdef EKCD
 1575         struct kerneldumpcrypto *kdc;
 1576 #endif
 1577         void *buf;
 1578         size_t hdrsz;
 1579         uint64_t extent;
 1580         uint32_t keysize;
 1581         int error;
 1582 
 1583         hdrsz = sizeof(*kdh);
 1584         if (hdrsz > di->blocksize)
 1585                 return (ENOMEM);
 1586 
 1587 #ifdef EKCD
 1588         kdc = di->kdcrypto;
 1589         keysize = kerneldumpcrypto_dumpkeysize(kdc);
 1590 #else
 1591         keysize = 0;
 1592 #endif
 1593 
 1594         /*
 1595          * If the dump device has special handling for headers, let it take care
 1596          * of writing them out.
 1597          */
 1598         if (di->dumper_hdr != NULL)
 1599                 return (di->dumper_hdr(di, kdh));
 1600 
 1601         if (hdrsz == di->blocksize)
 1602                 buf = kdh;
 1603         else {
 1604                 buf = di->blockbuf;
 1605                 memset(buf, 0, di->blocksize);
 1606                 memcpy(buf, kdh, hdrsz);
 1607         }
 1608 
 1609         extent = dtoh64(kdh->dumpextent);
 1610 #ifdef EKCD
 1611         if (kdc != NULL) {
 1612                 error = dump_write(di, kdc->kdc_dumpkey,
 1613                     di->mediaoffset + di->mediasize - di->blocksize - extent -
 1614                     keysize, keysize);
 1615                 if (error != 0)
 1616                         return (error);
 1617         }
 1618 #endif
 1619 
 1620         error = dump_write(di, buf,
 1621             di->mediaoffset + di->mediasize - 2 * di->blocksize - extent -
 1622             keysize, di->blocksize);
 1623         if (error == 0)
 1624                 error = dump_write(di, buf, di->mediaoffset + di->mediasize -
 1625                     di->blocksize, di->blocksize);
 1626         return (error);
 1627 }
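
To make the offset arithmetic above concrete, here is a standalone sketch with
hypothetical geometry (a 4 MB dump area at media offset 0, 512-byte blocks, a
3 MB extent, and one block of key material). It evaluates the same expressions
used for the key, the leading header, and the trailing header.

        #include <stdint.h>
        #include <stdio.h>

        int
        main(void)
        {
                /* Hypothetical geometry; all values are examples. */
                int64_t mediaoffset = 0;
                int64_t mediasize   = 4 * 1024 * 1024;  /* 4 MB dump partition */
                int64_t blocksize   = 512;
                int64_t extent      = 3 * 1024 * 1024;  /* dump extent from the header */
                int64_t keysize     = 512;              /* one block of key material */

                /* Same expressions as dump_write_headers(). */
                int64_t keyoff  = mediaoffset + mediasize - blocksize - extent - keysize;
                int64_t lhdroff = mediaoffset + mediasize - 2 * blocksize - extent - keysize;
                int64_t thdroff = mediaoffset + mediasize - blocksize;

                printf("leading header at  %jd\n", (intmax_t)lhdroff);   /* 1047040 */
                printf("key at             %jd\n", (intmax_t)keyoff);    /* 1047552 */
                printf("dump extent at     %jd\n", (intmax_t)(thdroff - extent)); /* 1048064 */
                printf("trailing header at %jd\n", (intmax_t)thdroff);   /* 4193792 */
                return (0);
        }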
 1628 
 1629 /*
 1630  * Don't touch the first SIZEOF_METADATA bytes on the dump device.  This protects
 1631  * us from on-disk metadata (disk labels and the like), and protects it from us.
 1632  */
 1633 #define SIZEOF_METADATA         (64 * 1024)
 1634 
 1635 /*
 1636  * Do some preliminary setup for a kernel dump: initialize state for encryption,
 1637  * if requested, and make sure that we have enough space on the dump device.
 1638  *
 1639  * We set things up so that the dump ends before the last sector of the dump
 1640  * device, at which the trailing header is written.
 1641  *
 1642  *     +-----------+------+-----+----------------------------+------+
 1643  *     |           | lhdr | key |    ... kernel dump ...     | thdr |
 1644  *     +-----------+------+-----+----------------------------+------+
 1645  *                   1 blk  opt <------- dump extent --------> 1 blk
 1646  *
 1647  * Dumps written using dump_append() start at the beginning of the extent.
 1648  * Uncompressed dumps will use the entire extent, but compressed dumps typically
 1649  * will not. The true length of the dump is recorded in the leading and trailing
 1650  * headers once the dump has been completed.
 1651  *
 1652  * The dump device may provide a callback, in which case it will initialize
 1653  * dumpoff and take care of laying out the headers.
 1654  */
 1655 int
 1656 dump_start(struct dumperinfo *di, struct kerneldumpheader *kdh)
 1657 {
 1658 #ifdef EKCD
 1659         struct kerneldumpcrypto *kdc;
 1660 #endif
 1661         void *key;
 1662         uint64_t dumpextent, span;
 1663         uint32_t keysize;
 1664         int error;
 1665 
 1666 #ifdef EKCD
 1667         /* Send the key before the dump so a partial dump is still usable. */
 1668         kdc = di->kdcrypto;
 1669         error = kerneldumpcrypto_init(kdc);
 1670         if (error != 0)
 1671                 return (error);
 1672         keysize = kerneldumpcrypto_dumpkeysize(kdc);
 1673         key = keysize > 0 ? kdc->kdc_dumpkey : NULL;
 1674 #else
 1675         error = 0;
 1676         keysize = 0;
 1677         key = NULL;
 1678 #endif
 1679 
 1680         if (di->dumper_start != NULL) {
 1681                 error = di->dumper_start(di, key, keysize);
 1682         } else {
 1683                 dumpextent = dtoh64(kdh->dumpextent);
 1684                 span = SIZEOF_METADATA + dumpextent + 2 * di->blocksize +
 1685                     keysize;
 1686                 if (di->mediasize < span) {
 1687                         if (di->kdcomp == NULL)
 1688                                 return (E2BIG);
 1689 
 1690                         /*
 1691                          * We don't yet know how much space the compressed dump
 1692                          * will occupy, so try to use the whole swap partition
 1693                          * (minus the first 64KB) in the hope that the
 1694                          * compressed dump will fit. If that doesn't turn out to
 1695                          * be enough, the bounds checking in dump_write()
 1696                          * will catch us and cause the dump to fail.
 1697                          */
 1698                         dumpextent = di->mediasize - span + dumpextent;
 1699                         kdh->dumpextent = htod64(dumpextent);
 1700                 }
 1701 
 1702                 /*
 1703                  * The offset at which to begin writing the dump.
 1704                  */
 1705                 di->dumpoff = di->mediaoffset + di->mediasize - di->blocksize -
 1706                     dumpextent;
 1707         }
 1708         di->origdumpoff = di->dumpoff;
 1709         return (error);
 1710 }
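
A standalone sketch of the space check above, with hypothetical numbers: a 1 GB
dump device, a requested extent larger than the device, and compression enabled,
so the extent is shrunk to everything past the 64 KB metadata area minus the two
header blocks and the key.

        #include <stdint.h>
        #include <stdio.h>

        #define SIZEOF_METADATA (64 * 1024)     /* same reservation as above */

        int
        main(void)
        {
                /* Hypothetical values. */
                uint64_t mediaoffset = 0;
                uint64_t mediasize   = 1ULL << 30;      /* 1 GB dump device */
                uint64_t blocksize   = 512;
                uint64_t keysize     = 0;               /* no encryption here */
                uint64_t dumpextent  = 4ULL << 30;      /* 4 GB of RAM requested */

                uint64_t span = SIZEOF_METADATA + dumpextent + 2 * blocksize + keysize;
                if (mediasize < span) {
                        /* Compression enabled: fall back to the whole usable device. */
                        dumpextent = mediasize - span + dumpextent;
                }
                uint64_t dumpoff = mediaoffset + mediasize - blocksize - dumpextent;

                printf("extent shrunk to %ju bytes\n", (uintmax_t)dumpextent);
                printf("dump starts at offset %ju\n", (uintmax_t)dumpoff);
                return (0);
        }

With these numbers the extent shrinks to 1073675264 bytes and the dump begins at
offset 66048, i.e., immediately after the 64 KB metadata area and the single
leading-header block (there is no key block since keysize is 0).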
 1711 
 1712 static int
 1713 _dump_append(struct dumperinfo *di, void *virtual, size_t length)
 1714 {
 1715         int error;
 1716 
 1717 #ifdef EKCD
 1718         if (di->kdcrypto != NULL)
 1719                 error = dump_encrypted_write(di, virtual, di->dumpoff, length);
 1720         else
 1721 #endif
 1722                 error = dump_write(di, virtual, di->dumpoff, length);
 1723         if (error == 0)
 1724                 di->dumpoff += length;
 1725         return (error);
 1726 }
 1727 
 1728 /*
 1729  * Write to the dump device starting at dumpoff. When compression is enabled,
 1730  * writes to the device will be performed using a callback that gets invoked
 1731  * when the compression stream's output buffer is full.
 1732  */
 1733 int
 1734 dump_append(struct dumperinfo *di, void *virtual, size_t length)
 1735 {
 1736         void *buf;
 1737 
 1738         if (di->kdcomp != NULL) {
 1739                 /*
                      * Bounce through a stable buffer so the source pages
                      * cannot change (breaking the compressor's CRC) while
                      * they are being compressed.
                      */
 1740                 if (length > di->maxiosize)
 1741                         return (EINVAL);
 1742                 buf = di->kdcomp->kdc_buf;
 1743                 memmove(buf, virtual, length);
 1744                 return (compressor_write(di->kdcomp->kdc_stream, buf, length));
 1745         }
 1746         return (_dump_append(di, virtual, length));
 1747 }
 1748 
 1749 /*
 1750  * Write to the dump device at the specified offset.
 1751  */
 1752 int
 1753 dump_write(struct dumperinfo *di, void *virtual, off_t offset, size_t length)
 1754 {
 1755         int error;
 1756 
 1757         error = dump_check_bounds(di, offset, length);
 1758         if (error != 0)
 1759                 return (error);
 1760         return (di->dumper(di->priv, virtual, offset, length));
 1761 }
 1762 
 1763 /*
 1764  * Perform kernel dump finalization: flush the compression stream, if necessary,
 1765  * write the leading and trailing kernel dump headers now that we know the true
 1766  * length of the dump, and optionally write the encryption key following the
 1767  * leading header.
 1768  */
 1769 int
 1770 dump_finish(struct dumperinfo *di, struct kerneldumpheader *kdh)
 1771 {
 1772         int error;
 1773 
 1774         if (di->kdcomp != NULL) {
 1775                 error = compressor_flush(di->kdcomp->kdc_stream);
 1776                 if (error == EAGAIN) {
 1777                         /* We have residual data in di->blockbuf. */
 1778                         error = _dump_append(di, di->blockbuf, di->blocksize);
 1779                         if (error == 0)
 1780                                 /* Compensate for _dump_append()'s adjustment. */
 1781                                 di->dumpoff -= di->blocksize - di->kdcomp->kdc_resid;
 1782                         di->kdcomp->kdc_resid = 0;
 1783                 }
 1784                 if (error != 0)
 1785                         return (error);
 1786 
 1787                 /*
 1788                  * We now know the size of the compressed dump, so update the
 1789                  * header accordingly and recompute parity.
 1790                  */
 1791                 kdh->dumplength = htod64(di->dumpoff - di->origdumpoff);
 1792                 kdh->parity = 0;
 1793                 kdh->parity = kerneldump_parity(kdh);
 1794 
 1795                 compressor_reset(di->kdcomp->kdc_stream);
 1796         }
 1797 
 1798         error = dump_write_headers(di, kdh);
 1799         if (error != 0)
 1800                 return (error);
 1801 
 1802         (void)dump_write(di, NULL, 0, 0);
 1803         return (0);
 1804 }
 1805 
 1806 void
 1807 dump_init_header(const struct dumperinfo *di, struct kerneldumpheader *kdh,
 1808     const char *magic, uint32_t archver, uint64_t dumplen)
 1809 {
 1810         size_t dstsize;
 1811 
 1812         bzero(kdh, sizeof(*kdh));
 1813         strlcpy(kdh->magic, magic, sizeof(kdh->magic));
 1814         strlcpy(kdh->architecture, MACHINE_ARCH, sizeof(kdh->architecture));
 1815         kdh->version = htod32(KERNELDUMPVERSION);
 1816         kdh->architectureversion = htod32(archver);
 1817         kdh->dumplength = htod64(dumplen);
 1818         kdh->dumpextent = kdh->dumplength;
 1819         kdh->dumptime = htod64(time_second);
 1820 #ifdef EKCD
 1821         kdh->dumpkeysize = htod32(kerneldumpcrypto_dumpkeysize(di->kdcrypto));
 1822 #else
 1823         kdh->dumpkeysize = 0;
 1824 #endif
 1825         kdh->blocksize = htod32(di->blocksize);
 1826         strlcpy(kdh->hostname, prison0.pr_hostname, sizeof(kdh->hostname));
 1827         dstsize = sizeof(kdh->versionstring);
 1828         if (strlcpy(kdh->versionstring, version, dstsize) >= dstsize)
 1829                 kdh->versionstring[dstsize - 2] = '\n';
 1830         if (panicstr != NULL)
 1831                 strlcpy(kdh->panicstring, panicstr, sizeof(kdh->panicstring));
 1832         if (di->kdcomp != NULL)
 1833                 kdh->compression = di->kdcomp->kdc_format;
 1834         kdh->parity = kerneldump_parity(kdh);
 1835 }
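
The routines above are driven by machine-dependent dump code that lives outside
this file. Below is a hedged sketch of the expected call sequence, assuming the
usual declarations from <sys/conf.h> and <sys/kerneldump.h>; md_dumpsys_sketch(),
md_dump_memory(), and all parameter values are hypothetical stand-ins, not the
actual callers.

        #include <sys/param.h>
        #include <sys/conf.h>
        #include <sys/kerneldump.h>

        static int md_dump_memory(struct dumperinfo *);  /* hypothetical helper */

        static int
        md_dumpsys_sketch(struct dumperinfo *di, const char *magic,
            uint32_t archver, uint64_t dumplen)
        {
                struct kerneldumpheader kdh;
                int error;

                /* Describe the dump: magic string, architecture version, length. */
                dump_init_header(di, &kdh, magic, archver, dumplen);

                /* Initialize encryption, check space, and set di->dumpoff. */
                error = dump_start(di, &kdh);
                if (error != 0)
                        return (error);

                /* Stream the image; each piece must not exceed di->maxiosize. */
                error = md_dump_memory(di);     /* calls dump_append() in a loop */
                if (error != 0)
                        return (error);

                /* Flush compression, write both headers, end with a zero-length write. */
                return (dump_finish(di, &kdh));
        }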
 1836 
 1837 #ifdef DDB
 1838 DB_SHOW_COMMAND_FLAGS(panic, db_show_panic, DB_CMD_MEMSAFE)
 1839 {
 1840 
 1841         if (panicstr == NULL)
 1842                 db_printf("panicstr not set\n");
 1843         else
 1844                 db_printf("panic: %s\n", panicstr);
 1845 }
 1846 #endif
