The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/riscv/riscv/minidump_machdep.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2006 Peter Wemm
    3  * Copyright (c) 2015 The FreeBSD Foundation
    4  * All rights reserved.
    5  * Copyright (c) 2019 Mitchell Horne
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  *
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #include "opt_watchdog.h"
   33 
   34 #include <sys/param.h>
   35 #include <sys/systm.h>
   36 #include <sys/conf.h>
   37 #include <sys/cons.h>
   38 #include <sys/kernel.h>
   39 #include <sys/kerneldump.h>
   40 #include <sys/msgbuf.h>
   41 #include <sys/watchdog.h>
   42 #include <sys/vmmeter.h>
   43 
   44 #include <vm/vm.h>
   45 #include <vm/vm_param.h>
   46 #include <vm/vm_page.h>
   47 #include <vm/vm_phys.h>
   48 #include <vm/vm_dumpset.h>
   49 #include <vm/pmap.h>
   50 
   51 #include <machine/atomic.h>
   52 #include <machine/elf.h>
   53 #include <machine/md_var.h>
   54 #include <machine/minidump.h>
   55 
/* The kernel dump header is written as a single 512-byte disk sector. */
CTASSERT(sizeof(struct kerneldumpheader) == 512);

/* Header passed to dump_start()/dump_finish() for this dump. */
static struct kerneldumpheader kdh;

/* Handle chunked writes. */
static size_t fragsz;	/* Bytes currently staged at dump_va, not yet written. */
static void *dump_va;	/* DMAP address of the staged fragment. */
static size_t dumpsize;	/* Total computed size of the minidump, in bytes. */

/* One page of scratch space for headers, fake PTEs, and zero pages. */
static uint64_t tmpbuffer[PAGE_SIZE / sizeof(uint64_t)];
   66 
   67 static int
   68 blk_flush(struct dumperinfo *di)
   69 {
   70         int error;
   71 
   72         if (fragsz == 0)
   73                 return (0);
   74 
   75         error = dump_append(di, dump_va, fragsz);
   76         fragsz = 0;
   77         return (error);
   78 }
   79 
/*
 * Write a block of data to the dump file.
 *
 * Caller can provide data through a pointer (ptr) or by specifying its
 * physical address (pa) -- exactly one of the two per call.  sz must be
 * a multiple of PAGE_SIZE.  Physical pages are reached through the
 * direct map and staged via the dump_va/fragsz state before flushing;
 * pointer data is appended to the dump directly.
 *
 * XXX writes using pa should be no larger than PAGE_SIZE.
 */
static int
blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
{
	size_t len;
	int error, c;
	u_int maxdumpsz;

	/* Clamp each device write to what the dump device can accept. */
	maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
	if (maxdumpsz == 0)	/* seatbelt */
		maxdumpsz = PAGE_SIZE;
	error = 0;
	/* Validate arguments: page-multiple size, one source, aligned pa. */
	if ((sz % PAGE_SIZE) != 0) {
		printf("size not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL && pa != 0) {
		printf("cant have both va and pa!\n");
		return (EINVAL);
	}
	if ((((uintptr_t)pa) % PAGE_SIZE) != 0) {
		printf("address not page aligned %#lx\n", (uintptr_t)pa);
		return (EINVAL);
	}
	if (ptr != NULL) {
		/*
		 * If we're doing a virtual dump, flush any
		 * pre-existing pa pages.
		 */
		error = blk_flush(di);
		if (error != 0)
			return (error);
	}
	while (sz) {
		/* Write at most the room left under the device I/O limit. */
		len = maxdumpsz - fragsz;
		if (len > sz)
			len = sz;

		/* Update progress and keep the watchdog from firing. */
		dumpsys_pb_progress(len);
		wdog_kern_pat(WD_LASTVAL);

		if (ptr) {
			/* Virtual data: append straight to the dump. */
			error = dump_append(di, ptr, len);
			if (error != 0)
				return (error);
			ptr += len;
			sz -= len;
		} else {
			/*
			 * Physical data: map through the direct map,
			 * stage it, and flush immediately.
			 */
			dump_va = (void *)PHYS_TO_DMAP(pa);
			fragsz += len;
			pa += len;
			sz -= len;
			error = blk_flush(di);
			if (error != 0)
				return (error);
		}

		/* Check for user abort (^C on the console). */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}

	return (0);
}
  154 
/*
 * Write a RISC-V minidump: the minidump header page, the kernel message
 * buffer, the dump_avail array, the page-dump bitmap, one page-table
 * page per L2 slot of KVA, and finally every physical page marked in
 * the dump bitmap.  Retries (up to 5 times) if the dump device reports
 * ENOSPC, which can happen if KVA grew while the dump was in progress.
 */
int
cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
{
	pd_entry_t *l1, *l2, l2e;
	pt_entry_t *l3, l3e;
	struct minidumphdr mdhdr;
	struct msgbuf *mbp;
	uint32_t pmapsize;
	vm_offset_t va, kva_max;
	vm_paddr_t pa;
	int error;
	int i;
	int retry_count;

	retry_count = 0;
retry:
	retry_count++;
	error = 0;
	pmapsize = 0;

	/* Snapshot the KVA upper bound in case it grows. */
	kva_max = kernel_vm_end;

	/*
	 * Walk the kernel page table pages, setting the active entries in the
	 * dump bitmap.
	 *
	 * NB: for a live dump, we may be racing with updates to the page
	 * tables, so care must be taken to read each entry only once.
	 */
	for (va = VM_MIN_KERNEL_ADDRESS; va < kva_max; va += L2_SIZE) {
		/* One page-table page worth of output per L2 slot. */
		pmapsize += PAGE_SIZE;
		if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3))
			continue;

		/* We should always be using the l2 table for kvm */
		if (l2 == NULL)
			continue;

		/* l2 may be a superpage */
		l2e = atomic_load_64(l2);
		if ((l2e & PTE_RWX) != 0) {
			/* Superpage: mark each constituent 4K page. */
			pa = (l2e >> PTE_PPN1_S) << L2_SHIFT;
			for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) {
				if (vm_phys_is_dumpable(pa))
					vm_page_dump_add(state->dump_bitset,
					    pa);
			}
		} else {
			/* Scan the valid entries of the leaf (l3) table. */
			for (i = 0; i < Ln_ENTRIES; i++) {
				l3e = atomic_load_64(&l3[i]);
				if ((l3e & PTE_V) == 0)
					continue;
				pa = (l3e >> PTE_PPN0_S) * PAGE_SIZE;
				if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa))
					vm_page_dump_add(state->dump_bitset,
					    pa);
			}
		}
	}

	/* Calculate dump size */
	mbp = state->msgbufp;
	dumpsize = pmapsize;
	dumpsize += round_page(mbp->msg_size);
	dumpsize += round_page(sizeof(dump_avail));
	dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
	VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
		/* Clear out undumpable pages now if needed */
		if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa))
			dumpsize += PAGE_SIZE;
		else
			vm_page_dump_drop(state->dump_bitset, pa);
	}
	/* One additional page for the minidump header itself. */
	dumpsize += PAGE_SIZE;

	dumpsys_pb_init(dumpsize);

	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
	mdhdr.msgbufsize = mbp->msg_size;
	mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages));
	mdhdr.pmapsize = pmapsize;
	mdhdr.kernbase = KERNBASE;
	mdhdr.dmapphys = DMAP_MIN_PHYSADDR;
	mdhdr.dmapbase = DMAP_MIN_ADDRESS;
	mdhdr.dmapend = DMAP_MAX_ADDRESS;
	mdhdr.dumpavailsize = round_page(sizeof(dump_avail));

	dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_RISCV_VERSION,
	    dumpsize);

	error = dump_start(di, &kdh);
	if (error != 0)
		goto fail;

	printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
	    ptoa((uintmax_t)physmem) / 1048576);

	/* Dump minidump header */
	bzero(&tmpbuffer, sizeof(tmpbuffer));
	bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr));
	error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
	error = blk_write(di, mbp->msg_ptr, 0, round_page(mbp->msg_size));
	if (error)
		goto fail;

	/* Dump dump_avail */
	_Static_assert(sizeof(dump_avail) <= sizeof(tmpbuffer),
	    "Large dump_avail not handled");
	bzero(tmpbuffer, sizeof(tmpbuffer));
	memcpy(tmpbuffer, dump_avail, sizeof(dump_avail));
	error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump bitmap */
	error = blk_write(di, (char *)vm_page_dump, 0,
	    round_page(BITSET_SIZE(vm_page_dump_pages)));
	if (error)
		goto fail;

	/* Dump kernel page directory pages */
	bzero(&tmpbuffer, sizeof(tmpbuffer));
	for (va = VM_MIN_KERNEL_ADDRESS; va < kva_max; va += L2_SIZE) {
		if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3)) {
			/* We always write a page, even if it is zero */
			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* Flush, in case we reuse tmpbuffer in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			continue;
		}

		l2e = atomic_load_64(l2);
		if ((l2e & PTE_RWX) != 0) {
			/* Generate fake l3 entries based on the l2 superpage */
			for (i = 0; i < Ln_ENTRIES; i++) {
				tmpbuffer[i] = (l2e | (i << PTE_PPN0_S));
			}
			/* We always write a page, even if it is zero */
			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* Flush, in case we reuse tmpbuffer in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			bzero(&tmpbuffer, sizeof(tmpbuffer));
		} else {
			pa = (l2e >> PTE_PPN0_S) * PAGE_SIZE;

			/*
			 * We always write a page, even if it is zero. If pa
			 * is malformed, write the zeroed tmpbuffer.
			 */
			if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa))
				error = blk_write(di, NULL, pa, PAGE_SIZE);
			else
				error = blk_write(di, (char *)&tmpbuffer, 0,
				    PAGE_SIZE);
			if (error)
				goto fail;
		}
	}

	/* Dump memory chunks */
	/* XXX cluster it up and use blk_dump() */
	VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
		error = blk_write(di, 0, pa, PAGE_SIZE);
		if (error)
			goto fail;
	}

	error = blk_flush(di);
	if (error)
		goto fail;

	error = dump_finish(di, &kdh);
	if (error != 0)
		goto fail;

	printf("\nDump complete\n");
	return (0);

fail:
	/* Some callees report failure as a negative errno; normalize. */
	if (error < 0)
		error = -error;

	printf("\n");
	if (error == ENOSPC) {
		/*
		 * Recompute sizes and retry a bounded number of times;
		 * the message suggests this is hit when the dump map
		 * grew mid-dump -- TODO confirm against dump_append().
		 */
		printf("Dump map grown while dumping. ");
		if (retry_count < 5) {
			printf("Retrying...\n");
			goto retry;
		}
		printf("Dump failed.\n");
	}
	else if (error == ECANCELED)
		printf("Dump aborted\n");
	else if (error == E2BIG) {
		printf("Dump failed. Partition too small (about %lluMB were "
		    "needed this time).\n", (long long)dumpsize >> 20);
	} else
		printf("** DUMP FAILED (ERROR %d) **\n", error);
	return (error);
}

Cache object: 58db31e3d1bbaddbe0fd130a16a7d313


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.