/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Oleksandr Tymoshenko <gonzo@freebsd.org>
 * Copyright (c) 2008 Semihalf, Grzegorz Bernacki
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: FreeBSD: src/sys/arm/arm/minidump_machdep.c v214223
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/msgbuf.h>
#include <sys/watchdog.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_dumpset.h>
#include <machine/atomic.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#include <machine/minidump.h>
#include <machine/cache.h>
55
/* The kernel dump header must be exactly one 512-byte disk sector. */
CTASSERT(sizeof(struct kerneldumpheader) == 512);

static struct kerneldumpheader kdh;

/* Handle chunked writes: bytes since last progress report, bytes left, total. */
static uint64_t counter, progress, dumpsize;
/* Scratch page used to stage the minidump header and dump_avail copy. */
static char tmpbuffer[PAGE_SIZE] __aligned(sizeof(uint64_t));

extern pd_entry_t *kernel_segmap;
66
67 static int
68 is_dumpable(vm_paddr_t pa)
69 {
70 vm_page_t m;
71 int i;
72
73 if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
74 return ((m->flags & PG_NODUMP) == 0);
75 for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
76 if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
77 return (1);
78 }
79 return (0);
80 }
81
/*
 * Ten-decile progress table: each bucket is printed at most once so the
 * console shows "..10%..20%.." style output as the dump proceeds.
 */
static struct {
	int min_per;	/* bucket lower bound, percent (inclusive) */
	int max_per;	/* bucket upper bound, percent (inclusive) */
	int visited;	/* set once this bucket has been reported */
} progress_track[10] = {
	{ 0, 10, 0},
	{ 10, 20, 0},
	{ 20, 30, 0},
	{ 30, 40, 0},
	{ 40, 50, 0},
	{ 50, 60, 0},
	{ 60, 70, 0},
	{ 70, 80, 0},
	{ 80, 90, 0},
	{ 90, 100, 0}
};
98
99 static void
100 report_progress(uint64_t progress, uint64_t dumpsize)
101 {
102 int sofar, i;
103
104 sofar = 100 - ((progress * 100) / dumpsize);
105 for (i = 0; i < nitems(progress_track); i++) {
106 if (sofar < progress_track[i].min_per ||
107 sofar > progress_track[i].max_per)
108 continue;
109 if (progress_track[i].visited)
110 return;
111 progress_track[i].visited = 1;
112 printf("..%d%%", sofar);
113 return;
114 }
115 }
116
/*
 * Write "sz" bytes starting at "ptr" to the dump device in chunks of at
 * most di->maxiosize bytes, patting the watchdog and updating the global
 * progress counters on each chunk.  Writing by physical address (NULL
 * "ptr") is not implemented and panics.
 *
 * Returns 0 on success, ECANCELED if the user aborts with CTRL-C, or the
 * error from dump_append().
 */
static int
write_buffer(struct dumperinfo *di, char *ptr, size_t sz)
{
	size_t len;
	int error, c;
	u_int maxdumpsz;

	maxdumpsz = di->maxiosize;

	if (maxdumpsz == 0)	/* seatbelt */
		maxdumpsz = PAGE_SIZE;

	error = 0;

	while (sz) {
		len = min(maxdumpsz, sz);
		counter += len;
		progress -= len;

		/* Emit a progress update roughly every 4MB (2^22 bytes). */
		if (counter >> 22) {
			report_progress(progress, dumpsize);
			counter &= (1<<22) - 1;
		}

		wdog_kern_pat(WD_LASTVAL);

		if (ptr) {
			error = dump_append(di, ptr, 0, len);
			if (error)
				return (error);
			ptr += len;
			sz -= len;
		} else {
			/* Physical-address writes are not supported here. */
			panic("pa is not supported");
		}

		/* Check for user abort. */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}

	return (0);
}
163
/*
 * Write a MIPS minidump to the configured dump device.
 *
 * Layout written, in order: minidump header page, message buffer, a
 * 64-bit copy of dump_avail[], the vm_page_dump bitmap, the kernel page
 * table pages, and finally every physical page whose bit is set in the
 * bitmap.  Returns 0 on success or a positive errno on failure.
 */
int
minidumpsys(struct dumperinfo *di)
{
	struct minidumphdr mdhdr;
	uint64_t *dump_avail_buf;
	uint32_t ptesize;
	vm_paddr_t pa;
	vm_offset_t prev_pte = 0;
	uint32_t count = 0;
	vm_offset_t va;
	pt_entry_t *pte;
	int i, error;
	void *dump_va;

	/* Flush cache */
	mips_dcache_wbinv_all();

	counter = 0;
	/* Walk page table pages, set bits in vm_page_dump */
	ptesize = 0;

	/* One PTE page covers NBPDR of KVA; dump every valid mapping's page. */
	for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += NBPDR) {
		ptesize += PAGE_SIZE;
		pte = pmap_pte(kernel_pmap, va);
		KASSERT(pte != NULL, ("pte for %jx is NULL", (uintmax_t)va));
		for (i = 0; i < NPTEPG; i++) {
			if (pte_test(&pte[i], PTE_V)) {
				pa = TLBLO_PTE_TO_PA(pte[i]);
				if (is_dumpable(pa))
					dump_add_page(pa);
			}
		}
	}

	/*
	 * Now mark pages from 0 to phys_avail[0], that's where kernel
	 * and pages allocated by pmap_steal reside
	 */
	for (pa = 0; pa < phys_avail[0]; pa += PAGE_SIZE) {
		if (is_dumpable(pa))
			dump_add_page(pa);
	}

	/* Calculate dump size. */
	dumpsize = ptesize;
	dumpsize += round_page(msgbufp->msg_size);
	dumpsize += round_page(nitems(dump_avail) * sizeof(uint64_t));
	dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
	VM_PAGE_DUMP_FOREACH(pa) {
		/* Clear out undumpable pages now if needed */
		if (is_dumpable(pa))
			dumpsize += PAGE_SIZE;
		else
			dump_drop_page(pa);
	}
	/* One extra page for the minidump header itself. */
	dumpsize += PAGE_SIZE;

	/* write_buffer() decrements this toward zero as data is written. */
	progress = dumpsize;

	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
	mdhdr.msgbufsize = msgbufp->msg_size;
	mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages));
	mdhdr.ptesize = ptesize;
	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
	mdhdr.dumpavailsize = round_page(nitems(dump_avail) * sizeof(uint64_t));

	dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_MIPS_VERSION,
	    dumpsize);

	error = dump_start(di, &kdh);
	if (error != 0)
		goto fail;

	printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
	    ptoa((uintmax_t)physmem) / 1048576);

	/* Dump my header */
	bzero(tmpbuffer, sizeof(tmpbuffer));
	bcopy(&mdhdr, tmpbuffer, sizeof(mdhdr));
	error = write_buffer(di, tmpbuffer, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
	error = write_buffer(di, (char *)msgbufp->msg_ptr,
	    round_page(msgbufp->msg_size));
	if (error)
		goto fail;

	/* Dump dump_avail.  Make a copy using 64-bit physical addresses. */
	_Static_assert(nitems(dump_avail) * sizeof(uint64_t) <=
	    sizeof(tmpbuffer), "Large dump_avail not handled");
	bzero(tmpbuffer, sizeof(tmpbuffer));
	if (sizeof(dump_avail[0]) != sizeof(uint64_t)) {
		/* Widen each entry; terminating (0, 0) pair comes from bzero. */
		dump_avail_buf = (uint64_t *)tmpbuffer;
		for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i++) {
			dump_avail_buf[i] = dump_avail[i];
			dump_avail_buf[i + 1] = dump_avail[i + 1];
		}
	} else {
		memcpy(tmpbuffer, dump_avail, sizeof(dump_avail));
	}
	error = write_buffer(di, tmpbuffer, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump bitmap */
	error = write_buffer(di, (char *)vm_page_dump,
	    round_page(BITSET_SIZE(vm_page_dump_pages)));
	if (error)
		goto fail;

	/*
	 * Dump kernel page table pages, coalescing runs of virtually
	 * contiguous PTE pages into a single write_buffer() call.
	 */
	for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += NBPDR) {
		pte = pmap_pte(kernel_pmap, va);
		KASSERT(pte != NULL, ("pte for %jx is NULL", (uintmax_t)va));
		if (!count) {
			/* Start a new run. */
			prev_pte = (vm_offset_t)pte;
			count++;
		} else {
			if ((vm_offset_t)pte == (prev_pte + count * PAGE_SIZE))
				count++;	/* extends the current run */
			else {
				/* Run broken: flush it, start a new one. */
				error = write_buffer(di, (char*)prev_pte,
				    count * PAGE_SIZE);
				if (error)
					goto fail;
				count = 1;
				prev_pte = (vm_offset_t)pte;
			}
		}
	}

	/* Flush the final pending run of PTE pages, if any. */
	if (count) {
		error = write_buffer(di, (char*)prev_pte, count * PAGE_SIZE);
		if (error)
			goto fail;
		count = 0;
		prev_pte = 0;
	}

	/* Dump memory chunks page by page*/
	VM_PAGE_DUMP_FOREACH(pa) {
		dump_va = pmap_kenter_temporary(pa, 0);
		error = write_buffer(di, dump_va, PAGE_SIZE);
		if (error)
			goto fail;
		pmap_kenter_temporary_free(pa);
	}

	error = dump_finish(di, &kdh);
	if (error != 0)
		goto fail;

	printf("\nDump complete\n");
	return (0);

fail:
	/* Normalize kernel-style negative errnos to positive. */
	if (error < 0)
		error = -error;

	if (error == ECANCELED)
		printf("\nDump aborted\n");
	else if (error == E2BIG || error == ENOSPC) {
		printf("\nDump failed. Partition too small (about %lluMB were "
		    "needed this time).\n", (long long)dumpsize >> 20);
	} else
		printf("\n** DUMP FAILED (ERROR %d) **\n", error);
	return (error);
}