FreeBSD/Linux Kernel Cross Reference
sys/kern/imgact_elf.c
1 /*-
2 * Copyright (c) 2000 David O'Brien
3 * Copyright (c) 1995-1996 Søren Schmidt
4 * Copyright (c) 1996 Peter Wemm
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer
12 * in this position and unchanged.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD: releng/10.1/sys/kern/imgact_elf.c 267561 2014-06-17 05:21:48Z dchagin $");
33
34 #include "opt_capsicum.h"
35 #include "opt_compat.h"
36 #include "opt_core.h"
37
38 #include <sys/param.h>
39 #include <sys/capability.h>
40 #include <sys/exec.h>
41 #include <sys/fcntl.h>
42 #include <sys/imgact.h>
43 #include <sys/imgact_elf.h>
44 #include <sys/kernel.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/mount.h>
48 #include <sys/mman.h>
49 #include <sys/namei.h>
50 #include <sys/pioctl.h>
51 #include <sys/proc.h>
52 #include <sys/procfs.h>
53 #include <sys/racct.h>
54 #include <sys/resourcevar.h>
55 #include <sys/rwlock.h>
56 #include <sys/sbuf.h>
57 #include <sys/sf_buf.h>
58 #include <sys/smp.h>
59 #include <sys/systm.h>
60 #include <sys/signalvar.h>
61 #include <sys/stat.h>
62 #include <sys/sx.h>
63 #include <sys/syscall.h>
64 #include <sys/sysctl.h>
65 #include <sys/sysent.h>
66 #include <sys/vnode.h>
67 #include <sys/syslog.h>
68 #include <sys/eventhandler.h>
69 #include <sys/user.h>
70
71 #include <net/zlib.h>
72
73 #include <vm/vm.h>
74 #include <vm/vm_kern.h>
75 #include <vm/vm_param.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_object.h>
79 #include <vm/vm_extern.h>
80
81 #include <machine/elf.h>
82 #include <machine/md_var.h>
83
84 #define ELF_NOTE_ROUNDSIZE 4
85 #define OLD_EI_BRAND 8
86
87 static int __elfN(check_header)(const Elf_Ehdr *hdr);
88 static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
89 const char *interp, int interp_name_len, int32_t *osrel);
90 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
91 u_long *entry, size_t pagesize);
92 static int __elfN(load_section)(struct image_params *imgp, vm_offset_t offset,
93 caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
94 size_t pagesize);
95 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
96 static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note,
97 int32_t *osrel);
98 static boolean_t kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
99 static boolean_t __elfN(check_note)(struct image_params *imgp,
100 Elf_Brandnote *checknote, int32_t *osrel);
101 static vm_prot_t __elfN(trans_prot)(Elf_Word);
102 static Elf_Word __elfN(untrans_prot)(vm_prot_t);
103
104 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
105 "");
106
107 #ifdef COMPRESS_USER_CORES
108 static int compress_core(gzFile, char *, char *, unsigned int,
109 struct thread * td);
110 #endif
111 #define CORE_BUF_SIZE (16 * 1024)
112
113 int __elfN(fallback_brand) = -1;
114 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
115 fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
116 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
117 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
118 &__elfN(fallback_brand));
119
120 static int elf_legacy_coredump = 0;
121 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
122 &elf_legacy_coredump, 0, "");
123
124 int __elfN(nxstack) =
125 #if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */
126 1;
127 #else
128 0;
129 #endif
130 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
131 nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
132 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");
133
134 #if __ELF_WORD_SIZE == 32
135 #if defined(__amd64__) || defined(__ia64__)
136 int i386_read_exec = 0;
137 SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
138 "enable execution from readable segments");
139 #endif
140 #endif
141
142 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
143
144 #define trunc_page_ps(va, ps) ((va) & ~(ps - 1))
145 #define round_page_ps(va, ps) (((va) + (ps - 1)) & ~(ps - 1))
146 #define aligned(a, t) (trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a))
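
/*
 * Worked example (numbers illustrative, assuming a 4 KB page size,
 * ps == 0x1000):
 *
 *	trunc_page_ps(0x12345, 0x1000) == 0x12000
 *	round_page_ps(0x12345, 0x1000) == 0x13000
 *
 * aligned(a, t) reuses trunc_page_ps() with sizeof(t) as the "page"
 * size, so aligned(0x12340, Elf64_Addr) is true while
 * aligned(0x12344, Elf64_Addr) is false (0x12344 & 7 != 0).
 */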
147
148 static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";
149
150 Elf_Brandnote __elfN(freebsd_brandnote) = {
151 .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
152 .hdr.n_descsz = sizeof(int32_t),
153 .hdr.n_type = 1,
154 .vendor = FREEBSD_ABI_VENDOR,
155 .flags = BN_TRANSLATE_OSREL,
156 .trans_osrel = __elfN(freebsd_trans_osrel)
157 };
158
159 static boolean_t
160 __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
161 {
162 uintptr_t p;
163
164 p = (uintptr_t)(note + 1);
165 p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
166 *osrel = *(const int32_t *)(p);
167
168 return (TRUE);
169 }
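
/*
 * Sketch of the note this routine decodes (field values illustrate the
 * layout implied by the header fields above; they are not dumped from a
 * real binary): a FreeBSD .note.ABI-tag section carrying
 * __FreeBSD_version 1001000 would look like
 *
 *	n_namesz = 8		"FreeBSD" plus the terminating NUL
 *	n_descsz = 4		sizeof(int32_t)
 *	n_type   = 1
 *	name     = "FreeBSD\0"	already a multiple of ELF_NOTE_ROUNDSIZE
 *	desc     = 1001000	the value returned through *osrel
 *
 * (note + 1) skips the three-word note header and the roundup2() skips
 * the padded name, leaving p pointing at the descriptor word.
 */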
170
171 static const char GNU_ABI_VENDOR[] = "GNU";
172 static int GNU_KFREEBSD_ABI_DESC = 3;
173
174 Elf_Brandnote __elfN(kfreebsd_brandnote) = {
175 .hdr.n_namesz = sizeof(GNU_ABI_VENDOR),
176 .hdr.n_descsz = 16, /* XXX at least 16 */
177 .hdr.n_type = 1,
178 .vendor = GNU_ABI_VENDOR,
179 .flags = BN_TRANSLATE_OSREL,
180 .trans_osrel = kfreebsd_trans_osrel
181 };
182
183 static boolean_t
184 kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
185 {
186 const Elf32_Word *desc;
187 uintptr_t p;
188
189 p = (uintptr_t)(note + 1);
190 p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
191
192 desc = (const Elf32_Word *)p;
193 if (desc[0] != GNU_KFREEBSD_ABI_DESC)
194 return (FALSE);
195
196 /*
197 	 * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
198 * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
199 */
200 *osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];
201
202 return (TRUE);
203 }
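
/*
 * Worked example (descriptor values invented): a Debian GNU/kFreeBSD
 * binary built against kernel 9.0 would carry
 * desc = { 3, 9, 0, 0 } -- GNU_KFREEBSD_ABI_DESC followed by major,
 * minor and teeny version words -- which the line above folds into
 *
 *	*osrel = 9 * 100000 + 0 * 1000 + 0 = 900000
 */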
204
205 int
206 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
207 {
208 int i;
209
210 for (i = 0; i < MAX_BRANDS; i++) {
211 if (elf_brand_list[i] == NULL) {
212 elf_brand_list[i] = entry;
213 break;
214 }
215 }
216 if (i == MAX_BRANDS) {
217 printf("WARNING: %s: could not insert brandinfo entry: %p\n",
218 __func__, entry);
219 return (-1);
220 }
221 return (0);
222 }
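
/*
 * A minimal sketch of how a machine-dependent backend registers a brand
 * with this list, modeled on the sys/*/elf_machdep.c files; the field
 * values below are assumptions for illustration, not copied from any
 * particular architecture:
 */
#if 0	/* example only */
static Elf_Brandinfo example_brand_info = {
	.brand		= ELFOSABI_FREEBSD,
	.machine	= EM_X86_64,
	.compat_3_brand	= "FreeBSD",
	.interp_path	= "/libexec/ld-elf.so.1",
	.sysvec		= &elf64_freebsd_sysvec,
	.brand_note	= &elf64_freebsd_brandnote,
	.flags		= BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};

SYSINIT(example_elf64, SI_SUB_EXEC, SI_ORDER_FIRST,
    (sysinit_cfunc_t)elf64_insert_brand_entry, &example_brand_info);
#endif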
223
224 int
225 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
226 {
227 int i;
228
229 for (i = 0; i < MAX_BRANDS; i++) {
230 if (elf_brand_list[i] == entry) {
231 elf_brand_list[i] = NULL;
232 break;
233 }
234 }
235 if (i == MAX_BRANDS)
236 return (-1);
237 return (0);
238 }
239
240 int
241 __elfN(brand_inuse)(Elf_Brandinfo *entry)
242 {
243 struct proc *p;
244 int rval = FALSE;
245
246 sx_slock(&allproc_lock);
247 FOREACH_PROC_IN_SYSTEM(p) {
248 if (p->p_sysent == entry->sysvec) {
249 rval = TRUE;
250 break;
251 }
252 }
253 sx_sunlock(&allproc_lock);
254
255 return (rval);
256 }
257
258 static Elf_Brandinfo *
259 __elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
260 int interp_name_len, int32_t *osrel)
261 {
262 const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
263 Elf_Brandinfo *bi;
264 boolean_t ret;
265 int i;
266
267 /*
268 * We support four types of branding -- (1) the ELF EI_OSABI field
269 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
270 * branding w/in the ELF header, (3) path of the `interp_path'
271 * field, and (4) the ".note.ABI-tag" ELF section.
272 */
273
274 /* Look for an ".note.ABI-tag" ELF section */
275 for (i = 0; i < MAX_BRANDS; i++) {
276 bi = elf_brand_list[i];
277 if (bi == NULL)
278 continue;
279 if (hdr->e_machine == bi->machine && (bi->flags &
280 (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
281 ret = __elfN(check_note)(imgp, bi->brand_note, osrel);
282 if (ret)
283 return (bi);
284 }
285 }
286
287 /* If the executable has a brand, search for it in the brand list. */
288 for (i = 0; i < MAX_BRANDS; i++) {
289 bi = elf_brand_list[i];
290 if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
291 continue;
292 if (hdr->e_machine == bi->machine &&
293 (hdr->e_ident[EI_OSABI] == bi->brand ||
294 strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
295 bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
296 return (bi);
297 }
298
299 /* Lacking a known brand, search for a recognized interpreter. */
300 if (interp != NULL) {
301 for (i = 0; i < MAX_BRANDS; i++) {
302 bi = elf_brand_list[i];
303 if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
304 continue;
305 if (hdr->e_machine == bi->machine &&
306 /* ELF image p_filesz includes terminating zero */
307 strlen(bi->interp_path) + 1 == interp_name_len &&
308 strncmp(interp, bi->interp_path, interp_name_len)
309 == 0)
310 return (bi);
311 }
312 }
313
314 /* Lacking a recognized interpreter, try the default brand */
315 for (i = 0; i < MAX_BRANDS; i++) {
316 bi = elf_brand_list[i];
317 if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
318 continue;
319 if (hdr->e_machine == bi->machine &&
320 __elfN(fallback_brand) == bi->brand)
321 return (bi);
322 }
323 return (NULL);
324 }
325
326 static int
327 __elfN(check_header)(const Elf_Ehdr *hdr)
328 {
329 Elf_Brandinfo *bi;
330 int i;
331
332 if (!IS_ELF(*hdr) ||
333 hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
334 hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
335 hdr->e_ident[EI_VERSION] != EV_CURRENT ||
336 hdr->e_phentsize != sizeof(Elf_Phdr) ||
337 hdr->e_version != ELF_TARG_VER)
338 return (ENOEXEC);
339
340 /*
341 * Make sure we have at least one brand for this machine.
342 */
343
344 for (i = 0; i < MAX_BRANDS; i++) {
345 bi = elf_brand_list[i];
346 if (bi != NULL && bi->machine == hdr->e_machine)
347 break;
348 }
349 if (i == MAX_BRANDS)
350 return (ENOEXEC);
351
352 return (0);
353 }
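
/*
 * For reference, the values a native FreeBSD/amd64 executable must
 * present to pass this check (listed per the ELF64 definitions, as an
 * illustration):
 *
 *	e_ident[EI_CLASS]   = ELFCLASS64
 *	e_ident[EI_DATA]    = ELFDATA2LSB
 *	e_ident[EI_VERSION] = EV_CURRENT
 *	e_phentsize         = 56	sizeof(Elf64_Phdr)
 *	e_version           = EV_CURRENT
 */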
354
355 static int
356 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
357 vm_offset_t start, vm_offset_t end, vm_prot_t prot)
358 {
359 struct sf_buf *sf;
360 int error;
361 vm_offset_t off;
362
363 /*
364 * Create the page if it doesn't exist yet. Ignore errors.
365 */
366 vm_map_lock(map);
367 vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
368 VM_PROT_ALL, VM_PROT_ALL, 0);
369 vm_map_unlock(map);
370
371 /*
372 * Find the page from the underlying object.
373 */
374 if (object) {
375 sf = vm_imgact_map_page(object, offset);
376 if (sf == NULL)
377 return (KERN_FAILURE);
378 off = offset - trunc_page(offset);
379 error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
380 end - start);
381 vm_imgact_unmap_page(sf);
382 if (error) {
383 return (KERN_FAILURE);
384 }
385 }
386
387 return (KERN_SUCCESS);
388 }
389
390 static int
391 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
392 vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
393 {
394 struct sf_buf *sf;
395 vm_offset_t off;
396 vm_size_t sz;
397 int error, rv;
398
399 if (start != trunc_page(start)) {
400 rv = __elfN(map_partial)(map, object, offset, start,
401 round_page(start), prot);
402 if (rv)
403 return (rv);
404 offset += round_page(start) - start;
405 start = round_page(start);
406 }
407 if (end != round_page(end)) {
408 rv = __elfN(map_partial)(map, object, offset +
409 trunc_page(end) - start, trunc_page(end), end, prot);
410 if (rv)
411 return (rv);
412 end = trunc_page(end);
413 }
414 if (end > start) {
415 if (offset & PAGE_MASK) {
416 /*
417 * The mapping is not page aligned. This means we have
418 * to copy the data. Sigh.
419 */
420 rv = vm_map_find(map, NULL, 0, &start, end - start, 0,
421 VMFS_NO_SPACE, prot | VM_PROT_WRITE, VM_PROT_ALL,
422 0);
423 if (rv)
424 return (rv);
425 if (object == NULL)
426 return (KERN_SUCCESS);
427 for (; start < end; start += sz) {
428 sf = vm_imgact_map_page(object, offset);
429 if (sf == NULL)
430 return (KERN_FAILURE);
431 off = offset - trunc_page(offset);
432 sz = end - start;
433 if (sz > PAGE_SIZE - off)
434 sz = PAGE_SIZE - off;
435 error = copyout((caddr_t)sf_buf_kva(sf) + off,
436 (caddr_t)start, sz);
437 vm_imgact_unmap_page(sf);
438 if (error) {
439 return (KERN_FAILURE);
440 }
441 offset += sz;
442 }
443 rv = KERN_SUCCESS;
444 } else {
445 vm_object_reference(object);
446 vm_map_lock(map);
447 rv = vm_map_insert(map, object, offset, start, end,
448 prot, VM_PROT_ALL, cow);
449 vm_map_unlock(map);
450 if (rv != KERN_SUCCESS)
451 vm_object_deallocate(object);
452 }
453 return (rv);
454 } else {
455 return (KERN_SUCCESS);
456 }
457 }
458
459 static int
460 __elfN(load_section)(struct image_params *imgp, vm_offset_t offset,
461 caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
462 size_t pagesize)
463 {
464 struct sf_buf *sf;
465 size_t map_len;
466 vm_map_t map;
467 vm_object_t object;
468 vm_offset_t map_addr;
469 int error, rv, cow;
470 size_t copy_len;
471 vm_offset_t file_addr;
472
473 /*
474 * It's necessary to fail if the filsz + offset taken from the
475 * header is greater than the actual file pager object's size.
476 * If we were to allow this, then the vm_map_find() below would
477 * walk right off the end of the file object and into the ether.
478 *
479 * While I'm here, might as well check for something else that
480 * is invalid: filsz cannot be greater than memsz.
481 */
482 if ((off_t)filsz + offset > imgp->attr->va_size || filsz > memsz) {
483 uprintf("elf_load_section: truncated ELF file\n");
484 return (ENOEXEC);
485 }
486
487 object = imgp->object;
488 map = &imgp->proc->p_vmspace->vm_map;
489 map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
490 file_addr = trunc_page_ps(offset, pagesize);
491
492 /*
493 * We have two choices. We can either clear the data in the last page
494 * of an oversized mapping, or we can start the anon mapping a page
495 * early and copy the initialized data into that first page. We
496 * choose the second..
497 */
498 if (memsz > filsz)
499 map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
500 else
501 map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
502
503 if (map_len != 0) {
504 /* cow flags: don't dump readonly sections in core */
505 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
506 (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
507
508 rv = __elfN(map_insert)(map,
509 object,
510 file_addr, /* file offset */
511 map_addr, /* virtual start */
512 map_addr + map_len,/* virtual end */
513 prot,
514 cow);
515 if (rv != KERN_SUCCESS)
516 return (EINVAL);
517
518 /* we can stop now if we've covered it all */
519 if (memsz == filsz) {
520 return (0);
521 }
522 }
523
524
525 /*
526 * We have to get the remaining bit of the file into the first part
527 * of the oversized map segment. This is normally because the .data
528 * segment in the file is extended to provide bss. It's a neat idea
529 * to try and save a page, but it's a pain in the behind to implement.
530 */
531 copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
532 map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
533 map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
534 map_addr;
535
536 /* This had damn well better be true! */
537 if (map_len != 0) {
538 rv = __elfN(map_insert)(map, NULL, 0, map_addr, map_addr +
539 map_len, VM_PROT_ALL, 0);
540 if (rv != KERN_SUCCESS) {
541 return (EINVAL);
542 }
543 }
544
545 if (copy_len != 0) {
546 vm_offset_t off;
547
548 sf = vm_imgact_map_page(object, offset + filsz);
549 if (sf == NULL)
550 return (EIO);
551
552 /* send the page fragment to user space */
553 off = trunc_page_ps(offset + filsz, pagesize) -
554 trunc_page(offset + filsz);
555 error = copyout((caddr_t)sf_buf_kva(sf) + off,
556 (caddr_t)map_addr, copy_len);
557 vm_imgact_unmap_page(sf);
558 if (error) {
559 return (error);
560 }
561 }
562
563 /*
564 * set it to the specified protection.
565 * XXX had better undo the damage from pasting over the cracks here!
566 */
567 vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
568 map_len), prot, FALSE);
569
570 return (0);
571 }
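
/*
 * Worked example of the bss handling above (numbers invented): a
 * segment with offset = 0x10234, filsz = 0x100, memsz = 0x2000,
 * vmaddr = 0x601234 and a 4 KB page size gives
 *
 *	copy_len = 0x10334 - trunc_page(0x10334)            = 0x334
 *	map_addr = trunc_page(0x601234 + 0x100)             = 0x601000
 *	map_len  = round_page(0x601234 + 0x2000) - map_addr = 0x3000
 *
 * so the anonymous mapping starts one page early at 0x601000 and the
 * 0x334 bytes of initialized data are copied into its first page.
 */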
572
573 /*
574 * Load the file "file" into memory. It may be either a shared object
575 * or an executable.
576 *
577 * The "addr" reference parameter is in/out. On entry, it specifies
578 * the address where a shared object should be loaded. If the file is
579 * an executable, this value is ignored. On exit, "addr" specifies
580 * where the file was actually loaded.
581 *
582 * The "entry" reference parameter is out only. On exit, it specifies
583 * the entry point for the loaded file.
584 */
585 static int
586 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
587 u_long *entry, size_t pagesize)
588 {
589 struct {
590 struct nameidata nd;
591 struct vattr attr;
592 struct image_params image_params;
593 } *tempdata;
594 const Elf_Ehdr *hdr = NULL;
595 const Elf_Phdr *phdr = NULL;
596 struct nameidata *nd;
597 struct vattr *attr;
598 struct image_params *imgp;
599 vm_prot_t prot;
600 u_long rbase;
601 u_long base_addr = 0;
602 int error, i, numsegs;
603
604 #ifdef CAPABILITY_MODE
605 /*
606 * XXXJA: This check can go away once we are sufficiently confident
607 * that the checks in namei() are correct.
608 */
609 if (IN_CAPABILITY_MODE(curthread))
610 return (ECAPMODE);
611 #endif
612
613 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
614 nd = &tempdata->nd;
615 attr = &tempdata->attr;
616 imgp = &tempdata->image_params;
617
618 /*
619 * Initialize part of the common data
620 */
621 imgp->proc = p;
622 imgp->attr = attr;
623 imgp->firstpage = NULL;
624 imgp->image_header = NULL;
625 imgp->object = NULL;
626 imgp->execlabel = NULL;
627
628 NDINIT(nd, LOOKUP, LOCKLEAF | FOLLOW, UIO_SYSSPACE, file, curthread);
629 if ((error = namei(nd)) != 0) {
630 nd->ni_vp = NULL;
631 goto fail;
632 }
633 NDFREE(nd, NDF_ONLY_PNBUF);
634 imgp->vp = nd->ni_vp;
635
636 /*
637 * Check permissions, modes, uid, etc on the file, and "open" it.
638 */
639 error = exec_check_permissions(imgp);
640 if (error)
641 goto fail;
642
643 error = exec_map_first_page(imgp);
644 if (error)
645 goto fail;
646
647 /*
648 * Also make certain that the interpreter stays the same, so set
649 * its VV_TEXT flag, too.
650 */
651 VOP_SET_TEXT(nd->ni_vp);
652
653 imgp->object = nd->ni_vp->v_object;
654
655 hdr = (const Elf_Ehdr *)imgp->image_header;
656 if ((error = __elfN(check_header)(hdr)) != 0)
657 goto fail;
658 if (hdr->e_type == ET_DYN)
659 rbase = *addr;
660 else if (hdr->e_type == ET_EXEC)
661 rbase = 0;
662 else {
663 error = ENOEXEC;
664 goto fail;
665 }
666
667 /* Only support headers that fit within first page for now */
668 if ((hdr->e_phoff > PAGE_SIZE) ||
669 (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) {
670 error = ENOEXEC;
671 goto fail;
672 }
673
674 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
675 if (!aligned(phdr, Elf_Addr)) {
676 error = ENOEXEC;
677 goto fail;
678 }
679
680 for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
681 if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) {
682 /* Loadable segment */
683 prot = __elfN(trans_prot)(phdr[i].p_flags);
684 error = __elfN(load_section)(imgp, phdr[i].p_offset,
685 (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
686 phdr[i].p_memsz, phdr[i].p_filesz, prot, pagesize);
687 if (error != 0)
688 goto fail;
689 /*
690 * Establish the base address if this is the
691 * first segment.
692 */
693 if (numsegs == 0)
694 base_addr = trunc_page(phdr[i].p_vaddr +
695 rbase);
696 numsegs++;
697 }
698 }
699 *addr = base_addr;
700 *entry = (unsigned long)hdr->e_entry + rbase;
701
702 fail:
703 if (imgp->firstpage)
704 exec_unmap_first_page(imgp);
705
706 if (nd->ni_vp)
707 vput(nd->ni_vp);
708
709 free(tempdata, M_TEMP);
710
711 return (error);
712 }
713
714 static int
715 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
716 {
717 const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
718 const Elf_Phdr *phdr;
719 Elf_Auxargs *elf_auxargs;
720 struct vmspace *vmspace;
721 vm_prot_t prot;
722 u_long text_size = 0, data_size = 0, total_size = 0;
723 u_long text_addr = 0, data_addr = 0;
724 u_long seg_size, seg_addr;
725 u_long addr, baddr, et_dyn_addr, entry = 0, proghdr = 0;
726 int32_t osrel = 0;
727 int error = 0, i, n, interp_name_len = 0;
728 const char *interp = NULL, *newinterp = NULL;
729 Elf_Brandinfo *brand_info;
730 char *path;
731 struct sysentvec *sv;
732
733 /*
734 	 * Do we have a valid ELF header?
735 *
736 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
737 	 * if the particular brand doesn't support it.
738 */
739 if (__elfN(check_header)(hdr) != 0 ||
740 (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
741 return (-1);
742
743 /*
744 * From here on down, we return an errno, not -1, as we've
745 * detected an ELF file.
746 */
747
748 if ((hdr->e_phoff > PAGE_SIZE) ||
749 (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) {
750 /* Only support headers in first page for now */
751 return (ENOEXEC);
752 }
753 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
754 if (!aligned(phdr, Elf_Addr))
755 return (ENOEXEC);
756 n = 0;
757 baddr = 0;
758 for (i = 0; i < hdr->e_phnum; i++) {
759 switch (phdr[i].p_type) {
760 case PT_LOAD:
761 if (n == 0)
762 baddr = phdr[i].p_vaddr;
763 n++;
764 break;
765 case PT_INTERP:
766 /* Path to interpreter */
767 if (phdr[i].p_filesz > MAXPATHLEN ||
768 phdr[i].p_offset > PAGE_SIZE ||
769 phdr[i].p_filesz > PAGE_SIZE - phdr[i].p_offset)
770 return (ENOEXEC);
771 interp = imgp->image_header + phdr[i].p_offset;
772 interp_name_len = phdr[i].p_filesz;
773 break;
774 case PT_GNU_STACK:
775 if (__elfN(nxstack))
776 imgp->stack_prot =
777 __elfN(trans_prot)(phdr[i].p_flags);
778 break;
779 }
780 }
781
782 brand_info = __elfN(get_brandinfo)(imgp, interp, interp_name_len,
783 &osrel);
784 if (brand_info == NULL) {
785 uprintf("ELF binary type \"%u\" not known.\n",
786 hdr->e_ident[EI_OSABI]);
787 return (ENOEXEC);
788 }
789 if (hdr->e_type == ET_DYN) {
790 if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0)
791 return (ENOEXEC);
792 /*
793 * Honour the base load address from the dso if it is
794 * non-zero for some reason.
795 */
796 if (baddr == 0)
797 et_dyn_addr = ET_DYN_LOAD_ADDR;
798 else
799 et_dyn_addr = 0;
800 } else
801 et_dyn_addr = 0;
802 sv = brand_info->sysvec;
803 if (interp != NULL && brand_info->interp_newpath != NULL)
804 newinterp = brand_info->interp_newpath;
805
806 /*
807 * Avoid a possible deadlock if the current address space is destroyed
808 * and that address space maps the locked vnode. In the common case,
809 * the locked vnode's v_usecount is decremented but remains greater
810 * than zero. Consequently, the vnode lock is not needed by vrele().
811 * However, in cases where the vnode lock is external, such as nullfs,
812 * v_usecount may become zero.
813 *
814 * The VV_TEXT flag prevents modifications to the executable while
815 * the vnode is unlocked.
816 */
817 VOP_UNLOCK(imgp->vp, 0);
818
819 error = exec_new_vmspace(imgp, sv);
820 imgp->proc->p_sysent = sv;
821
822 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
823 if (error)
824 return (error);
825
826 for (i = 0; i < hdr->e_phnum; i++) {
827 switch (phdr[i].p_type) {
828 case PT_LOAD: /* Loadable segment */
829 if (phdr[i].p_memsz == 0)
830 break;
831 prot = __elfN(trans_prot)(phdr[i].p_flags);
832 error = __elfN(load_section)(imgp, phdr[i].p_offset,
833 (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr,
834 phdr[i].p_memsz, phdr[i].p_filesz, prot,
835 sv->sv_pagesize);
836 if (error != 0)
837 return (error);
838
839 /*
840 * If this segment contains the program headers,
841 * remember their virtual address for the AT_PHDR
842 * aux entry. Static binaries don't usually include
843 * a PT_PHDR entry.
844 */
845 if (phdr[i].p_offset == 0 &&
846 hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
847 <= phdr[i].p_filesz)
848 proghdr = phdr[i].p_vaddr + hdr->e_phoff +
849 et_dyn_addr;
850
851 seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
852 seg_size = round_page(phdr[i].p_memsz +
853 phdr[i].p_vaddr + et_dyn_addr - seg_addr);
854
855 /*
856 * Make the largest executable segment the official
857 * text segment and all others data.
858 *
859 * Note that obreak() assumes that data_addr +
860 * data_size == end of data load area, and the ELF
861 * file format expects segments to be sorted by
862 * address. If multiple data segments exist, the
863 * last one will be used.
864 */
865
866 if (phdr[i].p_flags & PF_X && text_size < seg_size) {
867 text_size = seg_size;
868 text_addr = seg_addr;
869 } else {
870 data_size = seg_size;
871 data_addr = seg_addr;
872 }
873 total_size += seg_size;
874 break;
875 case PT_PHDR: /* Program header table info */
876 proghdr = phdr[i].p_vaddr + et_dyn_addr;
877 break;
878 default:
879 break;
880 }
881 }
882
883 if (data_addr == 0 && data_size == 0) {
884 data_addr = text_addr;
885 data_size = text_size;
886 }
887
888 entry = (u_long)hdr->e_entry + et_dyn_addr;
889
890 /*
891 * Check limits. It should be safe to check the
892 * limits after loading the segments since we do
893 * not actually fault in all the segments pages.
894 */
895 PROC_LOCK(imgp->proc);
896 if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
897 text_size > maxtsiz ||
898 total_size > lim_cur(imgp->proc, RLIMIT_VMEM) ||
899 racct_set(imgp->proc, RACCT_DATA, data_size) != 0 ||
900 racct_set(imgp->proc, RACCT_VMEM, total_size) != 0) {
901 PROC_UNLOCK(imgp->proc);
902 return (ENOMEM);
903 }
904
905 vmspace = imgp->proc->p_vmspace;
906 vmspace->vm_tsize = text_size >> PAGE_SHIFT;
907 vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
908 vmspace->vm_dsize = data_size >> PAGE_SHIFT;
909 vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
910
911 /*
912 * We load the dynamic linker where a userland call
913 * to mmap(0, ...) would put it. The rationale behind this
914 * calculation is that it leaves room for the heap to grow to
915 * its maximum allowed size.
916 */
917 addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(imgp->proc,
918 RLIMIT_DATA));
919 PROC_UNLOCK(imgp->proc);
920
921 imgp->entry_addr = entry;
922
923 if (interp != NULL) {
924 int have_interp = FALSE;
925 VOP_UNLOCK(imgp->vp, 0);
926 if (brand_info->emul_path != NULL &&
927 brand_info->emul_path[0] != '\0') {
928 path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
929 snprintf(path, MAXPATHLEN, "%s%s",
930 brand_info->emul_path, interp);
931 error = __elfN(load_file)(imgp->proc, path, &addr,
932 &imgp->entry_addr, sv->sv_pagesize);
933 free(path, M_TEMP);
934 if (error == 0)
935 have_interp = TRUE;
936 }
937 if (!have_interp && newinterp != NULL) {
938 error = __elfN(load_file)(imgp->proc, newinterp, &addr,
939 &imgp->entry_addr, sv->sv_pagesize);
940 if (error == 0)
941 have_interp = TRUE;
942 }
943 if (!have_interp) {
944 error = __elfN(load_file)(imgp->proc, interp, &addr,
945 &imgp->entry_addr, sv->sv_pagesize);
946 }
947 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
948 if (error != 0) {
949 uprintf("ELF interpreter %s not found\n", interp);
950 return (error);
951 }
952 } else
953 addr = et_dyn_addr;
954
955 /*
956 * Construct auxargs table (used by the fixup routine)
957 */
958 elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
959 elf_auxargs->execfd = -1;
960 elf_auxargs->phdr = proghdr;
961 elf_auxargs->phent = hdr->e_phentsize;
962 elf_auxargs->phnum = hdr->e_phnum;
963 elf_auxargs->pagesz = PAGE_SIZE;
964 elf_auxargs->base = addr;
965 elf_auxargs->flags = 0;
966 elf_auxargs->entry = entry;
967
968 imgp->auxargs = elf_auxargs;
969 imgp->interpreted = 0;
970 imgp->reloc_base = addr;
971 imgp->proc->p_osrel = osrel;
972
973 return (error);
974 }
975
976 #define suword __CONCAT(suword, __ELF_WORD_SIZE)
977
978 int
979 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
980 {
981 Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
982 Elf_Addr *base;
983 Elf_Addr *pos;
984
985 base = (Elf_Addr *)*stack_base;
986 pos = base + (imgp->args->argc + imgp->args->envc + 2);
987
988 if (args->execfd != -1)
989 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
990 AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
991 AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
992 AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
993 AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
994 AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
995 AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
996 AUXARGS_ENTRY(pos, AT_BASE, args->base);
997 if (imgp->execpathp != 0)
998 AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp);
999 AUXARGS_ENTRY(pos, AT_OSRELDATE, osreldate);
1000 if (imgp->canary != 0) {
1001 AUXARGS_ENTRY(pos, AT_CANARY, imgp->canary);
1002 AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen);
1003 }
1004 AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus);
1005 if (imgp->pagesizes != 0) {
1006 AUXARGS_ENTRY(pos, AT_PAGESIZES, imgp->pagesizes);
1007 AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen);
1008 }
1009 if (imgp->sysent->sv_timekeep_base != 0) {
1010 AUXARGS_ENTRY(pos, AT_TIMEKEEP,
1011 imgp->sysent->sv_timekeep_base);
1012 }
1013 AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj
1014 != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
1015 imgp->sysent->sv_stackprot);
1016 AUXARGS_ENTRY(pos, AT_NULL, 0);
1017
1018 free(imgp->auxargs, M_TEMP);
1019 imgp->auxargs = NULL;
1020
1021 base--;
1022 suword(base, (long)imgp->args->argc);
1023 *stack_base = (register_t *)base;
1024 return (0);
1025 }
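
/*
 * After the fixup above the new image's stack looks roughly like this
 * (a sketch assembled from the pointer arithmetic above, not a memory
 * dump; ascending addresses):
 *
 *	base[-1]		argc, written by the suword() above
 *	base[0 .. argc-1]	argv pointers
 *	base[argc]		NULL
 *	base[argc+1 ..]		envc environment pointers, then NULL
 *	pos			AUXARGS_ENTRY() pairs, terminated by AT_NULL
 *
 * which is why pos starts at base + argc + envc + 2.
 */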
1026
1027 /*
1028 * Code for generating ELF core dumps.
1029 */
1030
1031 typedef void (*segment_callback)(vm_map_entry_t, void *);
1032
1033 /* Closure for cb_put_phdr(). */
1034 struct phdr_closure {
1035 Elf_Phdr *phdr; /* Program header to fill in */
1036 Elf_Off offset; /* Offset of segment in core file */
1037 };
1038
1039 /* Closure for cb_size_segment(). */
1040 struct sseg_closure {
1041 int count; /* Count of writable segments. */
1042 size_t size; /* Total size of all writable segments. */
1043 };
1044
1045 typedef void (*outfunc_t)(void *, struct sbuf *, size_t *);
1046
1047 struct note_info {
1048 int type; /* Note type. */
1049 outfunc_t outfunc; /* Output function. */
1050 void *outarg; /* Argument for the output function. */
1051 size_t outsize; /* Output size. */
1052 TAILQ_ENTRY(note_info) link; /* Link to the next note info. */
1053 };
1054
1055 TAILQ_HEAD(note_info_list, note_info);
1056
1057 static void cb_put_phdr(vm_map_entry_t, void *);
1058 static void cb_size_segment(vm_map_entry_t, void *);
1059 static void each_writable_segment(struct thread *, segment_callback, void *);
1060 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
1061 int, void *, size_t, struct note_info_list *, size_t, gzFile);
1062 static void __elfN(prepare_notes)(struct thread *, struct note_info_list *,
1063 size_t *);
1064 static void __elfN(puthdr)(struct thread *, void *, size_t, int, size_t);
1065 static void __elfN(putnote)(struct note_info *, struct sbuf *);
1066 static size_t register_note(struct note_info_list *, int, outfunc_t, void *);
1067 static int sbuf_drain_core_output(void *, const char *, int);
1068 static int sbuf_drain_count(void *arg, const char *data, int len);
1069
1070 static void __elfN(note_fpregset)(void *, struct sbuf *, size_t *);
1071 static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *);
1072 static void __elfN(note_prstatus)(void *, struct sbuf *, size_t *);
1073 static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *);
1074 static void __elfN(note_thrmisc)(void *, struct sbuf *, size_t *);
1075 static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *);
1076 static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *);
1077 static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *);
1078 static void note_procstat_files(void *, struct sbuf *, size_t *);
1079 static void note_procstat_groups(void *, struct sbuf *, size_t *);
1080 static void note_procstat_osrel(void *, struct sbuf *, size_t *);
1081 static void note_procstat_rlimit(void *, struct sbuf *, size_t *);
1082 static void note_procstat_umask(void *, struct sbuf *, size_t *);
1083 static void note_procstat_vmmap(void *, struct sbuf *, size_t *);
1084
1085 #ifdef COMPRESS_USER_CORES
1086 extern int compress_user_cores;
1087 extern int compress_user_cores_gzlevel;
1088 #endif
1089
1090 static int
1091 core_output(struct vnode *vp, void *base, size_t len, off_t offset,
1092 struct ucred *active_cred, struct ucred *file_cred,
1093 struct thread *td, char *core_buf, gzFile gzfile) {
1094
1095 int error;
1096 if (gzfile) {
1097 #ifdef COMPRESS_USER_CORES
1098 error = compress_core(gzfile, base, core_buf, len, td);
1099 #else
1100 panic("shouldn't be here");
1101 #endif
1102 } else {
1103 error = vn_rdwr_inchunks(UIO_WRITE, vp, base, len, offset,
1104 UIO_USERSPACE, IO_UNIT | IO_DIRECT, active_cred, file_cred,
1105 NULL, td);
1106 }
1107 return (error);
1108 }
1109
1110 /* Coredump output parameters for sbuf drain routine. */
1111 struct sbuf_drain_core_params {
1112 off_t offset;
1113 struct ucred *active_cred;
1114 struct ucred *file_cred;
1115 struct thread *td;
1116 struct vnode *vp;
1117 #ifdef COMPRESS_USER_CORES
1118 gzFile gzfile;
1119 #endif
1120 };
1121
1122 /*
1123 * Drain into a core file.
1124 */
1125 static int
1126 sbuf_drain_core_output(void *arg, const char *data, int len)
1127 {
1128 struct sbuf_drain_core_params *p;
1129 int error, locked;
1130
1131 p = (struct sbuf_drain_core_params *)arg;
1132
1133 /*
1134 * Some kern_proc out routines that print to this sbuf may
1135 * call us with the process lock held. Draining with the
1136 * non-sleepable lock held is unsafe. The lock is needed for
1137 * those routines when dumping a live process. In our case we
1138 * can safely release the lock before draining and acquire
1139 * again after.
1140 */
1141 locked = PROC_LOCKED(p->td->td_proc);
1142 if (locked)
1143 PROC_UNLOCK(p->td->td_proc);
1144 #ifdef COMPRESS_USER_CORES
1145 if (p->gzfile != Z_NULL)
1146 error = compress_core(p->gzfile, NULL, __DECONST(char *, data),
1147 len, p->td);
1148 else
1149 #endif
1150 error = vn_rdwr_inchunks(UIO_WRITE, p->vp,
1151 __DECONST(void *, data), len, p->offset, UIO_SYSSPACE,
1152 IO_UNIT | IO_DIRECT, p->active_cred, p->file_cred, NULL,
1153 p->td);
1154 if (locked)
1155 PROC_LOCK(p->td->td_proc);
1156 if (error != 0)
1157 return (-error);
1158 p->offset += len;
1159 return (len);
1160 }
1161
1162 /*
1163 * Drain into a counter.
1164 */
1165 static int
1166 sbuf_drain_count(void *arg, const char *data __unused, int len)
1167 {
1168 size_t *sizep;
1169
1170 sizep = (size_t *)arg;
1171 *sizep += len;
1172 return (len);
1173 }
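
/*
 * The counting drain lets one output routine serve both the sizing and
 * the writing pass.  A condensed sketch of the sizing pattern used by
 * the note_procstat_*() routines later in this file:
 */
#if 0	/* example only */
	size_t size = 0;
	struct sbuf *sb;

	sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
	sbuf_set_drain(sb, sbuf_drain_count, &size);
	/* ... the same sbuf_bcat()/kern_proc_*_out() calls as the real pass ... */
	sbuf_finish(sb);
	sbuf_delete(sb);
	/* size now holds the byte count the writing pass will produce. */
#endif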
1174
1175 int
1176 __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
1177 {
1178 struct ucred *cred = td->td_ucred;
1179 int error = 0;
1180 struct sseg_closure seginfo;
1181 struct note_info_list notelst;
1182 struct note_info *ninfo;
1183 void *hdr;
1184 size_t hdrsize, notesz, coresize;
1185
1186 gzFile gzfile = Z_NULL;
1187 char *core_buf = NULL;
1188 #ifdef COMPRESS_USER_CORES
1189 char gzopen_flags[8];
1190 char *p;
1191 int doing_compress = flags & IMGACT_CORE_COMPRESS;
1192 #endif
1193
1194 hdr = NULL;
1195 	TAILQ_INIT(&notelst);
1196
1197 #ifdef COMPRESS_USER_CORES
1198 if (doing_compress) {
1199 p = gzopen_flags;
1200 *p++ = 'w';
1201 if (compress_user_cores_gzlevel >= 0 &&
1202 compress_user_cores_gzlevel <= 9)
1203 			*p++ = '0' + compress_user_cores_gzlevel;
1204 *p = 0;
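		/* E.g. compress_user_cores_gzlevel == 9 yields gzopen_flags "w9". */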
1205 gzfile = gz_open("", gzopen_flags, vp);
1206 if (gzfile == Z_NULL) {
1207 error = EFAULT;
1208 goto done;
1209 }
1210 core_buf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO);
1211 if (!core_buf) {
1212 error = ENOMEM;
1213 goto done;
1214 }
1215 }
1216 #endif
1217
1218 /* Size the program segments. */
1219 seginfo.count = 0;
1220 seginfo.size = 0;
1221 each_writable_segment(td, cb_size_segment, &seginfo);
1222
1223 /*
1224 * Collect info about the core file header area.
1225 */
1226 hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count);
1227 	__elfN(prepare_notes)(td, &notelst, &notesz);
1228 coresize = round_page(hdrsize + notesz) + seginfo.size;
1229
1230 #ifdef RACCT
1231 PROC_LOCK(td->td_proc);
1232 error = racct_add(td->td_proc, RACCT_CORE, coresize);
1233 PROC_UNLOCK(td->td_proc);
1234 if (error != 0) {
1235 error = EFAULT;
1236 goto done;
1237 }
1238 #endif
1239 if (coresize >= limit) {
1240 error = EFAULT;
1241 goto done;
1242 }
1243
1244 /*
1245 * Allocate memory for building the header, fill it up,
1246 * and write it out following the notes.
1247 */
1248 hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
1249 if (hdr == NULL) {
1250 error = EINVAL;
1251 goto done;
1252 }
1253 error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize,
1254 	    &notelst, notesz, gzfile);
1255
1256 /* Write the contents of all of the writable segments. */
1257 if (error == 0) {
1258 Elf_Phdr *php;
1259 off_t offset;
1260 int i;
1261
1262 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
1263 offset = round_page(hdrsize + notesz);
1264 for (i = 0; i < seginfo.count; i++) {
1265 error = core_output(vp, (caddr_t)(uintptr_t)php->p_vaddr,
1266 php->p_filesz, offset, cred, NOCRED, curthread, core_buf, gzfile);
1267 if (error != 0)
1268 break;
1269 offset += php->p_filesz;
1270 php++;
1271 }
1272 }
1273 if (error) {
1274 log(LOG_WARNING,
1275 "Failed to write core file for process %s (error %d)\n",
1276 curproc->p_comm, error);
1277 }
1278
1279 done:
1280 #ifdef COMPRESS_USER_CORES
1281 if (core_buf)
1282 free(core_buf, M_TEMP);
1283 if (gzfile)
1284 gzclose(gzfile);
1285 #endif
1286 	while ((ninfo = TAILQ_FIRST(&notelst)) != NULL) {
1287 		TAILQ_REMOVE(&notelst, ninfo, link);
1288 free(ninfo, M_TEMP);
1289 }
1290 if (hdr != NULL)
1291 free(hdr, M_TEMP);
1292
1293 return (error);
1294 }
1295
1296 /*
1297 * A callback for each_writable_segment() to write out the segment's
1298 * program header entry.
1299 */
1300 static void
1301 cb_put_phdr(entry, closure)
1302 vm_map_entry_t entry;
1303 void *closure;
1304 {
1305 struct phdr_closure *phc = (struct phdr_closure *)closure;
1306 Elf_Phdr *phdr = phc->phdr;
1307
1308 phc->offset = round_page(phc->offset);
1309
1310 phdr->p_type = PT_LOAD;
1311 phdr->p_offset = phc->offset;
1312 phdr->p_vaddr = entry->start;
1313 phdr->p_paddr = 0;
1314 phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1315 phdr->p_align = PAGE_SIZE;
1316 phdr->p_flags = __elfN(untrans_prot)(entry->protection);
1317
1318 phc->offset += phdr->p_filesz;
1319 phc->phdr++;
1320 }
1321
1322 /*
1323 * A callback for each_writable_segment() to gather information about
1324 * the number of segments and their total size.
1325 */
1326 static void
1327 cb_size_segment(entry, closure)
1328 vm_map_entry_t entry;
1329 void *closure;
1330 {
1331 struct sseg_closure *ssc = (struct sseg_closure *)closure;
1332
1333 ssc->count++;
1334 ssc->size += entry->end - entry->start;
1335 }
1336
1337 /*
1338 * For each writable segment in the process's memory map, call the given
1339 * function with a pointer to the map entry and some arbitrary
1340 * caller-supplied data.
1341 */
1342 static void
1343 each_writable_segment(td, func, closure)
1344 struct thread *td;
1345 segment_callback func;
1346 void *closure;
1347 {
1348 struct proc *p = td->td_proc;
1349 vm_map_t map = &p->p_vmspace->vm_map;
1350 vm_map_entry_t entry;
1351 vm_object_t backing_object, object;
1352 boolean_t ignore_entry;
1353
1354 vm_map_lock_read(map);
1355 for (entry = map->header.next; entry != &map->header;
1356 entry = entry->next) {
1357 /*
1358 * Don't dump inaccessible mappings, deal with legacy
1359 * coredump mode.
1360 *
1361 * Note that read-only segments related to the elf binary
1362 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1363 * need to arbitrarily ignore such segments.
1364 */
1365 if (elf_legacy_coredump) {
1366 if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1367 continue;
1368 } else {
1369 if ((entry->protection & VM_PROT_ALL) == 0)
1370 continue;
1371 }
1372
1373 /*
1374 		 * Don't include a memory segment in the coredump if
1375 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1376 * madvise(2). Do not dump submaps (i.e. parts of the
1377 * kernel map).
1378 */
1379 if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1380 continue;
1381
1382 if ((object = entry->object.vm_object) == NULL)
1383 continue;
1384
1385 /* Ignore memory-mapped devices and such things. */
1386 VM_OBJECT_RLOCK(object);
1387 while ((backing_object = object->backing_object) != NULL) {
1388 VM_OBJECT_RLOCK(backing_object);
1389 VM_OBJECT_RUNLOCK(object);
1390 object = backing_object;
1391 }
1392 ignore_entry = object->type != OBJT_DEFAULT &&
1393 object->type != OBJT_SWAP && object->type != OBJT_VNODE;
1394 VM_OBJECT_RUNLOCK(object);
1395 if (ignore_entry)
1396 continue;
1397
1398 (*func)(entry, closure);
1399 }
1400 vm_map_unlock_read(map);
1401 }
1402
1403 /*
1404 * Write the core file header to the file, including padding up to
1405 * the page boundary.
1406 */
1407 static int
1408 __elfN(corehdr)(struct thread *td, struct vnode *vp, struct ucred *cred,
1409 int numsegs, void *hdr, size_t hdrsize, struct note_info_list *notelst,
1410 size_t notesz, gzFile gzfile)
1411 {
1412 struct sbuf_drain_core_params params;
1413 struct note_info *ninfo;
1414 struct sbuf *sb;
1415 int error;
1416
1417 /* Fill in the header. */
1418 bzero(hdr, hdrsize);
1419 __elfN(puthdr)(td, hdr, hdrsize, numsegs, notesz);
1420
1421 params.offset = 0;
1422 params.active_cred = cred;
1423 params.file_cred = NOCRED;
1424 params.td = td;
1425 params.vp = vp;
1426 #ifdef COMPRESS_USER_CORES
1427 params.gzfile = gzfile;
1428 #endif
1429 sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN);
1430 	sbuf_set_drain(sb, sbuf_drain_core_output, &params);
1431 sbuf_start_section(sb, NULL);
1432 sbuf_bcat(sb, hdr, hdrsize);
1433 TAILQ_FOREACH(ninfo, notelst, link)
1434 __elfN(putnote)(ninfo, sb);
1435 /* Align up to a page boundary for the program segments. */
1436 sbuf_end_section(sb, -1, PAGE_SIZE, 0);
1437 error = sbuf_finish(sb);
1438 sbuf_delete(sb);
1439
1440 return (error);
1441 }
1442
1443 static void
1444 __elfN(prepare_notes)(struct thread *td, struct note_info_list *list,
1445 size_t *sizep)
1446 {
1447 struct proc *p;
1448 struct thread *thr;
1449 size_t size;
1450
1451 p = td->td_proc;
1452 size = 0;
1453
1454 size += register_note(list, NT_PRPSINFO, __elfN(note_prpsinfo), p);
1455
1456 /*
1457 * To have the debugger select the right thread (LWP) as the initial
1458 * thread, we dump the state of the thread passed to us in td first.
1459 	 * This is the thread that causes the core dump and is thus likely to
1460 * be the right thread one wants to have selected in the debugger.
1461 */
1462 thr = td;
1463 while (thr != NULL) {
1464 size += register_note(list, NT_PRSTATUS,
1465 __elfN(note_prstatus), thr);
1466 size += register_note(list, NT_FPREGSET,
1467 __elfN(note_fpregset), thr);
1468 size += register_note(list, NT_THRMISC,
1469 __elfN(note_thrmisc), thr);
1470 size += register_note(list, -1,
1471 __elfN(note_threadmd), thr);
1472
1473 thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
1474 TAILQ_NEXT(thr, td_plist);
1475 if (thr == td)
1476 thr = TAILQ_NEXT(thr, td_plist);
1477 }
1478
1479 size += register_note(list, NT_PROCSTAT_PROC,
1480 __elfN(note_procstat_proc), p);
1481 size += register_note(list, NT_PROCSTAT_FILES,
1482 note_procstat_files, p);
1483 size += register_note(list, NT_PROCSTAT_VMMAP,
1484 note_procstat_vmmap, p);
1485 size += register_note(list, NT_PROCSTAT_GROUPS,
1486 note_procstat_groups, p);
1487 size += register_note(list, NT_PROCSTAT_UMASK,
1488 note_procstat_umask, p);
1489 size += register_note(list, NT_PROCSTAT_RLIMIT,
1490 note_procstat_rlimit, p);
1491 size += register_note(list, NT_PROCSTAT_OSREL,
1492 note_procstat_osrel, p);
1493 size += register_note(list, NT_PROCSTAT_PSSTRINGS,
1494 __elfN(note_procstat_psstrings), p);
1495 size += register_note(list, NT_PROCSTAT_AUXV,
1496 __elfN(note_procstat_auxv), p);
1497
1498 *sizep = size;
1499 }
1500
1501 static void
1502 __elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs,
1503 size_t notesz)
1504 {
1505 Elf_Ehdr *ehdr;
1506 Elf_Phdr *phdr;
1507 struct phdr_closure phc;
1508
1509 ehdr = (Elf_Ehdr *)hdr;
1510 phdr = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr));
1511
1512 ehdr->e_ident[EI_MAG0] = ELFMAG0;
1513 ehdr->e_ident[EI_MAG1] = ELFMAG1;
1514 ehdr->e_ident[EI_MAG2] = ELFMAG2;
1515 ehdr->e_ident[EI_MAG3] = ELFMAG3;
1516 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1517 ehdr->e_ident[EI_DATA] = ELF_DATA;
1518 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1519 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1520 ehdr->e_ident[EI_ABIVERSION] = 0;
1521 ehdr->e_ident[EI_PAD] = 0;
1522 ehdr->e_type = ET_CORE;
1523 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
1524 ehdr->e_machine = ELF_ARCH32;
1525 #else
1526 ehdr->e_machine = ELF_ARCH;
1527 #endif
1528 ehdr->e_version = EV_CURRENT;
1529 ehdr->e_entry = 0;
1530 ehdr->e_phoff = sizeof(Elf_Ehdr);
1531 ehdr->e_flags = 0;
1532 ehdr->e_ehsize = sizeof(Elf_Ehdr);
1533 ehdr->e_phentsize = sizeof(Elf_Phdr);
1534 ehdr->e_phnum = numsegs + 1;
1535 ehdr->e_shentsize = sizeof(Elf_Shdr);
1536 ehdr->e_shnum = 0;
1537 ehdr->e_shstrndx = SHN_UNDEF;
1538
1539 /*
1540 * Fill in the program header entries.
1541 */
1542
1543 	/* The note segment. */
1544 phdr->p_type = PT_NOTE;
1545 phdr->p_offset = hdrsize;
1546 phdr->p_vaddr = 0;
1547 phdr->p_paddr = 0;
1548 phdr->p_filesz = notesz;
1549 phdr->p_memsz = 0;
1550 phdr->p_flags = PF_R;
1551 phdr->p_align = ELF_NOTE_ROUNDSIZE;
1552 phdr++;
1553
1554 /* All the writable segments from the program. */
1555 phc.phdr = phdr;
1556 phc.offset = round_page(hdrsize + notesz);
1557 each_writable_segment(td, cb_put_phdr, &phc);
1558 }
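
/*
 * Resulting core file layout, derived from the offsets computed above:
 *
 *	+-------------------+ 0
 *	| Elf_Ehdr          |
 *	| Elf_Phdr[0]       | PT_NOTE, p_offset = hdrsize
 *	| Elf_Phdr[1..n]    | PT_LOAD, one per writable segment
 *	+-------------------+ hdrsize
 *	| notes             | notesz bytes
 *	+-------------------+ round_page(hdrsize + notesz)
 *	| segment contents  |
 *	+-------------------+
 */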
1559
1560 static size_t
1561 register_note(struct note_info_list *list, int type, outfunc_t out, void *arg)
1562 {
1563 struct note_info *ninfo;
1564 size_t size, notesize;
1565
1566 size = 0;
1567 out(arg, NULL, &size);
1568 ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
1569 ninfo->type = type;
1570 ninfo->outfunc = out;
1571 ninfo->outarg = arg;
1572 ninfo->outsize = size;
1573 TAILQ_INSERT_TAIL(list, ninfo, link);
1574
1575 if (type == -1)
1576 return (size);
1577
1578 notesize = sizeof(Elf_Note) + /* note header */
1579 roundup2(8, ELF_NOTE_ROUNDSIZE) + /* note name ("FreeBSD") */
1580 roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */
1581
1582 return (notesize);
1583 }
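
/*
 * Worked example (payload size invented): a note whose descriptor
 * measures 13 bytes is accounted as
 *
 *	notesize = 12			sizeof(Elf_Note)
 *	         + roundup2(8, 4)	=  8, "FreeBSD" + NUL
 *	         + roundup2(13, 4)	= 16, padded descriptor
 *	         = 36 bytes
 *
 * A type of -1 marks the raw machine-dependent thread dump, which
 * __elfN(putnote)() emits without a note header.
 */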
1584
1585 static void
1586 __elfN(putnote)(struct note_info *ninfo, struct sbuf *sb)
1587 {
1588 Elf_Note note;
1589 ssize_t old_len;
1590
1591 if (ninfo->type == -1) {
1592 ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
1593 return;
1594 }
1595
1596 note.n_namesz = 8; /* strlen("FreeBSD") + 1 */
1597 note.n_descsz = ninfo->outsize;
1598 note.n_type = ninfo->type;
1599
1600 	sbuf_bcat(sb, &note, sizeof(note));
1601 sbuf_start_section(sb, &old_len);
1602 sbuf_bcat(sb, "FreeBSD", note.n_namesz);
1603 sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
1604 if (note.n_descsz == 0)
1605 return;
1606 sbuf_start_section(sb, &old_len);
1607 ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
1608 sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
1609 }
1610
1611 /*
1612 * Miscellaneous note out functions.
1613 */
1614
1615 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
1616 #include <compat/freebsd32/freebsd32.h>
1617
1618 typedef struct prstatus32 elf_prstatus_t;
1619 typedef struct prpsinfo32 elf_prpsinfo_t;
1620 typedef struct fpreg32 elf_prfpregset_t;
1621 typedef struct fpreg32 elf_fpregset_t;
1622 typedef struct reg32 elf_gregset_t;
1623 typedef struct thrmisc32 elf_thrmisc_t;
1624 #define ELF_KERN_PROC_MASK KERN_PROC_MASK32
1625 typedef struct kinfo_proc32 elf_kinfo_proc_t;
1626 typedef uint32_t elf_ps_strings_t;
1627 #else
1628 typedef prstatus_t elf_prstatus_t;
1629 typedef prpsinfo_t elf_prpsinfo_t;
1630 typedef prfpregset_t elf_prfpregset_t;
1631 typedef prfpregset_t elf_fpregset_t;
1632 typedef gregset_t elf_gregset_t;
1633 typedef thrmisc_t elf_thrmisc_t;
1634 #define ELF_KERN_PROC_MASK 0
1635 typedef struct kinfo_proc elf_kinfo_proc_t;
1636 typedef vm_offset_t elf_ps_strings_t;
1637 #endif
1638
1639 static void
1640 __elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep)
1641 {
1642 struct proc *p;
1643 elf_prpsinfo_t *psinfo;
1644
1645 p = (struct proc *)arg;
1646 if (sb != NULL) {
1647 KASSERT(*sizep == sizeof(*psinfo), ("invalid size"));
1648 psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK);
1649 psinfo->pr_version = PRPSINFO_VERSION;
1650 psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
1651 strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1652 /*
1653 * XXX - We don't fill in the command line arguments properly
1654 * yet.
1655 */
1656 strlcpy(psinfo->pr_psargs, p->p_comm,
1657 sizeof(psinfo->pr_psargs));
1658
1659 sbuf_bcat(sb, psinfo, sizeof(*psinfo));
1660 free(psinfo, M_TEMP);
1661 }
1662 *sizep = sizeof(*psinfo);
1663 }
1664
1665 static void
1666 __elfN(note_prstatus)(void *arg, struct sbuf *sb, size_t *sizep)
1667 {
1668 struct thread *td;
1669 elf_prstatus_t *status;
1670
1671 td = (struct thread *)arg;
1672 if (sb != NULL) {
1673 KASSERT(*sizep == sizeof(*status), ("invalid size"));
1674 status = malloc(sizeof(*status), M_TEMP, M_ZERO | M_WAITOK);
1675 status->pr_version = PRSTATUS_VERSION;
1676 status->pr_statussz = sizeof(elf_prstatus_t);
1677 status->pr_gregsetsz = sizeof(elf_gregset_t);
1678 status->pr_fpregsetsz = sizeof(elf_fpregset_t);
1679 status->pr_osreldate = osreldate;
1680 status->pr_cursig = td->td_proc->p_sig;
1681 status->pr_pid = td->td_tid;
1682 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
1683 fill_regs32(td, &status->pr_reg);
1684 #else
1685 fill_regs(td, &status->pr_reg);
1686 #endif
1687 sbuf_bcat(sb, status, sizeof(*status));
1688 free(status, M_TEMP);
1689 }
1690 *sizep = sizeof(*status);
1691 }
1692
1693 static void
1694 __elfN(note_fpregset)(void *arg, struct sbuf *sb, size_t *sizep)
1695 {
1696 struct thread *td;
1697 elf_prfpregset_t *fpregset;
1698
1699 td = (struct thread *)arg;
1700 if (sb != NULL) {
1701 KASSERT(*sizep == sizeof(*fpregset), ("invalid size"));
1702 fpregset = malloc(sizeof(*fpregset), M_TEMP, M_ZERO | M_WAITOK);
1703 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
1704 fill_fpregs32(td, fpregset);
1705 #else
1706 fill_fpregs(td, fpregset);
1707 #endif
1708 sbuf_bcat(sb, fpregset, sizeof(*fpregset));
1709 free(fpregset, M_TEMP);
1710 }
1711 *sizep = sizeof(*fpregset);
1712 }
1713
1714 static void
1715 __elfN(note_thrmisc)(void *arg, struct sbuf *sb, size_t *sizep)
1716 {
1717 struct thread *td;
1718 elf_thrmisc_t thrmisc;
1719
1720 td = (struct thread *)arg;
1721 if (sb != NULL) {
1722 KASSERT(*sizep == sizeof(thrmisc), ("invalid size"));
1723 bzero(&thrmisc._pad, sizeof(thrmisc._pad));
1724 strcpy(thrmisc.pr_tname, td->td_name);
1725 sbuf_bcat(sb, &thrmisc, sizeof(thrmisc));
1726 }
1727 *sizep = sizeof(thrmisc);
1728 }
1729
1730 /*
1731 * Allow for MD specific notes, as well as any MD
1732 * specific preparations for writing MI notes.
1733 */
1734 static void
1735 __elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep)
1736 {
1737 struct thread *td;
1738 void *buf;
1739 size_t size;
1740
1741 td = (struct thread *)arg;
1742 size = *sizep;
1743 if (size != 0 && sb != NULL)
1744 buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK);
1745 else
1746 buf = NULL;
1747 size = 0;
1748 __elfN(dump_thread)(td, buf, &size);
1749 KASSERT(*sizep == size, ("invalid size"));
1750 if (size != 0 && sb != NULL)
1751 sbuf_bcat(sb, buf, size);
1752 free(buf, M_TEMP);
1753 *sizep = size;
1754 }
1755
1756 #ifdef KINFO_PROC_SIZE
1757 CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
1758 #endif
1759
1760 static void
1761 __elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep)
1762 {
1763 struct proc *p;
1764 size_t size;
1765 int structsize;
1766
1767 p = (struct proc *)arg;
1768 size = sizeof(structsize) + p->p_numthreads *
1769 sizeof(elf_kinfo_proc_t);
1770
1771 if (sb != NULL) {
1772 KASSERT(*sizep == size, ("invalid size"));
1773 structsize = sizeof(elf_kinfo_proc_t);
1774 sbuf_bcat(sb, &structsize, sizeof(structsize));
1775 PROC_LOCK(p);
1776 kern_proc_out(p, sb, ELF_KERN_PROC_MASK);
1777 }
1778 *sizep = size;
1779 }
1780
1781 #ifdef KINFO_FILE_SIZE
1782 CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
1783 #endif
1784
1785 static void
1786 note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep)
1787 {
1788 struct proc *p;
1789 size_t size;
1790 int structsize;
1791
1792 p = (struct proc *)arg;
1793 if (sb == NULL) {
1794 size = 0;
1795 sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
1796 sbuf_set_drain(sb, sbuf_drain_count, &size);
1797 sbuf_bcat(sb, &structsize, sizeof(structsize));
1798 PROC_LOCK(p);
1799 kern_proc_filedesc_out(p, sb, -1);
1800 sbuf_finish(sb);
1801 sbuf_delete(sb);
1802 *sizep = size;
1803 } else {
1804 structsize = sizeof(struct kinfo_file);
1805 sbuf_bcat(sb, &structsize, sizeof(structsize));
1806 PROC_LOCK(p);
1807 kern_proc_filedesc_out(p, sb, -1);
1808 }
1809 }
1810
1811 #ifdef KINFO_VMENTRY_SIZE
1812 CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
1813 #endif
1814
1815 static void
1816 note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep)
1817 {
1818 struct proc *p;
1819 size_t size;
1820 int structsize;
1821
1822 p = (struct proc *)arg;
1823 if (sb == NULL) {
1824 size = 0;
1825 sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
1826 sbuf_set_drain(sb, sbuf_drain_count, &size);
1827 sbuf_bcat(sb, &structsize, sizeof(structsize));
1828 PROC_LOCK(p);
1829 kern_proc_vmmap_out(p, sb);
1830 sbuf_finish(sb);
1831 sbuf_delete(sb);
1832 *sizep = size;
1833 } else {
1834 structsize = sizeof(struct kinfo_vmentry);
1835 sbuf_bcat(sb, &structsize, sizeof(structsize));
1836 PROC_LOCK(p);
1837 kern_proc_vmmap_out(p, sb);
1838 }
1839 }
1840
1841 static void
1842 note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep)
1843 {
1844 struct proc *p;
1845 size_t size;
1846 int structsize;
1847
1848 p = (struct proc *)arg;
1849 size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t);
1850 if (sb != NULL) {
1851 KASSERT(*sizep == size, ("invalid size"));
1852 structsize = sizeof(gid_t);
1853 sbuf_bcat(sb, &structsize, sizeof(structsize));
1854 sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups *
1855 sizeof(gid_t));
1856 }
1857 *sizep = size;
1858 }
1859
1860 static void
1861 note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep)
1862 {
1863 struct proc *p;
1864 size_t size;
1865 int structsize;
1866
1867 p = (struct proc *)arg;
1868 size = sizeof(structsize) + sizeof(p->p_fd->fd_cmask);
1869 if (sb != NULL) {
1870 KASSERT(*sizep == size, ("invalid size"));
1871 structsize = sizeof(p->p_fd->fd_cmask);
1872 sbuf_bcat(sb, &structsize, sizeof(structsize));
1873 sbuf_bcat(sb, &p->p_fd->fd_cmask, sizeof(p->p_fd->fd_cmask));
1874 }
1875 *sizep = size;
1876 }
1877
1878 static void
1879 note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep)
1880 {
1881 struct proc *p;
1882 struct rlimit rlim[RLIM_NLIMITS];
1883 size_t size;
1884 int structsize, i;
1885
1886 p = (struct proc *)arg;
1887 size = sizeof(structsize) + sizeof(rlim);
1888 if (sb != NULL) {
1889 KASSERT(*sizep == size, ("invalid size"));
1890 structsize = sizeof(rlim);
1891 sbuf_bcat(sb, &structsize, sizeof(structsize));
1892 PROC_LOCK(p);
1893 for (i = 0; i < RLIM_NLIMITS; i++)
1894 lim_rlimit(p, i, &rlim[i]);
1895 PROC_UNLOCK(p);
1896 sbuf_bcat(sb, rlim, sizeof(rlim));
1897 }
1898 *sizep = size;
1899 }
1900
1901 static void
1902 note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep)
1903 {
1904 struct proc *p;
1905 size_t size;
1906 int structsize;
1907
1908 p = (struct proc *)arg;
1909 size = sizeof(structsize) + sizeof(p->p_osrel);
1910 if (sb != NULL) {
1911 KASSERT(*sizep == size, ("invalid size"));
1912 structsize = sizeof(p->p_osrel);
1913 sbuf_bcat(sb, &structsize, sizeof(structsize));
1914 sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel));
1915 }
1916 *sizep = size;
1917 }
1918
1919 static void
1920 __elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep)
1921 {
1922 struct proc *p;
1923 elf_ps_strings_t ps_strings;
1924 size_t size;
1925 int structsize;
1926
1927 p = (struct proc *)arg;
1928 size = sizeof(structsize) + sizeof(ps_strings);
1929 if (sb != NULL) {
1930 KASSERT(*sizep == size, ("invalid size"));
1931 structsize = sizeof(ps_strings);
1932 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
1933 ps_strings = PTROUT(p->p_sysent->sv_psstrings);
1934 #else
1935 ps_strings = p->p_sysent->sv_psstrings;
1936 #endif
1937 sbuf_bcat(sb, &structsize, sizeof(structsize));
1938 sbuf_bcat(sb, &ps_strings, sizeof(ps_strings));
1939 }
1940 *sizep = size;
1941 }
1942
1943 static void
1944 __elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep)
1945 {
1946 struct proc *p;
1947 size_t size;
1948 int structsize;
1949
1950 p = (struct proc *)arg;
1951 if (sb == NULL) {
1952 size = 0;
1953 sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
1954 sbuf_set_drain(sb, sbuf_drain_count, &size);
1955 sbuf_bcat(sb, &structsize, sizeof(structsize));
1956 PHOLD(p);
1957 proc_getauxv(curthread, p, sb);
1958 PRELE(p);
1959 sbuf_finish(sb);
1960 sbuf_delete(sb);
1961 *sizep = size;
1962 } else {
1963 structsize = sizeof(Elf_Auxinfo);
1964 sbuf_bcat(sb, &structsize, sizeof(structsize));
1965 PHOLD(p);
1966 proc_getauxv(curthread, p, sb);
1967 PRELE(p);
1968 }
1969 }
1970
1971 static boolean_t
1972 __elfN(parse_notes)(struct image_params *imgp, Elf_Brandnote *checknote,
1973 int32_t *osrel, const Elf_Phdr *pnote)
1974 {
1975 const Elf_Note *note, *note0, *note_end;
1976 const char *note_name;
1977 int i;
1978
1979 if (pnote == NULL || pnote->p_offset > PAGE_SIZE ||
1980 pnote->p_filesz > PAGE_SIZE - pnote->p_offset)
1981 return (FALSE);
1982
1983 note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset);
1984 note_end = (const Elf_Note *)(imgp->image_header +
1985 pnote->p_offset + pnote->p_filesz);
1986 for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
1987 if (!aligned(note, Elf32_Addr) || (const char *)note_end -
1988 (const char *)note < sizeof(Elf_Note))
1989 return (FALSE);
1990 if (note->n_namesz != checknote->hdr.n_namesz ||
1991 note->n_descsz != checknote->hdr.n_descsz ||
1992 note->n_type != checknote->hdr.n_type)
1993 goto nextnote;
1994 note_name = (const char *)(note + 1);
1995 if (note_name + checknote->hdr.n_namesz >=
1996 (const char *)note_end || strncmp(checknote->vendor,
1997 note_name, checknote->hdr.n_namesz) != 0)
1998 goto nextnote;
1999
2000 /*
2001 * Fetch the osreldate for binary
2002 * from the ELF OSABI-note if necessary.
2003 */
2004 if ((checknote->flags & BN_TRANSLATE_OSREL) != 0 &&
2005 checknote->trans_osrel != NULL)
2006 return (checknote->trans_osrel(note, osrel));
2007 return (TRUE);
2008
2009 nextnote:
2010 note = (const Elf_Note *)((const char *)(note + 1) +
2011 roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) +
2012 roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE));
2013 }
2014
2015 return (FALSE);
2016 }
2017
2018 /*
2019 * Try to find the appropriate ABI-note section for checknote,
2020 * fetch the osreldate for binary from the ELF OSABI-note. Only the
2021 * first page of the image is searched, the same as for headers.
2022 */
2023 static boolean_t
2024 __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote,
2025 int32_t *osrel)
2026 {
2027 const Elf_Phdr *phdr;
2028 const Elf_Ehdr *hdr;
2029 int i;
2030
2031 hdr = (const Elf_Ehdr *)imgp->image_header;
2032 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
2033
2034 for (i = 0; i < hdr->e_phnum; i++) {
2035 if (phdr[i].p_type == PT_NOTE &&
2036 __elfN(parse_notes)(imgp, checknote, osrel, &phdr[i]))
2037 return (TRUE);
2038 }
2039 return (FALSE);
2040
2041 }
2042
2043 /*
2044 * Tell kern_execve.c about it, with a little help from the linker.
2045 */
2046 static struct execsw __elfN(execsw) = {
2047 __CONCAT(exec_, __elfN(imgact)),
2048 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
2049 };
2050 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
2051
2052 #ifdef COMPRESS_USER_CORES
2053 /*
2054 * Compress and write out a core segment for a user process.
2055 *
2056 * 'inbuf' is the starting address of a VM segment in the process' address
2057 * space that is to be compressed and written out to the core file. 'dest_buf'
2058 * is a buffer in the kernel's address space. The segment is copied from
2059 * 'inbuf' to 'dest_buf' first before being processed by the compression
2060 * routine gzwrite(). This copying is necessary because the content of the VM
2061 * segment may change between the compression pass and the crc-computation pass
2062 * in gzwrite(). This is because realtime threads may preempt the UNIX kernel.
2063 *
2064 * If inbuf is NULL it is assumed that data is already copied to 'dest_buf'.
2065 */
2066 static int
2067 compress_core (gzFile file, char *inbuf, char *dest_buf, unsigned int len,
2068 struct thread *td)
2069 {
2070 int len_compressed;
2071 int error = 0;
2072 unsigned int chunk_len;
2073
2074 while (len) {
2075 if (inbuf != NULL) {
2076 chunk_len = (len > CORE_BUF_SIZE) ? CORE_BUF_SIZE : len;
2077 copyin(inbuf, dest_buf, chunk_len);
2078 inbuf += chunk_len;
2079 } else {
2080 chunk_len = len;
2081 }
2082 len_compressed = gzwrite(file, dest_buf, chunk_len);
2083
2084 EVENTHANDLER_INVOKE(app_coredump_progress, td, len_compressed);
2085
2086 if ((unsigned int)len_compressed != chunk_len) {
2087 log(LOG_WARNING,
2088 "compress_core: length mismatch (0x%x returned, "
2089 "0x%x expected)\n", len_compressed, chunk_len);
2090 EVENTHANDLER_INVOKE(app_coredump_error, td,
2091 "compress_core: length mismatch %x -> %x",
2092 chunk_len, len_compressed);
2093 error = EFAULT;
2094 break;
2095 }
2096 len -= chunk_len;
2097 maybe_yield();
2098 }
2099
2100 return (error);
2101 }
2102 #endif /* COMPRESS_USER_CORES */
2103
2104 static vm_prot_t
2105 __elfN(trans_prot)(Elf_Word flags)
2106 {
2107 vm_prot_t prot;
2108
2109 prot = 0;
2110 if (flags & PF_X)
2111 prot |= VM_PROT_EXECUTE;
2112 if (flags & PF_W)
2113 prot |= VM_PROT_WRITE;
2114 if (flags & PF_R)
2115 prot |= VM_PROT_READ;
2116 #if __ELF_WORD_SIZE == 32
2117 #if defined(__amd64__) || defined(__ia64__)
2118 if (i386_read_exec && (flags & PF_R))
2119 prot |= VM_PROT_EXECUTE;
2120 #endif
2121 #endif
2122 return (prot);
2123 }
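
/*
 * Example translations, read straight off the tests above: a text
 * segment with p_flags = PF_R | PF_X becomes
 * VM_PROT_READ | VM_PROT_EXECUTE, and a data segment with PF_R | PF_W
 * becomes VM_PROT_READ | VM_PROT_WRITE.  With the i386_read_exec
 * compatibility knob enabled, PF_R alone also grants VM_PROT_EXECUTE
 * to 32-bit binaries on the affected 64-bit hosts.
 */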
2124
2125 static Elf_Word
2126 __elfN(untrans_prot)(vm_prot_t prot)
2127 {
2128 Elf_Word flags;
2129
2130 flags = 0;
2131 if (prot & VM_PROT_EXECUTE)
2132 flags |= PF_X;
2133 if (prot & VM_PROT_READ)
2134 flags |= PF_R;
2135 if (prot & VM_PROT_WRITE)
2136 flags |= PF_W;
2137 return (flags);
2138 }