/* sys/kern/exec_elf.c (via FreeBSD/Linux Kernel Cross Reference) */
1 /* $OpenBSD: exec_elf.c,v 1.180 2023/01/16 07:09:11 guenther Exp $ */
2
3 /*
4 * Copyright (c) 1996 Per Fogelstrom
5 * All rights reserved.
6 *
7 * Copyright (c) 1994 Christos Zoulas
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
34 /*
35 * Copyright (c) 2001 Wasabi Systems, Inc.
36 * All rights reserved.
37 *
38 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed for the NetBSD Project by
51 * Wasabi Systems, Inc.
52 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
53 * or promote products derived from this software without specific prior
54 * written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
58 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
59 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
60 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
61 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
62 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
63 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
64 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
65 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 */
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/proc.h>
72 #include <sys/malloc.h>
73 #include <sys/pool.h>
74 #include <sys/mount.h>
75 #include <sys/namei.h>
76 #include <sys/vnode.h>
77 #include <sys/core.h>
78 #include <sys/exec.h>
79 #include <sys/exec_elf.h>
80 #include <sys/fcntl.h>
81 #include <sys/ptrace.h>
82 #include <sys/signalvar.h>
83 #include <sys/pledge.h>
84
85 #include <sys/mman.h>
86
87 #include <uvm/uvm_extern.h>
88
89 #include <machine/reg.h>
90 #include <machine/exec.h>
91
/* Forward declarations for the local ELF loader helpers below. */
int	elf_load_file(struct proc *, char *, struct exec_package *,
	    struct elf_args *);
int	elf_check_header(Elf_Ehdr *);
int	elf_read_from(struct proc *, struct vnode *, u_long, void *, int);
void	elf_load_psection(struct exec_vmcmd_set *, struct vnode *,
	    Elf_Phdr *, Elf_Addr *, Elf_Addr *, int *, int);
int	elf_os_pt_note_name(Elf_Note *);
int	elf_os_pt_note(struct proc *, struct exec_package *, Elf_Ehdr *, int *);

/*
 * round up and down to page boundaries.
 * NB: the mask arithmetic assumes 'b' is a power of two.
 */
#define ELF_ROUND(a, b)		(((a) + (b) - 1) & ~((b) - 1))
#define ELF_TRUNC(a, b)		((a) & ~((b) - 1))

/*
 * We limit the number of program headers to 32, this should
 * be a reasonable limit for ELF, the most we have seen so far is 12
 */
#define ELF_MAX_VALID_PHDR 32

/* Bit set in the 'names' result when an "OpenBSD" PT_NOTE is found. */
#define ELF_NOTE_NAME_OPENBSD	0x01

/* Table mapping recognized ELF note names to ELF_NOTE_NAME_* bits. */
struct elf_note_name {
	char *name;
	int id;
} elf_note_names[] = {
	{ "OpenBSD",	ELF_NOTE_NAME_OPENBSD },
};

/* ELF note name/desc fields are padded to Elf_Word granularity. */
#define ELFROUNDSIZE sizeof(Elf_Word)
#define elfround(x)	roundup((x), ELFROUNDSIZE)
122
123
124 /*
125 * Check header for validity; return 0 for ok, ENOEXEC if error
126 */
127 int
128 elf_check_header(Elf_Ehdr *ehdr)
129 {
130 /*
131 * We need to check magic, class size, endianness, and version before
132 * we look at the rest of the Elf_Ehdr structure. These few elements
133 * are represented in a machine independent fashion.
134 */
135 if (!IS_ELF(*ehdr) ||
136 ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
137 ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
138 ehdr->e_ident[EI_VERSION] != ELF_TARG_VER)
139 return (ENOEXEC);
140
141 /* Now check the machine dependent header */
142 if (ehdr->e_machine != ELF_TARG_MACH ||
143 ehdr->e_version != ELF_TARG_VER)
144 return (ENOEXEC);
145
146 /* Don't allow an insane amount of sections. */
147 if (ehdr->e_phnum > ELF_MAX_VALID_PHDR)
148 return (ENOEXEC);
149
150 return (0);
151 }
152
/*
 * Load a psection at the appropriate address
 *
 * vcset: vmcmd set the mapping commands are appended to
 * vp:    vnode the section contents are read from
 * ph:    PT_LOAD program header describing the section
 * addr:  in: requested address, or ELF_NO_ADDR to use ph->p_vaddr;
 *        out: the (alignment-truncated) address actually used
 * size:  out: memory size of the section, including alignment slack
 * prot:  in/out: PROT_* bits derived from ph->p_flags are OR'ed in
 * flags: VMCMD_* flags applied to the generated vmcmds
 */
void
elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
    Elf_Phdr *ph, Elf_Addr *addr, Elf_Addr *size, int *prot, int flags)
{
	u_long msize, lsize, psize, rm, rf;
	long diff, offset, bdiff;
	Elf_Addr base;

	/*
	 * If the user specified an address, then we load there.
	 */
	if (*addr != ELF_NO_ADDR) {
		if (ph->p_align > 1) {
			*addr = ELF_TRUNC(*addr, ph->p_align);
			/* diff = p_vaddr's offset within its alignment unit */
			diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
			/* page align vaddr */
			base = *addr + trunc_page(ph->p_vaddr)
			    - ELF_TRUNC(ph->p_vaddr, ph->p_align);
		} else {
			diff = 0;
			base = *addr + trunc_page(ph->p_vaddr) - ph->p_vaddr;
		}
	} else {
		*addr = ph->p_vaddr;
		if (ph->p_align > 1)
			*addr = ELF_TRUNC(*addr, ph->p_align);
		base = trunc_page(ph->p_vaddr);
		diff = ph->p_vaddr - *addr;
	}
	/* distance from p_vaddr back to its page boundary */
	bdiff = ph->p_vaddr - trunc_page(ph->p_vaddr);

	/*
	 * Enforce W^X and map W|X segments without X permission
	 * initially.  The dynamic linker will make these read-only
	 * and add back X permission after relocation processing.
	 * Static executables with W|X segments will probably crash.
	 */
	*prot |= (ph->p_flags & PF_R) ? PROT_READ : 0;
	*prot |= (ph->p_flags & PF_W) ? PROT_WRITE : 0;
	if ((ph->p_flags & PF_W) == 0)
		*prot |= (ph->p_flags & PF_X) ? PROT_EXEC : 0;

	/*
	 * Apply immutability as much as possible, but not text/rodata
	 * segments of textrel binaries, or RELRO or PT_OPENBSD_MUTABLE
	 * sections, or LOADS marked PF_OPENBSD_MUTABLE, or LOADS which
	 * violate W^X.
	 * Userland (meaning crt0 or ld.so) will repair those regions.
	 */
	if ((ph->p_flags & (PF_X | PF_W)) != (PF_X | PF_W) &&
	    ((ph->p_flags & PF_OPENBSD_MUTABLE) == 0))
		flags |= VMCMD_IMMUTABLE;
	if ((flags & VMCMD_TEXTREL) && (ph->p_flags & PF_W) == 0)
		flags &= ~VMCMD_IMMUTABLE;

	msize = ph->p_memsz + diff;
	offset = ph->p_offset - bdiff;
	lsize = ph->p_filesz + bdiff;
	psize = round_page(lsize);

	/*
	 * Because the pagedvn pager can't handle zero fill of the last
	 * data page if it's not page aligned we map the last page readvn.
	 */
	if (ph->p_flags & PF_W) {
		psize = trunc_page(lsize);
		if (psize > 0)
			NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base, vp,
			    offset, *prot, flags);
		/* unaligned tail of a writable segment goes in via readvn */
		if (psize != lsize) {
			NEW_VMCMD2(vcset, vmcmd_map_readvn, lsize - psize,
			    base + psize, vp, offset + psize, *prot, flags);
		}
	} else {
		NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base, vp, offset,
		    *prot, flags);
	}

	/*
	 * Check if we need to extend the size of the segment:
	 * p_memsz > p_filesz means zero-filled (bss) pages follow.
	 */
	rm = round_page(*addr + ph->p_memsz + diff);
	rf = round_page(*addr + ph->p_filesz + diff);

	if (rm != rf) {
		NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP, 0,
		    *prot, flags);
	}
	*size = msize;
}
246
247 /*
248 * Read from vnode into buffer at offset.
249 */
250 int
251 elf_read_from(struct proc *p, struct vnode *vp, u_long off, void *buf,
252 int size)
253 {
254 int error;
255 size_t resid;
256
257 if ((error = vn_rdwr(UIO_READ, vp, buf, size, off, UIO_SYSSPACE,
258 0, p->p_ucred, &resid, p)) != 0)
259 return error;
260 /*
261 * See if we got all of it
262 */
263 if (resid != 0)
264 return (ENOEXEC);
265 return (0);
266 }
267
/*
 * Load a file (interpreter/library) pointed to by path [stolen from
 * coff_load_shlib()].  Made slightly generic so it might be used externally.
 *
 * On success the vmcmds mapping the object have been appended to
 * epp->ep_vmcmds, epp->ep_entry points at its entry and ap->arg_interp
 * records the chosen load base.  Returns 0 or an errno.
 */
int
elf_load_file(struct proc *p, char *path, struct exec_package *epp,
    struct elf_args *ap)
{
	int error, i;
	struct nameidata nd;
	Elf_Ehdr eh;
	Elf_Phdr *ph = NULL;
	u_long phsize = 0;
	Elf_Addr addr;
	struct vnode *vp;
	Elf_Phdr *base_ph = NULL;
	struct interp_ld_sec {
		Elf_Addr vaddr;
		u_long memsz;
	} loadmap[ELF_MAX_VALID_PHDR];
	int nload, idx = 0;
	Elf_Addr pos;
	int file_align;
	int loop;
	size_t randomizequota = ELF_RANDOMIZE_LIMIT;

	/* Look up the interpreter's vnode and verify it is executable. */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);
	nd.ni_pledge = PLEDGE_RPATH;
	nd.ni_unveil = UNVEIL_READ;
	if ((error = namei(&nd)) != 0) {
		return (error);
	}
	vp = nd.ni_vp;
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad;
	}
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad;
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad;
	}
	if ((error = VOP_ACCESS(vp, VREAD, p->p_ucred, p)) != 0)
		goto bad1;
	if ((error = elf_read_from(p, nd.ni_vp, 0, &eh, sizeof(eh))) != 0)
		goto bad1;

	/* The interpreter must itself be a valid ELF shared object. */
	if (elf_check_header(&eh) || eh.e_type != ET_DYN) {
		error = ENOEXEC;
		goto bad1;
	}

	/* e_phnum <= ELF_MAX_VALID_PHDR was enforced by elf_check_header() */
	ph = mallocarray(eh.e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
	phsize = eh.e_phnum * sizeof(Elf_Phdr);

	if ((error = elf_read_from(p, nd.ni_vp, eh.e_phoff, ph, phsize)) != 0)
		goto bad1;

	/*
	 * Pass 1: record the page-rounded extent of every PT_LOAD
	 * segment in loadmap[].
	 */
	for (i = 0; i < eh.e_phnum; i++) {
		if (ph[i].p_type == PT_LOAD) {
			if (ph[i].p_filesz > ph[i].p_memsz ||
			    ph[i].p_memsz == 0) {
				error = EINVAL;
				goto bad1;
			}
			loadmap[idx].vaddr = trunc_page(ph[i].p_vaddr);
			loadmap[idx].memsz = round_page (ph[i].p_vaddr +
			    ph[i].p_memsz - loadmap[idx].vaddr);
			file_align = ph[i].p_align;
			idx++;
		}
	}
	nload = idx;
	/*
	 * NOTE(review): file_align is only assigned when a PT_LOAD is
	 * present; an object without any PT_LOAD would use it
	 * uninitialized below — TODO confirm such objects are rejected
	 * earlier.
	 */

	/*
	 * Load the interpreter where a non-fixed mmap(NULL, ...)
	 * would (i.e. something safely out of the way).
	 */
	pos = uvm_map_hint(p->p_vmspace, PROT_EXEC, VM_MIN_ADDRESS,
	    VM_MAXUSER_ADDRESS);
	pos = ELF_ROUND(pos, file_align);

	/*
	 * Pass 2: probe with uvm_map_mquery() until a base 'pos' is
	 * found at which all segments fit; restart the scan from the
	 * first segment whenever the base has to move.
	 */
	loop = 0;
	for (i = 0; i < nload;/* i advanced in the body */) {
		vaddr_t addr;
		struct uvm_object *uobj;
		off_t uoff;
		size_t size;

#ifdef this_needs_fixing
		if (i == 0) {
			uobj = &vp->v_uvm.u_obj;
			/* need to fix uoff */
		} else {
#endif
			uobj = NULL;
			uoff = 0;
#ifdef this_needs_fixing
		}
#endif

		addr = trunc_page(pos + loadmap[i].vaddr);
		size = round_page(addr + loadmap[i].memsz) - addr;

		/* CRAP - map_findspace does not avoid daddr+BRKSIZ */
		if ((addr + size > (vaddr_t)p->p_vmspace->vm_daddr) &&
		    (addr < (vaddr_t)p->p_vmspace->vm_daddr + BRKSIZ))
			addr = round_page((vaddr_t)p->p_vmspace->vm_daddr +
			    BRKSIZ);

		if (uvm_map_mquery(&p->p_vmspace->vm_map, &addr, size,
		    (i == 0 ? uoff : UVM_UNKNOWN_OFFSET), 0) != 0) {
			if (loop == 0) {
				/* retry once from the bottom of the map */
				loop = 1;
				i = 0;
				pos = 0;
				continue;
			}
			error = ENOMEM;
			goto bad1;
		}
		if (addr != pos + loadmap[i].vaddr) {
			/* base changed. */
			pos = addr - trunc_page(loadmap[i].vaddr);
			pos = ELF_ROUND(pos,file_align);
			i = 0;
			continue;
		}

		i++;
	}

	/*
	 * Load all the necessary sections
	 */
	for (i = 0; i < eh.e_phnum; i++) {
		Elf_Addr size = 0;
		int prot = 0;
		int flags;

		switch (ph[i].p_type) {
		case PT_LOAD:
			if (base_ph == NULL) {
				/* first PT_LOAD: map at the chosen base */
				flags = VMCMD_BASE;
				addr = pos;
				base_ph = &ph[i];
			} else {
				/* subsequent loads are relative to the base */
				flags = VMCMD_RELATIVE;
				addr = ph[i].p_vaddr - base_ph->p_vaddr;
			}
			/* NOTE(review): VMCMD_SYSCALL presumably grants
			 * ld.so's text the syscall permission — confirm
			 * against the vmcmd implementation. */
			elf_load_psection(&epp->ep_vmcmds, nd.ni_vp,
			    &ph[i], &addr, &size, &prot, flags | VMCMD_SYSCALL);
			/* If entry is within this section it must be text */
			if (eh.e_entry >= ph[i].p_vaddr &&
			    eh.e_entry < (ph[i].p_vaddr + size)) {
				epp->ep_entry = addr + eh.e_entry -
				    ELF_TRUNC(ph[i].p_vaddr,ph[i].p_align);
				if (flags == VMCMD_RELATIVE)
					epp->ep_entry += pos;
				ap->arg_interp = pos;
			}
			addr += size;
			break;

		case PT_PHDR:
		case PT_NOTE:
			break;

		case PT_OPENBSD_RANDOMIZE:
			/* cap the total amount of randomized data */
			if (ph[i].p_memsz > randomizequota) {
				error = ENOMEM;
				goto bad1;
			}
			randomizequota -= ph[i].p_memsz;
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_randomize,
			    ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
			break;

		case PT_DYNAMIC:
#if defined (__mips__)
			/* DT_DEBUG is not ready on mips */
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
			    ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
#endif
			break;
		case PT_GNU_RELRO:
		case PT_OPENBSD_MUTABLE:
			/* keep these regions mutable (see elf_load_psection) */
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
			    ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
			break;

		default:
			break;
		}
	}

	vn_marktext(nd.ni_vp);

bad1:
	/* success (error == 0) also exits through here */
	VOP_CLOSE(nd.ni_vp, FREAD, p->p_ucred, p);
bad:
	free(ph, M_TEMP, phsize);

	vput(nd.ni_vp);
	return (error);
}
475
/*
 * Prepare an Elf binary's exec package
 *
 * First, set of the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error out if
 * this is not possible.  Finally, set up vmcmds for the text, data, bss, and
 * stack segments.
 */
int
exec_elf_makecmds(struct proc *p, struct exec_package *epp)
{
	Elf_Ehdr *eh = epp->ep_hdr;
	Elf_Phdr *ph, *pp, *base_ph = NULL;
	Elf_Addr phdr = 0, exe_base = 0;
	int error, i, has_phdr = 0, names = 0, textrel = 0;
	char *interp = NULL;
	u_long phsize;
	size_t randomizequota = ELF_RANDOMIZE_LIMIT;

	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
		return (ENOEXEC);

	/* only plain executables and PIE/shared objects are accepted */
	if (elf_check_header(eh) ||
	    (eh->e_type != ET_EXEC && eh->e_type != ET_DYN))
		return (ENOEXEC);

	/*
	 * check if vnode is in open for writing, because we want to demand-
	 * page out of it.  if it is, don't do it, for various reasons.
	 */
	if (epp->ep_vp->v_writecount != 0) {
#ifdef DIAGNOSTIC
		if (epp->ep_vp->v_flag & VTEXT)
			panic("exec: a VTEXT vnode has writecount != 0");
#endif
		return (ETXTBSY);
	}
	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	ph = mallocarray(eh->e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
	phsize = eh->e_phnum * sizeof(Elf_Phdr);

	if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff, ph,
	    phsize)) != 0)
		goto bad;

	/* ELF_NO_ADDR marks the sizes as "not seen yet" */
	epp->ep_tsize = ELF_NO_ADDR;
	epp->ep_dsize = ELF_NO_ADDR;

	/*
	 * Pass 1: read the PT_INTERP path (if any), validate the
	 * PT_LOADs and remember the first one, and note whether a
	 * PT_PHDR is present (required below for PIE).
	 */
	for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
		if (pp->p_type == PT_INTERP && !interp) {
			/* interpreter path must fit and be NUL terminated */
			if (pp->p_filesz < 2 || pp->p_filesz > MAXPATHLEN)
				goto bad;	/* error == 0 -> ENOEXEC below */
			interp = pool_get(&namei_pool, PR_WAITOK);
			if ((error = elf_read_from(p, epp->ep_vp,
			    pp->p_offset, interp, pp->p_filesz)) != 0) {
				goto bad;
			}
			if (interp[pp->p_filesz - 1] != '\0')
				goto bad;
		} else if (pp->p_type == PT_LOAD) {
			if (pp->p_filesz > pp->p_memsz ||
			    pp->p_memsz == 0) {
				error = EINVAL;
				goto bad;
			}
			if (base_ph == NULL)
				base_ph = pp;
		} else if (pp->p_type == PT_PHDR) {
			has_phdr = 1;
		}
	}

	/*
	 * Verify this is an OpenBSD executable.  If it's marked that way
	 * via a PT_NOTE then also check for a PT_OPENBSD_WXNEEDED segment.
	 */
	if ((error = elf_os_pt_note(p, epp, epp->ep_hdr, &names)) != 0)
		goto bad;
	if (eh->e_ident[EI_OSABI] == ELFOSABI_OPENBSD)
		names |= ELF_NOTE_NAME_OPENBSD;

	if (eh->e_type == ET_DYN) {
		/* need phdr and load sections for PIE */
		if (!has_phdr || base_ph == NULL) {
			error = EINVAL;
			goto bad;
		}
		/* randomize exe_base for PIE */
		exe_base = uvm_map_pie(base_ph->p_align);

		/*
		 * Check if DYNAMIC contains DT_TEXTREL
		 */
		for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
			Elf_Dyn *dt;
			int j;

			switch (pp->p_type) {
			case PT_DYNAMIC:
				/* ignore absurdly large dynamic sections */
				if (pp->p_filesz > 64*1024)
					break;
				dt = malloc(pp->p_filesz, M_TEMP, M_WAITOK);
				error = vn_rdwr(UIO_READ, epp->ep_vp,
				    (caddr_t)dt, pp->p_filesz, pp->p_offset,
				    UIO_SYSSPACE, IO_UNIT, p->p_ucred, NULL, p);
				if (error) {
					free(dt, M_TEMP, pp->p_filesz);
					break;
				}
				for (j = 0; j < pp->p_filesz / sizeof(*dt); j++) {
					if (dt[j].d_tag == DT_TEXTREL) {
						textrel = VMCMD_TEXTREL;
						break;
					}
				}
				free(dt, M_TEMP, pp->p_filesz);
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Load all the necessary sections
	 */
	for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
		Elf_Addr addr, size = 0;
		int prot = 0, syscall = 0;
		int flags = 0;

		switch (pp->p_type) {
		case PT_LOAD:
			if (exe_base != 0) {
				/* PIE: first load at exe_base, rest relative */
				if (pp == base_ph) {
					flags = VMCMD_BASE;
					addr = exe_base;
				} else {
					flags = VMCMD_RELATIVE;
					addr = pp->p_vaddr - base_ph->p_vaddr;
				}
			} else
				addr = ELF_NO_ADDR;

			/*
			 * Permit system calls in main-text static binaries.
			 * Also block the ld.so syscall-grant
			 */
			if (interp == NULL) {
				syscall = VMCMD_SYSCALL;
				p->p_vmspace->vm_map.flags |= VM_MAP_SYSCALL_ONCE;
			}

			/*
			 * Calculates size of text and data segments
			 * by starting at first and going to end of last.
			 * 'rwx' sections are treated as data.
			 * this is correct for BSS_PLT, but may not be
			 * for DATA_PLT, is fine for TEXT_PLT.
			 */
			elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
			    pp, &addr, &size, &prot, flags | textrel | syscall);

			/*
			 * Update exe_base in case alignment was off.
			 * For PIE, addr is relative to exe_base so
			 * adjust it (non PIE exe_base is 0 so no change).
			 */
			if (flags == VMCMD_BASE)
				exe_base = addr;
			else
				addr += exe_base;

			/*
			 * Decide whether it's text or data by looking
			 * at the protection of the section
			 */
			if (prot & PROT_WRITE) {
				/* data section: grow to cover [addr, addr+size) */
				if (epp->ep_dsize == ELF_NO_ADDR) {
					epp->ep_daddr = addr;
					epp->ep_dsize = size;
				} else {
					if (addr < epp->ep_daddr) {
						epp->ep_dsize =
						    epp->ep_dsize +
						    epp->ep_daddr -
						    addr;
						epp->ep_daddr = addr;
					} else
						epp->ep_dsize = addr+size -
						    epp->ep_daddr;
				}
			} else if (prot & PROT_EXEC) {
				/* text section: grow to cover [addr, addr+size) */
				if (epp->ep_tsize == ELF_NO_ADDR) {
					epp->ep_taddr = addr;
					epp->ep_tsize = size;
				} else {
					if (addr < epp->ep_taddr) {
						epp->ep_tsize =
						    epp->ep_tsize +
						    epp->ep_taddr -
						    addr;
						epp->ep_taddr = addr;
					} else
						epp->ep_tsize = addr+size -
						    epp->ep_taddr;
				}
			}
			break;

		case PT_SHLIB:
			error = ENOEXEC;
			goto bad;

		case PT_INTERP:
			/* Already did this one */
		case PT_NOTE:
			break;

		case PT_PHDR:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		case PT_OPENBSD_RANDOMIZE:
			/* cap the total amount of randomized data */
			if (ph[i].p_memsz > randomizequota) {
				error = ENOMEM;
				goto bad;
			}
			randomizequota -= ph[i].p_memsz;
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_randomize,
			    ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
			break;

		case PT_DYNAMIC:
#if defined (__mips__)
			/* DT_DEBUG is not ready on mips */
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
			    ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
#endif
			break;
		case PT_GNU_RELRO:
		case PT_OPENBSD_MUTABLE:
			/* keep these regions mutable (see elf_load_psection) */
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
			    ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
			break;

		default:
			/*
			 * Not fatal, we don't need to understand everything
			 * :-)
			 */
			break;
		}
	}

	phdr += exe_base;

	/*
	 * Strangely some linux programs may have all load sections marked
	 * writeable, in this case, textsize is not -1, but rather 0;
	 */
	if (epp->ep_tsize == ELF_NO_ADDR)
		epp->ep_tsize = 0;
	/*
	 * Another possibility is that it has all load sections marked
	 * read-only.  Fake a zero-sized data segment right after the
	 * text segment.
	 */
	if (epp->ep_dsize == ELF_NO_ADDR) {
		epp->ep_daddr = round_page(epp->ep_taddr + epp->ep_tsize);
		epp->ep_dsize = 0;
	}

	epp->ep_interp = interp;
	epp->ep_entry = eh->e_entry + exe_base;

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter when the exec file is released.
	 */
	if (interp || eh->e_type == ET_DYN) {
		struct elf_args *ap;

		ap = malloc(sizeof(*ap), M_TEMP, M_WAITOK);

		ap->arg_phaddr = phdr;
		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry + exe_base;
		ap->arg_interp = exe_base;

		epp->ep_args = ap;
	}

	free(ph, M_TEMP, phsize);
	vn_marktext(epp->ep_vp);
	return (exec_setup_stack(p, epp));

bad:
	/* error == 0 here means "malformed" and is reported as ENOEXEC */
	if (interp)
		pool_put(&namei_pool, interp);
	free(ph, M_TEMP, phsize);
	kill_vmcmds(&epp->ep_vmcmds);
	if (error == 0)
		return (ENOEXEC);
	return (error);
}
790
/*
 * Phase II of load.  It is now safe to load the interpreter.  Info collected
 * when loading the program is available for setup of the interpreter.
 *
 * Runs the queued vmcmds and, for dynamic/PIE binaries, loads ld.so and
 * copies the ELF auxiliary vector out to the new process image.
 */
int
exec_elf_fixup(struct proc *p, struct exec_package *epp)
{
	char *interp;
	int error = 0;
	struct elf_args *ap;
	AuxInfo ai[ELF_AUX_ENTRIES], *a;

	/* ep_args was only set up for dynamic or ET_DYN binaries */
	ap = epp->ep_args;
	if (ap == NULL) {
		return (0);
	}

	interp = epp->ep_interp;

	/* disable kbind in programs that don't use ld.so */
	if (interp == NULL)
		p->p_p->ps_kbind_addr = BOGO_PC;

	if (interp &&
	    (error = elf_load_file(p, interp, epp, ap)) != 0) {
		uprintf("execve: cannot load %s\n", interp);
		free(ap, M_TEMP, sizeof *ap);
		pool_put(&namei_pool, interp);
		kill_vmcmds(&epp->ep_vmcmds);
		return (error);
	}
	/*
	 * We have to do this ourselves...
	 */
	error = exec_process_vmcmds(p, epp);

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
	 */
	if (error == 0) {
		memset(&ai, 0, sizeof ai);
		a = ai;

		/* where the phdrs of the main object were mapped */
		a->au_id = AUX_phdr;
		a->au_v = ap->arg_phaddr;
		a++;

		a->au_id = AUX_phent;
		a->au_v = ap->arg_phentsize;
		a++;

		a->au_id = AUX_phnum;
		a->au_v = ap->arg_phnum;
		a++;

		a->au_id = AUX_pagesz;
		a->au_v = PAGE_SIZE;
		a++;

		/* load base recorded by elf_load_file()/makecmds */
		a->au_id = AUX_base;
		a->au_v = ap->arg_interp;
		a++;

		a->au_id = AUX_flags;
		a->au_v = 0;
		a++;

		a->au_id = AUX_entry;
		a->au_v = ap->arg_entry;
		a++;

		a->au_id = AUX_openbsd_timekeep;
		a->au_v = p->p_p->ps_timekeep;
		a++;

		/* terminator */
		a->au_id = AUX_null;
		a->au_v = 0;
		a++;

		error = copyout(ai, epp->ep_auxinfo, sizeof ai);
	}
	free(ap, M_TEMP, sizeof *ap);
	if (interp)
		pool_put(&namei_pool, interp);
	return (error);
}
878
879 int
880 elf_os_pt_note_name(Elf_Note *np)
881 {
882 int i, j;
883
884 for (i = 0; i < nitems(elf_note_names); i++) {
885 size_t namlen = strlen(elf_note_names[i].name);
886 if (np->namesz < namlen)
887 continue;
888 /* verify name padding (after the NUL) is NUL */
889 for (j = namlen + 1; j < elfround(np->namesz); j++)
890 if (((char *)(np + 1))[j] != '\0')
891 continue;
892 /* verify desc padding is NUL */
893 for (j = np->descsz; j < elfround(np->descsz); j++)
894 if (((char *)(np + 1))[j] != '\0')
895 continue;
896 if (strcmp((char *)(np + 1), elf_note_names[i].name) == 0)
897 return elf_note_names[i].id;
898 }
899 return (0);
900 }
901
/*
 * Scan the program headers for OS identification.
 *
 * Accumulates the ELF_NOTE_NAME_* bits for all recognized notes found
 * in PT_NOTE segments into *namesp, and sets EXEC_WXNEEDED in the exec
 * package when a PT_OPENBSD_WXNEEDED header is present.  Returns 0 iff
 * an "OpenBSD" note was seen, otherwise ENOEXEC.
 */
int
elf_os_pt_note(struct proc *p, struct exec_package *epp, Elf_Ehdr *eh, int *namesp)
{
	Elf_Phdr *hph, *ph;
	Elf_Note *np = NULL;
	size_t phsize, offset, pfilesz = 0, total;
	int error, names = 0;

	hph = mallocarray(eh->e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff,
	    hph, phsize)) != 0)
		goto out1;

	for (ph = hph; ph < &hph[eh->e_phnum]; ph++) {
		if (ph->p_type == PT_OPENBSD_WXNEEDED) {
			epp->ep_flags |= EXEC_WXNEEDED;
			continue;
		}

		/* only small PT_NOTE segments are considered */
		if (ph->p_type != PT_NOTE || ph->p_filesz > 1024)
			continue;

		/* reuse the previous note buffer if the size matches */
		if (np && ph->p_filesz != pfilesz) {
			free(np, M_TEMP, pfilesz);
			np = NULL;
		}
		if (!np)
			np = malloc(ph->p_filesz, M_TEMP, M_WAITOK);
		pfilesz = ph->p_filesz;
		if ((error = elf_read_from(p, epp->ep_vp, ph->p_offset,
		    np, ph->p_filesz)) != 0)
			goto out2;

		/*
		 * Walk the packed notes; stop at the first entry whose
		 * header or rounded payload would run past the segment.
		 */
		for (offset = 0; offset < ph->p_filesz; offset += total) {
			Elf_Note *np2 = (Elf_Note *)((char *)np + offset);

			if (offset + sizeof(Elf_Note) > ph->p_filesz)
				break;
			total = sizeof(Elf_Note) + elfround(np2->namesz) +
			    elfround(np2->descsz);
			if (offset + total > ph->p_filesz)
				break;
			names |= elf_os_pt_note_name(np2);
		}
	}

out2:
	free(np, M_TEMP, pfilesz);
out1:
	free(hph, M_TEMP, phsize);
	*namesp = names;
	/* read errors are not returned; only the note's absence matters */
	return ((names & ELF_NOTE_NAME_OPENBSD) ? 0 : ENOEXEC);
}
956
957 /*
958 * Start of routines related to dumping core
959 */
960
#ifdef SMALL_KERNEL
/*
 * Small (e.g. ramdisk) kernels do not support core dumps;
 * always refuse with EPERM.
 */
int
coredump_elf(struct proc *p, void *cookie)
{
	return EPERM;
}
#else /* !SMALL_KERNEL */
968
/*
 * State shared between coredump_elf() and the uvm_coredump_walkmap()
 * callbacks while laying out and writing an ELF core file.
 */
struct writesegs_state {
	off_t notestart;	/* file offset of the PT_NOTE data */
	off_t secstart;		/* file offset of the first segment's data */
	off_t secoff;		/* running offset while assigning segments */
	struct proc *p;		/* process being dumped */
	void *iocookie;		/* opaque cookie passed to coredump_write() */
	Elf_Phdr *psections;	/* program headers; last slot is the PT_NOTE */
	size_t psectionslen;	/* size of psections in bytes */
	size_t notesize;	/* note size computed in coredump_setup_elf() */
	int npsections;		/* segment count plus one for the PT_NOTE */
};
980
981 uvm_coredump_setup_cb coredump_setup_elf;
982 uvm_coredump_walk_cb coredump_walk_elf;
983
984 int coredump_notes_elf(struct proc *, void *, size_t *);
985 int coredump_note_elf(struct proc *, void *, size_t *);
986 int coredump_writenote_elf(struct proc *, void *, Elf_Note *,
987 const char *, void *);
988
989 extern vaddr_t sigcode_va;
990 extern vsize_t sigcode_sz;
991
992 int
993 coredump_elf(struct proc *p, void *cookie)
994 {
995 #ifdef DIAGNOSTIC
996 off_t offset;
997 #endif
998 struct writesegs_state ws;
999 size_t notesize;
1000 int error, i;
1001
1002 ws.p = p;
1003 ws.iocookie = cookie;
1004 ws.psections = NULL;
1005
1006 /*
1007 * Walk the map to get all the segment offsets and lengths,
1008 * write out the ELF header.
1009 */
1010 error = uvm_coredump_walkmap(p, coredump_setup_elf,
1011 coredump_walk_elf, &ws);
1012 if (error)
1013 goto out;
1014
1015 error = coredump_write(cookie, UIO_SYSSPACE, ws.psections,
1016 ws.psectionslen);
1017 if (error)
1018 goto out;
1019
1020 /* Write out the notes. */
1021 error = coredump_notes_elf(p, cookie, ¬esize);
1022 if (error)
1023 goto out;
1024
1025 #ifdef DIAGNOSTIC
1026 if (notesize != ws.notesize)
1027 panic("coredump: notesize changed: %zu != %zu",
1028 ws.notesize, notesize);
1029 offset = ws.notestart + notesize;
1030 if (offset != ws.secstart)
1031 panic("coredump: offset %lld != secstart %lld",
1032 (long long) offset, (long long) ws.secstart);
1033 #endif
1034
1035 /* Pass 3: finally, write the sections themselves. */
1036 for (i = 0; i < ws.npsections - 1; i++) {
1037 Elf_Phdr *pent = &ws.psections[i];
1038 if (pent->p_filesz == 0)
1039 continue;
1040
1041 #ifdef DIAGNOSTIC
1042 if (offset != pent->p_offset)
1043 panic("coredump: offset %lld != p_offset[%d] %lld",
1044 (long long) offset, i,
1045 (long long) pent->p_filesz);
1046 #endif
1047
1048 /*
1049 * Since the sigcode is mapped execute-only, we can't
1050 * read it. So use the kernel mapping for it instead.
1051 */
1052 if (pent->p_vaddr == p->p_p->ps_sigcode &&
1053 pent->p_filesz == sigcode_sz) {
1054 error = coredump_write(cookie, UIO_SYSSPACE,
1055 (void *)sigcode_va, sigcode_sz);
1056 } else {
1057 error = coredump_write(cookie, UIO_USERSPACE,
1058 (void *)(vaddr_t)pent->p_vaddr, pent->p_filesz);
1059 }
1060 if (error)
1061 goto out;
1062
1063 coredump_unmap(cookie, (vaddr_t)pent->p_vaddr,
1064 (vaddr_t)pent->p_vaddr + pent->p_filesz);
1065
1066 #ifdef DIAGNOSTIC
1067 offset += ws.psections[i].p_filesz;
1068 #endif
1069 }
1070
1071 out:
1072 free(ws.psections, M_TEMP, ws.psectionslen);
1073 return (error);
1074 }
1075
1076
1077 /*
1078 * Normally we lay out core files like this:
1079 * [ELF Header] [Program headers] [Notes] [data for PT_LOAD segments]
1080 *
1081 * However, if there's >= 65535 segments then it overflows the field
1082 * in the ELF header, so the standard specifies putting a magic
1083 * number there and saving the real count in the .sh_info field of
1084 * the first *section* header...which requires generating a section
1085 * header. To avoid confusing tools, we include an .shstrtab section
1086 * as well so all the indexes look valid. So in this case we lay
1087 * out the core file like this:
1088 * [ELF Header] [Section Headers] [.shstrtab] [Program headers] \
1089 * [Notes] [data for PT_LOAD segments]
1090 *
1091 * The 'shstrtab' structure below is data for the second of the two
1092 * section headers, plus the .shstrtab itself, in one const buffer.
1093 */
/*
 * Second section header plus the .shstrtab contents, in one const
 * buffer; used only for the PN_XNUM (>= 65535 segment) core layout.
 */
static const struct {
	Elf_Shdr shdr;
	char shstrtab[sizeof(ELF_SHSTRTAB) + 1];
} shstrtab = {
	.shdr = {
		.sh_name = 1,			/* offset in .shstrtab below */
		.sh_type = SHT_STRTAB,
		.sh_offset = sizeof(Elf_Ehdr) + 2*sizeof(Elf_Shdr),
		.sh_size = sizeof(ELF_SHSTRTAB) + 1,
		.sh_addralign = 1,
	},
	.shstrtab = "\0" ELF_SHSTRTAB,		/* leading NUL for index 0 */
};
1107
/*
 * First stage of an ELF core dump: write the ELF header (plus, when
 * the segment count does not fit in e_phnum, the two section headers
 * and .shstrtab implementing the PN_XNUM extension), then allocate
 * the program-header array that coredump_walk_elf() fills in, and
 * pre-fill its final slot with the PT_NOTE segment header.
 * Returns 0 on success or an errno value on failure.
 */
int
coredump_setup_elf(int segment_count, void *cookie)
{
	Elf_Ehdr ehdr;
	struct writesegs_state *ws = cookie;
	Elf_Phdr *note;
	int error;

	/* Get the count of segments, plus one for the PT_NOTE */
	ws->npsections = segment_count + 1;

	/* Get the size of the notes (NULL iocookie = size-only pass). */
	error = coredump_notes_elf(ws->p, NULL, &ws->notesize);
	if (error)
		return error;

	/* Setup the ELF header */
	memset(&ehdr, 0, sizeof(ehdr));
	memcpy(ehdr.e_ident, ELFMAG, SELFMAG);
	ehdr.e_ident[EI_CLASS] = ELF_TARG_CLASS;
	ehdr.e_ident[EI_DATA] = ELF_TARG_DATA;
	ehdr.e_ident[EI_VERSION] = EV_CURRENT;
	/* XXX Should be the OSABI/ABI version of the executable. */
	ehdr.e_ident[EI_OSABI] = ELFOSABI_SYSV;
	ehdr.e_ident[EI_ABIVERSION] = 0;
	ehdr.e_type = ET_CORE;
	/* XXX This should be the e_machine of the executable. */
	ehdr.e_machine = ELF_TARG_MACH;
	ehdr.e_version = EV_CURRENT;
	ehdr.e_entry = 0;
	ehdr.e_flags = 0;
	ehdr.e_ehsize = sizeof(ehdr);
	ehdr.e_phentsize = sizeof(Elf_Phdr);

	if (ws->npsections < PN_XNUM) {
		/* Common case: the count fits directly in e_phnum. */
		ehdr.e_phoff = sizeof(ehdr);
		ehdr.e_shoff = 0;
		ehdr.e_phnum = ws->npsections;
		ehdr.e_shentsize = 0;
		ehdr.e_shnum = 0;
		ehdr.e_shstrndx = 0;
	} else {
		/* too many segments, use extension setup */
		ehdr.e_shoff = sizeof(ehdr);
		ehdr.e_phnum = PN_XNUM;
		ehdr.e_shentsize = sizeof(Elf_Shdr);
		ehdr.e_shnum = 2;
		ehdr.e_shstrndx = 1;
		/* Program headers follow the section headers + .shstrtab. */
		ehdr.e_phoff = shstrtab.shdr.sh_offset + shstrtab.shdr.sh_size;
	}

	/* Write out the ELF header. */
	error = coredump_write(ws->iocookie, UIO_SYSSPACE, &ehdr, sizeof(ehdr));
	if (error)
		return error;

	/*
	 * If a section header is needed to store extension info, write
	 * it out after the ELF header and before the program header.
	 */
	if (ehdr.e_shnum != 0) {
		/* Section header 0: real segment count goes in sh_info. */
		Elf_Shdr shdr = { .sh_info = ws->npsections };
		error = coredump_write(ws->iocookie, UIO_SYSSPACE, &shdr,
		    sizeof shdr);
		if (error)
			return error;
		/* Section header 1 (.shstrtab) plus its string data. */
		error = coredump_write(ws->iocookie, UIO_SYSSPACE, &shstrtab,
		    sizeof(shstrtab.shdr) + sizeof(shstrtab.shstrtab));
		if (error)
			return error;
	}

	/*
	 * Allocate the segment header array and setup to collect
	 * the section sizes and offsets
	 */
	ws->psections = mallocarray(ws->npsections, sizeof(Elf_Phdr),
	    M_TEMP, M_WAITOK|M_CANFAIL|M_ZERO);
	if (ws->psections == NULL)
		return ENOMEM;
	ws->psectionslen = ws->npsections * sizeof(Elf_Phdr);

	/* Notes follow the program headers; segment data follows notes. */
	ws->notestart = ehdr.e_phoff + ws->psectionslen;
	ws->secstart = ws->notestart + ws->notesize;
	ws->secoff = ws->secstart;

	/* Fill in the PT_NOTE segment header in the last slot */
	note = &ws->psections[ws->npsections - 1];
	note->p_type = PT_NOTE;
	note->p_offset = ws->notestart;
	note->p_vaddr = 0;
	note->p_paddr = 0;
	note->p_filesz = ws->notesize;
	note->p_memsz = 0;
	note->p_flags = PF_R;
	note->p_align = ELFROUNDSIZE;

	return (0);
}
1207
1208 int
1209 coredump_walk_elf(vaddr_t start, vaddr_t realend, vaddr_t end, vm_prot_t prot,
1210 int nsegment, void *cookie)
1211 {
1212 struct writesegs_state *ws = cookie;
1213 Elf_Phdr phdr;
1214 vsize_t size, realsize;
1215
1216 size = end - start;
1217 realsize = realend - start;
1218
1219 phdr.p_type = PT_LOAD;
1220 phdr.p_offset = ws->secoff;
1221 phdr.p_vaddr = start;
1222 phdr.p_paddr = 0;
1223 phdr.p_filesz = realsize;
1224 phdr.p_memsz = size;
1225 phdr.p_flags = 0;
1226 if (prot & PROT_READ)
1227 phdr.p_flags |= PF_R;
1228 if (prot & PROT_WRITE)
1229 phdr.p_flags |= PF_W;
1230 if (prot & PROT_EXEC)
1231 phdr.p_flags |= PF_X;
1232 phdr.p_align = PAGE_SIZE;
1233
1234 ws->secoff += phdr.p_filesz;
1235 ws->psections[nsegment] = phdr;
1236
1237 return (0);
1238 }
1239
1240 int
1241 coredump_notes_elf(struct proc *p, void *iocookie, size_t *sizep)
1242 {
1243 struct elfcore_procinfo cpi;
1244 Elf_Note nhdr;
1245 struct process *pr = p->p_p;
1246 struct proc *q;
1247 size_t size, notesize;
1248 int error;
1249
1250 KASSERT(!P_HASSIBLING(p) || pr->ps_single != NULL);
1251 size = 0;
1252
1253 /* First, write an elfcore_procinfo. */
1254 notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD")) +
1255 elfround(sizeof(cpi));
1256 if (iocookie) {
1257 memset(&cpi, 0, sizeof(cpi));
1258
1259 cpi.cpi_version = ELFCORE_PROCINFO_VERSION;
1260 cpi.cpi_cpisize = sizeof(cpi);
1261 cpi.cpi_signo = p->p_sisig;
1262 cpi.cpi_sigcode = p->p_sicode;
1263
1264 cpi.cpi_sigpend = p->p_siglist | pr->ps_siglist;
1265 cpi.cpi_sigmask = p->p_sigmask;
1266 cpi.cpi_sigignore = pr->ps_sigacts->ps_sigignore;
1267 cpi.cpi_sigcatch = pr->ps_sigacts->ps_sigcatch;
1268
1269 cpi.cpi_pid = pr->ps_pid;
1270 cpi.cpi_ppid = pr->ps_ppid;
1271 cpi.cpi_pgrp = pr->ps_pgid;
1272 if (pr->ps_session->s_leader)
1273 cpi.cpi_sid = pr->ps_session->s_leader->ps_pid;
1274 else
1275 cpi.cpi_sid = 0;
1276
1277 cpi.cpi_ruid = p->p_ucred->cr_ruid;
1278 cpi.cpi_euid = p->p_ucred->cr_uid;
1279 cpi.cpi_svuid = p->p_ucred->cr_svuid;
1280
1281 cpi.cpi_rgid = p->p_ucred->cr_rgid;
1282 cpi.cpi_egid = p->p_ucred->cr_gid;
1283 cpi.cpi_svgid = p->p_ucred->cr_svgid;
1284
1285 (void)strlcpy(cpi.cpi_name, pr->ps_comm, sizeof(cpi.cpi_name));
1286
1287 nhdr.namesz = sizeof("OpenBSD");
1288 nhdr.descsz = sizeof(cpi);
1289 nhdr.type = NT_OPENBSD_PROCINFO;
1290
1291 error = coredump_writenote_elf(p, iocookie, &nhdr,
1292 "OpenBSD", &cpi);
1293 if (error)
1294 return (error);
1295 }
1296 size += notesize;
1297
1298 /* Second, write an NT_OPENBSD_AUXV note. */
1299 notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD")) +
1300 elfround(ELF_AUX_WORDS * sizeof(char *));
1301 if (iocookie && pr->ps_auxinfo) {
1302
1303 nhdr.namesz = sizeof("OpenBSD");
1304 nhdr.descsz = ELF_AUX_WORDS * sizeof(char *);
1305 nhdr.type = NT_OPENBSD_AUXV;
1306
1307 error = coredump_write(iocookie, UIO_SYSSPACE,
1308 &nhdr, sizeof(nhdr));
1309 if (error)
1310 return (error);
1311
1312 error = coredump_write(iocookie, UIO_SYSSPACE,
1313 "OpenBSD", elfround(nhdr.namesz));
1314 if (error)
1315 return (error);
1316
1317 error = coredump_write(iocookie, UIO_USERSPACE,
1318 (caddr_t)pr->ps_auxinfo, nhdr.descsz);
1319 if (error)
1320 return (error);
1321 }
1322 size += notesize;
1323
1324 #ifdef PT_WCOOKIE
1325 notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD")) +
1326 elfround(sizeof(register_t));
1327 if (iocookie) {
1328 register_t wcookie;
1329
1330 nhdr.namesz = sizeof("OpenBSD");
1331 nhdr.descsz = sizeof(register_t);
1332 nhdr.type = NT_OPENBSD_WCOOKIE;
1333
1334 wcookie = process_get_wcookie(p);
1335 error = coredump_writenote_elf(p, iocookie, &nhdr,
1336 "OpenBSD", &wcookie);
1337 if (error)
1338 return (error);
1339 }
1340 size += notesize;
1341 #endif
1342
1343 /*
1344 * Now write the register info for the thread that caused the
1345 * coredump.
1346 */
1347 error = coredump_note_elf(p, iocookie, ¬esize);
1348 if (error)
1349 return (error);
1350 size += notesize;
1351
1352 /*
1353 * Now, for each thread, write the register info and any other
1354 * per-thread notes. Since we're dumping core, all the other
1355 * threads in the process have been stopped and the list can't
1356 * change.
1357 */
1358 TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
1359 if (q == p) /* we've taken care of this thread */
1360 continue;
1361 error = coredump_note_elf(q, iocookie, ¬esize);
1362 if (error)
1363 return (error);
1364 size += notesize;
1365 }
1366
1367 *sizep = size;
1368 return (0);
1369 }
1370
/*
 * Write (or, when iocookie is NULL, just size) the per-thread notes
 * for thread p: an NT_OPENBSD_REGS note and, on ports defining
 * PT_GETFPREGS, an NT_OPENBSD_FPREGS note.  The note name is
 * "OpenBSD@<tid>".  *sizep is set to the total note size; returns
 * 0 on success or an errno value.
 */
int
coredump_note_elf(struct proc *p, void *iocookie, size_t *sizep)
{
	Elf_Note nhdr;
	int size, notesize, error;
	int namesize;
	/* extra ELFROUNDSIZE bytes leave room for the NUL padding below */
	char name[64+ELFROUNDSIZE];
	struct reg intreg;
#ifdef PT_GETFPREGS
	struct fpreg freg;
#endif

	size = 0;

	/* Build the note name "OpenBSD@<tid>" and zero the pad bytes. */
	snprintf(name, sizeof(name)-ELFROUNDSIZE, "%s@%d",
	    "OpenBSD", p->p_tid + THREAD_PID_OFFSET);
	namesize = strlen(name) + 1;
	memset(name + namesize, 0, elfround(namesize) - namesize);

	notesize = sizeof(nhdr) + elfround(namesize) + elfround(sizeof(intreg));
	if (iocookie) {
		error = process_read_regs(p, &intreg);
		if (error)
			return (error);

		nhdr.namesz = namesize;
		nhdr.descsz = sizeof(intreg);
		nhdr.type = NT_OPENBSD_REGS;

		error = coredump_writenote_elf(p, iocookie, &nhdr,
		    name, &intreg);
		if (error)
			return (error);

	}
	size += notesize;

#ifdef PT_GETFPREGS
	notesize = sizeof(nhdr) + elfround(namesize) + elfround(sizeof(freg));
	if (iocookie) {
		error = process_read_fpregs(p, &freg);
		if (error)
			return (error);

		nhdr.namesz = namesize;
		nhdr.descsz = sizeof(freg);
		nhdr.type = NT_OPENBSD_FPREGS;

		error = coredump_writenote_elf(p, iocookie, &nhdr, name, &freg);
		if (error)
			return (error);
	}
	size += notesize;
#endif

	*sizep = size;
	/* XXX Add hook for machdep per-LWP notes. */
	return (0);
}
1430
1431 int
1432 coredump_writenote_elf(struct proc *p, void *cookie, Elf_Note *nhdr,
1433 const char *name, void *data)
1434 {
1435 int error;
1436
1437 error = coredump_write(cookie, UIO_SYSSPACE, nhdr, sizeof(*nhdr));
1438 if (error)
1439 return error;
1440
1441 error = coredump_write(cookie, UIO_SYSSPACE, name,
1442 elfround(nhdr->namesz));
1443 if (error)
1444 return error;
1445
1446 return coredump_write(cookie, UIO_SYSSPACE, data, nhdr->descsz);
1447 }
1448 #endif /* !SMALL_KERNEL */
Cache object: 419dd955e013d54ecac6bfc5f339cdd2
|