FreeBSD/Linux Kernel Cross Reference
sys/kern/imgact_elf.c
1 /*-
2 * Copyright (c) 2000 David O'Brien
3 * Copyright (c) 1995-1996 Søren Schmidt
4 * Copyright (c) 1996 Peter Wemm
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer
12 * in this position and unchanged.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include "opt_compat.h"
35
36 #include <sys/param.h>
37 #include <sys/exec.h>
38 #include <sys/fcntl.h>
39 #include <sys/imgact.h>
40 #include <sys/imgact_elf.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mount.h>
45 #include <sys/mutex.h>
46 #include <sys/mman.h>
47 #include <sys/namei.h>
48 #include <sys/pioctl.h>
49 #include <sys/proc.h>
50 #include <sys/procfs.h>
51 #include <sys/resourcevar.h>
52 #include <sys/sf_buf.h>
53 #include <sys/systm.h>
54 #include <sys/signalvar.h>
55 #include <sys/stat.h>
56 #include <sys/sx.h>
57 #include <sys/syscall.h>
58 #include <sys/sysctl.h>
59 #include <sys/sysent.h>
60 #include <sys/vnode.h>
61
62 #include <vm/vm.h>
63 #include <vm/vm_kern.h>
64 #include <vm/vm_param.h>
65 #include <vm/pmap.h>
66 #include <vm/vm_map.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_extern.h>
69
70 #include <machine/elf.h>
71 #include <machine/md_var.h>
72
73 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
74 #include <machine/fpu.h>
75 #include <compat/ia32/ia32_reg.h>
76 #endif
77
78 #define OLD_EI_BRAND 8
79
80 static int __elfN(check_header)(const Elf_Ehdr *hdr);
81 static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
82 const char *interp);
83 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
84 u_long *entry, size_t pagesize);
85 static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
86 vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
87 vm_prot_t prot, size_t pagesize);
88 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
89
90 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
91 "");
92
93 int __elfN(fallback_brand) = -1;
94 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
95 fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
96 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
97 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
98 &__elfN(fallback_brand));
99
100 static int elf_trace = 0;
101 SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");
102
103 static int elf_legacy_coredump = 0;
104 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
105 &elf_legacy_coredump, 0, "");
106
107 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
108
109 #define trunc_page_ps(va, ps) ((va) & ~(ps - 1))
110 #define round_page_ps(va, ps) (((va) + (ps - 1)) & ~(ps - 1))
111 #define aligned(a, t) (trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a))
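/*
 * Editor's note, assuming 4KB (0x1000) pages:
 *	trunc_page_ps(0x12345, 0x1000) == 0x12000
 *	round_page_ps(0x12345, 0x1000) == 0x13000
 *	aligned(0x12340, Elf_Addr) is true when sizeof(Elf_Addr) == 8,
 *	since 0x12340 is a multiple of 8.
 */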
112
113 int
114 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
115 {
116 int i;
117
118 for (i = 0; i < MAX_BRANDS; i++) {
119 if (elf_brand_list[i] == NULL) {
120 elf_brand_list[i] = entry;
121 break;
122 }
123 }
124 if (i == MAX_BRANDS)
125 return (-1);
126 return (0);
127 }
128
129 int
130 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
131 {
132 int i;
133
134 for (i = 0; i < MAX_BRANDS; i++) {
135 if (elf_brand_list[i] == entry) {
136 elf_brand_list[i] = NULL;
137 break;
138 }
139 }
140 if (i == MAX_BRANDS)
141 return (-1);
142 return (0);
143 }
144
145 int
146 __elfN(brand_inuse)(Elf_Brandinfo *entry)
147 {
148 struct proc *p;
149 int rval = FALSE;
150
151 sx_slock(&allproc_lock);
152 FOREACH_PROC_IN_SYSTEM(p) {
153 if (p->p_sysent == entry->sysvec) {
154 rval = TRUE;
155 break;
156 }
157 }
158 sx_sunlock(&allproc_lock);
159
160 return (rval);
161 }
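
/*
 * Editor's sketch (not part of this file): how a hypothetical ABI module
 * might use the three entry points above.  The field names follow the
 * Elf_Brandinfo uses elsewhere in this file; "example_sysvec" and the
 * interpreter path are made-up placeholders.
 */
#if 0
static Elf_Brandinfo example_brand = {
	.brand		= ELFOSABI_FREEBSD,
	.machine	= EM_386,
	.compat_3_brand	= "FreeBSD",
	.emul_path	= NULL,
	.interp_path	= "/libexec/ld-elf.so.1",
	.sysvec		= &example_sysvec,	/* hypothetical */
	.interp_newpath	= NULL,
	.flags		= 0,
};

static void
example_register(void)
{
	/* insert_brand_entry() returns -1 when all MAX_BRANDS slots are taken. */
	if (__elfN(insert_brand_entry)(&example_brand) == -1)
		printf("example: brand table full\n");
}

static void
example_unregister(void)
{
	/* Only remove the brand once no process still runs on its sysvec. */
	if (__elfN(brand_inuse)(&example_brand) == 0)
		(void)__elfN(remove_brand_entry)(&example_brand);
}
#endif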
162
163 static Elf_Brandinfo *
164 __elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
165 {
166 Elf_Brandinfo *bi;
167 int i;
168
169 /*
170 * We support three types of branding -- (1) the ELF EI_OSABI field
171 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
172 * branding w/in the ELF header, and (3) path of the `interp_path'
173 * field. We should also look for an ".note.ABI-tag" ELF section now
174 * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
175 */
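	/*
	 * Editor's note: a FreeBSD 3.x-style brand is the literal string
	 * "FreeBSD" written into e_ident[] starting at OLD_EI_BRAND
	 * (byte 8), where newer binaries instead set EI_OSABI.
	 */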
176
177 /* If the executable has a brand, search for it in the brand list. */
178 for (i = 0; i < MAX_BRANDS; i++) {
179 bi = elf_brand_list[i];
180 if (bi != NULL && hdr->e_machine == bi->machine &&
181 (hdr->e_ident[EI_OSABI] == bi->brand ||
182 strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
183 bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
184 return (bi);
185 }
186
187 /* Lacking a known brand, search for a recognized interpreter. */
188 if (interp != NULL) {
189 for (i = 0; i < MAX_BRANDS; i++) {
190 bi = elf_brand_list[i];
191 if (bi != NULL && hdr->e_machine == bi->machine &&
192 strcmp(interp, bi->interp_path) == 0)
193 return (bi);
194 }
195 }
196
197 /* Lacking a recognized interpreter, try the default brand */
198 for (i = 0; i < MAX_BRANDS; i++) {
199 bi = elf_brand_list[i];
200 if (bi != NULL && hdr->e_machine == bi->machine &&
201 __elfN(fallback_brand) == bi->brand)
202 return (bi);
203 }
204 return (NULL);
205 }
206
207 static int
208 __elfN(check_header)(const Elf_Ehdr *hdr)
209 {
210 Elf_Brandinfo *bi;
211 int i;
212
213 if (!IS_ELF(*hdr) ||
214 hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
215 hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
216 hdr->e_ident[EI_VERSION] != EV_CURRENT ||
217 hdr->e_phentsize != sizeof(Elf_Phdr) ||
218 hdr->e_version != ELF_TARG_VER)
219 return (ENOEXEC);
220
221 /*
222 * Make sure we have at least one brand for this machine.
223 */
224
225 for (i = 0; i < MAX_BRANDS; i++) {
226 bi = elf_brand_list[i];
227 if (bi != NULL && bi->machine == hdr->e_machine)
228 break;
229 }
230 if (i == MAX_BRANDS)
231 return (ENOEXEC);
232
233 return (0);
234 }
235
236 static int
237 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
238 vm_offset_t start, vm_offset_t end, vm_prot_t prot)
239 {
240 struct sf_buf *sf;
241 int error;
242 vm_offset_t off;
243
244 /*
245 * Create the page if it doesn't exist yet. Ignore errors.
246 */
247 vm_map_lock(map);
248 vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
249 VM_PROT_ALL, VM_PROT_ALL, 0);
250 vm_map_unlock(map);
251
252 /*
253 * Find the page from the underlying object.
254 */
255 if (object) {
256 sf = vm_imgact_map_page(object, offset);
257 if (sf == NULL)
258 return (KERN_FAILURE);
259 off = offset - trunc_page(offset);
260 error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
261 end - start);
262 vm_imgact_unmap_page(sf);
263 if (error) {
264 return (KERN_FAILURE);
265 }
266 }
267
268 return (KERN_SUCCESS);
269 }
270
271 static int
272 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
273 vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
274 {
275 struct sf_buf *sf;
276 vm_offset_t off;
277 vm_size_t sz;
278 int error, rv;
279
280 if (start != trunc_page(start)) {
281 rv = __elfN(map_partial)(map, object, offset, start,
282 round_page(start), prot);
283 if (rv)
284 return (rv);
285 offset += round_page(start) - start;
286 start = round_page(start);
287 }
288 if (end != round_page(end)) {
289 rv = __elfN(map_partial)(map, object, offset +
290 trunc_page(end) - start, trunc_page(end), end, prot);
291 if (rv)
292 return (rv);
293 end = trunc_page(end);
294 }
295 if (end > start) {
296 if (offset & PAGE_MASK) {
297 /*
298 * The mapping is not page aligned. This means we have
299 * to copy the data. Sigh.
300 */
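			/*
			 * Editor's note: this arises when the image ABI
			 * page size (sv_pagesize) is smaller than the
			 * kernel's PAGE_SIZE -- e.g. 4KB-aligned i386
			 * binaries run via COMPAT_IA32 on an ia64 kernel
			 * with 8KB pages -- so the object cannot simply
			 * be mapped at the requested offset.
			 */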
301 rv = vm_map_find(map, NULL, 0, &start, end - start,
302 FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
303 if (rv)
304 return (rv);
305 if (object == NULL)
306 return (KERN_SUCCESS);
307 for (; start < end; start += sz) {
308 sf = vm_imgact_map_page(object, offset);
309 if (sf == NULL)
310 return (KERN_FAILURE);
311 off = offset - trunc_page(offset);
312 sz = end - start;
313 if (sz > PAGE_SIZE - off)
314 sz = PAGE_SIZE - off;
315 error = copyout((caddr_t)sf_buf_kva(sf) + off,
316 (caddr_t)start, sz);
317 vm_imgact_unmap_page(sf);
318 if (error) {
319 return (KERN_FAILURE);
320 }
321 offset += sz;
322 }
323 rv = KERN_SUCCESS;
324 } else {
325 vm_object_reference(object);
326 vm_map_lock(map);
327 rv = vm_map_insert(map, object, offset, start, end,
328 prot, VM_PROT_ALL, cow);
329 vm_map_unlock(map);
330 if (rv != KERN_SUCCESS)
331 vm_object_deallocate(object);
332 }
333 return (rv);
334 } else {
335 return (KERN_SUCCESS);
336 }
337 }
338
339 static int
340 __elfN(load_section)(struct vmspace *vmspace,
341 vm_object_t object, vm_offset_t offset,
342 caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
343 size_t pagesize)
344 {
345 struct sf_buf *sf;
346 size_t map_len;
347 vm_offset_t map_addr;
348 int error, rv, cow;
349 size_t copy_len;
350 vm_offset_t file_addr;
351
352 /*
353 * It's necessary to fail if the filsz + offset taken from the
354 * header is greater than the actual file pager object's size.
355 * If we were to allow this, then the vm_map_find() below would
356 * walk right off the end of the file object and into the ether.
357 *
358 * While I'm here, might as well check for something else that
359 * is invalid: filsz cannot be greater than memsz.
360 */
361 if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
362 filsz > memsz) {
363 uprintf("elf_load_section: truncated ELF file\n");
364 return (ENOEXEC);
365 }
366
367 map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
368 file_addr = trunc_page_ps(offset, pagesize);
369
370 /*
371 * We have two choices. We can either clear the data in the last page
372 * of an oversized mapping, or we can start the anon mapping a page
373 * early and copy the initialized data into that first page. We
374	 * choose the second.
375 */
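	/*
	 * Editor's worked example (4KB pages): offset = 0x2100,
	 * filsz = 0x1200, memsz = 0x3000 gives file_addr = 0x2000 and,
	 * since memsz > filsz, map_len = trunc_page(0x3300) - 0x2000 =
	 * 0x1000; the tail of the file data (file offsets 0x3000-0x32ff)
	 * is copied by hand into the anonymous bss mapping set up below.
	 */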
376 if (memsz > filsz)
377 map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
378 else
379 map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
380
381 if (map_len != 0) {
382 /* cow flags: don't dump readonly sections in core */
383 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
384 (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
385
386 rv = __elfN(map_insert)(&vmspace->vm_map,
387 object,
388 file_addr, /* file offset */
389 map_addr, /* virtual start */
390 map_addr + map_len,/* virtual end */
391 prot,
392 cow);
393 if (rv != KERN_SUCCESS)
394 return (EINVAL);
395
396 /* we can stop now if we've covered it all */
397 if (memsz == filsz) {
398 return (0);
399 }
400 }
401
402
403 /*
404 * We have to get the remaining bit of the file into the first part
405 * of the oversized map segment. This is normally because the .data
406 * segment in the file is extended to provide bss. It's a neat idea
407 * to try and save a page, but it's a pain in the behind to implement.
408 */
409 copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
410 map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
411 map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
412 map_addr;
413
414 /* This had damn well better be true! */
415 if (map_len != 0) {
416 rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
417 map_addr + map_len, VM_PROT_ALL, 0);
418 if (rv != KERN_SUCCESS) {
419 return (EINVAL);
420 }
421 }
422
423 if (copy_len != 0) {
424 vm_offset_t off;
425
426 sf = vm_imgact_map_page(object, offset + filsz);
427 if (sf == NULL)
428 return (EIO);
429
430 /* send the page fragment to user space */
431 off = trunc_page_ps(offset + filsz, pagesize) -
432 trunc_page(offset + filsz);
433 error = copyout((caddr_t)sf_buf_kva(sf) + off,
434 (caddr_t)map_addr, copy_len);
435 vm_imgact_unmap_page(sf);
436 if (error) {
437 return (error);
438 }
439 }
440
441 /*
442 * set it to the specified protection.
443 * XXX had better undo the damage from pasting over the cracks here!
444 */
445 vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
446 round_page(map_addr + map_len), prot, FALSE);
447
448 return (0);
449 }
450
451 /*
452 * Load the file "file" into memory. It may be either a shared object
453 * or an executable.
454 *
455 * The "addr" reference parameter is in/out. On entry, it specifies
456 * the address where a shared object should be loaded. If the file is
457 * an executable, this value is ignored. On exit, "addr" specifies
458 * where the file was actually loaded.
459 *
460 * The "entry" reference parameter is out only. On exit, it specifies
461 * the entry point for the loaded file.
462 */
463 static int
464 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
465 u_long *entry, size_t pagesize)
466 {
467 struct {
468 struct nameidata nd;
469 struct vattr attr;
470 struct image_params image_params;
471 } *tempdata;
472 const Elf_Ehdr *hdr = NULL;
473 const Elf_Phdr *phdr = NULL;
474 struct nameidata *nd;
475 struct vmspace *vmspace = p->p_vmspace;
476 struct vattr *attr;
477 struct image_params *imgp;
478 vm_prot_t prot;
479 u_long rbase;
480 u_long base_addr = 0;
481 int vfslocked, error, i, numsegs;
482
483 if (curthread->td_proc != p)
484 panic("elf_load_file - thread"); /* XXXKSE DIAGNOSTIC */
485
486 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
487 nd = &tempdata->nd;
488 attr = &tempdata->attr;
489 imgp = &tempdata->image_params;
490
491 /*
492 * Initialize part of the common data
493 */
494 imgp->proc = p;
495 imgp->attr = attr;
496 imgp->firstpage = NULL;
497 imgp->image_header = NULL;
498 imgp->object = NULL;
499 imgp->execlabel = NULL;
500
501 /* XXXKSE */
502 NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
503 curthread);
504 vfslocked = 0;
505 if ((error = namei(nd)) != 0) {
506 nd->ni_vp = NULL;
507 goto fail;
508 }
509 vfslocked = NDHASGIANT(nd);
510 NDFREE(nd, NDF_ONLY_PNBUF);
511 imgp->vp = nd->ni_vp;
512
513 /*
514 * Check permissions, modes, uid, etc on the file, and "open" it.
515 */
516 error = exec_check_permissions(imgp);
517 if (error)
518 goto fail;
519
520 error = exec_map_first_page(imgp);
521 if (error)
522 goto fail;
523
524 /*
525 * Also make certain that the interpreter stays the same, so set
526 * its VV_TEXT flag, too.
527 */
528 nd->ni_vp->v_vflag |= VV_TEXT;
529
530 imgp->object = nd->ni_vp->v_object;
531
532 hdr = (const Elf_Ehdr *)imgp->image_header;
533 if ((error = __elfN(check_header)(hdr)) != 0)
534 goto fail;
535 if (hdr->e_type == ET_DYN)
536 rbase = *addr;
537 else if (hdr->e_type == ET_EXEC)
538 rbase = 0;
539 else {
540 error = ENOEXEC;
541 goto fail;
542 }
543
544 /* Only support headers that fit within first page for now */
545 /* (multiplication of two Elf_Half fields will not overflow) */
546 if ((hdr->e_phoff > PAGE_SIZE) ||
547 (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
548 error = ENOEXEC;
549 goto fail;
550 }
551
552 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
553 if (!aligned(phdr, Elf_Addr)) {
554 error = ENOEXEC;
555 goto fail;
556 }
557
558 for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
559 if (phdr[i].p_type == PT_LOAD) { /* Loadable segment */
560 prot = 0;
561 if (phdr[i].p_flags & PF_X)
562 prot |= VM_PROT_EXECUTE;
563 if (phdr[i].p_flags & PF_W)
564 prot |= VM_PROT_WRITE;
565 if (phdr[i].p_flags & PF_R)
566 prot |= VM_PROT_READ;
567
568 if ((error = __elfN(load_section)(vmspace,
569 imgp->object, phdr[i].p_offset,
570 (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
571 phdr[i].p_memsz, phdr[i].p_filesz, prot,
572 pagesize)) != 0)
573 goto fail;
574 /*
575 * Establish the base address if this is the
576 * first segment.
577 */
578 if (numsegs == 0)
579 base_addr = trunc_page(phdr[i].p_vaddr +
580 rbase);
581 numsegs++;
582 }
583 }
584 *addr = base_addr;
585 *entry = (unsigned long)hdr->e_entry + rbase;
586
587 fail:
588 if (imgp->firstpage)
589 exec_unmap_first_page(imgp);
590
591 if (nd->ni_vp)
592 vput(nd->ni_vp);
593
594 VFS_UNLOCK_GIANT(vfslocked);
595 free(tempdata, M_TEMP);
596
597 return (error);
598 }
599
600 static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";
601
602 static int
603 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
604 {
605 const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
606 const Elf_Phdr *phdr, *pnote = NULL;
607 Elf_Auxargs *elf_auxargs;
608 struct vmspace *vmspace;
609 vm_prot_t prot;
610 u_long text_size = 0, data_size = 0, total_size = 0;
611 u_long text_addr = 0, data_addr = 0;
612 u_long seg_size, seg_addr;
613 u_long addr, entry = 0, proghdr = 0;
614 int error = 0, i;
615 const char *interp = NULL, *newinterp = NULL;
616 Elf_Brandinfo *brand_info;
617 const Elf_Note *note, *note_end;
618 char *path;
619 const char *note_name;
620 struct thread *td = curthread;
621 struct sysentvec *sv;
622
623 /*
624	 * Do we have a valid ELF header?
625 *
626 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
627 * if particular brand doesn't support it.
628 */
629 if (__elfN(check_header)(hdr) != 0 ||
630 (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
631 return (-1);
632
633 /*
634 * From here on down, we return an errno, not -1, as we've
635 * detected an ELF file.
636 */
637
638 if ((hdr->e_phoff > PAGE_SIZE) ||
639 (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
640 /* Only support headers in first page for now */
641 return (ENOEXEC);
642 }
643 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
644 if (!aligned(phdr, Elf_Addr))
645 return (ENOEXEC);
646 for (i = 0; i < hdr->e_phnum; i++) {
647 if (phdr[i].p_type == PT_INTERP) {
648 /* Path to interpreter */
649 if (phdr[i].p_filesz > MAXPATHLEN ||
650 phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
651 return (ENOEXEC);
652 interp = imgp->image_header + phdr[i].p_offset;
653 break;
654 }
655 }
656
657 brand_info = __elfN(get_brandinfo)(hdr, interp);
658 if (brand_info == NULL) {
659 uprintf("ELF binary type \"%u\" not known.\n",
660 hdr->e_ident[EI_OSABI]);
661 return (ENOEXEC);
662 }
663 if (hdr->e_type == ET_DYN &&
664 (brand_info->flags & BI_CAN_EXEC_DYN) == 0)
665 return (ENOEXEC);
666 sv = brand_info->sysvec;
667 if (interp != NULL && brand_info->interp_newpath != NULL)
668 newinterp = brand_info->interp_newpath;
669
670 /*
671 * Avoid a possible deadlock if the current address space is destroyed
672 * and that address space maps the locked vnode. In the common case,
673 * the locked vnode's v_usecount is decremented but remains greater
674 * than zero. Consequently, the vnode lock is not needed by vrele().
675 * However, in cases where the vnode lock is external, such as nullfs,
676 * v_usecount may become zero.
677 */
678 VOP_UNLOCK(imgp->vp, 0, td);
679
680 error = exec_new_vmspace(imgp, sv);
681 imgp->proc->p_sysent = sv;
682
683 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
684 if (error)
685 return (error);
686
687 vmspace = imgp->proc->p_vmspace;
688
689 for (i = 0; i < hdr->e_phnum; i++) {
690 switch (phdr[i].p_type) {
691 case PT_LOAD: /* Loadable segment */
692 prot = 0;
693 if (phdr[i].p_flags & PF_X)
694 prot |= VM_PROT_EXECUTE;
695 if (phdr[i].p_flags & PF_W)
696 prot |= VM_PROT_WRITE;
697 if (phdr[i].p_flags & PF_R)
698 prot |= VM_PROT_READ;
699
700 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
701 /*
702 * Some x86 binaries assume read == executable,
703 * notably the M3 runtime and therefore cvsup
704 */
705 if (prot & VM_PROT_READ)
706 prot |= VM_PROT_EXECUTE;
707 #endif
708
709 if ((error = __elfN(load_section)(vmspace,
710 imgp->object, phdr[i].p_offset,
711 (caddr_t)(uintptr_t)phdr[i].p_vaddr,
712 phdr[i].p_memsz, phdr[i].p_filesz, prot,
713 sv->sv_pagesize)) != 0)
714 return (error);
715
716 /*
717 * If this segment contains the program headers,
718 * remember their virtual address for the AT_PHDR
719 * aux entry. Static binaries don't usually include
720 * a PT_PHDR entry.
721 */
722 if (phdr[i].p_offset == 0 &&
723 hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
724 <= phdr[i].p_filesz)
725 proghdr = phdr[i].p_vaddr + hdr->e_phoff;
726
727 seg_addr = trunc_page(phdr[i].p_vaddr);
728 seg_size = round_page(phdr[i].p_memsz +
729 phdr[i].p_vaddr - seg_addr);
730
731 /*
732 * Is this .text or .data? We can't use
733 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
734 * alpha terribly and possibly does other bad
735 * things so we stick to the old way of figuring
736 * it out: If the segment contains the program
737 * entry point, it's a text segment, otherwise it
738 * is a data segment.
739 *
740 * Note that obreak() assumes that data_addr +
741 * data_size == end of data load area, and the ELF
742 * file format expects segments to be sorted by
743 * address. If multiple data segments exist, the
744 * last one will be used.
745 */
746 if (hdr->e_entry >= phdr[i].p_vaddr &&
747 hdr->e_entry < (phdr[i].p_vaddr +
748 phdr[i].p_memsz)) {
749 text_size = seg_size;
750 text_addr = seg_addr;
751 entry = (u_long)hdr->e_entry;
752 } else {
753 data_size = seg_size;
754 data_addr = seg_addr;
755 }
756 total_size += seg_size;
757 break;
758 case PT_PHDR: /* Program header table info */
759 proghdr = phdr[i].p_vaddr;
760 break;
761 case PT_NOTE:
762 pnote = &phdr[i];
763 break;
764 default:
765 break;
766 }
767 }
768
769 if (data_addr == 0 && data_size == 0) {
770 data_addr = text_addr;
771 data_size = text_size;
772 }
773
774 /*
775 * Check limits. It should be safe to check the
776 * limits after loading the segments since we do
777 * not actually fault in all the segments pages.
778 */
779 PROC_LOCK(imgp->proc);
780 if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
781 text_size > maxtsiz ||
782 total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
783 PROC_UNLOCK(imgp->proc);
784 return (ENOMEM);
785 }
786
787 vmspace->vm_tsize = text_size >> PAGE_SHIFT;
788 vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
789 vmspace->vm_dsize = data_size >> PAGE_SHIFT;
790 vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
791
792 /*
793 * We load the dynamic linker where a userland call
794 * to mmap(0, ...) would put it. The rationale behind this
795 * calculation is that it leaves room for the heap to grow to
796 * its maximum allowed size.
797 */
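	/*
	 * Editor's worked example: with vm_daddr at 0x08100000 and a
	 * 512MB RLIMIT_DATA hard limit, the interpreter base becomes
	 * round_page(0x08100000 + 0x20000000) = 0x28100000, safely past
	 * any address the heap could ever grow to.
	 */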
798 addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
799 lim_max(imgp->proc, RLIMIT_DATA));
800 PROC_UNLOCK(imgp->proc);
801
802 imgp->entry_addr = entry;
803
804 if (interp != NULL) {
805 int have_interp = FALSE;
806 VOP_UNLOCK(imgp->vp, 0, td);
807 if (brand_info->emul_path != NULL &&
808 brand_info->emul_path[0] != '\0') {
809 path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
810 snprintf(path, MAXPATHLEN, "%s%s",
811 brand_info->emul_path, interp);
812 error = __elfN(load_file)(imgp->proc, path, &addr,
813 &imgp->entry_addr, sv->sv_pagesize);
814 free(path, M_TEMP);
815 if (error == 0)
816 have_interp = TRUE;
817 }
818 if (!have_interp && newinterp != NULL) {
819 error = __elfN(load_file)(imgp->proc, newinterp, &addr,
820 &imgp->entry_addr, sv->sv_pagesize);
821 if (error == 0)
822 have_interp = TRUE;
823 }
824 if (!have_interp) {
825 error = __elfN(load_file)(imgp->proc, interp, &addr,
826 &imgp->entry_addr, sv->sv_pagesize);
827 }
828 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
829 if (error != 0) {
830 uprintf("ELF interpreter %s not found\n", interp);
831 return (error);
832 }
833 }
834
835 /*
836 * Construct auxargs table (used by the fixup routine)
837 */
838 elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
839 elf_auxargs->execfd = -1;
840 elf_auxargs->phdr = proghdr;
841 elf_auxargs->phent = hdr->e_phentsize;
842 elf_auxargs->phnum = hdr->e_phnum;
843 elf_auxargs->pagesz = PAGE_SIZE;
844 elf_auxargs->base = addr;
845 elf_auxargs->flags = 0;
846 elf_auxargs->entry = entry;
847 elf_auxargs->trace = elf_trace;
848
849 imgp->auxargs = elf_auxargs;
850 imgp->interpreted = 0;
851
852 /*
853 * Try to fetch the osreldate for FreeBSD binary from the ELF
854 * OSABI-note. Only the first page of the image is searched,
855 * the same as for headers.
856 */
857 if (pnote != NULL && pnote->p_offset < PAGE_SIZE &&
858	    pnote->p_offset + pnote->p_filesz < PAGE_SIZE) {
859 note = (const Elf_Note *)(imgp->image_header + pnote->p_offset);
860 if (!aligned(note, Elf32_Addr)) {
861 free(imgp->auxargs, M_TEMP);
862 imgp->auxargs = NULL;
863 return (ENOEXEC);
864 }
865 note_end = (const Elf_Note *)(imgp->image_header + pnote->p_offset +
866 pnote->p_filesz);
867 while (note < note_end) {
868 if (note->n_namesz == sizeof(FREEBSD_ABI_VENDOR) &&
869 note->n_descsz == sizeof(int32_t) &&
870 note->n_type == 1 /* ABI_NOTETYPE */) {
871 note_name = (const char *)(note + 1);
872 if (strncmp(FREEBSD_ABI_VENDOR, note_name,
873 sizeof(FREEBSD_ABI_VENDOR)) == 0) {
874 imgp->proc->p_osrel = *(const int32_t *)
875 (note_name +
876 round_page_ps(sizeof(FREEBSD_ABI_VENDOR),
877 sizeof(Elf32_Addr)));
878 break;
879 }
880 }
881 note = (const Elf_Note *)((const char *)(note + 1) +
882 round_page_ps(note->n_namesz, sizeof(Elf32_Addr)) +
883 round_page_ps(note->n_descsz, sizeof(Elf32_Addr)));
884 }
885 }
886
887 return (error);
888 }
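
/*
 * Editor's sketch (not part of this file): the same ".note.ABI-tag" walk
 * as above, written as a self-contained userland helper.  It assumes the
 * glibc/BSD <elf.h> Elf32_Nhdr layout; find_osrel() is a made-up name.
 * Notes pack a 12-byte header, then the name and desc blobs, each padded
 * to 4 bytes.
 */
#if 0
#include <elf.h>
#include <stdint.h>
#include <string.h>

static int32_t
find_osrel(const char *p, const char *end)
{
	const Elf32_Nhdr *n;
	const char *name, *desc;

	while (p + sizeof(*n) <= end) {
		n = (const Elf32_Nhdr *)p;
		name = p + sizeof(*n);
		desc = name + ((n->n_namesz + 3) & ~3u);
		if (n->n_type == 1 && n->n_namesz == sizeof("FreeBSD") &&
		    memcmp(name, "FreeBSD", sizeof("FreeBSD")) == 0 &&
		    n->n_descsz == sizeof(int32_t)) {
			int32_t osrel;

			memcpy(&osrel, desc, sizeof(osrel));
			return (osrel);	/* e.g. 700055 for 7.0 */
		}
		p = desc + ((n->n_descsz + 3) & ~3u);
	}
	return (-1);	/* no FreeBSD ABI note found */
}
#endif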
889
890 #define suword __CONCAT(suword, __ELF_WORD_SIZE)
891
892 int
893 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
894 {
895 Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
896 Elf_Addr *base;
897 Elf_Addr *pos;
898
899 base = (Elf_Addr *)*stack_base;
900 pos = base + (imgp->args->argc + imgp->args->envc + 2);
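	/*
	 * Editor's note: on entry *stack_base points at argv[0], so
	 * skipping argc + envc pointers plus the two NULLs terminating
	 * argv[] and envp[] leaves pos at the first aux vector slot.
	 * argc itself is pushed just below argv[0] at the bottom of
	 * this function.
	 */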
901
902 if (args->trace) {
903 AUXARGS_ENTRY(pos, AT_DEBUG, 1);
904 }
905 if (args->execfd != -1) {
906 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
907 }
908 AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
909 AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
910 AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
911 AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
912 AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
913 AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
914 AUXARGS_ENTRY(pos, AT_BASE, args->base);
915 AUXARGS_ENTRY(pos, AT_NULL, 0);
916
917 free(imgp->auxargs, M_TEMP);
918 imgp->auxargs = NULL;
919
920 base--;
921 suword(base, (long)imgp->args->argc);
922 *stack_base = (register_t *)base;
923 return (0);
924 }
925
926 /*
927 * Code for generating ELF core dumps.
928 */
929
930 typedef void (*segment_callback)(vm_map_entry_t, void *);
931
932 /* Closure for cb_put_phdr(). */
933 struct phdr_closure {
934 Elf_Phdr *phdr; /* Program header to fill in */
935 Elf_Off offset; /* Offset of segment in core file */
936 };
937
938 /* Closure for cb_size_segment(). */
939 struct sseg_closure {
940 int count; /* Count of writable segments. */
941 size_t size; /* Total size of all writable segments. */
942 };
943
944 static void cb_put_phdr(vm_map_entry_t, void *);
945 static void cb_size_segment(vm_map_entry_t, void *);
946 static void each_writable_segment(struct thread *, segment_callback, void *);
947 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
948 int, void *, size_t);
949 static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
950 static void __elfN(putnote)(void *, size_t *, const char *, int,
951 const void *, size_t);
952
953 int
954 __elfN(coredump)(td, vp, limit)
955 struct thread *td;
956 struct vnode *vp;
957 off_t limit;
958 {
959 struct ucred *cred = td->td_ucred;
960 int error = 0;
961 struct sseg_closure seginfo;
962 void *hdr;
963 size_t hdrsize;
964
965 /* Size the program segments. */
966 seginfo.count = 0;
967 seginfo.size = 0;
968 each_writable_segment(td, cb_size_segment, &seginfo);
969
970 /*
971 * Calculate the size of the core file header area by making
972 * a dry run of generating it. Nothing is written, but the
973 * size is calculated.
974 */
975 hdrsize = 0;
976 __elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
977
978 if (hdrsize + seginfo.size >= limit)
979 return (EFAULT);
980
981 /*
982 * Allocate memory for building the header, fill it up,
983 * and write it out.
984 */
985 hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
986 if (hdr == NULL) {
987 return (EINVAL);
988 }
989 error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
990
991 /* Write the contents of all of the writable segments. */
992 if (error == 0) {
993 Elf_Phdr *php;
994 off_t offset;
995 int i;
996
997 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
998 offset = hdrsize;
999 for (i = 0; i < seginfo.count; i++) {
1000 error = vn_rdwr_inchunks(UIO_WRITE, vp,
1001 (caddr_t)(uintptr_t)php->p_vaddr,
1002 php->p_filesz, offset, UIO_USERSPACE,
1003 IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1004 curthread); /* XXXKSE */
1005 if (error != 0)
1006 break;
1007 offset += php->p_filesz;
1008 php++;
1009 }
1010 }
1011 free(hdr, M_TEMP);
1012
1013 return (error);
1014 }
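
/*
 * Editor's note: the core file produced above is laid out as the
 * puthdr() dry run sized it:
 *
 *	Elf_Ehdr | (numsegs + 1) Elf_Phdr | PT_NOTE data | pad to page |
 *	segment 0 | segment 1 | ... | segment numsegs-1
 */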
1015
1016 /*
1017 * A callback for each_writable_segment() to write out the segment's
1018 * program header entry.
1019 */
1020 static void
1021 cb_put_phdr(entry, closure)
1022 vm_map_entry_t entry;
1023 void *closure;
1024 {
1025 struct phdr_closure *phc = (struct phdr_closure *)closure;
1026 Elf_Phdr *phdr = phc->phdr;
1027
1028 phc->offset = round_page(phc->offset);
1029
1030 phdr->p_type = PT_LOAD;
1031 phdr->p_offset = phc->offset;
1032 phdr->p_vaddr = entry->start;
1033 phdr->p_paddr = 0;
1034 phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1035 phdr->p_align = PAGE_SIZE;
1036 phdr->p_flags = 0;
1037 if (entry->protection & VM_PROT_READ)
1038 phdr->p_flags |= PF_R;
1039 if (entry->protection & VM_PROT_WRITE)
1040 phdr->p_flags |= PF_W;
1041 if (entry->protection & VM_PROT_EXECUTE)
1042 phdr->p_flags |= PF_X;
1043
1044 phc->offset += phdr->p_filesz;
1045 phc->phdr++;
1046 }
1047
1048 /*
1049 * A callback for each_writable_segment() to gather information about
1050 * the number of segments and their total size.
1051 */
1052 static void
1053 cb_size_segment(entry, closure)
1054 vm_map_entry_t entry;
1055 void *closure;
1056 {
1057 struct sseg_closure *ssc = (struct sseg_closure *)closure;
1058
1059 ssc->count++;
1060 ssc->size += entry->end - entry->start;
1061 }
1062
1063 /*
1064 * For each writable segment in the process's memory map, call the given
1065 * function with a pointer to the map entry and some arbitrary
1066 * caller-supplied data.
1067 */
1068 static void
1069 each_writable_segment(td, func, closure)
1070 struct thread *td;
1071 segment_callback func;
1072 void *closure;
1073 {
1074 struct proc *p = td->td_proc;
1075 vm_map_t map = &p->p_vmspace->vm_map;
1076 vm_map_entry_t entry;
1077 vm_object_t backing_object, object;
1078 boolean_t ignore_entry;
1079
1080 vm_map_lock_read(map);
1081 for (entry = map->header.next; entry != &map->header;
1082 entry = entry->next) {
1083 /*
1084 * Don't dump inaccessible mappings, deal with legacy
1085 * coredump mode.
1086 *
1087 * Note that read-only segments related to the elf binary
1088 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1089 * need to arbitrarily ignore such segments.
1090 */
1091 if (elf_legacy_coredump) {
1092 if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1093 continue;
1094 } else {
1095 if ((entry->protection & VM_PROT_ALL) == 0)
1096 continue;
1097 }
1098
1099 /*
1100		 * Don't include a memory segment in the coredump if
1101 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1102 * madvise(2). Do not dump submaps (i.e. parts of the
1103 * kernel map).
1104 */
1105 if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1106 continue;
1107
1108 if ((object = entry->object.vm_object) == NULL)
1109 continue;
1110
1111 /* Ignore memory-mapped devices and such things. */
1112 VM_OBJECT_LOCK(object);
1113 while ((backing_object = object->backing_object) != NULL) {
1114 VM_OBJECT_LOCK(backing_object);
1115 VM_OBJECT_UNLOCK(object);
1116 object = backing_object;
1117 }
1118 ignore_entry = object->type != OBJT_DEFAULT &&
1119 object->type != OBJT_SWAP && object->type != OBJT_VNODE;
1120 VM_OBJECT_UNLOCK(object);
1121 if (ignore_entry)
1122 continue;
1123
1124 (*func)(entry, closure);
1125 }
1126 vm_map_unlock_read(map);
1127 }
1128
1129 /*
1130 * Write the core file header to the file, including padding up to
1131 * the page boundary.
1132 */
1133 static int
1134 __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
1135 struct thread *td;
1136 struct vnode *vp;
1137 struct ucred *cred;
1138 int numsegs;
1139 size_t hdrsize;
1140 void *hdr;
1141 {
1142 size_t off;
1143
1144 /* Fill in the header. */
1145 bzero(hdr, hdrsize);
1146 off = 0;
1147 __elfN(puthdr)(td, hdr, &off, numsegs);
1148
1149 /* Write it to the core file. */
1150 return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1151 UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1152 td)); /* XXXKSE */
1153 }
1154
1155 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1156 typedef struct prstatus32 elf_prstatus_t;
1157 typedef struct prpsinfo32 elf_prpsinfo_t;
1158 typedef struct fpreg32 elf_prfpregset_t;
1159 typedef struct fpreg32 elf_fpregset_t;
1160 typedef struct reg32 elf_gregset_t;
1161 #else
1162 typedef prstatus_t elf_prstatus_t;
1163 typedef prpsinfo_t elf_prpsinfo_t;
1164 typedef prfpregset_t elf_prfpregset_t;
1165 typedef prfpregset_t elf_fpregset_t;
1166 typedef gregset_t elf_gregset_t;
1167 #endif
1168
1169 static void
1170 __elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
1171 {
1172 struct {
1173 elf_prstatus_t status;
1174 elf_prfpregset_t fpregset;
1175 elf_prpsinfo_t psinfo;
1176 } *tempdata;
1177 elf_prstatus_t *status;
1178 elf_prfpregset_t *fpregset;
1179 elf_prpsinfo_t *psinfo;
1180 struct proc *p;
1181 struct thread *thr;
1182 size_t ehoff, noteoff, notesz, phoff;
1183
1184 p = td->td_proc;
1185
1186 ehoff = *off;
1187 *off += sizeof(Elf_Ehdr);
1188
1189 phoff = *off;
1190 *off += (numsegs + 1) * sizeof(Elf_Phdr);
1191
1192 noteoff = *off;
1193 /*
1194 * Don't allocate space for the notes if we're just calculating
1195 * the size of the header. We also don't collect the data.
1196 */
1197 if (dst != NULL) {
1198 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
1199 status = &tempdata->status;
1200 fpregset = &tempdata->fpregset;
1201 psinfo = &tempdata->psinfo;
1202 } else {
1203 tempdata = NULL;
1204 status = NULL;
1205 fpregset = NULL;
1206 psinfo = NULL;
1207 }
1208
1209 if (dst != NULL) {
1210 psinfo->pr_version = PRPSINFO_VERSION;
1211 psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
1212 strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1213 /*
1214 * XXX - We don't fill in the command line arguments properly
1215 * yet.
1216 */
1217 strlcpy(psinfo->pr_psargs, p->p_comm,
1218 sizeof(psinfo->pr_psargs));
1219 }
1220 __elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1221 sizeof *psinfo);
1222
1223 /*
1224 * To have the debugger select the right thread (LWP) as the initial
1225 * thread, we dump the state of the thread passed to us in td first.
1226 * This is the thread that causes the core dump and thus likely to
1227 * be the right thread one wants to have selected in the debugger.
1228 */
1229 thr = td;
1230 while (thr != NULL) {
1231 if (dst != NULL) {
1232 status->pr_version = PRSTATUS_VERSION;
1233 status->pr_statussz = sizeof(elf_prstatus_t);
1234 status->pr_gregsetsz = sizeof(elf_gregset_t);
1235 status->pr_fpregsetsz = sizeof(elf_fpregset_t);
1236 status->pr_osreldate = osreldate;
1237 status->pr_cursig = p->p_sig;
1238 status->pr_pid = thr->td_tid;
1239 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1240 fill_regs32(thr, &status->pr_reg);
1241 fill_fpregs32(thr, fpregset);
1242 #else
1243 fill_regs(thr, &status->pr_reg);
1244 fill_fpregs(thr, fpregset);
1245 #endif
1246 }
1247 __elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1248 sizeof *status);
1249 __elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1250 sizeof *fpregset);
1251 /*
1252 * Allow for MD specific notes, as well as any MD
1253 * specific preparations for writing MI notes.
1254 */
1255 __elfN(dump_thread)(thr, dst, off);
1256
1257 thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
1258 TAILQ_NEXT(thr, td_plist);
1259 if (thr == td)
1260 thr = TAILQ_NEXT(thr, td_plist);
1261 }
1262
1263 notesz = *off - noteoff;
1264
1265 if (dst != NULL)
1266 free(tempdata, M_TEMP);
1267
1268 /* Align up to a page boundary for the program segments. */
1269 *off = round_page(*off);
1270
1271 if (dst != NULL) {
1272 Elf_Ehdr *ehdr;
1273 Elf_Phdr *phdr;
1274 struct phdr_closure phc;
1275
1276 /*
1277 * Fill in the ELF header.
1278 */
1279 ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1280 ehdr->e_ident[EI_MAG0] = ELFMAG0;
1281 ehdr->e_ident[EI_MAG1] = ELFMAG1;
1282 ehdr->e_ident[EI_MAG2] = ELFMAG2;
1283 ehdr->e_ident[EI_MAG3] = ELFMAG3;
1284 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1285 ehdr->e_ident[EI_DATA] = ELF_DATA;
1286 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1287 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1288 ehdr->e_ident[EI_ABIVERSION] = 0;
1289 ehdr->e_ident[EI_PAD] = 0;
1290 ehdr->e_type = ET_CORE;
1291 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1292 ehdr->e_machine = EM_386;
1293 #else
1294 ehdr->e_machine = ELF_ARCH;
1295 #endif
1296 ehdr->e_version = EV_CURRENT;
1297 ehdr->e_entry = 0;
1298 ehdr->e_phoff = phoff;
1299 ehdr->e_flags = 0;
1300 ehdr->e_ehsize = sizeof(Elf_Ehdr);
1301 ehdr->e_phentsize = sizeof(Elf_Phdr);
1302 ehdr->e_phnum = numsegs + 1;
1303 ehdr->e_shentsize = sizeof(Elf_Shdr);
1304 ehdr->e_shnum = 0;
1305 ehdr->e_shstrndx = SHN_UNDEF;
1306
1307 /*
1308 * Fill in the program header entries.
1309 */
1310 phdr = (Elf_Phdr *)((char *)dst + phoff);
1311
1312		/* The note segment. */
1313 phdr->p_type = PT_NOTE;
1314 phdr->p_offset = noteoff;
1315 phdr->p_vaddr = 0;
1316 phdr->p_paddr = 0;
1317 phdr->p_filesz = notesz;
1318 phdr->p_memsz = 0;
1319 phdr->p_flags = 0;
1320 phdr->p_align = 0;
1321 phdr++;
1322
1323 /* All the writable segments from the program. */
1324 phc.phdr = phdr;
1325 phc.offset = *off;
1326 each_writable_segment(td, cb_put_phdr, &phc);
1327 }
1328 }
1329
1330 static void
1331 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1332 const void *desc, size_t descsz)
1333 {
1334 Elf_Note note;
1335
1336 note.n_namesz = strlen(name) + 1;
1337 note.n_descsz = descsz;
1338 note.n_type = type;
1339 if (dst != NULL)
1340 bcopy(¬e, (char *)dst + *off, sizeof note);
1341 *off += sizeof note;
1342 if (dst != NULL)
1343 bcopy(name, (char *)dst + *off, note.n_namesz);
1344 *off += roundup2(note.n_namesz, sizeof(Elf_Size));
1345 if (dst != NULL)
1346 bcopy(desc, (char *)dst + *off, note.n_descsz);
1347 *off += roundup2(note.n_descsz, sizeof(Elf_Size));
1348 }
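
/*
 * Editor's worked example: a note named "FreeBSD" (n_namesz = 8) with a
 * 4-byte descriptor advances *off by 12 (header) + 8 (name) +
 * roundup2(4, sizeof(Elf_Size)) bytes: 24 total with a 4-byte Elf_Size,
 * 28 with an 8-byte one.
 */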
1349
1350 /*
1351 * Tell kern_execve.c about it, with a little help from the linker.
1352 */
1353 static struct execsw __elfN(execsw) = {
1354 __CONCAT(exec_, __elfN(imgact)),
1355 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
1356 };
1357 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));