FreeBSD/Linux Kernel Cross Reference
sys/kern/imgact_elf.c
1 /*-
2 * Copyright (c) 2000 David O'Brien
3 * Copyright (c) 1995-1996 Søren Schmidt
4 * Copyright (c) 1996 Peter Wemm
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer
12 * in this position and unchanged.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include "opt_compat.h"
35
36 #include <sys/param.h>
37 #include <sys/exec.h>
38 #include <sys/fcntl.h>
39 #include <sys/imgact.h>
40 #include <sys/imgact_elf.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mount.h>
45 #include <sys/mutex.h>
46 #include <sys/mman.h>
47 #include <sys/namei.h>
48 #include <sys/pioctl.h>
49 #include <sys/proc.h>
50 #include <sys/procfs.h>
51 #include <sys/resourcevar.h>
52 #include <sys/sf_buf.h>
53 #include <sys/systm.h>
54 #include <sys/signalvar.h>
55 #include <sys/stat.h>
56 #include <sys/sx.h>
57 #include <sys/syscall.h>
58 #include <sys/sysctl.h>
59 #include <sys/sysent.h>
60 #include <sys/vnode.h>
61
62 #include <vm/vm.h>
63 #include <vm/vm_kern.h>
64 #include <vm/vm_param.h>
65 #include <vm/pmap.h>
66 #include <vm/vm_map.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_extern.h>
69
70 #include <machine/elf.h>
71 #include <machine/md_var.h>
72
73 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
74 #include <machine/fpu.h>
75 #include <compat/ia32/ia32_reg.h>
76 #endif
77
78 #define OLD_EI_BRAND 8
79
80 static int __elfN(check_header)(const Elf_Ehdr *hdr);
81 static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
82 const char *interp);
83 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
84 u_long *entry, size_t pagesize);
85 static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
86 vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
87 vm_prot_t prot, size_t pagesize);
88 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
89
90 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
91 "");
92
93 int __elfN(fallback_brand) = -1;
94 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
95 fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
96 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
97 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
98 &__elfN(fallback_brand));
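/*
 * Annotation (not in the original source): depending on __ELF_WORD_SIZE this
 * creates the kern.elf32 or kern.elf64 sysctl node, so the brand of last
 * resort can be set at runtime, e.g. "sysctl kern.elf64.fallback_brand=9"
 * (ELFOSABI_FREEBSD), or at boot via the matching loader tunable
 * "kern.elf64.fallback_brand".  The default of -1 disables the fallback.
 */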
99
100 static int elf_trace = 0;
101 SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");
102
103 static int elf_legacy_coredump = 0;
104 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
105 &elf_legacy_coredump, 0, "");
106
107 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
108
109 #define trunc_page_ps(va, ps) ((va) & ~(ps - 1))
110 #define round_page_ps(va, ps) (((va) + (ps - 1)) & ~(ps - 1))
111 #define aligned(a, t) (trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a))
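/*
 * Annotation (illustrative, not in the original source): with a 4K "ps",
 * trunc_page_ps(0x12345, 0x1000) == 0x12000 and
 * round_page_ps(0x12345, 0x1000) == 0x13000; aligned(p, Elf_Addr) is true
 * only when p is a multiple of sizeof(Elf_Addr).  Unlike trunc_page() and
 * round_page(), these macros take the page size passed in from the
 * brand's sysentvec rather than the native PAGE_SIZE.
 */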
112
113 int
114 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
115 {
116 int i;
117
118 for (i = 0; i < MAX_BRANDS; i++) {
119 if (elf_brand_list[i] == NULL) {
120 elf_brand_list[i] = entry;
121 break;
122 }
123 }
124 if (i == MAX_BRANDS)
125 return (-1);
126 return (0);
127 }
128
129 int
130 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
131 {
132 int i;
133
134 for (i = 0; i < MAX_BRANDS; i++) {
135 if (elf_brand_list[i] == entry) {
136 elf_brand_list[i] = NULL;
137 break;
138 }
139 }
140 if (i == MAX_BRANDS)
141 return (-1);
142 return (0);
143 }
144
145 int
146 __elfN(brand_inuse)(Elf_Brandinfo *entry)
147 {
148 struct proc *p;
149 int rval = FALSE;
150
151 sx_slock(&allproc_lock);
152 LIST_FOREACH(p, &allproc, p_list) {
153 if (p->p_sysent == entry->sysvec) {
154 rval = TRUE;
155 break;
156 }
157 }
158 sx_sunlock(&allproc_lock);
159
160 return (rval);
161 }
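/*
 * Annotation (not in the original source): ABI modules (the native FreeBSD
 * sysvec as well as emulation layers) typically declare a static
 * Elf_Brandinfo -- filling in the fields used below: brand, machine,
 * compat_3_brand, emul_path, interp_path, interp_newpath and sysvec -- and
 * register it at load time with __elfN(insert_brand_entry)().  On unload
 * they call __elfN(remove_brand_entry)() once __elfN(brand_inuse)() reports
 * that no process is still running on that brand's sysentvec.
 */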
162
163 static Elf_Brandinfo *
164 __elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
165 {
166 Elf_Brandinfo *bi;
167 int i;
168
169 /*
170 * We support three types of branding -- (1) the ELF EI_OSABI field
171 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
172 	 * branding within the ELF header, and (3) the path in the `interp_path'
173 * field. We should also look for an ".note.ABI-tag" ELF section now
174 * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
175 */
176
177 /* If the executable has a brand, search for it in the brand list. */
178 for (i = 0; i < MAX_BRANDS; i++) {
179 bi = elf_brand_list[i];
180 if (bi != NULL && hdr->e_machine == bi->machine &&
181 (hdr->e_ident[EI_OSABI] == bi->brand ||
182 strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
183 bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
184 return (bi);
185 }
186
187 /* Lacking a known brand, search for a recognized interpreter. */
188 if (interp != NULL) {
189 for (i = 0; i < MAX_BRANDS; i++) {
190 bi = elf_brand_list[i];
191 if (bi != NULL && hdr->e_machine == bi->machine &&
192 strcmp(interp, bi->interp_path) == 0)
193 return (bi);
194 }
195 }
196
197 /* Lacking a recognized interpreter, try the default brand */
198 for (i = 0; i < MAX_BRANDS; i++) {
199 bi = elf_brand_list[i];
200 if (bi != NULL && hdr->e_machine == bi->machine &&
201 __elfN(fallback_brand) == bi->brand)
202 return (bi);
203 }
204 return (NULL);
205 }
206
207 static int
208 __elfN(check_header)(const Elf_Ehdr *hdr)
209 {
210 Elf_Brandinfo *bi;
211 int i;
212
213 if (!IS_ELF(*hdr) ||
214 hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
215 hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
216 hdr->e_ident[EI_VERSION] != EV_CURRENT ||
217 hdr->e_phentsize != sizeof(Elf_Phdr) ||
218 hdr->e_version != ELF_TARG_VER)
219 return (ENOEXEC);
220
221 /*
222 * Make sure we have at least one brand for this machine.
223 */
224
225 for (i = 0; i < MAX_BRANDS; i++) {
226 bi = elf_brand_list[i];
227 if (bi != NULL && bi->machine == hdr->e_machine)
228 break;
229 }
230 if (i == MAX_BRANDS)
231 return (ENOEXEC);
232
233 return (0);
234 }
235
236 static int
237 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
238 vm_offset_t start, vm_offset_t end, vm_prot_t prot)
239 {
240 struct sf_buf *sf;
241 int error;
242 vm_offset_t off;
243
244 /*
245 * Create the page if it doesn't exist yet. Ignore errors.
246 */
247 vm_map_lock(map);
248 vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
249 VM_PROT_ALL, VM_PROT_ALL, 0);
250 vm_map_unlock(map);
251
252 /*
253 * Find the page from the underlying object.
254 */
255 if (object) {
256 sf = vm_imgact_map_page(object, offset);
257 if (sf == NULL)
258 return (KERN_FAILURE);
259 off = offset - trunc_page(offset);
260 error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
261 end - start);
262 vm_imgact_unmap_page(sf);
263 if (error) {
264 return (KERN_FAILURE);
265 }
266 }
267
268 return (KERN_SUCCESS);
269 }
270
271 static int
272 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
273 vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
274 {
275 struct sf_buf *sf;
276 vm_offset_t off;
277 vm_size_t sz;
278 int error, rv;
279
280 if (start != trunc_page(start)) {
281 rv = __elfN(map_partial)(map, object, offset, start,
282 round_page(start), prot);
283 if (rv)
284 return (rv);
285 offset += round_page(start) - start;
286 start = round_page(start);
287 }
288 if (end != round_page(end)) {
289 rv = __elfN(map_partial)(map, object, offset +
290 trunc_page(end) - start, trunc_page(end), end, prot);
291 if (rv)
292 return (rv);
293 end = trunc_page(end);
294 }
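	/*
	 * Annotation (not in the original source): at this point both
	 * "start" and "end" are page aligned; any unaligned head and tail
	 * were handled above by copying through __elfN(map_partial)().
	 * What remains is either copied page by page (when the file offset
	 * itself is not page aligned) or mapped directly from the backing
	 * object below.
	 */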
295 if (end > start) {
296 if (offset & PAGE_MASK) {
297 /*
298 * The mapping is not page aligned. This means we have
299 * to copy the data. Sigh.
300 */
301 rv = vm_map_find(map, NULL, 0, &start, end - start,
302 FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
303 if (rv)
304 return (rv);
305 if (object == NULL)
306 return (KERN_SUCCESS);
307 for (; start < end; start += sz) {
308 sf = vm_imgact_map_page(object, offset);
309 if (sf == NULL)
310 return (KERN_FAILURE);
311 off = offset - trunc_page(offset);
312 sz = end - start;
313 if (sz > PAGE_SIZE - off)
314 sz = PAGE_SIZE - off;
315 error = copyout((caddr_t)sf_buf_kva(sf) + off,
316 (caddr_t)start, sz);
317 vm_imgact_unmap_page(sf);
318 if (error) {
319 return (KERN_FAILURE);
320 }
321 offset += sz;
322 }
323 rv = KERN_SUCCESS;
324 } else {
325 vm_object_reference(object);
326 vm_map_lock(map);
327 rv = vm_map_insert(map, object, offset, start, end,
328 prot, VM_PROT_ALL, cow);
329 vm_map_unlock(map);
330 if (rv != KERN_SUCCESS)
331 vm_object_deallocate(object);
332 }
333 return (rv);
334 } else {
335 return (KERN_SUCCESS);
336 }
337 }
338
339 static int
340 __elfN(load_section)(struct vmspace *vmspace,
341 vm_object_t object, vm_offset_t offset,
342 caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
343 size_t pagesize)
344 {
345 struct sf_buf *sf;
346 size_t map_len;
347 vm_offset_t map_addr;
348 int error, rv, cow;
349 size_t copy_len;
350 vm_offset_t file_addr;
351
352 /*
353 * It's necessary to fail if the filsz + offset taken from the
354 * header is greater than the actual file pager object's size.
355 * If we were to allow this, then the vm_map_find() below would
356 * walk right off the end of the file object and into the ether.
357 *
358 * While I'm here, might as well check for something else that
359 * is invalid: filsz cannot be greater than memsz.
360 */
361 if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
362 filsz > memsz) {
363 uprintf("elf_load_section: truncated ELF file\n");
364 return (ENOEXEC);
365 }
366
367 map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
368 file_addr = trunc_page_ps(offset, pagesize);
369
370 /*
371 * We have two choices. We can either clear the data in the last page
372 * of an oversized mapping, or we can start the anon mapping a page
373 * early and copy the initialized data into that first page. We
374 	 * choose the second.
375 */
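	/*
	 * Illustrative example (annotation, not in the original source):
	 * with a 4K pagesize, a page-aligned offset and vmaddr, filsz of
	 * 0x1800 and memsz of 0x3000, the file-backed mapping below covers
	 * [vmaddr, vmaddr + 0x1000), the anonymous mapping set up further
	 * down covers [vmaddr + 0x1000, vmaddr + 0x3000), and the trailing
	 * 0x800 bytes of file data are copied into the first anonymous page
	 * before the final vm_map_protect().
	 */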
376 if (memsz > filsz)
377 map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
378 else
379 map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
380
381 if (map_len != 0) {
382 /* cow flags: don't dump readonly sections in core */
383 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
384 (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
385
386 rv = __elfN(map_insert)(&vmspace->vm_map,
387 object,
388 file_addr, /* file offset */
389 map_addr, /* virtual start */
390 map_addr + map_len,/* virtual end */
391 prot,
392 cow);
393 if (rv != KERN_SUCCESS)
394 return (EINVAL);
395
396 /* we can stop now if we've covered it all */
397 if (memsz == filsz) {
398 return (0);
399 }
400 }
401
402
403 /*
404 * We have to get the remaining bit of the file into the first part
405 * of the oversized map segment. This is normally because the .data
406 * segment in the file is extended to provide bss. It's a neat idea
407 * to try and save a page, but it's a pain in the behind to implement.
408 */
409 copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
410 map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
411 map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
412 map_addr;
413
414 /* This had damn well better be true! */
415 if (map_len != 0) {
416 rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
417 map_addr + map_len, VM_PROT_ALL, 0);
418 if (rv != KERN_SUCCESS) {
419 return (EINVAL);
420 }
421 }
422
423 if (copy_len != 0) {
424 vm_offset_t off;
425
426 sf = vm_imgact_map_page(object, offset + filsz);
427 if (sf == NULL)
428 return (EIO);
429
430 /* send the page fragment to user space */
431 off = trunc_page_ps(offset + filsz, pagesize) -
432 trunc_page(offset + filsz);
433 error = copyout((caddr_t)sf_buf_kva(sf) + off,
434 (caddr_t)map_addr, copy_len);
435 vm_imgact_unmap_page(sf);
436 if (error) {
437 return (error);
438 }
439 }
440
441 /*
442 * set it to the specified protection.
443 * XXX had better undo the damage from pasting over the cracks here!
444 */
445 vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
446 round_page(map_addr + map_len), prot, FALSE);
447
448 return (0);
449 }
450
451 /*
452 * Load the file "file" into memory. It may be either a shared object
453 * or an executable.
454 *
455 * The "addr" reference parameter is in/out. On entry, it specifies
456 * the address where a shared object should be loaded. If the file is
457 * an executable, this value is ignored. On exit, "addr" specifies
458 * where the file was actually loaded.
459 *
460 * The "entry" reference parameter is out only. On exit, it specifies
461 * the entry point for the loaded file.
462 */
463 static int
464 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
465 u_long *entry, size_t pagesize)
466 {
467 struct {
468 struct nameidata nd;
469 struct vattr attr;
470 struct image_params image_params;
471 } *tempdata;
472 const Elf_Ehdr *hdr = NULL;
473 const Elf_Phdr *phdr = NULL;
474 struct nameidata *nd;
475 struct vmspace *vmspace = p->p_vmspace;
476 struct vattr *attr;
477 struct image_params *imgp;
478 vm_prot_t prot;
479 u_long rbase;
480 u_long base_addr = 0;
481 int vfslocked, error, i, numsegs;
482
483 if (curthread->td_proc != p)
484 panic("elf_load_file - thread"); /* XXXKSE DIAGNOSTIC */
485
486 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
487 nd = &tempdata->nd;
488 attr = &tempdata->attr;
489 imgp = &tempdata->image_params;
490
491 /*
492 * Initialize part of the common data
493 */
494 imgp->proc = p;
495 imgp->attr = attr;
496 imgp->firstpage = NULL;
497 imgp->image_header = NULL;
498 imgp->object = NULL;
499 imgp->execlabel = NULL;
500
501 /* XXXKSE */
502 NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
503 curthread);
504 vfslocked = 0;
505 if ((error = namei(nd)) != 0) {
506 nd->ni_vp = NULL;
507 goto fail;
508 }
509 vfslocked = NDHASGIANT(nd);
510 NDFREE(nd, NDF_ONLY_PNBUF);
511 imgp->vp = nd->ni_vp;
512
513 /*
514 * Check permissions, modes, uid, etc on the file, and "open" it.
515 */
516 error = exec_check_permissions(imgp);
517 if (error)
518 goto fail;
519
520 error = exec_map_first_page(imgp);
521 if (error)
522 goto fail;
523
524 /*
525 * Also make certain that the interpreter stays the same, so set
526 * its VV_TEXT flag, too.
527 */
528 nd->ni_vp->v_vflag |= VV_TEXT;
529
530 imgp->object = nd->ni_vp->v_object;
531
532 hdr = (const Elf_Ehdr *)imgp->image_header;
533 if ((error = __elfN(check_header)(hdr)) != 0)
534 goto fail;
535 if (hdr->e_type == ET_DYN)
536 rbase = *addr;
537 else if (hdr->e_type == ET_EXEC)
538 rbase = 0;
539 else {
540 error = ENOEXEC;
541 goto fail;
542 }
543
544 /* Only support headers that fit within first page for now */
545 /* (multiplication of two Elf_Half fields will not overflow) */
546 if ((hdr->e_phoff > PAGE_SIZE) ||
547 (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
548 error = ENOEXEC;
549 goto fail;
550 }
551
552 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
553 if (!aligned(phdr, Elf_Addr)) {
554 error = ENOEXEC;
555 goto fail;
556 }
557
558 for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
559 if (phdr[i].p_type == PT_LOAD) { /* Loadable segment */
560 prot = 0;
561 if (phdr[i].p_flags & PF_X)
562 prot |= VM_PROT_EXECUTE;
563 if (phdr[i].p_flags & PF_W)
564 prot |= VM_PROT_WRITE;
565 if (phdr[i].p_flags & PF_R)
566 prot |= VM_PROT_READ;
567
568 if ((error = __elfN(load_section)(vmspace,
569 imgp->object, phdr[i].p_offset,
570 (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
571 phdr[i].p_memsz, phdr[i].p_filesz, prot,
572 pagesize)) != 0)
573 goto fail;
574 /*
575 * Establish the base address if this is the
576 * first segment.
577 */
578 if (numsegs == 0)
579 base_addr = trunc_page(phdr[i].p_vaddr +
580 rbase);
581 numsegs++;
582 }
583 }
584 *addr = base_addr;
585 *entry = (unsigned long)hdr->e_entry + rbase;
586
587 fail:
588 if (imgp->firstpage)
589 exec_unmap_first_page(imgp);
590
591 if (nd->ni_vp)
592 vput(nd->ni_vp);
593
594 VFS_UNLOCK_GIANT(vfslocked);
595 free(tempdata, M_TEMP);
596
597 return (error);
598 }
599
600 static int
601 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
602 {
603 const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
604 const Elf_Phdr *phdr;
605 Elf_Auxargs *elf_auxargs;
606 struct vmspace *vmspace;
607 vm_prot_t prot;
608 u_long text_size = 0, data_size = 0, total_size = 0;
609 u_long text_addr = 0, data_addr = 0;
610 u_long seg_size, seg_addr;
611 u_long addr, entry = 0, proghdr = 0;
612 int error = 0, i;
613 const char *interp = NULL;
614 Elf_Brandinfo *brand_info;
615 char *path;
616 struct thread *td = curthread;
617 struct sysentvec *sv;
618
619 /*
620 * Do we have a valid ELF header ?
621 *
622 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
623 	 * if a particular brand doesn't support it.
624 */
625 if (__elfN(check_header)(hdr) != 0 ||
626 (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
627 return (-1);
628
629 /*
630 * From here on down, we return an errno, not -1, as we've
631 * detected an ELF file.
632 */
633
634 if ((hdr->e_phoff > PAGE_SIZE) ||
635 (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
636 /* Only support headers in first page for now */
637 return (ENOEXEC);
638 }
639 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
640 if (!aligned(phdr, Elf_Addr))
641 return (ENOEXEC);
642 for (i = 0; i < hdr->e_phnum; i++) {
643 if (phdr[i].p_type == PT_INTERP) {
644 /* Path to interpreter */
645 if (phdr[i].p_filesz > MAXPATHLEN ||
646 phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
647 return (ENOEXEC);
648 interp = imgp->image_header + phdr[i].p_offset;
649 break;
650 }
651 }
652
653 brand_info = __elfN(get_brandinfo)(hdr, interp);
654 if (brand_info == NULL) {
655 uprintf("ELF binary type \"%u\" not known.\n",
656 hdr->e_ident[EI_OSABI]);
657 return (ENOEXEC);
658 }
659 if (hdr->e_type == ET_DYN && brand_info->brand != ELFOSABI_LINUX)
660 return (ENOEXEC);
661 sv = brand_info->sysvec;
662 if (interp != NULL && brand_info->interp_newpath != NULL)
663 interp = brand_info->interp_newpath;
664
665 /*
666 * Avoid a possible deadlock if the current address space is destroyed
667 * and that address space maps the locked vnode. In the common case,
668 * the locked vnode's v_usecount is decremented but remains greater
669 * than zero. Consequently, the vnode lock is not needed by vrele().
670 * However, in cases where the vnode lock is external, such as nullfs,
671 * v_usecount may become zero.
672 */
673 VOP_UNLOCK(imgp->vp, 0, td);
674
675 exec_new_vmspace(imgp, sv);
676 imgp->proc->p_sysent = sv;
677
678 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
679
680 vmspace = imgp->proc->p_vmspace;
681
682 for (i = 0; i < hdr->e_phnum; i++) {
683 switch (phdr[i].p_type) {
684 case PT_LOAD: /* Loadable segment */
685 prot = 0;
686 if (phdr[i].p_flags & PF_X)
687 prot |= VM_PROT_EXECUTE;
688 if (phdr[i].p_flags & PF_W)
689 prot |= VM_PROT_WRITE;
690 if (phdr[i].p_flags & PF_R)
691 prot |= VM_PROT_READ;
692
693 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
694 /*
695 * Some x86 binaries assume read == executable,
696 			 * notably the M3 runtime and therefore cvsup.
697 */
698 if (prot & VM_PROT_READ)
699 prot |= VM_PROT_EXECUTE;
700 #endif
701
702 if ((error = __elfN(load_section)(vmspace,
703 imgp->object, phdr[i].p_offset,
704 (caddr_t)(uintptr_t)phdr[i].p_vaddr,
705 phdr[i].p_memsz, phdr[i].p_filesz, prot,
706 sv->sv_pagesize)) != 0)
707 return (error);
708
709 /*
710 * If this segment contains the program headers,
711 * remember their virtual address for the AT_PHDR
712 * aux entry. Static binaries don't usually include
713 * a PT_PHDR entry.
714 */
715 if (phdr[i].p_offset == 0 &&
716 hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
717 <= phdr[i].p_filesz)
718 proghdr = phdr[i].p_vaddr + hdr->e_phoff;
719
720 seg_addr = trunc_page(phdr[i].p_vaddr);
721 seg_size = round_page(phdr[i].p_memsz +
722 phdr[i].p_vaddr - seg_addr);
723
724 /*
725 * Is this .text or .data? We can't use
726 			 * VM_PROT_WRITE or VM_PROT_EXEC; it breaks the
727 			 * Alpha terribly and possibly does other bad
728 * things so we stick to the old way of figuring
729 * it out: If the segment contains the program
730 * entry point, it's a text segment, otherwise it
731 * is a data segment.
732 *
733 * Note that obreak() assumes that data_addr +
734 * data_size == end of data load area, and the ELF
735 * file format expects segments to be sorted by
736 * address. If multiple data segments exist, the
737 * last one will be used.
738 */
739 if (hdr->e_entry >= phdr[i].p_vaddr &&
740 hdr->e_entry < (phdr[i].p_vaddr +
741 phdr[i].p_memsz)) {
742 text_size = seg_size;
743 text_addr = seg_addr;
744 entry = (u_long)hdr->e_entry;
745 } else {
746 data_size = seg_size;
747 data_addr = seg_addr;
748 }
749 total_size += seg_size;
750 break;
751 case PT_PHDR: /* Program header table info */
752 proghdr = phdr[i].p_vaddr;
753 break;
754 default:
755 break;
756 }
757 }
758
759 if (data_addr == 0 && data_size == 0) {
760 data_addr = text_addr;
761 data_size = text_size;
762 }
763
764 /*
765 * Check limits. It should be safe to check the
766 * limits after loading the segments since we do
767 * not actually fault in all the segments pages.
768 */
769 PROC_LOCK(imgp->proc);
770 if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
771 text_size > maxtsiz ||
772 total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
773 PROC_UNLOCK(imgp->proc);
774 return (ENOMEM);
775 }
776
777 vmspace->vm_tsize = text_size >> PAGE_SHIFT;
778 vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
779 vmspace->vm_dsize = data_size >> PAGE_SHIFT;
780 vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
781
782 /*
783 * We load the dynamic linker where a userland call
784 * to mmap(0, ...) would put it. The rationale behind this
785 * calculation is that it leaves room for the heap to grow to
786 * its maximum allowed size.
787 */
788 addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
789 lim_max(imgp->proc, RLIMIT_DATA));
790 PROC_UNLOCK(imgp->proc);
791
792 imgp->entry_addr = entry;
793
794 if (interp != NULL) {
795 VOP_UNLOCK(imgp->vp, 0, td);
796 if (brand_info->emul_path != NULL &&
797 brand_info->emul_path[0] != '\0') {
798 path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
799 snprintf(path, MAXPATHLEN, "%s%s",
800 brand_info->emul_path, interp);
801 error = __elfN(load_file)(imgp->proc, path, &addr,
802 &imgp->entry_addr, sv->sv_pagesize);
803 free(path, M_TEMP);
804 if (error == 0)
805 interp = NULL;
806 }
807 if (interp != NULL) {
808 error = __elfN(load_file)(imgp->proc, interp, &addr,
809 &imgp->entry_addr, sv->sv_pagesize);
810 }
811 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
812 if (error != 0) {
813 uprintf("ELF interpreter %s not found\n", interp);
814 return (error);
815 }
816 }
817
818 /*
819 * Construct auxargs table (used by the fixup routine)
820 */
821 elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
822 elf_auxargs->execfd = -1;
823 elf_auxargs->phdr = proghdr;
824 elf_auxargs->phent = hdr->e_phentsize;
825 elf_auxargs->phnum = hdr->e_phnum;
826 elf_auxargs->pagesz = PAGE_SIZE;
827 elf_auxargs->base = addr;
828 elf_auxargs->flags = 0;
829 elf_auxargs->entry = entry;
830 elf_auxargs->trace = elf_trace;
831
832 imgp->auxargs = elf_auxargs;
833 imgp->interpreted = 0;
834
835 return (error);
836 }
837
838 #define suword __CONCAT(suword, __ELF_WORD_SIZE)
839
840 int
841 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
842 {
843 Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
844 Elf_Addr *base;
845 Elf_Addr *pos;
846
847 base = (Elf_Addr *)*stack_base;
848 pos = base + (imgp->args->argc + imgp->args->envc + 2);
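	/*
	 * Annotation (not in the original source): "base" points at argv[0]
	 * on the new user stack, so skipping argc + envc pointers plus the
	 * two NULL terminators places "pos" just past the environment
	 * vector; that is where the ELF auxiliary vector is written below.
	 * The final suword() stores argc in the word immediately below
	 * argv[], which becomes the new *stack_base.
	 */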
849
850 if (args->trace) {
851 AUXARGS_ENTRY(pos, AT_DEBUG, 1);
852 }
853 if (args->execfd != -1) {
854 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
855 }
856 AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
857 AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
858 AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
859 AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
860 AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
861 AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
862 AUXARGS_ENTRY(pos, AT_BASE, args->base);
863 AUXARGS_ENTRY(pos, AT_NULL, 0);
864
865 free(imgp->auxargs, M_TEMP);
866 imgp->auxargs = NULL;
867
868 base--;
869 suword(base, (long)imgp->args->argc);
870 *stack_base = (register_t *)base;
871 return (0);
872 }
873
874 /*
875 * Code for generating ELF core dumps.
876 */
877
878 typedef void (*segment_callback)(vm_map_entry_t, void *);
879
880 /* Closure for cb_put_phdr(). */
881 struct phdr_closure {
882 Elf_Phdr *phdr; /* Program header to fill in */
883 Elf_Off offset; /* Offset of segment in core file */
884 };
885
886 /* Closure for cb_size_segment(). */
887 struct sseg_closure {
888 int count; /* Count of writable segments. */
889 size_t size; /* Total size of all writable segments. */
890 };
891
892 static void cb_put_phdr(vm_map_entry_t, void *);
893 static void cb_size_segment(vm_map_entry_t, void *);
894 static void each_writable_segment(struct thread *, segment_callback, void *);
895 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
896 int, void *, size_t);
897 static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
898 static void __elfN(putnote)(void *, size_t *, const char *, int,
899 const void *, size_t);
900
901 extern int osreldate;
902
903 int
904 __elfN(coredump)(td, vp, limit)
905 struct thread *td;
906 struct vnode *vp;
907 off_t limit;
908 {
909 struct ucred *cred = td->td_ucred;
910 int error = 0;
911 struct sseg_closure seginfo;
912 void *hdr;
913 size_t hdrsize;
914
915 /* Size the program segments. */
916 seginfo.count = 0;
917 seginfo.size = 0;
918 each_writable_segment(td, cb_size_segment, &seginfo);
919
920 /*
921 * Calculate the size of the core file header area by making
922 * a dry run of generating it. Nothing is written, but the
923 * size is calculated.
924 */
925 hdrsize = 0;
926 __elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
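	/*
	 * Annotation (not in the original source): __elfN(puthdr)() is used
	 * twice -- here with a NULL destination purely to size the ELF
	 * header, program headers and notes, and again from
	 * __elfN(corehdr)() below with a real buffer to fill them in before
	 * they are written out, followed by the raw segment contents.
	 */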
927
928 if (hdrsize + seginfo.size >= limit)
929 return (EFAULT);
930
931 /*
932 * Allocate memory for building the header, fill it up,
933 * and write it out.
934 */
935 hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
936 if (hdr == NULL) {
937 return (EINVAL);
938 }
939 error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
940
941 /* Write the contents of all of the writable segments. */
942 if (error == 0) {
943 Elf_Phdr *php;
944 off_t offset;
945 int i;
946
947 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
948 offset = hdrsize;
949 for (i = 0; i < seginfo.count; i++) {
950 error = vn_rdwr_inchunks(UIO_WRITE, vp,
951 (caddr_t)(uintptr_t)php->p_vaddr,
952 php->p_filesz, offset, UIO_USERSPACE,
953 IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
954 curthread); /* XXXKSE */
955 if (error != 0)
956 break;
957 offset += php->p_filesz;
958 php++;
959 }
960 }
961 free(hdr, M_TEMP);
962
963 return (error);
964 }
965
966 /*
967 * A callback for each_writable_segment() to write out the segment's
968 * program header entry.
969 */
970 static void
971 cb_put_phdr(entry, closure)
972 vm_map_entry_t entry;
973 void *closure;
974 {
975 struct phdr_closure *phc = (struct phdr_closure *)closure;
976 Elf_Phdr *phdr = phc->phdr;
977
978 phc->offset = round_page(phc->offset);
979
980 phdr->p_type = PT_LOAD;
981 phdr->p_offset = phc->offset;
982 phdr->p_vaddr = entry->start;
983 phdr->p_paddr = 0;
984 phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
985 phdr->p_align = PAGE_SIZE;
986 phdr->p_flags = 0;
987 if (entry->protection & VM_PROT_READ)
988 phdr->p_flags |= PF_R;
989 if (entry->protection & VM_PROT_WRITE)
990 phdr->p_flags |= PF_W;
991 if (entry->protection & VM_PROT_EXECUTE)
992 phdr->p_flags |= PF_X;
993
994 phc->offset += phdr->p_filesz;
995 phc->phdr++;
996 }
997
998 /*
999 * A callback for each_writable_segment() to gather information about
1000 * the number of segments and their total size.
1001 */
1002 static void
1003 cb_size_segment(entry, closure)
1004 vm_map_entry_t entry;
1005 void *closure;
1006 {
1007 struct sseg_closure *ssc = (struct sseg_closure *)closure;
1008
1009 ssc->count++;
1010 ssc->size += entry->end - entry->start;
1011 }
1012
1013 /*
1014 * For each writable segment in the process's memory map, call the given
1015 * function with a pointer to the map entry and some arbitrary
1016 * caller-supplied data.
1017 */
1018 static void
1019 each_writable_segment(td, func, closure)
1020 struct thread *td;
1021 segment_callback func;
1022 void *closure;
1023 {
1024 struct proc *p = td->td_proc;
1025 vm_map_t map = &p->p_vmspace->vm_map;
1026 vm_map_entry_t entry;
1027 vm_object_t backing_object, object;
1028 boolean_t ignore_entry;
1029
1030 vm_map_lock_read(map);
1031 for (entry = map->header.next; entry != &map->header;
1032 entry = entry->next) {
1033 /*
1034 * Don't dump inaccessible mappings, deal with legacy
1035 * coredump mode.
1036 *
1037 * Note that read-only segments related to the elf binary
1038 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1039 * need to arbitrarily ignore such segments.
1040 */
1041 if (elf_legacy_coredump) {
1042 if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1043 continue;
1044 } else {
1045 if ((entry->protection & VM_PROT_ALL) == 0)
1046 continue;
1047 }
1048
1049 /*
1050 		 * Don't include a memory segment in the coredump if
1051 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1052 * madvise(2). Do not dump submaps (i.e. parts of the
1053 * kernel map).
1054 */
1055 if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1056 continue;
1057
1058 if ((object = entry->object.vm_object) == NULL)
1059 continue;
1060
1061 /* Ignore memory-mapped devices and such things. */
1062 VM_OBJECT_LOCK(object);
1063 while ((backing_object = object->backing_object) != NULL) {
1064 VM_OBJECT_LOCK(backing_object);
1065 VM_OBJECT_UNLOCK(object);
1066 object = backing_object;
1067 }
1068 ignore_entry = object->type != OBJT_DEFAULT &&
1069 object->type != OBJT_SWAP && object->type != OBJT_VNODE;
1070 VM_OBJECT_UNLOCK(object);
1071 if (ignore_entry)
1072 continue;
1073
1074 (*func)(entry, closure);
1075 }
1076 vm_map_unlock_read(map);
1077 }
1078
1079 /*
1080 * Write the core file header to the file, including padding up to
1081 * the page boundary.
1082 */
1083 static int
1084 __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
1085 struct thread *td;
1086 struct vnode *vp;
1087 struct ucred *cred;
1088 int numsegs;
1089 size_t hdrsize;
1090 void *hdr;
1091 {
1092 size_t off;
1093
1094 /* Fill in the header. */
1095 bzero(hdr, hdrsize);
1096 off = 0;
1097 __elfN(puthdr)(td, hdr, &off, numsegs);
1098
1099 /* Write it to the core file. */
1100 return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1101 UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1102 td)); /* XXXKSE */
1103 }
1104
1105 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1106 typedef struct prstatus32 elf_prstatus_t;
1107 typedef struct prpsinfo32 elf_prpsinfo_t;
1108 typedef struct fpreg32 elf_prfpregset_t;
1109 typedef struct fpreg32 elf_fpregset_t;
1110 typedef struct reg32 elf_gregset_t;
1111 #else
1112 typedef prstatus_t elf_prstatus_t;
1113 typedef prpsinfo_t elf_prpsinfo_t;
1114 typedef prfpregset_t elf_prfpregset_t;
1115 typedef prfpregset_t elf_fpregset_t;
1116 typedef gregset_t elf_gregset_t;
1117 #endif
1118
1119 static void
1120 __elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
1121 {
1122 struct {
1123 elf_prstatus_t status;
1124 elf_prfpregset_t fpregset;
1125 elf_prpsinfo_t psinfo;
1126 } *tempdata;
1127 elf_prstatus_t *status;
1128 elf_prfpregset_t *fpregset;
1129 elf_prpsinfo_t *psinfo;
1130 struct proc *p;
1131 struct thread *thr;
1132 size_t ehoff, noteoff, notesz, phoff;
1133
1134 p = td->td_proc;
1135
1136 ehoff = *off;
1137 *off += sizeof(Elf_Ehdr);
1138
1139 phoff = *off;
1140 *off += (numsegs + 1) * sizeof(Elf_Phdr);
1141
1142 noteoff = *off;
1143 /*
1144 * Don't allocate space for the notes if we're just calculating
1145 * the size of the header. We also don't collect the data.
1146 */
1147 if (dst != NULL) {
1148 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
1149 status = &tempdata->status;
1150 fpregset = &tempdata->fpregset;
1151 psinfo = &tempdata->psinfo;
1152 } else {
1153 tempdata = NULL;
1154 status = NULL;
1155 fpregset = NULL;
1156 psinfo = NULL;
1157 }
1158
1159 if (dst != NULL) {
1160 psinfo->pr_version = PRPSINFO_VERSION;
1161 psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
1162 strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1163 /*
1164 * XXX - We don't fill in the command line arguments properly
1165 * yet.
1166 */
1167 strlcpy(psinfo->pr_psargs, p->p_comm,
1168 sizeof(psinfo->pr_psargs));
1169 }
1170 __elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1171 sizeof *psinfo);
1172
1173 /*
1174 * To have the debugger select the right thread (LWP) as the initial
1175 * thread, we dump the state of the thread passed to us in td first.
1176 	 * This is the thread that caused the core dump and is thus likely to
1177 * be the right thread one wants to have selected in the debugger.
1178 */
1179 thr = td;
1180 while (thr != NULL) {
1181 if (dst != NULL) {
1182 status->pr_version = PRSTATUS_VERSION;
1183 status->pr_statussz = sizeof(elf_prstatus_t);
1184 status->pr_gregsetsz = sizeof(elf_gregset_t);
1185 status->pr_fpregsetsz = sizeof(elf_fpregset_t);
1186 status->pr_osreldate = osreldate;
1187 status->pr_cursig = p->p_sig;
1188 status->pr_pid = thr->td_tid;
1189 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1190 fill_regs32(thr, &status->pr_reg);
1191 fill_fpregs32(thr, fpregset);
1192 #else
1193 fill_regs(thr, &status->pr_reg);
1194 fill_fpregs(thr, fpregset);
1195 #endif
1196 }
1197 __elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1198 sizeof *status);
1199 __elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1200 sizeof *fpregset);
1201 /*
1202 * Allow for MD specific notes, as well as any MD
1203 * specific preparations for writing MI notes.
1204 */
1205 __elfN(dump_thread)(thr, dst, off);
1206
1207 thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
1208 TAILQ_NEXT(thr, td_plist);
1209 if (thr == td)
1210 thr = TAILQ_NEXT(thr, td_plist);
1211 }
1212
1213 notesz = *off - noteoff;
1214
1215 if (dst != NULL)
1216 free(tempdata, M_TEMP);
1217
1218 /* Align up to a page boundary for the program segments. */
1219 *off = round_page(*off);
1220
1221 if (dst != NULL) {
1222 Elf_Ehdr *ehdr;
1223 Elf_Phdr *phdr;
1224 struct phdr_closure phc;
1225
1226 /*
1227 * Fill in the ELF header.
1228 */
1229 ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1230 ehdr->e_ident[EI_MAG0] = ELFMAG0;
1231 ehdr->e_ident[EI_MAG1] = ELFMAG1;
1232 ehdr->e_ident[EI_MAG2] = ELFMAG2;
1233 ehdr->e_ident[EI_MAG3] = ELFMAG3;
1234 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1235 ehdr->e_ident[EI_DATA] = ELF_DATA;
1236 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1237 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1238 ehdr->e_ident[EI_ABIVERSION] = 0;
1239 ehdr->e_ident[EI_PAD] = 0;
1240 ehdr->e_type = ET_CORE;
1241 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1242 ehdr->e_machine = EM_386;
1243 #else
1244 ehdr->e_machine = ELF_ARCH;
1245 #endif
1246 ehdr->e_version = EV_CURRENT;
1247 ehdr->e_entry = 0;
1248 ehdr->e_phoff = phoff;
1249 ehdr->e_flags = 0;
1250 ehdr->e_ehsize = sizeof(Elf_Ehdr);
1251 ehdr->e_phentsize = sizeof(Elf_Phdr);
1252 ehdr->e_phnum = numsegs + 1;
1253 ehdr->e_shentsize = sizeof(Elf_Shdr);
1254 ehdr->e_shnum = 0;
1255 ehdr->e_shstrndx = SHN_UNDEF;
1256
1257 /*
1258 * Fill in the program header entries.
1259 */
1260 phdr = (Elf_Phdr *)((char *)dst + phoff);
1261
1262 		/* The note segment. */
1263 phdr->p_type = PT_NOTE;
1264 phdr->p_offset = noteoff;
1265 phdr->p_vaddr = 0;
1266 phdr->p_paddr = 0;
1267 phdr->p_filesz = notesz;
1268 phdr->p_memsz = 0;
1269 phdr->p_flags = 0;
1270 phdr->p_align = 0;
1271 phdr++;
1272
1273 /* All the writable segments from the program. */
1274 phc.phdr = phdr;
1275 phc.offset = *off;
1276 each_writable_segment(td, cb_put_phdr, &phc);
1277 }
1278 }
1279
1280 static void
1281 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1282 const void *desc, size_t descsz)
1283 {
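	/*
	 * Annotation (not in the original source): each note record written
	 * here (or merely sized, when dst is NULL) consists of the Elf_Note
	 * header (n_namesz, n_descsz, n_type) followed by the NUL-terminated
	 * name and then the descriptor, each padded up to a multiple of
	 * sizeof(Elf_Size).
	 */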
1284 Elf_Note note;
1285
1286 note.n_namesz = strlen(name) + 1;
1287 note.n_descsz = descsz;
1288 note.n_type = type;
1289 if (dst != NULL)
1290 		bcopy(&note, (char *)dst + *off, sizeof note);
1291 *off += sizeof note;
1292 if (dst != NULL)
1293 bcopy(name, (char *)dst + *off, note.n_namesz);
1294 *off += roundup2(note.n_namesz, sizeof(Elf_Size));
1295 if (dst != NULL)
1296 bcopy(desc, (char *)dst + *off, note.n_descsz);
1297 *off += roundup2(note.n_descsz, sizeof(Elf_Size));
1298 }
1299
1300 /*
1301 * Tell kern_execve.c about it, with a little help from the linker.
1302 */
1303 static struct execsw __elfN(execsw) = {
1304 __CONCAT(exec_, __elfN(imgact)),
1305 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
1306 };
1307 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));