sys/kern/imgact_elf.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2017 Dell EMC
5 * Copyright (c) 2000-2001, 2003 David O'Brien
6 * Copyright (c) 1995-1996 Søren Schmidt
7 * Copyright (c) 1996 Peter Wemm
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer
15 * in this position and unchanged.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include "opt_capsicum.h"
38
39 #include <sys/param.h>
40 #include <sys/capsicum.h>
41 #include <sys/compressor.h>
42 #include <sys/exec.h>
43 #include <sys/fcntl.h>
44 #include <sys/imgact.h>
45 #include <sys/imgact_elf.h>
46 #include <sys/jail.h>
47 #include <sys/kernel.h>
48 #include <sys/lock.h>
49 #include <sys/malloc.h>
50 #include <sys/mount.h>
51 #include <sys/mman.h>
52 #include <sys/namei.h>
53 #include <sys/proc.h>
54 #include <sys/procfs.h>
55 #include <sys/ptrace.h>
56 #include <sys/racct.h>
57 #include <sys/resourcevar.h>
58 #include <sys/rwlock.h>
59 #include <sys/sbuf.h>
60 #include <sys/sf_buf.h>
61 #include <sys/smp.h>
62 #include <sys/systm.h>
63 #include <sys/signalvar.h>
64 #include <sys/stat.h>
65 #include <sys/sx.h>
66 #include <sys/syscall.h>
67 #include <sys/sysctl.h>
68 #include <sys/sysent.h>
69 #include <sys/vnode.h>
70 #include <sys/syslog.h>
71 #include <sys/eventhandler.h>
72 #include <sys/user.h>
73
74 #include <vm/vm.h>
75 #include <vm/vm_kern.h>
76 #include <vm/vm_param.h>
77 #include <vm/pmap.h>
78 #include <vm/vm_map.h>
79 #include <vm/vm_object.h>
80 #include <vm/vm_extern.h>
81
82 #include <machine/elf.h>
83 #include <machine/md_var.h>
84
85 #define ELF_NOTE_ROUNDSIZE 4
86 #define OLD_EI_BRAND 8
87
88 static int __elfN(check_header)(const Elf_Ehdr *hdr);
89 static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
90 const char *interp, int32_t *osrel, uint32_t *fctl0);
91 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
92 u_long *entry);
93 static int __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
94 caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot);
95 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
96 static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note,
97 int32_t *osrel);
98 static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
99 static boolean_t __elfN(check_note)(struct image_params *imgp,
100 Elf_Brandnote *checknote, int32_t *osrel, boolean_t *has_fctl0,
101 uint32_t *fctl0);
102 static vm_prot_t __elfN(trans_prot)(Elf_Word);
103 static Elf_Word __elfN(untrans_prot)(vm_prot_t);
104
105 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE),
106 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
107 "");
108
109 #define CORE_BUF_SIZE (16 * 1024)
110
111 int __elfN(fallback_brand) = -1;
112 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
113 fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0,
114 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
115
116 static int elf_legacy_coredump = 0;
117 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
118 &elf_legacy_coredump, 0,
119 "include all and only RW pages in core dumps");
120
121 int __elfN(nxstack) =
122 #if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ || \
123 (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) || \
124 defined(__riscv)
125 1;
126 #else
127 0;
128 #endif
129 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
130 nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
131 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");
132
133 #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
134 int i386_read_exec = 0;
135 SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
136 "enable execution from readable segments");
137 #endif
138
139 static u_long __elfN(pie_base) = ET_DYN_LOAD_ADDR;
140 static int
141 sysctl_pie_base(SYSCTL_HANDLER_ARGS)
142 {
143 u_long val;
144 int error;
145
146 val = __elfN(pie_base);
147 error = sysctl_handle_long(oidp, &val, 0, req);
148 if (error != 0 || req->newptr == NULL)
149 return (error);
150 if ((val & PAGE_MASK) != 0)
151 return (EINVAL);
152 __elfN(pie_base) = val;
153 return (0);
154 }
155 SYSCTL_PROC(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, pie_base,
156 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
157 sysctl_pie_base, "LU",
158 "PIE load base without randomization");
159
160 SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr,
161 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
162 "");
163 #define ASLR_NODE_OID __CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr)
164
165 static int __elfN(aslr_enabled) = 0;
166 SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN,
167 &__elfN(aslr_enabled), 0,
168 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
169 ": enable address map randomization");
170
171 static int __elfN(pie_aslr_enabled) = 0;
172 SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN,
173 &__elfN(pie_aslr_enabled), 0,
174 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
175 ": enable address map randomization for PIE binaries");
176
177 static int __elfN(aslr_honor_sbrk) = 1;
178 SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW,
179 &__elfN(aslr_honor_sbrk), 0,
180 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used");
181
182 static int __elfN(aslr_stack_gap) = 3;
183 SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack_gap, CTLFLAG_RW,
184 &__elfN(aslr_stack_gap), 0,
185 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
186 ": maximum percentage of main stack to waste on a random gap");
187
188 static int __elfN(sigfastblock) = 1;
189 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock,
190 CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
191 "enable sigfastblock for new processes");
192
193 static bool __elfN(allow_wx) = true;
194 SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx,
195 CTLFLAG_RWTUN, &__elfN(allow_wx), 0,
196 "Allow pages to be mapped simultaneously writable and executable");
197
198 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
199
200 #define aligned(a, t) (rounddown2((u_long)(a), sizeof(t)) == (u_long)(a))
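/*
 * Example: with an 8-byte Elf_Addr, aligned(0x1000, Elf_Addr) is true while
 * aligned(0x1003, Elf_Addr) is false, since rounddown2(0x1003, 8) == 0x1000.
 */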
201
202 static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";
203
204 Elf_Brandnote __elfN(freebsd_brandnote) = {
205 .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
206 .hdr.n_descsz = sizeof(int32_t),
207 .hdr.n_type = NT_FREEBSD_ABI_TAG,
208 .vendor = FREEBSD_ABI_VENDOR,
209 .flags = BN_TRANSLATE_OSREL,
210 .trans_osrel = __elfN(freebsd_trans_osrel)
211 };
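/*
 * Illustrative layout (hypothetical, not dumped from a real binary): a
 * matching .note.ABI-tag section starts with the 12-byte Elf_Note header
 * above, followed by n_namesz = 8 name bytes ("FreeBSD\0", already a
 * multiple of ELF_NOTE_ROUNDSIZE), so the 4-byte osrel descriptor that
 * __elfN(freebsd_trans_osrel)() reads below begins at byte offset 20.
 */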
212
213 static bool
214 __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
215 {
216 uintptr_t p;
217
218 p = (uintptr_t)(note + 1);
219 p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
220 *osrel = *(const int32_t *)(p);
221
222 return (true);
223 }
224
225 static const char GNU_ABI_VENDOR[] = "GNU";
226 static int GNU_KFREEBSD_ABI_DESC = 3;
227
228 Elf_Brandnote __elfN(kfreebsd_brandnote) = {
229 .hdr.n_namesz = sizeof(GNU_ABI_VENDOR),
230 .hdr.n_descsz = 16, /* XXX at least 16 */
231 .hdr.n_type = 1,
232 .vendor = GNU_ABI_VENDOR,
233 .flags = BN_TRANSLATE_OSREL,
234 .trans_osrel = kfreebsd_trans_osrel
235 };
236
237 static bool
238 kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
239 {
240 const Elf32_Word *desc;
241 uintptr_t p;
242
243 p = (uintptr_t)(note + 1);
244 p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
245
246 desc = (const Elf32_Word *)p;
247 if (desc[0] != GNU_KFREEBSD_ABI_DESC)
248 return (false);
249
250 /*
251 * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
252 * (__FreeBSD_version: <major><two-digit minor>Rxx) in LSB order.
253 */
254 *osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];
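	/*
	 * Worked example (hypothetical descriptor): desc[] = { 3, 12, 1, 0 }
	 * declares kFreeBSD with earliest compatible version 12.1, giving
	 * *osrel = 12 * 100000 + 1 * 1000 + 0 = 1201000.
	 */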
255
256 return (true);
257 }
258
259 int
260 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
261 {
262 int i;
263
264 for (i = 0; i < MAX_BRANDS; i++) {
265 if (elf_brand_list[i] == NULL) {
266 elf_brand_list[i] = entry;
267 break;
268 }
269 }
270 if (i == MAX_BRANDS) {
271 printf("WARNING: %s: could not insert brandinfo entry: %p\n",
272 __func__, entry);
273 return (-1);
274 }
275 return (0);
276 }
277
278 int
279 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
280 {
281 int i;
282
283 for (i = 0; i < MAX_BRANDS; i++) {
284 if (elf_brand_list[i] == entry) {
285 elf_brand_list[i] = NULL;
286 break;
287 }
288 }
289 if (i == MAX_BRANDS)
290 return (-1);
291 return (0);
292 }
293
294 int
295 __elfN(brand_inuse)(Elf_Brandinfo *entry)
296 {
297 struct proc *p;
298 int rval = FALSE;
299
300 sx_slock(&allproc_lock);
301 FOREACH_PROC_IN_SYSTEM(p) {
302 if (p->p_sysent == entry->sysvec) {
303 rval = TRUE;
304 break;
305 }
306 }
307 sx_sunlock(&allproc_lock);
308
309 return (rval);
310 }
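/*
 * Registration sketch (hypothetical module; names invented): an ABI module
 * typically registers its brand at boot, e.g.
 *
 *	SYSINIT(my_abi, SI_SUB_EXEC, SI_ORDER_ANY,
 *	    (sysinit_cfunc_t)__elfN(insert_brand_entry), &my_brand_info);
 *
 * and pairs it with __elfN(remove_brand_entry)() at unload, once
 * __elfN(brand_inuse)() reports no live processes on the sysvec.
 */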
311
312 static Elf_Brandinfo *
313 __elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
314 int32_t *osrel, uint32_t *fctl0)
315 {
316 const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
317 Elf_Brandinfo *bi, *bi_m;
318 boolean_t ret, has_fctl0;
319 int i, interp_name_len;
320
321 interp_name_len = interp != NULL ? strlen(interp) + 1 : 0;
322
323 /*
324 * We support four types of branding -- (1) the ELF EI_OSABI field
325 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
326 * branding within the ELF header, (3) the interpreter path, matched
327 * against each brand's `interp_path', and (4) the ".note.ABI-tag" section.
328 */
329
330 /* Look for an ".note.ABI-tag" ELF section */
331 bi_m = NULL;
332 for (i = 0; i < MAX_BRANDS; i++) {
333 bi = elf_brand_list[i];
334 if (bi == NULL)
335 continue;
336 if (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)
337 continue;
338 if (hdr->e_machine == bi->machine && (bi->flags &
339 (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
340 has_fctl0 = false;
341 *fctl0 = 0;
342 *osrel = 0;
343 ret = __elfN(check_note)(imgp, bi->brand_note, osrel,
344 &has_fctl0, fctl0);
345 /* Give brand a chance to veto check_note's guess */
346 if (ret && bi->header_supported) {
347 ret = bi->header_supported(imgp, osrel,
348 has_fctl0 ? fctl0 : NULL);
349 }
350 /*
351 * If the note checker claimed the binary, but the
352 * interpreter path in the image does not match
353 * the default one for the brand, search for
354 * other brands with the same interpreter.
355 * Either there is a better brand with the right
356 * interpreter or, failing that, we return the
357 * first brand that accepted our note and,
358 * optionally, header.
359 */
360 if (ret && bi_m == NULL && interp != NULL &&
361 (bi->interp_path == NULL ||
362 (strlen(bi->interp_path) + 1 != interp_name_len ||
363 strncmp(interp, bi->interp_path, interp_name_len)
364 != 0))) {
365 bi_m = bi;
366 ret = 0;
367 }
368 if (ret)
369 return (bi);
370 }
371 }
372 if (bi_m != NULL)
373 return (bi_m);
374
375 /* If the executable has a brand, search for it in the brand list. */
376 for (i = 0; i < MAX_BRANDS; i++) {
377 bi = elf_brand_list[i];
378 if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
379 (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
380 continue;
381 if (hdr->e_machine == bi->machine &&
382 (hdr->e_ident[EI_OSABI] == bi->brand ||
383 (bi->compat_3_brand != NULL &&
384 strcmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
385 bi->compat_3_brand) == 0))) {
386 /* Looks good, but give brand a chance to veto */
387 if (bi->header_supported == NULL ||
388 bi->header_supported(imgp, NULL, NULL)) {
389 /*
390 * Again, prefer strictly matching
391 * interpreter path.
392 */
393 if (interp_name_len == 0 &&
394 bi->interp_path == NULL)
395 return (bi);
396 if (bi->interp_path != NULL &&
397 strlen(bi->interp_path) + 1 ==
398 interp_name_len && strncmp(interp,
399 bi->interp_path, interp_name_len) == 0)
400 return (bi);
401 if (bi_m == NULL)
402 bi_m = bi;
403 }
404 }
405 }
406 if (bi_m != NULL)
407 return (bi_m);
408
409 /* No known brand, see if the header is recognized by any brand */
410 for (i = 0; i < MAX_BRANDS; i++) {
411 bi = elf_brand_list[i];
412 if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY ||
413 bi->header_supported == NULL)
414 continue;
415 if (hdr->e_machine == bi->machine) {
416 ret = bi->header_supported(imgp, NULL, NULL);
417 if (ret)
418 return (bi);
419 }
420 }
421
422 /* Lacking a known brand, search for a recognized interpreter. */
423 if (interp != NULL) {
424 for (i = 0; i < MAX_BRANDS; i++) {
425 bi = elf_brand_list[i];
426 if (bi == NULL || (bi->flags &
427 (BI_BRAND_NOTE_MANDATORY | BI_BRAND_ONLY_STATIC))
428 != 0)
429 continue;
430 if (hdr->e_machine == bi->machine &&
431 bi->interp_path != NULL &&
432 /* ELF image p_filesz includes terminating zero */
433 strlen(bi->interp_path) + 1 == interp_name_len &&
434 strncmp(interp, bi->interp_path, interp_name_len)
435 == 0 && (bi->header_supported == NULL ||
436 bi->header_supported(imgp, NULL, NULL)))
437 return (bi);
438 }
439 }
440
441 /* Lacking a recognized interpreter, try the default brand */
442 for (i = 0; i < MAX_BRANDS; i++) {
443 bi = elf_brand_list[i];
444 if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
445 (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
446 continue;
447 if (hdr->e_machine == bi->machine &&
448 __elfN(fallback_brand) == bi->brand &&
449 (bi->header_supported == NULL ||
450 bi->header_supported(imgp, NULL, NULL)))
451 return (bi);
452 }
453 return (NULL);
454 }
455
456 static bool
457 __elfN(phdr_in_zero_page)(const Elf_Ehdr *hdr)
458 {
459 return (hdr->e_phoff <= PAGE_SIZE &&
460 (u_int)hdr->e_phentsize * hdr->e_phnum <= PAGE_SIZE - hdr->e_phoff);
461 }
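/*
 * Example: a typical amd64 executable with e_phoff = 64, e_phentsize = 56
 * and e_phnum = 11 occupies bytes 64..679 of the image, well within the
 * first page, so its program headers can be read from imgp->image_header.
 */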
462
463 static int
464 __elfN(check_header)(const Elf_Ehdr *hdr)
465 {
466 Elf_Brandinfo *bi;
467 int i;
468
469 if (!IS_ELF(*hdr) ||
470 hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
471 hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
472 hdr->e_ident[EI_VERSION] != EV_CURRENT ||
473 hdr->e_phentsize != sizeof(Elf_Phdr) ||
474 hdr->e_version != ELF_TARG_VER)
475 return (ENOEXEC);
476
477 /*
478 * Make sure we have at least one brand for this machine.
479 */
480
481 for (i = 0; i < MAX_BRANDS; i++) {
482 bi = elf_brand_list[i];
483 if (bi != NULL && bi->machine == hdr->e_machine)
484 break;
485 }
486 if (i == MAX_BRANDS)
487 return (ENOEXEC);
488
489 return (0);
490 }
491
492 static int
493 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
494 vm_offset_t start, vm_offset_t end, vm_prot_t prot)
495 {
496 struct sf_buf *sf;
497 int error;
498 vm_offset_t off;
499
500 /*
501 * Create the page if it doesn't exist yet. Ignore errors.
502 */
503 vm_map_fixed(map, NULL, 0, trunc_page(start), round_page(end) -
504 trunc_page(start), VM_PROT_ALL, VM_PROT_ALL, MAP_CHECK_EXCL);
505
506 /*
507 * Find the page from the underlying object.
508 */
509 if (object != NULL) {
510 sf = vm_imgact_map_page(object, offset);
511 if (sf == NULL)
512 return (KERN_FAILURE);
513 off = offset - trunc_page(offset);
514 error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
515 end - start);
516 vm_imgact_unmap_page(sf);
517 if (error != 0)
518 return (KERN_FAILURE);
519 }
520
521 return (KERN_SUCCESS);
522 }
523
524 static int
525 __elfN(map_insert)(struct image_params *imgp, vm_map_t map, vm_object_t object,
526 vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot,
527 int cow)
528 {
529 struct sf_buf *sf;
530 vm_offset_t off;
531 vm_size_t sz;
532 int error, locked, rv;
533
534 if (start != trunc_page(start)) {
535 rv = __elfN(map_partial)(map, object, offset, start,
536 round_page(start), prot);
537 if (rv != KERN_SUCCESS)
538 return (rv);
539 offset += round_page(start) - start;
540 start = round_page(start);
541 }
542 if (end != round_page(end)) {
543 rv = __elfN(map_partial)(map, object, offset +
544 trunc_page(end) - start, trunc_page(end), end, prot);
545 if (rv != KERN_SUCCESS)
546 return (rv);
547 end = trunc_page(end);
548 }
549 if (start >= end)
550 return (KERN_SUCCESS);
551 if ((offset & PAGE_MASK) != 0) {
552 /*
553 * The mapping is not page aligned. This means that we have
554 * to copy the data.
555 */
556 rv = vm_map_fixed(map, NULL, 0, start, end - start,
557 prot | VM_PROT_WRITE, VM_PROT_ALL, MAP_CHECK_EXCL);
558 if (rv != KERN_SUCCESS)
559 return (rv);
560 if (object == NULL)
561 return (KERN_SUCCESS);
562 for (; start < end; start += sz) {
563 sf = vm_imgact_map_page(object, offset);
564 if (sf == NULL)
565 return (KERN_FAILURE);
566 off = offset - trunc_page(offset);
567 sz = end - start;
568 if (sz > PAGE_SIZE - off)
569 sz = PAGE_SIZE - off;
570 error = copyout((caddr_t)sf_buf_kva(sf) + off,
571 (caddr_t)start, sz);
572 vm_imgact_unmap_page(sf);
573 if (error != 0)
574 return (KERN_FAILURE);
575 offset += sz;
576 }
577 } else {
578 vm_object_reference(object);
579 rv = vm_map_fixed(map, object, offset, start, end - start,
580 prot, VM_PROT_ALL, cow | MAP_CHECK_EXCL |
581 (object != NULL ? MAP_VN_EXEC : 0));
582 if (rv != KERN_SUCCESS) {
583 locked = VOP_ISLOCKED(imgp->vp);
584 VOP_UNLOCK(imgp->vp);
585 vm_object_deallocate(object);
586 vn_lock(imgp->vp, locked | LK_RETRY);
587 return (rv);
588 } else if (object != NULL) {
589 MPASS(imgp->vp->v_object == object);
590 VOP_SET_TEXT_CHECKED(imgp->vp);
591 }
592 }
593 return (KERN_SUCCESS);
594 }
595
596 static int
597 __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
598 caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
599 {
600 struct sf_buf *sf;
601 size_t map_len;
602 vm_map_t map;
603 vm_object_t object;
604 vm_offset_t map_addr;
605 int error, rv, cow;
606 size_t copy_len;
607 vm_ooffset_t file_addr;
608
609 /*
610 * It's necessary to fail if the filsz + offset taken from the
611 * header is greater than the actual file pager object's size.
612 * If we were to allow this, then the vm_map_find() below would
613 * walk right off the end of the file object and into the ether.
614 *
615 * While I'm here, might as well check for something else that
616 * is invalid: filsz cannot be greater than memsz.
617 */
618 if ((filsz != 0 && (off_t)filsz + offset > imgp->attr->va_size) ||
619 filsz > memsz) {
620 uprintf("elf_load_section: truncated ELF file\n");
621 return (ENOEXEC);
622 }
623
624 object = imgp->object;
625 map = &imgp->proc->p_vmspace->vm_map;
626 map_addr = trunc_page((vm_offset_t)vmaddr);
627 file_addr = trunc_page(offset);
628
629 /*
630 * We have two choices. We can either clear the data in the last page
631 * of an oversized mapping, or we can start the anon mapping a page
632 * early and copy the initialized data into that first page. We
633 * choose the second.
634 */
635 if (filsz == 0)
636 map_len = 0;
637 else if (memsz > filsz)
638 map_len = trunc_page(offset + filsz) - file_addr;
639 else
640 map_len = round_page(offset + filsz) - file_addr;
641
642 if (map_len != 0) {
643 /* cow flags: don't dump readonly sections in core */
644 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
645 (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
646
647 rv = __elfN(map_insert)(imgp, map, object, file_addr,
648 map_addr, map_addr + map_len, prot, cow);
649 if (rv != KERN_SUCCESS)
650 return (EINVAL);
651
652 /* we can stop now if we've covered it all */
653 if (memsz == filsz)
654 return (0);
655 }
656
657 /*
658 * We have to get the remaining bit of the file into the first part
659 * of the oversized map segment. This is normally because the .data
660 * segment in the file is extended to provide bss. It's a neat idea
661 * to try and save a page, but it's a pain in the behind to implement.
662 */
663 copy_len = filsz == 0 ? 0 : (offset + filsz) - trunc_page(offset +
664 filsz);
665 map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
666 map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;
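	/*
	 * Illustrative numbers (4 KB pages; values invented): with
	 * vmaddr = 0x401000, offset = 0, filsz = 0x1800 and memsz = 0x4000,
	 * the file-backed pass above mapped one page, copy_len is 0x800,
	 * map_addr becomes 0x402000 and map_len 0x3000: three anonymous
	 * pages, with the 0x800-byte file fragment copied into the first.
	 */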
667
668 /* This had damn well better be true! */
669 if (map_len != 0) {
670 rv = __elfN(map_insert)(imgp, map, NULL, 0, map_addr,
671 map_addr + map_len, prot, 0);
672 if (rv != KERN_SUCCESS)
673 return (EINVAL);
674 }
675
676 if (copy_len != 0) {
677 sf = vm_imgact_map_page(object, offset + filsz);
678 if (sf == NULL)
679 return (EIO);
680
681 /* send the page fragment to user space */
682 error = copyout((caddr_t)sf_buf_kva(sf), (caddr_t)map_addr,
683 copy_len);
684 vm_imgact_unmap_page(sf);
685 if (error != 0)
686 return (error);
687 }
688
689 /*
690 * Remove write access to the page if it was only granted by map_insert
691 * to allow copyout.
692 */
693 if ((prot & VM_PROT_WRITE) == 0)
694 vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
695 map_len), prot, 0, VM_MAP_PROTECT_SET_PROT);
696
697 return (0);
698 }
699
700 static int
701 __elfN(load_sections)(struct image_params *imgp, const Elf_Ehdr *hdr,
702 const Elf_Phdr *phdr, u_long rbase, u_long *base_addrp)
703 {
704 vm_prot_t prot;
705 u_long base_addr;
706 bool first;
707 int error, i;
708
709 ASSERT_VOP_LOCKED(imgp->vp, __func__);
710
711 base_addr = 0;
712 first = true;
713
714 for (i = 0; i < hdr->e_phnum; i++) {
715 if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
716 continue;
717
718 /* Loadable segment */
719 prot = __elfN(trans_prot)(phdr[i].p_flags);
720 error = __elfN(load_section)(imgp, phdr[i].p_offset,
721 (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
722 phdr[i].p_memsz, phdr[i].p_filesz, prot);
723 if (error != 0)
724 return (error);
725
726 /*
727 * Establish the base address if this is the first segment.
728 */
729 if (first) {
730 base_addr = trunc_page(phdr[i].p_vaddr + rbase);
731 first = false;
732 }
733 }
734
735 if (base_addrp != NULL)
736 *base_addrp = base_addr;
737
738 return (0);
739 }
740
741 /*
742 * Load the file "file" into memory. It may be either a shared object
743 * or an executable.
744 *
745 * The "addr" reference parameter is in/out. On entry, it specifies
746 * the address where a shared object should be loaded. If the file is
747 * an executable, this value is ignored. On exit, "addr" specifies
748 * where the file was actually loaded.
749 *
750 * The "entry" reference parameter is out only. On exit, it specifies
751 * the entry point for the loaded file.
752 */
753 static int
754 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
755 u_long *entry)
756 {
757 struct {
758 struct nameidata nd;
759 struct vattr attr;
760 struct image_params image_params;
761 } *tempdata;
762 const Elf_Ehdr *hdr = NULL;
763 const Elf_Phdr *phdr = NULL;
764 struct nameidata *nd;
765 struct vattr *attr;
766 struct image_params *imgp;
767 u_long rbase;
768 u_long base_addr = 0;
769 int error;
770
771 #ifdef CAPABILITY_MODE
772 /*
773 * XXXJA: This check can go away once we are sufficiently confident
774 * that the checks in namei() are correct.
775 */
776 if (IN_CAPABILITY_MODE(curthread))
777 return (ECAPMODE);
778 #endif
779
780 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK | M_ZERO);
781 nd = &tempdata->nd;
782 attr = &tempdata->attr;
783 imgp = &tempdata->image_params;
784
785 /*
786 * Initialize part of the common data
787 */
788 imgp->proc = p;
789 imgp->attr = attr;
790
791 NDINIT(nd, LOOKUP, ISOPEN | FOLLOW | LOCKSHARED | LOCKLEAF,
792 UIO_SYSSPACE, file, curthread);
793 if ((error = namei(nd)) != 0) {
794 nd->ni_vp = NULL;
795 goto fail;
796 }
797 NDFREE(nd, NDF_ONLY_PNBUF);
798 imgp->vp = nd->ni_vp;
799
800 /*
801 * Check permissions, modes, uid, etc on the file, and "open" it.
802 */
803 error = exec_check_permissions(imgp);
804 if (error)
805 goto fail;
806
807 error = exec_map_first_page(imgp);
808 if (error)
809 goto fail;
810
811 imgp->object = nd->ni_vp->v_object;
812
813 hdr = (const Elf_Ehdr *)imgp->image_header;
814 if ((error = __elfN(check_header)(hdr)) != 0)
815 goto fail;
816 if (hdr->e_type == ET_DYN)
817 rbase = *addr;
818 else if (hdr->e_type == ET_EXEC)
819 rbase = 0;
820 else {
821 error = ENOEXEC;
822 goto fail;
823 }
824
825 /* Only support headers that fit within first page for now */
826 if (!__elfN(phdr_in_zero_page)(hdr)) {
827 error = ENOEXEC;
828 goto fail;
829 }
830
831 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
832 if (!aligned(phdr, Elf_Addr)) {
833 error = ENOEXEC;
834 goto fail;
835 }
836
837 error = __elfN(load_sections)(imgp, hdr, phdr, rbase, &base_addr);
838 if (error != 0)
839 goto fail;
840
841 *addr = base_addr;
842 *entry = (unsigned long)hdr->e_entry + rbase;
843
844 fail:
845 if (imgp->firstpage)
846 exec_unmap_first_page(imgp);
847
848 if (nd->ni_vp) {
849 if (imgp->textset)
850 VOP_UNSET_TEXT_CHECKED(nd->ni_vp);
851 vput(nd->ni_vp);
852 }
853 free(tempdata, M_TEMP);
854
855 return (error);
856 }
857
858 static u_long
859 __CONCAT(rnd_, __elfN(base))(vm_map_t map __unused, u_long minv, u_long maxv,
860 u_int align)
861 {
862 u_long rbase, res;
863
864 MPASS(vm_map_min(map) <= minv);
865 MPASS(maxv <= vm_map_max(map));
866 MPASS(minv < maxv);
867 MPASS(minv + align < maxv);
868 arc4rand(&rbase, sizeof(rbase), 0);
869 res = roundup(minv, (u_long)align) + rbase % (maxv - minv);
870 res &= ~((u_long)align - 1);
871 if (res >= maxv)
872 res -= align;
873 KASSERT(res >= minv,
874 ("res %#lx < minv %#lx, maxv %#lx rbase %#lx",
875 res, minv, maxv, rbase));
876 KASSERT(res < maxv,
877 ("res %#lx > maxv %#lx, minv %#lx rbase %#lx",
878 res, maxv, minv, rbase));
879 return (res);
880 }
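/*
 * Worked example (invented values): minv = 0x100000, maxv = 0x200000,
 * align = 0x1000 and rbase = 0x1234567 yield
 * res = 0x100000 + (0x1234567 % 0x100000) = 0x134567, masked down to
 * 0x134000, satisfying minv <= res < maxv.
 */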
881
882 static int
883 __elfN(enforce_limits)(struct image_params *imgp, const Elf_Ehdr *hdr,
884 const Elf_Phdr *phdr, u_long et_dyn_addr)
885 {
886 struct vmspace *vmspace;
887 const char *err_str;
888 u_long text_size, data_size, total_size, text_addr, data_addr;
889 u_long seg_size, seg_addr;
890 int i;
891
892 err_str = NULL;
893 text_size = data_size = total_size = text_addr = data_addr = 0;
894
895 for (i = 0; i < hdr->e_phnum; i++) {
896 if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
897 continue;
898
899 seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
900 seg_size = round_page(phdr[i].p_memsz +
901 phdr[i].p_vaddr + et_dyn_addr - seg_addr);
902
903 /*
904 * Make the largest executable segment the official
905 * text segment and all others data.
906 *
907 * Note that obreak() assumes that data_addr + data_size == end
908 * of data load area, and the ELF file format expects segments
909 * to be sorted by address. If multiple data segments exist,
910 * the last one will be used.
911 */
912
913 if ((phdr[i].p_flags & PF_X) != 0 && text_size < seg_size) {
914 text_size = seg_size;
915 text_addr = seg_addr;
916 } else {
917 data_size = seg_size;
918 data_addr = seg_addr;
919 }
920 total_size += seg_size;
921 }
922
923 if (data_addr == 0 && data_size == 0) {
924 data_addr = text_addr;
925 data_size = text_size;
926 }
927
928 /*
929 * Check limits. It should be safe to check the
930 * limits after loading the segments since we do
931 * not actually fault in all the segments pages.
932 */
933 PROC_LOCK(imgp->proc);
934 if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA))
935 err_str = "Data segment size exceeds process limit";
936 else if (text_size > maxtsiz)
937 err_str = "Text segment size exceeds system limit";
938 else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM))
939 err_str = "Total segment size exceeds process limit";
940 else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0)
941 err_str = "Data segment size exceeds resource limit";
942 else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0)
943 err_str = "Total segment size exceeds resource limit";
944 PROC_UNLOCK(imgp->proc);
945 if (err_str != NULL) {
946 uprintf("%s\n", err_str);
947 return (ENOMEM);
948 }
949
950 vmspace = imgp->proc->p_vmspace;
951 vmspace->vm_tsize = text_size >> PAGE_SHIFT;
952 vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
953 vmspace->vm_dsize = data_size >> PAGE_SHIFT;
954 vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
955
956 return (0);
957 }
958
959 static int
960 __elfN(get_interp)(struct image_params *imgp, const Elf_Phdr *phdr,
961 char **interpp, bool *free_interpp)
962 {
963 struct thread *td;
964 char *interp;
965 int error, interp_name_len;
966
967 KASSERT(phdr->p_type == PT_INTERP,
968 ("%s: p_type %u != PT_INTERP", __func__, phdr->p_type));
969 ASSERT_VOP_LOCKED(imgp->vp, __func__);
970
971 td = curthread;
972
973 /* Path to interpreter */
974 if (phdr->p_filesz < 2 || phdr->p_filesz > MAXPATHLEN) {
975 uprintf("Invalid PT_INTERP\n");
976 return (ENOEXEC);
977 }
978
979 interp_name_len = phdr->p_filesz;
980 if (phdr->p_offset > PAGE_SIZE ||
981 interp_name_len > PAGE_SIZE - phdr->p_offset) {
982 /*
983 * The vnode lock might be needed by the pagedaemon to
984 * clean pages owned by the vnode. Do not allow sleep
985 * waiting for memory with the vnode locked, instead
986 * try non-sleepable allocation first, and if it
987 * fails, go to the slow path where we drop the lock
988 * and do M_WAITOK. A text reference prevents
989 * modifications to the vnode content.
990 */
991 interp = malloc(interp_name_len + 1, M_TEMP, M_NOWAIT);
992 if (interp == NULL) {
993 VOP_UNLOCK(imgp->vp);
994 interp = malloc(interp_name_len + 1, M_TEMP, M_WAITOK);
995 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
996 }
997
998 error = vn_rdwr(UIO_READ, imgp->vp, interp,
999 interp_name_len, phdr->p_offset,
1000 UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
1001 NOCRED, NULL, td);
1002 if (error != 0) {
1003 free(interp, M_TEMP);
1004 uprintf("i/o error PT_INTERP %d\n", error);
1005 return (error);
1006 }
1007 interp[interp_name_len] = '\0';
1008
1009 *interpp = interp;
1010 *free_interpp = true;
1011 return (0);
1012 }
1013
1014 interp = __DECONST(char *, imgp->image_header) + phdr->p_offset;
1015 if (interp[interp_name_len - 1] != '\0') {
1016 uprintf("Invalid PT_INTERP\n");
1017 return (ENOEXEC);
1018 }
1019
1020 *interpp = interp;
1021 *free_interpp = false;
1022 return (0);
1023 }
1024
1025 static int
1026 __elfN(load_interp)(struct image_params *imgp, const Elf_Brandinfo *brand_info,
1027 const char *interp, u_long *addr, u_long *entry)
1028 {
1029 char *path;
1030 int error;
1031
1032 if (brand_info->emul_path != NULL &&
1033 brand_info->emul_path[0] != '\0') {
1034 path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1035 snprintf(path, MAXPATHLEN, "%s%s",
1036 brand_info->emul_path, interp);
1037 error = __elfN(load_file)(imgp->proc, path, addr, entry);
1038 free(path, M_TEMP);
1039 if (error == 0)
1040 return (0);
1041 }
1042
1043 if (brand_info->interp_newpath != NULL &&
1044 (brand_info->interp_path == NULL ||
1045 strcmp(interp, brand_info->interp_path) == 0)) {
1046 error = __elfN(load_file)(imgp->proc,
1047 brand_info->interp_newpath, addr, entry);
1048 if (error == 0)
1049 return (0);
1050 }
1051
1052 error = __elfN(load_file)(imgp->proc, interp, addr, entry);
1053 if (error == 0)
1054 return (0);
1055
1056 uprintf("ELF interpreter %s not found, error %d\n", interp, error);
1057 return (error);
1058 }
1059
1060 /*
1061 * Impossible et_dyn_addr initial value indicating that the real base
1062 * must be calculated later with some randomization applied.
1063 */
1064 #define ET_DYN_ADDR_RAND 1
1065
1066 static int
1067 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
1068 {
1069 struct thread *td;
1070 const Elf_Ehdr *hdr;
1071 const Elf_Phdr *phdr;
1072 Elf_Auxargs *elf_auxargs;
1073 struct vmspace *vmspace;
1074 vm_map_t map;
1075 char *interp;
1076 Elf_Brandinfo *brand_info;
1077 struct sysentvec *sv;
1078 u_long addr, baddr, et_dyn_addr, entry, proghdr;
1079 u_long maxalign, mapsz, maxv, maxv1;
1080 uint32_t fctl0;
1081 int32_t osrel;
1082 bool free_interp;
1083 int error, i, n;
1084
1085 hdr = (const Elf_Ehdr *)imgp->image_header;
1086
1087 /*
1088 * Do we have a valid ELF header?
1089 *
1090 * Only allow ET_EXEC & ET_DYN here; reject ET_DYN later
1091 * if the particular brand doesn't support it.
1092 */
1093 if (__elfN(check_header)(hdr) != 0 ||
1094 (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
1095 return (-1);
1096
1097 /*
1098 * From here on down, we return an errno, not -1, as we've
1099 * detected an ELF file.
1100 */
1101
1102 if (!__elfN(phdr_in_zero_page)(hdr)) {
1103 uprintf("Program headers not in the first page\n");
1104 return (ENOEXEC);
1105 }
1106 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
1107 if (!aligned(phdr, Elf_Addr)) {
1108 uprintf("Unaligned program headers\n");
1109 return (ENOEXEC);
1110 }
1111
1112 n = error = 0;
1113 baddr = 0;
1114 osrel = 0;
1115 fctl0 = 0;
1116 entry = proghdr = 0;
1117 interp = NULL;
1118 free_interp = false;
1119 td = curthread;
1120 maxalign = PAGE_SIZE;
1121 mapsz = 0;
1122
1123 for (i = 0; i < hdr->e_phnum; i++) {
1124 switch (phdr[i].p_type) {
1125 case PT_LOAD:
1126 if (n == 0)
1127 baddr = phdr[i].p_vaddr;
1128 if (phdr[i].p_align > maxalign)
1129 maxalign = phdr[i].p_align;
1130 mapsz += phdr[i].p_memsz;
1131 n++;
1132
1133 /*
1134 * If this segment contains the program headers,
1135 * remember their virtual address for the AT_PHDR
1136 * aux entry. Static binaries don't usually include
1137 * a PT_PHDR entry.
1138 */
1139 if (phdr[i].p_offset == 0 &&
1140 hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
1141 <= phdr[i].p_filesz)
1142 proghdr = phdr[i].p_vaddr + hdr->e_phoff;
1143 break;
1144 case PT_INTERP:
1145 /* Path to interpreter */
1146 if (interp != NULL) {
1147 uprintf("Multiple PT_INTERP headers\n");
1148 error = ENOEXEC;
1149 goto ret;
1150 }
1151 error = __elfN(get_interp)(imgp, &phdr[i], &interp,
1152 &free_interp);
1153 if (error != 0)
1154 goto ret;
1155 break;
1156 case PT_GNU_STACK:
1157 if (__elfN(nxstack))
1158 imgp->stack_prot =
1159 __elfN(trans_prot)(phdr[i].p_flags);
1160 imgp->stack_sz = phdr[i].p_memsz;
1161 break;
1162 case PT_PHDR: /* Program header table info */
1163 proghdr = phdr[i].p_vaddr;
1164 break;
1165 }
1166 }
1167
1168 brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel, &fctl0);
1169 if (brand_info == NULL) {
1170 uprintf("ELF binary type \"%u\" not known.\n",
1171 hdr->e_ident[EI_OSABI]);
1172 error = ENOEXEC;
1173 goto ret;
1174 }
1175 sv = brand_info->sysvec;
1176 et_dyn_addr = 0;
1177 if (hdr->e_type == ET_DYN) {
1178 if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) {
1179 uprintf("Cannot execute shared object\n");
1180 error = ENOEXEC;
1181 goto ret;
1182 }
1183 /*
1184 * Honour the base load address from the dso if it is
1185 * non-zero for some reason.
1186 */
1187 if (baddr == 0) {
1188 if ((sv->sv_flags & SV_ASLR) == 0 ||
1189 (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0)
1190 et_dyn_addr = __elfN(pie_base);
1191 else if ((__elfN(pie_aslr_enabled) &&
1192 (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) ||
1193 (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0)
1194 et_dyn_addr = ET_DYN_ADDR_RAND;
1195 else
1196 et_dyn_addr = __elfN(pie_base);
1197 }
1198 }
1199
1200 /*
1201 * Avoid a possible deadlock if the current address space is destroyed
1202 * and that address space maps the locked vnode. In the common case,
1203 * the locked vnode's v_usecount is decremented but remains greater
1204 * than zero. Consequently, the vnode lock is not needed by vrele().
1205 * However, in cases where the vnode lock is external, such as nullfs,
1206 * v_usecount may become zero.
1207 *
1208 * The VV_TEXT flag prevents modifications to the executable while
1209 * the vnode is unlocked.
1210 */
1211 VOP_UNLOCK(imgp->vp);
1212
1213 /*
1214 * Decide whether to enable randomization of user mappings.
1215 * First, reset user preferences for the setid binaries.
1216 * Then, account for randomization support by the ABI and
1217 * for user preferences, and give PIE binaries special
1218 * treatment.
1219 */
1220 if (imgp->credential_setid) {
1221 PROC_LOCK(imgp->proc);
1222 imgp->proc->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE);
1223 PROC_UNLOCK(imgp->proc);
1224 }
1225 if ((sv->sv_flags & SV_ASLR) == 0 ||
1226 (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0 ||
1227 (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) {
1228 KASSERT(et_dyn_addr != ET_DYN_ADDR_RAND,
1229 ("et_dyn_addr == RAND and !ASLR"));
1230 } else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 ||
1231 (__elfN(aslr_enabled) && hdr->e_type == ET_EXEC) ||
1232 et_dyn_addr == ET_DYN_ADDR_RAND) {
1233 imgp->map_flags |= MAP_ASLR;
1234 /*
1235 * If the user does not care about sbrk, utilize the bss
1236 * grow region for mappings as well. We can select
1237 * the base for the image anywhere and still not suffer
1238 * from fragmentation.
1239 */
1240 if (!__elfN(aslr_honor_sbrk) ||
1241 (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0)
1242 imgp->map_flags |= MAP_ASLR_IGNSTART;
1243 }
1244
1245 if (!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0)
1246 imgp->map_flags |= MAP_WXORX;
1247
1248 error = exec_new_vmspace(imgp, sv);
1249 vmspace = imgp->proc->p_vmspace;
1250 map = &vmspace->vm_map;
1251
1252 imgp->proc->p_sysent = sv;
1253
1254 maxv = vm_map_max(map) - lim_max(td, RLIMIT_STACK);
1255 if (et_dyn_addr == ET_DYN_ADDR_RAND) {
1256 KASSERT((map->flags & MAP_ASLR) != 0,
1257 ("ET_DYN_ADDR_RAND but !MAP_ASLR"));
1258 et_dyn_addr = __CONCAT(rnd_, __elfN(base))(map,
1259 vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA),
1260 /* reserve half of the address space to interpreter */
1261 maxv / 2, 1UL << flsl(maxalign));
1262 }
1263
1264 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
1265 if (error != 0)
1266 goto ret;
1267
1268 error = __elfN(load_sections)(imgp, hdr, phdr, et_dyn_addr, NULL);
1269 if (error != 0)
1270 goto ret;
1271
1272 error = __elfN(enforce_limits)(imgp, hdr, phdr, et_dyn_addr);
1273 if (error != 0)
1274 goto ret;
1275
1276 entry = (u_long)hdr->e_entry + et_dyn_addr;
1277
1278 /*
1279 * We load the dynamic linker where a userland call
1280 * to mmap(0, ...) would put it. The rationale behind this
1281 * calculation is that it leaves room for the heap to grow to
1282 * its maximum allowed size.
1283 */
1284 addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td,
1285 RLIMIT_DATA));
1286 if ((map->flags & MAP_ASLR) != 0) {
1287 maxv1 = maxv / 2 + addr / 2;
1288 MPASS(maxv1 >= addr); /* No overflow */
1289 map->anon_loc = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1,
1290 MAXPAGESIZES > 1 ? pagesizes[1] : pagesizes[0]);
1291 } else {
1292 map->anon_loc = addr;
1293 }
1294
1295 imgp->entry_addr = entry;
1296
1297 if (interp != NULL) {
1298 VOP_UNLOCK(imgp->vp);
1299 if ((map->flags & MAP_ASLR) != 0) {
1300 /* Assume that the interpreter fits into 1/4 of AS */
1301 maxv1 = maxv / 2 + addr / 2;
1302 MPASS(maxv1 >= addr); /* No overflow */
1303 addr = __CONCAT(rnd_, __elfN(base))(map, addr,
1304 maxv1, PAGE_SIZE);
1305 }
1306 error = __elfN(load_interp)(imgp, brand_info, interp, &addr,
1307 &imgp->entry_addr);
1308 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
1309 if (error != 0)
1310 goto ret;
1311 } else
1312 addr = et_dyn_addr;
1313
1314 /*
1315 * Construct auxargs table (used by the copyout_auxargs routine)
1316 */
1317 elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_NOWAIT);
1318 if (elf_auxargs == NULL) {
1319 VOP_UNLOCK(imgp->vp);
1320 elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
1321 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
1322 }
1323 elf_auxargs->execfd = -1;
1324 elf_auxargs->phdr = proghdr + et_dyn_addr;
1325 elf_auxargs->phent = hdr->e_phentsize;
1326 elf_auxargs->phnum = hdr->e_phnum;
1327 elf_auxargs->pagesz = PAGE_SIZE;
1328 elf_auxargs->base = addr;
1329 elf_auxargs->flags = 0;
1330 elf_auxargs->entry = entry;
1331 elf_auxargs->hdr_eflags = hdr->e_flags;
1332
1333 imgp->auxargs = elf_auxargs;
1334 imgp->interpreted = 0;
1335 imgp->reloc_base = addr;
1336 imgp->proc->p_osrel = osrel;
1337 imgp->proc->p_fctl0 = fctl0;
1338 imgp->proc->p_elf_machine = hdr->e_machine;
1339 imgp->proc->p_elf_flags = hdr->e_flags;
1340
1341 ret:
1342 if (free_interp)
1343 free(interp, M_TEMP);
1344 return (error);
1345 }
1346
1347 #define suword __CONCAT(suword, __ELF_WORD_SIZE)
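/* For example, with __ELF_WORD_SIZE == 64 this selects suword64(). */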
1348
1349 int
1350 __elfN(freebsd_copyout_auxargs)(struct image_params *imgp, uintptr_t base)
1351 {
1352 Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
1353 Elf_Auxinfo *argarray, *pos;
1354 int error;
1355
1356 argarray = pos = malloc(AT_COUNT * sizeof(*pos), M_TEMP,
1357 M_WAITOK | M_ZERO);
1358
1359 if (args->execfd != -1)
1360 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
1361 AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
1362 AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
1363 AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
1364 AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
1365 AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
1366 AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
1367 AUXARGS_ENTRY(pos, AT_BASE, args->base);
1368 AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags);
1369 if (imgp->execpathp != 0)
1370 AUXARGS_ENTRY_PTR(pos, AT_EXECPATH, imgp->execpathp);
1371 AUXARGS_ENTRY(pos, AT_OSRELDATE,
1372 imgp->proc->p_ucred->cr_prison->pr_osreldate);
1373 if (imgp->canary != 0) {
1374 AUXARGS_ENTRY_PTR(pos, AT_CANARY, imgp->canary);
1375 AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen);
1376 }
1377 AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus);
1378 if (imgp->pagesizes != 0) {
1379 AUXARGS_ENTRY_PTR(pos, AT_PAGESIZES, imgp->pagesizes);
1380 AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen);
1381 }
1382 if (imgp->sysent->sv_timekeep_base != 0) {
1383 AUXARGS_ENTRY(pos, AT_TIMEKEEP,
1384 imgp->sysent->sv_timekeep_base);
1385 }
1386 AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj
1387 != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
1388 imgp->sysent->sv_stackprot);
1389 if (imgp->sysent->sv_hwcap != NULL)
1390 AUXARGS_ENTRY(pos, AT_HWCAP, *imgp->sysent->sv_hwcap);
1391 if (imgp->sysent->sv_hwcap2 != NULL)
1392 AUXARGS_ENTRY(pos, AT_HWCAP2, *imgp->sysent->sv_hwcap2);
1393 AUXARGS_ENTRY(pos, AT_BSDFLAGS, __elfN(sigfastblock) ?
1394 ELF_BSDF_SIGFASTBLK : 0);
1395 AUXARGS_ENTRY(pos, AT_ARGC, imgp->args->argc);
1396 AUXARGS_ENTRY_PTR(pos, AT_ARGV, imgp->argv);
1397 AUXARGS_ENTRY(pos, AT_ENVC, imgp->args->envc);
1398 AUXARGS_ENTRY_PTR(pos, AT_ENVV, imgp->envv);
1399 AUXARGS_ENTRY_PTR(pos, AT_PS_STRINGS, imgp->ps_strings);
1400 if (imgp->sysent->sv_fxrng_gen_base != 0)
1401 AUXARGS_ENTRY(pos, AT_FXRNG, imgp->sysent->sv_fxrng_gen_base);
1402 AUXARGS_ENTRY(pos, AT_NULL, 0);
1403
1404 free(imgp->auxargs, M_TEMP);
1405 imgp->auxargs = NULL;
1406 KASSERT(pos - argarray <= AT_COUNT, ("Too many auxargs"));
1407
1408 error = copyout(argarray, (void *)base, sizeof(*argarray) * AT_COUNT);
1409 free(argarray, M_TEMP);
1410 return (error);
1411 }
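/*
 * Consumer sketch (illustrative): userland reads these entries back with
 * elf_aux_info(3) rather than walking the auxv array by hand, e.g.
 *
 *	int pagesize;
 *	if (elf_aux_info(AT_PAGESZ, &pagesize, sizeof(pagesize)) == 0)
 *		printf("page size: %d\n", pagesize);
 */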
1412
1413 int
1414 __elfN(freebsd_fixup)(uintptr_t *stack_base, struct image_params *imgp)
1415 {
1416 Elf_Addr *base;
1417
1418 base = (Elf_Addr *)*stack_base;
1419 base--;
1420 if (suword(base, imgp->args->argc) == -1)
1421 return (EFAULT);
1422 *stack_base = (uintptr_t)base;
1423 return (0);
1424 }
1425
1426 /*
1427 * Code for generating ELF core dumps.
1428 */
1429
1430 typedef void (*segment_callback)(vm_map_entry_t, void *);
1431
1432 /* Closure for cb_put_phdr(). */
1433 struct phdr_closure {
1434 Elf_Phdr *phdr; /* Program header to fill in */
1435 Elf_Off offset; /* Offset of segment in core file */
1436 };
1437
1438 /* Closure for cb_size_segment(). */
1439 struct sseg_closure {
1440 int count; /* Count of writable segments. */
1441 size_t size; /* Total size of all writable segments. */
1442 };
1443
1444 typedef void (*outfunc_t)(void *, struct sbuf *, size_t *);
1445
1446 struct note_info {
1447 int type; /* Note type. */
1448 outfunc_t outfunc; /* Output function. */
1449 void *outarg; /* Argument for the output function. */
1450 size_t outsize; /* Output size. */
1451 TAILQ_ENTRY(note_info) link; /* Link to the next note info. */
1452 };
1453
1454 TAILQ_HEAD(note_info_list, note_info);
1455
1456 /* Coredump output parameters. */
1457 struct coredump_params {
1458 off_t offset;
1459 struct ucred *active_cred;
1460 struct ucred *file_cred;
1461 struct thread *td;
1462 struct vnode *vp;
1463 struct compressor *comp;
1464 };
1465
1466 extern int compress_user_cores;
1467 extern int compress_user_cores_level;
1468
1469 static void cb_put_phdr(vm_map_entry_t, void *);
1470 static void cb_size_segment(vm_map_entry_t, void *);
1471 static int core_write(struct coredump_params *, const void *, size_t, off_t,
1472 enum uio_seg, size_t *);
1473 static void each_dumpable_segment(struct thread *, segment_callback, void *);
1474 static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t,
1475 struct note_info_list *, size_t);
1476 static void __elfN(prepare_notes)(struct thread *, struct note_info_list *,
1477 size_t *);
1478 static void __elfN(puthdr)(struct thread *, void *, size_t, int, size_t);
1479 static void __elfN(putnote)(struct note_info *, struct sbuf *);
1480 static size_t register_note(struct note_info_list *, int, outfunc_t, void *);
1481 static int sbuf_drain_core_output(void *, const char *, int);
1482
1483 static void __elfN(note_fpregset)(void *, struct sbuf *, size_t *);
1484 static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *);
1485 static void __elfN(note_prstatus)(void *, struct sbuf *, size_t *);
1486 static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *);
1487 static void __elfN(note_thrmisc)(void *, struct sbuf *, size_t *);
1488 static void __elfN(note_ptlwpinfo)(void *, struct sbuf *, size_t *);
1489 static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *);
1490 static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *);
1491 static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *);
1492 static void note_procstat_files(void *, struct sbuf *, size_t *);
1493 static void note_procstat_groups(void *, struct sbuf *, size_t *);
1494 static void note_procstat_osrel(void *, struct sbuf *, size_t *);
1495 static void note_procstat_rlimit(void *, struct sbuf *, size_t *);
1496 static void note_procstat_umask(void *, struct sbuf *, size_t *);
1497 static void note_procstat_vmmap(void *, struct sbuf *, size_t *);
1498
1499 /*
1500 * Write out a core segment to the compression stream.
1501 */
1502 static int
1503 compress_chunk(struct coredump_params *p, char *base, char *buf, u_int len)
1504 {
1505 u_int chunk_len;
1506 int error;
1507
1508 while (len > 0) {
1509 chunk_len = MIN(len, CORE_BUF_SIZE);
1510
1511 /*
1512 * We can get an EFAULT error here. In that case, zero
1513 * out the current chunk of the segment.
1514 */
1515 error = copyin(base, buf, chunk_len);
1516 if (error != 0)
1517 bzero(buf, chunk_len);
1518 error = compressor_write(p->comp, buf, chunk_len);
1519 if (error != 0)
1520 break;
1521 base += chunk_len;
1522 len -= chunk_len;
1523 }
1524 return (error);
1525 }
1526
1527 static int
1528 core_compressed_write(void *base, size_t len, off_t offset, void *arg)
1529 {
1530
1531 return (core_write((struct coredump_params *)arg, base, len, offset,
1532 UIO_SYSSPACE, NULL));
1533 }
1534
1535 static int
1536 core_write(struct coredump_params *p, const void *base, size_t len,
1537 off_t offset, enum uio_seg seg, size_t *resid)
1538 {
1539
1540 return (vn_rdwr_inchunks(UIO_WRITE, p->vp, __DECONST(void *, base),
1541 len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
1542 p->active_cred, p->file_cred, resid, p->td));
1543 }
1544
1545 static int
1546 core_output(char *base, size_t len, off_t offset, struct coredump_params *p,
1547 void *tmpbuf)
1548 {
1549 vm_map_t map;
1550 struct mount *mp;
1551 size_t resid, runlen;
1552 int error;
1553 bool success;
1554
1555 KASSERT((uintptr_t)base % PAGE_SIZE == 0,
1556 ("%s: user address %p is not page-aligned", __func__, base));
1557
1558 if (p->comp != NULL)
1559 return (compress_chunk(p, base, tmpbuf, len));
1560
1561 map = &p->td->td_proc->p_vmspace->vm_map;
1562 for (; len > 0; base += runlen, offset += runlen, len -= runlen) {
1563 /*
1564 * Attempt to page in all virtual pages in the range. If a
1565 * virtual page is not backed by the pager, it is represented as
1566 * a hole in the file. This can occur with zero-filled
1567 * anonymous memory or truncated files, for example.
1568 */
1569 for (runlen = 0; runlen < len; runlen += PAGE_SIZE) {
1570 error = vm_fault(map, (uintptr_t)base + runlen,
1571 VM_PROT_READ, VM_FAULT_NOFILL, NULL);
1572 if (runlen == 0)
1573 success = error == KERN_SUCCESS;
1574 else if ((error == KERN_SUCCESS) != success)
1575 break;
1576 }
1577
1578 if (success) {
1579 error = core_write(p, base, runlen, offset,
1580 UIO_USERSPACE, &resid);
1581 if (error != 0) {
1582 if (error != EFAULT)
1583 break;
1584
1585 /*
1586 * EFAULT may be returned if the user mapping
1587 * could not be accessed, e.g., because a mapped
1588 * file has been truncated. Skip the page if no
1589 * progress was made, to protect against a
1590 * hypothetical scenario where vm_fault() was
1591 * successful but core_write() returns EFAULT
1592 * anyway.
1593 */
1594 runlen -= resid;
1595 if (runlen == 0) {
1596 success = false;
1597 runlen = PAGE_SIZE;
1598 }
1599 }
1600 }
1601 if (!success) {
1602 error = vn_start_write(p->vp, &mp, V_WAIT);
1603 if (error != 0)
1604 break;
1605 vn_lock(p->vp, LK_EXCLUSIVE | LK_RETRY);
1606 error = vn_truncate_locked(p->vp, offset + runlen,
1607 false, p->td->td_ucred);
1608 VOP_UNLOCK(p->vp);
1609 vn_finished_write(mp);
1610 if (error != 0)
1611 break;
1612 }
1613 }
1614 return (error);
1615 }
1616
1617 /*
1618 * Drain into a core file.
1619 */
1620 static int
1621 sbuf_drain_core_output(void *arg, const char *data, int len)
1622 {
1623 struct coredump_params *p;
1624 int error, locked;
1625
1626 p = (struct coredump_params *)arg;
1627
1628 /*
1629 * Some kern_proc output routines that print to this sbuf may
1630 * call us with the process lock held. Draining with a
1631 * non-sleepable lock held is unsafe. The lock is needed for
1632 * those routines when dumping a live process. In our case we
1633 * can safely release the lock before draining and reacquire
1634 * it afterwards.
1635 */
1636 locked = PROC_LOCKED(p->td->td_proc);
1637 if (locked)
1638 PROC_UNLOCK(p->td->td_proc);
1639 if (p->comp != NULL)
1640 error = compressor_write(p->comp, __DECONST(char *, data), len);
1641 else
1642 error = core_write(p, __DECONST(void *, data), len, p->offset,
1643 UIO_SYSSPACE, NULL);
1644 if (locked)
1645 PROC_LOCK(p->td->td_proc);
1646 if (error != 0)
1647 return (-error);
1648 p->offset += len;
1649 return (len);
1650 }
1651
1652 int
1653 __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
1654 {
1655 struct ucred *cred = td->td_ucred;
1656 int error = 0;
1657 struct sseg_closure seginfo;
1658 struct note_info_list notelst;
1659 struct coredump_params params;
1660 struct note_info *ninfo;
1661 void *hdr, *tmpbuf;
1662 size_t hdrsize, notesz, coresize;
1663
1664 hdr = NULL;
1665 tmpbuf = NULL;
1666 TAILQ_INIT(&notelst);
1667
1668 /* Size the program segments. */
1669 seginfo.count = 0;
1670 seginfo.size = 0;
1671 each_dumpable_segment(td, cb_size_segment, &seginfo);
1672
1673 /*
1674 * Collect info about the core file header area.
1675 */
1676 hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count);
1677 if (seginfo.count + 1 >= PN_XNUM)
1678 hdrsize += sizeof(Elf_Shdr);
1679 __elfN(prepare_notes)(td, &notelst, &notesz);
1680 coresize = round_page(hdrsize + notesz) + seginfo.size;
1681
1682 /* Set up core dump parameters. */
1683 params.offset = 0;
1684 params.active_cred = cred;
1685 params.file_cred = NOCRED;
1686 params.td = td;
1687 params.vp = vp;
1688 params.comp = NULL;
1689
1690 #ifdef RACCT
1691 if (racct_enable) {
1692 PROC_LOCK(td->td_proc);
1693 error = racct_add(td->td_proc, RACCT_CORE, coresize);
1694 PROC_UNLOCK(td->td_proc);
1695 if (error != 0) {
1696 error = EFAULT;
1697 goto done;
1698 }
1699 }
1700 #endif
1701 if (coresize >= limit) {
1702 error = EFAULT;
1703 goto done;
1704 }
1705
1706 /* Create a compression stream if necessary. */
1707 if (compress_user_cores != 0) {
1708 params.comp = compressor_init(core_compressed_write,
1709 compress_user_cores, CORE_BUF_SIZE,
1710 compress_user_cores_level, &params);
1711 if (params.comp == NULL) {
1712 error = EFAULT;
1713 goto done;
1714 }
1715 tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO);
1716 }
1717
1718 /*
1719 * Allocate memory for building the header, fill it up,
1720 * and write it out following the notes.
1721 */
1722 hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
1723 error = __elfN(corehdr)(&params, seginfo.count, hdr, hdrsize, &notelst,
1724 notesz);
1725
1726 /* Write the contents of all of the writable segments. */
1727 if (error == 0) {
1728 Elf_Phdr *php;
1729 off_t offset;
1730 int i;
1731
1732 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
1733 offset = round_page(hdrsize + notesz);
1734 for (i = 0; i < seginfo.count; i++) {
1735 error = core_output((char *)(uintptr_t)php->p_vaddr,
1736 php->p_filesz, offset, &params, tmpbuf);
1737 if (error != 0)
1738 break;
1739 offset += php->p_filesz;
1740 php++;
1741 }
1742 if (error == 0 && params.comp != NULL)
1743 error = compressor_flush(params.comp);
1744 }
1745 if (error) {
1746 log(LOG_WARNING,
1747 "Failed to write core file for process %s (error %d)\n",
1748 curproc->p_comm, error);
1749 }
1750
1751 done:
1752 free(tmpbuf, M_TEMP);
1753 if (params.comp != NULL)
1754 compressor_fini(params.comp);
1755 while ((ninfo = TAILQ_FIRST(&notelst)) != NULL) {
1756 TAILQ_REMOVE(&notelst, ninfo, link);
1757 free(ninfo, M_TEMP);
1758 }
1759 if (hdr != NULL)
1760 free(hdr, M_TEMP);
1761
1762 return (error);
1763 }
1764
1765 /*
1766 * A callback for each_dumpable_segment() to write out the segment's
1767 * program header entry.
1768 */
1769 static void
1770 cb_put_phdr(vm_map_entry_t entry, void *closure)
1771 {
1772 struct phdr_closure *phc = (struct phdr_closure *)closure;
1773 Elf_Phdr *phdr = phc->phdr;
1774
1775 phc->offset = round_page(phc->offset);
1776
1777 phdr->p_type = PT_LOAD;
1778 phdr->p_offset = phc->offset;
1779 phdr->p_vaddr = entry->start;
1780 phdr->p_paddr = 0;
1781 phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1782 phdr->p_align = PAGE_SIZE;
1783 phdr->p_flags = __elfN(untrans_prot)(entry->protection);
1784
1785 phc->offset += phdr->p_filesz;
1786 phc->phdr++;
1787 }
1788
1789 /*
1790 * A callback for each_dumpable_segment() to gather information about
1791 * the number of segments and their total size.
1792 */
1793 static void
1794 cb_size_segment(vm_map_entry_t entry, void *closure)
1795 {
1796 struct sseg_closure *ssc = (struct sseg_closure *)closure;
1797
1798 ssc->count++;
1799 ssc->size += entry->end - entry->start;
1800 }
1801
1802 /*
1803 * For each writable segment in the process's memory map, call the given
1804 * function with a pointer to the map entry and some arbitrary
1805 * caller-supplied data.
1806 */
1807 static void
1808 each_dumpable_segment(struct thread *td, segment_callback func, void *closure)
1809 {
1810 struct proc *p = td->td_proc;
1811 vm_map_t map = &p->p_vmspace->vm_map;
1812 vm_map_entry_t entry;
1813 vm_object_t backing_object, object;
1814 bool ignore_entry;
1815
1816 vm_map_lock_read(map);
1817 VM_MAP_ENTRY_FOREACH(entry, map) {
1818 /*
1819 * Don't dump inaccessible mappings; deal with legacy
1820 * coredump mode.
1821 *
1822 * Note that read-only segments related to the elf binary
1823 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1824 * need to arbitrarily ignore such segments.
1825 */
1826 if (elf_legacy_coredump) {
1827 if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1828 continue;
1829 } else {
1830 if ((entry->protection & VM_PROT_ALL) == 0)
1831 continue;
1832 }
1833
1834 /*
1835 * Don't include a memory segment in the coredump if
1836 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1837 * madvise(2). Do not dump submaps (i.e. parts of the
1838 * kernel map).
1839 */
1840 if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1841 continue;
1842
1843 if ((object = entry->object.vm_object) == NULL)
1844 continue;
1845
1846 /* Ignore memory-mapped devices and such things. */
1847 VM_OBJECT_RLOCK(object);
1848 while ((backing_object = object->backing_object) != NULL) {
1849 VM_OBJECT_RLOCK(backing_object);
1850 VM_OBJECT_RUNLOCK(object);
1851 object = backing_object;
1852 }
1853 ignore_entry = (object->flags & OBJ_FICTITIOUS) != 0;
1854 VM_OBJECT_RUNLOCK(object);
1855 if (ignore_entry)
1856 continue;
1857
1858 (*func)(entry, closure);
1859 }
1860 vm_map_unlock_read(map);
1861 }
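/*
 * A minimal sketch of the two-pass pattern these callbacks support
 * (variable names here are illustrative, not the exact ones used by
 * the core dump path above): first size the segments with
 * cb_size_segment(), then emit one PT_LOAD header per segment with
 * cb_put_phdr().
 */
#if 0
	struct sseg_closure seginfo = { .count = 0, .size = 0 };
	struct phdr_closure phc;

	each_dumpable_segment(td, cb_size_segment, &seginfo);
	/* ... allocate a header with room for seginfo.count + 1 phdrs ... */
	phc.phdr = load_phdrs;		/* hypothetical: the slot after PT_NOTE */
	phc.offset = round_page(hdrsize + notesz);
	each_dumpable_segment(td, cb_put_phdr, &phc);
#endif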
1862
1863 /*
1864 * Write the core file header to the file, including padding up to
1865 * the page boundary.
1866 */
1867 static int
1868 __elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr,
1869 size_t hdrsize, struct note_info_list *notelst, size_t notesz)
1870 {
1871 struct note_info *ninfo;
1872 struct sbuf *sb;
1873 int error;
1874
1875 /* Fill in the header. */
1876 bzero(hdr, hdrsize);
1877 __elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz);
1878
1879 sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN);
1880 sbuf_set_drain(sb, sbuf_drain_core_output, p);
1881 sbuf_start_section(sb, NULL);
1882 sbuf_bcat(sb, hdr, hdrsize);
1883 TAILQ_FOREACH(ninfo, notelst, link)
1884 __elfN(putnote)(ninfo, sb);
1885 /* Align up to a page boundary for the program segments. */
1886 sbuf_end_section(sb, -1, PAGE_SIZE, 0);
1887 error = sbuf_finish(sb);
1888 sbuf_delete(sb);
1889
1890 return (error);
1891 }
1892
1893 static void
1894 __elfN(prepare_notes)(struct thread *td, struct note_info_list *list,
1895 size_t *sizep)
1896 {
1897 struct proc *p;
1898 struct thread *thr;
1899 size_t size;
1900
1901 p = td->td_proc;
1902 size = 0;
1903
1904 size += register_note(list, NT_PRPSINFO, __elfN(note_prpsinfo), p);
1905
1906 /*
1907 * To have the debugger select the right thread (LWP) as the initial
1908 * thread, we dump the state of the thread passed to us in td first.
1909 * This is the thread that causes the core dump and thus likely to
1910 * be the right thread one wants to have selected in the debugger.
1911 */
1912 thr = td;
1913 while (thr != NULL) {
1914 size += register_note(list, NT_PRSTATUS,
1915 __elfN(note_prstatus), thr);
1916 size += register_note(list, NT_FPREGSET,
1917 __elfN(note_fpregset), thr);
1918 size += register_note(list, NT_THRMISC,
1919 __elfN(note_thrmisc), thr);
1920 size += register_note(list, NT_PTLWPINFO,
1921 __elfN(note_ptlwpinfo), thr);
1922 size += register_note(list, -1,
1923 __elfN(note_threadmd), thr);
1924
1925 thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
1926 TAILQ_NEXT(thr, td_plist);
1927 if (thr == td)
1928 thr = TAILQ_NEXT(thr, td_plist);
1929 }
1930
1931 size += register_note(list, NT_PROCSTAT_PROC,
1932 __elfN(note_procstat_proc), p);
1933 size += register_note(list, NT_PROCSTAT_FILES,
1934 note_procstat_files, p);
1935 size += register_note(list, NT_PROCSTAT_VMMAP,
1936 note_procstat_vmmap, p);
1937 size += register_note(list, NT_PROCSTAT_GROUPS,
1938 note_procstat_groups, p);
1939 size += register_note(list, NT_PROCSTAT_UMASK,
1940 note_procstat_umask, p);
1941 size += register_note(list, NT_PROCSTAT_RLIMIT,
1942 note_procstat_rlimit, p);
1943 size += register_note(list, NT_PROCSTAT_OSREL,
1944 note_procstat_osrel, p);
1945 size += register_note(list, NT_PROCSTAT_PSSTRINGS,
1946 __elfN(note_procstat_psstrings), p);
1947 size += register_note(list, NT_PROCSTAT_AUXV,
1948 __elfN(note_procstat_auxv), p);
1949
1950 *sizep = size;
1951 }
1952
1953 static void
1954 __elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs,
1955 size_t notesz)
1956 {
1957 Elf_Ehdr *ehdr;
1958 Elf_Phdr *phdr;
1959 Elf_Shdr *shdr;
1960 struct phdr_closure phc;
1961
1962 ehdr = (Elf_Ehdr *)hdr;
1963
1964 ehdr->e_ident[EI_MAG0] = ELFMAG0;
1965 ehdr->e_ident[EI_MAG1] = ELFMAG1;
1966 ehdr->e_ident[EI_MAG2] = ELFMAG2;
1967 ehdr->e_ident[EI_MAG3] = ELFMAG3;
1968 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1969 ehdr->e_ident[EI_DATA] = ELF_DATA;
1970 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1971 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1972 ehdr->e_ident[EI_ABIVERSION] = 0;
1973 ehdr->e_ident[EI_PAD] = 0;
1974 ehdr->e_type = ET_CORE;
1975 ehdr->e_machine = td->td_proc->p_elf_machine;
1976 ehdr->e_version = EV_CURRENT;
1977 ehdr->e_entry = 0;
1978 ehdr->e_phoff = sizeof(Elf_Ehdr);
1979 ehdr->e_flags = td->td_proc->p_elf_flags;
1980 ehdr->e_ehsize = sizeof(Elf_Ehdr);
1981 ehdr->e_phentsize = sizeof(Elf_Phdr);
1982 ehdr->e_shentsize = sizeof(Elf_Shdr);
1983 ehdr->e_shstrndx = SHN_UNDEF;
1984 if (numsegs + 1 < PN_XNUM) {
1985 ehdr->e_phnum = numsegs + 1;
1986 ehdr->e_shnum = 0;
1987 } else {
1988 ehdr->e_phnum = PN_XNUM;
1989 ehdr->e_shnum = 1;
1990
1991 ehdr->e_shoff = ehdr->e_phoff +
1992 (numsegs + 1) * ehdr->e_phentsize;
1993 KASSERT(ehdr->e_shoff == hdrsize - sizeof(Elf_Shdr),
1994 ("e_shoff: %zu, hdrsize - shdr: %zu",
1995 (size_t)ehdr->e_shoff, hdrsize - sizeof(Elf_Shdr)));
1996
1997 shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff);
1998 memset(shdr, 0, sizeof(*shdr));
1999 /*
2000 * A special first section is used to hold large segment and
2001 * section counts. This was proposed by Sun Microsystems in
2002 * Solaris and has been adopted by Linux; the standard ELF
2003 * tools are already familiar with the technique.
2004 *
2005 * See table 7-7 of the Solaris "Linker and Libraries Guide"
2006 * (or 12-7 depending on the version of the document) for more
2007 * details.
2008 */
2009 shdr->sh_type = SHT_NULL;
2010 shdr->sh_size = ehdr->e_shnum;
2011 shdr->sh_link = ehdr->e_shstrndx;
2012 shdr->sh_info = numsegs + 1;
2013 }
2014
2015 /*
2016 * Fill in the program header entries.
2017 */
2018 phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff);
2019
2020 /* The note segment. */
2021 phdr->p_type = PT_NOTE;
2022 phdr->p_offset = hdrsize;
2023 phdr->p_vaddr = 0;
2024 phdr->p_paddr = 0;
2025 phdr->p_filesz = notesz;
2026 phdr->p_memsz = 0;
2027 phdr->p_flags = PF_R;
2028 phdr->p_align = ELF_NOTE_ROUNDSIZE;
2029 phdr++;
2030
2031 /* All the dumpable segments from the program. */
2032 phc.phdr = phdr;
2033 phc.offset = round_page(hdrsize + notesz);
2034 each_dumpable_segment(td, cb_put_phdr, &phc);
2035 }
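/*
 * A hedged sketch (userland-style, not part of this file's logic) of
 * how a core-file reader recovers the real program header count when
 * e_phnum has been clamped to PN_XNUM, matching the layout emitted
 * above: the count lives in sh_info of section header 0.
 */
#if 0
	size_t nphdrs;

	if (ehdr->e_phnum != PN_XNUM)
		nphdrs = ehdr->e_phnum;
	else
		nphdrs = ((const Elf_Shdr *)((const char *)ehdr +
		    ehdr->e_shoff))->sh_info;
#endif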
2036
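/*
 * Register a note emitter and return the space its note will occupy
 * in the core file. A type of -1 marks raw output (used for the
 * machine-dependent thread notes): it is accounted for by payload
 * size alone, and __elfN(putnote)() lets the emitter write its own
 * framing instead of prepending an Elf_Note header.
 */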
2037 static size_t
2038 register_note(struct note_info_list *list, int type, outfunc_t out, void *arg)
2039 {
2040 struct note_info *ninfo;
2041 size_t size, notesize;
2042
2043 size = 0;
2044 out(arg, NULL, &size);
2045 ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
2046 ninfo->type = type;
2047 ninfo->outfunc = out;
2048 ninfo->outarg = arg;
2049 ninfo->outsize = size;
2050 TAILQ_INSERT_TAIL(list, ninfo, link);
2051
2052 if (type == -1)
2053 return (size);
2054
2055 notesize = sizeof(Elf_Note) + /* note header */
2056 roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
2057 /* note name */
2058 roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */
2059
2060 return (notesize);
2061 }
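/*
 * Worked example of the computation above, assuming the usual 8-byte
 * "FreeBSD" vendor string: a note with a 4-byte descriptor takes
 * sizeof(Elf_Note) (12) + roundup2(8, 4) (8) + roundup2(4, 4) (4) =
 * 24 bytes in the core file.
 */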
2062
2063 static size_t
2064 append_note_data(const void *src, void *dst, size_t len)
2065 {
2066 size_t padded_len;
2067
2068 padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE);
2069 if (dst != NULL) {
2070 bcopy(src, dst, len);
2071 bzero((char *)dst + len, padded_len - len);
2072 }
2073 return (padded_len);
2074 }
2075
2076 size_t
2077 __elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp)
2078 {
2079 Elf_Note *note;
2080 char *buf;
2081 size_t notesize;
2082
2083 buf = dst;
2084 if (buf != NULL) {
2085 note = (Elf_Note *)buf;
2086 note->n_namesz = sizeof(FREEBSD_ABI_VENDOR);
2087 note->n_descsz = size;
2088 note->n_type = type;
2089 buf += sizeof(*note);
2090 buf += append_note_data(FREEBSD_ABI_VENDOR, buf,
2091 sizeof(FREEBSD_ABI_VENDOR));
2092 append_note_data(src, buf, size);
2093 if (descp != NULL)
2094 *descp = buf;
2095 }
2096
2097 notesize = sizeof(Elf_Note) + /* note header */
2098 roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
2099 /* note name */
2100 roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */
2101
2102 return (notesize);
2103 }
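/*
 * Resulting layout when dst is non-NULL (offsets assume the 8-byte
 * "FreeBSD" vendor string):
 *
 *	+0	Elf_Note header (n_namesz, n_descsz, n_type)
 *	+12	name, zero-padded to ELF_NOTE_ROUNDSIZE
 *	+20	descriptor, zero-padded likewise (*descp points here)
 */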
2104
2105 static void
2106 __elfN(putnote)(struct note_info *ninfo, struct sbuf *sb)
2107 {
2108 Elf_Note note;
2109 ssize_t old_len, sect_len;
2110 size_t new_len, descsz, i;
2111
2112 if (ninfo->type == -1) {
2113 ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
2114 return;
2115 }
2116
2117 note.n_namesz = sizeof(FREEBSD_ABI_VENDOR);
2118 note.n_descsz = ninfo->outsize;
2119 note.n_type = ninfo->type;
2120
2121 sbuf_bcat(sb, &note, sizeof(note));
2122 sbuf_start_section(sb, &old_len);
2123 sbuf_bcat(sb, FREEBSD_ABI_VENDOR, sizeof(FREEBSD_ABI_VENDOR));
2124 sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
2125 if (note.n_descsz == 0)
2126 return;
2127 sbuf_start_section(sb, &old_len);
2128 ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
2129 sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
2130 if (sect_len < 0)
2131 return;
2132
2133 new_len = (size_t)sect_len;
2134 descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE);
2135 if (new_len < descsz) {
2136 /*
2137 * It is expected that individual note emitters will correctly
2138 * predict their output size and fill up to that size
2139 * themselves, padding in a format-specific way if needed.
2140 * However, in case they don't, just do it here with zeros.
2141 */
2142 for (i = 0; i < descsz - new_len; i++)
2143 sbuf_putc(sb, 0);
2144 } else if (new_len > descsz) {
2145 /*
2146 * We can't always truncate sb -- we may have drained some
2147 * of it already.
2148 */
2149 KASSERT(new_len == descsz, ("%s: Note type %u changed as we "
2150 "read it (%zu > %zu). Since it is longer than "
2151 "expected, this coredump's notes are corrupt. THIS "
2152 "IS A BUG in the note_procstat routine for type %u.\n",
2153 __func__, (unsigned)note.n_type, new_len, descsz,
2154 (unsigned)note.n_type));
2155 }
2156 }
2157
2158 /*
2159 * Miscellaneous note out functions.
2160 */
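/*
 * Each emitter below follows the outfunc_t protocol used by
 * register_note() and __elfN(putnote)(): called with sb == NULL it
 * only reports its payload size via *sizep; called with a live sbuf
 * it must emit exactly the size it promised. A minimal conforming
 * emitter might look like this (names hypothetical):
 */
#if 0
static void
note_example(void *arg, struct sbuf *sb, size_t *sizep)
{
	int datum = 42;

	if (sb != NULL) {
		KASSERT(*sizep == sizeof(datum), ("invalid size"));
		sbuf_bcat(sb, &datum, sizeof(datum));
	}
	*sizep = sizeof(datum);	/* both passes report the same size */
}
#endif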
2161
2162 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
2163 #include <compat/freebsd32/freebsd32.h>
2164 #include <compat/freebsd32/freebsd32_signal.h>
2165
2166 typedef struct prstatus32 elf_prstatus_t;
2167 typedef struct prpsinfo32 elf_prpsinfo_t;
2168 typedef struct fpreg32 elf_prfpregset_t;
2169 typedef struct fpreg32 elf_fpregset_t;
2170 typedef struct reg32 elf_gregset_t;
2171 typedef struct thrmisc32 elf_thrmisc_t;
2172 #define ELF_KERN_PROC_MASK KERN_PROC_MASK32
2173 typedef struct kinfo_proc32 elf_kinfo_proc_t;
2174 typedef uint32_t elf_ps_strings_t;
2175 #else
2176 typedef prstatus_t elf_prstatus_t;
2177 typedef prpsinfo_t elf_prpsinfo_t;
2178 typedef prfpregset_t elf_prfpregset_t;
2179 typedef prfpregset_t elf_fpregset_t;
2180 typedef gregset_t elf_gregset_t;
2181 typedef thrmisc_t elf_thrmisc_t;
2182 #define ELF_KERN_PROC_MASK 0
2183 typedef struct kinfo_proc elf_kinfo_proc_t;
2184 typedef vm_offset_t elf_ps_strings_t;
2185 #endif
2186
2187 static void
2188 __elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep)
2189 {
2190 struct sbuf sbarg;
2191 size_t len;
2192 char *cp, *end;
2193 struct proc *p;
2194 elf_prpsinfo_t *psinfo;
2195 int error;
2196
2197 p = (struct proc *)arg;
2198 if (sb != NULL) {
2199 KASSERT(*sizep == sizeof(*psinfo), ("invalid size"));
2200 psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK);
2201 psinfo->pr_version = PRPSINFO_VERSION;
2202 psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
2203 strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
2204 PROC_LOCK(p);
2205 if (p->p_args != NULL) {
2206 len = sizeof(psinfo->pr_psargs) - 1;
2207 if (len > p->p_args->ar_length)
2208 len = p->p_args->ar_length;
2209 memcpy(psinfo->pr_psargs, p->p_args->ar_args, len);
2210 PROC_UNLOCK(p);
2211 error = 0;
2212 } else {
2213 _PHOLD(p);
2214 PROC_UNLOCK(p);
2215 sbuf_new(&sbarg, psinfo->pr_psargs,
2216 sizeof(psinfo->pr_psargs), SBUF_FIXEDLEN);
2217 error = proc_getargv(curthread, p, &sbarg);
2218 PRELE(p);
2219 if (sbuf_finish(&sbarg) == 0) {
2220 len = sbuf_len(&sbarg);
2221 if (len > 0)
2222 len--;
2223 } else {
2224 len = sizeof(psinfo->pr_psargs) - 1;
2225 }
2226 sbuf_delete(&sbarg);
2227 }
2228 if (error != 0 || len == 0 || (ssize_t)len == -1)
2229 strlcpy(psinfo->pr_psargs, p->p_comm,
2230 sizeof(psinfo->pr_psargs));
2231 else {
2232 KASSERT(len < sizeof(psinfo->pr_psargs),
2233 ("len is too long: %zu vs %zu", len,
2234 sizeof(psinfo->pr_psargs)));
2235 cp = psinfo->pr_psargs;
2236 end = cp + len - 1;
2237 for (;;) {
2238 cp = memchr(cp, '\0', end - cp);
2239 if (cp == NULL)
2240 break;
2241 *cp = ' ';
2242 }
2243 }
2244 psinfo->pr_pid = p->p_pid;
2245 sbuf_bcat(sb, psinfo, sizeof(*psinfo));
2246 free(psinfo, M_TEMP);
2247 }
2248 *sizep = sizeof(*psinfo);
2249 }
2250
2251 static void
2252 __elfN(note_prstatus)(void *arg, struct sbuf *sb, size_t *sizep)
2253 {
2254 struct thread *td;
2255 elf_prstatus_t *status;
2256
2257 td = (struct thread *)arg;
2258 if (sb != NULL) {
2259 KASSERT(*sizep == sizeof(*status), ("invalid size"));
2260 status = malloc(sizeof(*status), M_TEMP, M_ZERO | M_WAITOK);
2261 status->pr_version = PRSTATUS_VERSION;
2262 status->pr_statussz = sizeof(elf_prstatus_t);
2263 status->pr_gregsetsz = sizeof(elf_gregset_t);
2264 status->pr_fpregsetsz = sizeof(elf_fpregset_t);
2265 status->pr_osreldate = osreldate;
2266 status->pr_cursig = td->td_proc->p_sig;
2267 status->pr_pid = td->td_tid;
2268 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
2269 fill_regs32(td, &status->pr_reg);
2270 #else
2271 fill_regs(td, &status->pr_reg);
2272 #endif
2273 sbuf_bcat(sb, status, sizeof(*status));
2274 free(status, M_TEMP);
2275 }
2276 *sizep = sizeof(*status);
2277 }
2278
2279 static void
2280 __elfN(note_fpregset)(void *arg, struct sbuf *sb, size_t *sizep)
2281 {
2282 struct thread *td;
2283 elf_prfpregset_t *fpregset;
2284
2285 td = (struct thread *)arg;
2286 if (sb != NULL) {
2287 KASSERT(*sizep == sizeof(*fpregset), ("invalid size"));
2288 fpregset = malloc(sizeof(*fpregset), M_TEMP, M_ZERO | M_WAITOK);
2289 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
2290 fill_fpregs32(td, fpregset);
2291 #else
2292 fill_fpregs(td, fpregset);
2293 #endif
2294 sbuf_bcat(sb, fpregset, sizeof(*fpregset));
2295 free(fpregset, M_TEMP);
2296 }
2297 *sizep = sizeof(*fpregset);
2298 }
2299
2300 static void
2301 __elfN(note_thrmisc)(void *arg, struct sbuf *sb, size_t *sizep)
2302 {
2303 struct thread *td;
2304 elf_thrmisc_t thrmisc;
2305
2306 td = (struct thread *)arg;
2307 if (sb != NULL) {
2308 KASSERT(*sizep == sizeof(thrmisc), ("invalid size"));
2309 bzero(&thrmisc, sizeof(thrmisc));
2310 strcpy(thrmisc.pr_tname, td->td_name);
2311 sbuf_bcat(sb, &thrmisc, sizeof(thrmisc));
2312 }
2313 *sizep = sizeof(thrmisc);
2314 }
2315
2316 static void
2317 __elfN(note_ptlwpinfo)(void *arg, struct sbuf *sb, size_t *sizep)
2318 {
2319 struct thread *td;
2320 size_t size;
2321 int structsize;
2322 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
2323 struct ptrace_lwpinfo32 pl;
2324 #else
2325 struct ptrace_lwpinfo pl;
2326 #endif
2327
2328 td = (struct thread *)arg;
2329 size = sizeof(structsize) + sizeof(pl);
2330 if (sb != NULL) {
2331 KASSERT(*sizep == size, ("invalid size"));
2332 structsize = sizeof(pl);
2333 sbuf_bcat(sb, &structsize, sizeof(structsize));
2334 bzero(&pl, sizeof(pl));
2335 pl.pl_lwpid = td->td_tid;
2336 pl.pl_event = PL_EVENT_NONE;
2337 pl.pl_sigmask = td->td_sigmask;
2338 pl.pl_siglist = td->td_siglist;
2339 if (td->td_si.si_signo != 0) {
2340 pl.pl_event = PL_EVENT_SIGNAL;
2341 pl.pl_flags |= PL_FLAG_SI;
2342 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
2343 siginfo_to_siginfo32(&td->td_si, &pl.pl_siginfo);
2344 #else
2345 pl.pl_siginfo = td->td_si;
2346 #endif
2347 }
2348 strcpy(pl.pl_tdname, td->td_name);
2349 /* XXX TODO: supply more information in struct ptrace_lwpinfo */
2350 sbuf_bcat(sb, &pl, sizeof(pl));
2351 }
2352 *sizep = size;
2353 }
2354
2355 /*
2356 * Allow for MD-specific notes, as well as any MD-specific
2357 * preparations for writing MI notes.
2358 */
2359 static void
2360 __elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep)
2361 {
2362 struct thread *td;
2363 void *buf;
2364 size_t size;
2365
2366 td = (struct thread *)arg;
2367 size = *sizep;
2368 if (size != 0 && sb != NULL)
2369 buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK);
2370 else
2371 buf = NULL;
2372 size = 0;
2373 __elfN(dump_thread)(td, buf, &size);
2374 KASSERT(sb == NULL || *sizep == size, ("invalid size"));
2375 if (size != 0 && sb != NULL)
2376 sbuf_bcat(sb, buf, size);
2377 free(buf, M_TEMP);
2378 *sizep = size;
2379 }
2380
2381 #ifdef KINFO_PROC_SIZE
2382 CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
2383 #endif
2384
2385 static void
2386 __elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep)
2387 {
2388 struct proc *p;
2389 size_t size;
2390 int structsize;
2391
2392 p = (struct proc *)arg;
2393 size = sizeof(structsize) + p->p_numthreads *
2394 sizeof(elf_kinfo_proc_t);
2395
2396 if (sb != NULL) {
2397 KASSERT(*sizep == size, ("invalid size"));
2398 structsize = sizeof(elf_kinfo_proc_t);
2399 sbuf_bcat(sb, &structsize, sizeof(structsize));
2400 sx_slock(&proctree_lock);
2401 PROC_LOCK(p);
2402 kern_proc_out(p, sb, ELF_KERN_PROC_MASK);
2403 sx_sunlock(&proctree_lock);
2404 }
2405 *sizep = size;
2406 }
2407
2408 #ifdef KINFO_FILE_SIZE
2409 CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
2410 #endif
2411
2412 static void
2413 note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep)
2414 {
2415 struct proc *p;
2416 size_t size, sect_sz, i;
2417 ssize_t start_len, sect_len;
2418 int structsize, filedesc_flags;
2419
2420 if (coredump_pack_fileinfo)
2421 filedesc_flags = KERN_FILEDESC_PACK_KINFO;
2422 else
2423 filedesc_flags = 0;
2424
2425 p = (struct proc *)arg;
2426 structsize = sizeof(struct kinfo_file);
2427 if (sb == NULL) {
2428 size = 0;
2429 sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
2430 sbuf_set_drain(sb, sbuf_count_drain, &size);
2431 sbuf_bcat(sb, &structsize, sizeof(structsize));
2432 PROC_LOCK(p);
2433 kern_proc_filedesc_out(p, sb, -1, filedesc_flags);
2434 sbuf_finish(sb);
2435 sbuf_delete(sb);
2436 *sizep = size;
2437 } else {
2438 sbuf_start_section(sb, &start_len);
2439
2440 sbuf_bcat(sb, &structsize, sizeof(structsize));
2441 PROC_LOCK(p);
2442 kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize),
2443 filedesc_flags);
2444
2445 sect_len = sbuf_end_section(sb, start_len, 0, 0);
2446 if (sect_len < 0)
2447 return;
2448 sect_sz = sect_len;
2449
2450 KASSERT(sect_sz <= *sizep,
2451 ("kern_proc_filedesc_out did not respect maxlen; "
2452 "requested %zu, got %zu", *sizep - sizeof(structsize),
2453 sect_sz - sizeof(structsize)));
2454
2455 for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++)
2456 sbuf_putc(sb, 0);
2457 }
2458 }
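/*
 * note_procstat_files() above illustrates the pattern for notes whose
 * length depends on mutable kernel state: the sizing pass measures
 * through a counting drain (sbuf_count_drain), and the emit pass caps
 * kern_proc_filedesc_out() at the promised length and zero-fills any
 * shortfall, since the file table may have shrunk in between.
 */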
2459
2460 #ifdef KINFO_VMENTRY_SIZE
2461 CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
2462 #endif
2463
2464 static void
2465 note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep)
2466 {
2467 struct proc *p;
2468 size_t size;
2469 int structsize, vmmap_flags;
2470
2471 if (coredump_pack_vmmapinfo)
2472 vmmap_flags = KERN_VMMAP_PACK_KINFO;
2473 else
2474 vmmap_flags = 0;
2475
2476 p = (struct proc *)arg;
2477 structsize = sizeof(struct kinfo_vmentry);
2478 if (sb == NULL) {
2479 size = 0;
2480 sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
2481 sbuf_set_drain(sb, sbuf_count_drain, &size);
2482 sbuf_bcat(sb, &structsize, sizeof(structsize));
2483 PROC_LOCK(p);
2484 kern_proc_vmmap_out(p, sb, -1, vmmap_flags);
2485 sbuf_finish(sb);
2486 sbuf_delete(sb);
2487 *sizep = size;
2488 } else {
2489 sbuf_bcat(sb, &structsize, sizeof(structsize));
2490 PROC_LOCK(p);
2491 kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize),
2492 vmmap_flags);
2493 }
2494 }
2495
2496 static void
2497 note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep)
2498 {
2499 struct proc *p;
2500 size_t size;
2501 int structsize;
2502
2503 p = (struct proc *)arg;
2504 size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t);
2505 if (sb != NULL) {
2506 KASSERT(*sizep == size, ("invalid size"));
2507 structsize = sizeof(gid_t);
2508 sbuf_bcat(sb, &structsize, sizeof(structsize));
2509 sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups *
2510 sizeof(gid_t));
2511 }
2512 *sizep = size;
2513 }
2514
2515 static void
2516 note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep)
2517 {
2518 struct proc *p;
2519 size_t size;
2520 int structsize;
2521
2522 p = (struct proc *)arg;
2523 size = sizeof(structsize) + sizeof(p->p_pd->pd_cmask);
2524 if (sb != NULL) {
2525 KASSERT(*sizep == size, ("invalid size"));
2526 structsize = sizeof(p->p_pd->pd_cmask);
2527 sbuf_bcat(sb, &structsize, sizeof(structsize));
2528 sbuf_bcat(sb, &p->p_pd->pd_cmask, sizeof(p->p_pd->pd_cmask));
2529 }
2530 *sizep = size;
2531 }
2532
2533 static void
2534 note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep)
2535 {
2536 struct proc *p;
2537 struct rlimit rlim[RLIM_NLIMITS];
2538 size_t size;
2539 int structsize, i;
2540
2541 p = (struct proc *)arg;
2542 size = sizeof(structsize) + sizeof(rlim);
2543 if (sb != NULL) {
2544 KASSERT(*sizep == size, ("invalid size"));
2545 structsize = sizeof(rlim);
2546 sbuf_bcat(sb, &structsize, sizeof(structsize));
2547 PROC_LOCK(p);
2548 for (i = 0; i < RLIM_NLIMITS; i++)
2549 lim_rlimit_proc(p, i, &rlim[i]);
2550 PROC_UNLOCK(p);
2551 sbuf_bcat(sb, rlim, sizeof(rlim));
2552 }
2553 *sizep = size;
2554 }
2555
2556 static void
2557 note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep)
2558 {
2559 struct proc *p;
2560 size_t size;
2561 int structsize;
2562
2563 p = (struct proc *)arg;
2564 size = sizeof(structsize) + sizeof(p->p_osrel);
2565 if (sb != NULL) {
2566 KASSERT(*sizep == size, ("invalid size"));
2567 structsize = sizeof(p->p_osrel);
2568 sbuf_bcat(sb, &structsize, sizeof(structsize));
2569 sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel));
2570 }
2571 *sizep = size;
2572 }
2573
2574 static void
2575 __elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep)
2576 {
2577 struct proc *p;
2578 elf_ps_strings_t ps_strings;
2579 size_t size;
2580 int structsize;
2581
2582 p = (struct proc *)arg;
2583 size = sizeof(structsize) + sizeof(ps_strings);
2584 if (sb != NULL) {
2585 KASSERT(*sizep == size, ("invalid size"));
2586 structsize = sizeof(ps_strings);
2587 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
2588 ps_strings = PTROUT(p->p_sysent->sv_psstrings);
2589 #else
2590 ps_strings = p->p_sysent->sv_psstrings;
2591 #endif
2592 sbuf_bcat(sb, &structsize, sizeof(structsize));
2593 sbuf_bcat(sb, &ps_strings, sizeof(ps_strings));
2594 }
2595 *sizep = size;
2596 }
2597
2598 static void
2599 __elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep)
2600 {
2601 struct proc *p;
2602 size_t size;
2603 int structsize = sizeof(Elf_Auxinfo);	/* else copied uninitialized below */
2604
2605 p = (struct proc *)arg;
2606 if (sb == NULL) {
2607 size = 0;
2608 sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
2609 sbuf_set_drain(sb, sbuf_count_drain, &size);
2610 sbuf_bcat(sb, &structsize, sizeof(structsize));
2611 PHOLD(p);
2612 proc_getauxv(curthread, p, sb);
2613 PRELE(p);
2614 sbuf_finish(sb);
2615 sbuf_delete(sb);
2616 *sizep = size;
2617 } else {
2618 structsize = sizeof(Elf_Auxinfo);
2619 sbuf_bcat(sb, &structsize, sizeof(structsize));
2620 PHOLD(p);
2621 proc_getauxv(curthread, p, sb);
2622 PRELE(p);
2623 }
2624 }
2625
2626 static boolean_t
2627 __elfN(parse_notes)(struct image_params *imgp, Elf_Note *checknote,
2628 const char *note_vendor, const Elf_Phdr *pnote,
2629 boolean_t (*cb)(const Elf_Note *, void *, boolean_t *), void *cb_arg)
2630 {
2631 const Elf_Note *note, *note0, *note_end;
2632 const char *note_name;
2633 char *buf;
2634 int i, error;
2635 boolean_t res;
2636
2637 /* We need some limit, might as well use PAGE_SIZE. */
2638 if (pnote == NULL || pnote->p_filesz > PAGE_SIZE)
2639 return (FALSE);
2640 ASSERT_VOP_LOCKED(imgp->vp, "parse_notes");
2641 if (pnote->p_offset > PAGE_SIZE ||
2642 pnote->p_filesz > PAGE_SIZE - pnote->p_offset) {
2643 buf = malloc(pnote->p_filesz, M_TEMP, M_NOWAIT);
2644 if (buf == NULL) {
2645 VOP_UNLOCK(imgp->vp);
2646 buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK);
2647 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
2648 }
2649 error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz,
2650 pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED,
2651 curthread->td_ucred, NOCRED, NULL, curthread);
2652 if (error != 0) {
2653 uprintf("i/o error PT_NOTE\n");
2654 goto retf;
2655 }
2656 note = note0 = (const Elf_Note *)buf;
2657 note_end = (const Elf_Note *)(buf + pnote->p_filesz);
2658 } else {
2659 note = note0 = (const Elf_Note *)(imgp->image_header +
2660 pnote->p_offset);
2661 note_end = (const Elf_Note *)(imgp->image_header +
2662 pnote->p_offset + pnote->p_filesz);
2663 buf = NULL;
2664 }
2665 for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
2666 if (!aligned(note, Elf32_Addr) || (const char *)note_end -
2667 (const char *)note < sizeof(Elf_Note)) {
2668 goto retf;
2669 }
2670 if (note->n_namesz != checknote->n_namesz ||
2671 note->n_descsz != checknote->n_descsz ||
2672 note->n_type != checknote->n_type)
2673 goto nextnote;
2674 note_name = (const char *)(note + 1);
2675 if (note_name + checknote->n_namesz >=
2676 (const char *)note_end || strncmp(note_vendor,
2677 note_name, checknote->n_namesz) != 0)
2678 goto nextnote;
2679
2680 if (cb(note, cb_arg, &res))
2681 goto ret;
2682 nextnote:
2683 note = (const Elf_Note *)((const char *)(note + 1) +
2684 roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) +
2685 roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE));
2686 }
2687 retf:
2688 res = FALSE;
2689 ret:
2690 free(buf, M_TEMP);
2691 return (res);
2692 }
2693
2694 struct brandnote_cb_arg {
2695 Elf_Brandnote *brandnote;
2696 int32_t *osrel;
2697 };
2698
2699 static boolean_t
2700 brandnote_cb(const Elf_Note *note, void *arg0, boolean_t *res)
2701 {
2702 struct brandnote_cb_arg *arg;
2703
2704 arg = arg0;
2705
2706 /*
2707 * Fetch the osreldate for the binary from the ELF OSABI-note if
2708 * necessary.
2709 */
2710 *res = (arg->brandnote->flags & BN_TRANSLATE_OSREL) != 0 &&
2711 arg->brandnote->trans_osrel != NULL ?
2712 arg->brandnote->trans_osrel(note, arg->osrel) : TRUE;
2713
2714 return (TRUE);
2715 }
2716
2717 static Elf_Note fctl_note = {
2718 .n_namesz = sizeof(FREEBSD_ABI_VENDOR),
2719 .n_descsz = sizeof(uint32_t),
2720 .n_type = NT_FREEBSD_FEATURE_CTL,
2721 };
2722
2723 struct fctl_cb_arg {
2724 boolean_t *has_fctl0;
2725 uint32_t *fctl0;
2726 };
2727
2728 static boolean_t
2729 note_fctl_cb(const Elf_Note *note, void *arg0, boolean_t *res)
2730 {
2731 struct fctl_cb_arg *arg;
2732 const Elf32_Word *desc;
2733 uintptr_t p;
2734
2735 arg = arg0;
2736 p = (uintptr_t)(note + 1);
2737 p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
2738 desc = (const Elf32_Word *)p;
2739 *arg->has_fctl0 = TRUE;
2740 *arg->fctl0 = desc[0];
2741 return (TRUE);
2742 }
2743
2744 /*
2745 * Try to find the appropriate ABI-note section for checknote, fetch
2746 * the osreldate and feature control flags for the binary from the ELF
2747 * OSABI-note. Only the first page of the image is searched, the same
2748 * as for headers.
2749 */
2750 static boolean_t
2751 __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote,
2752 int32_t *osrel, boolean_t *has_fctl0, uint32_t *fctl0)
2753 {
2754 const Elf_Phdr *phdr;
2755 const Elf_Ehdr *hdr;
2756 struct brandnote_cb_arg b_arg;
2757 struct fctl_cb_arg f_arg;
2758 int i, j;
2759
2760 hdr = (const Elf_Ehdr *)imgp->image_header;
2761 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
2762 b_arg.brandnote = brandnote;
2763 b_arg.osrel = osrel;
2764 f_arg.has_fctl0 = has_fctl0;
2765 f_arg.fctl0 = fctl0;
2766
2767 for (i = 0; i < hdr->e_phnum; i++) {
2768 if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp,
2769 &brandnote->hdr, brandnote->vendor, &phdr[i], brandnote_cb,
2770 &b_arg)) {
2771 for (j = 0; j < hdr->e_phnum; j++) {
2772 if (phdr[j].p_type == PT_NOTE &&
2773 __elfN(parse_notes)(imgp, &fctl_note,
2774 FREEBSD_ABI_VENDOR, &phdr[j],
2775 note_fctl_cb, &f_arg))
2776 break;
2777 }
2778 return (TRUE);
2779 }
2780 }
2781 return (FALSE);
2783 }
2784
2785 /*
2786 * Tell kern_execve.c about it, with a little help from the linker.
2787 */
2788 static struct execsw __elfN(execsw) = {
2789 .ex_imgact = __CONCAT(exec_, __elfN(imgact)),
2790 .ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
2791 };
2792 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
2793
2794 static vm_prot_t
2795 __elfN(trans_prot)(Elf_Word flags)
2796 {
2797 vm_prot_t prot;
2798
2799 prot = 0;
2800 if (flags & PF_X)
2801 prot |= VM_PROT_EXECUTE;
2802 if (flags & PF_W)
2803 prot |= VM_PROT_WRITE;
2804 if (flags & PF_R)
2805 prot |= VM_PROT_READ;
2806 #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
2807 if (i386_read_exec && (flags & PF_R))
2808 prot |= VM_PROT_EXECUTE;
2809 #endif
2810 return (prot);
2811 }
2812
2813 static Elf_Word
2814 __elfN(untrans_prot)(vm_prot_t prot)
2815 {
2816 Elf_Word flags;
2817
2818 flags = 0;
2819 if (prot & VM_PROT_EXECUTE)
2820 flags |= PF_X;
2821 if (prot & VM_PROT_READ)
2822 flags |= PF_R;
2823 if (prot & VM_PROT_WRITE)
2824 flags |= PF_W;
2825 return (flags);
2826 }
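/*
 * trans_prot() and untrans_prot() are inverses on the common bits,
 * e.g. __elfN(trans_prot)(PF_R | PF_W) yields VM_PROT_READ |
 * VM_PROT_WRITE and untrans_prot() maps it back; the one asymmetry is
 * the read-implies-exec case for 32-bit x86 in trans_prot().
 */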
2827
2828 void
2829 __elfN(stackgap)(struct image_params *imgp, uintptr_t *stack_base)
2830 {
2831 uintptr_t range, rbase, gap;
2832 int pct;
2833
2834 pct = __elfN(aslr_stack_gap);
2835 if (pct == 0)
2836 return;
2837 if (pct > 50)
2838 pct = 50;
2839 range = imgp->eff_stack_sz * pct / 100;
2840 arc4rand(&rbase, sizeof(rbase), 0);
2841 gap = rbase % range;
2842 gap &= ~(sizeof(u_long) - 1);
2843 *stack_base -= gap;
2844 }
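/*
 * Worked example with hypothetical values: pct = 3 and eff_stack_sz =
 * 8 MB give range = 8388608 * 3 / 100 = 251658 bytes, so the stack
 * base drops by a random, sizeof(u_long)-aligned offset below that;
 * the clamp above caps the gap at half the effective stack size.
 */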