FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_exec.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 1993, David Greenman
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include "opt_capsicum.h"
33 #include "opt_hwpmc_hooks.h"
34 #include "opt_ktrace.h"
35 #include "opt_vm.h"
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/acct.h>
40 #include <sys/asan.h>
41 #include <sys/capsicum.h>
42 #include <sys/compressor.h>
43 #include <sys/eventhandler.h>
44 #include <sys/exec.h>
45 #include <sys/fcntl.h>
46 #include <sys/filedesc.h>
47 #include <sys/imgact.h>
48 #include <sys/imgact_elf.h>
49 #include <sys/kernel.h>
50 #include <sys/lock.h>
51 #include <sys/malloc.h>
52 #include <sys/mman.h>
53 #include <sys/mount.h>
54 #include <sys/mutex.h>
55 #include <sys/namei.h>
56 #include <sys/priv.h>
57 #include <sys/proc.h>
58 #include <sys/ptrace.h>
59 #include <sys/reg.h>
60 #include <sys/resourcevar.h>
61 #include <sys/rwlock.h>
62 #include <sys/sched.h>
63 #include <sys/sdt.h>
64 #include <sys/sf_buf.h>
65 #include <sys/shm.h>
66 #include <sys/signalvar.h>
67 #include <sys/smp.h>
68 #include <sys/stat.h>
69 #include <sys/syscallsubr.h>
70 #include <sys/sysctl.h>
71 #include <sys/sysent.h>
72 #include <sys/sysproto.h>
73 #include <sys/timers.h>
74 #include <sys/umtxvar.h>
75 #include <sys/vnode.h>
76 #include <sys/wait.h>
77 #ifdef KTRACE
78 #include <sys/ktrace.h>
79 #endif
80
81 #include <vm/vm.h>
82 #include <vm/vm_param.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_page.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_kern.h>
87 #include <vm/vm_extern.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_pager.h>
90
91 #ifdef HWPMC_HOOKS
92 #include <sys/pmckern.h>
93 #endif
94
95 #include <security/audit/audit.h>
96 #include <security/mac/mac_framework.h>
97
98 #ifdef KDTRACE_HOOKS
99 #include <sys/dtrace_bsd.h>
100 dtrace_execexit_func_t dtrace_fasttrap_exec;
101 #endif
102
103 SDT_PROVIDER_DECLARE(proc);
104 SDT_PROBE_DEFINE1(proc, , , exec, "char *");
105 SDT_PROBE_DEFINE1(proc, , , exec__failure, "int");
106 SDT_PROBE_DEFINE1(proc, , , exec__success, "char *");
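/*
 * These SDT probes fire from do_execve() below and can be consumed
 * with dtrace(1) on a DTrace-enabled kernel.  A minimal sketch (note
 * that the double underscore becomes a dash in the probe name):
 *
 *	# dtrace -n 'proc:::exec { trace(stringof(arg0)); }'
 *	# dtrace -n 'proc:::exec-failure { trace(arg0); }'	(arg0 = errno)
 */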
107
108 MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
109
110 int coredump_pack_fileinfo = 1;
111 SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
112 &coredump_pack_fileinfo, 0,
113 "Enable file path packing in 'procstat -f' coredump notes");
114
115 int coredump_pack_vmmapinfo = 1;
116 SYSCTL_INT(_kern, OID_AUTO, coredump_pack_vmmapinfo, CTLFLAG_RWTUN,
117 &coredump_pack_vmmapinfo, 0,
118 "Enable file path packing in 'procstat -v' coredump notes");
119
120 static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
121 static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
122 static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
123 static int do_execve(struct thread *td, struct image_args *args,
124 struct mac *mac_p, struct vmspace *oldvmspace);
125
126 /* XXX This should be vm_size_t. */
127 SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD|
128 CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_ps_strings, "LU",
129 "Location of process' ps_strings structure");
130
131 /* XXX This should be vm_size_t. */
132 SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
133 CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_usrstack, "LU",
134 "Top of process stack");
135
136 SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_MPSAFE,
137 NULL, 0, sysctl_kern_stackprot, "I",
138 "Stack memory permissions");
139
140 u_long ps_arg_cache_limit = PAGE_SIZE / 16;
141 SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
142 &ps_arg_cache_limit, 0,
143 "Process' command line characters cache limit");
144
145 static int disallow_high_osrel;
146 SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW,
147 &disallow_high_osrel, 0,
148 "Disallow execution of binaries built for higher version of the world");
149
150 static int map_at_zero = 0;
151 SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RWTUN, &map_at_zero, 0,
152 "Permit processes to map an object at virtual address 0.");
153
154 static int core_dump_can_intr = 1;
155 SYSCTL_INT(_kern, OID_AUTO, core_dump_can_intr, CTLFLAG_RWTUN,
156 &core_dump_can_intr, 0,
157 "Core dumping interruptible with SIGKILL");
158
159 static int
160 sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
161 {
162 struct proc *p;
163 vm_offset_t ps_strings;
164
165 p = curproc;
166 #ifdef SCTL_MASK32
167 if (req->flags & SCTL_MASK32) {
168 unsigned int val;
169 val = (unsigned int)PROC_PS_STRINGS(p);
170 return (SYSCTL_OUT(req, &val, sizeof(val)));
171 }
172 #endif
173 ps_strings = PROC_PS_STRINGS(p);
174 return (SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings)));
175 }
176
177 static int
178 sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
179 {
180 struct proc *p;
181 vm_offset_t val;
182
183 p = curproc;
184 #ifdef SCTL_MASK32
185 if (req->flags & SCTL_MASK32) {
186 unsigned int val32;
187
188 val32 = round_page((unsigned int)p->p_vmspace->vm_stacktop);
189 return (SYSCTL_OUT(req, &val32, sizeof(val32)));
190 }
191 #endif
192 val = round_page(p->p_vmspace->vm_stacktop);
193 return (SYSCTL_OUT(req, &val, sizeof(val)));
194 }
195
196 static int
197 sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
198 {
199 struct proc *p;
200
201 p = curproc;
202 return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
203 sizeof(p->p_sysent->sv_stackprot)));
204 }
205
206 /*
207 * Each of the items is a pointer to a `const struct execsw', hence the
208 * double pointer here.
209 */
210 static const struct execsw **execsw;
211
212 #ifndef _SYS_SYSPROTO_H_
213 struct execve_args {
214 char *fname;
215 char **argv;
216 char **envv;
217 };
218 #endif
219
220 int
221 sys_execve(struct thread *td, struct execve_args *uap)
222 {
223 struct image_args args;
224 struct vmspace *oldvmspace;
225 int error;
226
227 error = pre_execve(td, &oldvmspace);
228 if (error != 0)
229 return (error);
230 error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
231 uap->argv, uap->envv);
232 if (error == 0)
233 error = kern_execve(td, &args, NULL, oldvmspace);
234 post_execve(td, error, oldvmspace);
235 AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
236 return (error);
237 }
238
239 #ifndef _SYS_SYSPROTO_H_
240 struct fexecve_args {
241 int fd;
242 char **argv;
243 char **envv;
244 };
245 #endif
246 int
247 sys_fexecve(struct thread *td, struct fexecve_args *uap)
248 {
249 struct image_args args;
250 struct vmspace *oldvmspace;
251 int error;
252
253 error = pre_execve(td, &oldvmspace);
254 if (error != 0)
255 return (error);
256 error = exec_copyin_args(&args, NULL, UIO_SYSSPACE,
257 uap->argv, uap->envv);
258 if (error == 0) {
259 args.fd = uap->fd;
260 error = kern_execve(td, &args, NULL, oldvmspace);
261 }
262 post_execve(td, error, oldvmspace);
263 AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
264 return (error);
265 }
266
267 #ifndef _SYS_SYSPROTO_H_
268 struct __mac_execve_args {
269 char *fname;
270 char **argv;
271 char **envv;
272 struct mac *mac_p;
273 };
274 #endif
275
276 int
277 sys___mac_execve(struct thread *td, struct __mac_execve_args *uap)
278 {
279 #ifdef MAC
280 struct image_args args;
281 struct vmspace *oldvmspace;
282 int error;
283
284 error = pre_execve(td, &oldvmspace);
285 if (error != 0)
286 return (error);
287 error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
288 uap->argv, uap->envv);
289 if (error == 0)
290 error = kern_execve(td, &args, uap->mac_p, oldvmspace);
291 post_execve(td, error, oldvmspace);
292 AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
293 return (error);
294 #else
295 return (ENOSYS);
296 #endif
297 }
298
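/*
 * Single-thread the process in preparation for exec.  Each call to
 * kern_execve() must be bracketed by pre_execve() and post_execve(),
 * as in the execve(2) family of syscalls above.  *oldvmspace records
 * the pre-exec address space so that exec_cleanup() can release it if
 * the exec replaced it.
 */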
299 int
300 pre_execve(struct thread *td, struct vmspace **oldvmspace)
301 {
302 struct proc *p;
303 int error;
304
305 KASSERT(td == curthread, ("non-current thread %p", td));
306 error = 0;
307 p = td->td_proc;
308 if ((p->p_flag & P_HADTHREADS) != 0) {
309 PROC_LOCK(p);
310 if (thread_single(p, SINGLE_BOUNDARY) != 0)
311 error = ERESTART;
312 PROC_UNLOCK(p);
313 }
314 KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
315 ("nested execve"));
316 *oldvmspace = p->p_vmspace;
317 return (error);
318 }
319
320 void
321 post_execve(struct thread *td, int error, struct vmspace *oldvmspace)
322 {
323 struct proc *p;
324
325 KASSERT(td == curthread, ("non-current thread %p", td));
326 p = td->td_proc;
327 if ((p->p_flag & P_HADTHREADS) != 0) {
328 PROC_LOCK(p);
329 /*
330                  * On success, we upgrade to SINGLE_EXIT state to
331                  * force the other threads to exit.
332 */
333 if (error == EJUSTRETURN)
334 thread_single(p, SINGLE_EXIT);
335 else
336 thread_single_end(p, SINGLE_BOUNDARY);
337 PROC_UNLOCK(p);
338 }
339 exec_cleanup(td, oldvmspace);
340 }
341
342 /*
343 * kern_execve() has the astonishing property of not always returning to
344 * the caller. If sufficiently bad things happen during the call to
345 * do_execve(), it can end up calling exit1(); as a result, callers must
346 * avoid doing anything which they might need to undo (e.g., allocating
347 * memory).
348 */
349 int
350 kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
351 struct vmspace *oldvmspace)
352 {
353
354 TSEXEC(td->td_proc->p_pid, args->begin_argv);
355 AUDIT_ARG_ARGV(args->begin_argv, args->argc,
356 exec_args_get_begin_envv(args) - args->begin_argv);
357 AUDIT_ARG_ENVV(exec_args_get_begin_envv(args), args->envc,
358 args->endp - exec_args_get_begin_envv(args));
359
360 /* Must have at least one argument. */
361 if (args->argc == 0) {
362 exec_free_args(args);
363 return (EINVAL);
364 }
365 return (do_execve(td, args, mac_p, oldvmspace));
366 }
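/*
 * A sketch of the calling pattern for in-kernel consumers (it mirrors
 * sys_execve() above; error handling abbreviated):
 *
 *	error = pre_execve(td, &oldvmspace);
 *	if (error != 0)
 *		return (error);
 *	error = exec_copyin_args(&args, path, UIO_USERSPACE, argv, envv);
 *	if (error == 0)
 *		error = kern_execve(td, &args, NULL, oldvmspace);
 *	post_execve(td, error, oldvmspace);
 *
 * EJUSTRETURN signals success: the register state for the new image is
 * already in place and must not be overwritten by syscall return
 * handling.
 */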
367
368 static void
369 execve_nosetid(struct image_params *imgp)
370 {
371 imgp->credential_setid = false;
372 if (imgp->newcred != NULL) {
373 crfree(imgp->newcred);
374 imgp->newcred = NULL;
375 }
376 }
377
378 /*
379 * In-kernel implementation of execve(). All arguments are assumed to be
380 * userspace pointers from the passed thread.
381 */
382 static int
383 do_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
384 struct vmspace *oldvmspace)
385 {
386 struct proc *p = td->td_proc;
387 struct nameidata nd;
388 struct ucred *oldcred;
389 struct uidinfo *euip = NULL;
390 uintptr_t stack_base;
391 struct image_params image_params, *imgp;
392 struct vattr attr;
393 int (*img_first)(struct image_params *);
394 struct pargs *oldargs = NULL, *newargs = NULL;
395 struct sigacts *oldsigacts = NULL, *newsigacts = NULL;
396 #ifdef KTRACE
397 struct ktr_io_params *kiop;
398 #endif
399 struct vnode *oldtextvp, *newtextvp;
400 struct vnode *oldtextdvp, *newtextdvp;
401 char *oldbinname, *newbinname;
402 bool credential_changing;
403 #ifdef MAC
404 struct label *interpvplabel = NULL;
405 bool will_transition;
406 #endif
407 #ifdef HWPMC_HOOKS
408 struct pmckern_procexec pe;
409 #endif
410 int error, i, orig_osrel;
411 uint32_t orig_fctl0;
412 Elf_Brandinfo *orig_brandinfo;
413 size_t freepath_size;
414 static const char fexecv_proc_title[] = "(fexecv)";
415
416 imgp = &image_params;
417 oldtextvp = oldtextdvp = NULL;
418 newtextvp = newtextdvp = NULL;
419 newbinname = oldbinname = NULL;
420 #ifdef KTRACE
421 kiop = NULL;
422 #endif
423
424 /*
425 * Lock the process and set the P_INEXEC flag to indicate that
426 * it should be left alone until we're done here. This is
427 * necessary to avoid race conditions - e.g. in ptrace() -
428 * that might allow a local user to illicitly obtain elevated
429 * privileges.
430 */
431 PROC_LOCK(p);
432 KASSERT((p->p_flag & P_INEXEC) == 0,
433 ("%s(): process already has P_INEXEC flag", __func__));
434 p->p_flag |= P_INEXEC;
435 PROC_UNLOCK(p);
436
437 /*
438 * Initialize part of the common data
439 */
440 bzero(imgp, sizeof(*imgp));
441 imgp->proc = p;
442 imgp->attr = &attr;
443 imgp->args = args;
444 oldcred = p->p_ucred;
445 orig_osrel = p->p_osrel;
446 orig_fctl0 = p->p_fctl0;
447 orig_brandinfo = p->p_elf_brandinfo;
448
449 #ifdef MAC
450 error = mac_execve_enter(imgp, mac_p);
451 if (error)
452 goto exec_fail;
453 #endif
454
455 SDT_PROBE1(proc, , , exec, args->fname);
456
457 interpret:
458 if (args->fname != NULL) {
459 #ifdef CAPABILITY_MODE
460 /*
461 * While capability mode can't reach this point via direct
462 * path arguments to execve(), we also don't allow
463 * interpreters to be used in capability mode (for now).
464 * Catch indirect lookups and return a permissions error.
465 */
466 if (IN_CAPABILITY_MODE(td)) {
467 error = ECAPMODE;
468 goto exec_fail;
469 }
470 #endif
471
472 /*
473 * Translate the file name. namei() returns a vnode
474 * pointer in ni_vp among other things.
475 */
476 NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | LOCKSHARED | FOLLOW |
477 AUDITVNODE1 | WANTPARENT, UIO_SYSSPACE,
478 args->fname);
479
480 error = namei(&nd);
481 if (error)
482 goto exec_fail;
483
484 newtextvp = nd.ni_vp;
485 newtextdvp = nd.ni_dvp;
486 nd.ni_dvp = NULL;
487 newbinname = malloc(nd.ni_cnd.cn_namelen + 1, M_PARGS,
488 M_WAITOK);
489 memcpy(newbinname, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen);
490 newbinname[nd.ni_cnd.cn_namelen] = '\0';
491 imgp->vp = newtextvp;
492
493 /*
494              * Do our best to calculate the full path to the image file.
495 */
496 if (args->fname[0] == '/') {
497 imgp->execpath = args->fname;
498 } else {
499 VOP_UNLOCK(imgp->vp);
500 freepath_size = MAXPATHLEN;
501 if (vn_fullpath_hardlink(newtextvp, newtextdvp,
502 newbinname, nd.ni_cnd.cn_namelen, &imgp->execpath,
503 &imgp->freepath, &freepath_size) != 0)
504 imgp->execpath = args->fname;
505 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
506 }
507 } else if (imgp->interpreter_vp) {
508 /*
509 * An image activator has already provided an open vnode
510 */
511 newtextvp = imgp->interpreter_vp;
512 imgp->interpreter_vp = NULL;
513 if (vn_fullpath(newtextvp, &imgp->execpath,
514 &imgp->freepath) != 0)
515 imgp->execpath = args->fname;
516 vn_lock(newtextvp, LK_SHARED | LK_RETRY);
517 AUDIT_ARG_VNODE1(newtextvp);
518 imgp->vp = newtextvp;
519 } else {
520 AUDIT_ARG_FD(args->fd);
521
522 /*
523              * If the descriptor was not opened with O_PATH, then
524              * we require that it was opened with O_EXEC or
525              * O_RDONLY.  In either case, exec_check_permissions()
526              * below checks the _current_ file access mode, regardless
527              * of the permissions additionally checked at
528              * open(2).
529 */
530 error = fgetvp_exec(td, args->fd, &cap_fexecve_rights,
531 &newtextvp);
532 if (error != 0)
533 goto exec_fail;
534
535 if (vn_fullpath(newtextvp, &imgp->execpath,
536 &imgp->freepath) != 0)
537 imgp->execpath = args->fname;
538 vn_lock(newtextvp, LK_SHARED | LK_RETRY);
539 AUDIT_ARG_VNODE1(newtextvp);
540 imgp->vp = newtextvp;
541 }
542
543 /*
544 * Check file permissions. Also 'opens' file and sets its vnode to
545 * text mode.
546 */
547 error = exec_check_permissions(imgp);
548 if (error)
549 goto exec_fail_dealloc;
550
551 imgp->object = imgp->vp->v_object;
552 if (imgp->object != NULL)
553 vm_object_reference(imgp->object);
554
555 error = exec_map_first_page(imgp);
556 if (error)
557 goto exec_fail_dealloc;
558
559 imgp->proc->p_osrel = 0;
560 imgp->proc->p_fctl0 = 0;
561 imgp->proc->p_elf_brandinfo = NULL;
562
563 /*
564 * Implement image setuid/setgid.
565 *
566 * Determine new credentials before attempting image activators
567 * so that it can be used by process_exec handlers to determine
568 * credential/setid changes.
569 *
570 * Don't honor setuid/setgid if the filesystem prohibits it or if
571 * the process is being traced.
572 *
573 * We disable setuid/setgid/etc in capability mode on the basis
574 * that most setugid applications are not written with that
575 * environment in mind, and will therefore almost certainly operate
576 * incorrectly. In principle there's no reason that setugid
577 * applications might not be useful in capability mode, so we may want
578 * to reconsider this conservative design choice in the future.
579 *
580 * XXXMAC: For the time being, use NOSUID to also prohibit
581 * transitions on the file system.
582 */
583 credential_changing = false;
584 credential_changing |= (attr.va_mode & S_ISUID) &&
585 oldcred->cr_uid != attr.va_uid;
586 credential_changing |= (attr.va_mode & S_ISGID) &&
587 oldcred->cr_gid != attr.va_gid;
588 #ifdef MAC
589 will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
590 interpvplabel, imgp) != 0;
591 credential_changing |= will_transition;
592 #endif
593
594 /* Don't inherit PROC_PDEATHSIG_CTL value if setuid/setgid. */
595 if (credential_changing)
596 imgp->proc->p_pdeathsig = 0;
597
598 if (credential_changing &&
599 #ifdef CAPABILITY_MODE
600 ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
601 #endif
602 (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
603 (p->p_flag & P_TRACED) == 0) {
604 imgp->credential_setid = true;
605 VOP_UNLOCK(imgp->vp);
606 imgp->newcred = crdup(oldcred);
607 if (attr.va_mode & S_ISUID) {
608 euip = uifind(attr.va_uid);
609 change_euid(imgp->newcred, euip);
610 }
611 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
612 if (attr.va_mode & S_ISGID)
613 change_egid(imgp->newcred, attr.va_gid);
614 /*
615 * Implement correct POSIX saved-id behavior.
616 *
617 * XXXMAC: Note that the current logic will save the
618 * uid and gid if a MAC domain transition occurs, even
619 * though maybe it shouldn't.
620 */
621 change_svuid(imgp->newcred, imgp->newcred->cr_uid);
622 change_svgid(imgp->newcred, imgp->newcred->cr_gid);
623 } else {
624 /*
625 * Implement correct POSIX saved-id behavior.
626 *
627 * XXX: It's not clear that the existing behavior is
628 * POSIX-compliant. A number of sources indicate that the
629 * saved uid/gid should only be updated if the new ruid is
630 * not equal to the old ruid, or the new euid is not equal
631 * to the old euid and the new euid is not equal to the old
632 * ruid. The FreeBSD code always updates the saved uid/gid.
633 * Also, this code uses the new (replaced) euid and egid as
634 * the source, which may or may not be the right ones to use.
635 */
636 if (oldcred->cr_svuid != oldcred->cr_uid ||
637 oldcred->cr_svgid != oldcred->cr_gid) {
638 VOP_UNLOCK(imgp->vp);
639 imgp->newcred = crdup(oldcred);
640 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
641 change_svuid(imgp->newcred, imgp->newcred->cr_uid);
642 change_svgid(imgp->newcred, imgp->newcred->cr_gid);
643 }
644 }
645 /* The new credentials are installed into the process later. */
646
647 /*
648 * If the current process has a special image activator it
649 * wants to try first, call it. For example, emulating shell
650 * scripts differently.
651 */
652 error = -1;
653 if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
654 error = img_first(imgp);
655
656 /*
657 * Loop through the list of image activators, calling each one.
658 * An activator returns -1 if there is no match, 0 on success,
659 * and an error otherwise.
660 */
661 for (i = 0; error == -1 && execsw[i]; ++i) {
662 if (execsw[i]->ex_imgact == NULL ||
663 execsw[i]->ex_imgact == img_first) {
664 continue;
665 }
666 error = (*execsw[i]->ex_imgact)(imgp);
667 }
668
669 if (error) {
670 if (error == -1)
671 error = ENOEXEC;
672 goto exec_fail_dealloc;
673 }
674
675 /*
676 * Special interpreter operation, cleanup and loop up to try to
677 * activate the interpreter.
678 */
679 if (imgp->interpreted) {
680 exec_unmap_first_page(imgp);
681 /*
682 * The text reference needs to be removed for scripts.
683              * There is a short period, before we determine that
684              * something is a script, during which the text reference
685              * is active.  The vnode lock is held over this entire
686              * period, so nothing should be illegitimately blocked.
687 */
688 MPASS(imgp->textset);
689 VOP_UNSET_TEXT_CHECKED(newtextvp);
690 imgp->textset = false;
691 /* free name buffer and old vnode */
692 #ifdef MAC
693 mac_execve_interpreter_enter(newtextvp, &interpvplabel);
694 #endif
695 if (imgp->opened) {
696 VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td);
697 imgp->opened = false;
698 }
699 vput(newtextvp);
700 imgp->vp = newtextvp = NULL;
701 if (args->fname != NULL) {
702 if (newtextdvp != NULL) {
703 vrele(newtextdvp);
704 newtextdvp = NULL;
705 }
706 NDFREE_PNBUF(&nd);
707 free(newbinname, M_PARGS);
708 newbinname = NULL;
709 }
710 vm_object_deallocate(imgp->object);
711 imgp->object = NULL;
712 execve_nosetid(imgp);
713 imgp->execpath = NULL;
714 free(imgp->freepath, M_TEMP);
715 imgp->freepath = NULL;
716 /* set new name to that of the interpreter */
717 if (imgp->interpreter_vp) {
718 args->fname = NULL;
719 } else {
720 args->fname = imgp->interpreter_name;
721 }
722 goto interpret;
723 }
724
725 /*
726 * NB: We unlock the vnode here because it is believed that none
727 * of the sv_copyout_strings/sv_fixup operations require the vnode.
728 */
729 VOP_UNLOCK(imgp->vp);
730
731 if (disallow_high_osrel &&
732 P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) {
733 error = ENOEXEC;
734 uprintf("Osrel %d for image %s too high\n", p->p_osrel,
735 imgp->execpath != NULL ? imgp->execpath : "<unresolved>");
736 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
737 goto exec_fail_dealloc;
738 }
739
740 /*
741 * Copy out strings (args and env) and initialize stack base.
742 */
743 error = (*p->p_sysent->sv_copyout_strings)(imgp, &stack_base);
744 if (error != 0) {
745 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
746 goto exec_fail_dealloc;
747 }
748
749 /*
750 * Stack setup.
751 */
752 error = (*p->p_sysent->sv_fixup)(&stack_base, imgp);
753 if (error != 0) {
754 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
755 goto exec_fail_dealloc;
756 }
757
758 /*
759 * For security and other reasons, the file descriptor table cannot be
760 * shared after an exec.
761 */
762 fdunshare(td);
763 pdunshare(td);
764 /* close files on exec */
765 fdcloseexec(td);
766
767 /*
768 * Malloc things before we need locks.
769 */
770 i = exec_args_get_begin_envv(imgp->args) - imgp->args->begin_argv;
771 /* Cache arguments if they fit inside our allowance */
772 if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
773 newargs = pargs_alloc(i);
774 bcopy(imgp->args->begin_argv, newargs->ar_args, i);
775 }
776
777 /*
778 * For security and other reasons, signal handlers cannot
779 * be shared after an exec. The new process gets a copy of the old
780 * handlers. In execsigs(), the new process will have its signals
781 * reset.
782 */
783 if (sigacts_shared(p->p_sigacts)) {
784 oldsigacts = p->p_sigacts;
785 newsigacts = sigacts_alloc();
786 sigacts_copy(newsigacts, oldsigacts);
787 }
788
789 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
790
791 PROC_LOCK(p);
792 if (oldsigacts)
793 p->p_sigacts = newsigacts;
794 /* Stop profiling */
795 stopprofclock(p);
796
797 /* reset caught signals */
798 execsigs(p);
799
800 /* name this process - nameiexec(p, ndp) */
801 bzero(p->p_comm, sizeof(p->p_comm));
802 if (args->fname)
803 bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
804 min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
805 else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0)
806 bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
807 bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
808 #ifdef KTR
809 sched_clear_tdname(td);
810 #endif
811
812 /*
813      * Mark the process as execed, wake up the process that vforked (if
814      * any), and tell it that it now has its own resources back.
815 */
816 p->p_flag |= P_EXEC;
817 if ((p->p_flag2 & P2_NOTRACE_EXEC) == 0)
818 p->p_flag2 &= ~P2_NOTRACE;
819 if ((p->p_flag2 & P2_STKGAP_DISABLE_EXEC) == 0)
820 p->p_flag2 &= ~P2_STKGAP_DISABLE;
821 if (p->p_flag & P_PPWAIT) {
822 p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
823 cv_broadcast(&p->p_pwait);
824 /* STOPs are no longer ignored, arrange for AST */
825 signotify(td);
826 }
827
828 if ((imgp->sysent->sv_setid_allowed != NULL &&
829 !(*imgp->sysent->sv_setid_allowed)(td, imgp)) ||
830 (p->p_flag2 & P2_NO_NEW_PRIVS) != 0)
831 execve_nosetid(imgp);
832
833 /*
834 * Implement image setuid/setgid installation.
835 */
836 if (imgp->credential_setid) {
837 /*
838 * Turn off syscall tracing for set-id programs, except for
839 * root. Record any set-id flags first to make sure that
840 * we do not regain any tracing during a possible block.
841 */
842 setsugid(p);
843 #ifdef KTRACE
844 kiop = ktrprocexec(p);
845 #endif
846 /*
847 * Close any file descriptors 0..2 that reference procfs,
848 * then make sure file descriptors 0..2 are in use.
849 *
850 * Both fdsetugidsafety() and fdcheckstd() may call functions
851 * taking sleepable locks, so temporarily drop our locks.
852 */
853 PROC_UNLOCK(p);
854 VOP_UNLOCK(imgp->vp);
855 fdsetugidsafety(td);
856 error = fdcheckstd(td);
857 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
858 if (error != 0)
859 goto exec_fail_dealloc;
860 PROC_LOCK(p);
861 #ifdef MAC
862 if (will_transition) {
863 mac_vnode_execve_transition(oldcred, imgp->newcred,
864 imgp->vp, interpvplabel, imgp);
865 }
866 #endif
867 } else {
868 if (oldcred->cr_uid == oldcred->cr_ruid &&
869 oldcred->cr_gid == oldcred->cr_rgid)
870 p->p_flag &= ~P_SUGID;
871 }
872 /*
873 * Set the new credentials.
874 */
875 if (imgp->newcred != NULL) {
876 proc_set_cred(p, imgp->newcred);
877 crfree(oldcred);
878 oldcred = NULL;
879 }
880
881 /*
882 * Store the vp for use in kern.proc.pathname. This vnode was
883 * referenced by namei() or by fexecve variant of fname handling.
884 */
885 oldtextvp = p->p_textvp;
886 p->p_textvp = newtextvp;
887 oldtextdvp = p->p_textdvp;
888 p->p_textdvp = newtextdvp;
889 newtextdvp = NULL;
890 oldbinname = p->p_binname;
891 p->p_binname = newbinname;
892 newbinname = NULL;
893
894 #ifdef KDTRACE_HOOKS
895 /*
896 * Tell the DTrace fasttrap provider about the exec if it
897 * has declared an interest.
898 */
899 if (dtrace_fasttrap_exec)
900 dtrace_fasttrap_exec(p);
901 #endif
902
903 /*
904 * Notify others that we exec'd, and clear the P_INEXEC flag
905 * as we're now a bona fide freshly-execed process.
906 */
907 KNOTE_LOCKED(p->p_klist, NOTE_EXEC);
908 p->p_flag &= ~P_INEXEC;
909
910 /* clear "fork but no exec" flag, as we _are_ execing */
911 p->p_acflag &= ~AFORK;
912
913 /*
914 * Free any previous argument cache and replace it with
915 * the new argument cache, if any.
916 */
917 oldargs = p->p_args;
918 p->p_args = newargs;
919 newargs = NULL;
920
921 PROC_UNLOCK(p);
922
923 #ifdef HWPMC_HOOKS
924 /*
925 * Check if system-wide sampling is in effect or if the
926 * current process is using PMCs. If so, do exec() time
927 * processing. This processing needs to happen AFTER the
928 * P_INEXEC flag is cleared.
929 */
930 if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
931 VOP_UNLOCK(imgp->vp);
932 pe.pm_credentialschanged = credential_changing;
933 pe.pm_entryaddr = imgp->entry_addr;
934
935 PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
936 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
937 }
938 #endif
939
940 /* Set values passed into the program in registers. */
941 (*p->p_sysent->sv_setregs)(td, imgp, stack_base);
942
943 VOP_MMAPPED(imgp->vp);
944
945 SDT_PROBE1(proc, , , exec__success, args->fname);
946
947 exec_fail_dealloc:
948 if (error != 0) {
949 p->p_osrel = orig_osrel;
950 p->p_fctl0 = orig_fctl0;
951 p->p_elf_brandinfo = orig_brandinfo;
952 }
953
954 if (imgp->firstpage != NULL)
955 exec_unmap_first_page(imgp);
956
957 if (imgp->vp != NULL) {
958 if (imgp->opened)
959 VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
960 if (imgp->textset)
961 VOP_UNSET_TEXT_CHECKED(imgp->vp);
962 if (error != 0)
963 vput(imgp->vp);
964 else
965 VOP_UNLOCK(imgp->vp);
966 if (args->fname != NULL)
967 NDFREE_PNBUF(&nd);
968 if (newtextdvp != NULL)
969 vrele(newtextdvp);
970 free(newbinname, M_PARGS);
971 }
972
973 if (imgp->object != NULL)
974 vm_object_deallocate(imgp->object);
975
976 free(imgp->freepath, M_TEMP);
977
978 if (error == 0) {
979 if (p->p_ptevents & PTRACE_EXEC) {
980 PROC_LOCK(p);
981 if (p->p_ptevents & PTRACE_EXEC)
982 td->td_dbgflags |= TDB_EXEC;
983 PROC_UNLOCK(p);
984 }
985 } else {
986 exec_fail:
987 /* we're done here, clear P_INEXEC */
988 PROC_LOCK(p);
989 p->p_flag &= ~P_INEXEC;
990 PROC_UNLOCK(p);
991
992 SDT_PROBE1(proc, , , exec__failure, error);
993 }
994
995 if (imgp->newcred != NULL && oldcred != NULL)
996 crfree(imgp->newcred);
997
998 #ifdef MAC
999 mac_execve_exit(imgp);
1000 mac_execve_interpreter_exit(interpvplabel);
1001 #endif
1002 exec_free_args(args);
1003
1004 /*
1005 * Handle deferred decrement of ref counts.
1006 */
1007 if (oldtextvp != NULL)
1008 vrele(oldtextvp);
1009 if (oldtextdvp != NULL)
1010 vrele(oldtextdvp);
1011 free(oldbinname, M_PARGS);
1012 #ifdef KTRACE
1013 ktr_io_params_free(kiop);
1014 #endif
1015 pargs_drop(oldargs);
1016 pargs_drop(newargs);
1017 if (oldsigacts != NULL)
1018 sigacts_free(oldsigacts);
1019 if (euip != NULL)
1020 uifree(euip);
1021
1022 if (error && imgp->vmspace_destroyed) {
1023                 /* The old address space is gone; there is no process to return to.  Exit gracefully. */
1024 exec_cleanup(td, oldvmspace);
1025 exit1(td, 0, SIGABRT);
1026 /* NOT REACHED */
1027 }
1028
1029 #ifdef KTRACE
1030 if (error == 0)
1031 ktrprocctor(p);
1032 #endif
1033
1034 /*
1035 * We don't want cpu_set_syscall_retval() to overwrite any of
1036 * the register values put in place by exec_setregs().
1037 * Implementations of cpu_set_syscall_retval() will leave
1038 * registers unmodified when returning EJUSTRETURN.
1039 */
1040 return (error == 0 ? EJUSTRETURN : error);
1041 }
1042
1043 void
1044 exec_cleanup(struct thread *td, struct vmspace *oldvmspace)
1045 {
1046 if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
1047 KASSERT(td->td_proc->p_vmspace != oldvmspace,
1048 ("oldvmspace still used"));
1049 vmspace_free(oldvmspace);
1050 td->td_pflags &= ~TDP_EXECVMSPC;
1051 }
1052 }
1053
1054 int
1055 exec_map_first_page(struct image_params *imgp)
1056 {
1057 vm_object_t object;
1058 vm_page_t m;
1059 int error;
1060
1061 if (imgp->firstpage != NULL)
1062 exec_unmap_first_page(imgp);
1063
1064 object = imgp->vp->v_object;
1065 if (object == NULL)
1066 return (EACCES);
1067 #if VM_NRESERVLEVEL > 0
1068 if ((object->flags & OBJ_COLORED) == 0) {
1069 VM_OBJECT_WLOCK(object);
1070 vm_object_color(object, 0);
1071 VM_OBJECT_WUNLOCK(object);
1072 }
1073 #endif
1074 error = vm_page_grab_valid_unlocked(&m, object, 0,
1075 VM_ALLOC_COUNT(VM_INITIAL_PAGEIN) |
1076 VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
1077
1078 if (error != VM_PAGER_OK)
1079 return (EIO);
1080 imgp->firstpage = sf_buf_alloc(m, 0);
1081 imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
1082
1083 return (0);
1084 }
1085
1086 void
1087 exec_unmap_first_page(struct image_params *imgp)
1088 {
1089 vm_page_t m;
1090
1091 if (imgp->firstpage != NULL) {
1092 m = sf_buf_page(imgp->firstpage);
1093 sf_buf_free(imgp->firstpage);
1094 imgp->firstpage = NULL;
1095 vm_page_unwire(m, PQ_ACTIVE);
1096 }
1097 }
1098
1099 void
1100 exec_onexec_old(struct thread *td)
1101 {
1102 sigfastblock_clear(td);
1103 umtx_exec(td->td_proc);
1104 }
1105
1106 /*
1107 * This is an optimization which removes the unmanaged shared page
1108 * mapping. In combination with pmap_remove_pages(), which cleans all
1109 * managed mappings in the process' vmspace pmap, no work will be left
1110 * for pmap_remove(min, max).
1111 */
1112 void
1113 exec_free_abi_mappings(struct proc *p)
1114 {
1115 struct vmspace *vmspace;
1116
1117 vmspace = p->p_vmspace;
1118 if (refcount_load(&vmspace->vm_refcnt) != 1)
1119 return;
1120
1121 if (!PROC_HAS_SHP(p))
1122 return;
1123
1124 pmap_remove(vmspace_pmap(vmspace), vmspace->vm_shp_base,
1125 vmspace->vm_shp_base + p->p_sysent->sv_shared_page_len);
1126 }
1127
1128 /*
1129 * Run down the current address space and install a new one.
1130 */
1131 int
1132 exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
1133 {
1134 int error;
1135 struct proc *p = imgp->proc;
1136 struct vmspace *vmspace = p->p_vmspace;
1137 struct thread *td = curthread;
1138 vm_offset_t sv_minuser;
1139 vm_map_t map;
1140
1141 imgp->vmspace_destroyed = true;
1142 imgp->sysent = sv;
1143
1144 if (p->p_sysent->sv_onexec_old != NULL)
1145 p->p_sysent->sv_onexec_old(td);
1146 itimers_exec(p);
1147
1148 EVENTHANDLER_DIRECT_INVOKE(process_exec, p, imgp);
1149
1150 /*
1151     * Blow away the entire process VM if the address space is not
1152     * shared; otherwise, create a new VM space so that other threads
1153     * are not disrupted.
1154 */
1155 map = &vmspace->vm_map;
1156 if (map_at_zero)
1157 sv_minuser = sv->sv_minuser;
1158 else
1159 sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
1160 if (refcount_load(&vmspace->vm_refcnt) == 1 &&
1161 vm_map_min(map) == sv_minuser &&
1162 vm_map_max(map) == sv->sv_maxuser &&
1163 cpu_exec_vmspace_reuse(p, map)) {
1164 exec_free_abi_mappings(p);
1165 shmexit(vmspace);
1166 pmap_remove_pages(vmspace_pmap(vmspace));
1167 vm_map_remove(map, vm_map_min(map), vm_map_max(map));
1168 /*
1169 * An exec terminates mlockall(MCL_FUTURE).
1170 * ASLR and W^X states must be re-evaluated.
1171 */
1172 vm_map_lock(map);
1173 vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
1174 MAP_ASLR_IGNSTART | MAP_ASLR_STACK | MAP_WXORX);
1175 vm_map_unlock(map);
1176 } else {
1177 error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
1178 if (error)
1179 return (error);
1180 vmspace = p->p_vmspace;
1181 map = &vmspace->vm_map;
1182 }
1183 map->flags |= imgp->map_flags;
1184
1185 return (sv->sv_onexec != NULL ? sv->sv_onexec(p, imgp) : 0);
1186 }
1187
1188 /*
1189 * Compute the stack size limit and map the main process stack.
1190 * Map the shared page.
1191 */
1192 int
1193 exec_map_stack(struct image_params *imgp)
1194 {
1195 struct rlimit rlim_stack;
1196 struct sysentvec *sv;
1197 struct proc *p;
1198 vm_map_t map;
1199 struct vmspace *vmspace;
1200 vm_offset_t stack_addr, stack_top;
1201 vm_offset_t sharedpage_addr;
1202 u_long ssiz;
1203 int error, find_space, stack_off;
1204 vm_prot_t stack_prot;
1205 vm_object_t obj;
1206
1207 p = imgp->proc;
1208 sv = p->p_sysent;
1209
1210 if (imgp->stack_sz != 0) {
1211 ssiz = trunc_page(imgp->stack_sz);
1212 PROC_LOCK(p);
1213 lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack);
1214 PROC_UNLOCK(p);
1215 if (ssiz > rlim_stack.rlim_max)
1216 ssiz = rlim_stack.rlim_max;
1217 if (ssiz > rlim_stack.rlim_cur) {
1218 rlim_stack.rlim_cur = ssiz;
1219 kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack);
1220 }
1221 } else if (sv->sv_maxssiz != NULL) {
1222 ssiz = *sv->sv_maxssiz;
1223 } else {
1224 ssiz = maxssiz;
1225 }
1226
1227 vmspace = p->p_vmspace;
1228 map = &vmspace->vm_map;
1229
1230 stack_prot = sv->sv_shared_page_obj != NULL && imgp->stack_prot != 0 ?
1231 imgp->stack_prot : sv->sv_stackprot;
1232 if ((map->flags & MAP_ASLR_STACK) != 0) {
1233 stack_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
1234 lim_max(curthread, RLIMIT_DATA));
1235 find_space = VMFS_ANY_SPACE;
1236 } else {
1237 stack_addr = sv->sv_usrstack - ssiz;
1238 find_space = VMFS_NO_SPACE;
1239 }
1240 error = vm_map_find(map, NULL, 0, &stack_addr, (vm_size_t)ssiz,
1241 sv->sv_usrstack, find_space, stack_prot, VM_PROT_ALL,
1242 MAP_STACK_GROWS_DOWN);
1243 if (error != KERN_SUCCESS) {
1244 uprintf("exec_new_vmspace: mapping stack size %#jx prot %#x "
1245 "failed, mach error %d errno %d\n", (uintmax_t)ssiz,
1246 stack_prot, error, vm_mmap_to_errno(error));
1247 return (vm_mmap_to_errno(error));
1248 }
1249
1250 stack_top = stack_addr + ssiz;
1251 if ((map->flags & MAP_ASLR_STACK) != 0) {
1252 /* Randomize within the first page of the stack. */
1253 arc4rand(&stack_off, sizeof(stack_off), 0);
1254 stack_top -= rounddown2(stack_off & PAGE_MASK, sizeof(void *));
1255 }
1256
1257 /* Map a shared page */
1258 obj = sv->sv_shared_page_obj;
1259 if (obj == NULL) {
1260 sharedpage_addr = 0;
1261 goto out;
1262 }
1263
1264 /*
1265     * If randomization is disabled, the shared page will
1266     * be mapped at the address specified in the sysentvec;
1267     * otherwise any address above the .data section can be
1268     * selected, using the same logic as stack address
1269     * randomization.  If address randomization is applied,
1270     * also map a guard page at the top of the UVA.
1271 */
1272 vm_object_reference(obj);
1273 if ((imgp->imgp_flags & IMGP_ASLR_SHARED_PAGE) != 0) {
1274 sharedpage_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
1275 lim_max(curthread, RLIMIT_DATA));
1276
1277 error = vm_map_fixed(map, NULL, 0,
1278 sv->sv_maxuser - PAGE_SIZE, PAGE_SIZE,
1279 VM_PROT_NONE, VM_PROT_NONE, MAP_CREATE_GUARD);
1280 if (error != KERN_SUCCESS) {
1281 /*
1282 * This is not fatal, so let's just print a warning
1283 * and continue.
1284 */
1285 uprintf("%s: Mapping guard page at the top of UVA failed"
1286 " mach error %d errno %d",
1287 __func__, error, vm_mmap_to_errno(error));
1288 }
1289
1290 error = vm_map_find(map, obj, 0,
1291 &sharedpage_addr, sv->sv_shared_page_len,
1292 sv->sv_maxuser, VMFS_ANY_SPACE,
1293 VM_PROT_READ | VM_PROT_EXECUTE,
1294 VM_PROT_READ | VM_PROT_EXECUTE,
1295 MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
1296 } else {
1297 sharedpage_addr = sv->sv_shared_page_base;
1298                 error = vm_map_fixed(map, obj, 0,
1299 sharedpage_addr, sv->sv_shared_page_len,
1300 VM_PROT_READ | VM_PROT_EXECUTE,
1301 VM_PROT_READ | VM_PROT_EXECUTE,
1302 MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
1303 }
1304 if (error != KERN_SUCCESS) {
1305 uprintf("%s: mapping shared page at addr: %p"
1306 "failed, mach error %d errno %d\n", __func__,
1307 (void *)sharedpage_addr, error, vm_mmap_to_errno(error));
1308 vm_object_deallocate(obj);
1309 return (vm_mmap_to_errno(error));
1310 }
1311 out:
1312 /*
1313 * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
1314 * are still used to enforce the stack rlimit on the process stack.
1315 */
1316 vmspace->vm_maxsaddr = (char *)stack_addr;
1317 vmspace->vm_stacktop = stack_top;
1318 vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
1319 vmspace->vm_shp_base = sharedpage_addr;
1320
1321 return (0);
1322 }
1323
1324 /*
1325 * Copy out argument and environment strings from the old process address
1326 * space into the temporary string buffer.
1327 */
1328 int
1329 exec_copyin_args(struct image_args *args, const char *fname,
1330 enum uio_seg segflg, char **argv, char **envv)
1331 {
1332 u_long arg, env;
1333 int error;
1334
1335 bzero(args, sizeof(*args));
1336 if (argv == NULL)
1337 return (EFAULT);
1338
1339 /*
1340 * Allocate demand-paged memory for the file name, argument, and
1341 * environment strings.
1342 */
1343 error = exec_alloc_args(args);
1344 if (error != 0)
1345 return (error);
1346
1347 /*
1348 * Copy the file name.
1349 */
1350 error = exec_args_add_fname(args, fname, segflg);
1351 if (error != 0)
1352 goto err_exit;
1353
1354 /*
1355 * extract arguments first
1356 */
1357 for (;;) {
1358 error = fueword(argv++, &arg);
1359 if (error == -1) {
1360 error = EFAULT;
1361 goto err_exit;
1362 }
1363 if (arg == 0)
1364 break;
1365 error = exec_args_add_arg(args, (char *)(uintptr_t)arg,
1366 UIO_USERSPACE);
1367 if (error != 0)
1368 goto err_exit;
1369 }
1370
1371 /*
1372 * extract environment strings
1373 */
1374 if (envv) {
1375 for (;;) {
1376 error = fueword(envv++, &env);
1377 if (error == -1) {
1378 error = EFAULT;
1379 goto err_exit;
1380 }
1381 if (env == 0)
1382 break;
1383 error = exec_args_add_env(args,
1384 (char *)(uintptr_t)env, UIO_USERSPACE);
1385 if (error != 0)
1386 goto err_exit;
1387 }
1388 }
1389
1390 return (0);
1391
1392 err_exit:
1393 exec_free_args(args);
1394 return (error);
1395 }
1396
1397 struct exec_args_kva {
1398 vm_offset_t addr;
1399 u_int gen;
1400 SLIST_ENTRY(exec_args_kva) next;
1401 };
1402
1403 DPCPU_DEFINE_STATIC(struct exec_args_kva *, exec_args_kva);
1404
1405 static SLIST_HEAD(, exec_args_kva) exec_args_kva_freelist;
1406 static struct mtx exec_args_kva_mtx;
1407 static u_int exec_args_gen;
1408
1409 static void
1410 exec_prealloc_args_kva(void *arg __unused)
1411 {
1412 struct exec_args_kva *argkva;
1413 u_int i;
1414
1415 SLIST_INIT(&exec_args_kva_freelist);
1416 mtx_init(&exec_args_kva_mtx, "exec args kva", NULL, MTX_DEF);
1417 for (i = 0; i < exec_map_entries; i++) {
1418 argkva = malloc(sizeof(*argkva), M_PARGS, M_WAITOK);
1419 argkva->addr = kmap_alloc_wait(exec_map, exec_map_entry_size);
1420 argkva->gen = exec_args_gen;
1421 SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
1422 }
1423 }
1424 SYSINIT(exec_args_kva, SI_SUB_EXEC, SI_ORDER_ANY, exec_prealloc_args_kva, NULL);
1425
1426 static vm_offset_t
1427 exec_alloc_args_kva(void **cookie)
1428 {
1429 struct exec_args_kva *argkva;
1430
1431 argkva = (void *)atomic_readandclear_ptr(
1432 (uintptr_t *)DPCPU_PTR(exec_args_kva));
1433 if (argkva == NULL) {
1434 mtx_lock(&exec_args_kva_mtx);
1435 while ((argkva = SLIST_FIRST(&exec_args_kva_freelist)) == NULL)
1436 (void)mtx_sleep(&exec_args_kva_freelist,
1437 &exec_args_kva_mtx, 0, "execkva", 0);
1438 SLIST_REMOVE_HEAD(&exec_args_kva_freelist, next);
1439 mtx_unlock(&exec_args_kva_mtx);
1440 }
1441 kasan_mark((void *)argkva->addr, exec_map_entry_size,
1442 exec_map_entry_size, 0);
1443 *(struct exec_args_kva **)cookie = argkva;
1444 return (argkva->addr);
1445 }
1446
1447 static void
1448 exec_release_args_kva(struct exec_args_kva *argkva, u_int gen)
1449 {
1450 vm_offset_t base;
1451
1452 base = argkva->addr;
1453 kasan_mark((void *)argkva->addr, 0, exec_map_entry_size,
1454 KASAN_EXEC_ARGS_FREED);
1455 if (argkva->gen != gen) {
1456 (void)vm_map_madvise(exec_map, base, base + exec_map_entry_size,
1457 MADV_FREE);
1458 argkva->gen = gen;
1459 }
1460 if (!atomic_cmpset_ptr((uintptr_t *)DPCPU_PTR(exec_args_kva),
1461 (uintptr_t)NULL, (uintptr_t)argkva)) {
1462 mtx_lock(&exec_args_kva_mtx);
1463 SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
1464 wakeup_one(&exec_args_kva_freelist);
1465 mtx_unlock(&exec_args_kva_mtx);
1466 }
1467 }
1468
1469 static void
1470 exec_free_args_kva(void *cookie)
1471 {
1472
1473 exec_release_args_kva(cookie, exec_args_gen);
1474 }
1475
1476 static void
1477 exec_args_kva_lowmem(void *arg __unused)
1478 {
1479 SLIST_HEAD(, exec_args_kva) head;
1480 struct exec_args_kva *argkva;
1481 u_int gen;
1482 int i;
1483
1484 gen = atomic_fetchadd_int(&exec_args_gen, 1) + 1;
1485
1486 /*
1487 * Force an madvise of each KVA range. Any currently allocated ranges
1488 * will have MADV_FREE applied once they are freed.
1489 */
1490 SLIST_INIT(&head);
1491 mtx_lock(&exec_args_kva_mtx);
1492 SLIST_SWAP(&head, &exec_args_kva_freelist, exec_args_kva);
1493 mtx_unlock(&exec_args_kva_mtx);
1494 while ((argkva = SLIST_FIRST(&head)) != NULL) {
1495 SLIST_REMOVE_HEAD(&head, next);
1496 exec_release_args_kva(argkva, gen);
1497 }
1498
1499 CPU_FOREACH(i) {
1500 argkva = (void *)atomic_readandclear_ptr(
1501 (uintptr_t *)DPCPU_ID_PTR(i, exec_args_kva));
1502 if (argkva != NULL)
1503 exec_release_args_kva(argkva, gen);
1504 }
1505 }
1506 EVENTHANDLER_DEFINE(vm_lowmem, exec_args_kva_lowmem, NULL,
1507 EVENTHANDLER_PRI_ANY);
1508
1509 /*
1510 * Allocate temporary demand-paged, zero-filled memory for the file name,
1511 * argument, and environment strings.
1512 */
1513 int
1514 exec_alloc_args(struct image_args *args)
1515 {
1516
1517 args->buf = (char *)exec_alloc_args_kva(&args->bufkva);
1518 return (0);
1519 }
1520
1521 void
1522 exec_free_args(struct image_args *args)
1523 {
1524
1525 if (args->buf != NULL) {
1526 exec_free_args_kva(args->bufkva);
1527 args->buf = NULL;
1528 }
1529 if (args->fname_buf != NULL) {
1530 free(args->fname_buf, M_TEMP);
1531 args->fname_buf = NULL;
1532 }
1533 }
1534
1535 /*
1536  * A set of functions to fill struct image_args.
1537 *
1538 * NOTE: exec_args_add_fname() must be called (possibly with a NULL
1539 * fname) before the other functions. All exec_args_add_arg() calls must
1540 * be made before any exec_args_add_env() calls. exec_args_adjust_args()
1541 * may be called any time after exec_args_add_fname().
1542 *
1543 * exec_args_add_fname() - install path to be executed
1544 * exec_args_add_arg() - append an argument string
1545 * exec_args_add_env() - append an env string
1546 * exec_args_adjust_args() - adjust location of the argument list to
1547 * allow new arguments to be prepended
1548 */
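/*
 * A hypothetical sketch of an interpreter-style activator prepending a
 * string in front of the current argv (names are illustrative, error
 * handling omitted; compare imgact_shell.c for a complete example):
 *
 *	len = strlen(interp) + 1;
 *	error = exec_args_adjust_args(imgp->args, 0, len);
 *	if (error == 0) {
 *		memcpy(imgp->args->begin_argv, interp, len);
 *		imgp->args->argc++;
 *	}
 */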
1549 int
1550 exec_args_add_fname(struct image_args *args, const char *fname,
1551 enum uio_seg segflg)
1552 {
1553 int error;
1554 size_t length;
1555
1556 KASSERT(args->fname == NULL, ("fname already appended"));
1557 KASSERT(args->endp == NULL, ("already appending to args"));
1558
1559 if (fname != NULL) {
1560 args->fname = args->buf;
1561 error = segflg == UIO_SYSSPACE ?
1562 copystr(fname, args->fname, PATH_MAX, &length) :
1563 copyinstr(fname, args->fname, PATH_MAX, &length);
1564 if (error != 0)
1565 return (error == ENAMETOOLONG ? E2BIG : error);
1566 } else
1567 length = 0;
1568
1569 /* Set up for _arg_*()/_env_*() */
1570 args->endp = args->buf + length;
1571 /* begin_argv must be set and kept updated */
1572 args->begin_argv = args->endp;
1573 KASSERT(exec_map_entry_size - length >= ARG_MAX,
1574 ("too little space remaining for arguments %zu < %zu",
1575 exec_map_entry_size - length, (size_t)ARG_MAX));
1576 args->stringspace = ARG_MAX;
1577
1578 return (0);
1579 }
1580
1581 static int
1582 exec_args_add_str(struct image_args *args, const char *str,
1583 enum uio_seg segflg, int *countp)
1584 {
1585 int error;
1586 size_t length;
1587
1588 KASSERT(args->endp != NULL, ("endp not initialized"));
1589         KASSERT(args->begin_argv != NULL, ("begin_argv not initialized"));
1590
1591 error = (segflg == UIO_SYSSPACE) ?
1592 copystr(str, args->endp, args->stringspace, &length) :
1593 copyinstr(str, args->endp, args->stringspace, &length);
1594 if (error != 0)
1595 return (error == ENAMETOOLONG ? E2BIG : error);
1596 args->stringspace -= length;
1597 args->endp += length;
1598 (*countp)++;
1599
1600 return (0);
1601 }
1602
1603 int
1604 exec_args_add_arg(struct image_args *args, const char *argp,
1605 enum uio_seg segflg)
1606 {
1607
1608 KASSERT(args->envc == 0, ("appending args after env"));
1609
1610 return (exec_args_add_str(args, argp, segflg, &args->argc));
1611 }
1612
1613 int
1614 exec_args_add_env(struct image_args *args, const char *envp,
1615 enum uio_seg segflg)
1616 {
1617
1618 if (args->envc == 0)
1619 args->begin_envv = args->endp;
1620
1621 return (exec_args_add_str(args, envp, segflg, &args->envc));
1622 }
1623
1624 int
1625 exec_args_adjust_args(struct image_args *args, size_t consume, ssize_t extend)
1626 {
1627 ssize_t offset;
1628
1629 KASSERT(args->endp != NULL, ("endp not initialized"));
1630         KASSERT(args->begin_argv != NULL, ("begin_argv not initialized"));
1631
1632 offset = extend - consume;
1633 if (args->stringspace < offset)
1634 return (E2BIG);
1635 memmove(args->begin_argv + extend, args->begin_argv + consume,
1636 args->endp - args->begin_argv + consume);
1637 if (args->envc > 0)
1638 args->begin_envv += offset;
1639 args->endp += offset;
1640 args->stringspace -= offset;
1641 return (0);
1642 }
1643
1644 char *
1645 exec_args_get_begin_envv(struct image_args *args)
1646 {
1647
1648 KASSERT(args->endp != NULL, ("endp not initialized"));
1649
1650 if (args->envc > 0)
1651 return (args->begin_envv);
1652 return (args->endp);
1653 }
1654
1655 /*
1656 * Copy strings out to the new process address space, constructing new arg
1657 * and env vector tables. Return a pointer to the base so that it can be used
1658 * as the initial stack pointer.
1659 */
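/*
 * The resulting layout, sketched from the code below (the top of the
 * area is at PROC_PS_STRINGS, addresses decreasing downward; some items
 * are conditional on the sysentvec and image):
 *
 *	ps_strings structure
 *	signal trampoline	(only if there is no shared page)
 *	execpath for rtld	(only if auxargs are used)
 *	SSP canary
 *	pagesizes array
 *	argument and environment strings
 *	room for the ELF auxargs vector (up to AT_COUNT entries)
 *	envv[] pointer vector, NULL-terminated
 *	argv[] pointer vector, NULL-terminated	(*stack_base = &argv[0])
 */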
1660 int
1661 exec_copyout_strings(struct image_params *imgp, uintptr_t *stack_base)
1662 {
1663 int argc, envc;
1664 char **vectp;
1665 char *stringp;
1666 uintptr_t destp, ustringp;
1667 struct ps_strings *arginfo;
1668 struct proc *p;
1669 struct sysentvec *sysent;
1670 size_t execpath_len;
1671 int error, szsigcode;
1672 char canary[sizeof(long) * 8];
1673
1674 p = imgp->proc;
1675 sysent = p->p_sysent;
1676
1677 destp = PROC_PS_STRINGS(p);
1678 arginfo = imgp->ps_strings = (void *)destp;
1679
1680 /*
1681 * Install sigcode.
1682 */
1683 if (sysent->sv_shared_page_base == 0 && sysent->sv_szsigcode != NULL) {
1684 szsigcode = *(sysent->sv_szsigcode);
1685 destp -= szsigcode;
1686 destp = rounddown2(destp, sizeof(void *));
1687 error = copyout(sysent->sv_sigcode, (void *)destp, szsigcode);
1688 if (error != 0)
1689 return (error);
1690 }
1691
1692 /*
1693 * Copy the image path for the rtld.
1694 */
1695 if (imgp->execpath != NULL && imgp->auxargs != NULL) {
1696 execpath_len = strlen(imgp->execpath) + 1;
1697 destp -= execpath_len;
1698 destp = rounddown2(destp, sizeof(void *));
1699 imgp->execpathp = (void *)destp;
1700 error = copyout(imgp->execpath, imgp->execpathp, execpath_len);
1701 if (error != 0)
1702 return (error);
1703 }
1704
1705 /*
1706 * Prepare the canary for SSP.
1707 */
1708 arc4rand(canary, sizeof(canary), 0);
1709 destp -= sizeof(canary);
1710 imgp->canary = (void *)destp;
1711 error = copyout(canary, imgp->canary, sizeof(canary));
1712 if (error != 0)
1713 return (error);
1714 imgp->canarylen = sizeof(canary);
1715
1716 /*
1717 * Prepare the pagesizes array.
1718 */
1719 imgp->pagesizeslen = sizeof(pagesizes[0]) * MAXPAGESIZES;
1720 destp -= imgp->pagesizeslen;
1721 destp = rounddown2(destp, sizeof(void *));
1722 imgp->pagesizes = (void *)destp;
1723 error = copyout(pagesizes, imgp->pagesizes, imgp->pagesizeslen);
1724 if (error != 0)
1725 return (error);
1726
1727 /*
1728 * Allocate room for the argument and environment strings.
1729 */
1730 destp -= ARG_MAX - imgp->args->stringspace;
1731 destp = rounddown2(destp, sizeof(void *));
1732 ustringp = destp;
1733
1734 if (imgp->auxargs) {
1735 /*
1736 * Allocate room on the stack for the ELF auxargs
1737 * array. It has up to AT_COUNT entries.
1738 */
1739 destp -= AT_COUNT * sizeof(Elf_Auxinfo);
1740 destp = rounddown2(destp, sizeof(void *));
1741 }
1742
1743 vectp = (char **)destp;
1744
1745 /*
1746 * Allocate room for the argv[] and env vectors including the
1747 * terminating NULL pointers.
1748 */
1749 vectp -= imgp->args->argc + 1 + imgp->args->envc + 1;
1750
1751 /*
1752 * vectp also becomes our initial stack base
1753 */
1754 *stack_base = (uintptr_t)vectp;
1755
1756 stringp = imgp->args->begin_argv;
1757 argc = imgp->args->argc;
1758 envc = imgp->args->envc;
1759
1760 /*
1761 * Copy out strings - arguments and environment.
1762 */
1763 error = copyout(stringp, (void *)ustringp,
1764 ARG_MAX - imgp->args->stringspace);
1765 if (error != 0)
1766 return (error);
1767
1768 /*
1769 * Fill in "ps_strings" struct for ps, w, etc.
1770 */
1771 imgp->argv = vectp;
1772 if (suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp) != 0 ||
1773 suword32(&arginfo->ps_nargvstr, argc) != 0)
1774 return (EFAULT);
1775
1776 /*
1777 * Fill in argument portion of vector table.
1778 */
1779 for (; argc > 0; --argc) {
1780 if (suword(vectp++, ustringp) != 0)
1781 return (EFAULT);
1782 while (*stringp++ != 0)
1783 ustringp++;
1784 ustringp++;
1785 }
1786
1787 /* a null vector table pointer separates the argp's from the envp's */
1788 if (suword(vectp++, 0) != 0)
1789 return (EFAULT);
1790
1791 imgp->envv = vectp;
1792 if (suword(&arginfo->ps_envstr, (long)(intptr_t)vectp) != 0 ||
1793 suword32(&arginfo->ps_nenvstr, envc) != 0)
1794 return (EFAULT);
1795
1796 /*
1797 * Fill in environment portion of vector table.
1798 */
1799 for (; envc > 0; --envc) {
1800 if (suword(vectp++, ustringp) != 0)
1801 return (EFAULT);
1802 while (*stringp++ != 0)
1803 ustringp++;
1804 ustringp++;
1805 }
1806
1807 /* end of vector table is a null pointer */
1808 if (suword(vectp, 0) != 0)
1809 return (EFAULT);
1810
1811 if (imgp->auxargs) {
1812 vectp++;
1813 error = imgp->sysent->sv_copyout_auxargs(imgp,
1814 (uintptr_t)vectp);
1815 if (error != 0)
1816 return (error);
1817 }
1818
1819 return (0);
1820 }
1821
1822 /*
1823 * Check permissions of file to execute.
1824 * Called with imgp->vp locked.
1825 * Return 0 for success or error code on failure.
1826 */
1827 int
1828 exec_check_permissions(struct image_params *imgp)
1829 {
1830 struct vnode *vp = imgp->vp;
1831 struct vattr *attr = imgp->attr;
1832 struct thread *td;
1833 int error;
1834
1835 td = curthread;
1836
1837 /* Get file attributes */
1838 error = VOP_GETATTR(vp, attr, td->td_ucred);
1839 if (error)
1840 return (error);
1841
1842 #ifdef MAC
1843 error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
1844 if (error)
1845 return (error);
1846 #endif
1847
1848 /*
1849 * 1) Check if file execution is disabled for the filesystem that
1850 * this file resides on.
1851 * 2) Ensure that at least one execute bit is on. Otherwise, a
1852 * privileged user will always succeed, and we don't want this
1853 * to happen unless the file really is executable.
1854 * 3) Ensure that the file is a regular file.
1855 */
1856 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
1857 (attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
1858 (attr->va_type != VREG))
1859 return (EACCES);
1860
1861 /*
1862 * Zero length files can't be exec'd
1863 */
1864 if (attr->va_size == 0)
1865 return (ENOEXEC);
1866
1867 /*
1868 * Check for execute permission to file based on current credentials.
1869 */
1870 error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
1871 if (error)
1872 return (error);
1873
1874 /*
1875 * Check number of open-for-writes on the file and deny execution
1876 * if there are any.
1877 *
1878 * Add a text reference now so no one can write to the
1879 * executable while we're activating it.
1880 *
1881 * Remember if this was set before and unset it in case this is not
1882 * actually an executable image.
1883 */
1884 error = VOP_SET_TEXT(vp);
1885 if (error != 0)
1886 return (error);
1887 imgp->textset = true;
1888
1889 /*
1890 * Call filesystem specific open routine (which does nothing in the
1891 * general case).
1892 */
1893 error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
1894 if (error == 0)
1895 imgp->opened = true;
1896 return (error);
1897 }
1898
1899 /*
1900 * Exec handler registration
1901 */
1902 int
1903 exec_register(const struct execsw *execsw_arg)
1904 {
1905 const struct execsw **es, **xs, **newexecsw;
1906 u_int count = 2; /* New slot and trailing NULL */
1907
1908 if (execsw)
1909 for (es = execsw; *es; es++)
1910 count++;
1911 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1912 xs = newexecsw;
1913 if (execsw)
1914 for (es = execsw; *es; es++)
1915 *xs++ = *es;
1916 *xs++ = execsw_arg;
1917 *xs = NULL;
1918 if (execsw)
1919 free(execsw, M_TEMP);
1920 execsw = newexecsw;
1921 return (0);
1922 }
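/*
 * A minimal sketch of registering a custom image activator (names are
 * illustrative).  Built-in activators typically register through the
 * EXEC_SET() macro, which arranges this call from a module event
 * handler:
 *
 *	static int my_imgact(struct image_params *imgp);
 *	static const struct execsw my_execsw = {
 *		.ex_imgact = my_imgact,
 *		.ex_name = "my-format"
 *	};
 *	...
 *	error = exec_register(&my_execsw);
 */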
1923
1924 int
1925 exec_unregister(const struct execsw *execsw_arg)
1926 {
1927 const struct execsw **es, **xs, **newexecsw;
1928 int count = 1;
1929
1930 if (execsw == NULL)
1931 panic("unregister with no handlers left?\n");
1932
1933 for (es = execsw; *es; es++) {
1934 if (*es == execsw_arg)
1935 break;
1936 }
1937 if (*es == NULL)
1938 return (ENOENT);
1939 for (es = execsw; *es; es++)
1940 if (*es != execsw_arg)
1941 count++;
1942 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1943 xs = newexecsw;
1944 for (es = execsw; *es; es++)
1945 if (*es != execsw_arg)
1946 *xs++ = *es;
1947 *xs = NULL;
1948 if (execsw)
1949 free(execsw, M_TEMP);
1950 execsw = newexecsw;
1951 return (0);
1952 }
1953
1954 /*
1955 * Write out a core segment to the compression stream.
1956 */
1957 static int
1958 compress_chunk(struct coredump_params *cp, char *base, char *buf, size_t len)
1959 {
1960 size_t chunk_len;
1961 int error;
1962
1963 while (len > 0) {
1964 chunk_len = MIN(len, CORE_BUF_SIZE);
1965
1966 /*
1967 * We can get EFAULT error here.
1968 * In that case zero out the current chunk of the segment.
1969 */
1970 error = copyin(base, buf, chunk_len);
1971 if (error != 0)
1972 bzero(buf, chunk_len);
1973 error = compressor_write(cp->comp, buf, chunk_len);
1974 if (error != 0)
1975 break;
1976 base += chunk_len;
1977 len -= chunk_len;
1978 }
1979 return (error);
1980 }
1981
1982 int
1983 core_write(struct coredump_params *cp, const void *base, size_t len,
1984 off_t offset, enum uio_seg seg, size_t *resid)
1985 {
1986
1987 return (vn_rdwr_inchunks(UIO_WRITE, cp->vp, __DECONST(void *, base),
1988 len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
1989 cp->active_cred, cp->file_cred, resid, cp->td));
1990 }
1991
1992 int
1993 core_output(char *base, size_t len, off_t offset, struct coredump_params *cp,
1994 void *tmpbuf)
1995 {
1996 vm_map_t map;
1997 struct mount *mp;
1998 size_t resid, runlen;
1999 int error;
2000 bool success;
2001
2002 KASSERT((uintptr_t)base % PAGE_SIZE == 0,
2003 ("%s: user address %p is not page-aligned", __func__, base));
2004
2005 if (cp->comp != NULL)
2006 return (compress_chunk(cp, base, tmpbuf, len));
2007
2008 map = &cp->td->td_proc->p_vmspace->vm_map;
2009 for (; len > 0; base += runlen, offset += runlen, len -= runlen) {
2010 /*
2011 * Attempt to page in all virtual pages in the range. If a
2012 * virtual page is not backed by the pager, it is represented as
2013 * a hole in the file. This can occur with zero-filled
2014 * anonymous memory or truncated files, for example.
2015 */
2016 for (runlen = 0; runlen < len; runlen += PAGE_SIZE) {
2017 if (core_dump_can_intr && curproc_sigkilled())
2018 return (EINTR);
2019 error = vm_fault(map, (uintptr_t)base + runlen,
2020 VM_PROT_READ, VM_FAULT_NOFILL, NULL);
2021 if (runlen == 0)
2022 success = error == KERN_SUCCESS;
2023 else if ((error == KERN_SUCCESS) != success)
2024 break;
2025 }
2026
2027 if (success) {
2028 error = core_write(cp, base, runlen, offset,
2029 UIO_USERSPACE, &resid);
2030 if (error != 0) {
2031 if (error != EFAULT)
2032 break;
2033
2034 /*
2035 * EFAULT may be returned if the user mapping
2036 * could not be accessed, e.g., because a mapped
2037 * file has been truncated. Skip the page if no
2038 * progress was made, to protect against a
2039 * hypothetical scenario where vm_fault() was
2040 * successful but core_write() returns EFAULT
2041 * anyway.
2042 */
2043 runlen -= resid;
2044 if (runlen == 0) {
2045 success = false;
2046 runlen = PAGE_SIZE;
2047 }
2048 }
2049 }
2050 if (!success) {
2051 error = vn_start_write(cp->vp, &mp, V_WAIT);
2052 if (error != 0)
2053 break;
2054 vn_lock(cp->vp, LK_EXCLUSIVE | LK_RETRY);
2055 error = vn_truncate_locked(cp->vp, offset + runlen,
2056 false, cp->td->td_ucred);
2057 VOP_UNLOCK(cp->vp);
2058 vn_finished_write(mp);
2059 if (error != 0)
2060 break;
2061 }
2062 }
2063 return (error);
2064 }
2065
2066 /*
2067 * Drain into a core file.
2068 */
2069 int
2070 sbuf_drain_core_output(void *arg, const char *data, int len)
2071 {
2072 struct coredump_params *cp;
2073 struct proc *p;
2074 int error, locked;
2075
2076 cp = arg;
2077 p = cp->td->td_proc;
2078
2079 /*
2080 * Some kern_proc out routines that print to this sbuf may
2081 * call us with the process lock held. Draining with the
2082 * non-sleepable lock held is unsafe. The lock is needed for
2083 * those routines when dumping a live process. In our case we
2084 * can safely release the lock before draining and acquire
2085 * again after.
2086 */
2087 locked = PROC_LOCKED(p);
2088 if (locked)
2089 PROC_UNLOCK(p);
2090 if (cp->comp != NULL)
2091 error = compressor_write(cp->comp, __DECONST(char *, data),
2092 len);
2093 else
2094 error = core_write(cp, __DECONST(void *, data), len, cp->offset,
2095 UIO_SYSSPACE, NULL);
2096 if (locked)
2097 PROC_LOCK(p);
2098 if (error != 0)
2099 return (-error);
2100 cp->offset += len;
2101 return (len);
2102 }
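/*
 * A sketch of the intended use, as in the ELF core dumper (details
 * abbreviated): the note writer builds an sbuf whose drain function
 * streams directly into the core file:
 *
 *	sb = sbuf_new(NULL, NULL, size, SBUF_FIXEDLEN);
 *	sbuf_set_drain(sb, sbuf_drain_core_output, cp);
 *	... emit notes into sb ...
 *	error = sbuf_finish(sb);
 *	sbuf_delete(sb);
 */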