FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_exec.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 1993, David Greenman
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include "opt_capsicum.h"
33 #include "opt_hwpmc_hooks.h"
34 #include "opt_ktrace.h"
35 #include "opt_vm.h"
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/acct.h>
40 #include <sys/asan.h>
41 #include <sys/capsicum.h>
42 #include <sys/compressor.h>
43 #include <sys/eventhandler.h>
44 #include <sys/exec.h>
45 #include <sys/fcntl.h>
46 #include <sys/filedesc.h>
47 #include <sys/imgact.h>
48 #include <sys/imgact_elf.h>
49 #include <sys/kernel.h>
50 #include <sys/lock.h>
51 #include <sys/malloc.h>
52 #include <sys/mman.h>
53 #include <sys/mount.h>
54 #include <sys/mutex.h>
55 #include <sys/namei.h>
56 #include <sys/priv.h>
57 #include <sys/proc.h>
58 #include <sys/ptrace.h>
59 #include <sys/reg.h>
60 #include <sys/resourcevar.h>
61 #include <sys/rwlock.h>
62 #include <sys/sched.h>
63 #include <sys/sdt.h>
64 #include <sys/sf_buf.h>
65 #include <sys/shm.h>
66 #include <sys/signalvar.h>
67 #include <sys/smp.h>
68 #include <sys/stat.h>
69 #include <sys/syscallsubr.h>
70 #include <sys/sysctl.h>
71 #include <sys/sysent.h>
72 #include <sys/sysproto.h>
73 #include <sys/timers.h>
74 #include <sys/umtxvar.h>
75 #include <sys/vnode.h>
76 #include <sys/wait.h>
77 #ifdef KTRACE
78 #include <sys/ktrace.h>
79 #endif
80
81 #include <vm/vm.h>
82 #include <vm/vm_param.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_page.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_kern.h>
87 #include <vm/vm_extern.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_pager.h>
90
91 #ifdef HWPMC_HOOKS
92 #include <sys/pmckern.h>
93 #endif
94
95 #include <security/audit/audit.h>
96 #include <security/mac/mac_framework.h>
97
98 #ifdef KDTRACE_HOOKS
99 #include <sys/dtrace_bsd.h>
100 dtrace_execexit_func_t dtrace_fasttrap_exec;
101 #endif
102
103 SDT_PROVIDER_DECLARE(proc);
104 SDT_PROBE_DEFINE1(proc, , , exec, "char *");
105 SDT_PROBE_DEFINE1(proc, , , exec__failure, "int");
106 SDT_PROBE_DEFINE1(proc, , , exec__success, "char *");
107
108 MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
109
110 int coredump_pack_fileinfo = 1;
111 SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
112 &coredump_pack_fileinfo, 0,
113 "Enable file path packing in 'procstat -f' coredump notes");
114
115 int coredump_pack_vmmapinfo = 1;
116 SYSCTL_INT(_kern, OID_AUTO, coredump_pack_vmmapinfo, CTLFLAG_RWTUN,
117 &coredump_pack_vmmapinfo, 0,
118 "Enable file path packing in 'procstat -v' coredump notes");
119
120 static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
121 static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
122 static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
123 static int do_execve(struct thread *td, struct image_args *args,
124 struct mac *mac_p, struct vmspace *oldvmspace);
125
126 /* XXX This should be vm_size_t. */
127 SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD|
128 CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_ps_strings, "LU",
129 "Location of process' ps_strings structure");
130
131 /* XXX This should be vm_size_t. */
132 SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
133 CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_usrstack, "LU",
134 "Top of process stack");
135
136 SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_MPSAFE,
137 NULL, 0, sysctl_kern_stackprot, "I",
138 "Stack memory permissions");
139
140 u_long ps_arg_cache_limit = PAGE_SIZE / 16;
141 SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
142 &ps_arg_cache_limit, 0,
143 "Process' command line characters cache limit");
144
145 static int disallow_high_osrel;
146 SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW,
147 &disallow_high_osrel, 0,
148 "Disallow execution of binaries built for higher version of the world");
149
150 static int map_at_zero = 0;
151 SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RWTUN, &map_at_zero, 0,
152 "Permit processes to map an object at virtual address 0.");
153
154 static int core_dump_can_intr = 1;
155 SYSCTL_INT(_kern, OID_AUTO, core_dump_can_intr, CTLFLAG_RWTUN,
156 &core_dump_can_intr, 0,
157 "Core dumping interruptible with SIGKILL");
158
159 static int
160 sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
161 {
162 struct proc *p;
163 vm_offset_t ps_strings;
164
165 p = curproc;
166 #ifdef SCTL_MASK32
167 if (req->flags & SCTL_MASK32) {
168 unsigned int val;
169 val = (unsigned int)PROC_PS_STRINGS(p);
170 return (SYSCTL_OUT(req, &val, sizeof(val)));
171 }
172 #endif
173 ps_strings = PROC_PS_STRINGS(p);
174 return (SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings)));
175 }
176
177 static int
178 sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
179 {
180 struct proc *p;
181 vm_offset_t val;
182
183 p = curproc;
184 #ifdef SCTL_MASK32
185 if (req->flags & SCTL_MASK32) {
186 unsigned int val32;
187
188 val32 = round_page((unsigned int)p->p_vmspace->vm_stacktop);
189 return (SYSCTL_OUT(req, &val32, sizeof(val32)));
190 }
191 #endif
192 val = round_page(p->p_vmspace->vm_stacktop);
193 return (SYSCTL_OUT(req, &val, sizeof(val)));
194 }
195
196 static int
197 sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
198 {
199 struct proc *p;
200
201 p = curproc;
202 return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
203 sizeof(p->p_sysent->sv_stackprot)));
204 }
205
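/*
 * Illustrative userspace sketch (not part of kern_exec.c): how the
 * kern.ps_strings, kern.usrstack and kern.stackprot values exported by the
 * handlers above can be read with sysctlbyname(3).  Minimal error handling,
 * intended only to show how these values surface to userland.
 */
#if 0	/* userspace example, excluded from the kernel build */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	unsigned long ps_strings, usrstack;
	int stackprot;
	size_t len;

	len = sizeof(ps_strings);
	if (sysctlbyname("kern.ps_strings", &ps_strings, &len, NULL, 0) == -1)
		err(1, "kern.ps_strings");
	len = sizeof(usrstack);
	if (sysctlbyname("kern.usrstack", &usrstack, &len, NULL, 0) == -1)
		err(1, "kern.usrstack");
	len = sizeof(stackprot);
	if (sysctlbyname("kern.stackprot", &stackprot, &len, NULL, 0) == -1)
		err(1, "kern.stackprot");
	printf("ps_strings %#lx usrstack %#lx stackprot %#x\n",
	    ps_strings, usrstack, stackprot);
	return (0);
}
#endif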
206 /*
207 * Each of the items is a pointer to a `const struct execsw', hence the
208 * double pointer here.
209 */
210 static const struct execsw **execsw;
211
212 #ifndef _SYS_SYSPROTO_H_
213 struct execve_args {
214 char *fname;
215 char **argv;
216 char **envv;
217 };
218 #endif
219
220 int
221 sys_execve(struct thread *td, struct execve_args *uap)
222 {
223 struct image_args args;
224 struct vmspace *oldvmspace;
225 int error;
226
227 error = pre_execve(td, &oldvmspace);
228 if (error != 0)
229 return (error);
230 error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
231 uap->argv, uap->envv);
232 if (error == 0)
233 error = kern_execve(td, &args, NULL, oldvmspace);
234 post_execve(td, error, oldvmspace);
235 AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
236 return (error);
237 }
238
239 #ifndef _SYS_SYSPROTO_H_
240 struct fexecve_args {
241 int fd;
242 char **argv;
243 char **envv;
244 };
245 #endif
246 int
247 sys_fexecve(struct thread *td, struct fexecve_args *uap)
248 {
249 struct image_args args;
250 struct vmspace *oldvmspace;
251 int error;
252
253 error = pre_execve(td, &oldvmspace);
254 if (error != 0)
255 return (error);
256 error = exec_copyin_args(&args, NULL, UIO_SYSSPACE,
257 uap->argv, uap->envv);
258 if (error == 0) {
259 args.fd = uap->fd;
260 error = kern_execve(td, &args, NULL, oldvmspace);
261 }
262 post_execve(td, error, oldvmspace);
263 AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
264 return (error);
265 }
266
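/*
 * Illustrative userspace sketch (not part of kern_exec.c): the usual
 * fexecve(2) pattern served by sys_fexecve() above.  The descriptor is
 * opened with O_EXEC; as noted in do_execve(), O_EXEC, O_RDONLY and O_PATH
 * descriptors are all accepted, with exec_check_permissions() re-checking
 * the file's current access mode.  The path and arguments are arbitrary
 * examples.
 */
#if 0	/* userspace example, excluded from the kernel build */
#include <fcntl.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
	char *argv[] = { "ls", "-l", NULL };
	char *envv[] = { "PATH=/bin:/usr/bin", NULL };
	int fd;

	fd = open("/bin/ls", O_EXEC);
	if (fd == -1)
		err(1, "open");
	fexecve(fd, argv, envv);
	err(1, "fexecve");		/* fexecve() only returns on error */
}
#endif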
267 #ifndef _SYS_SYSPROTO_H_
268 struct __mac_execve_args {
269 char *fname;
270 char **argv;
271 char **envv;
272 struct mac *mac_p;
273 };
274 #endif
275
276 int
277 sys___mac_execve(struct thread *td, struct __mac_execve_args *uap)
278 {
279 #ifdef MAC
280 struct image_args args;
281 struct vmspace *oldvmspace;
282 int error;
283
284 error = pre_execve(td, &oldvmspace);
285 if (error != 0)
286 return (error);
287 error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
288 uap->argv, uap->envv);
289 if (error == 0)
290 error = kern_execve(td, &args, uap->mac_p, oldvmspace);
291 post_execve(td, error, oldvmspace);
292 AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
293 return (error);
294 #else
295 return (ENOSYS);
296 #endif
297 }
298
299 int
300 pre_execve(struct thread *td, struct vmspace **oldvmspace)
301 {
302 struct proc *p;
303 int error;
304
305 KASSERT(td == curthread, ("non-current thread %p", td));
306 error = 0;
307 p = td->td_proc;
308 if ((p->p_flag & P_HADTHREADS) != 0) {
309 PROC_LOCK(p);
310 if (thread_single(p, SINGLE_BOUNDARY) != 0)
311 error = ERESTART;
312 PROC_UNLOCK(p);
313 }
314 KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
315 ("nested execve"));
316 *oldvmspace = p->p_vmspace;
317 return (error);
318 }
319
320 void
321 post_execve(struct thread *td, int error, struct vmspace *oldvmspace)
322 {
323 struct proc *p;
324
325 KASSERT(td == curthread, ("non-current thread %p", td));
326 p = td->td_proc;
327 if ((p->p_flag & P_HADTHREADS) != 0) {
328 PROC_LOCK(p);
329 /*
330 * On success, we upgrade to SINGLE_EXIT state to
331 * force other threads to exit.
332 */
333 if (error == EJUSTRETURN)
334 thread_single(p, SINGLE_EXIT);
335 else
336 thread_single_end(p, SINGLE_BOUNDARY);
337 PROC_UNLOCK(p);
338 }
339 exec_cleanup(td, oldvmspace);
340 }
341
342 /*
343 * kern_execve() has the astonishing property of not always returning to
344 * the caller. If sufficiently bad things happen during the call to
345 * do_execve(), it can end up calling exit1(); as a result, callers must
346 * avoid doing anything which they might need to undo (e.g., allocating
347 * memory).
348 */
349 int
350 kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
351 struct vmspace *oldvmspace)
352 {
353
354 TSEXEC(td->td_proc->p_pid, args->begin_argv);
355 AUDIT_ARG_ARGV(args->begin_argv, args->argc,
356 exec_args_get_begin_envv(args) - args->begin_argv);
357 AUDIT_ARG_ENVV(exec_args_get_begin_envv(args), args->envc,
358 args->endp - exec_args_get_begin_envv(args));
359
360 /* Must have at least one argument. */
361 if (args->argc == 0) {
362 exec_free_args(args);
363 return (EINVAL);
364 }
365 return (do_execve(td, args, mac_p, oldvmspace));
366 }
367
368 static void
369 execve_nosetid(struct image_params *imgp)
370 {
371 imgp->credential_setid = false;
372 if (imgp->newcred != NULL) {
373 crfree(imgp->newcred);
374 imgp->newcred = NULL;
375 }
376 }
377
378 /*
379 * In-kernel implementation of execve(). All arguments are assumed to be
380 * userspace pointers from the passed thread.
381 */
382 static int
383 do_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
384 struct vmspace *oldvmspace)
385 {
386 struct proc *p = td->td_proc;
387 struct nameidata nd;
388 struct ucred *oldcred;
389 struct uidinfo *euip = NULL;
390 uintptr_t stack_base;
391 struct image_params image_params, *imgp;
392 struct vattr attr;
393 int (*img_first)(struct image_params *);
394 struct pargs *oldargs = NULL, *newargs = NULL;
395 struct sigacts *oldsigacts = NULL, *newsigacts = NULL;
396 #ifdef KTRACE
397 struct ktr_io_params *kiop;
398 #endif
399 struct vnode *oldtextvp, *newtextvp;
400 struct vnode *oldtextdvp, *newtextdvp;
401 char *oldbinname, *newbinname;
402 bool credential_changing;
403 #ifdef MAC
404 struct label *interpvplabel = NULL;
405 bool will_transition;
406 #endif
407 #ifdef HWPMC_HOOKS
408 struct pmckern_procexec pe;
409 #endif
410 int error, i, orig_osrel;
411 uint32_t orig_fctl0;
412 Elf_Brandinfo *orig_brandinfo;
413 size_t freepath_size;
414 static const char fexecv_proc_title[] = "(fexecv)";
415
416 imgp = &image_params;
417 oldtextvp = oldtextdvp = NULL;
418 newtextvp = newtextdvp = NULL;
419 newbinname = oldbinname = NULL;
420 #ifdef KTRACE
421 kiop = NULL;
422 #endif
423
424 /*
425 * Lock the process and set the P_INEXEC flag to indicate that
426 * it should be left alone until we're done here. This is
427 * necessary to avoid race conditions - e.g. in ptrace() -
428 * that might allow a local user to illicitly obtain elevated
429 * privileges.
430 */
431 PROC_LOCK(p);
432 KASSERT((p->p_flag & P_INEXEC) == 0,
433 ("%s(): process already has P_INEXEC flag", __func__));
434 p->p_flag |= P_INEXEC;
435 PROC_UNLOCK(p);
436
437 /*
438 * Initialize part of the common data
439 */
440 bzero(imgp, sizeof(*imgp));
441 imgp->proc = p;
442 imgp->attr = &attr;
443 imgp->args = args;
444 oldcred = p->p_ucred;
445 orig_osrel = p->p_osrel;
446 orig_fctl0 = p->p_fctl0;
447 orig_brandinfo = p->p_elf_brandinfo;
448
449 #ifdef MAC
450 error = mac_execve_enter(imgp, mac_p);
451 if (error)
452 goto exec_fail;
453 #endif
454
455 SDT_PROBE1(proc, , , exec, args->fname);
456
457 interpret:
458 if (args->fname != NULL) {
459 #ifdef CAPABILITY_MODE
460 /*
461 * While capability mode can't reach this point via direct
462 * path arguments to execve(), we also don't allow
463 * interpreters to be used in capability mode (for now).
464 * Catch indirect lookups and return a permissions error.
465 */
466 if (IN_CAPABILITY_MODE(td)) {
467 error = ECAPMODE;
468 goto exec_fail;
469 }
470 #endif
471
472 /*
473 * Translate the file name. namei() returns a vnode
474 * pointer in ni_vp among other things.
475 */
476 NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | LOCKSHARED | FOLLOW |
477 SAVENAME | AUDITVNODE1 | WANTPARENT, UIO_SYSSPACE,
478 args->fname, td);
479
480 error = namei(&nd);
481 if (error)
482 goto exec_fail;
483
484 newtextvp = nd.ni_vp;
485 newtextdvp = nd.ni_dvp;
486 nd.ni_dvp = NULL;
487 newbinname = malloc(nd.ni_cnd.cn_namelen + 1, M_PARGS,
488 M_WAITOK);
489 memcpy(newbinname, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen);
490 newbinname[nd.ni_cnd.cn_namelen] = '\0';
491 imgp->vp = newtextvp;
492
493 /*
494 * Do our best to calculate the full path to the image file.
495 */
496 if (args->fname[0] == '/') {
497 imgp->execpath = args->fname;
498 } else {
499 VOP_UNLOCK(imgp->vp);
500 freepath_size = MAXPATHLEN;
501 if (vn_fullpath_hardlink(newtextvp, newtextdvp,
502 newbinname, nd.ni_cnd.cn_namelen, &imgp->execpath,
503 &imgp->freepath, &freepath_size) != 0)
504 imgp->execpath = args->fname;
505 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
506 }
507 } else if (imgp->interpreter_vp) {
508 /*
509 * An image activator has already provided an open vnode
510 */
511 newtextvp = imgp->interpreter_vp;
512 imgp->interpreter_vp = NULL;
513 if (vn_fullpath(newtextvp, &imgp->execpath,
514 &imgp->freepath) != 0)
515 imgp->execpath = args->fname;
516 vn_lock(newtextvp, LK_SHARED | LK_RETRY);
517 AUDIT_ARG_VNODE1(newtextvp);
518 imgp->vp = newtextvp;
519 } else {
520 AUDIT_ARG_FD(args->fd);
521
522 /*
523 * If the descriptor was not opened with O_PATH, then
524 * we require that it was opened with O_EXEC or
525 * O_RDONLY. In either case, exec_check_permissions()
526 * below checks _current_ file access mode regardless
527 * of the permissions additionally checked at the
528 * open(2).
529 */
530 error = fgetvp_exec(td, args->fd, &cap_fexecve_rights,
531 &newtextvp);
532 if (error != 0)
533 goto exec_fail;
534
535 if (vn_fullpath(newtextvp, &imgp->execpath,
536 &imgp->freepath) != 0)
537 imgp->execpath = args->fname;
538 vn_lock(newtextvp, LK_SHARED | LK_RETRY);
539 AUDIT_ARG_VNODE1(newtextvp);
540 imgp->vp = newtextvp;
541 }
542
543 /*
544 * Check file permissions. Also 'opens' file and sets its vnode to
545 * text mode.
546 */
547 error = exec_check_permissions(imgp);
548 if (error)
549 goto exec_fail_dealloc;
550
551 imgp->object = imgp->vp->v_object;
552 if (imgp->object != NULL)
553 vm_object_reference(imgp->object);
554
555 error = exec_map_first_page(imgp);
556 if (error)
557 goto exec_fail_dealloc;
558
559 imgp->proc->p_osrel = 0;
560 imgp->proc->p_fctl0 = 0;
561 imgp->proc->p_elf_brandinfo = NULL;
562
563 /*
564 * Implement image setuid/setgid.
565 *
566 * Determine new credentials before attempting image activators
567 * so that it can be used by process_exec handlers to determine
568 * credential/setid changes.
569 *
570 * Don't honor setuid/setgid if the filesystem prohibits it or if
571 * the process is being traced.
572 *
573 * We disable setuid/setgid/etc in capability mode on the basis
574 * that most setugid applications are not written with that
575 * environment in mind, and will therefore almost certainly operate
576 * incorrectly. In principle there's no reason that setugid
577 * applications might not be useful in capability mode, so we may want
578 * to reconsider this conservative design choice in the future.
579 *
580 * XXXMAC: For the time being, use NOSUID to also prohibit
581 * transitions on the file system.
582 */
583 credential_changing = false;
584 credential_changing |= (attr.va_mode & S_ISUID) &&
585 oldcred->cr_uid != attr.va_uid;
586 credential_changing |= (attr.va_mode & S_ISGID) &&
587 oldcred->cr_gid != attr.va_gid;
588 #ifdef MAC
589 will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
590 interpvplabel, imgp) != 0;
591 credential_changing |= will_transition;
592 #endif
593
594 /* Don't inherit PROC_PDEATHSIG_CTL value if setuid/setgid. */
595 if (credential_changing)
596 imgp->proc->p_pdeathsig = 0;
597
598 if (credential_changing &&
599 #ifdef CAPABILITY_MODE
600 ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
601 #endif
602 (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
603 (p->p_flag & P_TRACED) == 0) {
604 imgp->credential_setid = true;
605 VOP_UNLOCK(imgp->vp);
606 imgp->newcred = crdup(oldcred);
607 if (attr.va_mode & S_ISUID) {
608 euip = uifind(attr.va_uid);
609 change_euid(imgp->newcred, euip);
610 }
611 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
612 if (attr.va_mode & S_ISGID)
613 change_egid(imgp->newcred, attr.va_gid);
614 /*
615 * Implement correct POSIX saved-id behavior.
616 *
617 * XXXMAC: Note that the current logic will save the
618 * uid and gid if a MAC domain transition occurs, even
619 * though maybe it shouldn't.
620 */
621 change_svuid(imgp->newcred, imgp->newcred->cr_uid);
622 change_svgid(imgp->newcred, imgp->newcred->cr_gid);
623 } else {
624 /*
625 * Implement correct POSIX saved-id behavior.
626 *
627 * XXX: It's not clear that the existing behavior is
628 * POSIX-compliant. A number of sources indicate that the
629 * saved uid/gid should only be updated if the new ruid is
630 * not equal to the old ruid, or the new euid is not equal
631 * to the old euid and the new euid is not equal to the old
632 * ruid. The FreeBSD code always updates the saved uid/gid.
633 * Also, this code uses the new (replaced) euid and egid as
634 * the source, which may or may not be the right ones to use.
635 */
636 if (oldcred->cr_svuid != oldcred->cr_uid ||
637 oldcred->cr_svgid != oldcred->cr_gid) {
638 VOP_UNLOCK(imgp->vp);
639 imgp->newcred = crdup(oldcred);
640 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
641 change_svuid(imgp->newcred, imgp->newcred->cr_uid);
642 change_svgid(imgp->newcred, imgp->newcred->cr_gid);
643 }
644 }
645 /* The new credentials are installed into the process later. */
646
647 /*
648 * If the current process has a special image activator it
649 * wants to try first, call it. For example, emulating shell
650 * scripts differently.
651 */
652 error = -1;
653 if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
654 error = img_first(imgp);
655
656 /*
657 * Loop through the list of image activators, calling each one.
658 * An activator returns -1 if there is no match, 0 on success,
659 * and an error otherwise.
660 */
661 for (i = 0; error == -1 && execsw[i]; ++i) {
662 if (execsw[i]->ex_imgact == NULL ||
663 execsw[i]->ex_imgact == img_first) {
664 continue;
665 }
666 error = (*execsw[i]->ex_imgact)(imgp);
667 }
668
669 if (error) {
670 if (error == -1)
671 error = ENOEXEC;
672 goto exec_fail_dealloc;
673 }
674
675 /*
676 * Special interpreter operation, cleanup and loop up to try to
677 * activate the interpreter.
678 */
679 if (imgp->interpreted) {
680 exec_unmap_first_page(imgp);
681 /*
682 * The text reference needs to be removed for scripts.
683 * There is a short period, before we determine that
684 * something is a script, during which the text reference is active.
685 * The vnode lock is held over this entire period
686 * so nothing should illegitimately be blocked.
687 */
688 MPASS(imgp->textset);
689 VOP_UNSET_TEXT_CHECKED(newtextvp);
690 imgp->textset = false;
691 /* free name buffer and old vnode */
692 #ifdef MAC
693 mac_execve_interpreter_enter(newtextvp, &interpvplabel);
694 #endif
695 if (imgp->opened) {
696 VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td);
697 imgp->opened = false;
698 }
699 vput(newtextvp);
700 imgp->vp = newtextvp = NULL;
701 if (args->fname != NULL) {
702 if (newtextdvp != NULL) {
703 vrele(newtextdvp);
704 newtextdvp = NULL;
705 }
706 NDFREE(&nd, NDF_ONLY_PNBUF);
707 free(newbinname, M_PARGS);
708 newbinname = NULL;
709 }
710 vm_object_deallocate(imgp->object);
711 imgp->object = NULL;
712 execve_nosetid(imgp);
713 imgp->execpath = NULL;
714 free(imgp->freepath, M_TEMP);
715 imgp->freepath = NULL;
716 /* set new name to that of the interpreter */
717 if (imgp->interpreter_vp) {
718 args->fname = NULL;
719 } else {
720 args->fname = imgp->interpreter_name;
721 }
722 goto interpret;
723 }
724
725 /*
726 * NB: We unlock the vnode here because it is believed that none
727 * of the sv_copyout_strings/sv_fixup operations require the vnode.
728 */
729 VOP_UNLOCK(imgp->vp);
730
731 if (disallow_high_osrel &&
732 P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) {
733 error = ENOEXEC;
734 uprintf("Osrel %d for image %s too high\n", p->p_osrel,
735 imgp->execpath != NULL ? imgp->execpath : "<unresolved>");
736 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
737 goto exec_fail_dealloc;
738 }
739
740 /* ABI enforces the use of Capsicum. Switch into capabilities mode. */
741 if (SV_PROC_FLAG(p, SV_CAPSICUM))
742 sys_cap_enter(td, NULL);
743
744 /*
745 * Copy out strings (args and env) and initialize stack base.
746 */
747 error = (*p->p_sysent->sv_copyout_strings)(imgp, &stack_base);
748 if (error != 0) {
749 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
750 goto exec_fail_dealloc;
751 }
752
753 /*
754 * Stack setup.
755 */
756 error = (*p->p_sysent->sv_fixup)(&stack_base, imgp);
757 if (error != 0) {
758 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
759 goto exec_fail_dealloc;
760 }
761
762 if (args->fdp != NULL) {
763 /* Install a brand new file descriptor table. */
764 fdinstall_remapped(td, args->fdp);
765 args->fdp = NULL;
766 } else {
767 /*
768 * Keep on using the existing file descriptor table. For
769 * security and other reasons, the file descriptor table
770 * cannot be shared after an exec.
771 */
772 fdunshare(td);
773 pdunshare(td);
774 /* close files on exec */
775 fdcloseexec(td);
776 }
777
778 /*
779 * Malloc things before we need locks.
780 */
781 i = exec_args_get_begin_envv(imgp->args) - imgp->args->begin_argv;
782 /* Cache arguments if they fit inside our allowance */
783 if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
784 newargs = pargs_alloc(i);
785 bcopy(imgp->args->begin_argv, newargs->ar_args, i);
786 }
787
788 /*
789 * For security and other reasons, signal handlers cannot
790 * be shared after an exec. The new process gets a copy of the old
791 * handlers. In execsigs(), the new process will have its signals
792 * reset.
793 */
794 if (sigacts_shared(p->p_sigacts)) {
795 oldsigacts = p->p_sigacts;
796 newsigacts = sigacts_alloc();
797 sigacts_copy(newsigacts, oldsigacts);
798 }
799
800 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
801
802 PROC_LOCK(p);
803 if (oldsigacts)
804 p->p_sigacts = newsigacts;
805 /* Stop profiling */
806 stopprofclock(p);
807
808 /* reset caught signals */
809 execsigs(p);
810
811 /* name this process - nameiexec(p, ndp) */
812 bzero(p->p_comm, sizeof(p->p_comm));
813 if (args->fname)
814 bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
815 min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
816 else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0)
817 bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
818 bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
819 #ifdef KTR
820 sched_clear_tdname(td);
821 #endif
822
823 /*
824 * Mark as execed, wake up the process that vforked (if any), and tell
825 * it that it now has its own resources back.
826 */
827 p->p_flag |= P_EXEC;
828 if ((p->p_flag2 & P2_NOTRACE_EXEC) == 0)
829 p->p_flag2 &= ~P2_NOTRACE;
830 if ((p->p_flag2 & P2_STKGAP_DISABLE_EXEC) == 0)
831 p->p_flag2 &= ~P2_STKGAP_DISABLE;
832 if (p->p_flag & P_PPWAIT) {
833 p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
834 cv_broadcast(&p->p_pwait);
835 /* STOPs are no longer ignored, arrange for AST */
836 signotify(td);
837 }
838
839 if ((imgp->sysent->sv_setid_allowed != NULL &&
840 !(*imgp->sysent->sv_setid_allowed)(td, imgp)) ||
841 (p->p_flag2 & P2_NO_NEW_PRIVS) != 0)
842 execve_nosetid(imgp);
843
844 /*
845 * Implement image setuid/setgid installation.
846 */
847 if (imgp->credential_setid) {
848 /*
849 * Turn off syscall tracing for set-id programs, except for
850 * root. Record any set-id flags first to make sure that
851 * we do not regain any tracing during a possible block.
852 */
853 setsugid(p);
854 #ifdef KTRACE
855 kiop = ktrprocexec(p);
856 #endif
857 /*
858 * Close any file descriptors 0..2 that reference procfs,
859 * then make sure file descriptors 0..2 are in use.
860 *
861 * Both fdsetugidsafety() and fdcheckstd() may call functions
862 * taking sleepable locks, so temporarily drop our locks.
863 */
864 PROC_UNLOCK(p);
865 VOP_UNLOCK(imgp->vp);
866 fdsetugidsafety(td);
867 error = fdcheckstd(td);
868 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
869 if (error != 0)
870 goto exec_fail_dealloc;
871 PROC_LOCK(p);
872 #ifdef MAC
873 if (will_transition) {
874 mac_vnode_execve_transition(oldcred, imgp->newcred,
875 imgp->vp, interpvplabel, imgp);
876 }
877 #endif
878 } else {
879 if (oldcred->cr_uid == oldcred->cr_ruid &&
880 oldcred->cr_gid == oldcred->cr_rgid)
881 p->p_flag &= ~P_SUGID;
882 }
883 /*
884 * Set the new credentials.
885 */
886 if (imgp->newcred != NULL) {
887 proc_set_cred(p, imgp->newcred);
888 crfree(oldcred);
889 oldcred = NULL;
890 }
891
892 /*
893 * Store the vp for use in kern.proc.pathname. This vnode was
894 * referenced by namei() or by the fexecve variant of fname handling.
895 */
896 oldtextvp = p->p_textvp;
897 p->p_textvp = newtextvp;
898 oldtextdvp = p->p_textdvp;
899 p->p_textdvp = newtextdvp;
900 newtextdvp = NULL;
901 oldbinname = p->p_binname;
902 p->p_binname = newbinname;
903 newbinname = NULL;
904
905 #ifdef KDTRACE_HOOKS
906 /*
907 * Tell the DTrace fasttrap provider about the exec if it
908 * has declared an interest.
909 */
910 if (dtrace_fasttrap_exec)
911 dtrace_fasttrap_exec(p);
912 #endif
913
914 /*
915 * Notify others that we exec'd, and clear the P_INEXEC flag
916 * as we're now a bona fide freshly-execed process.
917 */
918 KNOTE_LOCKED(p->p_klist, NOTE_EXEC);
919 p->p_flag &= ~P_INEXEC;
920
921 /* clear "fork but no exec" flag, as we _are_ execing */
922 p->p_acflag &= ~AFORK;
923
924 /*
925 * Free any previous argument cache and replace it with
926 * the new argument cache, if any.
927 */
928 oldargs = p->p_args;
929 p->p_args = newargs;
930 newargs = NULL;
931
932 PROC_UNLOCK(p);
933
934 #ifdef HWPMC_HOOKS
935 /*
936 * Check if system-wide sampling is in effect or if the
937 * current process is using PMCs. If so, do exec() time
938 * processing. This processing needs to happen AFTER the
939 * P_INEXEC flag is cleared.
940 */
941 if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
942 VOP_UNLOCK(imgp->vp);
943 pe.pm_credentialschanged = credential_changing;
944 pe.pm_entryaddr = imgp->entry_addr;
945
946 PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
947 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
948 }
949 #endif
950
951 /* Set values passed into the program in registers. */
952 (*p->p_sysent->sv_setregs)(td, imgp, stack_base);
953
954 VOP_MMAPPED(imgp->vp);
955
956 SDT_PROBE1(proc, , , exec__success, args->fname);
957
958 exec_fail_dealloc:
959 if (error != 0) {
960 p->p_osrel = orig_osrel;
961 p->p_fctl0 = orig_fctl0;
962 p->p_elf_brandinfo = orig_brandinfo;
963 }
964
965 if (imgp->firstpage != NULL)
966 exec_unmap_first_page(imgp);
967
968 if (imgp->vp != NULL) {
969 if (imgp->opened)
970 VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
971 if (imgp->textset)
972 VOP_UNSET_TEXT_CHECKED(imgp->vp);
973 if (error != 0)
974 vput(imgp->vp);
975 else
976 VOP_UNLOCK(imgp->vp);
977 if (args->fname != NULL)
978 NDFREE(&nd, NDF_ONLY_PNBUF);
979 if (newtextdvp != NULL)
980 vrele(newtextdvp);
981 free(newbinname, M_PARGS);
982 }
983
984 if (imgp->object != NULL)
985 vm_object_deallocate(imgp->object);
986
987 free(imgp->freepath, M_TEMP);
988
989 if (error == 0) {
990 if (p->p_ptevents & PTRACE_EXEC) {
991 PROC_LOCK(p);
992 if (p->p_ptevents & PTRACE_EXEC)
993 td->td_dbgflags |= TDB_EXEC;
994 PROC_UNLOCK(p);
995 }
996 } else {
997 exec_fail:
998 /* we're done here, clear P_INEXEC */
999 PROC_LOCK(p);
1000 p->p_flag &= ~P_INEXEC;
1001 PROC_UNLOCK(p);
1002
1003 SDT_PROBE1(proc, , , exec__failure, error);
1004 }
1005
1006 if (imgp->newcred != NULL && oldcred != NULL)
1007 crfree(imgp->newcred);
1008
1009 #ifdef MAC
1010 mac_execve_exit(imgp);
1011 mac_execve_interpreter_exit(interpvplabel);
1012 #endif
1013 exec_free_args(args);
1014
1015 /*
1016 * Handle deferred decrement of ref counts.
1017 */
1018 if (oldtextvp != NULL)
1019 vrele(oldtextvp);
1020 if (oldtextdvp != NULL)
1021 vrele(oldtextdvp);
1022 free(oldbinname, M_PARGS);
1023 #ifdef KTRACE
1024 ktr_io_params_free(kiop);
1025 #endif
1026 pargs_drop(oldargs);
1027 pargs_drop(newargs);
1028 if (oldsigacts != NULL)
1029 sigacts_free(oldsigacts);
1030 if (euip != NULL)
1031 uifree(euip);
1032
1033 if (error && imgp->vmspace_destroyed) {
1034 /* Sorry, no process anymore; exit gracefully. */
1035 exec_cleanup(td, oldvmspace);
1036 exit1(td, 0, SIGABRT);
1037 /* NOT REACHED */
1038 }
1039
1040 #ifdef KTRACE
1041 if (error == 0)
1042 ktrprocctor(p);
1043 #endif
1044
1045 /*
1046 * We don't want cpu_set_syscall_retval() to overwrite any of
1047 * the register values put in place by exec_setregs().
1048 * Implementations of cpu_set_syscall_retval() will leave
1049 * registers unmodified when returning EJUSTRETURN.
1050 */
1051 return (error == 0 ? EJUSTRETURN : error);
1052 }
1053
1054 void
1055 exec_cleanup(struct thread *td, struct vmspace *oldvmspace)
1056 {
1057 if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
1058 KASSERT(td->td_proc->p_vmspace != oldvmspace,
1059 ("oldvmspace still used"));
1060 vmspace_free(oldvmspace);
1061 td->td_pflags &= ~TDP_EXECVMSPC;
1062 }
1063 }
1064
1065 int
1066 exec_map_first_page(struct image_params *imgp)
1067 {
1068 vm_object_t object;
1069 vm_page_t m;
1070 int error;
1071
1072 if (imgp->firstpage != NULL)
1073 exec_unmap_first_page(imgp);
1074
1075 object = imgp->vp->v_object;
1076 if (object == NULL)
1077 return (EACCES);
1078 #if VM_NRESERVLEVEL > 0
1079 if ((object->flags & OBJ_COLORED) == 0) {
1080 VM_OBJECT_WLOCK(object);
1081 vm_object_color(object, 0);
1082 VM_OBJECT_WUNLOCK(object);
1083 }
1084 #endif
1085 error = vm_page_grab_valid_unlocked(&m, object, 0,
1086 VM_ALLOC_COUNT(VM_INITIAL_PAGEIN) |
1087 VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
1088
1089 if (error != VM_PAGER_OK)
1090 return (EIO);
1091 imgp->firstpage = sf_buf_alloc(m, 0);
1092 imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
1093
1094 return (0);
1095 }
1096
1097 void
1098 exec_unmap_first_page(struct image_params *imgp)
1099 {
1100 vm_page_t m;
1101
1102 if (imgp->firstpage != NULL) {
1103 m = sf_buf_page(imgp->firstpage);
1104 sf_buf_free(imgp->firstpage);
1105 imgp->firstpage = NULL;
1106 vm_page_unwire(m, PQ_ACTIVE);
1107 }
1108 }
1109
1110 void
1111 exec_onexec_old(struct thread *td)
1112 {
1113 sigfastblock_clear(td);
1114 umtx_exec(td->td_proc);
1115 }
1116
1117 /*
1118 * This is an optimization which removes the unmanaged shared page
1119 * mapping. In combination with pmap_remove_pages(), which cleans all
1120 * managed mappings in the process' vmspace pmap, no work will be left
1121 * for pmap_remove(min, max).
1122 */
1123 void
1124 exec_free_abi_mappings(struct proc *p)
1125 {
1126 struct vmspace *vmspace;
1127 struct sysentvec *sv;
1128
1129 vmspace = p->p_vmspace;
1130 if (refcount_load(&vmspace->vm_refcnt) != 1)
1131 return;
1132
1133 sv = p->p_sysent;
1134 if (sv->sv_shared_page_obj == NULL)
1135 return;
1136
1137 pmap_remove(vmspace_pmap(vmspace), sv->sv_shared_page_base,
1138 sv->sv_shared_page_base + sv->sv_shared_page_len);
1139 }
1140
1141 /*
1142 * Run down the current address space and install a new one. Map the shared
1143 * page.
1144 */
1145 int
1146 exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
1147 {
1148 int error;
1149 struct proc *p = imgp->proc;
1150 struct vmspace *vmspace = p->p_vmspace;
1151 struct thread *td = curthread;
1152 vm_object_t obj;
1153 vm_offset_t sv_minuser;
1154 vm_map_t map;
1155
1156 imgp->vmspace_destroyed = true;
1157 imgp->sysent = sv;
1158
1159 if (p->p_sysent->sv_onexec_old != NULL)
1160 p->p_sysent->sv_onexec_old(td);
1161 itimers_exec(p);
1162
1163 EVENTHANDLER_DIRECT_INVOKE(process_exec, p, imgp);
1164
1165 /*
1166 * Blow away the entire process VM if the address space is not
1167 * shared; otherwise, create a new VM space so that other threads
1168 * are not disrupted.
1169 */
1170 map = &vmspace->vm_map;
1171 if (map_at_zero)
1172 sv_minuser = sv->sv_minuser;
1173 else
1174 sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
1175 if (refcount_load(&vmspace->vm_refcnt) == 1 &&
1176 vm_map_min(map) == sv_minuser &&
1177 vm_map_max(map) == sv->sv_maxuser &&
1178 cpu_exec_vmspace_reuse(p, map)) {
1179 exec_free_abi_mappings(p);
1180 shmexit(vmspace);
1181 pmap_remove_pages(vmspace_pmap(vmspace));
1182 vm_map_remove(map, vm_map_min(map), vm_map_max(map));
1183 /*
1184 * An exec terminates mlockall(MCL_FUTURE).
1185 * ASLR and W^X states must be re-evaluated.
1186 */
1187 vm_map_lock(map);
1188 vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
1189 MAP_ASLR_IGNSTART | MAP_ASLR_STACK | MAP_WXORX);
1190 vm_map_unlock(map);
1191 } else {
1192 error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
1193 if (error)
1194 return (error);
1195 vmspace = p->p_vmspace;
1196 map = &vmspace->vm_map;
1197 }
1198 map->flags |= imgp->map_flags;
1199
1200 /* Map a shared page */
1201 obj = sv->sv_shared_page_obj;
1202 if (obj != NULL) {
1203 vm_object_reference(obj);
1204 error = vm_map_fixed(map, obj, 0,
1205 sv->sv_shared_page_base, sv->sv_shared_page_len,
1206 VM_PROT_READ | VM_PROT_EXECUTE,
1207 VM_PROT_READ | VM_PROT_EXECUTE,
1208 MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
1209 if (error != KERN_SUCCESS) {
1210 vm_object_deallocate(obj);
1211 return (vm_mmap_to_errno(error));
1212 }
1213 }
1214
1215 return (sv->sv_onexec != NULL ? sv->sv_onexec(p, imgp) : 0);
1216 }
1217
1218 /*
1219 * Compute the stack size limit and map the main process stack.
1220 */
1221 int
1222 exec_map_stack(struct image_params *imgp)
1223 {
1224 struct rlimit rlim_stack;
1225 struct sysentvec *sv;
1226 struct proc *p;
1227 vm_map_t map;
1228 struct vmspace *vmspace;
1229 vm_offset_t stack_addr, stack_top;
1230 u_long ssiz;
1231 int error, find_space, stack_off;
1232 vm_prot_t stack_prot;
1233
1234 p = imgp->proc;
1235 sv = p->p_sysent;
1236
1237 if (imgp->stack_sz != 0) {
1238 ssiz = trunc_page(imgp->stack_sz);
1239 PROC_LOCK(p);
1240 lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack);
1241 PROC_UNLOCK(p);
1242 if (ssiz > rlim_stack.rlim_max)
1243 ssiz = rlim_stack.rlim_max;
1244 if (ssiz > rlim_stack.rlim_cur) {
1245 rlim_stack.rlim_cur = ssiz;
1246 kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack);
1247 }
1248 } else if (sv->sv_maxssiz != NULL) {
1249 ssiz = *sv->sv_maxssiz;
1250 } else {
1251 ssiz = maxssiz;
1252 }
1253
1254 vmspace = p->p_vmspace;
1255 map = &vmspace->vm_map;
1256
1257 stack_prot = sv->sv_shared_page_obj != NULL && imgp->stack_prot != 0 ?
1258 imgp->stack_prot : sv->sv_stackprot;
1259 if ((map->flags & MAP_ASLR_STACK) != 0) {
1260 stack_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
1261 lim_max(curthread, RLIMIT_DATA));
1262 find_space = VMFS_ANY_SPACE;
1263 } else {
1264 stack_addr = sv->sv_usrstack - ssiz;
1265 find_space = VMFS_NO_SPACE;
1266 }
1267 error = vm_map_find(map, NULL, 0, &stack_addr, (vm_size_t)ssiz,
1268 sv->sv_usrstack, find_space, stack_prot, VM_PROT_ALL,
1269 MAP_STACK_GROWS_DOWN);
1270 if (error != KERN_SUCCESS) {
1271 uprintf("exec_new_vmspace: mapping stack size %#jx prot %#x "
1272 "failed, mach error %d errno %d\n", (uintmax_t)ssiz,
1273 stack_prot, error, vm_mmap_to_errno(error));
1274 return (vm_mmap_to_errno(error));
1275 }
1276
1277 stack_top = stack_addr + ssiz;
1278 if ((map->flags & MAP_ASLR_STACK) != 0) {
1279 /* Randomize within the first page of the stack. */
1280 arc4rand(&stack_off, sizeof(stack_off), 0);
1281 stack_top -= rounddown2(stack_off & PAGE_MASK, sizeof(void *));
1282 }
1283
1284 /*
1285 * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
1286 * are still used to enforce the stack rlimit on the process stack.
1287 */
1288 vmspace->vm_maxsaddr = (char *)stack_addr;
1289 vmspace->vm_stacktop = stack_top;
1290 vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
1291
1292 return (0);
1293 }
1294
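/*
 * Worked example of the MAP_ASLR_STACK adjustment above (a sketch, assuming
 * 4 KB pages and 8-byte pointers): a random stack_off of 0x1a37 gives
 * 0x1a37 & PAGE_MASK = 0xa37 and rounddown2(0xa37, 8) = 0xa30, so the
 * initial stack top is lowered by 0xa30 bytes and stays pointer-aligned
 * while its low bits vary from exec to exec.
 */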
1295 /*
1296 * Copy out argument and environment strings from the old process address
1297 * space into the temporary string buffer.
1298 */
1299 int
1300 exec_copyin_args(struct image_args *args, const char *fname,
1301 enum uio_seg segflg, char **argv, char **envv)
1302 {
1303 u_long arg, env;
1304 int error;
1305
1306 bzero(args, sizeof(*args));
1307 if (argv == NULL)
1308 return (EFAULT);
1309
1310 /*
1311 * Allocate demand-paged memory for the file name, argument, and
1312 * environment strings.
1313 */
1314 error = exec_alloc_args(args);
1315 if (error != 0)
1316 return (error);
1317
1318 /*
1319 * Copy the file name.
1320 */
1321 error = exec_args_add_fname(args, fname, segflg);
1322 if (error != 0)
1323 goto err_exit;
1324
1325 /*
1326 * extract arguments first
1327 */
1328 for (;;) {
1329 error = fueword(argv++, &arg);
1330 if (error == -1) {
1331 error = EFAULT;
1332 goto err_exit;
1333 }
1334 if (arg == 0)
1335 break;
1336 error = exec_args_add_arg(args, (char *)(uintptr_t)arg,
1337 UIO_USERSPACE);
1338 if (error != 0)
1339 goto err_exit;
1340 }
1341
1342 /*
1343 * extract environment strings
1344 */
1345 if (envv) {
1346 for (;;) {
1347 error = fueword(envv++, &env);
1348 if (error == -1) {
1349 error = EFAULT;
1350 goto err_exit;
1351 }
1352 if (env == 0)
1353 break;
1354 error = exec_args_add_env(args,
1355 (char *)(uintptr_t)env, UIO_USERSPACE);
1356 if (error != 0)
1357 goto err_exit;
1358 }
1359 }
1360
1361 return (0);
1362
1363 err_exit:
1364 exec_free_args(args);
1365 return (error);
1366 }
1367
1368 int
1369 exec_copyin_data_fds(struct thread *td, struct image_args *args,
1370 const void *data, size_t datalen, const int *fds, size_t fdslen)
1371 {
1372 struct filedesc *ofdp;
1373 const char *p;
1374 int *kfds;
1375 int error;
1376
1377 memset(args, '\0', sizeof(*args));
1378 ofdp = td->td_proc->p_fd;
1379 if (datalen >= ARG_MAX || fdslen >= ofdp->fd_nfiles)
1380 return (E2BIG);
1381 error = exec_alloc_args(args);
1382 if (error != 0)
1383 return (error);
1384
1385 args->begin_argv = args->buf;
1386 args->stringspace = ARG_MAX;
1387
1388 if (datalen > 0) {
1389 /*
1390 * Argument buffer has been provided. Copy it into the
1391 * kernel as a single string and add a terminating null
1392 * byte.
1393 */
1394 error = copyin(data, args->begin_argv, datalen);
1395 if (error != 0)
1396 goto err_exit;
1397 args->begin_argv[datalen] = '\0';
1398 args->endp = args->begin_argv + datalen + 1;
1399 args->stringspace -= datalen + 1;
1400
1401 /*
1402 * Traditional argument counting. Count the number of
1403 * null bytes.
1404 */
1405 for (p = args->begin_argv; p < args->endp; ++p)
1406 if (*p == '\0')
1407 ++args->argc;
1408 } else {
1409 /* No argument buffer provided. */
1410 args->endp = args->begin_argv;
1411 }
1412
1413 /* Create new file descriptor table. */
1414 kfds = malloc(fdslen * sizeof(int), M_TEMP, M_WAITOK);
1415 error = copyin(fds, kfds, fdslen * sizeof(int));
1416 if (error != 0) {
1417 free(kfds, M_TEMP);
1418 goto err_exit;
1419 }
1420 error = fdcopy_remapped(ofdp, kfds, fdslen, &args->fdp);
1421 free(kfds, M_TEMP);
1422 if (error != 0)
1423 goto err_exit;
1424
1425 return (0);
1426 err_exit:
1427 exec_free_args(args);
1428 return (error);
1429 }
1430
1431 struct exec_args_kva {
1432 vm_offset_t addr;
1433 u_int gen;
1434 SLIST_ENTRY(exec_args_kva) next;
1435 };
1436
1437 DPCPU_DEFINE_STATIC(struct exec_args_kva *, exec_args_kva);
1438
1439 static SLIST_HEAD(, exec_args_kva) exec_args_kva_freelist;
1440 static struct mtx exec_args_kva_mtx;
1441 static u_int exec_args_gen;
1442
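/*
 * The exec argument buffers below are KVA ranges preallocated from exec_map
 * at boot.  exec_alloc_args_kva() first tries a lock-free per-CPU one-entry
 * cache (atomic_readandclear_ptr() on the DPCPU slot) and falls back to the
 * mutex-protected free list, sleeping if the list is empty.  A vm_lowmem
 * event bumps exec_args_gen; exec_release_args_kva() compares each range's
 * generation against it, so every range gets MADV_FREE applied once after a
 * low-memory event.
 */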
1443 static void
1444 exec_prealloc_args_kva(void *arg __unused)
1445 {
1446 struct exec_args_kva *argkva;
1447 u_int i;
1448
1449 SLIST_INIT(&exec_args_kva_freelist);
1450 mtx_init(&exec_args_kva_mtx, "exec args kva", NULL, MTX_DEF);
1451 for (i = 0; i < exec_map_entries; i++) {
1452 argkva = malloc(sizeof(*argkva), M_PARGS, M_WAITOK);
1453 argkva->addr = kmap_alloc_wait(exec_map, exec_map_entry_size);
1454 argkva->gen = exec_args_gen;
1455 SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
1456 }
1457 }
1458 SYSINIT(exec_args_kva, SI_SUB_EXEC, SI_ORDER_ANY, exec_prealloc_args_kva, NULL);
1459
1460 static vm_offset_t
1461 exec_alloc_args_kva(void **cookie)
1462 {
1463 struct exec_args_kva *argkva;
1464
1465 argkva = (void *)atomic_readandclear_ptr(
1466 (uintptr_t *)DPCPU_PTR(exec_args_kva));
1467 if (argkva == NULL) {
1468 mtx_lock(&exec_args_kva_mtx);
1469 while ((argkva = SLIST_FIRST(&exec_args_kva_freelist)) == NULL)
1470 (void)mtx_sleep(&exec_args_kva_freelist,
1471 &exec_args_kva_mtx, 0, "execkva", 0);
1472 SLIST_REMOVE_HEAD(&exec_args_kva_freelist, next);
1473 mtx_unlock(&exec_args_kva_mtx);
1474 }
1475 kasan_mark((void *)argkva->addr, exec_map_entry_size,
1476 exec_map_entry_size, 0);
1477 *(struct exec_args_kva **)cookie = argkva;
1478 return (argkva->addr);
1479 }
1480
1481 static void
1482 exec_release_args_kva(struct exec_args_kva *argkva, u_int gen)
1483 {
1484 vm_offset_t base;
1485
1486 base = argkva->addr;
1487 kasan_mark((void *)argkva->addr, 0, exec_map_entry_size,
1488 KASAN_EXEC_ARGS_FREED);
1489 if (argkva->gen != gen) {
1490 (void)vm_map_madvise(exec_map, base, base + exec_map_entry_size,
1491 MADV_FREE);
1492 argkva->gen = gen;
1493 }
1494 if (!atomic_cmpset_ptr((uintptr_t *)DPCPU_PTR(exec_args_kva),
1495 (uintptr_t)NULL, (uintptr_t)argkva)) {
1496 mtx_lock(&exec_args_kva_mtx);
1497 SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
1498 wakeup_one(&exec_args_kva_freelist);
1499 mtx_unlock(&exec_args_kva_mtx);
1500 }
1501 }
1502
1503 static void
1504 exec_free_args_kva(void *cookie)
1505 {
1506
1507 exec_release_args_kva(cookie, exec_args_gen);
1508 }
1509
1510 static void
1511 exec_args_kva_lowmem(void *arg __unused)
1512 {
1513 SLIST_HEAD(, exec_args_kva) head;
1514 struct exec_args_kva *argkva;
1515 u_int gen;
1516 int i;
1517
1518 gen = atomic_fetchadd_int(&exec_args_gen, 1) + 1;
1519
1520 /*
1521 * Force an madvise of each KVA range. Any currently allocated ranges
1522 * will have MADV_FREE applied once they are freed.
1523 */
1524 SLIST_INIT(&head);
1525 mtx_lock(&exec_args_kva_mtx);
1526 SLIST_SWAP(&head, &exec_args_kva_freelist, exec_args_kva);
1527 mtx_unlock(&exec_args_kva_mtx);
1528 while ((argkva = SLIST_FIRST(&head)) != NULL) {
1529 SLIST_REMOVE_HEAD(&head, next);
1530 exec_release_args_kva(argkva, gen);
1531 }
1532
1533 CPU_FOREACH(i) {
1534 argkva = (void *)atomic_readandclear_ptr(
1535 (uintptr_t *)DPCPU_ID_PTR(i, exec_args_kva));
1536 if (argkva != NULL)
1537 exec_release_args_kva(argkva, gen);
1538 }
1539 }
1540 EVENTHANDLER_DEFINE(vm_lowmem, exec_args_kva_lowmem, NULL,
1541 EVENTHANDLER_PRI_ANY);
1542
1543 /*
1544 * Allocate temporary demand-paged, zero-filled memory for the file name,
1545 * argument, and environment strings.
1546 */
1547 int
1548 exec_alloc_args(struct image_args *args)
1549 {
1550
1551 args->buf = (char *)exec_alloc_args_kva(&args->bufkva);
1552 return (0);
1553 }
1554
1555 void
1556 exec_free_args(struct image_args *args)
1557 {
1558
1559 if (args->buf != NULL) {
1560 exec_free_args_kva(args->bufkva);
1561 args->buf = NULL;
1562 }
1563 if (args->fname_buf != NULL) {
1564 free(args->fname_buf, M_TEMP);
1565 args->fname_buf = NULL;
1566 }
1567 if (args->fdp != NULL)
1568 fdescfree_remapped(args->fdp);
1569 }
1570
1571 /*
1572 * A set of functions to fill struct image_args.
1573 *
1574 * NOTE: exec_args_add_fname() must be called (possibly with a NULL
1575 * fname) before the other functions. All exec_args_add_arg() calls must
1576 * be made before any exec_args_add_env() calls. exec_args_adjust_args()
1577 * may be called any time after exec_args_add_fname().
1578 *
1579 * exec_args_add_fname() - install path to be executed
1580 * exec_args_add_arg() - append an argument string
1581 * exec_args_add_env() - append an env string
1582 * exec_args_adjust_args() - adjust location of the argument list to
1583 * allow new arguments to be prepended
1584 */
1585 int
1586 exec_args_add_fname(struct image_args *args, const char *fname,
1587 enum uio_seg segflg)
1588 {
1589 int error;
1590 size_t length;
1591
1592 KASSERT(args->fname == NULL, ("fname already appended"));
1593 KASSERT(args->endp == NULL, ("already appending to args"));
1594
1595 if (fname != NULL) {
1596 args->fname = args->buf;
1597 error = segflg == UIO_SYSSPACE ?
1598 copystr(fname, args->fname, PATH_MAX, &length) :
1599 copyinstr(fname, args->fname, PATH_MAX, &length);
1600 if (error != 0)
1601 return (error == ENAMETOOLONG ? E2BIG : error);
1602 } else
1603 length = 0;
1604
1605 /* Set up for _arg_*()/_env_*() */
1606 args->endp = args->buf + length;
1607 /* begin_argv must be set and kept updated */
1608 args->begin_argv = args->endp;
1609 KASSERT(exec_map_entry_size - length >= ARG_MAX,
1610 ("too little space remaining for arguments %zu < %zu",
1611 exec_map_entry_size - length, (size_t)ARG_MAX));
1612 args->stringspace = ARG_MAX;
1613
1614 return (0);
1615 }
1616
1617 static int
1618 exec_args_add_str(struct image_args *args, const char *str,
1619 enum uio_seg segflg, int *countp)
1620 {
1621 int error;
1622 size_t length;
1623
1624 KASSERT(args->endp != NULL, ("endp not initialized"));
1625 KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));
1626
1627 error = (segflg == UIO_SYSSPACE) ?
1628 copystr(str, args->endp, args->stringspace, &length) :
1629 copyinstr(str, args->endp, args->stringspace, &length);
1630 if (error != 0)
1631 return (error == ENAMETOOLONG ? E2BIG : error);
1632 args->stringspace -= length;
1633 args->endp += length;
1634 (*countp)++;
1635
1636 return (0);
1637 }
1638
1639 int
1640 exec_args_add_arg(struct image_args *args, const char *argp,
1641 enum uio_seg segflg)
1642 {
1643
1644 KASSERT(args->envc == 0, ("appending args after env"));
1645
1646 return (exec_args_add_str(args, argp, segflg, &args->argc));
1647 }
1648
1649 int
1650 exec_args_add_env(struct image_args *args, const char *envp,
1651 enum uio_seg segflg)
1652 {
1653
1654 if (args->envc == 0)
1655 args->begin_envv = args->endp;
1656
1657 return (exec_args_add_str(args, envp, segflg, &args->envc));
1658 }
1659
1660 int
1661 exec_args_adjust_args(struct image_args *args, size_t consume, ssize_t extend)
1662 {
1663 ssize_t offset;
1664
1665 KASSERT(args->endp != NULL, ("endp not initialized"));
1666 KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));
1667
1668 offset = extend - consume;
1669 if (args->stringspace < offset)
1670 return (E2BIG);
1671 memmove(args->begin_argv + extend, args->begin_argv + consume,
1672 args->endp - args->begin_argv + consume);
1673 if (args->envc > 0)
1674 args->begin_envv += offset;
1675 args->endp += offset;
1676 args->stringspace -= offset;
1677 return (0);
1678 }
1679
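/*
 * Usage sketch for exec_args_adjust_args() (hypothetical, simplified from
 * what a script image activator does): to make room for an interpreter
 * string of length "ilen" while dropping "olen" bytes from the front of the
 * existing argument block, a caller would do roughly
 *
 *	error = exec_args_adjust_args(args, olen, ilen);
 *
 * and, on success, copy the new string(s) into the "ilen" bytes now
 * available at args->begin_argv.  The function only moves the string block;
 * if the number of arguments changes, the caller must update args->argc
 * itself.
 */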
1680 char *
1681 exec_args_get_begin_envv(struct image_args *args)
1682 {
1683
1684 KASSERT(args->endp != NULL, ("endp not initialized"));
1685
1686 if (args->envc > 0)
1687 return (args->begin_envv);
1688 return (args->endp);
1689 }
1690
1691 /*
1692 * Copy strings out to the new process address space, constructing new arg
1693 * and env vector tables. Return a pointer to the base so that it can be used
1694 * as the initial stack pointer.
1695 */
1696 int
1697 exec_copyout_strings(struct image_params *imgp, uintptr_t *stack_base)
1698 {
1699 int argc, envc;
1700 char **vectp;
1701 char *stringp;
1702 uintptr_t destp, ustringp;
1703 struct ps_strings *arginfo;
1704 struct proc *p;
1705 struct sysentvec *sysent;
1706 size_t execpath_len;
1707 int error, szsigcode;
1708 char canary[sizeof(long) * 8];
1709
1710 p = imgp->proc;
1711 sysent = p->p_sysent;
1712
1713 destp = PROC_PS_STRINGS(p);
1714 arginfo = imgp->ps_strings = (void *)destp;
1715
1716 /*
1717 * Install sigcode.
1718 */
1719 if (sysent->sv_sigcode_base == 0 && sysent->sv_szsigcode != NULL) {
1720 szsigcode = *(sysent->sv_szsigcode);
1721 destp -= szsigcode;
1722 destp = rounddown2(destp, sizeof(void *));
1723 error = copyout(sysent->sv_sigcode, (void *)destp, szsigcode);
1724 if (error != 0)
1725 return (error);
1726 }
1727
1728 /*
1729 * Copy the image path for the rtld.
1730 */
1731 if (imgp->execpath != NULL && imgp->auxargs != NULL) {
1732 execpath_len = strlen(imgp->execpath) + 1;
1733 destp -= execpath_len;
1734 destp = rounddown2(destp, sizeof(void *));
1735 imgp->execpathp = (void *)destp;
1736 error = copyout(imgp->execpath, imgp->execpathp, execpath_len);
1737 if (error != 0)
1738 return (error);
1739 }
1740
1741 /*
1742 * Prepare the canary for SSP.
1743 */
1744 arc4rand(canary, sizeof(canary), 0);
1745 destp -= sizeof(canary);
1746 imgp->canary = (void *)destp;
1747 error = copyout(canary, imgp->canary, sizeof(canary));
1748 if (error != 0)
1749 return (error);
1750 imgp->canarylen = sizeof(canary);
1751
1752 /*
1753 * Prepare the pagesizes array.
1754 */
1755 imgp->pagesizeslen = sizeof(pagesizes[0]) * MAXPAGESIZES;
1756 destp -= imgp->pagesizeslen;
1757 destp = rounddown2(destp, sizeof(void *));
1758 imgp->pagesizes = (void *)destp;
1759 error = copyout(pagesizes, imgp->pagesizes, imgp->pagesizeslen);
1760 if (error != 0)
1761 return (error);
1762
1763 /*
1764 * Allocate room for the argument and environment strings.
1765 */
1766 destp -= ARG_MAX - imgp->args->stringspace;
1767 destp = rounddown2(destp, sizeof(void *));
1768 ustringp = destp;
1769
1770 if (imgp->auxargs) {
1771 /*
1772 * Allocate room on the stack for the ELF auxargs
1773 * array. It has up to AT_COUNT entries.
1774 */
1775 destp -= AT_COUNT * sizeof(Elf_Auxinfo);
1776 destp = rounddown2(destp, sizeof(void *));
1777 }
1778
1779 vectp = (char **)destp;
1780
1781 /*
1782 * Allocate room for the argv[] and env vectors including the
1783 * terminating NULL pointers.
1784 */
1785 vectp -= imgp->args->argc + 1 + imgp->args->envc + 1;
1786
1787 /*
1788 * vectp also becomes our initial stack base
1789 */
1790 *stack_base = (uintptr_t)vectp;
1791
1792 stringp = imgp->args->begin_argv;
1793 argc = imgp->args->argc;
1794 envc = imgp->args->envc;
1795
1796 /*
1797 * Copy out strings - arguments and environment.
1798 */
1799 error = copyout(stringp, (void *)ustringp,
1800 ARG_MAX - imgp->args->stringspace);
1801 if (error != 0)
1802 return (error);
1803
1804 /*
1805 * Fill in "ps_strings" struct for ps, w, etc.
1806 */
1807 imgp->argv = vectp;
1808 if (suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp) != 0 ||
1809 suword32(&arginfo->ps_nargvstr, argc) != 0)
1810 return (EFAULT);
1811
1812 /*
1813 * Fill in argument portion of vector table.
1814 */
1815 for (; argc > 0; --argc) {
1816 if (suword(vectp++, ustringp) != 0)
1817 return (EFAULT);
1818 while (*stringp++ != 0)
1819 ustringp++;
1820 ustringp++;
1821 }
1822
1823 /* a null vector table pointer separates the argp's from the envp's */
1824 if (suword(vectp++, 0) != 0)
1825 return (EFAULT);
1826
1827 imgp->envv = vectp;
1828 if (suword(&arginfo->ps_envstr, (long)(intptr_t)vectp) != 0 ||
1829 suword32(&arginfo->ps_nenvstr, envc) != 0)
1830 return (EFAULT);
1831
1832 /*
1833 * Fill in environment portion of vector table.
1834 */
1835 for (; envc > 0; --envc) {
1836 if (suword(vectp++, ustringp) != 0)
1837 return (EFAULT);
1838 while (*stringp++ != 0)
1839 ustringp++;
1840 ustringp++;
1841 }
1842
1843 /* end of vector table is a null pointer */
1844 if (suword(vectp, 0) != 0)
1845 return (EFAULT);
1846
1847 if (imgp->auxargs) {
1848 vectp++;
1849 error = imgp->sysent->sv_copyout_auxargs(imgp,
1850 (uintptr_t)vectp);
1851 if (error != 0)
1852 return (error);
1853 }
1854
1855 return (0);
1856 }
1857
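/*
 * Resulting layout built by exec_copyout_strings(), from the ps_strings
 * area down toward lower addresses (each chunk pointer-aligned):
 *
 *	ps_strings struct
 *	signal trampoline (only if sv_sigcode_base is unset)
 *	execpath (only if auxargs are used)
 *	SSP canary
 *	pagesizes array
 *	argument and environment strings
 *	ELF auxargs vector (if any)
 *	env vector, NULL-terminated
 *	argv vector, NULL-terminated   <- *stack_base points here
 */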
1858 /*
1859 * Check permissions of file to execute.
1860 * Called with imgp->vp locked.
1861 * Return 0 for success or error code on failure.
1862 */
1863 int
1864 exec_check_permissions(struct image_params *imgp)
1865 {
1866 struct vnode *vp = imgp->vp;
1867 struct vattr *attr = imgp->attr;
1868 struct thread *td;
1869 int error;
1870
1871 td = curthread;
1872
1873 /* Get file attributes */
1874 error = VOP_GETATTR(vp, attr, td->td_ucred);
1875 if (error)
1876 return (error);
1877
1878 #ifdef MAC
1879 error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
1880 if (error)
1881 return (error);
1882 #endif
1883
1884 /*
1885 * 1) Check if file execution is disabled for the filesystem that
1886 * this file resides on.
1887 * 2) Ensure that at least one execute bit is on. Otherwise, a
1888 * privileged user will always succeed, and we don't want this
1889 * to happen unless the file really is executable.
1890 * 3) Ensure that the file is a regular file.
1891 */
1892 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
1893 (attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
1894 (attr->va_type != VREG))
1895 return (EACCES);
1896
1897 /*
1898 * Zero length files can't be exec'd
1899 */
1900 if (attr->va_size == 0)
1901 return (ENOEXEC);
1902
1903 /*
1904 * Check for execute permission to file based on current credentials.
1905 */
1906 error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
1907 if (error)
1908 return (error);
1909
1910 /*
1911 * Check number of open-for-writes on the file and deny execution
1912 * if there are any.
1913 *
1914 * Add a text reference now so no one can write to the
1915 * executable while we're activating it.
1916 *
1917 * Remember if this was set before and unset it in case this is not
1918 * actually an executable image.
1919 */
1920 error = VOP_SET_TEXT(vp);
1921 if (error != 0)
1922 return (error);
1923 imgp->textset = true;
1924
1925 /*
1926 * Call filesystem specific open routine (which does nothing in the
1927 * general case).
1928 */
1929 error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
1930 if (error == 0)
1931 imgp->opened = true;
1932 return (error);
1933 }
1934
1935 /*
1936 * Exec handler registration
1937 */
1938 int
1939 exec_register(const struct execsw *execsw_arg)
1940 {
1941 const struct execsw **es, **xs, **newexecsw;
1942 u_int count = 2; /* New slot and trailing NULL */
1943
1944 if (execsw)
1945 for (es = execsw; *es; es++)
1946 count++;
1947 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1948 xs = newexecsw;
1949 if (execsw)
1950 for (es = execsw; *es; es++)
1951 *xs++ = *es;
1952 *xs++ = execsw_arg;
1953 *xs = NULL;
1954 if (execsw)
1955 free(execsw, M_TEMP);
1956 execsw = newexecsw;
1957 return (0);
1958 }
1959
1960 int
1961 exec_unregister(const struct execsw *execsw_arg)
1962 {
1963 const struct execsw **es, **xs, **newexecsw;
1964 int count = 1;
1965
1966 if (execsw == NULL)
1967 panic("unregister with no handlers left?\n");
1968
1969 for (es = execsw; *es; es++) {
1970 if (*es == execsw_arg)
1971 break;
1972 }
1973 if (*es == NULL)
1974 return (ENOENT);
1975 for (es = execsw; *es; es++)
1976 if (*es != execsw_arg)
1977 count++;
1978 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1979 xs = newexecsw;
1980 for (es = execsw; *es; es++)
1981 if (*es != execsw_arg)
1982 *xs++ = *es;
1983 *xs = NULL;
1984 if (execsw)
1985 free(execsw, M_TEMP);
1986 execsw = newexecsw;
1987 return (0);
1988 }
1989
1990 /*
1991 * Write out a core segment to the compression stream.
1992 */
1993 static int
1994 compress_chunk(struct coredump_params *cp, char *base, char *buf, size_t len)
1995 {
1996 size_t chunk_len;
1997 int error;
1998
1999 while (len > 0) {
2000 chunk_len = MIN(len, CORE_BUF_SIZE);
2001
2002 /*
2003 * We can get an EFAULT error here.
2004 * In that case, zero out the current chunk of the segment.
2005 */
2006 error = copyin(base, buf, chunk_len);
2007 if (error != 0)
2008 bzero(buf, chunk_len);
2009 error = compressor_write(cp->comp, buf, chunk_len);
2010 if (error != 0)
2011 break;
2012 base += chunk_len;
2013 len -= chunk_len;
2014 }
2015 return (error);
2016 }
2017
2018 int
2019 core_write(struct coredump_params *cp, const void *base, size_t len,
2020 off_t offset, enum uio_seg seg, size_t *resid)
2021 {
2022
2023 return (vn_rdwr_inchunks(UIO_WRITE, cp->vp, __DECONST(void *, base),
2024 len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
2025 cp->active_cred, cp->file_cred, resid, cp->td));
2026 }
2027
2028 int
2029 core_output(char *base, size_t len, off_t offset, struct coredump_params *cp,
2030 void *tmpbuf)
2031 {
2032 vm_map_t map;
2033 struct mount *mp;
2034 size_t resid, runlen;
2035 int error;
2036 bool success;
2037
2038 KASSERT((uintptr_t)base % PAGE_SIZE == 0,
2039 ("%s: user address %p is not page-aligned", __func__, base));
2040
2041 if (cp->comp != NULL)
2042 return (compress_chunk(cp, base, tmpbuf, len));
2043
2044 map = &cp->td->td_proc->p_vmspace->vm_map;
2045 for (; len > 0; base += runlen, offset += runlen, len -= runlen) {
2046 /*
2047 * Attempt to page in all virtual pages in the range. If a
2048 * virtual page is not backed by the pager, it is represented as
2049 * a hole in the file. This can occur with zero-filled
2050 * anonymous memory or truncated files, for example.
2051 */
2052 for (runlen = 0; runlen < len; runlen += PAGE_SIZE) {
2053 if (core_dump_can_intr && curproc_sigkilled())
2054 return (EINTR);
2055 error = vm_fault(map, (uintptr_t)base + runlen,
2056 VM_PROT_READ, VM_FAULT_NOFILL, NULL);
2057 if (runlen == 0)
2058 success = error == KERN_SUCCESS;
2059 else if ((error == KERN_SUCCESS) != success)
2060 break;
2061 }
2062
2063 if (success) {
2064 error = core_write(cp, base, runlen, offset,
2065 UIO_USERSPACE, &resid);
2066 if (error != 0) {
2067 if (error != EFAULT)
2068 break;
2069
2070 /*
2071 * EFAULT may be returned if the user mapping
2072 * could not be accessed, e.g., because a mapped
2073 * file has been truncated. Skip the page if no
2074 * progress was made, to protect against a
2075 * hypothetical scenario where vm_fault() was
2076 * successful but core_write() returns EFAULT
2077 * anyway.
2078 */
2079 runlen -= resid;
2080 if (runlen == 0) {
2081 success = false;
2082 runlen = PAGE_SIZE;
2083 }
2084 }
2085 }
2086 if (!success) {
2087 error = vn_start_write(cp->vp, &mp, V_WAIT);
2088 if (error != 0)
2089 break;
2090 vn_lock(cp->vp, LK_EXCLUSIVE | LK_RETRY);
2091 error = vn_truncate_locked(cp->vp, offset + runlen,
2092 false, cp->td->td_ucred);
2093 VOP_UNLOCK(cp->vp);
2094 vn_finished_write(mp);
2095 if (error != 0)
2096 break;
2097 }
2098 }
2099 return (error);
2100 }
2101
2102 /*
2103 * Drain into a core file.
2104 */
2105 int
2106 sbuf_drain_core_output(void *arg, const char *data, int len)
2107 {
2108 struct coredump_params *cp;
2109 struct proc *p;
2110 int error, locked;
2111
2112 cp = arg;
2113 p = cp->td->td_proc;
2114
2115 /*
2116 * Some kern_proc output routines that print to this sbuf may
2117 * call us with the process lock held. Draining with the
2118 * non-sleepable lock held is unsafe. The lock is needed for
2119 * those routines when dumping a live process. In our case we
2120 * can safely release the lock before draining and acquire
2121 * again after.
2122 */
2123 locked = PROC_LOCKED(p);
2124 if (locked)
2125 PROC_UNLOCK(p);
2126 if (cp->comp != NULL)
2127 error = compressor_write(cp->comp, __DECONST(char *, data), len);
2128 else
2129 error = core_write(cp, __DECONST(void *, data), len, cp->offset,
2130 UIO_SYSSPACE, NULL);
2131 if (locked)
2132 PROC_LOCK(p);
2133 if (error != 0)
2134 return (-error);
2135 cp->offset += len;
2136 return (len);
2137 }