FreeBSD/Linux Kernel Cross Reference
sys/kern/exec_subr.c
/*	$NetBSD: exec_subr.c,v 1.50 2006/10/05 14:48:32 chs Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.50 2006/10/05 14:48:32 chs Exp $");

#include "opt_pax.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/resourcevar.h>
#include <sys/device.h>

#ifdef PAX_MPROTECT
#include <sys/pax.h>
#endif /* PAX_MPROTECT */

#include <uvm/uvm.h>

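/*
 * Event counters for vmcmd activity.  EVCNT_ATTACH_STATIC registers the
 * counters at boot, so they are visible with "vmstat -e".
 */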
#define	VMCMD_EVCNT_DECL(name)					\
static struct evcnt vmcmd_ev_##name =				\
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "vmcmd", #name);	\
EVCNT_ATTACH_STATIC(vmcmd_ev_##name)

#define	VMCMD_EVCNT_INCR(name)					\
    vmcmd_ev_##name.ev_count++

VMCMD_EVCNT_DECL(calls);
VMCMD_EVCNT_DECL(extends);
VMCMD_EVCNT_DECL(kills);

/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 */
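/*
 * Note: callers normally reach this through the NEW_VMCMD()/NEW_VMCMD2()
 * convenience macros in <sys/exec.h> rather than calling it directly.
 */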

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct lwp * l, struct exec_vmcmd *),
    u_long len, u_long addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
        struct exec_vmcmd *vcp;

        VMCMD_EVCNT_INCR(calls);

        if (evsp->evs_used >= evsp->evs_cnt)
                vmcmdset_extend(evsp);
        vcp = &evsp->evs_cmds[evsp->evs_used++];
        vcp->ev_proc = proc;
        vcp->ev_len = len;
        vcp->ev_addr = addr;
        if ((vcp->ev_vp = vp) != NULL)
                vref(vp);
        vcp->ev_offset = offset;
        vcp->ev_prot = prot;
        vcp->ev_flags = flags;
}

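/*
 * vmcmdset_extend():
 *	grow a vmcmd set: double the size of an existing set, or allocate
 *	an initial set of EXEC_DEFAULT_VMCMD_SETSIZE entries.
 */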
void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
        struct exec_vmcmd *nvcp;
        u_int ocnt;

#ifdef DIAGNOSTIC
        if (evsp->evs_used < evsp->evs_cnt)
                panic("vmcmdset_extend: not necessary");
#endif

        /* figure out number of entries in new set */
        if ((ocnt = evsp->evs_cnt) != 0) {
                evsp->evs_cnt += ocnt;
                VMCMD_EVCNT_INCR(extends);
        } else
                evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;

        /* allocate it */
        nvcp = malloc(evsp->evs_cnt * sizeof(struct exec_vmcmd),
            M_EXEC, M_WAITOK);

        /* free the old struct, if there was one, and record the new one */
        if (ocnt) {
                memcpy(nvcp, evsp->evs_cmds,
                    (ocnt * sizeof(struct exec_vmcmd)));
                free(evsp->evs_cmds, M_EXEC);
        }
        evsp->evs_cmds = nvcp;
}

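/*
 * kill_vmcmds():
 *	release the vnode references held by a vmcmd set and free the
 *	set's storage.
 */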
void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
        struct exec_vmcmd *vcp;
        u_int i;

        VMCMD_EVCNT_INCR(kills);

        if (evsp->evs_cnt == 0)
                return;

        for (i = 0; i < evsp->evs_used; i++) {
                vcp = &evsp->evs_cmds[i];
                if (vcp->ev_vp != NULL)
                        vrele(vcp->ev_vp);
        }
        evsp->evs_used = evsp->evs_cnt = 0;
        free(evsp->evs_cmds, M_EXEC);
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct lwp *l, struct exec_vmcmd *cmd)
{
        struct uvm_object *uobj;
        struct vnode *vp = cmd->ev_vp;
        struct proc *p = l->l_proc;
        int error;
        vm_prot_t prot, maxprot;

        KASSERT(vp->v_flag & VTEXT);

        /*
         * map the vnode in using uvm_map.
         */

        if (cmd->ev_len == 0)
                return(0);
        if (cmd->ev_offset & PAGE_MASK)
                return(EINVAL);
        if (cmd->ev_addr & PAGE_MASK)
                return(EINVAL);
        if (cmd->ev_len & PAGE_MASK)
                return(EINVAL);

        /*
         * first, attach to the object
         */

        uobj = uvn_attach(vp, VM_PROT_READ|VM_PROT_EXECUTE);
        if (uobj == NULL)
                return(ENOMEM);
        VREF(vp);

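        /*
         * Mark the vnode as being mapped by an executable image; the
         * flag is set with the vnode locked and the interlock held.
         */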
        if ((vp->v_flag & VMAPPED) == 0) {
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                simple_lock(&vp->v_interlock);
                vp->v_flag |= VMAPPED;
                simple_unlock(&vp->v_interlock);
                VOP_UNLOCK(vp, 0);
        }

        prot = cmd->ev_prot;
        maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
        pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

        /*
         * do the map
         */

        error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
            uobj, cmd->ev_offset, 0,
            UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
            UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
        if (error) {
                uobj->pgops->pgo_detach(uobj);
        }
        return error;
}

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
        struct proc *p = l->l_proc;
        int error;
        long diff;

        if (cmd->ev_len == 0)
                return 0;

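        /*
         * Page-align the region: slide the start address and file offset
         * back to the previous page boundary and grow the length to
         * compensate, since uvm_map() wants page-aligned addresses.
         */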
        diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
        cmd->ev_addr -= diff;			/* required by uvm_map */
        cmd->ev_offset -= diff;
        cmd->ev_len += diff;

        error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
            round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
            UVM_ADV_NORMAL,
            UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

        if (error)
                return error;

        return vmcmd_readvn(l, cmd);
}

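/*
 * vmcmd_readvn():
 *	copy the vnode contents into the (already mapped) region, then
 *	tighten the mapping's protection to what the caller asked for.
 */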
int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
        struct proc *p = l->l_proc;
        int error;
        vm_prot_t prot, maxprot;

        error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
            cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
            l->l_cred, NULL, l);
        if (error)
                return error;

        prot = cmd->ev_prot;
        maxprot = VM_PROT_ALL;
#ifdef PAX_MPROTECT
        pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PMAP_NEED_PROCWR
        /*
         * we had to write to the process' address space, so make sure
         * the pages are synched with the instruction cache.
         */
        if (prot & VM_PROT_EXECUTE)
                pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

        /*
         * we had to map in the area at PROT_ALL so that vn_rdwr()
         * could write to it.  however, the caller may want it mapped
         * with lesser protection (e.g. read-only), so now we have to
         * call uvm_map_protect() to fix up the protection.  ICK.
         */
        if (maxprot != VM_PROT_ALL) {
                error = uvm_map_protect(&p->p_vmspace->vm_map,
                    trunc_page(cmd->ev_addr),
                    round_page(cmd->ev_addr + cmd->ev_len),
                    maxprot, TRUE);
                if (error)
                        return (error);
        }

        if (prot != maxprot) {
                error = uvm_map_protect(&p->p_vmspace->vm_map,
                    trunc_page(cmd->ev_addr),
                    round_page(cmd->ev_addr + cmd->ev_len),
                    prot, FALSE);
                if (error)
                        return (error);
        }

        return 0;
}

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.
 *	The address range must first be allocated, then protected
 *	appropriately.
 */

int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
        struct proc *p = l->l_proc;
        int error;
        long diff;
        vm_prot_t prot, maxprot;

        diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
        cmd->ev_addr -= diff;			/* required by uvm_map */
        cmd->ev_len += diff;

        prot = cmd->ev_prot;
        maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
        pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

        error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
            round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
            UVM_ADV_NORMAL,
            UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
        return error;
}

/*
 * exec_read_from():
 *
 *	Read from vnode into buffer at offset.
 */
int
exec_read_from(struct lwp *l, struct vnode *vp, u_long off, void *bf,
    size_t size)
{
        int error;
        size_t resid;

        if ((error = vn_rdwr(UIO_READ, vp, bf, size, off, UIO_SYSSPACE,
            0, l->l_cred, &resid, NULL)) != 0)
                return error;
        /*
         * See if we got all of it
         */
        if (resid != 0)
                return ENOEXEC;
        return 0;
}

/*
 * exec_setup_stack(): Set up the stack segment for an ELF
 * executable.
 *
 * Note that the ep_ssize parameter must be set to the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct lwp *l, struct exec_package *epp)
{
        u_long max_stack_size;
        u_long access_linear_min, access_size;
        u_long noaccess_linear_min, noaccess_size;

#ifndef	USRSTACK32
#define USRSTACK32	(0x00000000ffffffffL&~PGOFSET)
#endif

        if (epp->ep_flags & EXEC_32) {
                epp->ep_minsaddr = USRSTACK32;
                max_stack_size = MAXSSIZ;
        } else {
                epp->ep_minsaddr = USRSTACK;
                max_stack_size = MAXSSIZ;
        }
        epp->ep_maxsaddr = (u_long)STACK_GROW(epp->ep_minsaddr,
            max_stack_size);
        epp->ep_ssize = l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur;

        /*
         * set up commands for stack.  note that this takes *two*, one to
         * map the part of the stack which we can access, and one to map
         * the part which we can't.
         *
         * arguably, it could be made into one, but that would require the
         * addition of another mapping proc, which is unnecessary
         */
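        /*
         * STACK_ALLOC() and STACK_GROW() are machine-dependent macros
         * that hide whether the stack grows up or down, so the same
         * arithmetic works on all ports.
         */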
        access_size = epp->ep_ssize;
        access_linear_min = (u_long)STACK_ALLOC(epp->ep_minsaddr, access_size);
        noaccess_size = max_stack_size - access_size;
        noaccess_linear_min = (u_long)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr,
            access_size), noaccess_size);
        if (noaccess_size > 0) {
                NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
                    noaccess_linear_min, NULL, 0, VM_PROT_NONE);
        }
        KASSERT(access_size > 0);
        NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
            access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE);

        return 0;
}