1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1994 Sean Eric Fagan
5 * Copyright (c) 1994 Søren Schmidt
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer
13 * in this position and unchanged.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/12.0/sys/i386/ibcs2/imgact_coff.c 326260 2017-11-27 15:08:52Z pfg $");
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/exec.h>
38 #include <sys/fcntl.h>
39 #include <sys/imgact.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/mman.h>
44 #include <sys/mount.h>
45 #include <sys/namei.h>
46 #include <sys/vnode.h>
47
48 #include <vm/vm.h>
49 #include <vm/pmap.h>
50 #include <vm/vm_map.h>
51 #include <vm/vm_kern.h>
52 #include <vm/vm_extern.h>
53
54 #include <i386/ibcs2/coff.h>
55 #include <i386/ibcs2/ibcs2_util.h>
56
/* The COFF activator is useless without the ibcs2 syscall translation layer. */
MODULE_DEPEND(coff, ibcs2, 1, 1, 1);

/* SVR3 system-entry vector supplied by the ibcs2 module. */
extern struct sysentvec ibcs2_svr3_sysvec;

/* Recursively load a COFF shared library by pathname. */
static int coff_load_file(struct thread *td, char *name);
/* execsw entry point: recognize and load a COFF executable image. */
static int exec_coff_imgact(struct image_params *imgp);

/* Map a single COFF section (text/data, with optional bss tail) into a vmspace. */
static int load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot);
65
/*
 * Map one COFF section into the target address space.
 *
 * vmspace - address space to populate
 * vp      - (unlocked) vnode of the image being loaded
 * offset  - file offset of the section contents
 * vmaddr  - user virtual address the section should land at
 * memsz   - in-memory size of the section (may exceed filsz for bss tail)
 * filsz   - number of bytes actually backed by the file
 * prot    - user protection for the mapping
 *
 * Returns 0 on success or an errno value.  The file-backed portion is
 * mapped MAP_PRIVATE | MAP_FIXED directly from the vnode; when memsz >
 * filsz the zero-filled remainder is provided by anonymous memory, and
 * the sub-page tail of the file data is copied into it by hand.
 */
static int
load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
		  caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
	size_t map_len;
	vm_offset_t map_offset;
	vm_offset_t map_addr;
	int error;
	unsigned char *data_buf = NULL;
	size_t copy_len;

	/* vm_mmap() wants page-aligned address/offset pairs. */
	map_offset = trunc_page(offset);
	map_addr = trunc_page((vm_offset_t)vmaddr);

	if (memsz > filsz) {
		/*
		 * We have the stupid situation that
		 * the section is longer than it is on file,
		 * which means it has zero-filled areas, and
		 * we have to work for it. Stupid iBCS!
		 *
		 * Map only the fully file-backed whole pages here;
		 * the partial last page is handled below via the
		 * anonymous mapping + copyout path.
		 */
		map_len = trunc_page(offset + filsz) - trunc_page(map_offset);
	} else {
		/*
		 * The only stuff we care about is on disk, and we
		 * don't care if we map in more than is really there.
		 */
		map_len = round_page(offset + filsz) - trunc_page(map_offset);
	}

	DPRINTF(("%s(%d): vm_mmap(&vmspace->vm_map, &0x%08jx, 0x%x, 0x%x, "
	    "VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, 0x%x)\n",
	    __FILE__, __LINE__, (uintmax_t)map_addr, map_len, prot,
	    map_offset));

	/* Map the file-backed pages directly from the vnode. */
	if ((error = vm_mmap(&vmspace->vm_map,
			     &map_addr,
			     map_len,
			     prot,
			     VM_PROT_ALL,
			     MAP_PRIVATE | MAP_FIXED,
			     OBJT_VNODE,
			     vp,
			     map_offset)) != 0)
		return error;

	if (memsz == filsz) {
		/* We're done! */
		return 0;
	}

	/*
	 * Now we have screwball stuff, to accomodate stupid COFF.
	 * We have to map the remaining bit of the file into the kernel's
	 * memory map, allocate some anonymous memory, copy that last
	 * bit into it, and then we're done. *sigh*
	 * For clean-up reasons, we actally map in the file last.
	 */

	/* Bytes of file data that live in the final, partially-backed page. */
	copy_len = (offset + filsz) - trunc_page(offset + filsz);
	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
	/* Anonymous (zero-fill) region covering the bss tail. */
	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

	DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%08jx,0x%x, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0)\n", __FILE__, __LINE__, (uintmax_t)map_addr, map_len));

	if (map_len != 0) {
		error = vm_map_find(&vmspace->vm_map, NULL, 0, &map_addr,
		    map_len, 0, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			return (vm_mmap_to_errno(error));
	}

	/*
	 * Map the last file page read-only into the kernel's exec_map so
	 * we can copy the sub-page tail out to the user's anonymous pages.
	 * NOTE(review): if copy_len is 0 (filsz ends page-aligned) this
	 * mapping and the copyout below are pointless but harmless; also,
	 * on failure here the anonymous mapping created above is not torn
	 * down — the whole exec fails anyway, so the vmspace is discarded.
	 */
	if ((error = vm_mmap(exec_map,
			    (vm_offset_t *) &data_buf,
			    PAGE_SIZE,
			    VM_PROT_READ,
			    VM_PROT_READ,
			    0,
			    OBJT_VNODE,
			    vp,
			    trunc_page(offset + filsz))) != 0)
		return error;

	error = copyout(data_buf, (caddr_t) map_addr, copy_len);

	/* Release the temporary kernel mapping regardless of copyout result. */
	kmap_free_wakeup(exec_map, (vm_offset_t)data_buf, PAGE_SIZE);

	return error;
}
155
156 static int
157 coff_load_file(struct thread *td, char *name)
158 {
159 struct proc *p = td->td_proc;
160 struct vmspace *vmspace = p->p_vmspace;
161 int error;
162 struct nameidata nd;
163 struct vnode *vp;
164 struct vattr attr;
165 struct filehdr *fhdr;
166 struct aouthdr *ahdr;
167 struct scnhdr *scns;
168 char *ptr = NULL;
169 int nscns;
170 unsigned long text_offset = 0, text_address = 0, text_size = 0;
171 unsigned long data_offset = 0, data_address = 0, data_size = 0;
172 unsigned long bss_size = 0;
173 int i, writecount;
174
175 NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME,
176 UIO_SYSSPACE, name, td);
177
178 error = namei(&nd);
179 if (error)
180 return error;
181
182 vp = nd.ni_vp;
183 if (vp == NULL)
184 return ENOEXEC;
185
186 error = VOP_GET_WRITECOUNT(vp, &writecount);
187 if (error != 0)
188 goto fail;
189 if (writecount != 0) {
190 error = ETXTBSY;
191 goto fail;
192 }
193
194 if ((error = VOP_GETATTR(vp, &attr, td->td_ucred)) != 0)
195 goto fail;
196
197 if ((vp->v_mount->mnt_flag & MNT_NOEXEC)
198 || ((attr.va_mode & 0111) == 0)
199 || (attr.va_type != VREG))
200 goto fail;
201
202 if (attr.va_size == 0) {
203 error = ENOEXEC;
204 goto fail;
205 }
206
207 if ((error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td)) != 0)
208 goto fail;
209
210 if ((error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL)) != 0)
211 goto fail;
212
213 /*
214 * Lose the lock on the vnode. It's no longer needed, and must not
215 * exist for the pagefault paging to work below.
216 */
217 VOP_UNLOCK(vp, 0);
218
219 if ((error = vm_mmap(exec_map,
220 (vm_offset_t *) &ptr,
221 PAGE_SIZE,
222 VM_PROT_READ,
223 VM_PROT_READ,
224 0,
225 OBJT_VNODE,
226 vp,
227 0)) != 0)
228 goto unlocked_fail;
229
230 fhdr = (struct filehdr *)ptr;
231
232 if (fhdr->f_magic != I386_COFF) {
233 error = ENOEXEC;
234 goto dealloc_and_fail;
235 }
236
237 nscns = fhdr->f_nscns;
238
239 if ((nscns * sizeof(struct scnhdr)) > PAGE_SIZE) {
240 /*
241 * XXX -- just fail. I'm so lazy.
242 */
243 error = ENOEXEC;
244 goto dealloc_and_fail;
245 }
246
247 ahdr = (struct aouthdr*)(ptr + sizeof(struct filehdr));
248
249 scns = (struct scnhdr*)(ptr + sizeof(struct filehdr)
250 + sizeof(struct aouthdr));
251
252 for (i = 0; i < nscns; i++) {
253 if (scns[i].s_flags & STYP_NOLOAD)
254 continue;
255 else if (scns[i].s_flags & STYP_TEXT) {
256 text_address = scns[i].s_vaddr;
257 text_size = scns[i].s_size;
258 text_offset = scns[i].s_scnptr;
259 }
260 else if (scns[i].s_flags & STYP_DATA) {
261 data_address = scns[i].s_vaddr;
262 data_size = scns[i].s_size;
263 data_offset = scns[i].s_scnptr;
264 } else if (scns[i].s_flags & STYP_BSS) {
265 bss_size = scns[i].s_size;
266 }
267 }
268
269 if ((error = load_coff_section(vmspace, vp, text_offset,
270 (caddr_t)(void *)(uintptr_t)text_address,
271 text_size, text_size,
272 VM_PROT_READ | VM_PROT_EXECUTE)) != 0) {
273 goto dealloc_and_fail;
274 }
275 if ((error = load_coff_section(vmspace, vp, data_offset,
276 (caddr_t)(void *)(uintptr_t)data_address,
277 data_size + bss_size, data_size,
278 VM_PROT_ALL)) != 0) {
279 goto dealloc_and_fail;
280 }
281
282 error = 0;
283
284 dealloc_and_fail:
285 kmap_free_wakeup(exec_map, (vm_offset_t)ptr, PAGE_SIZE);
286 fail:
287 VOP_UNLOCK(vp, 0);
288 unlocked_fail:
289 NDFREE(&nd, NDF_ONLY_PNBUF);
290 vrele(nd.ni_vp);
291 return error;
292 }
293
294 static int
295 exec_coff_imgact(imgp)
296 struct image_params *imgp;
297 {
298 const struct filehdr *fhdr = (const struct filehdr*)imgp->image_header;
299 const struct aouthdr *ahdr;
300 const struct scnhdr *scns;
301 int i;
302 struct vmspace *vmspace;
303 int nscns;
304 int error;
305 unsigned long text_offset = 0, text_address = 0, text_size = 0;
306 unsigned long data_offset = 0, data_address = 0, data_size = 0;
307 unsigned long bss_size = 0;
308 vm_offset_t hole;
309
310 if (fhdr->f_magic != I386_COFF ||
311 !(fhdr->f_flags & F_EXEC)) {
312
313 DPRINTF(("%s(%d): return -1\n", __FILE__, __LINE__));
314 return -1;
315 }
316
317 nscns = fhdr->f_nscns;
318 if ((nscns * sizeof(struct scnhdr)) > PAGE_SIZE) {
319 /*
320 * For now, return an error -- need to be able to
321 * read in all of the section structures.
322 */
323
324 DPRINTF(("%s(%d): return -1\n", __FILE__, __LINE__));
325 return -1;
326 }
327
328 ahdr = (const struct aouthdr*)
329 ((const char*)(imgp->image_header) + sizeof(struct filehdr));
330 imgp->entry_addr = ahdr->entry;
331
332 scns = (const struct scnhdr*)
333 ((const char*)(imgp->image_header) + sizeof(struct filehdr) +
334 sizeof(struct aouthdr));
335
336 VOP_UNLOCK(imgp->vp, 0);
337
338 error = exec_new_vmspace(imgp, &ibcs2_svr3_sysvec);
339 if (error)
340 goto fail;
341 vmspace = imgp->proc->p_vmspace;
342
343 for (i = 0; i < nscns; i++) {
344
345 DPRINTF(("i = %d, s_name = %s, s_vaddr = %08lx, "
346 "s_scnptr = %ld s_size = %lx\n", i, scns[i].s_name,
347 scns[i].s_vaddr, scns[i].s_scnptr, scns[i].s_size));
348 if (scns[i].s_flags & STYP_NOLOAD) {
349 /*
350 * A section that is not loaded, for whatever
351 * reason. It takes precedance over other flag
352 * bits...
353 */
354 continue;
355 } else if (scns[i].s_flags & STYP_TEXT) {
356 text_address = scns[i].s_vaddr;
357 text_size = scns[i].s_size;
358 text_offset = scns[i].s_scnptr;
359 } else if (scns[i].s_flags & STYP_DATA) {
360 /* .data section */
361 data_address = scns[i].s_vaddr;
362 data_size = scns[i].s_size;
363 data_offset = scns[i].s_scnptr;
364 } else if (scns[i].s_flags & STYP_BSS) {
365 /* .bss section */
366 bss_size = scns[i].s_size;
367 } else if (scns[i].s_flags & STYP_LIB) {
368 char *buf = NULL;
369 int foff = trunc_page(scns[i].s_scnptr);
370 int off = scns[i].s_scnptr - foff;
371 int len = round_page(scns[i].s_size + PAGE_SIZE);
372 int j;
373
374 if ((error = vm_mmap(exec_map,
375 (vm_offset_t *) &buf,
376 len,
377 VM_PROT_READ,
378 VM_PROT_READ,
379 MAP_SHARED,
380 OBJT_VNODE,
381 imgp->vp,
382 foff)) != 0) {
383 error = ENOEXEC;
384 goto fail;
385 }
386 if(scns[i].s_size) {
387 char *libbuf;
388 int emul_path_len = strlen(ibcs2_emul_path);
389
390 libbuf = malloc(MAXPATHLEN + emul_path_len,
391 M_TEMP, M_WAITOK);
392 strcpy(libbuf, ibcs2_emul_path);
393
394 for (j = off; j < scns[i].s_size + off;) {
395 long stroff, nextoff;
396 char *libname;
397
398 nextoff = 4 * *(long *)(buf + j);
399 stroff = 4 * *(long *)(buf + j + sizeof(long));
400
401 libname = buf + j + stroff;
402 j += nextoff;
403
404 DPRINTF(("%s(%d): shared library %s\n",
405 __FILE__, __LINE__, libname));
406 strlcpy(&libbuf[emul_path_len], libname, MAXPATHLEN);
407 error = coff_load_file(
408 FIRST_THREAD_IN_PROC(imgp->proc), libbuf);
409 if (error)
410 error = coff_load_file(
411 FIRST_THREAD_IN_PROC(imgp->proc),
412 libname);
413 if (error) {
414 printf(
415 "error %d loading coff shared library %s\n",
416 error, libname);
417 break;
418 }
419 }
420 free(libbuf, M_TEMP);
421 }
422 kmap_free_wakeup(exec_map, (vm_offset_t)buf, len);
423 if (error)
424 goto fail;
425 }
426 }
427 /*
428 * Map in .text now
429 */
430
431 DPRINTF(("%s(%d): load_coff_section(vmspace, "
432 "imgp->vp, %08lx, %08lx, 0x%lx, 0x%lx, 0x%x)\n",
433 __FILE__, __LINE__, text_offset, text_address,
434 text_size, text_size, VM_PROT_READ | VM_PROT_EXECUTE));
435 if ((error = load_coff_section(vmspace, imgp->vp,
436 text_offset,
437 (caddr_t)(void *)(uintptr_t)text_address,
438 text_size, text_size,
439 VM_PROT_READ | VM_PROT_EXECUTE)) != 0) {
440 DPRINTF(("%s(%d): error = %d\n", __FILE__, __LINE__, error));
441 goto fail;
442 }
443 /*
444 * Map in .data and .bss now
445 */
446
447
448 DPRINTF(("%s(%d): load_coff_section(vmspace, "
449 "imgp->vp, 0x%08lx, 0x%08lx, 0x%lx, 0x%lx, 0x%x)\n",
450 __FILE__, __LINE__, data_offset, data_address,
451 data_size + bss_size, data_size, VM_PROT_ALL));
452 if ((error = load_coff_section(vmspace, imgp->vp,
453 data_offset,
454 (caddr_t)(void *)(uintptr_t)data_address,
455 data_size + bss_size, data_size,
456 VM_PROT_ALL)) != 0) {
457
458 DPRINTF(("%s(%d): error = %d\n", __FILE__, __LINE__, error));
459 goto fail;
460 }
461
462 imgp->interpreted = 0;
463 imgp->proc->p_sysent = &ibcs2_svr3_sysvec;
464
465 vmspace->vm_tsize = round_page(text_size) >> PAGE_SHIFT;
466 vmspace->vm_dsize = round_page(data_size + bss_size) >> PAGE_SHIFT;
467 vmspace->vm_taddr = (caddr_t)(void *)(uintptr_t)text_address;
468 vmspace->vm_daddr = (caddr_t)(void *)(uintptr_t)data_address;
469
470 hole = trunc_page((vm_offset_t)vmspace->vm_daddr +
471 ctob(vmspace->vm_dsize));
472
473 DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%jx, PAGE_SIZE, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0)\n",
474 __FILE__, __LINE__, (uintmax_t)hole));
475 DPRINTF(("imgact: error = %d\n", error));
476
477 vm_map_find(&vmspace->vm_map, NULL, 0,
478 (vm_offset_t *)&hole, PAGE_SIZE, 0, VMFS_NO_SPACE,
479 VM_PROT_ALL, VM_PROT_ALL, 0);
480 DPRINTF(("IBCS2: start vm_dsize = 0x%x, vm_daddr = 0x%p end = 0x%p\n",
481 ctob(vmspace->vm_dsize), vmspace->vm_daddr,
482 ctob(vmspace->vm_dsize) + vmspace->vm_daddr ));
483 DPRINTF(("%s(%d): returning %d!\n", __FILE__, __LINE__, error));
484
485 fail:
486 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
487
488 return (error);
489 }
490
491 /*
492 * Tell kern_execve.c about it, with a little help from the linker.
493 */
/* Register exec_coff_imgact with the execve() image-activator list. */
static struct execsw coff_execsw = { exec_coff_imgact, "coff" };
EXEC_SET(coff, coff_execsw);