FreeBSD/Linux Kernel Cross Reference
sys/fs/binfmt_elf.c
1 /*
2 * linux/fs/binfmt_elf.c
3 *
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
7 * Tools".
8 *
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
10 */
11
12 #include <linux/module.h>
13
14 #include <linux/fs.h>
15 #include <linux/stat.h>
16 #include <linux/sched.h>
17 #include <linux/mm.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp_lock.h>
34 #include <linux/compiler.h>
35 #include <linux/highmem.h>
36
37 #include <asm/uaccess.h>
38 #include <asm/param.h>
39 #include <asm/pgalloc.h>
40
41 #define DLINFO_ITEMS 13
42
43 #include <linux/elf.h>
44
45 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
46 static int load_elf_library(struct file*);
47 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
48 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
49 extern void dump_thread(struct pt_regs *, struct user *);
50
51 #ifndef elf_addr_t
52 #define elf_addr_t unsigned long
53 #define elf_caddr_t char *
54 #endif
55
56 /*
57 * If we don't support core dumping, then supply a NULL so we
58 * don't even try.
59 */
60 #ifdef USE_ELF_CORE_DUMP
61 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
62 #else
63 #define elf_core_dump NULL
64 #endif
65
66 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
67 # define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
68 #else
69 # define ELF_MIN_ALIGN PAGE_SIZE
70 #endif
71
72 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
73 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
74 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
75
/* Registration record for the binfmt loader list: handlers for exec
   (load_elf_binary), uselib (load_elf_library) and core dumping, plus
   the minimum page size used when laying out core dumps. */
static struct linux_binfmt elf_format = {
	NULL, THIS_MODULE, load_elf_binary, load_elf_library, elf_core_dump, ELF_EXEC_PAGESIZE
};
79
80 #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
81
/* Back the address range [start, end) with anonymous zero pages by
   growing the brk.  Both bounds are rounded up to ELF_MIN_ALIGN first;
   if the rounded range is empty there is nothing to do. */
static void set_brk(unsigned long start, unsigned long end)
{
	unsigned long lo = ELF_PAGEALIGN(start);
	unsigned long hi = ELF_PAGEALIGN(end);

	if (hi > lo)
		do_brk(lo, hi - lo);
}
90
91
92 /* We need to explicitly zero any fractional pages
93 after the data section (i.e. bss). This would
94 contain the junk from the file that should not
95 be in memory */
96
97
98 static void padzero(unsigned long elf_bss)
99 {
100 unsigned long nbyte;
101
102 nbyte = ELF_PAGEOFFSET(elf_bss);
103 if (nbyte) {
104 nbyte = ELF_MIN_ALIGN - nbyte;
105 clear_user((void *) elf_bss, nbyte);
106 }
107 }
108
/*
 * Build the initial user-stack image for a new ELF process: the platform
 * string, the ELF auxiliary vector (AT_* id/value pairs), envp[] and
 * argv[] pointer arrays, and argc itself.  'p' points just past the
 * argument/environment strings that exec already copied onto the stack.
 * 'ibcs' selects the iBCS2 layout (no envp/argv back-pointers below
 * argc) versus the native layout.  Also records arg/env boundaries in
 * current->mm.  Returns the final stack pointer, or NULL if an argument
 * or environment string is unterminated/oversized.
 */
static elf_addr_t *
create_elf_tables(char *p, int argc, int envc,
		  struct elfhdr * exec,
		  unsigned long load_addr,
		  unsigned long load_bias,
		  unsigned long interp_load_addr, int ibcs)
{
	elf_caddr_t *argv;
	elf_caddr_t *envp;
	elf_addr_t *sp, *csp;
	char *k_platform, *u_platform;
	long hwcap;
	size_t platform_len = 0;
	size_t len;

	/*
	 * Get hold of platform and hardware capabilities masks for
	 * the machine we are running on.  In some cases (Sparc),
	 * this info is impossible to get, in others (i386) it is
	 * merely difficult.
	 */

	hwcap = ELF_HWCAP;
	k_platform = ELF_PLATFORM;

	/* Copy the platform string onto the user stack, just below the
	   argument strings, so AT_PLATFORM can point at it. */
	if (k_platform) {
		platform_len = strlen(k_platform) + 1;
		u_platform = p - platform_len;
		__copy_to_user(u_platform, k_platform, platform_len);
	} else
		u_platform = p;

#if defined(__i386__) && defined(CONFIG_SMP)
	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1 evictions
	 * by the processes running on the same package. One thing we can do
	 * is to shuffle the initial stack for them.
	 *
	 * The conditionals here are unneeded, but kept in to make the
	 * code behaviour the same as pre change unless we have hyperthreaded
	 * processors. This keeps Mr Marcelo Person happier but should be
	 * removed for 2.5
	 */

	/* Per-pid offset (0..63 cache-line-ish steps) to spread stacks. */
	if(smp_num_siblings > 1)
		u_platform = u_platform - ((current->pid % 64) << 7);
#endif

	/*
	 * Force 16 byte _final_ alignment here for generality.
	 */
	sp = (elf_addr_t *)(~15UL & (unsigned long)(u_platform));
	/* Dry-run the layout with csp to find where the bottom (argc)
	   would land, then pre-adjust sp so that bottom is 16-aligned. */
	csp = sp;
	csp -= (1+DLINFO_ITEMS)*2 + (k_platform ? 2 : 0);
#ifdef DLINFO_ARCH_ITEMS
	csp -= DLINFO_ARCH_ITEMS*2;
#endif
	csp -= envc+1;
	csp -= argc+1;
	csp -= (!ibcs ? 3 : 1);	/* argc itself */
	if ((unsigned long)csp & 15UL)
		sp -= ((unsigned long)csp & 15UL) / sizeof(*sp);

	/*
	 * Put the ELF interpreter info on the stack
	 */
#define NEW_AUX_ENT(nr, id, val) \
	  __put_user ((id), sp+(nr*2)); \
	  __put_user ((val), sp+(nr*2+1)); \

	/* Aux vector is built top-down: AT_NULL terminator first. */
	sp -= 2;
	NEW_AUX_ENT(0, AT_NULL, 0);
	if (k_platform) {
		sp -= 2;
		NEW_AUX_ENT(0, AT_PLATFORM, (elf_addr_t)(unsigned long) u_platform);
	}
	sp -= DLINFO_ITEMS*2;
	NEW_AUX_ENT( 0, AT_HWCAP, hwcap);
	NEW_AUX_ENT( 1, AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT( 2, AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT( 3, AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT( 4, AT_PHENT, sizeof (struct elf_phdr));
	NEW_AUX_ENT( 5, AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT( 6, AT_BASE, interp_load_addr);
	NEW_AUX_ENT( 7, AT_FLAGS, 0);
	NEW_AUX_ENT( 8, AT_ENTRY, load_bias + exec->e_entry);
	NEW_AUX_ENT( 9, AT_UID, (elf_addr_t) current->uid);
	NEW_AUX_ENT(10, AT_EUID, (elf_addr_t) current->euid);
	NEW_AUX_ENT(11, AT_GID, (elf_addr_t) current->gid);
	NEW_AUX_ENT(12, AT_EGID, (elf_addr_t) current->egid);
#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come last so platform specific code can enforce
	 * special alignment requirements on the AUXV if necessary (eg. PPC).
	 */
	ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

	/* Reserve the envp[] and argv[] pointer arrays below the auxv. */
	sp -= envc+1;
	envp = (elf_caddr_t *) sp;
	sp -= argc+1;
	argv = (elf_caddr_t *) sp;
	if (!ibcs) {
		__put_user((elf_addr_t)(unsigned long) envp,--sp);
		__put_user((elf_addr_t)(unsigned long) argv,--sp);
	}

	__put_user((elf_addr_t)argc,--sp);
	current->mm->arg_start = (unsigned long) p;
	/* Walk the already-copied strings, filling in argv[] entries;
	   strnlen_user includes the NUL, 0 means an access fault. */
	while (argc-->0) {
		__put_user((elf_caddr_t)(unsigned long)p,argv++);
		len = strnlen_user(p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return NULL;
		p += len;
	}
	__put_user(NULL, argv);
	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
	while (envc-->0) {
		__put_user((elf_caddr_t)(unsigned long)p,envp++);
		len = strnlen_user(p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return NULL;
		p += len;
	}
	__put_user(NULL, envp);
	current->mm->env_end = (unsigned long) p;
	return sp;
}
239
240 #ifndef elf_map
241
242 static inline unsigned long
243 elf_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
244 {
245 unsigned long map_addr;
246
247 down_write(¤t->mm->mmap_sem);
248 map_addr = do_mmap(filep, ELF_PAGESTART(addr),
249 eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type,
250 eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
251 up_write(¤t->mm->mmap_sem);
252 return(map_addr);
253 }
254
255 #endif /* !elf_map */
256
257 /* This is much more generalized than the library routine read function,
258 so we keep this separate. Technically the library read function
259 is only provided so that we can read a.out libraries that have
260 an ELF header */
261
/*
 * Map the ELF interpreter (dynamic linker) named by the executable's
 * PT_INTERP segment into the current address space and set up its bss.
 * For an ET_DYN interpreter the base is wherever the first do_mmap lands;
 * *interp_load_addr receives that base (0 for ET_EXEC).  Returns the
 * interpreter's (relocated) entry address, or a value failing BAD_ADDR()
 * — initialised to ~0UL — on any error.
 */
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
				     struct file * interpreter,
				     unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	/* Bound e_phnum so the size computation below cannot overflow. */
	if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */

	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
	error = retval;
	if (retval < 0)
		goto out_close;

	/* Map every PT_LOAD segment, tracking the highest file-backed
	   address (elf_bss) and highest in-memory address (last_bss). */
	eppnt = elf_phdata;
	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
			if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			/* First ET_DYN mapping floats; all later segments
			   (and all ET_EXEC ones) go at a fixed address. */
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;

			map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Find the end of the file mapping for this phdr, and keep
			 * track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/* Now use mmap to map the library into memory. */

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	padzero(elf_bss);
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss)
		do_brk(elf_bss, last_bss - elf_bss);

	*interp_load_addr = load_addr;
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
371
/*
 * Load an a.out-format interpreter (the old ld.so) named by an ELF
 * executable's PT_INTERP.  Reads text+data to the magic-dependent
 * address and extends the brk for the bss.  Returns the interpreter's
 * entry point, or ~0UL (fails BAD_ADDR()) on error.
 */
static unsigned long load_aout_interp(struct exec * interp_ex,
				      struct file * interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char * addr;
	loff_t offset;
	int retval;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	/* File offset and load address depend on the a.out flavour. */
	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char *) 0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char *) N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	/* Reserve address space for text+data, then read the image into
	   it through the file's read method (addr is a user address). */
	do_brk(0, text_data);
	retval = -ENOEXEC;
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	retval = interpreter->f_op->read(interpreter, addr, text_data, &offset);
	if (retval < 0)
		goto out;
	/* Freshly written code: keep I-cache coherent with D-cache. */
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	/* Anonymous zero pages for the interpreter's bss. */
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}
416
417 /*
418 * These are the functions used to load ELF style executables and shared
419 * libraries. There is no binary dependent code anywhere else.
420 */
421
422 #define INTERPRETER_NONE 0
423 #define INTERPRETER_AOUT 1
424 #define INTERPRETER_ELF 2
425
426
427 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
428 {
429 struct file *interpreter = NULL; /* to shut gcc up */
430 unsigned long load_addr = 0, load_bias = 0;
431 int load_addr_set = 0;
432 char * elf_interpreter = NULL;
433 unsigned int interpreter_type = INTERPRETER_NONE;
434 unsigned char ibcs2_interpreter = 0;
435 unsigned long error;
436 struct elf_phdr * elf_ppnt, *elf_phdata;
437 unsigned long elf_bss, k, elf_brk;
438 int elf_exec_fileno;
439 int retval, i;
440 unsigned int size;
441 unsigned long elf_entry, interp_load_addr = 0;
442 unsigned long start_code, end_code, start_data, end_data;
443 struct elfhdr elf_ex;
444 struct elfhdr interp_elf_ex;
445 struct exec interp_ex;
446 char passed_fileno[6];
447 struct files_struct *files;
448
449 /* Get the exec-header */
450 elf_ex = *((struct elfhdr *) bprm->buf);
451
452 retval = -ENOEXEC;
453 /* First of all, some simple consistency checks */
454 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
455 goto out;
456
457 if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
458 goto out;
459 if (!elf_check_arch(&elf_ex))
460 goto out;
461 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
462 goto out;
463
464 /* Now read in all of the header information */
465
466 retval = -ENOMEM;
467 if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
468 goto out;
469 if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
470 goto out;
471 size = elf_ex.e_phnum * sizeof(struct elf_phdr);
472 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
473 if (!elf_phdata)
474 goto out;
475
476 retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
477 if (retval < 0)
478 goto out_free_ph;
479
480 files = current->files; /* Refcounted so ok */
481 if(unshare_files() < 0)
482 goto out_free_ph;
483 if (files == current->files) {
484 put_files_struct(files);
485 files = NULL;
486 }
487
488 /* exec will make our files private anyway, but for the a.out
489 loader stuff we need to do it earlier */
490
491 retval = get_unused_fd();
492 if (retval < 0)
493 goto out_free_fh;
494 get_file(bprm->file);
495 fd_install(elf_exec_fileno = retval, bprm->file);
496
497 elf_ppnt = elf_phdata;
498 elf_bss = 0;
499 elf_brk = 0;
500
501 start_code = ~0UL;
502 end_code = 0;
503 start_data = 0;
504 end_data = 0;
505
506 for (i = 0; i < elf_ex.e_phnum; i++) {
507 if (elf_ppnt->p_type == PT_INTERP) {
508 /* This is the program interpreter used for
509 * shared libraries - for now assume that this
510 * is an a.out format binary
511 */
512
513 retval = -ENOMEM;
514 if (elf_ppnt->p_filesz > PATH_MAX)
515 goto out_free_file;
516 elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
517 GFP_KERNEL);
518 if (!elf_interpreter)
519 goto out_free_file;
520
521 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
522 elf_interpreter,
523 elf_ppnt->p_filesz);
524 if (retval < 0)
525 goto out_free_interp;
526 /* If the program interpreter is one of these two,
527 * then assume an iBCS2 image. Otherwise assume
528 * a native linux image.
529 */
530 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
531 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
532 ibcs2_interpreter = 1;
533 #if 0
534 printk("Using ELF interpreter %s\n", elf_interpreter);
535 #endif
536
537 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
538
539 interpreter = open_exec(elf_interpreter);
540 retval = PTR_ERR(interpreter);
541 if (IS_ERR(interpreter))
542 goto out_free_interp;
543 retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
544 if (retval < 0)
545 goto out_free_dentry;
546
547 /* Get the exec headers */
548 interp_ex = *((struct exec *) bprm->buf);
549 interp_elf_ex = *((struct elfhdr *) bprm->buf);
550 break;
551 }
552 elf_ppnt++;
553 }
554
555 /* Some simple consistency checks for the interpreter */
556 if (elf_interpreter) {
557 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
558
559 /* Now figure out which format our binary is */
560 if ((N_MAGIC(interp_ex) != OMAGIC) &&
561 (N_MAGIC(interp_ex) != ZMAGIC) &&
562 (N_MAGIC(interp_ex) != QMAGIC))
563 interpreter_type = INTERPRETER_ELF;
564
565 if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
566 interpreter_type &= ~INTERPRETER_ELF;
567
568 retval = -ELIBBAD;
569 if (!interpreter_type)
570 goto out_free_dentry;
571
572 /* Make sure only one type was selected */
573 if ((interpreter_type & INTERPRETER_ELF) &&
574 interpreter_type != INTERPRETER_ELF) {
575 // FIXME - ratelimit this before re-enabling
576 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
577 interpreter_type = INTERPRETER_ELF;
578 }
579 } else {
580 /* Executables without an interpreter also need a personality */
581 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
582 }
583
584 /* OK, we are done with that, now set up the arg stuff,
585 and then start this sucker up */
586
587 if (!bprm->sh_bang) {
588 char * passed_p;
589
590 if (interpreter_type == INTERPRETER_AOUT) {
591 sprintf(passed_fileno, "%d", elf_exec_fileno);
592 passed_p = passed_fileno;
593
594 if (elf_interpreter) {
595 retval = copy_strings_kernel(1,&passed_p,bprm);
596 if (retval)
597 goto out_free_dentry;
598 bprm->argc++;
599 }
600 }
601 }
602
603 /* Flush all traces of the currently running executable */
604 retval = flush_old_exec(bprm);
605 if (retval)
606 goto out_free_dentry;
607
608 /* Discard our unneeded old files struct */
609 if (files) {
610 steal_locks(files);
611 put_files_struct(files);
612 files = NULL;
613 }
614
615 /* OK, This is the point of no return */
616 current->mm->start_data = 0;
617 current->mm->end_data = 0;
618 current->mm->end_code = 0;
619 current->mm->mmap = NULL;
620 current->flags &= ~PF_FORKNOEXEC;
621 elf_entry = (unsigned long) elf_ex.e_entry;
622
623 /* Do this so that we can load the interpreter, if need be. We will
624 change some of these later */
625 current->mm->rss = 0;
626 retval = setup_arg_pages(bprm);
627 if (retval < 0) {
628 send_sig(SIGKILL, current, 0);
629 return retval;
630 }
631
632 current->mm->start_stack = bprm->p;
633
634 /* Now we do a little grungy work by mmaping the ELF image into
635 the correct location in memory. At this point, we assume that
636 the image should be loaded at fixed address, not at a variable
637 address. */
638
639 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
640 int elf_prot = 0, elf_flags;
641 unsigned long vaddr;
642
643 if (elf_ppnt->p_type != PT_LOAD)
644 continue;
645
646 if (unlikely (elf_brk > elf_bss)) {
647 unsigned long nbyte;
648
649 /* There was a PT_LOAD segment with p_memsz > p_filesz
650 before this one. Map anonymous pages, if needed,
651 and clear the area. */
652 set_brk (elf_bss + load_bias, elf_brk + load_bias);
653 nbyte = ELF_PAGEOFFSET(elf_bss);
654 if (nbyte) {
655 nbyte = ELF_MIN_ALIGN - nbyte;
656 if (nbyte > elf_brk - elf_bss)
657 nbyte = elf_brk - elf_bss;
658 clear_user((void *) elf_bss + load_bias, nbyte);
659 }
660 }
661
662 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
663 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
664 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
665
666 elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
667
668 vaddr = elf_ppnt->p_vaddr;
669 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
670 elf_flags |= MAP_FIXED;
671 } else if (elf_ex.e_type == ET_DYN) {
672 /* Try and get dynamic programs out of the way of the default mmap
673 base, as well as whatever program they might try to exec. This
674 is because the brk will follow the loader, and is not movable. */
675 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
676 }
677
678 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
679 if (BAD_ADDR(error))
680 continue;
681
682 if (!load_addr_set) {
683 load_addr_set = 1;
684 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
685 if (elf_ex.e_type == ET_DYN) {
686 load_bias += error -
687 ELF_PAGESTART(load_bias + vaddr);
688 load_addr += load_bias;
689 }
690 }
691 k = elf_ppnt->p_vaddr;
692 if (k < start_code) start_code = k;
693 if (start_data < k) start_data = k;
694
695 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
696
697 if (k > elf_bss)
698 elf_bss = k;
699 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
700 end_code = k;
701 if (end_data < k)
702 end_data = k;
703 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
704 if (k > elf_brk)
705 elf_brk = k;
706 }
707
708 elf_entry += load_bias;
709 elf_bss += load_bias;
710 elf_brk += load_bias;
711 start_code += load_bias;
712 end_code += load_bias;
713 start_data += load_bias;
714 end_data += load_bias;
715
716 if (elf_interpreter) {
717 if (interpreter_type == INTERPRETER_AOUT)
718 elf_entry = load_aout_interp(&interp_ex,
719 interpreter);
720 else
721 elf_entry = load_elf_interp(&interp_elf_ex,
722 interpreter,
723 &interp_load_addr);
724 if (BAD_ADDR(elf_entry)) {
725 printk(KERN_ERR "Unable to load interpreter\n");
726 send_sig(SIGSEGV, current, 0);
727 retval = -ENOEXEC; /* Nobody gets to see this, but.. */
728 goto out_free_dentry;
729 }
730
731 allow_write_access(interpreter);
732 fput(interpreter);
733 kfree(elf_interpreter);
734 }
735
736 kfree(elf_phdata);
737
738 if (interpreter_type != INTERPRETER_AOUT)
739 sys_close(elf_exec_fileno);
740
741 set_binfmt(&elf_format);
742
743 compute_creds(bprm);
744 current->flags &= ~PF_FORKNOEXEC;
745 bprm->p = (unsigned long)
746 create_elf_tables((char *)bprm->p,
747 bprm->argc,
748 bprm->envc,
749 &elf_ex,
750 load_addr, load_bias,
751 interp_load_addr,
752 (interpreter_type == INTERPRETER_AOUT ? 0 : 1));
753 /* N.B. passed_fileno might not be initialized? */
754 if (interpreter_type == INTERPRETER_AOUT)
755 current->mm->arg_start += strlen(passed_fileno) + 1;
756 current->mm->start_brk = current->mm->brk = elf_brk;
757 current->mm->end_code = end_code;
758 current->mm->start_code = start_code;
759 current->mm->start_data = start_data;
760 current->mm->end_data = end_data;
761 current->mm->start_stack = bprm->p;
762
763 /* Calling set_brk effectively mmaps the pages that we need
764 * for the bss and break sections
765 */
766 set_brk(elf_bss, elf_brk);
767
768 padzero(elf_bss);
769
770 #if 0
771 printk("(start_brk) %lx\n" , (long) current->mm->start_brk);
772 printk("(end_code) %lx\n" , (long) current->mm->end_code);
773 printk("(start_code) %lx\n" , (long) current->mm->start_code);
774 printk("(start_data) %lx\n" , (long) current->mm->start_data);
775 printk("(end_data) %lx\n" , (long) current->mm->end_data);
776 printk("(start_stack) %lx\n" , (long) current->mm->start_stack);
777 printk("(brk) %lx\n" , (long) current->mm->brk);
778 #endif
779
780 if (current->personality & MMAP_PAGE_ZERO) {
781 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
782 and some applications "depend" upon this behavior.
783 Since we do not have the power to recompile these, we
784 emulate the SVr4 behavior. Sigh. */
785 /* N.B. Shouldn't the size here be PAGE_SIZE?? */
786 down_write(¤t->mm->mmap_sem);
787 error = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
788 MAP_FIXED | MAP_PRIVATE, 0);
789 up_write(¤t->mm->mmap_sem);
790 }
791
792 #ifdef ELF_PLAT_INIT
793 /*
794 * The ABI may specify that certain registers be set up in special
795 * ways (on i386 %edx is the address of a DT_FINI function, for
796 * example. This macro performs whatever initialization to
797 * the regs structure is required.
798 */
799 ELF_PLAT_INIT(regs);
800 #endif
801
802 start_thread(regs, elf_entry, bprm->p);
803 if (current->ptrace & PT_PTRACED)
804 send_sig(SIGTRAP, current, 0);
805 retval = 0;
806 out:
807 return retval;
808
809 /* error cleanup */
810 out_free_dentry:
811 allow_write_access(interpreter);
812 fput(interpreter);
813 out_free_interp:
814 if (elf_interpreter)
815 kfree(elf_interpreter);
816 out_free_file:
817 sys_close(elf_exec_fileno);
818 out_free_fh:
819 if (files) {
820 put_files_struct(current->files);
821 current->files = files;
822 }
823 out_free_ph:
824 kfree(elf_phdata);
825 goto out;
826 }
827
828 /* This is really simpleminded and specialized - we are loading an
829 a.out library that is given an ELF header. */
830
831 static int load_elf_library(struct file *file)
832 {
833 struct elf_phdr *elf_phdata;
834 unsigned long elf_bss, bss, len;
835 int retval, error, i, j;
836 struct elfhdr elf_ex;
837
838 error = -ENOEXEC;
839 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
840 if (retval != sizeof(elf_ex))
841 goto out;
842
843 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
844 goto out;
845
846 /* First of all, some simple consistency checks */
847 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
848 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
849 goto out;
850
851 /* Now read in all of the header information */
852
853 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
854 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
855
856 error = -ENOMEM;
857 elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
858 if (!elf_phdata)
859 goto out;
860
861 error = -ENOEXEC;
862 retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
863 if (retval != j)
864 goto out_free_ph;
865
866 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
867 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
868 if (j != 1)
869 goto out_free_ph;
870
871 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
872
873 /* Now use mmap to map the library into memory. */
874 down_write(¤t->mm->mmap_sem);
875 error = do_mmap(file,
876 ELF_PAGESTART(elf_phdata->p_vaddr),
877 (elf_phdata->p_filesz +
878 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
879 PROT_READ | PROT_WRITE | PROT_EXEC,
880 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
881 (elf_phdata->p_offset -
882 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
883 up_write(¤t->mm->mmap_sem);
884 if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
885 goto out_free_ph;
886
887 elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
888 padzero(elf_bss);
889
890 len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
891 bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
892 if (bss > len)
893 do_brk(len, bss - len);
894 error = 0;
895
896 out_free_ph:
897 kfree(elf_phdata);
898 out:
899 return error;
900 }
901
902 /*
903 * Note that some platforms still use traditional core dumps and not
904 * the ELF core dump. Each platform can select it as appropriate.
905 */
906 #ifdef USE_ELF_CORE_DUMP
907
908 /*
909 * ELF core dumper
910 *
911 * Modelled on fs/exec.c:aout_core_dump()
912 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
913 */
914 /*
915 * These are the only things you should do on a core-file: use only these
916 * functions to write out all the necessary info.
917 */
918 static int dump_write(struct file *file, const void *addr, int nr)
919 {
920 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
921 }
922
923 static int dump_seek(struct file *file, off_t off)
924 {
925 if (file->f_op->llseek) {
926 if (file->f_op->llseek(file, off, 0) != off)
927 return 0;
928 } else
929 file->f_pos = off;
930 return 1;
931 }
932
933 /*
934 * Decide whether a segment is worth dumping; default is yes to be
935 * sure (missing info is worse than too much; etc).
936 * Personally I'd include everything, and use the coredump limit...
937 *
938 * I think we should skip something. But I am not sure how. H.J.
939 */
static inline int maydump(struct vm_area_struct *vma)
{
	/*
	 * If we may not read the contents, don't allow us to dump
	 * them either. "dump_write()" can't handle it anyway.
	 */
	if (!(vma->vm_flags & VM_READ))
		return 0;

	/* Do not dump I/O mapped devices! -DaveM */
	if (vma->vm_flags & VM_IO)
		return 0;
#if 1
	/* Writable or growable (stack/heap-like) mappings: dump them. */
	if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
		return 1;
	/* NOTE(review): VM_READ is guaranteed set by the first check
	   above, so this test always matches — read-only mappings such
	   as program text are therefore never dumped.  Confirm that is
	   the intended policy rather than a leftover. */
	if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
		return 0;
#endif
	return 1;
}
960
961 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
962
/* An ELF note held in memory before being serialized into the core
   file by writenote(). */
struct memelfnote
{
	const char *name;	/* note name, e.g. "CORE" (written unpadded) */
	int type;		/* NT_* note type */
	unsigned int datasz;	/* size of *data in bytes */
	void *data;		/* payload written after name, 4-byte padded */
};
971
972 static int notesize(struct memelfnote *en)
973 {
974 int sz;
975
976 sz = sizeof(struct elf_note);
977 sz += roundup(strlen(en->name), 4);
978 sz += roundup(en->datasz, 4);
979
980 return sz;
981 }
982
983 /* #define DEBUG */
984
985 #ifdef DEBUG
/* Debug-only helper: print each slot of an elf_greg_t register block
   with its i386 register name, in hex and decimal. */
static void dump_regs(const char *str, elf_greg_t *r)
{
	int i;
	/* Names follow the i386 pt_regs/elf_gregset_t slot order. */
	static const char *regs[] = { "ebx", "ecx", "edx", "esi", "edi", "ebp",
				      "eax", "ds", "es", "fs", "gs",
				      "orig_eax", "eip", "cs",
				      "efl", "uesp", "ss"};
	printk("Registers: %s\n", str);

	for(i = 0; i < ELF_NGREG; i++)
	{
		unsigned long val = r[i];
		printk(" %-2d %-5s=%08lx %lu\n", i, regs[i], val, val);
	}
}
1001 #endif
1002
1003 #define DUMP_WRITE(addr, nr) \
1004 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1005 #define DUMP_SEEK(off) \
1006 do { if (!dump_seek(file, (off))) return 0; } while(0)
1007
/* Serialize one in-memory note to the core file: elf_note header, the
   (unterminated) name, then the descriptor data, seeking past padding
   so name and data each end 4-byte aligned.  Returns 1 on success; the
   DUMP_WRITE/DUMP_SEEK macros return 0 early on write/seek failure. */
static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name);
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}
1025 #undef DUMP_WRITE
1026 #undef DUMP_SEEK
1027
1028 #define DUMP_WRITE(addr, nr) \
1029 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1030 goto end_coredump;
1031 #define DUMP_SEEK(off) \
1032 if (!dump_seek(file, (off))) \
1033 goto end_coredump;
1034 /*
1035 * Actual dumper
1036 *
1037 * This is a two-pass process; first we find the offsets of the bits,
1038 * and then they are actually written out. If we run out of core limit
1039 * we just truncate.
1040 */
1041 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1042 {
1043 int has_dumped = 0;
1044 mm_segment_t fs;
1045 int segs;
1046 size_t size = 0;
1047 int i;
1048 struct vm_area_struct *vma;
1049 struct elfhdr elf;
1050 off_t offset = 0, dataoff;
1051 unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
1052 int numnote = 4;
1053 struct memelfnote notes[4];
1054 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1055 elf_fpregset_t fpu; /* NT_PRFPREG */
1056 struct elf_prpsinfo psinfo; /* NT_PRPSINFO */
1057
1058 /* first copy the parameters from user space */
1059 memset(&psinfo, 0, sizeof(psinfo));
1060 {
1061 int i, len;
1062
1063 len = current->mm->arg_end - current->mm->arg_start;
1064 if (len >= ELF_PRARGSZ)
1065 len = ELF_PRARGSZ-1;
1066 copy_from_user(&psinfo.pr_psargs,
1067 (const char *)current->mm->arg_start, len);
1068 for(i = 0; i < len; i++)
1069 if (psinfo.pr_psargs[i] == 0)
1070 psinfo.pr_psargs[i] = ' ';
1071 psinfo.pr_psargs[len] = 0;
1072
1073 }
1074
1075 memset(&prstatus, 0, sizeof(prstatus));
1076 /*
1077 * This transfers the registers from regs into the standard
1078 * coredump arrangement, whatever that is.
1079 */
1080 #ifdef ELF_CORE_COPY_REGS
1081 ELF_CORE_COPY_REGS(prstatus.pr_reg, regs)
1082 #else
1083 if (sizeof(elf_gregset_t) != sizeof(struct pt_regs))
1084 {
1085 printk("sizeof(elf_gregset_t) (%ld) != sizeof(struct pt_regs) (%ld)\n",
1086 (long)sizeof(elf_gregset_t), (long)sizeof(struct pt_regs));
1087 }
1088 else
1089 *(struct pt_regs *)&prstatus.pr_reg = *regs;
1090 #endif
1091
1092 /* now stop all vm operations */
1093 down_write(¤t->mm->mmap_sem);
1094 segs = current->mm->map_count;
1095
1096 #ifdef DEBUG
1097 printk("elf_core_dump: %d segs %lu limit\n", segs, limit);
1098 #endif
1099
1100 /* Set up header */
1101 memcpy(elf.e_ident, ELFMAG, SELFMAG);
1102 elf.e_ident[EI_CLASS] = ELF_CLASS;
1103 elf.e_ident[EI_DATA] = ELF_DATA;
1104 elf.e_ident[EI_VERSION] = EV_CURRENT;
1105 memset(elf.e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1106
1107 elf.e_type = ET_CORE;
1108 elf.e_machine = ELF_ARCH;
1109 elf.e_version = EV_CURRENT;
1110 elf.e_entry = 0;
1111 elf.e_phoff = sizeof(elf);
1112 elf.e_shoff = 0;
1113 elf.e_flags = 0;
1114 elf.e_ehsize = sizeof(elf);
1115 elf.e_phentsize = sizeof(struct elf_phdr);
1116 elf.e_phnum = segs+1; /* Include notes */
1117 elf.e_shentsize = 0;
1118 elf.e_shnum = 0;
1119 elf.e_shstrndx = 0;
1120
1121 fs = get_fs();
1122 set_fs(KERNEL_DS);
1123
1124 has_dumped = 1;
1125 current->flags |= PF_DUMPCORE;
1126
1127 DUMP_WRITE(&elf, sizeof(elf));
1128 offset += sizeof(elf); /* Elf header */
1129 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
1130
1131 /*
1132 * Set up the notes in similar form to SVR4 core dumps made
1133 * with info from their /proc.
1134 */
1135
1136 notes[0].name = "CORE";
1137 notes[0].type = NT_PRSTATUS;
1138 notes[0].datasz = sizeof(prstatus);
1139 notes[0].data = &prstatus;
1140 prstatus.pr_info.si_signo = prstatus.pr_cursig = signr;
1141 prstatus.pr_sigpend = current->pending.signal.sig[0];
1142 prstatus.pr_sighold = current->blocked.sig[0];
1143 psinfo.pr_pid = prstatus.pr_pid = current->pid;
1144 psinfo.pr_ppid = prstatus.pr_ppid = current->p_pptr->pid;
1145 psinfo.pr_pgrp = prstatus.pr_pgrp = current->pgrp;
1146 psinfo.pr_sid = prstatus.pr_sid = current->session;
1147 prstatus.pr_utime.tv_sec = CT_TO_SECS(current->times.tms_utime);
1148 prstatus.pr_utime.tv_usec = CT_TO_USECS(current->times.tms_utime);
1149 prstatus.pr_stime.tv_sec = CT_TO_SECS(current->times.tms_stime);
1150 prstatus.pr_stime.tv_usec = CT_TO_USECS(current->times.tms_stime);
1151 prstatus.pr_cutime.tv_sec = CT_TO_SECS(current->times.tms_cutime);
1152 prstatus.pr_cutime.tv_usec = CT_TO_USECS(current->times.tms_cutime);
1153 prstatus.pr_cstime.tv_sec = CT_TO_SECS(current->times.tms_cstime);
1154 prstatus.pr_cstime.tv_usec = CT_TO_USECS(current->times.tms_cstime);
1155
1156 #ifdef DEBUG
1157 dump_regs("Passed in regs", (elf_greg_t *)regs);
1158 dump_regs("prstatus regs", (elf_greg_t *)&prstatus.pr_reg);
1159 #endif
1160
1161 notes[1].name = "CORE";
1162 notes[1].type = NT_PRPSINFO;
1163 notes[1].datasz = sizeof(psinfo);
1164 notes[1].data = &psinfo;
1165 i = current->state ? ffz(~current->state) + 1 : 0;
1166 psinfo.pr_state = i;
1167 psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i];
1168 psinfo.pr_zomb = psinfo.pr_sname == 'Z';
1169 psinfo.pr_nice = current->nice;
1170 psinfo.pr_flag = current->flags;
1171 psinfo.pr_uid = NEW_TO_OLD_UID(current->uid);
1172 psinfo.pr_gid = NEW_TO_OLD_GID(current->gid);
1173 strncpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));
1174
1175 notes[2].name = "CORE";
1176 notes[2].type = NT_TASKSTRUCT;
1177 notes[2].datasz = sizeof(*current);
1178 notes[2].data = current;
1179
1180 /* Try to dump the FPU. */
1181 prstatus.pr_fpvalid = dump_fpu (regs, &fpu);
1182 if (!prstatus.pr_fpvalid)
1183 {
1184 numnote--;
1185 }
1186 else
1187 {
1188 notes[3].name = "CORE";
1189 notes[3].type = NT_PRFPREG;
1190 notes[3].datasz = sizeof(fpu);
1191 notes[3].data = &fpu;
1192 }
1193
1194 /* Write notes phdr entry */
1195 {
1196 struct elf_phdr phdr;
1197 int sz = 0;
1198
1199 for(i = 0; i < numnote; i++)
1200 sz += notesize(¬es[i]);
1201
1202 phdr.p_type = PT_NOTE;
1203 phdr.p_offset = offset;
1204 phdr.p_vaddr = 0;
1205 phdr.p_paddr = 0;
1206 phdr.p_filesz = sz;
1207 phdr.p_memsz = 0;
1208 phdr.p_flags = 0;
1209 phdr.p_align = 0;
1210
1211 offset += phdr.p_filesz;
1212 DUMP_WRITE(&phdr, sizeof(phdr));
1213 }
1214
1215 /* Page-align dumped data */
1216 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
1217
1218 /* Write program headers for segments dump */
1219 for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1220 struct elf_phdr phdr;
1221 size_t sz;
1222
1223 sz = vma->vm_end - vma->vm_start;
1224
1225 phdr.p_type = PT_LOAD;
1226 phdr.p_offset = offset;
1227 phdr.p_vaddr = vma->vm_start;
1228 phdr.p_paddr = 0;
1229 phdr.p_filesz = maydump(vma) ? sz : 0;
1230 phdr.p_memsz = sz;
1231 offset += phdr.p_filesz;
1232 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1233 if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1234 if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1235 phdr.p_align = ELF_EXEC_PAGESIZE;
1236
1237 DUMP_WRITE(&phdr, sizeof(phdr));
1238 }
1239
1240 for(i = 0; i < numnote; i++)
1241 if (!writenote(¬es[i], file))
1242 goto end_coredump;
1243
1244 DUMP_SEEK(dataoff);
1245
1246 for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1247 unsigned long addr;
1248
1249 if (!maydump(vma))
1250 continue;
1251
1252 #ifdef DEBUG
1253 printk("elf_core_dump: writing %08lx-%08lx\n", vma->vm_start, vma->vm_end);
1254 #endif
1255
1256 for (addr = vma->vm_start;
1257 addr < vma->vm_end;
1258 addr += PAGE_SIZE) {
1259 struct page* page;
1260 struct vm_area_struct *vma;
1261
1262 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1263 &page, &vma) <= 0) {
1264 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1265 } else {
1266 if (page == ZERO_PAGE(addr)) {
1267 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1268 } else {
1269 void *kaddr;
1270 flush_cache_page(vma, addr);
1271 kaddr = kmap(page);
1272 DUMP_WRITE(kaddr, PAGE_SIZE);
1273 flush_page_to_ram(page);
1274 kunmap(page);
1275 }
1276 put_page(page);
1277 }
1278 }
1279 }
1280
1281 if ((off_t) file->f_pos != offset) {
1282 /* Sanity check */
1283 printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1284 (off_t) file->f_pos, offset);
1285 }
1286
1287 end_coredump:
1288 set_fs(fs);
1289 up_write(¤t->mm->mmap_sem);
1290 return has_dumped;
1291 }
1292 #endif /* USE_ELF_CORE_DUMP */
1293
1294 static int __init init_elf_binfmt(void)
1295 {
1296 return register_binfmt(&elf_format);
1297 }
1298
1299 static void __exit exit_elf_binfmt(void)
1300 {
1301 /* Remove the COFF and ELF loaders. */
1302 unregister_binfmt(&elf_format);
1303 }
1304
/* Wire the loader into module load/unload and declare the license. */
module_init(init_elf_binfmt)
module_exit(exit_elf_binfmt)
MODULE_LICENSE("GPL");
Cache object: ce3d6c2ed9312c7bfcb3f2ba4724c0da
|