1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright 1996-1998 John D. Polstra.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 * from: src/sys/i386/i386/elf_machdep.c,v 1.20 2004/08/11 02:35:05 marcel
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include <sys/param.h>
34 #include <sys/kernel.h>
35 #include <sys/systm.h>
36 #include <sys/exec.h>
37 #include <sys/imgact.h>
38 #include <sys/linker.h>
39 #include <sys/sysent.h>
40 #include <sys/imgact_elf.h>
41 #include <sys/proc.h>
42 #include <sys/syscall.h>
43 #include <sys/signalvar.h>
44 #include <sys/vnode.h>
45
46 #include <vm/vm.h>
47 #include <vm/pmap.h>
48 #include <vm/vm_param.h>
49
50 #include <machine/elf.h>
51 #include <machine/md_var.h>
52 #include <machine/cache.h>
53
/*
 * System-call vector describing the native FreeBSD ELF ABI on MIPS.
 * Selected by the brand entry below; n64 kernels advertise a 64-bit
 * (LP64) image, everything else a 32-bit (ILP32) one.
 */
static struct sysentvec elf_freebsd_sysvec = {
	.sv_size	= SYS_MAXSYSCALL,
	.sv_table	= sysent,
	.sv_transtrap	= NULL,
	.sv_fixup	= __elfN(freebsd_fixup),
	.sv_sendsig	= sendsig,
	.sv_sigcode	= sigcode,
	.sv_szsigcode	= &szsigcode,
#ifdef __mips_n64
	.sv_name	= "FreeBSD ELF64",
#else
	.sv_name	= "FreeBSD ELF32",
#endif
	.sv_coredump	= __elfN(coredump),
	.sv_imgact_try	= NULL,
	.sv_minsigstksz	= MINSIGSTKSZ,
	.sv_minuser	= VM_MIN_ADDRESS,
	.sv_maxuser	= VM_MAXUSER_ADDRESS,
	.sv_usrstack	= USRSTACK,
	.sv_psstrings	= PS_STRINGS,
	.sv_stackprot	= VM_PROT_ALL,
	.sv_copyout_auxargs = __elfN(freebsd_copyout_auxargs),
	.sv_copyout_strings = exec_copyout_strings,
	.sv_setregs	= exec_setregs,
	.sv_fixlimit	= NULL,
	.sv_maxssiz	= NULL,
	/* ASLR and versioned RNG seeding are supported by this ABI. */
	.sv_flags	= SV_ABI_FREEBSD | SV_ASLR | SV_RNG_SEED_VER |
#ifdef __mips_n64
	    SV_LP64,
#else
	    SV_ILP32,
#endif
	.sv_set_syscall_retval = cpu_set_syscall_retval,
	.sv_fetch_syscall_args = cpu_fetch_syscall_args,
	.sv_syscallnames = syscallnames,
	.sv_schedtail	= NULL,
	.sv_thread_detach = NULL,
	.sv_trap	= NULL,
};
93
/*
 * Brand entry matching native FreeBSD ELF images on MIPS; routes
 * matching executables to elf_freebsd_sysvec above.
 */
static __ElfN(Brandinfo) freebsd_brand_info = {
	.brand		= ELFOSABI_FREEBSD,
	.machine	= EM_MIPS,
	.compat_3_brand	= "FreeBSD",
	.emul_path	= NULL,
	.interp_path	= "/libexec/ld-elf.so.1",
	.sysvec		= &elf_freebsd_sysvec,
	.interp_newpath	= NULL,
	.brand_note	= &__elfN(freebsd_brandnote),
	/* Dynamic executables allowed; match by the .note brand too. */
	.flags		= BI_CAN_EXEC_DYN | BI_BRAND_NOTE
};
105
/* Register the FreeBSD brand with the ELF image activator at boot. */
SYSINIT(elf, SI_SUB_EXEC, SI_ORDER_ANY,
    (sysinit_cfunc_t) __elfN(insert_brand_entry),
    &freebsd_brand_info);
109
/*
 * Machine-dependent per-thread core-dump hook; MIPS contributes no
 * extra per-thread notes, so this is deliberately a no-op.
 */
void
__elfN(dump_thread)(struct thread *td __unused, void *dst __unused,
    size_t *off __unused)
{
}
115
116 /*
117 * The following MIPS relocation code for tracking multiple
118 * consecutive HI32/LO32 entries is because of the following:
119 *
120 * https://dmz-portal.mips.com/wiki/MIPS_relocation_types
121 *
122 * ===
123 *
124 * + R_MIPS_HI16
125 *
126 * An R_MIPS_HI16 must be followed eventually by an associated R_MIPS_LO16
127 * relocation record in the same SHT_REL section. The contents of the two
128 * fields to be relocated are combined to form a full 32-bit addend AHL.
129 * An R_MIPS_LO16 entry which does not immediately follow a R_MIPS_HI16 is
130 * combined with the most recent one encountered, i.e. multiple R_MIPS_LO16
131 * entries may be associated with a single R_MIPS_HI16. Use of these
132 * relocation types in a SHT_REL section is discouraged and may be
133 * forbidden to avoid this complication.
134 *
135 * A GNU extension allows multiple R_MIPS_HI16 records to share the same
136 * R_MIPS_LO16 relocation record(s). The association works like this within
137 * a single relocation section:
138 *
139 * + From the beginning of the section moving to the end of the section,
140 * until R_MIPS_LO16 is not found each found R_MIPS_HI16 relocation will
141 * be associated with the first R_MIPS_LO16.
142 *
143 * + Until another R_MIPS_HI16 record is found all found R_MIPS_LO16
144 * relocations found are associated with the last R_MIPS_HI16.
145 *
146 * ===
147 *
148 * This is so gcc can do dead code detection/removal without having to
149 * generate HI/LO pairs even if one of them would be deleted.
150 *
151 * So, the summary is:
152 *
153 * + A HI16 entry must occur before any LO16 entries;
154 * + Multiple consecutive HI16 RELA entries need to be buffered until the
155 * first LO16 RELA entry occurs - and then all HI16 RELA relocations use
 *   the offset in the LO16 RELA for calculating their offsets;
157 * + The last HI16 RELA entry before a LO16 RELA entry is used (the AHL)
158 * for the first subsequent LO16 calculation;
159 * + If multiple consecutive LO16 RELA entries occur, only the first
160 * LO16 RELA entry triggers an update of buffered HI16 RELA entries;
161 * any subsequent LO16 RELA entry before another HI16 RELA entry will
162 * not cause any further updates to the HI16 RELA entries.
163 *
164 * Additionally, flush out any outstanding HI16 entries that don't have
165 * a LO16 entry in case some garbage entries are left in the file.
166 */
167
168 struct mips_tmp_reloc;
169 struct mips_tmp_reloc {
170 struct mips_tmp_reloc *next;
171
172 Elf_Addr ahl;
173 Elf32_Addr *where_hi16;
174 };
175
176 static struct mips_tmp_reloc *ml = NULL;
177
178 /*
179 * Add a temporary relocation (ie, a HI16 reloc type.)
180 */
181 static int
182 mips_tmp_reloc_add(Elf_Addr ahl, Elf32_Addr *where_hi16)
183 {
184 struct mips_tmp_reloc *r;
185
186 r = malloc(sizeof(struct mips_tmp_reloc), M_TEMP, M_NOWAIT);
187 if (r == NULL) {
188 printf("%s: failed to malloc\n", __func__);
189 return (0);
190 }
191
192 r->ahl = ahl;
193 r->where_hi16 = where_hi16;
194 r->next = ml;
195 ml = r;
196
197 return (1);
198 }
199
200 /*
201 * Flush the temporary relocation list.
202 *
203 * This should be done after a file is completely loaded
204 * so no stale relocations exist to confuse the next
205 * load.
206 */
207 static void
208 mips_tmp_reloc_flush(void)
209 {
210 struct mips_tmp_reloc *r, *rn;
211
212 r = ml;
213 ml = NULL;
214 while (r != NULL) {
215 rn = r->next;
216 free(r, M_TEMP);
217 r = rn;
218 }
219 }
220
221 /*
222 * Get an entry from the reloc list; or NULL if we've run out.
223 */
224 static struct mips_tmp_reloc *
225 mips_tmp_reloc_get(void)
226 {
227 struct mips_tmp_reloc *r;
228
229 r = ml;
230 if (r == NULL)
231 return (NULL);
232 ml = ml->next;
233 return (r);
234 }
235
236 /*
237 * Free a relocation entry.
238 */
239 static void
240 mips_tmp_reloc_free(struct mips_tmp_reloc *r)
241 {
242
243 free(r, M_TEMP);
244 }
245
246 bool
247 elf_is_ifunc_reloc(Elf_Size r_info __unused)
248 {
249
250 return (false);
251 }
252
/*
 * Process one ELF relocation, with or without an explicit addend.
 *
 * lf        - kernel linker file being relocated
 * relocbase - load base; r_offset values are relative to it
 * data      - points at an Elf_Rel (ELF_RELOC_REL) or an Elf_Rela
 *             (ELF_RELOC_RELA), selected by 'type'
 * type      - ELF_RELOC_REL or ELF_RELOC_RELA; anything else panics
 * local     - unused on MIPS (kept for the shared elf_reloc interface)
 * lookup    - resolves a symbol index to its address
 *
 * Returns 0 on success, -1 on lookup/allocation failure or an
 * unsupported relocation type.
 */
static int
elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
    int type, int local, elf_lookup_fn lookup)
{
	Elf32_Addr *where = (Elf32_Addr *)NULL;
	Elf_Addr addr;
	Elf_Addr addend = (Elf_Addr)0;
	Elf_Word rtype = (Elf_Word)0, symidx;
	struct mips_tmp_reloc *r;
	const Elf_Rel *rel = NULL;
	const Elf_Rela *rela = NULL;
	int error;

	/*
	 * Store the last seen AHL from a HI16 for LO16 processing.
	 * NOTE(review): static state presumes relocations for a file are
	 * processed sequentially by a single thread -- confirm against
	 * the kernel linker's locking.
	 */
	static Elf_Addr last_ahl;

	/* Decode the entry; .rel keeps the addend in the target word. */
	switch (type) {
	case ELF_RELOC_REL:
		rel = (const Elf_Rel *)data;
		where = (Elf32_Addr *) (relocbase + rel->r_offset);
		rtype = ELF_R_TYPE(rel->r_info);
		symidx = ELF_R_SYM(rel->r_info);
		switch (rtype) {
		case R_MIPS_64:
			/* R_MIPS_64 carries a full 64-bit in-place addend. */
			addend = *(Elf64_Addr *)where;
			break;
		default:
			addend = *where;
			break;
		}

		break;
	case ELF_RELOC_RELA:
		rela = (const Elf_Rela *)data;
		where = (Elf32_Addr *) (relocbase + rela->r_offset);
		addend = rela->r_addend;
		rtype = ELF_R_TYPE(rela->r_info);
		symidx = ELF_R_SYM(rela->r_info);
		break;
	default:
		panic("unknown reloc type %d\n", type);
	}

	switch (rtype) {
	case R_MIPS_NONE:	/* none */
		break;

	case R_MIPS_32:		/* S + A */
		error = lookup(lf, symidx, 1, &addr);
		if (error != 0)
			return (-1);
		addr += addend;
		/* Skip the store when the word is already correct. */
		if (*where != addr)
			*where = (Elf32_Addr)addr;
		break;

	case R_MIPS_26:		/* ((A << 2) | (P & 0xf0000000) + S) >> 2 */
		error = lookup(lf, symidx, 1, &addr);
		if (error != 0)
			return (-1);

		addend &= 0x03ffffff;
		/*
		 * Addendum for .rela R_MIPS_26 is not shifted right
		 */
		if (rela == NULL)
			addend <<= 2;

		/* Combine the jump target with the region bits of P. */
		addr += ((Elf_Addr)where & 0xf0000000) | addend;
		addr >>= 2;

		/* Patch only the 26-bit target field of the instruction. */
		*where &= ~0x03ffffff;
		*where |= addr & 0x03ffffff;
		break;

	case R_MIPS_64:		/* S + A */
		error = lookup(lf, symidx, 1, &addr);
		if (error != 0)
			return (-1);
		addr += addend;
		if (*(Elf64_Addr*)where != addr)
			*(Elf64_Addr*)where = addr;
		break;

	/*
	 * Handle the two GNU extension cases:
	 *
	 * + Multiple HI16s followed by a LO16, and
	 * + A HI16 followed by multiple LO16s.
	 *
	 * The former is tricky - the HI16 relocations need
	 * to be buffered until a LO16 occurs, at which point
	 * each HI16 is replayed against the LO16 relocation entry
	 * (with the relevant overflow information.)
	 *
	 * The latter should be easy to handle - when the
	 * first LO16 is seen, write out and flush the
	 * HI16 buffer.  Any subsequent LO16 entries will
	 * find a blank relocation buffer.
	 *
	 */

	case R_MIPS_HI16:	/* ((AHL + S) - ((short)(AHL + S)) >> 16 */
		if (rela != NULL) {
			/* .rela: the addend is explicit, patch now. */
			error = lookup(lf, symidx, 1, &addr);
			if (error != 0)
				return (-1);
			addr += addend;
			/* Round-to-nearest high half (+0x8000 carry). */
			*where &= 0xffff0000;
			*where |= ((((long long) addr + 0x8000LL) >> 16) & 0xffff);
		} else {
			/*
			 * Add a temporary relocation to the list;
			 * will pop it off / free the list when
			 * we've found a suitable HI16.
			 */
			if (mips_tmp_reloc_add(addend << 16, where) == 0)
				return (-1);
			/*
			 * Track the last seen HI16 AHL for use by
			 * the first LO16 AHL calculation.
			 *
			 * The assumption is any intermediary deleted
			 * LO16's were optimised out, so the last
			 * HI16 before the LO16 is the "true" relocation
			 * entry to use for that LO16 write.
			 */
			last_ahl = addend << 16;
		}
		break;

	case R_MIPS_LO16:	/* AHL + S */
		if (rela != NULL) {
			/* .rela: explicit addend; patch the low half. */
			error = lookup(lf, symidx, 1, &addr);
			if (error != 0)
				return (-1);
			addr += addend;
			*where &= 0xffff0000;
			*where |= addr & 0xffff;
		} else {
			Elf_Addr tmp_ahl;
			Elf_Addr tmp_addend;

			/* Sign-extended LO16 combines with the saved HI16. */
			tmp_ahl = last_ahl + (int16_t) addend;
			error = lookup(lf, symidx, 1, &addr);
			if (error != 0)
				return (-1);

			tmp_addend = addend & 0xffff0000;

			/* Use the last seen ahl for calculating addend */
			tmp_addend |= (uint16_t)(tmp_ahl + addr);
			*where = tmp_addend;

			/*
			 * This logic implements the "we saw multiple HI16
			 * before a LO16" assignment /and/ "we saw multiple
			 * LO16s".
			 *
			 * Multiple LO16s will be handled as a blank
			 * relocation list.
			 *
			 * Multiple HI16's are iterated over here.
			 */
			while ((r = mips_tmp_reloc_get()) != NULL) {
				Elf_Addr rahl;

				/*
				 * We have the ahl from the HI16 entry, so
				 * offset it by the 16 bits of the low ahl.
				 */
				rahl = r->ahl;
				rahl += (int16_t) addend;

				/*
				 * Write back the carry-adjusted high half
				 * into the buffered HI16 instruction.
				 */
				tmp_addend = *(r->where_hi16);
				tmp_addend &= 0xffff0000;
				tmp_addend |= ((rahl + addr) -
				    (int16_t)(rahl + addr)) >> 16;
				*(r->where_hi16) = tmp_addend;
				mips_tmp_reloc_free(r);
			}
		}

		break;

	case R_MIPS_HIGHER:	/* %higher(A+S) */
		error = lookup(lf, symidx, 1, &addr);
		if (error != 0)
			return (-1);
		addr += addend;
		/* Bits 32..47, with carry from the lower halves. */
		*where &= 0xffff0000;
		*where |= (((long long)addr + 0x80008000LL) >> 32) & 0xffff;
		break;

	case R_MIPS_HIGHEST:	/* %highest(A+S) */
		error = lookup(lf, symidx, 1, &addr);
		if (error != 0)
			return (-1);
		addr += addend;
		/* Bits 48..63, with carry from all lower halves. */
		*where &= 0xffff0000;
		*where |= (((long long)addr + 0x800080008000LL) >> 48) & 0xffff;
		break;

	default:
		printf("kldload: unexpected relocation type %d, "
		    "symbol index %d\n", rtype, symidx);
		return (-1);
	}

	return (0);
}
465
466 int
467 elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type,
468 elf_lookup_fn lookup)
469 {
470
471 return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup));
472 }
473
474 int
475 elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data,
476 int type, elf_lookup_fn lookup)
477 {
478
479 return (elf_reloc_internal(lf, relocbase, data, type, 1, lookup));
480 }
481
482 int
483 elf_cpu_load_file(linker_file_t lf __unused)
484 {
485
486 /*
487 * Sync the I and D caches to make sure our relocations are visible.
488 */
489 mips_icache_sync_all();
490
491 /* Flush outstanding relocations */
492 mips_tmp_reloc_flush();
493
494 return (0);
495 }
496
497 int
498 elf_cpu_unload_file(linker_file_t lf __unused)
499 {
500
501 return (0);
502 }
503
504 int
505 elf_cpu_parse_dynamic(caddr_t loadbase __unused, Elf_Dyn *dynamic __unused)
506 {
507
508 return (0);
509 }