FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/machdep.c
1 /* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */
2
3 /*-
4 * Copyright (c) 2004 Olivier Houchard
5 * Copyright (c) 1994-1998 Mark Brinicombe.
6 * Copyright (c) 1994 Brini.
7 * All rights reserved.
8 *
9 * This code is derived from software written for Brini by Mark Brinicombe
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by Mark Brinicombe
22 * for the NetBSD Project.
23 * 4. The name of the company nor the name of the author may be used to
24 * endorse or promote products derived from this software without specific
25 * prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
28 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
29 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * Machine dependent functions for kernel setup
40 *
41 * Created : 17/09/94
42 * Updated : 18/04/01 updated for new wscons
43 */
44
45 #include "opt_compat.h"
46 #include "opt_ddb.h"
47
48 #include <sys/cdefs.h>
49 __FBSDID("$FreeBSD$");
50
51 #include <sys/param.h>
52 #include <sys/proc.h>
53 #include <sys/systm.h>
54 #include <sys/bio.h>
55 #include <sys/buf.h>
56 #include <sys/bus.h>
57 #include <sys/cons.h>
58 #include <sys/cpu.h>
59 #include <sys/exec.h>
60 #include <sys/imgact.h>
61 #include <sys/kernel.h>
62 #include <sys/ktr.h>
63 #include <sys/linker.h>
64 #include <sys/lock.h>
65 #include <sys/malloc.h>
66 #include <sys/mutex.h>
67 #include <sys/pcpu.h>
68 #include <sys/ptrace.h>
69 #include <sys/signalvar.h>
70 #include <sys/syscallsubr.h>
71 #include <sys/sysent.h>
72 #include <sys/sysproto.h>
73 #include <sys/uio.h>
74
75 #include <vm/vm.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_object.h>
79 #include <vm/vm_page.h>
80 #include <vm/vm_pager.h>
81 #include <vm/vnode_pager.h>
82
83 #include <machine/armreg.h>
84 #include <machine/cpu.h>
85 #include <machine/machdep.h>
86 #include <machine/md_var.h>
87 #include <machine/metadata.h>
88 #include <machine/pcb.h>
89 #include <machine/pmap.h>
90 #include <machine/reg.h>
91 #include <machine/trap.h>
92 #include <machine/undefined.h>
93 #include <machine/vmparam.h>
94 #include <machine/sysarch.h>
95
96 uint32_t cpu_reset_address = 0;
97 int cold = 1;
98 vm_offset_t vector_page;
99
100 long realmem = 0;
101
102 int (*_arm_memcpy)(void *, void *, int, int) = NULL;
103 int (*_arm_bzero)(void *, int, int) = NULL;
104 int _min_memcpy_size = 0;
105 int _min_bzero_size = 0;
106
107 extern int *end;
108 #ifdef DDB
109 extern vm_offset_t ksym_start, ksym_end;
110 #endif
111
112 void
113 sendsig(catcher, ksi, mask)
114 sig_t catcher;
115 ksiginfo_t *ksi;
116 sigset_t *mask;
117 {
118 struct thread *td;
119 struct proc *p;
120 struct trapframe *tf;
121 struct sigframe *fp, frame;
122 struct sigacts *psp;
123 int onstack;
124 int sig;
125 int code;
126
127 td = curthread;
128 p = td->td_proc;
129 PROC_LOCK_ASSERT(p, MA_OWNED);
130 sig = ksi->ksi_signo;
131 code = ksi->ksi_code;
132 psp = p->p_sigacts;
133 mtx_assert(&psp->ps_mtx, MA_OWNED);
134 tf = td->td_frame;
135 onstack = sigonstack(tf->tf_usr_sp);
136
137 CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
138 catcher, sig);
139
140 /* Allocate and validate space for the signal handler context. */
141 	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
142 SIGISMEMBER(psp->ps_sigonstack, sig)) {
143 fp = (struct sigframe *)(td->td_sigstk.ss_sp +
144 td->td_sigstk.ss_size);
145 #if defined(COMPAT_43)
146 td->td_sigstk.ss_flags |= SS_ONSTACK;
147 #endif
148 } else
149 fp = (struct sigframe *)td->td_frame->tf_usr_sp;
150
151 /* make room on the stack */
152 fp--;
153
154 /* make the stack aligned */
155 fp = (struct sigframe *)STACKALIGN(fp);
156 /* Populate the siginfo frame. */
157 get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
158 frame.sf_si = ksi->ksi_info;
159 frame.sf_uc.uc_sigmask = *mask;
160 frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK )
161 ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
162 frame.sf_uc.uc_stack = td->td_sigstk;
163 mtx_unlock(&psp->ps_mtx);
164 PROC_UNLOCK(td->td_proc);
165
166 /* Copy the sigframe out to the user's stack. */
167 if (copyout(&frame, fp, sizeof(*fp)) != 0) {
168 /* Process has trashed its stack. Kill it. */
169 CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
170 PROC_LOCK(p);
171 sigexit(td, SIGILL);
172 }
173
174 /* Translate the signal if appropriate. */
175 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
176 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
177
178 /*
179 * Build context to run handler in. We invoke the handler
180 * directly, only returning via the trampoline. Note the
181 * trampoline version numbers are coordinated with machine-
182 * dependent code in libc.
183 */
184
185 tf->tf_r0 = sig;
186 tf->tf_r1 = (register_t)&fp->sf_si;
187 tf->tf_r2 = (register_t)&fp->sf_uc;
188
189 /* the trampoline uses r5 as the uc address */
190 tf->tf_r5 = (register_t)&fp->sf_uc;
191 tf->tf_pc = (register_t)catcher;
192 tf->tf_usr_sp = (register_t)fp;
193 tf->tf_usr_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));
194
195 CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
196 tf->tf_usr_sp);
197
198 PROC_LOCK(p);
199 mtx_lock(&psp->ps_mtx);
200 }
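
A minimal userland sketch (not part of machdep.c) of the path that drives sendsig(): registering an alternate stack with sigaltstack() and a SA_SIGINFO handler with SA_ONSTACK makes the alternate-stack branch above choose the sigframe location, and the r0/r1/r2 values set up above arrive as the handler's three arguments. Standard POSIX calls only; the names and output are illustrative.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
handler(int sig, siginfo_t *si, void *ucp)
{
	/* sendsig() passed sig in r0, &sf_si in r1 and &sf_uc in r2. */
	printf("sig %d, si_code %d, ucontext %p\n", sig, si->si_code, ucp);
}

int
main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) != 0)
		return (1);

	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;	/* deliver this signal on the alternate stack */
	if (sigaction(SIGUSR1, &sa, NULL) != 0)
		return (1);

	raise(SIGUSR1);
	return (0);
}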
201
202 struct kva_md_info kmi;
203
204 /*
205 * arm32_vector_init:
206 *
207 * Initialize the vector page, and select whether or not to
208 * relocate the vectors.
209 *
210 * NOTE: The vector page is expected to already be mapped at its
211 * final destination.
212 */
213
214 extern unsigned int page0[], page0_data[];
215 void
216 arm_vector_init(vm_offset_t va, int which)
217 {
218 unsigned int *vectors = (int *) va;
219 unsigned int *vectors_data = vectors + (page0_data - page0);
220 int vec;
221
222 /*
223 * Loop through the vectors we're taking over, and copy the
224 * vector's insn and data word.
225 */
226 for (vec = 0; vec < ARM_NVEC; vec++) {
227 if ((which & (1 << vec)) == 0) {
228 /* Don't want to take over this vector. */
229 continue;
230 }
231 vectors[vec] = page0[vec];
232 vectors_data[vec] = page0_data[vec];
233 }
234
235 /* Now sync the vectors. */
236 cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));
237
238 vector_page = va;
239
240 if (va == ARM_VECTORS_HIGH) {
241 /*
242 * Assume the MD caller knows what it's doing here, and
243 * really does want the vector page relocated.
244 *
245 * Note: This has to be done here (and not just in
246 * cpu_setup()) because the vector page needs to be
247 * accessible *before* cpu_startup() is called.
248 * Think ddb(9) ...
249 *
250 * NOTE: If the CPU control register is not readable,
251 * this will totally fail! We'll just assume that
252 * any system that has high vector support has a
253 * readable CPU control register, for now. If we
254 * ever encounter one that does not, we'll have to
255 * rethink this.
256 */
257 cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
258 }
259 }
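
The which argument is a bitmask naming the vector slots to take over; ports normally pass ARM_VEC_ALL so every slot is copied from page0, along with the data word the vector instruction loads through. A standalone sketch of the same bitmask-selected copy (plain userland C with made-up values; 0xe59ff018 is only meant to suggest the usual "ldr pc, [pc, #...]" vector instruction):

#include <stdio.h>

#define NVEC	8			/* stands in for ARM_NVEC */

int
main(void)
{
	unsigned int page0_insn[NVEC];	/* vector instructions, as in page0[] */
	unsigned int page0_word[NVEC];	/* data words, as in page0_data[] */
	unsigned int vectors[NVEC] = { 0 }, vectors_data[NVEC] = { 0 };
	unsigned int which = (1 << 2) | (1 << 3);	/* take over slots 2 and 3 only */
	int vec;

	for (vec = 0; vec < NVEC; vec++) {
		page0_insn[vec] = 0xe59ff018;
		page0_word[vec] = vec;
	}
	for (vec = 0; vec < NVEC; vec++) {
		if ((which & (1 << vec)) == 0)
			continue;		/* not selected: leave this slot alone */
		vectors[vec] = page0_insn[vec];
		vectors_data[vec] = page0_word[vec];
	}
	for (vec = 0; vec < NVEC; vec++)
		printf("vec %d: insn %#x data %#x\n", vec, vectors[vec],
		    vectors_data[vec]);
	return (0);
}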
260
261 static void
262 cpu_startup(void *dummy)
263 {
264 struct pcb *pcb = thread0.td_pcb;
265 #ifndef ARM_CACHE_LOCK_ENABLE
266 vm_page_t m;
267 #endif
268
269 cpu_setup("");
270 identify_arm_cpu();
271
272 printf("real memory = %ju (%ju MB)\n", (uintmax_t)ptoa(physmem),
273 (uintmax_t)ptoa(physmem) / 1048576);
274 realmem = physmem;
275
276 /*
277 * Display the RAM layout.
278 */
279 if (bootverbose) {
280 int indx;
281
282 printf("Physical memory chunk(s):\n");
283 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
284 vm_paddr_t size;
285
286 size = phys_avail[indx + 1] - phys_avail[indx];
287 printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
288 (uintmax_t)phys_avail[indx],
289 (uintmax_t)phys_avail[indx + 1] - 1,
290 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
291 }
292 }
293
294 vm_ksubmap_init(&kmi);
295
296 printf("avail memory = %ju (%ju MB)\n",
297 (uintmax_t)ptoa(cnt.v_free_count),
298 (uintmax_t)ptoa(cnt.v_free_count) / 1048576);
299
300 bufinit();
301 vm_pager_bufferinit();
302 pcb->un_32.pcb32_und_sp = (u_int)thread0.td_kstack +
303 USPACE_UNDEF_STACK_TOP;
304 pcb->un_32.pcb32_sp = (u_int)thread0.td_kstack +
305 USPACE_SVC_STACK_TOP;
306 vector_page_setprot(VM_PROT_READ);
307 pmap_set_pcb_pagedir(pmap_kernel(), pcb);
308 pmap_postinit();
309 #ifdef ARM_CACHE_LOCK_ENABLE
310 pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
311 arm_lock_cache_line(ARM_TP_ADDRESS);
312 #else
313 m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
314 pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
315 #endif
316 *(uint32_t *)ARM_RAS_START = 0;
317 *(uint32_t *)ARM_RAS_END = 0xffffffff;
318 }
319
320 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
321
322 /*
323 * Flush the D-cache for non-DMA I/O so that the I-cache can
324 * be made coherent later.
325 */
326 void
327 cpu_flush_dcache(void *ptr, size_t len)
328 {
329
330 cpu_dcache_wb_range((uintptr_t)ptr, len);
331 cpu_l2cache_wb_range((uintptr_t)ptr, len);
332 }
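
The ordering matters to a caller that has just written instructions through a data mapping: the dirty D-cache and L2 lines must be written back before the instruction side is resynchronized. The standalone sketch below stubs the primitives so that required ordering can be run and observed in userland; the stub bodies only print what the real routines would do.

#include <stdio.h>
#include <stddef.h>

typedef unsigned long vm_offset_t;

static void
cpu_dcache_wb_range(vm_offset_t va, size_t len)
{
	printf("write back D-cache  %#lx..%#lx\n", va, va + len);
}

static void
cpu_l2cache_wb_range(vm_offset_t va, size_t len)
{
	printf("write back L2 cache %#lx..%#lx\n", va, va + len);
}

static void
cpu_icache_sync_range(vm_offset_t va, size_t len)
{
	printf("sync I-cache        %#lx..%#lx\n", va, va + len);
}

static void
cpu_flush_dcache(void *ptr, size_t len)
{
	cpu_dcache_wb_range((vm_offset_t)ptr, len);
	cpu_l2cache_wb_range((vm_offset_t)ptr, len);
}

int
main(void)
{
	unsigned int code[4];		/* pretend freshly written instructions */

	cpu_flush_dcache(code, sizeof(code));
	cpu_icache_sync_range((vm_offset_t)code, sizeof(code));
	return (0);
}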
333
334 /* Get current clock frequency for the given cpu id. */
335 int
336 cpu_est_clockrate(int cpu_id, uint64_t *rate)
337 {
338
339 return (ENXIO);
340 }
341
342 void
343 cpu_idle(int busy)
344 {
345 cpu_sleep(0);
346 }
347
348 int
349 cpu_idle_wakeup(int cpu)
350 {
351
352 return (0);
353 }
354
355 int
356 fill_regs(struct thread *td, struct reg *regs)
357 {
358 struct trapframe *tf = td->td_frame;
359 bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
360 regs->r_sp = tf->tf_usr_sp;
361 regs->r_lr = tf->tf_usr_lr;
362 regs->r_pc = tf->tf_pc;
363 regs->r_cpsr = tf->tf_spsr;
364 return (0);
365 }
366 int
367 fill_fpregs(struct thread *td, struct fpreg *regs)
368 {
369 bzero(regs, sizeof(*regs));
370 return (0);
371 }
372
373 int
374 set_regs(struct thread *td, struct reg *regs)
375 {
376 struct trapframe *tf = td->td_frame;
377
378 bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
379 tf->tf_usr_sp = regs->r_sp;
380 tf->tf_usr_lr = regs->r_lr;
381 tf->tf_pc = regs->r_pc;
382 tf->tf_spsr &= ~PSR_FLAGS;
383 tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
384 return (0);
385 }
386
387 int
388 set_fpregs(struct thread *td, struct fpreg *regs)
389 {
390 return (0);
391 }
392
393 int
394 fill_dbregs(struct thread *td, struct dbreg *regs)
395 {
396 return (0);
397 }
398 int
399 set_dbregs(struct thread *td, struct dbreg *regs)
400 {
401 return (0);
402 }
403
404
405 static int
406 ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
407 {
408 struct iovec iov;
409 struct uio uio;
410
411 PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
412 iov.iov_base = (caddr_t) v;
413 iov.iov_len = sizeof(u_int32_t);
414 uio.uio_iov = &iov;
415 uio.uio_iovcnt = 1;
416 uio.uio_offset = (off_t)addr;
417 uio.uio_resid = sizeof(u_int32_t);
418 uio.uio_segflg = UIO_SYSSPACE;
419 uio.uio_rw = UIO_READ;
420 uio.uio_td = td;
421 return proc_rwmem(td->td_proc, &uio);
422 }
423
424 static int
425 ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
426 {
427 struct iovec iov;
428 struct uio uio;
429
430 PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
431 iov.iov_base = (caddr_t) &v;
432 iov.iov_len = sizeof(u_int32_t);
433 uio.uio_iov = &iov;
434 uio.uio_iovcnt = 1;
435 uio.uio_offset = (off_t)addr;
436 uio.uio_resid = sizeof(u_int32_t);
437 uio.uio_segflg = UIO_SYSSPACE;
438 uio.uio_rw = UIO_WRITE;
439 uio.uio_td = td;
440 return proc_rwmem(td->td_proc, &uio);
441 }
442
443 int
444 ptrace_single_step(struct thread *td)
445 {
446 struct proc *p;
447 int error;
448
449 KASSERT(td->td_md.md_ptrace_instr == 0,
450 ("Didn't clear single step"));
451 p = td->td_proc;
452 PROC_UNLOCK(p);
453 error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
454 &td->td_md.md_ptrace_instr);
455 if (error)
456 goto out;
457 error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
458 PTRACE_BREAKPOINT);
459 if (error)
460 td->td_md.md_ptrace_instr = 0;
461 td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4;
462 out:
463 PROC_LOCK(p);
464 return (error);
465 }
466
467 int
468 ptrace_clear_single_step(struct thread *td)
469 {
470 struct proc *p;
471
472 if (td->td_md.md_ptrace_instr) {
473 p = td->td_proc;
474 PROC_UNLOCK(p);
475 ptrace_write_int(td, td->td_md.md_ptrace_addr,
476 td->td_md.md_ptrace_instr);
477 PROC_LOCK(p);
478 td->td_md.md_ptrace_instr = 0;
479 }
480 return (0);
481 }
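
With no hardware single-step used here, ptrace_single_step() saves the word at tf_pc + 4 and replaces it with PTRACE_BREAKPOINT, and ptrace_clear_single_step() writes the saved word back; this only catches straight-line execution, since a taken branch never reaches pc + 4. A standalone sketch of the same save/patch/restore cycle on a local buffer (the breakpoint constant below is a placeholder, not the real PTRACE_BREAKPOINT opcode):

#include <stdint.h>
#include <stdio.h>

#define FAKE_BREAKPOINT	0xe7ffffffu	/* placeholder, not the real opcode */

static uint32_t *saved_addr;
static uint32_t saved_instr;

static void
set_step(uint32_t *pc)
{
	saved_addr = pc + 1;		/* the word after pc, i.e. pc + 4 bytes */
	saved_instr = *saved_addr;	/* remember the original instruction */
	*saved_addr = FAKE_BREAKPOINT;	/* patch in the breakpoint */
}

static void
clear_step(void)
{
	if (saved_addr != NULL) {
		*saved_addr = saved_instr;	/* put the original word back */
		saved_addr = NULL;
	}
}

int
main(void)
{
	uint32_t text[4] = { 0x11, 0x22, 0x33, 0x44 };

	set_step(&text[0]);
	printf("patched:  %#x\n", text[1]);
	clear_step();
	printf("restored: %#x\n", text[1]);
	return (0);
}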
482
483 int
484 ptrace_set_pc(struct thread *td, unsigned long addr)
485 {
486 td->td_frame->tf_pc = addr;
487 return (0);
488 }
489
490 void
491 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
492 {
493 }
494
495 void
496 spinlock_enter(void)
497 {
498 struct thread *td;
499 register_t cspr;
500
501 td = curthread;
502 if (td->td_md.md_spinlock_count == 0) {
503 cspr = disable_interrupts(I32_bit | F32_bit);
504 td->td_md.md_spinlock_count = 1;
505 td->td_md.md_saved_cspr = cspr;
506 } else
507 td->td_md.md_spinlock_count++;
508 critical_enter();
509 }
510
511 void
512 spinlock_exit(void)
513 {
514 struct thread *td;
515 register_t cspr;
516
517 td = curthread;
518 critical_exit();
519 cspr = td->td_md.md_saved_cspr;
520 td->td_md.md_spinlock_count--;
521 if (td->td_md.md_spinlock_count == 0)
522 restore_interrupts(cspr);
523 }
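
The pair above keeps a per-thread nesting count so the CPSR is saved and interrupts disabled only on the outermost spinlock_enter(), and restore_interrupts() runs only when the matching outermost spinlock_exit() brings the count back to zero. A standalone userland sketch of that discipline, with the interrupt state modeled as a plain flag:

#include <stdio.h>

static int spinlock_count;
static int saved_state;
static int interrupts_enabled = 1;	/* stand-in for the CPSR I/F bits */

static void
enter(void)
{
	if (spinlock_count == 0) {
		saved_state = interrupts_enabled;	/* save only on the outermost enter */
		interrupts_enabled = 0;			/* "disable interrupts" */
	}
	spinlock_count++;
}

static void
leave(void)
{
	spinlock_count--;
	if (spinlock_count == 0)
		interrupts_enabled = saved_state;	/* restore only on the outermost exit */
}

int
main(void)
{
	enter();
	enter();				/* nested acquire: state untouched */
	leave();
	printf("after inner exit: enabled=%d\n", interrupts_enabled);
	leave();
	printf("after outer exit: enabled=%d\n", interrupts_enabled);
	return (0);
}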
524
525 /*
526 * Clear registers on exec
527 */
528 void
529 exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
530 {
531 struct trapframe *tf = td->td_frame;
532
533 memset(tf, 0, sizeof(*tf));
534 tf->tf_usr_sp = stack;
535 tf->tf_usr_lr = entry;
536 tf->tf_svc_lr = 0x77777777;
537 tf->tf_pc = entry;
538 tf->tf_spsr = PSR_USR32_MODE;
539 }
540
541 /*
542 * Get machine context.
543 */
544 int
545 get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
546 {
547 struct trapframe *tf = td->td_frame;
548 __greg_t *gr = mcp->__gregs;
549
550 if (clear_ret & GET_MC_CLEAR_RET)
551 gr[_REG_R0] = 0;
552 else
553 gr[_REG_R0] = tf->tf_r0;
554 gr[_REG_R1] = tf->tf_r1;
555 gr[_REG_R2] = tf->tf_r2;
556 gr[_REG_R3] = tf->tf_r3;
557 gr[_REG_R4] = tf->tf_r4;
558 gr[_REG_R5] = tf->tf_r5;
559 gr[_REG_R6] = tf->tf_r6;
560 gr[_REG_R7] = tf->tf_r7;
561 gr[_REG_R8] = tf->tf_r8;
562 gr[_REG_R9] = tf->tf_r9;
563 gr[_REG_R10] = tf->tf_r10;
564 gr[_REG_R11] = tf->tf_r11;
565 gr[_REG_R12] = tf->tf_r12;
566 gr[_REG_SP] = tf->tf_usr_sp;
567 gr[_REG_LR] = tf->tf_usr_lr;
568 gr[_REG_PC] = tf->tf_pc;
569 gr[_REG_CPSR] = tf->tf_spsr;
570
571 return (0);
572 }
573
574 /*
575 * Set machine context.
576 *
577 * The supplied general registers and CPSR are copied into the trapframe;
578 * ARM has no segment selector state (such as x86's %cs) to preserve.
579 */
580 int
581 set_mcontext(struct thread *td, const mcontext_t *mcp)
582 {
583 struct trapframe *tf = td->td_frame;
584 const __greg_t *gr = mcp->__gregs;
585
586 tf->tf_r0 = gr[_REG_R0];
587 tf->tf_r1 = gr[_REG_R1];
588 tf->tf_r2 = gr[_REG_R2];
589 tf->tf_r3 = gr[_REG_R3];
590 tf->tf_r4 = gr[_REG_R4];
591 tf->tf_r5 = gr[_REG_R5];
592 tf->tf_r6 = gr[_REG_R6];
593 tf->tf_r7 = gr[_REG_R7];
594 tf->tf_r8 = gr[_REG_R8];
595 tf->tf_r9 = gr[_REG_R9];
596 tf->tf_r10 = gr[_REG_R10];
597 tf->tf_r11 = gr[_REG_R11];
598 tf->tf_r12 = gr[_REG_R12];
599 tf->tf_usr_sp = gr[_REG_SP];
600 tf->tf_usr_lr = gr[_REG_LR];
601 tf->tf_pc = gr[_REG_PC];
602 tf->tf_spsr = gr[_REG_CPSR];
603
604 return (0);
605 }
606
607 /*
608 * MPSAFE
609 */
610 int
611 sigreturn(td, uap)
612 struct thread *td;
613 struct sigreturn_args /* {
614 const struct __ucontext *sigcntxp;
615 } */ *uap;
616 {
617 struct sigframe sf;
618 struct trapframe *tf;
619 int spsr;
620
621 if (uap == NULL)
622 return (EFAULT);
623 if (copyin(uap->sigcntxp, &sf, sizeof(sf)))
624 return (EFAULT);
625 /*
626 * Make sure the processor mode has not been tampered with and
627 * interrupts have not been disabled.
628 */
629 spsr = sf.sf_uc.uc_mcontext.__gregs[_REG_CPSR];
630 if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
631 (spsr & (I32_bit | F32_bit)) != 0)
632 return (EINVAL);
633 /* Restore register context. */
634 tf = td->td_frame;
635 set_mcontext(td, &sf.sf_uc.uc_mcontext);
636
637 /* Restore signal mask. */
638 kern_sigprocmask(td, SIG_SETMASK, &sf.sf_uc.uc_sigmask, NULL, 0);
639
640 return (EJUSTRETURN);
641 }
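
The CPSR test above is the whole defence against a forged context: the mode field must still read user mode and neither interrupt-disable bit may be set. A standalone sketch of the same test, using the values these macros have in machine/armreg.h:

#include <stdio.h>

#define PSR_MODE	0x0000001f	/* mode field */
#define PSR_USR32_MODE	0x00000010
#define I32_bit		0x00000080	/* IRQs disabled */
#define F32_bit		0x00000040	/* FIQs disabled */

static int
cpsr_ok(unsigned int spsr)
{
	if ((spsr & PSR_MODE) != PSR_USR32_MODE)
		return (0);			/* not user mode */
	if ((spsr & (I32_bit | F32_bit)) != 0)
		return (0);			/* interrupts masked */
	return (1);
}

int
main(void)
{
	printf("%d\n", cpsr_ok(0x00000010));	/* user mode, interrupts enabled: accepted */
	printf("%d\n", cpsr_ok(0x00000013));	/* SVC mode: rejected */
	printf("%d\n", cpsr_ok(0x00000090));	/* user mode but IRQs masked: rejected */
	return (0);
}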
642
643
644 /*
645 * Construct a PCB from a trapframe. This is called from kdb_trap() where
646 * we want to start a backtrace from the function that caused us to enter
647 * the debugger. We have the context in the trapframe, but base the trace
648 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
649 * enough for a backtrace.
650 */
651 void
652 makectx(struct trapframe *tf, struct pcb *pcb)
653 {
654 pcb->un_32.pcb32_r8 = tf->tf_r8;
655 pcb->un_32.pcb32_r9 = tf->tf_r9;
656 pcb->un_32.pcb32_r10 = tf->tf_r10;
657 pcb->un_32.pcb32_r11 = tf->tf_r11;
658 pcb->un_32.pcb32_r12 = tf->tf_r12;
659 pcb->un_32.pcb32_pc = tf->tf_pc;
660 pcb->un_32.pcb32_lr = tf->tf_usr_lr;
661 pcb->un_32.pcb32_sp = tf->tf_usr_sp;
662 }
663
664 /*
665 * Fake up a boot descriptor table
666 */
667 vm_offset_t
668 fake_preload_metadata(void)
669 {
670 #ifdef DDB
671 vm_offset_t zstart = 0, zend = 0;
672 #endif
673 vm_offset_t lastaddr;
674 int i = 0;
675 static uint32_t fake_preload[35];
676
677 fake_preload[i++] = MODINFO_NAME;
678 fake_preload[i++] = strlen("elf kernel") + 1;
679 strcpy((char*)&fake_preload[i++], "elf kernel");
680 i += 2;
681 fake_preload[i++] = MODINFO_TYPE;
682 fake_preload[i++] = strlen("elf kernel") + 1;
683 strcpy((char*)&fake_preload[i++], "elf kernel");
684 i += 2;
685 fake_preload[i++] = MODINFO_ADDR;
686 fake_preload[i++] = sizeof(vm_offset_t);
687 fake_preload[i++] = KERNVIRTADDR;
688 fake_preload[i++] = MODINFO_SIZE;
689 fake_preload[i++] = sizeof(uint32_t);
690 fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
691 #ifdef DDB
692 if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
693 fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM;
694 fake_preload[i++] = sizeof(vm_offset_t);
695 fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
696 fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM;
697 fake_preload[i++] = sizeof(vm_offset_t);
698 fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
699 lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
700 zend = lastaddr;
701 zstart = *(uint32_t *)(KERNVIRTADDR + 4);
702 ksym_start = zstart;
703 ksym_end = zend;
704 } else
705 #endif
706 lastaddr = (vm_offset_t)&end;
707 fake_preload[i++] = 0;
708 fake_preload[i] = 0;
709 preload_metadata = (void *)fake_preload;
710
711 return (lastaddr);
712 }
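
The records built above follow the loader's preload format: a tag word, a payload size, and the payload padded out to 32-bit words, with a zero tag terminating the list; preload_metadata then lets the generic preload_* routines in sys/kern/subr_module.c find the kernel's name, type, address and size. A standalone sketch of building and walking such a list (simplified tags; the real MODINFO_* encoding also ORs metadata sub-types such as MODINFOMD_SSYM into the tag):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TAG_NAME	0x01	/* simplified stand-ins for MODINFO_NAME/MODINFO_SIZE */
#define TAG_SIZE	0x02

int
main(void)
{
	uint32_t list[16] = { 0 };
	int i = 0;

	/* Build: tag, payload size in bytes, payload padded to 32-bit words. */
	list[i++] = TAG_NAME;
	list[i++] = strlen("elf kernel") + 1;
	strcpy((char *)&list[i], "elf kernel");
	i += 3;					/* 11 bytes round up to 3 words */
	list[i++] = TAG_SIZE;
	list[i++] = sizeof(uint32_t);
	list[i++] = 0x00200000;			/* pretend kernel size */
	list[i++] = 0;				/* zero tag terminates the list */
	list[i] = 0;

	/* Walk: step over the tag, the size word and the padded payload. */
	for (i = 0; list[i] != 0; i += 2 + (list[i + 1] + 3) / 4)
		printf("tag %#x, %u byte payload\n", list[i],
		    (unsigned)list[i + 1]);
	return (0);
}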