sys/arm/arm/machdep.c
/*	$NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $	*/

/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 * Updated      : 18/04/01 updated for new wscons
 */

#include "opt_compat.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/arm/arm/machdep.c 225617 2011-09-16 13:58:51Z kmacy $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/undefined.h>
#include <machine/vmparam.h>
#include <machine/sysarch.h>

uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;

long realmem = 0;

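/*
 * Hooks through which a platform can supply accelerated memcpy/bzero
 * implementations (e.g. ones backed by a DMA engine); the _min_*_size
 * thresholds give the smallest length for which the hooks are attempted.
 */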
int (*_arm_memcpy)(void *, void *, int, int) = NULL;
int (*_arm_bzero)(void *, int, int) = NULL;
int _min_memcpy_size = 0;
int _min_bzero_size = 0;

extern int *end;
#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

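/*
 * Deliver a signal: build a sigframe (ucontext plus siginfo) on the
 * user stack, or on the alternate signal stack when one is in use, and
 * redirect the thread to the handler.  The handler returns through the
 * signal trampoline that was copied out below PS_STRINGS at exec time.
 */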
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	int onstack;
	int sig;
	int code;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	onstack = sigonstack(tf->tf_usr_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct sigframe *)td->td_frame->tf_usr_sp;

	/* make room on the stack */
	fp--;

	/* make the stack aligned */
	fp = (struct sigframe *)STACKALIGN(fp);
	/* Populate the siginfo frame. */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/*
	 * Build context to run handler in.  We invoke the handler
	 * directly, only returning via the trampoline.  Note the
	 * trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	 */

	tf->tf_r0 = sig;
	tf->tf_r1 = (register_t)&fp->sf_si;
	tf->tf_r2 = (register_t)&fp->sf_uc;

	/* the trampoline uses r5 as the uc address */
	tf->tf_r5 = (register_t)&fp->sf_uc;
	tf->tf_pc = (register_t)catcher;
	tf->tf_usr_sp = (register_t)fp;
	tf->tf_usr_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
	    tf->tf_usr_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

struct kva_md_info kmi;

/*
 * arm_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */

extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
	unsigned int *vectors = (unsigned int *)va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
	cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
}

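/*
 * A typical call from a port's early MD initialization, once the page
 * intended for the vectors has been mapped at ARM_VECTORS_HIGH (the
 * ARM_VEC_ALL mask selects every vector):
 *
 *	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 */

/*
 * Late machine-dependent startup: identify the CPU, report real and
 * available memory, initialize the kernel VA submaps and the buffer
 * cache, set up thread0's undefined/supervisor stack pointers,
 * write-protect the vector page, and back the ARM_TP_ADDRESS mapping
 * (used by the thread-pointer emulation) with a physical page.
 */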
static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
#ifndef ARM_CACHE_LOCK_ENABLE
	vm_page_t m;
#endif

	cpu_setup("");
	identify_arm_cpu();

	printf("real memory = %ju (%ju MB)\n", (uintmax_t)ptoa(physmem),
	    (uintmax_t)ptoa(physmem) / 1048576);
	realmem = physmem;

	/*
	 * Display the RAM layout.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)ptoa(cnt.v_free_count),
	    (uintmax_t)ptoa(cnt.v_free_count) / 1048576);

	bufinit();
	vm_pager_bufferinit();
	pcb->un_32.pcb32_und_sp = (u_int)thread0.td_kstack +
	    USPACE_UNDEF_STACK_TOP;
	pcb->un_32.pcb32_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	vector_page_setprot(VM_PROT_READ);
	pmap_set_pcb_pagedir(pmap_kernel(), pcb);
	pmap_postinit();
#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
#else
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	cpu_dcache_wb_range((uintptr_t)ptr, len);
	cpu_l2cache_wb_range((uintptr_t)ptr, len);
}

/*
 * Get the current clock frequency for the given cpu id.  Not
 * implemented on this platform, so always fail with ENXIO.
 */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	return (ENXIO);
}

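/*
 * Idle the CPU; cpu_sleep() invokes the CPU-specific wait-for-interrupt
 * routine, returning when an interrupt wakes the core.
 */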
void
cpu_idle(int busy)
{
	cpu_sleep(0);
}

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

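/*
 * Register access routines backing ptrace(2) and core dumps.  No
 * per-thread FPU or debug-register state is kept here, so the fpreg
 * and dbreg variants are stubs.
 */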
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
	regs->r_sp = tf->tf_usr_sp;
	regs->r_lr = tf->tf_usr_lr;
	regs->r_pc = tf->tf_pc;
	regs->r_cpsr = tf->tf_spsr;
	return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
	bzero(regs, sizeof(*regs));
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf = td->td_frame;

	bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
	tf->tf_usr_sp = regs->r_sp;
	tf->tf_usr_lr = regs->r_lr;
	tf->tf_pc = regs->r_pc;
	tf->tf_spsr &= ~PSR_FLAGS;
	tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{
	return (0);
}

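/*
 * Read or write a 32-bit word in the traced process's address space
 * using proc_rwmem().  The caller must not hold the proc lock across
 * the I/O, hence the assertions below.
 */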
static int
ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
{
	struct iovec iov;
	struct uio uio;

	PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
	iov.iov_base = (caddr_t)v;
	iov.iov_len = sizeof(u_int32_t);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int32_t);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;
	return (proc_rwmem(td->td_proc, &uio));
}

static int
ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
{
	struct iovec iov;
	struct uio uio;

	PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
	iov.iov_base = (caddr_t)&v;
	iov.iov_len = sizeof(u_int32_t);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int32_t);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	return (proc_rwmem(td->td_proc, &uio));
}

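/*
 * ARM has no hardware single-step, so emulate it: save the instruction
 * following the current PC and replace it with a breakpoint, which
 * ptrace_clear_single_step() later undoes.  Note that this only traps
 * straight-line execution; a branch at the current PC bypasses the
 * planted breakpoint.
 */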
int
ptrace_single_step(struct thread *td)
{
	struct proc *p;
	int error;

	KASSERT(td->td_md.md_ptrace_instr == 0,
	    ("Didn't clear single step"));
	p = td->td_proc;
	PROC_UNLOCK(p);
	error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
	    &td->td_md.md_ptrace_instr);
	if (error)
		goto out;
	error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
	    PTRACE_BREAKPOINT);
	if (error)
		td->td_md.md_ptrace_instr = 0;
	td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4;
out:
	PROC_LOCK(p);
	return (error);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct proc *p;

	if (td->td_md.md_ptrace_instr) {
		p = td->td_proc;
		PROC_UNLOCK(p);
		ptrace_write_int(td, td->td_md.md_ptrace_addr,
		    td->td_md.md_ptrace_instr);
		PROC_LOCK(p);
		td->td_md.md_ptrace_instr = 0;
	}
	return (0);
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	td->td_frame->tf_pc = addr;
	return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

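/*
 * MD spinlock assist: the outermost spinlock_enter() disables IRQs and
 * FIQs and remembers the previous CPSR, while nested calls only bump
 * the count; the matching outermost spinlock_exit() restores the saved
 * interrupt state.
 */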
void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		cspr = disable_interrupts(I32_bit | F32_bit);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	critical_exit();
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(cspr);
}

/*
 * Clear registers on exec.  The new image starts in user mode with
 * interrupts enabled; the PC and the LR both point at the entry
 * point, and the supervisor-mode LR is set to a recognizable junk
 * value (0x77777777).
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(*tf));
	tf->tf_usr_sp = stack;
	tf->tf_usr_lr = imgp->entry_addr;
	tf->tf_svc_lr = 0x77777777;
	tf->tf_pc = imgp->entry_addr;
	tf->tf_spsr = PSR_USR32_MODE;
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;
	__greg_t *gr = mcp->__gregs;

	if (clear_ret & GET_MC_CLEAR_RET)
		gr[_REG_R0] = 0;
	else
		gr[_REG_R0] = tf->tf_r0;
	gr[_REG_R1] = tf->tf_r1;
	gr[_REG_R2] = tf->tf_r2;
	gr[_REG_R3] = tf->tf_r3;
	gr[_REG_R4] = tf->tf_r4;
	gr[_REG_R5] = tf->tf_r5;
	gr[_REG_R6] = tf->tf_r6;
	gr[_REG_R7] = tf->tf_r7;
	gr[_REG_R8] = tf->tf_r8;
	gr[_REG_R9] = tf->tf_r9;
	gr[_REG_R10] = tf->tf_r10;
	gr[_REG_R11] = tf->tf_r11;
	gr[_REG_R12] = tf->tf_r12;
	gr[_REG_SP] = tf->tf_usr_sp;
	gr[_REG_LR] = tf->tf_usr_lr;
	gr[_REG_PC] = tf->tf_pc;
	gr[_REG_CPSR] = tf->tf_spsr;

	return (0);
}

/*
 * Set machine context.
 *
 * The whole register set, including the CPSR, is installed verbatim;
 * it is up to the caller to validate any privileged bits first (see
 * sys_sigreturn() below).
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	const __greg_t *gr = mcp->__gregs;

	tf->tf_r0 = gr[_REG_R0];
	tf->tf_r1 = gr[_REG_R1];
	tf->tf_r2 = gr[_REG_R2];
	tf->tf_r3 = gr[_REG_R3];
	tf->tf_r4 = gr[_REG_R4];
	tf->tf_r5 = gr[_REG_R5];
	tf->tf_r6 = gr[_REG_R6];
	tf->tf_r7 = gr[_REG_R7];
	tf->tf_r8 = gr[_REG_R8];
	tf->tf_r9 = gr[_REG_R9];
	tf->tf_r10 = gr[_REG_R10];
	tf->tf_r11 = gr[_REG_R11];
	tf->tf_r12 = gr[_REG_R12];
	tf->tf_usr_sp = gr[_REG_SP];
	tf->tf_usr_lr = gr[_REG_LR];
	tf->tf_pc = gr[_REG_PC];
	tf->tf_spsr = gr[_REG_CPSR];

	return (0);
}

/*
 * MPSAFE
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args /* {
	const struct __ucontext *sigcntxp;
} */ *uap)
{
	struct sigframe sf;
	int spsr;

	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &sf, sizeof(sf)))
		return (EFAULT);
	/*
	 * Make sure the processor mode has not been tampered with and
	 * interrupts have not been disabled.
	 */
	spsr = sf.sf_uc.uc_mcontext.__gregs[_REG_CPSR];
	if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
	    (spsr & (I32_bit | F32_bit)) != 0)
		return (EINVAL);
	/* Restore register context. */
	set_mcontext(td, &sf.sf_uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &sf.sf_uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe.  This is called from kdb_trap()
 * where we want to start a backtrace from the function that caused us
 * to enter the debugger.  We have the context in the trapframe, but
 * base the trace on the PCB.  The PCB doesn't have to be perfect, as
 * long as it contains enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	pcb->un_32.pcb32_r8 = tf->tf_r8;
	pcb->un_32.pcb32_r9 = tf->tf_r9;
	pcb->un_32.pcb32_r10 = tf->tf_r10;
	pcb->un_32.pcb32_r11 = tf->tf_r11;
	pcb->un_32.pcb32_r12 = tf->tf_r12;
	pcb->un_32.pcb32_pc = tf->tf_pc;
	pcb->un_32.pcb32_lr = tf->tf_usr_lr;
	pcb->un_32.pcb32_sp = tf->tf_usr_sp;
}

/*
 * Fake up a boot descriptor table: synthesize a minimal preload module
 * list describing the kernel itself, for boards whose boot loader does
 * not pass in metadata.
 */
vm_offset_t
fake_preload_metadata(void)
{
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	vm_offset_t lastaddr;
	int i = 0;
	static uint32_t fake_preload[35];

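	/*
	 * Each record is a MODINFO tag word, a length word, then the
	 * payload.  "elf kernel" is 11 bytes with its NUL, so it spans
	 * three 32-bit words: i++ consumes the first, and the i += 2
	 * below skips the rest.
	 */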
	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char *)&fake_preload[i++], "elf kernel");
	i += 2;
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char *)&fake_preload[i++], "elf kernel");
	i += 2;
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = KERNVIRTADDR;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(uint32_t);
	fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
#ifdef DDB
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		ksym_start = zstart;
		ksym_end = zend;
	} else
#endif
		lastaddr = (vm_offset_t)&end;
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void *)fake_preload;

	return (lastaddr);
}