FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/machdep.c
/* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */

/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Mark Brinicombe
 *      for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 * Updated      : 18/04/01 updated for new wscons
 */

#include "opt_compat.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/undefined.h>
#include <machine/vmparam.h>
#include <machine/sysarch.h>

uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;

long realmem = 0;

int (*_arm_memcpy)(void *, void *, int, int) = NULL;
int (*_arm_bzero)(void *, int, int) = NULL;
int _min_memcpy_size = 0;
int _min_bzero_size = 0;

extern int *end;
#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

void
sendsig(catcher, ksi, mask)
        sig_t catcher;
        ksiginfo_t *ksi;
        sigset_t *mask;
{
        struct thread *td;
        struct proc *p;
        struct trapframe *tf;
        struct sigframe *fp, frame;
        struct sigacts *psp;
        int onstack;
        int sig;
        int code;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        sig = ksi->ksi_signo;
        code = ksi->ksi_code;
        psp = p->p_sigacts;
        mtx_assert(&psp->ps_mtx, MA_OWNED);
        tf = td->td_frame;
        onstack = sigonstack(tf->tf_usr_sp);

        CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
            catcher, sig);

        /* Allocate and validate space for the signal handler context. */
        if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
            SIGISMEMBER(psp->ps_sigonstack, sig)) {
                fp = (struct sigframe *)(td->td_sigstk.ss_sp +
                    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
                td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
        } else
                fp = (struct sigframe *)td->td_frame->tf_usr_sp;

        /* make room on the stack */
        fp--;

        /* make the stack aligned */
        fp = (struct sigframe *)STACKALIGN(fp);
        /* Populate the siginfo frame. */
        get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
        frame.sf_si = ksi->ksi_info;
        frame.sf_uc.uc_sigmask = *mask;
        frame.sf_uc.uc_stack = td->td_sigstk;
        frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
            ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
        mtx_unlock(&psp->ps_mtx);
        PROC_UNLOCK(td->td_proc);

        /* Copy the sigframe out to the user's stack. */
        if (copyout(&frame, fp, sizeof(*fp)) != 0) {
                /* Process has trashed its stack. Kill it. */
                CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
                PROC_LOCK(p);
                sigexit(td, SIGILL);
        }

        /* Translate the signal if appropriate. */
        if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
                sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

        /*
         * Build context to run handler in.  We invoke the handler
         * directly, only returning via the trampoline.  Note the
         * trampoline version numbers are coordinated with machine-
         * dependent code in libc.
         */

        tf->tf_r0 = sig;
        tf->tf_r1 = (register_t)&fp->sf_si;
        tf->tf_r2 = (register_t)&fp->sf_uc;

        /* the trampoline uses r5 as the uc address */
        tf->tf_r5 = (register_t)&fp->sf_uc;
        tf->tf_pc = (register_t)catcher;
        tf->tf_usr_sp = (register_t)fp;
        tf->tf_usr_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

        CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
            tf->tf_usr_sp);

        PROC_LOCK(p);
        mtx_lock(&psp->ps_mtx);
}
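
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * what the frame built above looks like from userland.  The hypothetical
 * handler below only shows the calling convention; the real trampoline is
 * the MD sigcode that sendsig() points lr at, just below PS_STRINGS.
 *
 *      void
 *      handler(int sig, siginfo_t *si, void *ucp)      // r0, r1, r2
 *      {
 *              ucontext_t *uc = ucp;   // == &fp->sf_uc; also kept in r5
 *              ...
 *      }
 *
 * When the handler returns, control passes to that sigcode, which is
 * expected to use the ucontext address saved in r5 to enter sigreturn(2),
 * handled by sys_sigreturn() later in this file.
 */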

struct kva_md_info kmi;

/*
 * arm_vector_init:
 *
 *      Initialize the vector page, and select whether or not to
 *      relocate the vectors.
 *
 *      NOTE: We expect the vector page to be mapped at its expected
 *      destination.
 */

extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
        unsigned int *vectors = (int *) va;
        unsigned int *vectors_data = vectors + (page0_data - page0);
        int vec;

        /*
         * Loop through the vectors we're taking over, and copy the
         * vector's insn and data word.
         */
        for (vec = 0; vec < ARM_NVEC; vec++) {
                if ((which & (1 << vec)) == 0) {
                        /* Don't want to take over this vector. */
                        continue;
                }
                vectors[vec] = page0[vec];
                vectors_data[vec] = page0_data[vec];
        }

        /* Now sync the vectors. */
        cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

        vector_page = va;

        if (va == ARM_VECTORS_HIGH) {
                /*
                 * Assume the MD caller knows what it's doing here, and
                 * really does want the vector page relocated.
                 *
                 * Note: This has to be done here (and not just in
                 * cpu_setup()) because the vector page needs to be
                 * accessible *before* cpu_startup() is called.
                 * Think ddb(9) ...
                 *
                 * NOTE: If the CPU control register is not readable,
                 * this will totally fail!  We'll just assume that
                 * any system that has high vector support has a
                 * readable CPU control register, for now.  If we
                 * ever encounter one that does not, we'll have to
                 * rethink this.
                 */
                cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
        }
}
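
/*
 * Usage sketch (editor's addition, not part of the original file): a
 * platform's initarm() is expected to map the vector page and then call
 * this routine, for example:
 *
 *      arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 *
 * ARM_VECTORS_HIGH (0xffff0000) selects the relocated high vectors and
 * triggers the CPU_CONTROL_VECRELOC setting above, while ARM_VECTORS_LOW
 * leaves them at address 0.  ARM_VEC_ALL is assumed here to be the
 * "take over every vector" mask from <machine/machdep.h>.
 */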

static void
cpu_startup(void *dummy)
{
        struct pcb *pcb = thread0.td_pcb;
#ifndef ARM_CACHE_LOCK_ENABLE
        vm_page_t m;
#endif

        cpu_setup("");
        identify_arm_cpu();

        printf("real memory  = %ju (%ju MB)\n", (uintmax_t)ptoa(physmem),
            (uintmax_t)ptoa(physmem) / 1048576);
        realmem = physmem;

        /*
         * Display the RAM layout.
         */
        if (bootverbose) {
                int indx;

                printf("Physical memory chunk(s):\n");
                for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
                        vm_paddr_t size;

                        size = phys_avail[indx + 1] - phys_avail[indx];
                        printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
                            (uintmax_t)phys_avail[indx],
                            (uintmax_t)phys_avail[indx + 1] - 1,
                            (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
                }
        }

        vm_ksubmap_init(&kmi);

        printf("avail memory = %ju (%ju MB)\n",
            (uintmax_t)ptoa(cnt.v_free_count),
            (uintmax_t)ptoa(cnt.v_free_count) / 1048576);

        bufinit();
        vm_pager_bufferinit();
        pcb->un_32.pcb32_und_sp = (u_int)thread0.td_kstack +
            USPACE_UNDEF_STACK_TOP;
        pcb->un_32.pcb32_sp = (u_int)thread0.td_kstack +
            USPACE_SVC_STACK_TOP;
        vector_page_setprot(VM_PROT_READ);
        pmap_set_pcb_pagedir(pmap_kernel(), pcb);
        pmap_postinit();
#ifdef ARM_CACHE_LOCK_ENABLE
        pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
        arm_lock_cache_line(ARM_TP_ADDRESS);
#else
        m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
        pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
        *(uint32_t *)ARM_RAS_START = 0;
        *(uint32_t *)ARM_RAS_END = 0xffffffff;
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
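
/*
 * Note (editor's addition, not part of the original file): the page wired
 * at ARM_TP_ADDRESS near the end of cpu_startup() is visible to userland.
 * It is assumed here to hold the current thread's TLS pointer together with
 * the ARM_RAS_START/ARM_RAS_END words used by the restartable atomic
 * sequence support for userland atomics on CPUs without ldrex/strex; the
 * two stores above just place those words in their initial state.  A
 * hypothetical userland thread-pointer fetch is then a plain load:
 *
 *      void *tp = *(void **)ARM_TP_ADDRESS;    // fast TLS read, no syscall
 */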

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

        cpu_dcache_wb_range((uintptr_t)ptr, len);
        cpu_l2cache_wb_range((uintptr_t)ptr, len);
}
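
/*
 * Usage sketch (editor's addition, not part of the original file): callers
 * use this MI hook after writing data through the D-cache that will later
 * be fetched as instructions or read by an agent that bypasses the cache.
 * A hypothetical caller preparing executable code might do:
 *
 *      memcpy(dst, image, len);                // dirties L1/L2 D-cache
 *      cpu_flush_dcache(dst, len);             // write it back to memory
 *      cpu_icache_sync_range((vm_offset_t)dst, len);   // then sync I-cache
 */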

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

        return (ENXIO);
}

void
cpu_idle(int busy)
{
        cpu_sleep(0);
}

int
cpu_idle_wakeup(int cpu)
{

        return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *tf = td->td_frame;
        bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
        regs->r_sp = tf->tf_usr_sp;
        regs->r_lr = tf->tf_usr_lr;
        regs->r_pc = tf->tf_pc;
        regs->r_cpsr = tf->tf_spsr;
        return (0);
}
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
        bzero(regs, sizeof(*regs));
        return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *tf = td->td_frame;

        bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
        tf->tf_usr_sp = regs->r_sp;
        tf->tf_usr_lr = regs->r_lr;
        tf->tf_pc = regs->r_pc;
        tf->tf_spsr &= ~PSR_FLAGS;
        tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
        return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
        return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
        return (0);
}
int
set_dbregs(struct thread *td, struct dbreg *regs)
{
        return (0);
}


static int
ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
{
        struct iovec iov;
        struct uio uio;

        PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
        iov.iov_base = (caddr_t) v;
        iov.iov_len = sizeof(u_int32_t);
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = (off_t)addr;
        uio.uio_resid = sizeof(u_int32_t);
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_READ;
        uio.uio_td = td;
        return proc_rwmem(td->td_proc, &uio);
}

static int
ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
{
        struct iovec iov;
        struct uio uio;

        PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
        iov.iov_base = (caddr_t) &v;
        iov.iov_len = sizeof(u_int32_t);
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = (off_t)addr;
        uio.uio_resid = sizeof(u_int32_t);
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_WRITE;
        uio.uio_td = td;
        return proc_rwmem(td->td_proc, &uio);
}

int
ptrace_single_step(struct thread *td)
{
        struct proc *p;
        int error;

        KASSERT(td->td_md.md_ptrace_instr == 0,
            ("Didn't clear single step"));
        p = td->td_proc;
        PROC_UNLOCK(p);
        error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
            &td->td_md.md_ptrace_instr);
        if (error)
                goto out;
        error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
            PTRACE_BREAKPOINT);
        if (error)
                td->td_md.md_ptrace_instr = 0;
        td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4;
out:
        PROC_LOCK(p);
        return (error);
}

int
ptrace_clear_single_step(struct thread *td)
{
        struct proc *p;

        if (td->td_md.md_ptrace_instr) {
                p = td->td_proc;
                PROC_UNLOCK(p);
                ptrace_write_int(td, td->td_md.md_ptrace_addr,
                    td->td_md.md_ptrace_instr);
                PROC_LOCK(p);
                td->td_md.md_ptrace_instr = 0;
        }
        return (0);
}
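
/*
 * Note (editor's addition, not part of the original file): this port has no
 * hardware single-step, so PT_STEP is emulated.  ptrace_single_step() saves
 * the word at pc + 4 and replaces it with PTRACE_BREAKPOINT; the child runs
 * until it traps there, and ptrace_clear_single_step() restores the saved
 * instruction.  A hypothetical debugger using it looks like:
 *
 *      ptrace(PT_STEP, pid, (caddr_t)1, 0);    // plant breakpoint at pc + 4
 *      waitpid(pid, &status, 0);               // child stops on the trap
 *
 * A branch at the current pc defeats the emulation, since execution never
 * reaches pc + 4; that limitation is inherent in this approach.
 */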

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
        td->td_frame->tf_pc = addr;
        return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

void
spinlock_enter(void)
{
        struct thread *td;
        register_t cspr;

        td = curthread;
        if (td->td_md.md_spinlock_count == 0) {
                cspr = disable_interrupts(I32_bit | F32_bit);
                td->td_md.md_spinlock_count = 1;
                td->td_md.md_saved_cspr = cspr;
        } else
                td->td_md.md_spinlock_count++;
        critical_enter();
}

void
spinlock_exit(void)
{
        struct thread *td;
        register_t cspr;

        td = curthread;
        critical_exit();
        cspr = td->td_md.md_saved_cspr;
        td->td_md.md_spinlock_count--;
        if (td->td_md.md_spinlock_count == 0)
                restore_interrupts(cspr);
}
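
/*
 * Note (editor's addition, not part of the original file): these routines
 * provide the MD half of spin mutexes.  IRQ and FIQ are masked only by the
 * outermost spinlock_enter(), and the saved CPSR is restored only when the
 * nesting count drops back to zero:
 *
 *      spinlock_enter();       // masks I32_bit | F32_bit, count = 1
 *      spinlock_enter();       // count = 2, interrupts already masked
 *      spinlock_exit();        // count = 1, interrupts stay masked
 *      spinlock_exit();        // count = 0, saved CPSR restored
 */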

/*
 * Clear registers on exec
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
        struct trapframe *tf = td->td_frame;

        memset(tf, 0, sizeof(*tf));
        tf->tf_usr_sp = stack;
        tf->tf_usr_lr = imgp->entry_addr;
        tf->tf_svc_lr = 0x77777777;
        tf->tf_pc = imgp->entry_addr;
        tf->tf_spsr = PSR_USR32_MODE;
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
        struct trapframe *tf = td->td_frame;
        __greg_t *gr = mcp->__gregs;

        if (clear_ret & GET_MC_CLEAR_RET)
                gr[_REG_R0] = 0;
        else
                gr[_REG_R0] = tf->tf_r0;
        gr[_REG_R1] = tf->tf_r1;
        gr[_REG_R2] = tf->tf_r2;
        gr[_REG_R3] = tf->tf_r3;
        gr[_REG_R4] = tf->tf_r4;
        gr[_REG_R5] = tf->tf_r5;
        gr[_REG_R6] = tf->tf_r6;
        gr[_REG_R7] = tf->tf_r7;
        gr[_REG_R8] = tf->tf_r8;
        gr[_REG_R9] = tf->tf_r9;
        gr[_REG_R10] = tf->tf_r10;
        gr[_REG_R11] = tf->tf_r11;
        gr[_REG_R12] = tf->tf_r12;
        gr[_REG_SP] = tf->tf_usr_sp;
        gr[_REG_LR] = tf->tf_usr_lr;
        gr[_REG_PC] = tf->tf_pc;
        gr[_REG_CPSR] = tf->tf_spsr;

        return (0);
}

/*
 * Set machine context.
 *
 * The general purpose registers and the CPSR are taken verbatim from the
 * supplied context; callers such as sys_sigreturn() below are responsible
 * for validating the processor mode and interrupt bits first.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
        struct trapframe *tf = td->td_frame;
        const __greg_t *gr = mcp->__gregs;

        tf->tf_r0 = gr[_REG_R0];
        tf->tf_r1 = gr[_REG_R1];
        tf->tf_r2 = gr[_REG_R2];
        tf->tf_r3 = gr[_REG_R3];
        tf->tf_r4 = gr[_REG_R4];
        tf->tf_r5 = gr[_REG_R5];
        tf->tf_r6 = gr[_REG_R6];
        tf->tf_r7 = gr[_REG_R7];
        tf->tf_r8 = gr[_REG_R8];
        tf->tf_r9 = gr[_REG_R9];
        tf->tf_r10 = gr[_REG_R10];
        tf->tf_r11 = gr[_REG_R11];
        tf->tf_r12 = gr[_REG_R12];
        tf->tf_usr_sp = gr[_REG_SP];
        tf->tf_usr_lr = gr[_REG_LR];
        tf->tf_pc = gr[_REG_PC];
        tf->tf_spsr = gr[_REG_CPSR];

        return (0);
}
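
/*
 * Note (editor's addition, not part of the original file): this pair is the
 * MD half of getcontext(2)/setcontext(2) and of the signal path above:
 * sendsig() captures the interrupted registers with get_mcontext(), and
 * sys_sigreturn() below pushes the (possibly modified) context back through
 * set_mcontext().  A hypothetical userland round trip is simply:
 *
 *      ucontext_t uc;
 *      getcontext(&uc);        // kernel fills uc_mcontext via get_mcontext()
 *      ...
 *      setcontext(&uc);        // kernel reloads it via set_mcontext()
 */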

/*
 * MPSAFE
 */
int
sys_sigreturn(td, uap)
        struct thread *td;
        struct sigreturn_args /* {
                const struct __ucontext *sigcntxp;
        } */ *uap;
{
        struct sigframe sf;
        struct trapframe *tf;
        int spsr;

        if (uap == NULL)
                return (EFAULT);
        if (copyin(uap->sigcntxp, &sf, sizeof(sf)))
                return (EFAULT);
        /*
         * Make sure the processor mode has not been tampered with and
         * interrupts have not been disabled.
         */
        spsr = sf.sf_uc.uc_mcontext.__gregs[_REG_CPSR];
        if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
            (spsr & (I32_bit | F32_bit)) != 0)
                return (EINVAL);
        /* Restore register context. */
        tf = td->td_frame;
        set_mcontext(td, &sf.sf_uc.uc_mcontext);

        /* Restore signal mask. */
        kern_sigprocmask(td, SIG_SETMASK, &sf.sf_uc.uc_sigmask, NULL, 0);

        return (EJUSTRETURN);
}


/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
        pcb->un_32.pcb32_r8 = tf->tf_r8;
        pcb->un_32.pcb32_r9 = tf->tf_r9;
        pcb->un_32.pcb32_r10 = tf->tf_r10;
        pcb->un_32.pcb32_r11 = tf->tf_r11;
        pcb->un_32.pcb32_r12 = tf->tf_r12;
        pcb->un_32.pcb32_pc = tf->tf_pc;
        pcb->un_32.pcb32_lr = tf->tf_usr_lr;
        pcb->un_32.pcb32_sp = tf->tf_usr_sp;
}
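
/*
 * Note (editor's addition, not part of the original file): a rough sketch
 * of how the debugger path described in the comment above uses this:
 *
 *      kdb_trap(type, code, tf)
 *              -> makectx(tf, &pcb);   // capture r8-r12, sp, lr, pc
 *              -> stack unwinder starts from that pcb
 *
 * Only the callee-saved state needed to walk stack frames is copied, which
 * is why the PCB built here does not need to be complete.
 */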

/*
 * Fake up a boot descriptor table
 */
vm_offset_t
fake_preload_metadata(void)
{
#ifdef DDB
        vm_offset_t zstart = 0, zend = 0;
#endif
        vm_offset_t lastaddr;
        int i = 0;
        static uint32_t fake_preload[35];

        fake_preload[i++] = MODINFO_NAME;
        fake_preload[i++] = strlen("kernel") + 1;
        strcpy((char*)&fake_preload[i++], "kernel");
        i += 1;
        fake_preload[i++] = MODINFO_TYPE;
        fake_preload[i++] = strlen("elf kernel") + 1;
        strcpy((char*)&fake_preload[i++], "elf kernel");
        i += 2;
        fake_preload[i++] = MODINFO_ADDR;
        fake_preload[i++] = sizeof(vm_offset_t);
        fake_preload[i++] = KERNVIRTADDR;
        fake_preload[i++] = MODINFO_SIZE;
        fake_preload[i++] = sizeof(uint32_t);
        fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
#ifdef DDB
        if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
                fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM;
                fake_preload[i++] = sizeof(vm_offset_t);
                fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
                fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM;
                fake_preload[i++] = sizeof(vm_offset_t);
                fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
                lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
                zend = lastaddr;
                zstart = *(uint32_t *)(KERNVIRTADDR + 4);
                ksym_start = zstart;
                ksym_end = zend;
        } else
#endif
                lastaddr = (vm_offset_t)&end;
        fake_preload[i++] = 0;
        fake_preload[i] = 0;
        preload_metadata = (void *)fake_preload;

        return (lastaddr);
}
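
/*
 * Usage sketch (editor's addition, not part of the original file): the array
 * built above mimics the module metadata loader(8) normally passes in, so
 * the MI preload code can consume it unchanged, for example:
 *
 *      caddr_t kmdp = preload_search_by_type("elf kernel");
 *      uint32_t *szp = (uint32_t *)preload_search_info(kmdp, MODINFO_SIZE);
 *
 * picking up the MODINFO_NAME/TYPE/ADDR/SIZE records (and, under DDB, the
 * MODINFOMD_SSYM/ESYM symbol table bounds) recorded here.
 */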