FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/machdep.c
1 /* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */
2
3 /*-
4 * Copyright (c) 2004 Olivier Houchard
5 * Copyright (c) 1994-1998 Mark Brinicombe.
6 * Copyright (c) 1994 Brini.
7 * All rights reserved.
8 *
9 * This code is derived from software written for Brini by Mark Brinicombe
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by Mark Brinicombe
22 * for the NetBSD Project.
23 * 4. The name of the company nor the name of the author may be used to
24 * endorse or promote products derived from this software without specific
25 * prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
28 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
29 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * Machine dependant functions for kernel setup
40 *
41 * Created : 17/09/94
42 * Updated : 18/04/01 updated for new wscons
43 */
44
45 #include "opt_compat.h"
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD: releng/6.0/sys/arm/arm/machdep.c 144637 2005-04-04 21:53:56Z jhb $");
48
49 #include <sys/param.h>
50 #include <sys/proc.h>
51 #include <sys/systm.h>
52 #include <sys/bio.h>
53 #include <sys/buf.h>
54 #include <sys/bus.h>
55 #include <sys/cons.h>
56 #include <sys/cpu.h>
57 #include <sys/exec.h>
58 #include <sys/imgact.h>
59 #include <sys/kernel.h>
60 #include <sys/linker.h>
61 #include <sys/lock.h>
62 #include <sys/malloc.h>
63 #include <sys/mutex.h>
64 #include <sys/pcpu.h>
65 #include <sys/ptrace.h>
66 #include <sys/signalvar.h>
67 #include <sys/sysent.h>
68 #include <sys/sysproto.h>
69 #include <sys/uio.h>
70
71 #include <vm/vm.h>
72 #include <vm/pmap.h>
73 #include <vm/vm_map.h>
74 #include <vm/vm_object.h>
75 #include <vm/vm_page.h>
76 #include <vm/vm_pager.h>
77 #include <vm/vnode_pager.h>
78
79 #include <machine/armreg.h>
80 #include <machine/cpu.h>
81 #include <machine/machdep.h>
82 #include <machine/md_var.h>
83 #include <machine/metadata.h>
84 #include <machine/pcb.h>
85 #include <machine/pmap.h>
86 #include <machine/reg.h>
87 #include <machine/trap.h>
88 #include <machine/undefined.h>
89 #include <machine/vmparam.h>
90 #include <machine/sysarch.h>
91
/* Address jumped to by cpu_reset(); presumably set by board code — not visible here. */
uint32_t cpu_reset_address = 0;
/* Nonzero during early boot; cleared once the machine is up (cleared elsewhere). */
int cold = 1;
/* Virtual address of the ARM exception vector page; set in arm_vector_init(). */
vm_offset_t vector_page;

/* Real memory size; mirrors physmem, set in cpu_startup(). */
long realmem = 0;
98 void
99 sendsig(catcher, sig, mask, code)
100 sig_t catcher;
101 int sig;
102 sigset_t *mask;
103 u_long code;
104 {
105 struct thread *td = curthread;
106 struct proc *p = td->td_proc;
107 struct trapframe *tf = td->td_frame;
108 struct sigframe *fp, frame;
109 struct sigacts *psp = td->td_proc->p_sigacts;
110 int onstack;
111
112 onstack = sigonstack(td->td_frame->tf_usr_sp);
113
114 if ((td->td_flags & TDP_ALTSTACK) &&
115 !(onstack) &&
116 SIGISMEMBER(td->td_proc->p_sigacts->ps_sigonstack, sig)) {
117 fp = (void*)(td->td_sigstk.ss_sp + td->td_sigstk.ss_size);
118 td->td_sigstk.ss_flags |= SS_ONSTACK;
119 } else
120 fp = (void*)td->td_frame->tf_usr_sp;
121
122 /* make room on the stack */
123 fp--;
124
125 /* make the stack aligned */
126 fp = (struct sigframe *)STACKALIGN(fp);
127 /* Populate the siginfo frame. */
128 frame.sf_si.si_signo = sig;
129 frame.sf_si.si_code = code;
130 frame.sf_uc.uc_sigmask = *mask;
131 frame.sf_uc.uc_link = NULL;
132 frame.sf_uc.uc_flags = (td->td_pflags & TDP_ALTSTACK )
133 ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
134 frame.sf_uc.uc_stack = td->td_sigstk;
135 memset(&frame.sf_uc.uc_stack, 0, sizeof(frame.sf_uc.uc_stack));
136 get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
137 PROC_UNLOCK(td->td_proc);
138 mtx_unlock(&psp->ps_mtx);
139 if (copyout(&frame, (void*)fp, sizeof(frame)) != 0)
140 sigexit(td, SIGILL);
141 /*
142 * Build context to run handler in. We invoke the handler
143 * directly, only returning via the trampoline. Note the
144 * trampoline version numbers are coordinated with machine-
145 * dependent code in libc.
146 */
147
148 tf->tf_r0 = sig;
149 tf->tf_r1 = (int)&fp->sf_si;
150 tf->tf_r2 = (int)&fp->sf_uc;
151
152 /* the trampoline uses r5 as the uc address */
153 tf->tf_r5 = (int)&fp->sf_uc;
154 tf->tf_pc = (int)catcher;
155 tf->tf_usr_sp = (int)fp;
156 tf->tf_usr_lr = (int)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));
157 PROC_LOCK(td->td_proc);
158 mtx_lock(&psp->ps_mtx);
159 }
160
161 struct kva_md_info kmi;
162
/*
 * arm32_vector_init:
 *
 * Initialize the vector page, and select whether or not to
 * relocate the vectors.
 *
 * NOTE: We expect the vector page to be mapped at its expected
 * destination.
 */

/* Template vector instructions/data; presumably provided by locore -- not visible here. */
extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
	unsigned int *vectors = (int *) va;
	/* Data words sit at the same offset from 'va' as in the template page. */
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.  'which' is a bitmask selecting
	 * which of the ARM_NVEC vectors to install.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors (icache must see the freshly written insns). */
	cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
}
219
/*
 * Machine-dependent startup, run at SI_SUB_CPU: finish VM setup,
 * configure thread0's kernel-mode stacks, identify the CPU, and map
 * the ARM_TP_ADDRESS page.
 */
static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
#ifndef ARM_CACHE_LOCK_ENABLE
	vm_page_t m;
#endif

	/* Kernel VM submaps and buffer cache. */
	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();
	/* Point thread0's undefined-mode and SVC stacks into its kstack. */
	pcb->un_32.pcb32_und_sp = (u_int)thread0.td_kstack +
	    USPACE_UNDEF_STACK_TOP;
	pcb->un_32.pcb32_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	/* Vector page is fully populated now; make it read-only. */
	vector_page_setprot(VM_PROT_READ);
	pmap_set_pcb_pagedir(pmap_kernel(), pcb);
	cpu_setup("");
	identify_arm_cpu();
	/* thread0's trapframe lives just below the top of its SVC stack. */
	thread0.td_frame = (struct trapframe *)pcb->un_32.pcb32_sp - 1;
#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
#else
	/*
	 * Back ARM_TP_ADDRESS with a zeroed page.
	 * NOTE(review): vm_page_alloc() may return NULL; the result is
	 * used unchecked below -- confirm allocation cannot fail this
	 * early in boot.
	 */
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
	/* Export the amount of real memory found at boot. */
	realmem = physmem;

}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
252
/*
 * Report the current clock frequency of the given CPU id.  There is
 * no way to measure it on this platform, so the call always fails
 * with ENXIO and '*rate' is left untouched.
 */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	return (ENXIO);
}
260
/*
 * Idle-loop hook: put the processor into its low-power wait state
 * until the next interrupt.
 */
void
cpu_idle(void)
{
	cpu_sleep(0);
}
266
267 int
268 fill_regs(struct thread *td, struct reg *regs)
269 {
270 struct trapframe *tf = td->td_frame;
271 bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
272 regs->r_sp = tf->tf_usr_sp;
273 regs->r_lr = tf->tf_usr_lr;
274 regs->r_pc = tf->tf_pc;
275 regs->r_cpsr = tf->tf_spsr;
276 return (0);
277 }
278 int
279 fill_fpregs(struct thread *td, struct fpreg *regs)
280 {
281 bzero(regs, sizeof(*regs));
282 return (0);
283 }
284
285 int
286 set_regs(struct thread *td, struct reg *regs)
287 {
288 struct trapframe *tf = td->td_frame;
289
290 bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
291 tf->tf_usr_sp = regs->r_sp;
292 tf->tf_usr_lr = regs->r_lr;
293 tf->tf_pc = regs->r_pc;
294 tf->tf_spsr &= ~PSR_FLAGS;
295 tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
296 return (0);
297 }
298
/*
 * Install FP register state: nothing to do, as no FP context is
 * maintained; report success.
 */
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
	return (0);
}
304
/*
 * Export debug-register state: none exists on this platform, so the
 * output is left untouched and success is reported.
 */
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
	return (0);
}
/*
 * Install debug-register state: none exists on this platform; the
 * request is accepted as a no-op.
 */
int
set_dbregs(struct thread *td, struct dbreg *regs)
{
	return (0);
}
315
316
317 static int
318 ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
319 {
320 struct iovec iov;
321 struct uio uio;
322 iov.iov_base = (caddr_t) v;
323 iov.iov_len = sizeof(u_int32_t);
324 uio.uio_iov = &iov;
325 uio.uio_iovcnt = 1;
326 uio.uio_offset = (off_t)addr;
327 uio.uio_resid = sizeof(u_int32_t);
328 uio.uio_segflg = UIO_SYSSPACE;
329 uio.uio_rw = UIO_READ;
330 uio.uio_td = td;
331 return proc_rwmem(td->td_proc, &uio);
332 }
333
334 static int
335 ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
336 {
337 struct iovec iov;
338 struct uio uio;
339 iov.iov_base = (caddr_t) &v;
340 iov.iov_len = sizeof(u_int32_t);
341 uio.uio_iov = &iov;
342 uio.uio_iovcnt = 1;
343 uio.uio_offset = (off_t)addr;
344 uio.uio_resid = sizeof(u_int32_t);
345 uio.uio_segflg = UIO_SYSSPACE;
346 uio.uio_rw = UIO_WRITE;
347 uio.uio_td = td;
348 return proc_rwmem(td->td_proc, &uio);
349 }
350
351 int
352 ptrace_single_step(struct thread *td)
353 {
354 int error;
355
356 KASSERT(td->td_md.md_ptrace_instr == 0,
357 ("Didn't clear single step"));
358 error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
359 &td->td_md.md_ptrace_instr);
360 if (error)
361 return (error);
362 error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
363 PTRACE_BREAKPOINT);
364 if (error)
365 td->td_md.md_ptrace_instr = 0;
366 td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4;
367 return (error);
368 }
369
370 int
371 ptrace_clear_single_step(struct thread *td)
372 {
373 if (td->td_md.md_ptrace_instr) {
374 ptrace_write_int(td, td->td_md.md_ptrace_addr,
375 td->td_md.md_ptrace_instr);
376 td->td_md.md_ptrace_instr = 0;
377 }
378 return (0);
379 }
380
381 int
382 ptrace_set_pc(struct thread *td, unsigned long addr)
383 {
384 td->td_frame->tf_pc = addr;
385 return (0);
386 }
387
/*
 * Machine-dependent per-CPU data initialization: nothing to do on ARM.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}
392
/*
 * Enter a spin-lock section.  On the outermost entry, disable IRQ and
 * FIQ and stash the previous CPSR so spinlock_exit() can restore it;
 * then bump the per-thread nesting count and enter a critical section.
 * Order matters: interrupts must be off before the count is raised.
 */
void
spinlock_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0)
		td->td_md.md_saved_cspr = disable_interrupts(I32_bit | F32_bit);
	td->td_md.md_spinlock_count++;
	critical_enter();
}
404
/*
 * Leave a spin-lock section (mirror of spinlock_enter): drop the
 * critical section, decrement the nesting count, and restore the
 * saved CPSR (re-enabling interrupts) only when the count hits zero.
 */
void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(td->td_md.md_saved_cspr);
}
416
417 /*
418 * Clear registers on exec
419 */
420 void
421 exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
422 {
423 struct trapframe *tf = td->td_frame;
424
425 memset(tf, 0, sizeof(*tf));
426 tf->tf_usr_sp = stack;
427 tf->tf_usr_lr = entry;
428 tf->tf_svc_lr = 0x77777777;
429 tf->tf_pc = entry;
430 tf->tf_spsr = PSR_USR32_MODE;
431 }
432
433 /*
434 * Build siginfo_t for SA thread
435 */
436 void
437 cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
438 {
439 bzero(si, sizeof(*si));
440 si->si_signo = sig;
441 si->si_code = code;
442 }
443
444 /*
445 * Get machine context.
446 */
447 int
448 get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
449 {
450 struct trapframe *tf = td->td_frame;
451 __greg_t *gr = mcp->__gregs;
452
453 if (clear_ret & GET_MC_CLEAR_RET)
454 gr[_REG_R0] = 0;
455 else
456 gr[_REG_R0] = tf->tf_r0;
457 gr[_REG_R1] = tf->tf_r1;
458 gr[_REG_R2] = tf->tf_r2;
459 gr[_REG_R3] = tf->tf_r3;
460 gr[_REG_R4] = tf->tf_r4;
461 gr[_REG_R5] = tf->tf_r5;
462 gr[_REG_R6] = tf->tf_r6;
463 gr[_REG_R7] = tf->tf_r7;
464 gr[_REG_R8] = tf->tf_r8;
465 gr[_REG_R9] = tf->tf_r9;
466 gr[_REG_R10] = tf->tf_r10;
467 gr[_REG_R11] = tf->tf_r11;
468 gr[_REG_R12] = tf->tf_r12;
469 gr[_REG_SP] = tf->tf_usr_sp;
470 gr[_REG_LR] = tf->tf_usr_lr;
471 gr[_REG_PC] = tf->tf_pc;
472 gr[_REG_CPSR] = tf->tf_spsr;
473
474 return (0);
475 }
476
477 /*
478 * Set machine context.
479 *
480 * However, we don't set any but the user modifiable flags, and we won't
481 * touch the cs selector.
482 */
483 int
484 set_mcontext(struct thread *td, const mcontext_t *mcp)
485 {
486 struct trapframe *tf = td->td_frame;
487 __greg_t *gr = mcp->__gregs;
488
489 tf->tf_r0 = gr[_REG_R0];
490 tf->tf_r1 = gr[_REG_R1];
491 tf->tf_r2 = gr[_REG_R2];
492 tf->tf_r3 = gr[_REG_R3];
493 tf->tf_r4 = gr[_REG_R4];
494 tf->tf_r5 = gr[_REG_R5];
495 tf->tf_r6 = gr[_REG_R6];
496 tf->tf_r7 = gr[_REG_R7];
497 tf->tf_r8 = gr[_REG_R8];
498 tf->tf_r9 = gr[_REG_R9];
499 tf->tf_r10 = gr[_REG_R10];
500 tf->tf_r11 = gr[_REG_R11];
501 tf->tf_r12 = gr[_REG_R12];
502 tf->tf_usr_sp = gr[_REG_SP];
503 tf->tf_usr_lr = gr[_REG_LR];
504 tf->tf_pc = gr[_REG_PC];
505 tf->tf_spsr = gr[_REG_CPSR];
506
507 return (0);
508 }
509
510 /*
511 * MPSAFE
512 */
513 int
514 sigreturn(td, uap)
515 struct thread *td;
516 struct sigreturn_args /* {
517 const __ucontext *sigcntxp;
518 } */ *uap;
519 {
520 struct proc *p = td->td_proc;
521 struct sigframe sf;
522 struct trapframe *tf;
523 int spsr;
524
525 if (uap == NULL)
526 return (EFAULT);
527 if (copyin(uap->sigcntxp, &sf, sizeof(sf)))
528 return (EFAULT);
529 /*
530 * Make sure the processor mode has not been tampered with and
531 * interrupts have not been disabled.
532 */
533 spsr = sf.sf_uc.uc_mcontext.__gregs[_REG_CPSR];
534 if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
535 (spsr & (I32_bit | F32_bit)) != 0)
536 return (EINVAL);
537 /* Restore register context. */
538 tf = td->td_frame;
539 set_mcontext(td, &sf.sf_uc.uc_mcontext);
540
541 /* Restore signal mask. */
542 PROC_LOCK(p);
543 td->td_sigmask = sf.sf_uc.uc_sigmask;
544 SIG_CANTMASK(td->td_sigmask);
545 signotify(td);
546 PROC_UNLOCK(p);
547
548 return (EJUSTRETURN);
549 }
550
551
552 /*
553 * Construct a PCB from a trapframe. This is called from kdb_trap() where
554 * we want to start a backtrace from the function that caused us to enter
555 * the debugger. We have the context in the trapframe, but base the trace
556 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
557 * enough for a backtrace.
558 */
559 void
560 makectx(struct trapframe *tf, struct pcb *pcb)
561 {
562 pcb->un_32.pcb32_r8 = tf->tf_r8;
563 pcb->un_32.pcb32_r9 = tf->tf_r9;
564 pcb->un_32.pcb32_r10 = tf->tf_r10;
565 pcb->un_32.pcb32_r11 = tf->tf_r11;
566 pcb->un_32.pcb32_r12 = tf->tf_r12;
567 pcb->un_32.pcb32_pc = tf->tf_pc;
568 pcb->un_32.pcb32_lr = tf->tf_usr_lr;
569 pcb->un_32.pcb32_sp = tf->tf_usr_sp;
570 }
Cache object: 989915b2fe6fdafffb268f1f24b9a535
|