FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/machdep.c
1 /* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */
2
3 /*-
4 * Copyright (c) 2004 Olivier Houchard
5 * Copyright (c) 1994-1998 Mark Brinicombe.
6 * Copyright (c) 1994 Brini.
7 * All rights reserved.
8 *
9 * This code is derived from software written for Brini by Mark Brinicombe
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by Mark Brinicombe
22 * for the NetBSD Project.
23 * 4. The name of the company nor the name of the author may be used to
24 * endorse or promote products derived from this software without specific
25 * prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
28 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
29 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * Machine dependant functions for kernel setup
40 *
41 * Created : 17/09/94
42 * Updated : 18/04/01 updated for new wscons
43 */
44
45 #include "opt_compat.h"
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD: releng/6.2/sys/arm/arm/machdep.c 156401 2006-03-07 18:08:09Z jhb $");
48
49 #include <sys/param.h>
50 #include <sys/proc.h>
51 #include <sys/systm.h>
52 #include <sys/bio.h>
53 #include <sys/buf.h>
54 #include <sys/bus.h>
55 #include <sys/cons.h>
56 #include <sys/cpu.h>
57 #include <sys/exec.h>
58 #include <sys/imgact.h>
59 #include <sys/kernel.h>
60 #include <sys/linker.h>
61 #include <sys/lock.h>
62 #include <sys/malloc.h>
63 #include <sys/mutex.h>
64 #include <sys/pcpu.h>
65 #include <sys/ptrace.h>
66 #include <sys/signalvar.h>
67 #include <sys/sysent.h>
68 #include <sys/sysproto.h>
69 #include <sys/uio.h>
70
71 #include <vm/vm.h>
72 #include <vm/pmap.h>
73 #include <vm/vm_map.h>
74 #include <vm/vm_object.h>
75 #include <vm/vm_page.h>
76 #include <vm/vm_pager.h>
77 #include <vm/vnode_pager.h>
78
79 #include <machine/armreg.h>
80 #include <machine/cpu.h>
81 #include <machine/machdep.h>
82 #include <machine/md_var.h>
83 #include <machine/metadata.h>
84 #include <machine/pcb.h>
85 #include <machine/pmap.h>
86 #include <machine/reg.h>
87 #include <machine/trap.h>
88 #include <machine/undefined.h>
89 #include <machine/vmparam.h>
90 #include <machine/sysarch.h>
91
uint32_t cpu_reset_address = 0;		/* Address jumped to by cpu_reset(); set by platform code. */
int cold = 1;				/* Nonzero until the kernel has finished booting. */
vm_offset_t vector_page;		/* VA where the exception vector page is mapped. */

long realmem = 0;			/* Mirrors physmem; set in cpu_startup(). */
98 void
99 sendsig(catcher, sig, mask, code)
100 sig_t catcher;
101 int sig;
102 sigset_t *mask;
103 u_long code;
104 {
105 struct thread *td = curthread;
106 struct proc *p = td->td_proc;
107 struct trapframe *tf = td->td_frame;
108 struct sigframe *fp, frame;
109 struct sigacts *psp = td->td_proc->p_sigacts;
110 int onstack;
111
112 onstack = sigonstack(td->td_frame->tf_usr_sp);
113
114 if ((td->td_flags & TDP_ALTSTACK) &&
115 !(onstack) &&
116 SIGISMEMBER(td->td_proc->p_sigacts->ps_sigonstack, sig)) {
117 fp = (void*)(td->td_sigstk.ss_sp + td->td_sigstk.ss_size);
118 td->td_sigstk.ss_flags |= SS_ONSTACK;
119 } else
120 fp = (void*)td->td_frame->tf_usr_sp;
121
122 /* make room on the stack */
123 fp--;
124
125 /* make the stack aligned */
126 fp = (struct sigframe *)STACKALIGN(fp);
127 /* Populate the siginfo frame. */
128 frame.sf_si.si_signo = sig;
129 frame.sf_si.si_code = code;
130 frame.sf_uc.uc_sigmask = *mask;
131 frame.sf_uc.uc_link = NULL;
132 frame.sf_uc.uc_flags = (td->td_pflags & TDP_ALTSTACK )
133 ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
134 frame.sf_uc.uc_stack = td->td_sigstk;
135 memset(&frame.sf_uc.uc_stack, 0, sizeof(frame.sf_uc.uc_stack));
136 get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
137 PROC_UNLOCK(td->td_proc);
138 mtx_unlock(&psp->ps_mtx);
139 if (copyout(&frame, (void*)fp, sizeof(frame)) != 0)
140 sigexit(td, SIGILL);
141 /*
142 * Build context to run handler in. We invoke the handler
143 * directly, only returning via the trampoline. Note the
144 * trampoline version numbers are coordinated with machine-
145 * dependent code in libc.
146 */
147
148 tf->tf_r0 = sig;
149 tf->tf_r1 = (int)&fp->sf_si;
150 tf->tf_r2 = (int)&fp->sf_uc;
151
152 /* the trampoline uses r5 as the uc address */
153 tf->tf_r5 = (int)&fp->sf_uc;
154 tf->tf_pc = (int)catcher;
155 tf->tf_usr_sp = (int)fp;
156 tf->tf_usr_lr = (int)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));
157 PROC_LOCK(td->td_proc);
158 mtx_lock(&psp->ps_mtx);
159 }
160
/* Kernel VA submap bookkeeping; filled in by vm_ksubmap_init() in cpu_startup(). */
struct kva_md_info kmi;
162
163 /*
164 * arm32_vector_init:
165 *
166 * Initialize the vector page, and select whether or not to
167 * relocate the vectors.
168 *
169 * NOTE: We expect the vector page to be mapped at its expected
170 * destination.
171 */
172
173 extern unsigned int page0[], page0_data[];
174 void
175 arm_vector_init(vm_offset_t va, int which)
176 {
177 unsigned int *vectors = (int *) va;
178 unsigned int *vectors_data = vectors + (page0_data - page0);
179 int vec;
180
181 /*
182 * Loop through the vectors we're taking over, and copy the
183 * vector's insn and data word.
184 */
185 for (vec = 0; vec < ARM_NVEC; vec++) {
186 if ((which & (1 << vec)) == 0) {
187 /* Don't want to take over this vector. */
188 continue;
189 }
190 vectors[vec] = page0[vec];
191 vectors_data[vec] = page0_data[vec];
192 }
193
194 /* Now sync the vectors. */
195 cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));
196
197 vector_page = va;
198
199 if (va == ARM_VECTORS_HIGH) {
200 /*
201 * Assume the MD caller knows what it's doing here, and
202 * really does want the vector page relocated.
203 *
204 * Note: This has to be done here (and not just in
205 * cpu_setup()) because the vector page needs to be
206 * accessible *before* cpu_startup() is called.
207 * Think ddb(9) ...
208 *
209 * NOTE: If the CPU control register is not readable,
210 * this will totally fail! We'll just assume that
211 * any system that has high vector support has a
212 * readable CPU control register, for now. If we
213 * ever encounter one that does not, we'll have to
214 * rethink this.
215 */
216 cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
217 }
218 }
219
/*
 * Late machine-dependent startup, run via SYSINIT at SI_SUB_CPU:
 * initialize kernel VM submaps and the buffer cache, set up thread0's
 * PCB stack pointers, write-protect the vector page, identify the CPU,
 * and provide a page at ARM_TP_ADDRESS (per-thread pointer area).
 */
static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
#ifndef ARM_CACHE_LOCK_ENABLE
	vm_page_t m;
#endif

	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();
	/* Undefined-mode and SVC-mode stack tops inside thread0's kstack. */
	pcb->un_32.pcb32_und_sp = (u_int)thread0.td_kstack +
	    USPACE_UNDEF_STACK_TOP;
	pcb->un_32.pcb32_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	/* Vector page becomes read-only from here on. */
	vector_page_setprot(VM_PROT_READ);
	pmap_set_pcb_pagedir(pmap_kernel(), pcb);
	cpu_setup("");
	identify_arm_cpu();
	/* Place thread0's trapframe just below its SVC stack top. */
	thread0.td_frame = (struct trapframe *)pcb->un_32.pcb32_sp - 1;
#ifdef ARM_CACHE_LOCK_ENABLE
	/* Back the TP page with a locked cache line instead of RAM. */
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
#else
	/* NOTE(review): vm_page_alloc() result is not checked for NULL. */
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
	pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
	realmem = physmem;

}
250
/* Schedule cpu_startup() to run early in boot, at SI_SUB_CPU. */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
252
/*
 * Estimate the current clock frequency of the given CPU.  There is no
 * way to measure this on these ARM systems, so the request always
 * fails with ENXIO and *rate is left untouched.
 */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	return (ENXIO);
}
260
/*
 * Idle the CPU until the next interrupt; cpu_sleep() invokes the
 * CPU-specific wait-for-interrupt operation.
 */
void
cpu_idle(void)
{
	cpu_sleep(0);
}
266
267 int
268 fill_regs(struct thread *td, struct reg *regs)
269 {
270 struct trapframe *tf = td->td_frame;
271 bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
272 regs->r_sp = tf->tf_usr_sp;
273 regs->r_lr = tf->tf_usr_lr;
274 regs->r_pc = tf->tf_pc;
275 regs->r_cpsr = tf->tf_spsr;
276 return (0);
277 }
278 int
279 fill_fpregs(struct thread *td, struct fpreg *regs)
280 {
281 bzero(regs, sizeof(*regs));
282 return (0);
283 }
284
285 int
286 set_regs(struct thread *td, struct reg *regs)
287 {
288 struct trapframe *tf = td->td_frame;
289
290 bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
291 tf->tf_usr_sp = regs->r_sp;
292 tf->tf_usr_lr = regs->r_lr;
293 tf->tf_pc = regs->r_pc;
294 tf->tf_spsr &= ~PSR_FLAGS;
295 tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
296 return (0);
297 }
298
/* FP register state is not maintained; accept and ignore the input. */
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
	return (0);
}
304
/* Hardware debug registers are not supported; return an empty set. */
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{
	return (0);
}
/* Hardware debug registers are not supported; silently succeed. */
int
set_dbregs(struct thread *td, struct dbreg *regs)
{
	return (0);
}
315
316
317 static int
318 ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
319 {
320 struct iovec iov;
321 struct uio uio;
322
323 PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
324 iov.iov_base = (caddr_t) v;
325 iov.iov_len = sizeof(u_int32_t);
326 uio.uio_iov = &iov;
327 uio.uio_iovcnt = 1;
328 uio.uio_offset = (off_t)addr;
329 uio.uio_resid = sizeof(u_int32_t);
330 uio.uio_segflg = UIO_SYSSPACE;
331 uio.uio_rw = UIO_READ;
332 uio.uio_td = td;
333 return proc_rwmem(td->td_proc, &uio);
334 }
335
336 static int
337 ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
338 {
339 struct iovec iov;
340 struct uio uio;
341
342 PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
343 iov.iov_base = (caddr_t) &v;
344 iov.iov_len = sizeof(u_int32_t);
345 uio.uio_iov = &iov;
346 uio.uio_iovcnt = 1;
347 uio.uio_offset = (off_t)addr;
348 uio.uio_resid = sizeof(u_int32_t);
349 uio.uio_segflg = UIO_SYSSPACE;
350 uio.uio_rw = UIO_WRITE;
351 uio.uio_td = td;
352 return proc_rwmem(td->td_proc, &uio);
353 }
354
/*
 * Emulate single-stepping: ARM has no hardware single-step, so save
 * the instruction at tf_pc + 4 (presumably the next instruction to
 * execute — confirm against how tf_pc is maintained on trap entry)
 * and replace it with a breakpoint.  ptrace_clear_single_step()
 * restores it.  Called with the proc lock held; it is dropped around
 * the user-memory accesses, which may sleep.
 */
int
ptrace_single_step(struct thread *td)
{
	struct proc *p;
	int error;

	KASSERT(td->td_md.md_ptrace_instr == 0,
	    ("Didn't clear single step"));
	p = td->td_proc;
	PROC_UNLOCK(p);
	/* Save the original instruction so it can be put back later. */
	error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
	    &td->td_md.md_ptrace_instr);
	if (error)
		goto out;
	error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
	    PTRACE_BREAKPOINT);
	if (error)
		/* Write failed: nothing planted, so nothing to restore. */
		td->td_md.md_ptrace_instr = 0;
	td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4;
out:
	PROC_LOCK(p);
	return (error);
}
378
379 int
380 ptrace_clear_single_step(struct thread *td)
381 {
382 struct proc *p;
383
384 if (td->td_md.md_ptrace_instr) {
385 p = td->td_proc;
386 PROC_UNLOCK(p);
387 ptrace_write_int(td, td->td_md.md_ptrace_addr,
388 td->td_md.md_ptrace_instr);
389 PROC_LOCK(p);
390 td->td_md.md_ptrace_instr = 0;
391 }
392 return (0);
393 }
394
/*
 * Set the traced thread's program counter (used by ptrace(2) requests
 * such as PT_CONTINUE that resume at a caller-supplied address).
 */
int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	td->td_frame->tf_pc = addr;
	return (0);
}
401
/* Machine-dependent per-CPU data initialization: nothing needed on ARM. */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}
406
/*
 * Enter a spinlock section.  On the outermost entry, disable IRQs and
 * FIQs and save the previous CPSR so spinlock_exit() can restore it;
 * nesting depth is tracked in md_spinlock_count.  Interrupts must be
 * disabled before the count is bumped, hence the statement order.
 */
void
spinlock_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0)
		td->td_md.md_saved_cspr = disable_interrupts(I32_bit | F32_bit);
	td->td_md.md_spinlock_count++;
	critical_enter();
}
418
/*
 * Leave a spinlock section; mirror of spinlock_enter().  The saved
 * CPSR is restored only when the outermost nesting level is exited,
 * so the count must reach zero before interrupts come back on.
 */
void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		restore_interrupts(td->td_md.md_saved_cspr);
}
430
431 /*
432 * Clear registers on exec
433 */
434 void
435 exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
436 {
437 struct trapframe *tf = td->td_frame;
438
439 memset(tf, 0, sizeof(*tf));
440 tf->tf_usr_sp = stack;
441 tf->tf_usr_lr = entry;
442 tf->tf_svc_lr = 0x77777777;
443 tf->tf_pc = entry;
444 tf->tf_spsr = PSR_USR32_MODE;
445 }
446
447 /*
448 * Build siginfo_t for SA thread
449 */
450 void
451 cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
452 {
453 bzero(si, sizeof(*si));
454 si->si_signo = sig;
455 si->si_code = code;
456 }
457
/*
 * Get machine context: copy the general-purpose registers, sp, lr, pc
 * and CPSR from the thread's trapframe into `mcp'.  When
 * GET_MC_CLEAR_RET is set in `clear_ret', report r0 (the syscall
 * return register) as zero instead of its live value.
 * Always succeeds.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;
	__greg_t *gr = mcp->__gregs;

	if (clear_ret & GET_MC_CLEAR_RET)
		gr[_REG_R0] = 0;
	else
		gr[_REG_R0] = tf->tf_r0;
	gr[_REG_R1] = tf->tf_r1;
	gr[_REG_R2] = tf->tf_r2;
	gr[_REG_R3] = tf->tf_r3;
	gr[_REG_R4] = tf->tf_r4;
	gr[_REG_R5] = tf->tf_r5;
	gr[_REG_R6] = tf->tf_r6;
	gr[_REG_R7] = tf->tf_r7;
	gr[_REG_R8] = tf->tf_r8;
	gr[_REG_R9] = tf->tf_r9;
	gr[_REG_R10] = tf->tf_r10;
	gr[_REG_R11] = tf->tf_r11;
	gr[_REG_R12] = tf->tf_r12;
	gr[_REG_SP] = tf->tf_usr_sp;
	gr[_REG_LR] = tf->tf_usr_lr;
	gr[_REG_PC] = tf->tf_pc;
	gr[_REG_CPSR] = tf->tf_spsr;

	return (0);
}
490
491 /*
492 * Set machine context.
493 *
494 * However, we don't set any but the user modifiable flags, and we won't
495 * touch the cs selector.
496 */
497 int
498 set_mcontext(struct thread *td, const mcontext_t *mcp)
499 {
500 struct trapframe *tf = td->td_frame;
501 __greg_t *gr = mcp->__gregs;
502
503 tf->tf_r0 = gr[_REG_R0];
504 tf->tf_r1 = gr[_REG_R1];
505 tf->tf_r2 = gr[_REG_R2];
506 tf->tf_r3 = gr[_REG_R3];
507 tf->tf_r4 = gr[_REG_R4];
508 tf->tf_r5 = gr[_REG_R5];
509 tf->tf_r6 = gr[_REG_R6];
510 tf->tf_r7 = gr[_REG_R7];
511 tf->tf_r8 = gr[_REG_R8];
512 tf->tf_r9 = gr[_REG_R9];
513 tf->tf_r10 = gr[_REG_R10];
514 tf->tf_r11 = gr[_REG_R11];
515 tf->tf_r12 = gr[_REG_R12];
516 tf->tf_usr_sp = gr[_REG_SP];
517 tf->tf_usr_lr = gr[_REG_LR];
518 tf->tf_pc = gr[_REG_PC];
519 tf->tf_spsr = gr[_REG_CPSR];
520
521 return (0);
522 }
523
524 /*
525 * MPSAFE
526 */
527 int
528 sigreturn(td, uap)
529 struct thread *td;
530 struct sigreturn_args /* {
531 const __ucontext *sigcntxp;
532 } */ *uap;
533 {
534 struct proc *p = td->td_proc;
535 struct sigframe sf;
536 struct trapframe *tf;
537 int spsr;
538
539 if (uap == NULL)
540 return (EFAULT);
541 if (copyin(uap->sigcntxp, &sf, sizeof(sf)))
542 return (EFAULT);
543 /*
544 * Make sure the processor mode has not been tampered with and
545 * interrupts have not been disabled.
546 */
547 spsr = sf.sf_uc.uc_mcontext.__gregs[_REG_CPSR];
548 if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
549 (spsr & (I32_bit | F32_bit)) != 0)
550 return (EINVAL);
551 /* Restore register context. */
552 tf = td->td_frame;
553 set_mcontext(td, &sf.sf_uc.uc_mcontext);
554
555 /* Restore signal mask. */
556 PROC_LOCK(p);
557 td->td_sigmask = sf.sf_uc.uc_sigmask;
558 SIG_CANTMASK(td->td_sigmask);
559 signotify(td);
560 PROC_UNLOCK(p);
561
562 return (EJUSTRETURN);
563 }
564
565
566 /*
567 * Construct a PCB from a trapframe. This is called from kdb_trap() where
568 * we want to start a backtrace from the function that caused us to enter
569 * the debugger. We have the context in the trapframe, but base the trace
570 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
571 * enough for a backtrace.
572 */
573 void
574 makectx(struct trapframe *tf, struct pcb *pcb)
575 {
576 pcb->un_32.pcb32_r8 = tf->tf_r8;
577 pcb->un_32.pcb32_r9 = tf->tf_r9;
578 pcb->un_32.pcb32_r10 = tf->tf_r10;
579 pcb->un_32.pcb32_r11 = tf->tf_r11;
580 pcb->un_32.pcb32_r12 = tf->tf_r12;
581 pcb->un_32.pcb32_pc = tf->tf_pc;
582 pcb->un_32.pcb32_lr = tf->tf_usr_lr;
583 pcb->un_32.pcb32_sp = tf->tf_usr_sp;
584 }
Cache object: af9ad5e24ceb7e8d3cad685acd8b5ad7
|