/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.3/sys/i386/i386/machdep.c 134976 2004-09-09 10:03:21Z julian $");

#include "opt_apic.h"
#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_npx.h"
#include "opt_perfmon.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/callout.h>
#include <sys/msgbuf.h>
#include <sys/sched.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/exec.h>
#include <sys/cons.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#include <net/netisr.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/reg.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/bootinfo.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/pc/bios.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#include <machine/proc.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/privatespace.h>
#include <machine/smp.h>
#endif

#ifdef DEV_ISA
#include <i386/isa/icu.h>
#endif

#include <isa/rtc.h>
#include <machine/vm86.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern void init386(int first);
extern void dblfault_handler(void);

extern void printcpuinfo(void);		/* XXX header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
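/*
 * CS_SECURE() accepts a user-supplied %cs only if its privilege level is
 * user (SEL_UPL); EFL_SECURE() accepts a new %eflags only if it differs
 * from the old value in the PSL_USERCHANGE bits alone.  Both are used by
 * the sigreturn() variants below to vet register state handed back from
 * user space.
 */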

#if !defined(CPU_ENABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif
#if defined(CPU_DISABLE_SSE)
#undef CPU_ENABLE_SSE
#endif

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static int  set_fpcontext(struct thread *td, const mcontext_t *mcp);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

int	_udatasel, _ucodesel;
u_int	basemem;

int cold = 1;

#ifdef COMPAT_43
static void osendsig(sig_t catcher, int sig, sigset_t *mask, u_long code);
#endif
#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, int sig, sigset_t *mask,
    u_long code);
#endif

long Maxmem = 0;

vm_paddr_t phys_avail[10];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
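/*
 * phys_avail[] is consumed as (start, end) pairs; getmemsize() below fills
 * it in and leaves a pair of zeroes as the terminator, e.g. (illustrative
 * values only): { 0x1000, 0x9f000, 0x100000, 0x3ff0000, 0, 0 }.
 */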

struct kva_md_info kmi;

static struct trapframe proc0_tf;
#ifndef SMP
static struct pcpu __pcpu;
#endif

struct mtx icu_lock;

struct mem_range_softc mem_range_softc;

static void
cpu_startup(dummy)
	void *dummy;
{
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem),
	    ptoa((uintmax_t)Maxmem) / 1048576);
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	cpu_setregs();
}

/*
 * Send an interrupt to a process.
 *
 * The stack is set up to allow the sigcode stored
 * at top to call the routine, followed by a kcall
 * to the sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user-
 * specified pc, psl.
 */
#ifdef COMPAT_43
static void
osendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Save most if not all of the trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_eflags &= ~PSL_T;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_err;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
	regs->tf_eflags &= ~PSL_T;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_FREEBSD4 */

void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, sig, mask, code);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, sig, mask, code);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext);
	fpstate_drop(td);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_esp - sizeof(struct sigframe);
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_err;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_eflags &= ~PSL_T;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * Build siginfo_t for SA thread
 */
void
cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	bzero(si, sizeof(*si));
	si->si_signo = sig;
	si->si_code = code;
	si->si_addr = (void *)td->td_frame->tf_err;
	/* XXXKSE fill other fields */
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
#ifdef COMPAT_43
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	struct proc *p = td->td_proc;
	int eflags, error;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(td, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			trapsignal(td, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

	PROC_LOCK(p);
#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	SIGSETOLD(td->td_sigmask, scp->sc_mask);
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * MPSAFE
 */
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
{
	struct ucontext4 uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	const struct ucontext4 *ucp;
	int cs, eflags, error;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(td, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
			trapsignal(td, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

	PROC_LOCK(p);
#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	td->td_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}
#endif /* COMPAT_FREEBSD4 */

/*
 * MPSAFE
 */
int
sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	const ucontext_t *ucp;
	int cs, eflags, error, ret;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(td, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(td, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}

		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

	PROC_LOCK(p);
#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	td->td_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

/*
 * Hook to idle the CPU when possible.  In the SMP case we default to
 * off because a halted cpu will not currently pick up a new thread in the
 * run queue until the next timer tick.  If turned on this will result in
 * approximately a 4.2% loss in real time performance in buildworld tests
 * (but improves user and sys times oddly enough), and saves approximately
 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
 *
 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
 * Then we can have our cake and eat it too.
 *
 * XXX I'm turning it on for SMP as well by default for now.  It seems to
 * help lock contention somewhat, and this is critical for HTT. -Peter
 */
static int	cpu_idle_hlt = 1;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");
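/*
 * The knob is writable at run time; for example (illustrative):
 *	sysctl machdep.cpu_idle_hlt=0	# spin instead of HLT when idle
 */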

static void
cpu_idle_default(void)
{
	/*
	 * We must absolutely guarantee that hlt is the
	 * absolute next instruction after sti or we
	 * introduce a timing window.
	 */
	__asm __volatile("sti; hlt");
}
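
/*
 * The "sti; hlt" pairing is safe because STI does not take effect until
 * after the instruction that follows it: an interrupt cannot be delivered
 * in the window between the two instructions, so it will instead arrive
 * while halted and wake the HLT.
 */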

/*
 * Note that we have to be careful here to avoid a race between checking
 * sched_runnable() and actually halting.  If we don't do this, we may waste
 * the time between calling hlt and the next interrupt even though there
 * is a runnable process.
 */
void
cpu_idle(void)
{

#ifdef SMP
	if (mp_grab_cpu_hlt())
		return;
#endif

	if (cpu_idle_hlt) {
		disable_intr();
		if (sched_runnable())
			enable_intr();
		else
			(*cpu_idle_hook)();
	}
}

/* Other subsystems (e.g., ACPI) can hook this later. */
void (*cpu_idle_hook)(void) = cpu_idle_default;

/*
 * Clear registers on exec
 */
void
exec_setregs(td, entry, stack, ps_strings)
	struct thread *td;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure %edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();
	/*
	 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the
	 * BSP.  See the comments there about why we set them.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS;
#ifndef I386_CPU
	cr0 |= CR0_WP | CR0_AM;
#endif
	load_cr0(cr0);
	load_gs(_udatasel);
}

static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
	    req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
    &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");
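/*
 * Writing the knob stores the new offset via sysctl_handle_int() and then
 * calls resettodr() to re-synchronize the CMOS clock; for example
 * (illustrative value):
 *	sysctl machdep.adjkerntz=-3600
 */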

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
    CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
    CTLFLAG_RD, &bootinfo, bootinfo, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
    CTLFLAG_RW, &wall_cmos_clock, 0, "");

u_long bootdev;		/* not a struct cdev * - encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev * format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */

int private_tss;			/* flag indicating private tss */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern struct user *proc0uarea;
extern vm_offset_t proc0kstack;


/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units) */ },
/* GCODE_SEL	1 Code Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units) */ },
/* GDATA_SEL	2 Data Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units) */ },
/* GPRIV_SEL	3 SMP Per-Processor Private Data Descriptor */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units) */ },
/* GPROC0_SEL	4 Proc 0 Tss Descriptor */
{
	0x0,			/* segment base address */
	sizeof(struct i386tss)-1,/* length */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units) */ },
/* GLDT_SEL	5 LDT Descriptor */
{	(int) ldt,		/* segment base address */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units) */ },
/* GUSERLDT_SEL	6 User LDT Descriptor per process */
{	(int) ldt,		/* segment base address */
	(512 * sizeof(union descriptor)-1),	/* length */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units) */ },
/* GTGATE_SEL	7 Null Descriptor - Placeholder */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units) */ },
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	0x400,			/* segment base address */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units) */ },
/* GPANIC_SEL	9 Panic Tss Descriptor */
{	(int) &dblfault_tss,	/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units) */ },
/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units) */ },
/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units) */ },
/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units) */ },
/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units) */ },
/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units) */ },
};

static struct soft_segment_descriptor ldt_segs[] = {
/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units) */ },
/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units) */ },
/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units) */ },
/* Code Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units) */ },
/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units) */ },
/* Data Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units) */ },
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func) >> 16;
}

#define	IDTVEC(name)	__CONCAT(X,name)
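/*
 * IDTVEC(name) pastes an "X" onto the vector name, so the externs below
 * resolve to the assembler entry points Xdiv, Xdbg, Xnmi, and so on.
 */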

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx, quit;
	uintptr_t func;

	ip = idt;
	db_setup_paging(db_simple_pager, &quit, DB_LINES_PER_PAGE);
	for (idx = 0, quit = 0; idx < NIDT; idx++) {
		func = (ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}
}
#endif

void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{
	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

#define PHYSMAP_SIZE	(2 * 8)
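/*
 * physmap[] holds up to PHYSMAP_SIZE/2 == 8 (base, bound) pairs harvested
 * from BIOS INT 15h/E820 (or from the E801/RTC fallbacks below).
 */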

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(int first)
{
	int i, physmap_idx, pa_indx;
	int hasbrokenint12;
	u_int extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_paddr_t pa, physmap[PHYSMAP_SIZE];
	pt_entry_t *pte;
	char *cp;
	struct bios_smap *smap;

	hasbrokenint12 = 0;
	TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
	bzero(&vmf, sizeof(vmf));
	bzero(physmap, sizeof(physmap));
	basemem = 0;

	/*
	 * Some newer BIOSes have a broken INT 12H implementation that
	 * causes a kernel panic immediately.  In this case, we need to
	 * scan the SMAP with INT 15:E820 first, then determine the base
	 * memory size.
	 */
	if (hasbrokenint12) {
		goto int15e820;
	}

	/*
	 * Perform "base memory" related probes & setup
	 */
	vm86_intcall(0x12, &vmf);
	basemem = vmf.vmf_ax;
	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
		    basemem);
		basemem = 640;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is a `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE)
		pmap_kenter(KERNBASE + pa, pa);

	/*
	 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
	 * the vm86 page table so that vm86 can scribble on them using
	 * the vm86 map too.  XXX: why 2 ways for this and only 1 way for
	 * page 0, at least as initialized here?
	 */
	pte = (pt_entry_t *)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
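	/*
	 * 160 page-table entries of 4K each cover exactly the 640K of
	 * conventional memory, which is why the loop above stops at 160.
	 */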

int15e820:
	/*
	 * Map page 1 R/W into the kernel page table so we can use it
	 * as a buffer.  The kernel will unmap this page later.
	 */
	pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);

	/*
	 * Get the memory map with INT 15:E820.
	 */
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%016llx len=%016llx\n",
			    smap->type, smap->base, smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

#ifndef PAE
		if (smap->base >= 0xffffffff) {
			printf("%uK of memory above 4GB ignored\n",
			    (u_int)(smap->length / 1024));
			goto next_run;
		}
#endif

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run: ;
	} while (vmf.vmf_ebx != 0);

	/*
	 * Perform "base memory" related probes & setup based on SMAP
	 */
	if (basemem == 0) {
		for (i = 0; i <= physmap_idx; i += 2) {
			if (physmap[i] == 0x00000000) {
				basemem = physmap[i + 1] / 1024;
				break;
			}
		}

		/*
		 * XXX this function is horribly organized and has to do the
		 * same things that it does above here.
		 */
		if (basemem == 0)
			basemem = 640;
		if (basemem > 640) {
			printf(
		"Preposterous BIOS basemem of %uK, truncating to 640K\n",
			    basemem);
			basemem = 640;
		}

		/*
		 * Let vm86 scribble on pages between basemem and
		 * ISA_HOLE_START, as above.
		 */
		for (pa = trunc_page(basemem * 1024);
		     pa < ISA_HOLE_START; pa += PAGE_SIZE)
			pmap_kenter(KERNBASE + pa, pa);
		pte = (pt_entry_t *)vm86paddr;
		for (i = basemem / 4; i < 160; i++)
			pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
	}

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed above, try the memory map with INT 15:E801.
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}
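	/*
	 * INT 15h/E801 reports %cx as the number of 1K blocks between 1MB
	 * and 16MB and %dx as the number of 64K blocks above 16MB, hence
	 * extmem = vmf_cx + vmf_dx * 64 above, in kilobytes.
	 */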

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1]);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif
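	/*
	 * The MAXMEM option is specified in kilobytes, while Maxmem counts
	 * 4K pages; dividing by 4 converts between the two.
	 */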
1761
1762 /*
1763 * hw.physmem is a size in bytes; we also allow k, m, and g suffixes
1764 * for the appropriate modifiers. This overrides MAXMEM.
1765 */
1766 if ((cp = getenv("hw.physmem")) != NULL) {
1767 u_int64_t AllowMem, sanity;
1768 char *ep;
1769
1770 sanity = AllowMem = strtouq(cp, &ep, 0);
1771 if ((ep != cp) && (*ep != 0)) {
1772 switch(*ep) {
1773 case 'g':
1774 case 'G':
1775 AllowMem <<= 10;
1776 case 'm':
1777 case 'M':
1778 AllowMem <<= 10;
1779 case 'k':
1780 case 'K':
1781 AllowMem <<= 10;
1782 break;
1783 default:
1784 AllowMem = sanity = 0;
1785 }
1786 if (AllowMem < sanity)
1787 AllowMem = 0;
1788 }
1789 if (AllowMem == 0)
1790 printf("Ignoring invalid memory size of '%s'\n", cp);
1791 else
1792 Maxmem = atop(AllowMem);
1793 freeenv(cp);
1794 }
1795
1796 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1797 (boothowto & RB_VERBOSE))
1798 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1799
1800 /*
1801 * If Maxmem has been increased beyond what the system has detected,
1802 * extend the last memory segment to the new limit.
1803 */
1804 if (atop(physmap[physmap_idx + 1]) < Maxmem)
1805 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
1806
1807 /* call pmap initialization to make new kernel address space */
1808 pmap_bootstrap(first, 0);
1809
1810 /*
1811 * Size up each available chunk of physical memory.
1812 */
1813 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1814 pa_indx = 0;
1815 phys_avail[pa_indx++] = physmap[0];
1816 phys_avail[pa_indx] = physmap[0];
1817 pte = CMAP1;
1818
1819 /*
1820 * physmap is in bytes, so when converting to page boundaries,
1821 * round up the start address and round down the end address.
1822 */
1823 for (i = 0; i <= physmap_idx; i += 2) {
1824 vm_paddr_t end;
1825
1826 end = ptoa((vm_paddr_t)Maxmem);
1827 if (physmap[i + 1] < end)
1828 end = trunc_page(physmap[i + 1]);
1829 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1830 int tmp, page_bad;
1831 int *ptr = (int *)CADDR1;
1832
1833 /*
1834 * block out kernel memory as not available.
1835 */
1836 if (pa >= KERNLOAD && pa < first)
1837 continue;
1838
1839 page_bad = FALSE;
1840
1841 /*
1842 * map page into kernel: valid, read/write, non-cacheable
1843 */
1844 *pte = pa | PG_V | PG_RW | PG_N;
1845 invltlb();
1846
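/*
 * Save the original contents of the word so it can be restored once
 * the pattern tests below are done.
 */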
1847 tmp = *(int *)ptr;
1848 /*
1849 * Test for alternating 1's and 0's
1850 */
1851 *(volatile int *)ptr = 0xaaaaaaaa;
1852 if (*(volatile int *)ptr != 0xaaaaaaaa) {
1853 page_bad = TRUE;
1854 }
1855 /*
1856 * Test for alternating 0's and 1's
1857 */
1858 *(volatile int *)ptr = 0x55555555;
1859 if (*(volatile int *)ptr != 0x55555555) {
1860 page_bad = TRUE;
1861 }
1862 /*
1863 * Test for all 1's
1864 */
1865 *(volatile int *)ptr = 0xffffffff;
1866 if (*(volatile int *)ptr != 0xffffffff) {
1867 page_bad = TRUE;
1868 }
1869 /*
1870 * Test for all 0's
1871 */
1872 *(volatile int *)ptr = 0x0;
1873 if (*(volatile int *)ptr != 0x0) {
1874 page_bad = TRUE;
1875 }
1876 /*
1877 * Restore original value.
1878 */
1879 *(int *)ptr = tmp;
1880
1881 /*
1882 * Adjust array of valid/good pages.
1883 */
1884 if (page_bad == TRUE) {
1885 continue;
1886 }
1887 /*
1888 * If this good page is a continuation of the
1889 * previous set of good pages, then just increase
1890 * the end pointer. Otherwise start a new chunk.
1891 * Note that the "end" entry points one beyond the last
1892 * valid address, making the range >= start and < end.
1893 * If we're also doing a speculative memory
1894 * test and we are at or past the end, bump up Maxmem
1895 * so that we keep going. The first bad page
1896 * will terminate the loop.
1897 */
1898 if (phys_avail[pa_indx] == pa) {
1899 phys_avail[pa_indx] += PAGE_SIZE;
1900 } else {
1901 pa_indx++;
1902 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
1903 printf(
1904 "Too many holes in the physical address space, giving up\n");
1905 pa_indx--;
1906 break;
1907 }
1908 phys_avail[pa_indx++] = pa; /* start */
1909 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
1910 }
1911 physmem++;
1912 }
1913 }
1914 *pte = 0;
1915 invltlb();
1916
1917 /*
1918 * XXX
1919 * The last chunk must contain at least one page plus the message
1920 * buffer to avoid complicating other code (message buffer address
1921 * calculation, etc.).
1922 */
1923 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
1924 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
1925 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
1926 phys_avail[pa_indx--] = 0;
1927 phys_avail[pa_indx--] = 0;
1928 }
1929
1930 Maxmem = atop(phys_avail[pa_indx]);
1931
1932 /* Trim off space for the message buffer. */
1933 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
1934
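/*
 * avail_end marks the start of the space reserved for the message
 * buffer; init386() maps msgbufp at this address.
 */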
1935 avail_end = phys_avail[pa_indx];
1936 }
1937
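/*
 * init386() is called from locore with "first" set to the first physical
 * address available beyond the kernel; getmemsize() later excludes the
 * range from KERNLOAD up to "first" from the free page list (see the
 * pa >= KERNLOAD check above).
 */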
1938 void
1939 init386(first)
1940 int first;
1941 {
1942 struct gate_descriptor *gdp;
1943 int gsel_tss, metadata_missing, off, x;
1944 struct pcpu *pc;
1945
1946 proc0.p_uarea = proc0uarea;
1947 thread0.td_kstack = proc0kstack;
1948 thread0.td_pcb = (struct pcb *)
1949 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
1950
1951 /*
1952 * This may be done better later if it gets more high level
1953 * components in it. If so just link td->td_proc here.
1954 */
1955 proc_linkup(&proc0, &ksegrp0, &thread0);
1956
1957 metadata_missing = 0;
1958 if (bootinfo.bi_modulep) {
1959 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
1960 preload_bootstrap_relocate(KERNBASE);
1961 } else {
1962 metadata_missing = 1;
1963 }
1964 if (envmode == 1)
1965 kern_envp = static_env;
1966 else if (bootinfo.bi_envp)
1967 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
1968
1969 /* Init basic tunables, hz etc */
1970 init_param1();
1971
1972 /*
1973 * Make GDT memory segments.  The code segment goes up to the end of
1974 * the page containing etext; the data segment goes to the end of
1975 * the address space.
1976 */
1977 /*
1978 * XXX text protection is temporarily (?) disabled. The limit was
1979 * i386_btop(round_page(etext)) - 1.
1980 */
1981 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
1982 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
1983 #ifdef SMP
1984 pc = &SMP_prvspace[0].pcpu;
1985 gdt_segs[GPRIV_SEL].ssd_limit =
1986 atop(sizeof(struct privatespace) - 1);
1987 #else
1988 pc = &__pcpu;
1989 gdt_segs[GPRIV_SEL].ssd_limit =
1990 atop(sizeof(struct pcpu) - 1);
1991 #endif
1992 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
1993 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
1994
1995 for (x = 0; x < NGDT; x++)
1996 ssdtosd(&gdt_segs[x], &gdt[x].sd);
1997
1998 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
1999 r_gdt.rd_base = (int) gdt;
2000 lgdt(&r_gdt);
2001
2002 pcpu_init(pc, 0, sizeof(struct pcpu));
2003 PCPU_SET(prvspace, pc);
2004 PCPU_SET(curthread, &thread0);
2005 PCPU_SET(curpcb, thread0.td_pcb);
2006
2007 /*
2008 * Initialize mutexes.
2009 *
2010 * icu_lock: in order to allow an interrupt to occur in a critical
2011 * section, to set pcpu->ipending (etc...) properly, we
2012 * must be able to get the icu lock, so it can't be
2013 * under witness.
2014 */
2015 mutex_init();
2016 mtx_init(&clock_lock, "clk", NULL, MTX_SPIN);
2017 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
2018
2019 /* make ldt memory segments */
2020 /*
2021 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it
2022 * should be spelled ...MAX_USER...
2023 */
2024 ldt_segs[LUCODE_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
2025 ldt_segs[LUDATA_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
2026 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2027 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2028
2029 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2030 lldt(_default_ldt);
2031 PCPU_SET(currentldt, _default_ldt);
2032
2033 /* exceptions */
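/*
 * SDT_SYS386TGT installs a trap gate (interrupts stay enabled) and
 * SDT_SYS386IGT an interrupt gate (IF cleared on entry); gates whose
 * DPL is SEL_UPL (breakpoint, overflow, int 0x80) may be raised
 * directly from user mode with the INT instruction.
 */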
2034 for (x = 0; x < NIDT; x++)
2035 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2036 GSEL(GCODE_SEL, SEL_KPL));
2037 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2038 GSEL(GCODE_SEL, SEL_KPL));
2039 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2040 GSEL(GCODE_SEL, SEL_KPL));
2041 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL,
2042 GSEL(GCODE_SEL, SEL_KPL));
2043 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2044 GSEL(GCODE_SEL, SEL_KPL));
2045 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2046 GSEL(GCODE_SEL, SEL_KPL));
2047 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2048 GSEL(GCODE_SEL, SEL_KPL));
2049 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2050 GSEL(GCODE_SEL, SEL_KPL));
2051 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
2052 GSEL(GCODE_SEL, SEL_KPL));
2053 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2054 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2055 GSEL(GCODE_SEL, SEL_KPL));
2056 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2057 GSEL(GCODE_SEL, SEL_KPL));
2058 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2059 GSEL(GCODE_SEL, SEL_KPL));
2060 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2061 GSEL(GCODE_SEL, SEL_KPL));
2062 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2063 GSEL(GCODE_SEL, SEL_KPL));
2064 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2065 GSEL(GCODE_SEL, SEL_KPL));
2066 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2067 GSEL(GCODE_SEL, SEL_KPL));
2068 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2069 GSEL(GCODE_SEL, SEL_KPL));
2070 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2071 GSEL(GCODE_SEL, SEL_KPL));
2072 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2073 GSEL(GCODE_SEL, SEL_KPL));
2074 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2075 GSEL(GCODE_SEL, SEL_KPL));
2076
2077 r_idt.rd_limit = sizeof(idt0) - 1;
2078 r_idt.rd_base = (int) idt;
2079 lidt(&r_idt);
2080
2081 /*
2082 * Initialize the console before we print anything out.
2083 */
2084 cninit();
2085
2086 if (metadata_missing)
2087 printf("WARNING: loader(8) metadata is missing!\n");
2088
2089 #ifdef DEV_ISA
2090 atpic_startup();
2091 #endif
2092
2093 #ifdef DDB
2094 ksym_start = bootinfo.bi_symtab;
2095 ksym_end = bootinfo.bi_esymtab;
2096 #endif
2097
2098 kdb_init();
2099
2100 #ifdef KDB
2101 if (boothowto & RB_KDB)
2102 kdb_enter("Boot flags requested debugger");
2103 #endif
2104
2105 finishidentcpu(); /* Final stage of CPU initialization */
2106 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2107 GSEL(GCODE_SEL, SEL_KPL));
2108 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2109 GSEL(GCODE_SEL, SEL_KPL));
2110 initializecpu(); /* Initialize CPU registers */
2111
2112 /* make an initial tss so cpu can get interrupt stack on syscall! */
2113 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2114 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2115 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
2116 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2117 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2118 private_tss = 0;
2119 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
2120 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
2121 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
2122 ltr(gsel_tss);
2123
2124 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2125 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2126 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2127 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2128 #ifdef PAE
2129 dblfault_tss.tss_cr3 = (int)IdlePDPT;
2130 #else
2131 dblfault_tss.tss_cr3 = (int)IdlePTD;
2132 #endif
2133 dblfault_tss.tss_eip = (int)dblfault_handler;
2134 dblfault_tss.tss_eflags = PSL_KERNEL;
2135 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2136 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2137 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2138 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2139 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2140
2141 vm86_initialize();
2142 getmemsize(first);
2143 init_param2(physmem);
2144
2145 /* now running on new page tables, configured, and u/iom is accessible */
2146
2147 /* Map the message buffer. */
2148 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
2149 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
2150
2151 msgbufinit(msgbufp, MSGBUF_SIZE);
2152
2153 /* make a call gate to reenter kernel with */
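/*
 * This LDT entry implements the historical "lcall $7,$0" system call
 * convention; gd_stkcpy = 1 copies one argument word from the user
 * stack to the kernel stack during the call-gate transition.
 */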
2154 gdp = &ldt[LSYS5CALLS_SEL].gd;
2155
2156 x = (int) &IDTVEC(lcall_syscall);
2157 gdp->gd_looffset = x;
2158 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
2159 gdp->gd_stkcpy = 1;
2160 gdp->gd_type = SDT_SYS386CGT;
2161 gdp->gd_dpl = SEL_UPL;
2162 gdp->gd_p = 1;
2163 gdp->gd_hioffset = x >> 16;
2164
2165 /* XXX does this work? */
2166 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2167 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
2168
2169 /* transfer to user mode */
2170
2171 _ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
2172 _udatasel = LSEL(LUDATA_SEL, SEL_UPL);
2173
2174 /* setup proc 0's pcb */
2175 thread0.td_pcb->pcb_flags = 0; /* XXXKSE */
2176 #ifdef PAE
2177 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
2178 #else
2179 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2180 #endif
2181 thread0.td_pcb->pcb_ext = 0;
2182 thread0.td_frame = &proc0_tf;
2183 }
2184
2185 void
2186 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
2187 {
2188
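/*
 * 0xffffffff is presumably a "not yet known" sentinel; the ACPI CPU
 * code is expected to fill in the real ACPI id later.
 */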
2189 pcpu->pc_acpi_id = 0xffffffff;
2190 }
2191
2192 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2193 static void f00f_hack(void *unused);
2194 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL)
2195
2196 static void
2197 f00f_hack(void *unused)
2198 {
2199 struct gate_descriptor *new_idt;
2200 vm_offset_t tmp;
2201
2202 if (!has_f00f_bug)
2203 return;
2204
2205 GIANT_REQUIRED;
2206
2207 printf("Intel Pentium detected, installing workaround for F00F bug\n");
2208
2209 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
2210 if (tmp == 0)
2211 panic("kmem_alloc returned 0");
2212
2213 /* Put the problematic entry (#6) at the end of the lower page. */
2214 new_idt = (struct gate_descriptor*)
2215 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
2216 bcopy(idt, new_idt, sizeof(idt0));
2217 r_idt.rd_base = (u_int)new_idt;
2218 lidt(&r_idt);
2219 idt = new_idt;
2220 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
2221 VM_PROT_READ, FALSE) != KERN_SUCCESS)
2222 panic("vm_map_protect failed");
2223 }
2224 #endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
2225
2226 /*
2227 * Construct a PCB from a trapframe. This is called from kdb_trap() where
2228 * we want to start a backtrace from the function that caused us to enter
2229 * the debugger. We have the context in the trapframe, but base the trace
2230 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
2231 * enough for a backtrace.
2232 */
2233 void
2234 makectx(struct trapframe *tf, struct pcb *pcb)
2235 {
2236
2237 pcb->pcb_edi = tf->tf_edi;
2238 pcb->pcb_esi = tf->tf_esi;
2239 pcb->pcb_ebp = tf->tf_ebp;
2240 pcb->pcb_ebx = tf->tf_ebx;
2241 pcb->pcb_eip = tf->tf_eip;
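/*
 * If the trap came from user mode (ISPL != 0) the CPU pushed esp/ss
 * and tf_esp is valid; for a kernel-mode trap those two words were not
 * pushed, so the pre-trap stack pointer is the end of the trapframe
 * minus their 8 bytes.
 */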
2242 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
2243 }
2244
2245 int
2246 ptrace_set_pc(struct thread *td, u_long addr)
2247 {
2248
2249 td->td_frame->tf_eip = addr;
2250 return (0);
2251 }
2252
2253 int
2254 ptrace_single_step(struct thread *td)
2255 {
2256 td->td_frame->tf_eflags |= PSL_T;
2257 return (0);
2258 }
2259
2260 int
2261 ptrace_clear_single_step(struct thread *td)
2262 {
2263 td->td_frame->tf_eflags &= ~PSL_T;
2264 return (0);
2265 }
2266
2267 int
2268 fill_regs(struct thread *td, struct reg *regs)
2269 {
2270 struct pcb *pcb;
2271 struct trapframe *tp;
2272
2273 tp = td->td_frame;
2274 regs->r_fs = tp->tf_fs;
2275 regs->r_es = tp->tf_es;
2276 regs->r_ds = tp->tf_ds;
2277 regs->r_edi = tp->tf_edi;
2278 regs->r_esi = tp->tf_esi;
2279 regs->r_ebp = tp->tf_ebp;
2280 regs->r_ebx = tp->tf_ebx;
2281 regs->r_edx = tp->tf_edx;
2282 regs->r_ecx = tp->tf_ecx;
2283 regs->r_eax = tp->tf_eax;
2284 regs->r_eip = tp->tf_eip;
2285 regs->r_cs = tp->tf_cs;
2286 regs->r_eflags = tp->tf_eflags;
2287 regs->r_esp = tp->tf_esp;
2288 regs->r_ss = tp->tf_ss;
2289 pcb = td->td_pcb;
2290 regs->r_gs = pcb->pcb_gs;
2291 return (0);
2292 }
2293
2294 int
2295 set_regs(struct thread *td, struct reg *regs)
2296 {
2297 struct pcb *pcb;
2298 struct trapframe *tp;
2299
2300 tp = td->td_frame;
2301 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
2302 !CS_SECURE(regs->r_cs))
2303 return (EINVAL);
2304 tp->tf_fs = regs->r_fs;
2305 tp->tf_es = regs->r_es;
2306 tp->tf_ds = regs->r_ds;
2307 tp->tf_edi = regs->r_edi;
2308 tp->tf_esi = regs->r_esi;
2309 tp->tf_ebp = regs->r_ebp;
2310 tp->tf_ebx = regs->r_ebx;
2311 tp->tf_edx = regs->r_edx;
2312 tp->tf_ecx = regs->r_ecx;
2313 tp->tf_eax = regs->r_eax;
2314 tp->tf_eip = regs->r_eip;
2315 tp->tf_cs = regs->r_cs;
2316 tp->tf_eflags = regs->r_eflags;
2317 tp->tf_esp = regs->r_esp;
2318 tp->tf_ss = regs->r_ss;
2319 pcb = td->td_pcb;
2320 pcb->pcb_gs = regs->r_gs;
2321 return (0);
2322 }
2323
2324 #ifdef CPU_ENABLE_SSE
2325 static void
2326 fill_fpregs_xmm(sv_xmm, sv_87)
2327 struct savexmm *sv_xmm;
2328 struct save87 *sv_87;
2329 {
2330 register struct env87 *penv_87 = &sv_87->sv_env;
2331 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2332 int i;
2333
2334 bzero(sv_87, sizeof(*sv_87));
2335
2336 /* FPU control/status */
2337 penv_87->en_cw = penv_xmm->en_cw;
2338 penv_87->en_sw = penv_xmm->en_sw;
2339 penv_87->en_tw = penv_xmm->en_tw;
2340 penv_87->en_fip = penv_xmm->en_fip;
2341 penv_87->en_fcs = penv_xmm->en_fcs;
2342 penv_87->en_opcode = penv_xmm->en_opcode;
2343 penv_87->en_foo = penv_xmm->en_foo;
2344 penv_87->en_fos = penv_xmm->en_fos;
2345
2346 /* FPU registers */
2347 for (i = 0; i < 8; ++i)
2348 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
2349 }
2350
2351 static void
2352 set_fpregs_xmm(sv_87, sv_xmm)
2353 struct save87 *sv_87;
2354 struct savexmm *sv_xmm;
2355 {
2356 register struct env87 *penv_87 = &sv_87->sv_env;
2357 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2358 int i;
2359
2360 /* FPU control/status */
2361 penv_xmm->en_cw = penv_87->en_cw;
2362 penv_xmm->en_sw = penv_87->en_sw;
2363 penv_xmm->en_tw = penv_87->en_tw;
2364 penv_xmm->en_fip = penv_87->en_fip;
2365 penv_xmm->en_fcs = penv_87->en_fcs;
2366 penv_xmm->en_opcode = penv_87->en_opcode;
2367 penv_xmm->en_foo = penv_87->en_foo;
2368 penv_xmm->en_fos = penv_87->en_fos;
2369
2370 /* FPU registers */
2371 for (i = 0; i < 8; ++i)
2372 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
2373 }
2374 #endif /* CPU_ENABLE_SSE */
2375
2376 int
2377 fill_fpregs(struct thread *td, struct fpreg *fpregs)
2378 {
2379 #ifdef CPU_ENABLE_SSE
2380 if (cpu_fxsr) {
2381 fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
2382 (struct save87 *)fpregs);
2383 return (0);
2384 }
2385 #endif /* CPU_ENABLE_SSE */
2386 bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
2387 return (0);
2388 }
2389
2390 int
2391 set_fpregs(struct thread *td, struct fpreg *fpregs)
2392 {
2393 #ifdef CPU_ENABLE_SSE
2394 if (cpu_fxsr) {
2395 set_fpregs_xmm((struct save87 *)fpregs,
2396 &td->td_pcb->pcb_save.sv_xmm);
2397 return (0);
2398 }
2399 #endif /* CPU_ENABLE_SSE */
2400 bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
2401 return (0);
2402 }
2403
2404 /*
2405 * Get machine context.
2406 */
2407 int
2408 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
2409 {
2410 struct trapframe *tp;
2411
2412 tp = td->td_frame;
2413
2414 PROC_LOCK(curthread->td_proc);
2415 mcp->mc_onstack = sigonstack(tp->tf_esp);
2416 PROC_UNLOCK(curthread->td_proc);
2417 mcp->mc_gs = td->td_pcb->pcb_gs;
2418 mcp->mc_fs = tp->tf_fs;
2419 mcp->mc_es = tp->tf_es;
2420 mcp->mc_ds = tp->tf_ds;
2421 mcp->mc_edi = tp->tf_edi;
2422 mcp->mc_esi = tp->tf_esi;
2423 mcp->mc_ebp = tp->tf_ebp;
2424 mcp->mc_isp = tp->tf_isp;
2425 if (flags & GET_MC_CLEAR_RET) {
2426 mcp->mc_eax = 0;
2427 mcp->mc_edx = 0;
2428 } else {
2429 mcp->mc_eax = tp->tf_eax;
2430 mcp->mc_edx = tp->tf_edx;
2431 }
2432 mcp->mc_ebx = tp->tf_ebx;
2433 mcp->mc_ecx = tp->tf_ecx;
2434 mcp->mc_eip = tp->tf_eip;
2435 mcp->mc_cs = tp->tf_cs;
2436 mcp->mc_eflags = tp->tf_eflags;
2437 mcp->mc_esp = tp->tf_esp;
2438 mcp->mc_ss = tp->tf_ss;
2439 mcp->mc_len = sizeof(*mcp);
2440 get_fpcontext(td, mcp);
2441 return (0);
2442 }
2443
2444 /*
2445 * Set machine context.
2446 *
2447 * We only set the user-modifiable flags, and we won't touch
2448 * the cs selector.
2449 */
2450 int
2451 set_mcontext(struct thread *td, const mcontext_t *mcp)
2452 {
2453 struct trapframe *tp;
2454 int eflags, ret;
2455
2456 tp = td->td_frame;
2457 if (mcp->mc_len != sizeof(*mcp))
2458 return (EINVAL);
2459 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
2460 (tp->tf_eflags & ~PSL_USERCHANGE);
2461 if ((ret = set_fpcontext(td, mcp)) == 0) {
2462 tp->tf_fs = mcp->mc_fs;
2463 tp->tf_es = mcp->mc_es;
2464 tp->tf_ds = mcp->mc_ds;
2465 tp->tf_edi = mcp->mc_edi;
2466 tp->tf_esi = mcp->mc_esi;
2467 tp->tf_ebp = mcp->mc_ebp;
2468 tp->tf_ebx = mcp->mc_ebx;
2469 tp->tf_edx = mcp->mc_edx;
2470 tp->tf_ecx = mcp->mc_ecx;
2471 tp->tf_eax = mcp->mc_eax;
2472 tp->tf_eip = mcp->mc_eip;
2473 tp->tf_eflags = eflags;
2474 tp->tf_esp = mcp->mc_esp;
2475 tp->tf_ss = mcp->mc_ss;
2476 td->td_pcb->pcb_gs = mcp->mc_gs;
2477 ret = 0;
2478 }
2479 return (ret);
2480 }
2481
2482 static void
2483 get_fpcontext(struct thread *td, mcontext_t *mcp)
2484 {
2485 #ifndef DEV_NPX
2486 mcp->mc_fpformat = _MC_FPFMT_NODEV;
2487 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
2488 #else
2489 union savefpu *addr;
2490
2491 /*
2492 * XXX mc_fpstate might be misaligned, since its declaration is not
2493 * unportabilized using __attribute__((aligned(16))) like the
2494 * declaration of struct savemm, and anyway, alignment doesn't work
2495 * for auto variables since we don't use gcc's pessimal stack
2496 * alignment. Work around this by abusing the spare fields after
2497 * mcp->mc_fpstate.
2498 *
2499 * XXX unpessimize most cases by only aligning when fxsave might be
2500 * called, although this requires knowing too much about
2501 * npxgetregs()'s internals.
2502 */
2503 addr = (union savefpu *)&mcp->mc_fpstate;
2504 if (td == PCPU_GET(fpcurthread) &&
2505 #ifdef CPU_ENABLE_SSE
2506 cpu_fxsr &&
2507 #endif
2508 ((uintptr_t)(void *)addr & 0xF)) {
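/*
 * Bump the pointer in 4-byte steps through the spare space following
 * mcp->mc_fpstate until it is 16-byte aligned, as fxsave requires.
 */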
2509 do
2510 addr = (void *)((char *)addr + 4);
2511 while ((uintptr_t)(void *)addr & 0xF);
2512 }
2513 mcp->mc_ownedfp = npxgetregs(td, addr);
2514 if (addr != (union savefpu *)&mcp->mc_fpstate) {
2515 bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
2516 bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
2517 }
2518 mcp->mc_fpformat = npxformat();
2519 #endif
2520 }
2521
2522 static int
2523 set_fpcontext(struct thread *td, const mcontext_t *mcp)
2524 {
2525 union savefpu *addr;
2526
2527 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
2528 return (0);
2529 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
2530 mcp->mc_fpformat != _MC_FPFMT_XMM)
2531 return (EINVAL);
2532 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
2533 /* We don't care what state is left in the FPU or PCB. */
2534 fpstate_drop(td);
2535 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
2536 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
2537 /* XXX align as above. */
2538 addr = (union savefpu *)&mcp->mc_fpstate;
2539 if (td == PCPU_GET(fpcurthread) &&
2540 #ifdef CPU_ENABLE_SSE
2541 cpu_fxsr &&
2542 #endif
2543 ((uintptr_t)(void *)addr & 0xF)) {
2544 do
2545 addr = (void *)((char *)addr + 4);
2546 while ((uintptr_t)(void *)addr & 0xF);
2547 bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
2548 }
2549 #ifdef DEV_NPX
2550 /*
2551 * XXX we violate the dubious requirement that npxsetregs()
2552 * be called with interrupts disabled.
2553 */
2554 npxsetregs(td, addr);
2555 #endif
2556 /*
2557 * Don't bother putting things back where they were in the
2558 * misaligned case, since we know that the caller won't use
2559 * them again.
2560 */
2561 } else
2562 return (EINVAL);
2563 return (0);
2564 }
2565
2566 static void
2567 fpstate_drop(struct thread *td)
2568 {
2569 register_t s;
2570
2571 s = intr_disable();
2572 #ifdef DEV_NPX
2573 if (PCPU_GET(fpcurthread) == td)
2574 npxdrop();
2575 #endif
2576 /*
2577 * XXX force a full drop of the npx. The above only drops it if we
2578 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
2579 *
2580 * XXX I don't much like npxgetregs()'s semantics of doing a full
2581 * drop. Dropping only to the pcb matches fnsave's behaviour.
2582 * We only need to drop to !PCB_INITDONE in sendsig(). But
2583 * sendsig() is the only caller of npxgetregs()... perhaps we just
2584 * have too many layers.
2585 */
2586 curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
2587 intr_restore(s);
2588 }
2589
2590 int
2591 fill_dbregs(struct thread *td, struct dbreg *dbregs)
2592 {
2593 struct pcb *pcb;
2594
2595 if (td == NULL) {
2596 dbregs->dr[0] = rdr0();
2597 dbregs->dr[1] = rdr1();
2598 dbregs->dr[2] = rdr2();
2599 dbregs->dr[3] = rdr3();
2600 dbregs->dr[4] = rdr4();
2601 dbregs->dr[5] = rdr5();
2602 dbregs->dr[6] = rdr6();
2603 dbregs->dr[7] = rdr7();
2604 } else {
2605 pcb = td->td_pcb;
2606 dbregs->dr[0] = pcb->pcb_dr0;
2607 dbregs->dr[1] = pcb->pcb_dr1;
2608 dbregs->dr[2] = pcb->pcb_dr2;
2609 dbregs->dr[3] = pcb->pcb_dr3;
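/*
 * dr4 and dr5 are reserved (they alias dr6/dr7 when CR4.DE is clear),
 * so report them as zero.
 */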
2610 dbregs->dr[4] = 0;
2611 dbregs->dr[5] = 0;
2612 dbregs->dr[6] = pcb->pcb_dr6;
2613 dbregs->dr[7] = pcb->pcb_dr7;
2614 }
2615 return (0);
2616 }
2617
2618 int
2619 set_dbregs(struct thread *td, struct dbreg *dbregs)
2620 {
2621 struct pcb *pcb;
2622 int i;
2623 u_int32_t mask1, mask2;
2624
2625 if (td == NULL) {
2626 load_dr0(dbregs->dr[0]);
2627 load_dr1(dbregs->dr[1]);
2628 load_dr2(dbregs->dr[2]);
2629 load_dr3(dbregs->dr[3]);
2630 load_dr4(dbregs->dr[4]);
2631 load_dr5(dbregs->dr[5]);
2632 load_dr6(dbregs->dr[6]);
2633 load_dr7(dbregs->dr[7]);
2634 } else {
2635 /*
2636 * Don't let an illegal value for dr7 get set. Specifically,
2637 * check for undefined settings. Setting these bit patterns
2638 * results in undefined behaviour and can lead to an unexpected
2639 * TRCTRAP.
2640 */
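/*
 * The loop walks the 2-bit R/W and LEN fields in dr7 (bits 16-31) and
 * rejects the encoding 10b, which selects I/O breakpoints or 8-byte
 * lengths that are undefined here.
 */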
2641 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8;
2642 i++, mask1 <<= 2, mask2 <<= 2)
2643 if ((dbregs->dr[7] & mask1) == mask2)
2644 return (EINVAL);
2645
2646 pcb = td->td_pcb;
2647
2648 /*
2649 * Don't let a process set a breakpoint that is not within the
2650 * process's address space. If a process could do this, it
2651 * could halt the system by setting a breakpoint in the kernel
2652 * (if ddb was enabled). Thus, we need to check to make sure
2653 * that no breakpoints are being enabled for addresses outside
2654 * the process's address space, unless, perhaps, we were called by
2655 * uid 0.
2656 *
2657 * XXX - what about when the watched area of the user's
2658 * address space is written into from within the kernel
2659 * ... wouldn't that still cause a breakpoint to be generated
2660 * from within kernel mode?
2661 */
2662
2663 if (suser(td) != 0) {
2664 if (dbregs->dr[7] & 0x3) {
2665 /* dr0 is enabled */
2666 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
2667 return (EINVAL);
2668 }
2669
2670 if (dbregs->dr[7] & (0x3<<2)) {
2671 /* dr1 is enabled */
2672 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
2673 return (EINVAL);
2674 }
2675
2676 if (dbregs->dr[7] & (0x3<<4)) {
2677 /* dr2 is enabled */
2678 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
2679 return (EINVAL);
2680 }
2681
2682 if (dbregs->dr[7] & (0x3<<6)) {
2683 /* dr3 is enabled */
2684 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
2685 return (EINVAL);
2686 }
2687 }
2688
2689 pcb->pcb_dr0 = dbregs->dr[0];
2690 pcb->pcb_dr1 = dbregs->dr[1];
2691 pcb->pcb_dr2 = dbregs->dr[2];
2692 pcb->pcb_dr3 = dbregs->dr[3];
2693 pcb->pcb_dr6 = dbregs->dr[6];
2694 pcb->pcb_dr7 = dbregs->dr[7];
2695
2696 pcb->pcb_flags |= PCB_DBREGS;
2697 }
2698
2699 return (0);
2700 }
2701
2702 /*
2703 * Return > 0 if a hardware breakpoint has been hit, and the
2704 * breakpoint was in user space. Return 0, otherwise.
2705 */
2706 int
2707 user_dbreg_trap(void)
2708 {
2709 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
2710 u_int32_t bp; /* breakpoint bits extracted from dr6 */
2711 int nbp; /* number of breakpoints that triggered */
2712 caddr_t addr[4]; /* breakpoint addresses */
2713 int i;
2714
2715 dr7 = rdr7();
2716 if ((dr7 & 0x000000ff) == 0) {
2717 /*
2718 * none of the per-breakpoint enable bits (L0-L3, G0-G3) in the
2719 * dr7 register are set, thus the trap couldn't have been caused
2720 * by the hardware debug registers
2721 */
2722 return 0;
2723 }
2724
2725 nbp = 0;
2726 dr6 = rdr6();
2727 bp = dr6 & 0x0000000f;
2728
2729 if (!bp) {
2730 /*
2731 * None of the breakpoint bits are set, meaning this
2732 * trap was not caused by any of the debug registers
2733 */
2734 return 0;
2735 }
2736
2737 /*
2738 * at least one of the breakpoints was hit; check to see
2739 * which ones and whether any of them are user space addresses
2740 */
2741
2742 if (bp & 0x01) {
2743 addr[nbp++] = (caddr_t)rdr0();
2744 }
2745 if (bp & 0x02) {
2746 addr[nbp++] = (caddr_t)rdr1();
2747 }
2748 if (bp & 0x04) {
2749 addr[nbp++] = (caddr_t)rdr2();
2750 }
2751 if (bp & 0x08) {
2752 addr[nbp++] = (caddr_t)rdr3();
2753 }
2754
2755 for (i = 0; i < nbp; i++) {
2756 if (addr[i] <
2757 (caddr_t)VM_MAXUSER_ADDRESS) {
2758 /*
2759 * addr[i] is in user space
2760 */
2761 return nbp;
2762 }
2763 }
2764
2765 /*
2766 * None of the breakpoints are in user space.
2767 */
2768 return 0;
2769 }
2770
2771 #ifndef DEV_APIC
2772 #include <machine/apicvar.h>
2773
2774 /*
2775 * Provide stub functions so that the MADT APIC enumerator in the acpi
2776 * kernel module will link against a kernel without 'device apic'.
2777 *
2778 * XXX - This is a gross hack.
2779 */
2780 void
2781 apic_register_enumerator(struct apic_enumerator *enumerator)
2782 {
2783 }
2784
2785 void *
2786 ioapic_create(uintptr_t addr, int32_t id, int intbase)
2787 {
2788 return (NULL);
2789 }
2790
2791 int
2792 ioapic_disable_pin(void *cookie, u_int pin)
2793 {
2794 return (ENXIO);
2795 }
2796
2797 void
2798 ioapic_enable_mixed_mode(void)
2799 {
2800 }
2801
2802 int
2803 ioapic_get_vector(void *cookie, u_int pin)
2804 {
2805 return (-1);
2806 }
2807
2808 void
2809 ioapic_register(void *cookie)
2810 {
2811 }
2812
2813 int
2814 ioapic_remap_vector(void *cookie, u_int pin, int vector)
2815 {
2816 return (ENXIO);
2817 }
2818
2819 int
2820 ioapic_set_extint(void *cookie, u_int pin)
2821 {
2822 return (ENXIO);
2823 }
2824
2825 int
2826 ioapic_set_nmi(void *cookie, u_int pin)
2827 {
2828 return (ENXIO);
2829 }
2830
2831 int
2832 ioapic_set_polarity(void *cookie, u_int pin, enum intr_polarity pol)
2833 {
2834 return (ENXIO);
2835 }
2836
2837 int
2838 ioapic_set_triggermode(void *cookie, u_int pin, enum intr_trigger trigger)
2839 {
2840 return (ENXIO);
2841 }
2842
2843 void
2844 lapic_create(u_int apic_id, int boot_cpu)
2845 {
2846 }
2847
2848 void
2849 lapic_init(uintptr_t addr)
2850 {
2851 }
2852
2853 int
2854 lapic_set_lvt_mode(u_int apic_id, u_int lvt, u_int32_t mode)
2855 {
2856 return (ENXIO);
2857 }
2858
2859 int
2860 lapic_set_lvt_polarity(u_int apic_id, u_int lvt, enum intr_polarity pol)
2861 {
2862 return (ENXIO);
2863 }
2864
2865 int
2866 lapic_set_lvt_triggermode(u_int apic_id, u_int lvt, enum intr_trigger trigger)
2867 {
2868 return (ENXIO);
2869 }
2870 #endif
2871
2872 #ifdef KDB
2873
2874 /*
2875 * Provide inb() and outb() as functions. They are normally only
2876 * available as macros calling inlined functions, thus cannot be
2877 * called from the debugger.
2878 *
2879 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
2880 */
2881
2882 #undef inb
2883 #undef outb
2884
2885 /* silence compiler warnings */
2886 u_char inb(u_int);
2887 void outb(u_int, u_char);
2888
2889 u_char
2890 inb(u_int port)
2891 {
2892 u_char data;
2893 /*
2894 * We use %%dx and not %1 here because i/o is done at %dx and not at
2895 * %edx, while gcc generates inferior code (movw instead of movl)
2896 * if we tell it to load (u_short) port.
2897 */
2898 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
2899 return (data);
2900 }
2901
2902 void
2903 outb(u_int port, u_char data)
2904 {
2905 u_char al;
2906 /*
2907 * Use an unnecessary assignment to help gcc's register allocator.
2908 * This makes a large difference for gcc-1.40 and a tiny difference
2909 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for
2910 * best results. gcc-2.6.0 can't handle this.
2911 */
2912 al = data;
2913 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
2914 }
2915
2916 #endif /* KDB */