1 /*-
2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * William Jolitz.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
38 */
39
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42
43 #include "opt_apic.h"
44 #include "opt_atalk.h"
45 #include "opt_compat.h"
46 #include "opt_cpu.h"
47 #include "opt_ddb.h"
48 #include "opt_inet.h"
49 #include "opt_ipx.h"
50 #include "opt_isa.h"
51 #include "opt_kstack_pages.h"
52 #include "opt_maxmem.h"
53 #include "opt_msgbuf.h"
54 #include "opt_npx.h"
55 #include "opt_perfmon.h"
56 #include "opt_xbox.h"
57
58 #include <sys/param.h>
59 #include <sys/proc.h>
60 #include <sys/systm.h>
61 #include <sys/bio.h>
62 #include <sys/buf.h>
63 #include <sys/bus.h>
64 #include <sys/callout.h>
65 #include <sys/clock.h>
66 #include <sys/cons.h>
67 #include <sys/cpu.h>
68 #include <sys/eventhandler.h>
69 #include <sys/exec.h>
70 #include <sys/imgact.h>
71 #include <sys/kdb.h>
72 #include <sys/kernel.h>
73 #include <sys/ktr.h>
74 #include <sys/linker.h>
75 #include <sys/lock.h>
76 #include <sys/malloc.h>
77 #include <sys/memrange.h>
78 #include <sys/msgbuf.h>
79 #include <sys/mutex.h>
80 #include <sys/pcpu.h>
81 #include <sys/ptrace.h>
82 #include <sys/reboot.h>
83 #include <sys/sched.h>
84 #include <sys/signalvar.h>
85 #include <sys/sysctl.h>
86 #include <sys/sysent.h>
87 #include <sys/sysproto.h>
88 #include <sys/ucontext.h>
89 #include <sys/vmmeter.h>
90
91 #include <vm/vm.h>
92 #include <vm/vm_extern.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_pager.h>
98 #include <vm/vm_param.h>
99
100 #ifdef DDB
101 #ifndef KDB
102 #error KDB must be enabled in order for DDB to work!
103 #endif
104 #include <ddb/ddb.h>
105 #include <ddb/db_sym.h>
106 #endif
107
108 #include <isa/rtc.h>
109
110 #include <net/netisr.h>
111
112 #include <machine/bootinfo.h>
113 #include <machine/clock.h>
114 #include <machine/cpu.h>
115 #include <machine/cputypes.h>
116 #include <machine/intr_machdep.h>
117 #include <machine/md_var.h>
118 #include <machine/pc/bios.h>
119 #include <machine/pcb.h>
120 #include <machine/pcb_ext.h>
121 #include <machine/proc.h>
122 #include <machine/reg.h>
123 #include <machine/sigframe.h>
124 #include <machine/specialreg.h>
125 #include <machine/vm86.h>
126 #ifdef PERFMON
127 #include <machine/perfmon.h>
128 #endif
129 #ifdef SMP
130 #include <machine/privatespace.h>
131 #include <machine/smp.h>
132 #endif
133
134 #ifdef DEV_ISA
135 #include <i386/isa/icu.h>
136 #endif
137
138 #ifdef XBOX
139 #include <machine/xbox.h>
140
141 int arch_i386_is_xbox = 0;
142 uint32_t arch_i386_xbox_memsize = 0;
143 #endif
144
145 /* Sanity check for __curthread() */
146 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
147
148 extern void init386(int first);
149 extern void dblfault_handler(void);
150
151 extern void printcpuinfo(void); /* XXX header file */
152 extern void finishidentcpu(void);
153 extern void panicifcpuunsupported(void);
154 extern void initializecpu(void);
155
156 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
157 #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
158
159 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
160 #define CPU_ENABLE_SSE
161 #endif
162
163 static void cpu_startup(void *);
164 static void fpstate_drop(struct thread *td);
165 static void get_fpcontext(struct thread *td, mcontext_t *mcp);
166 static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
167 #ifdef CPU_ENABLE_SSE
168 static void set_fpregs_xmm(struct save87 *, struct savexmm *);
169 static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
170 #endif /* CPU_ENABLE_SSE */
171 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
172
173 #ifdef DDB
174 extern vm_offset_t ksym_start, ksym_end;
175 #endif
176
177 /* Intel ICH registers */
178 #define ICH_PMBASE 0x400
179 #define ICH_SMI_EN ICH_PMBASE + 0x30
180
181 int _udatasel, _ucodesel;
182 u_int basemem;
183
184 int cold = 1;
185
186 #ifdef COMPAT_43
187 static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
188 #endif
189 #ifdef COMPAT_FREEBSD4
190 static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
191 #endif
192
193 long Maxmem = 0;
194 long realmem = 0;
195
196 /*
197 * The number of PHYSMAP entries must be one less than the number of
198 * PHYSSEG entries because the PHYSMAP entry that spans the largest
199 * physical address that is accessible by ISA DMA is split into two
200 * PHYSSEG entries.
201 */
202 #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
203
204 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
205 vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
206
207 /* must be 2 less so 0 0 can signal end of chunks */
208 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
209 #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
210
211 struct kva_md_info kmi;
212
213 static struct trapframe proc0_tf;
214 #ifndef SMP
215 static struct pcpu __pcpu;
216 #endif
217
218 struct mtx icu_lock;
219
220 struct mem_range_softc mem_range_softc;
221
222 static void
223 cpu_startup(dummy)
224 void *dummy;
225 {
226 char *sysenv;
227
228 /*
229 * On MacBooks, we need to disallow the legacy USB circuit to
230 * generate an SMI# because this can cause several problems,
231 * namely: incorrect CPU frequency detection and failure to
232 * start the APs.
233 * We do this by disabling a bit in the SMI_EN (SMI Control and
234 * Enable register) of the Intel ICH LPC Interface Bridge.
235 */
236 sysenv = getenv("smbios.system.product");
237 if (sysenv != NULL) {
238 if (strncmp(sysenv, "MacBook", 7) == 0) {
239 if (bootverbose)
240 printf("Disabling LEGACY_USB_EN bit on "
241 "Intel ICH.\n");
242 outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
243 }
244 freeenv(sysenv);
245 }
246
247 /*
248 * Good {morning,afternoon,evening,night}.
249 */
250 startrtclock();
251 printcpuinfo();
252 panicifcpuunsupported();
253 #ifdef PERFMON
254 perfmon_init();
255 #endif
256 printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem),
257 ptoa((uintmax_t)Maxmem) / 1048576);
258 realmem = Maxmem;
259 /*
260 * Display any holes after the first chunk of extended memory.
261 */
262 if (bootverbose) {
263 int indx;
264
265 printf("Physical memory chunk(s):\n");
266 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
267 vm_paddr_t size;
268
269 size = phys_avail[indx + 1] - phys_avail[indx];
270 printf(
271 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
272 (uintmax_t)phys_avail[indx],
273 (uintmax_t)phys_avail[indx + 1] - 1,
274 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
275 }
276 }
277
278 vm_ksubmap_init(&kmi);
279
280 printf("avail memory = %ju (%ju MB)\n",
281 ptoa((uintmax_t)cnt.v_free_count),
282 ptoa((uintmax_t)cnt.v_free_count) / 1048576);
283
284 /*
285 * Set up buffers, so they can be used to read disk labels.
286 */
287 bufinit();
288 vm_pager_bufferinit();
289
290 cpu_setregs();
291 }
292
293 /*
294 * Send an interrupt to process.
295 *
296 * Stack is set up to allow sigcode stored
297 * at top to call routine, followed by kcall
298 * to sigreturn routine below. After sigreturn
299 * resets the signal mask, the stack, and the
300 * frame pointer, it returns to the user
301 * specified pc, psl.
302 */
303 #ifdef COMPAT_43
304 static void
305 osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
306 {
307 struct osigframe sf, *fp;
308 struct proc *p;
309 struct thread *td;
310 struct sigacts *psp;
311 struct trapframe *regs;
312 int sig;
313 int oonstack;
314
315 td = curthread;
316 p = td->td_proc;
317 PROC_LOCK_ASSERT(p, MA_OWNED);
318 sig = ksi->ksi_signo;
319 psp = p->p_sigacts;
320 mtx_assert(&psp->ps_mtx, MA_OWNED);
321 regs = td->td_frame;
322 oonstack = sigonstack(regs->tf_esp);
323
324 /* Allocate space for the signal handler context. */
325 if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
326 SIGISMEMBER(psp->ps_sigonstack, sig)) {
327 fp = (struct osigframe *)(td->td_sigstk.ss_sp +
328 td->td_sigstk.ss_size - sizeof(struct osigframe));
329 #if defined(COMPAT_43)
330 td->td_sigstk.ss_flags |= SS_ONSTACK;
331 #endif
332 } else
333 fp = (struct osigframe *)regs->tf_esp - 1;
334
335 /* Translate the signal if appropriate. */
336 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
337 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
338
339 /* Build the argument list for the signal handler. */
340 sf.sf_signum = sig;
341 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
342 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
343 /* Signal handler installed with SA_SIGINFO. */
344 sf.sf_arg2 = (register_t)&fp->sf_siginfo;
345 sf.sf_siginfo.si_signo = sig;
346 sf.sf_siginfo.si_code = ksi->ksi_code;
347 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
348 } else {
349 /* Old FreeBSD-style arguments. */
350 sf.sf_arg2 = ksi->ksi_code;
351 sf.sf_addr = (register_t)ksi->ksi_addr;
352 sf.sf_ahu.sf_handler = catcher;
353 }
354 mtx_unlock(&psp->ps_mtx);
355 PROC_UNLOCK(p);
356
357 /* Save most if not all of trap frame. */
358 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
359 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
360 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
361 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
362 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
363 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
364 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
365 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
366 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
367 sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
368 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
369 sf.sf_siginfo.si_sc.sc_gs = rgs();
370 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;
371
372 /* Build the signal context to be used by osigreturn(). */
373 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
374 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
375 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
376 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
377 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
378 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
379 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
380 sf.sf_siginfo.si_sc.sc_err = regs->tf_err;
381
382 /*
383 * If we're a vm86 process, we want to save the segment registers.
384 * We also change eflags to be our emulated eflags, not the actual
385 * eflags.
386 */
387 if (regs->tf_eflags & PSL_VM) {
388 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
389 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
390 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
391
392 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
393 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
394 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
395 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;
396
397 if (vm86->vm86_has_vme == 0)
398 sf.sf_siginfo.si_sc.sc_ps =
399 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
400 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
401
402 /* See sendsig() for comments. */
403 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
404 }
405
406 /*
407 * Copy the sigframe out to the user's stack.
408 */
409 if (copyout(&sf, fp, sizeof(*fp)) != 0) {
410 #ifdef DEBUG
411 printf("process %ld has trashed its stack\n", (long)p->p_pid);
412 #endif
413 PROC_LOCK(p);
414 sigexit(td, SIGILL);
415 }
416
417 regs->tf_esp = (int)fp;
418 regs->tf_eip = PS_STRINGS - szosigcode;
419 regs->tf_eflags &= ~PSL_T;
420 regs->tf_cs = _ucodesel;
421 regs->tf_ds = _udatasel;
422 regs->tf_es = _udatasel;
423 regs->tf_fs = _udatasel;
424 load_gs(_udatasel);
425 regs->tf_ss = _udatasel;
426 PROC_LOCK(p);
427 mtx_lock(&psp->ps_mtx);
428 }
429 #endif /* COMPAT_43 */
430
431 #ifdef COMPAT_FREEBSD4
432 static void
433 freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
434 {
435 struct sigframe4 sf, *sfp;
436 struct proc *p;
437 struct thread *td;
438 struct sigacts *psp;
439 struct trapframe *regs;
440 int sig;
441 int oonstack;
442
443 td = curthread;
444 p = td->td_proc;
445 PROC_LOCK_ASSERT(p, MA_OWNED);
446 sig = ksi->ksi_signo;
447 psp = p->p_sigacts;
448 mtx_assert(&psp->ps_mtx, MA_OWNED);
449 regs = td->td_frame;
450 oonstack = sigonstack(regs->tf_esp);
451
452 /* Save user context. */
453 bzero(&sf, sizeof(sf));
454 sf.sf_uc.uc_sigmask = *mask;
455 sf.sf_uc.uc_stack = td->td_sigstk;
456 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
457 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
458 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
459 sf.sf_uc.uc_mcontext.mc_gs = rgs();
460 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
461
462 /* Allocate space for the signal handler context. */
463 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
464 SIGISMEMBER(psp->ps_sigonstack, sig)) {
465 sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
466 td->td_sigstk.ss_size - sizeof(struct sigframe4));
467 #if defined(COMPAT_43)
468 td->td_sigstk.ss_flags |= SS_ONSTACK;
469 #endif
470 } else
471 sfp = (struct sigframe4 *)regs->tf_esp - 1;
472
473 /* Translate the signal if appropriate. */
474 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
475 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
476
477 /* Build the argument list for the signal handler. */
478 sf.sf_signum = sig;
479 sf.sf_ucontext = (register_t)&sfp->sf_uc;
480 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
481 /* Signal handler installed with SA_SIGINFO. */
482 sf.sf_siginfo = (register_t)&sfp->sf_si;
483 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
484
485 /* Fill in POSIX parts */
486 sf.sf_si.si_signo = sig;
487 sf.sf_si.si_code = ksi->ksi_code;
488 sf.sf_si.si_addr = ksi->ksi_addr;
489 } else {
490 /* Old FreeBSD-style arguments. */
491 sf.sf_siginfo = ksi->ksi_code;
492 sf.sf_addr = (register_t)ksi->ksi_addr;
493 sf.sf_ahu.sf_handler = catcher;
494 }
495 mtx_unlock(&psp->ps_mtx);
496 PROC_UNLOCK(p);
497
498 /*
499 * If we're a vm86 process, we want to save the segment registers.
500 * We also change eflags to be our emulated eflags, not the actual
501 * eflags.
502 */
503 if (regs->tf_eflags & PSL_VM) {
504 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
505 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
506
507 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
508 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
509 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
510 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
511
512 if (vm86->vm86_has_vme == 0)
513 sf.sf_uc.uc_mcontext.mc_eflags =
514 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
515 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
516
517 /*
518 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
519 * syscalls made by the signal handler. This just avoids
520 * wasting time for our lazy fixup of such faults. PSL_NT
521 * does nothing in vm86 mode, but vm86 programs can set it
522 * almost legitimately in probes for old cpu types.
523 */
524 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
525 }
526
527 /*
528 * Copy the sigframe out to the user's stack.
529 */
530 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
531 #ifdef DEBUG
532 printf("process %ld has trashed its stack\n", (long)p->p_pid);
533 #endif
534 PROC_LOCK(p);
535 sigexit(td, SIGILL);
536 }
537
538 regs->tf_esp = (int)sfp;
539 regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
540 regs->tf_eflags &= ~PSL_T;
541 regs->tf_cs = _ucodesel;
542 regs->tf_ds = _udatasel;
543 regs->tf_es = _udatasel;
544 regs->tf_fs = _udatasel;
545 regs->tf_ss = _udatasel;
546 PROC_LOCK(p);
547 mtx_lock(&psp->ps_mtx);
548 }
549 #endif /* COMPAT_FREEBSD4 */
550
551 void
552 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
553 {
554 struct sigframe sf, *sfp;
555 struct proc *p;
556 struct thread *td;
557 struct sigacts *psp;
558 char *sp;
559 struct trapframe *regs;
560 int sig;
561 int oonstack;
562
563 td = curthread;
564 p = td->td_proc;
565 PROC_LOCK_ASSERT(p, MA_OWNED);
566 sig = ksi->ksi_signo;
567 psp = p->p_sigacts;
568 mtx_assert(&psp->ps_mtx, MA_OWNED);
569 #ifdef COMPAT_FREEBSD4
570 if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
571 freebsd4_sendsig(catcher, ksi, mask);
572 return;
573 }
574 #endif
575 #ifdef COMPAT_43
576 if (SIGISMEMBER(psp->ps_osigset, sig)) {
577 osendsig(catcher, ksi, mask);
578 return;
579 }
580 #endif
581 regs = td->td_frame;
582 oonstack = sigonstack(regs->tf_esp);
583
584 /* Save user context. */
585 bzero(&sf, sizeof(sf));
586 sf.sf_uc.uc_sigmask = *mask;
587 sf.sf_uc.uc_stack = td->td_sigstk;
588 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
589 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
590 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
591 sf.sf_uc.uc_mcontext.mc_gs = rgs();
592 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
593 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
594 get_fpcontext(td, &sf.sf_uc.uc_mcontext);
595 fpstate_drop(td);
596
597 /* Allocate space for the signal handler context. */
598 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
599 SIGISMEMBER(psp->ps_sigonstack, sig)) {
600 sp = td->td_sigstk.ss_sp +
601 td->td_sigstk.ss_size - sizeof(struct sigframe);
602 #if defined(COMPAT_43)
603 td->td_sigstk.ss_flags |= SS_ONSTACK;
604 #endif
605 } else
606 sp = (char *)regs->tf_esp - sizeof(struct sigframe);
607 /* Align to 16 bytes. */
608 sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
609
610 /* Translate the signal if appropriate. */
611 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
612 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
613
614 /* Build the argument list for the signal handler. */
615 sf.sf_signum = sig;
616 sf.sf_ucontext = (register_t)&sfp->sf_uc;
617 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
618 /* Signal handler installed with SA_SIGINFO. */
619 sf.sf_siginfo = (register_t)&sfp->sf_si;
620 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
621
622 /* Fill in POSIX parts */
623 sf.sf_si = ksi->ksi_info;
624 sf.sf_si.si_signo = sig; /* maybe a translated signal */
625 } else {
626 /* Old FreeBSD-style arguments. */
627 sf.sf_siginfo = ksi->ksi_code;
628 sf.sf_addr = (register_t)ksi->ksi_addr;
629 sf.sf_ahu.sf_handler = catcher;
630 }
631 mtx_unlock(&psp->ps_mtx);
632 PROC_UNLOCK(p);
633
634 /*
635 * If we're a vm86 process, we want to save the segment registers.
636 * We also change eflags to be our emulated eflags, not the actual
637 * eflags.
638 */
639 if (regs->tf_eflags & PSL_VM) {
640 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
641 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
642
643 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
644 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
645 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
646 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
647
648 if (vm86->vm86_has_vme == 0)
649 sf.sf_uc.uc_mcontext.mc_eflags =
650 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
651 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
652
653 /*
654 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
655 * syscalls made by the signal handler. This just avoids
656 * wasting time for our lazy fixup of such faults. PSL_NT
657 * does nothing in vm86 mode, but vm86 programs can set it
658 * almost legitimately in probes for old cpu types.
659 */
660 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
661 }
662
663 /*
664 * Copy the sigframe out to the user's stack.
665 */
666 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
667 #ifdef DEBUG
668 printf("process %ld has trashed its stack\n", (long)p->p_pid);
669 #endif
670 PROC_LOCK(p);
671 sigexit(td, SIGILL);
672 }
673
674 regs->tf_esp = (int)sfp;
675 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
676 regs->tf_eflags &= ~PSL_T;
677 regs->tf_cs = _ucodesel;
678 regs->tf_ds = _udatasel;
679 regs->tf_es = _udatasel;
680 regs->tf_fs = _udatasel;
681 regs->tf_ss = _udatasel;
682 PROC_LOCK(p);
683 mtx_lock(&psp->ps_mtx);
684 }
685
686 /*
687 * System call to cleanup state after a signal
688 * has been taken. Reset signal mask and
689 * stack state from context left by sendsig (above).
690 * Return to previous pc and psl as specified by
691 * context left by sendsig. Check carefully to
692 * make sure that the user has not modified the
693 * state to gain improper privileges.
694 *
695 * MPSAFE
696 */
697 #ifdef COMPAT_43
698 int
699 osigreturn(td, uap)
700 struct thread *td;
701 struct osigreturn_args /* {
702 struct osigcontext *sigcntxp;
703 } */ *uap;
704 {
705 struct osigcontext sc;
706 struct trapframe *regs;
707 struct osigcontext *scp;
708 struct proc *p = td->td_proc;
709 int eflags, error;
710 ksiginfo_t ksi;
711
712 regs = td->td_frame;
713 error = copyin(uap->sigcntxp, &sc, sizeof(sc));
714 if (error != 0)
715 return (error);
716 scp = ≻
717 eflags = scp->sc_ps;
718 if (eflags & PSL_VM) {
719 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
720 struct vm86_kernel *vm86;
721
722 /*
723 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
724 * set up the vm86 area, and we can't enter vm86 mode.
725 */
726 if (td->td_pcb->pcb_ext == 0)
727 return (EINVAL);
728 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
729 if (vm86->vm86_inited == 0)
730 return (EINVAL);
731
732 /* Go back to user mode if both flags are set. */
733 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
734 ksiginfo_init_trap(&ksi);
735 ksi.ksi_signo = SIGBUS;
736 ksi.ksi_code = BUS_OBJERR;
737 ksi.ksi_addr = (void *)regs->tf_eip;
738 trapsignal(td, &ksi);
739 }
740
741 if (vm86->vm86_has_vme) {
742 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
743 (eflags & VME_USERCHANGE) | PSL_VM;
744 } else {
745 vm86->vm86_eflags = eflags; /* save VIF, VIP */
746 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
747 (eflags & VM_USERCHANGE) | PSL_VM;
748 }
749 tf->tf_vm86_ds = scp->sc_ds;
750 tf->tf_vm86_es = scp->sc_es;
751 tf->tf_vm86_fs = scp->sc_fs;
752 tf->tf_vm86_gs = scp->sc_gs;
753 tf->tf_ds = _udatasel;
754 tf->tf_es = _udatasel;
755 tf->tf_fs = _udatasel;
756 } else {
757 /*
758 * Don't allow users to change privileged or reserved flags.
759 */
760 /*
761 * XXX do allow users to change the privileged flag PSL_RF.
762 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
763 * should sometimes set it there too. tf_eflags is kept in
764 * the signal context during signal handling and there is no
765 * other place to remember it, so the PSL_RF bit may be
766 * corrupted by the signal handler without us knowing.
767 * Corruption of the PSL_RF bit at worst causes one more or
768 * one less debugger trap, so allowing it is fairly harmless.
769 */
770 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
771 return (EINVAL);
772 }
773
774 /*
775 * Don't allow users to load a valid privileged %cs. Let the
776 * hardware check for invalid selectors, excess privilege in
777 * other selectors, invalid %eip's and invalid %esp's.
778 */
779 if (!CS_SECURE(scp->sc_cs)) {
780 ksiginfo_init_trap(&ksi);
781 ksi.ksi_signo = SIGBUS;
782 ksi.ksi_code = BUS_OBJERR;
783 ksi.ksi_trapno = T_PROTFLT;
784 ksi.ksi_addr = (void *)regs->tf_eip;
785 trapsignal(td, &ksi);
786 return (EINVAL);
787 }
788 regs->tf_ds = scp->sc_ds;
789 regs->tf_es = scp->sc_es;
790 regs->tf_fs = scp->sc_fs;
791 }
792
793 /* Restore remaining registers. */
794 regs->tf_eax = scp->sc_eax;
795 regs->tf_ebx = scp->sc_ebx;
796 regs->tf_ecx = scp->sc_ecx;
797 regs->tf_edx = scp->sc_edx;
798 regs->tf_esi = scp->sc_esi;
799 regs->tf_edi = scp->sc_edi;
800 regs->tf_cs = scp->sc_cs;
801 regs->tf_ss = scp->sc_ss;
802 regs->tf_isp = scp->sc_isp;
803 regs->tf_ebp = scp->sc_fp;
804 regs->tf_esp = scp->sc_sp;
805 regs->tf_eip = scp->sc_pc;
806 regs->tf_eflags = eflags;
807
808 PROC_LOCK(p);
809 #if defined(COMPAT_43)
810 if (scp->sc_onstack & 1)
811 td->td_sigstk.ss_flags |= SS_ONSTACK;
812 else
813 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
814 #endif
815 SIGSETOLD(td->td_sigmask, scp->sc_mask);
816 SIG_CANTMASK(td->td_sigmask);
817 signotify(td);
818 PROC_UNLOCK(p);
819 return (EJUSTRETURN);
820 }
821 #endif /* COMPAT_43 */
822
823 #ifdef COMPAT_FREEBSD4
824 /*
825 * MPSAFE
826 */
827 int
828 freebsd4_sigreturn(td, uap)
829 struct thread *td;
830 struct freebsd4_sigreturn_args /* {
831 const ucontext4 *sigcntxp;
832 } */ *uap;
833 {
834 struct ucontext4 uc;
835 struct proc *p = td->td_proc;
836 struct trapframe *regs;
837 const struct ucontext4 *ucp;
838 int cs, eflags, error;
839 ksiginfo_t ksi;
840
841 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
842 if (error != 0)
843 return (error);
844 ucp = &uc;
845 regs = td->td_frame;
846 eflags = ucp->uc_mcontext.mc_eflags;
847 if (eflags & PSL_VM) {
848 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
849 struct vm86_kernel *vm86;
850
851 /*
852 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
853 * set up the vm86 area, and we can't enter vm86 mode.
854 */
855 if (td->td_pcb->pcb_ext == 0)
856 return (EINVAL);
857 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
858 if (vm86->vm86_inited == 0)
859 return (EINVAL);
860
861 /* Go back to user mode if both flags are set. */
862 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
863 ksiginfo_init_trap(&ksi);
864 ksi.ksi_signo = SIGBUS;
865 ksi.ksi_code = BUS_OBJERR;
866 ksi.ksi_addr = (void *)regs->tf_eip;
867 trapsignal(td, &ksi);
868 }
869 if (vm86->vm86_has_vme) {
870 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
871 (eflags & VME_USERCHANGE) | PSL_VM;
872 } else {
873 vm86->vm86_eflags = eflags; /* save VIF, VIP */
874 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
875 (eflags & VM_USERCHANGE) | PSL_VM;
876 }
877 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
878 tf->tf_eflags = eflags;
879 tf->tf_vm86_ds = tf->tf_ds;
880 tf->tf_vm86_es = tf->tf_es;
881 tf->tf_vm86_fs = tf->tf_fs;
882 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
883 tf->tf_ds = _udatasel;
884 tf->tf_es = _udatasel;
885 tf->tf_fs = _udatasel;
886 } else {
887 /*
888 * Don't allow users to change privileged or reserved flags.
889 */
890 /*
891 * XXX do allow users to change the privileged flag PSL_RF.
892 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
893 * should sometimes set it there too. tf_eflags is kept in
894 * the signal context during signal handling and there is no
895 * other place to remember it, so the PSL_RF bit may be
896 * corrupted by the signal handler without us knowing.
897 * Corruption of the PSL_RF bit at worst causes one more or
898 * one less debugger trap, so allowing it is fairly harmless.
899 */
900 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
901 printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
902 return (EINVAL);
903 }
904
905 /*
906 * Don't allow users to load a valid privileged %cs. Let the
907 * hardware check for invalid selectors, excess privilege in
908 * other selectors, invalid %eip's and invalid %esp's.
909 */
910 cs = ucp->uc_mcontext.mc_cs;
911 if (!CS_SECURE(cs)) {
912 printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
913 ksiginfo_init_trap(&ksi);
914 ksi.ksi_signo = SIGBUS;
915 ksi.ksi_code = BUS_OBJERR;
916 ksi.ksi_trapno = T_PROTFLT;
917 ksi.ksi_addr = (void *)regs->tf_eip;
918 trapsignal(td, &ksi);
919 return (EINVAL);
920 }
921
922 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
923 }
924
925 PROC_LOCK(p);
926 #if defined(COMPAT_43)
927 if (ucp->uc_mcontext.mc_onstack & 1)
928 td->td_sigstk.ss_flags |= SS_ONSTACK;
929 else
930 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
931 #endif
932
933 td->td_sigmask = ucp->uc_sigmask;
934 SIG_CANTMASK(td->td_sigmask);
935 signotify(td);
936 PROC_UNLOCK(p);
937 return (EJUSTRETURN);
938 }
939 #endif /* COMPAT_FREEBSD4 */
940
941 /*
942 * MPSAFE
943 */
944 int
945 sigreturn(td, uap)
946 struct thread *td;
947 struct sigreturn_args /* {
948 const struct __ucontext *sigcntxp;
949 } */ *uap;
950 {
951 ucontext_t uc;
952 struct proc *p = td->td_proc;
953 struct trapframe *regs;
954 const ucontext_t *ucp;
955 int cs, eflags, error, ret;
956 ksiginfo_t ksi;
957
958 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
959 if (error != 0)
960 return (error);
961 ucp = &uc;
962 regs = td->td_frame;
963 eflags = ucp->uc_mcontext.mc_eflags;
964 if (eflags & PSL_VM) {
965 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
966 struct vm86_kernel *vm86;
967
968 /*
969 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
970 * set up the vm86 area, and we can't enter vm86 mode.
971 */
972 if (td->td_pcb->pcb_ext == 0)
973 return (EINVAL);
974 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
975 if (vm86->vm86_inited == 0)
976 return (EINVAL);
977
978 /* Go back to user mode if both flags are set. */
979 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
980 ksiginfo_init_trap(&ksi);
981 ksi.ksi_signo = SIGBUS;
982 ksi.ksi_code = BUS_OBJERR;
983 ksi.ksi_addr = (void *)regs->tf_eip;
984 trapsignal(td, &ksi);
985 }
986
987 if (vm86->vm86_has_vme) {
988 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
989 (eflags & VME_USERCHANGE) | PSL_VM;
990 } else {
991 vm86->vm86_eflags = eflags; /* save VIF, VIP */
992 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
993 (eflags & VM_USERCHANGE) | PSL_VM;
994 }
995 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
996 tf->tf_eflags = eflags;
997 tf->tf_vm86_ds = tf->tf_ds;
998 tf->tf_vm86_es = tf->tf_es;
999 tf->tf_vm86_fs = tf->tf_fs;
1000 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
1001 tf->tf_ds = _udatasel;
1002 tf->tf_es = _udatasel;
1003 tf->tf_fs = _udatasel;
1004 } else {
1005 /*
1006 * Don't allow users to change privileged or reserved flags.
1007 */
1008 /*
1009 * XXX do allow users to change the privileged flag PSL_RF.
1010 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
1011 * should sometimes set it there too. tf_eflags is kept in
1012 * the signal context during signal handling and there is no
1013 * other place to remember it, so the PSL_RF bit may be
1014 * corrupted by the signal handler without us knowing.
1015 * Corruption of the PSL_RF bit at worst causes one more or
1016 * one less debugger trap, so allowing it is fairly harmless.
1017 */
1018 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
1019 printf("sigreturn: eflags = 0x%x\n", eflags);
1020 return (EINVAL);
1021 }
1022
1023 /*
1024 * Don't allow users to load a valid privileged %cs. Let the
1025 * hardware check for invalid selectors, excess privilege in
1026 * other selectors, invalid %eip's and invalid %esp's.
1027 */
1028 cs = ucp->uc_mcontext.mc_cs;
1029 if (!CS_SECURE(cs)) {
1030 printf("sigreturn: cs = 0x%x\n", cs);
1031 ksiginfo_init_trap(&ksi);
1032 ksi.ksi_signo = SIGBUS;
1033 ksi.ksi_code = BUS_OBJERR;
1034 ksi.ksi_trapno = T_PROTFLT;
1035 ksi.ksi_addr = (void *)regs->tf_eip;
1036 trapsignal(td, &ksi);
1037 return (EINVAL);
1038 }
1039
1040 ret = set_fpcontext(td, &ucp->uc_mcontext);
1041 if (ret != 0)
1042 return (ret);
1043 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
1044 }
1045
1046 PROC_LOCK(p);
1047 #if defined(COMPAT_43)
1048 if (ucp->uc_mcontext.mc_onstack & 1)
1049 td->td_sigstk.ss_flags |= SS_ONSTACK;
1050 else
1051 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
1052 #endif
1053
1054 td->td_sigmask = ucp->uc_sigmask;
1055 SIG_CANTMASK(td->td_sigmask);
1056 signotify(td);
1057 PROC_UNLOCK(p);
1058 return (EJUSTRETURN);
1059 }
1060
1061 /*
1062 * Machine dependent boot() routine
1063 *
1064 * I haven't seen anything to put here yet
1065 * Possibly some stuff might be grafted back here from boot()
1066 */
1067 void
1068 cpu_boot(int howto)
1069 {
1070 }
1071
1072 /* Get current clock frequency for the given cpu id. */
1073 int
1074 cpu_est_clockrate(int cpu_id, uint64_t *rate)
1075 {
1076 register_t reg;
1077 uint64_t tsc1, tsc2;
1078
1079 if (pcpu_find(cpu_id) == NULL || rate == NULL)
1080 return (EINVAL);
1081 if (!tsc_present)
1082 return (EOPNOTSUPP);
1083
1084 /* If we're booting, trust the rate calibrated moments ago. */
1085 if (cold) {
1086 *rate = tsc_freq;
1087 return (0);
1088 }
1089
1090 #ifdef SMP
1091 /* Schedule ourselves on the indicated cpu. */
1092 thread_lock(curthread);
1093 sched_bind(curthread, cpu_id);
1094 thread_unlock(curthread);
1095 #endif
1096
1097 /* Calibrate by measuring a short delay. */
1098 reg = intr_disable();
1099 tsc1 = rdtsc();
1100 DELAY(1000);
1101 tsc2 = rdtsc();
1102 intr_restore(reg);
1103
1104 #ifdef SMP
1105 thread_lock(curthread);
1106 sched_unbind(curthread);
1107 thread_unlock(curthread);
1108 #endif
1109
1110 /*
1111 * Calculate the difference in readings, convert to Mhz, and
1112 * subtract 0.5% of the total. Empirical testing has shown that
1113 * overhead in DELAY() works out to approximately this value.
1114 */
1115 tsc2 -= tsc1;
1116 *rate = tsc2 * 1000 - tsc2 * 5;
1117 return (0);
1118 }
1119
1120 /*
1121 * Shutdown the CPU as much as possible
1122 */
1123 void
1124 cpu_halt(void)
1125 {
1126 for (;;)
1127 __asm__ ("hlt");
1128 }
1129
1130 /*
1131 * Hook to idle the CPU when possible. In the SMP case we default to
1132 * off because a halted cpu will not currently pick up a new thread in the
1133 * run queue until the next timer tick. If turned on this will result in
1134 * approximately a 4.2% loss in real time performance in buildworld tests
1135 * (but improves user and sys times oddly enough), and saves approximately
1136 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
1137 *
1138 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
1139 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
1140 * Then we can have our cake and eat it too.
1141 *
1142 * XXX I'm turning it on for SMP as well by default for now. It seems to
1143 * help lock contention somewhat, and this is critical for HTT. -Peter
1144 */
1145 static int cpu_idle_hlt = 1;
1146 TUNABLE_INT("machdep.cpu_idle_hlt", &cpu_idle_hlt);
1147 SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
1148 &cpu_idle_hlt, 0, "Idle loop HLT enable");
1149
1150 static void
1151 cpu_idle_default(void)
1152 {
1153 /*
1154 * we must absolutely guarentee that hlt is the
1155 * absolute next instruction after sti or we
1156 * introduce a timing window.
1157 */
1158 __asm __volatile("sti; hlt");
1159 }
1160
1161 /*
1162 * Note that we have to be careful here to avoid a race between checking
1163 * sched_runnable() and actually halting. If we don't do this, we may waste
1164 * the time between calling hlt and the next interrupt even though there
1165 * is a runnable process.
1166 */
1167 void
1168 cpu_idle(void)
1169 {
1170
1171 #ifdef SMP
1172 if (mp_grab_cpu_hlt())
1173 return;
1174 #endif
1175
1176 if (cpu_idle_hlt) {
1177 disable_intr();
1178 if (sched_runnable())
1179 enable_intr();
1180 else
1181 (*cpu_idle_hook)();
1182 }
1183 }
1184
1185 /* Other subsystems (e.g., ACPI) can hook this later. */
1186 void (*cpu_idle_hook)(void) = cpu_idle_default;
1187
1188 /*
1189 * Clear registers on exec
1190 */
1191 void
1192 exec_setregs(td, entry, stack, ps_strings)
1193 struct thread *td;
1194 u_long entry;
1195 u_long stack;
1196 u_long ps_strings;
1197 {
1198 struct trapframe *regs = td->td_frame;
1199 struct pcb *pcb = td->td_pcb;
1200
1201 /* Reset pc->pcb_gs and %gs before possibly invalidating it. */
1202 pcb->pcb_gs = _udatasel;
1203 load_gs(_udatasel);
1204
1205 mtx_lock_spin(&dt_lock);
1206 if (td->td_proc->p_md.md_ldt)
1207 user_ldt_free(td);
1208 else
1209 mtx_unlock_spin(&dt_lock);
1210
1211 bzero((char *)regs, sizeof(struct trapframe));
1212 regs->tf_eip = entry;
1213 regs->tf_esp = stack;
1214 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
1215 regs->tf_ss = _udatasel;
1216 regs->tf_ds = _udatasel;
1217 regs->tf_es = _udatasel;
1218 regs->tf_fs = _udatasel;
1219 regs->tf_cs = _ucodesel;
1220
1221 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */
1222 regs->tf_ebx = ps_strings;
1223
1224 /*
1225 * Reset the hardware debug registers if they were in use.
1226 * They won't have any meaning for the newly exec'd process.
1227 */
1228 if (pcb->pcb_flags & PCB_DBREGS) {
1229 pcb->pcb_dr0 = 0;
1230 pcb->pcb_dr1 = 0;
1231 pcb->pcb_dr2 = 0;
1232 pcb->pcb_dr3 = 0;
1233 pcb->pcb_dr6 = 0;
1234 pcb->pcb_dr7 = 0;
1235 if (pcb == PCPU_GET(curpcb)) {
1236 /*
1237 * Clear the debug registers on the running
1238 * CPU, otherwise they will end up affecting
1239 * the next process we switch to.
1240 */
1241 reset_dbregs();
1242 }
1243 pcb->pcb_flags &= ~PCB_DBREGS;
1244 }
1245
1246 /*
1247 * Initialize the math emulator (if any) for the current process.
1248 * Actually, just clear the bit that says that the emulator has
1249 * been initialized. Initialization is delayed until the process
1250 * traps to the emulator (if it is done at all) mainly because
1251 * emulators don't provide an entry point for initialization.
1252 */
1253 td->td_pcb->pcb_flags &= ~FP_SOFTFP;
1254
1255 /*
1256 * Drop the FP state if we hold it, so that the process gets a
1257 * clean FP state if it uses the FPU again.
1258 */
1259 fpstate_drop(td);
1260
1261 /*
1262 * XXX - Linux emulator
1263 * Make sure sure edx is 0x0 on entry. Linux binaries depend
1264 * on it.
1265 */
1266 td->td_retval[1] = 0;
1267 }
1268
1269 void
1270 cpu_setregs(void)
1271 {
1272 unsigned int cr0;
1273
1274 cr0 = rcr0();
1275
1276 /*
1277 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
1278 *
1279 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
1280 * instructions. We must set the CR0_MP bit and use the CR0_TS
1281 * bit to control the trap, because setting the CR0_EM bit does
1282 * not cause WAIT instructions to trap. It's important to trap
1283 * WAIT instructions - otherwise the "wait" variants of no-wait
1284 * control instructions would degenerate to the "no-wait" variants
1285 * after FP context switches but work correctly otherwise. It's
1286 * particularly important to trap WAITs when there is no NPX -
1287 * otherwise the "wait" variants would always degenerate.
1288 *
1289 * Try setting CR0_NE to get correct error reporting on 486DX's.
1290 * Setting it should fail or do nothing on lesser processors.
1291 */
1292 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
1293 load_cr0(cr0);
1294 load_gs(_udatasel);
1295 }
1296
1297 u_long bootdev; /* not a struct cdev *- encoding is different */
1298 SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
1299 CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");
1300
1301 /*
1302 * Initialize 386 and configure to run kernel
1303 */
1304
1305 /*
1306 * Initialize segments & interrupt table
1307 */
1308
1309 int _default_ldt;
1310 union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */
1311 static struct gate_descriptor idt0[NIDT];
1312 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
1313 union descriptor ldt[NLDT]; /* local descriptor table */
1314 struct region_descriptor r_gdt, r_idt; /* table descriptors */
1315 struct mtx dt_lock; /* lock for GDT and LDT */
1316
1317 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
1318 extern int has_f00f_bug;
1319 #endif
1320
1321 static struct i386tss dblfault_tss;
1322 static char dblfault_stack[PAGE_SIZE];
1323
1324 extern vm_offset_t proc0kstack;
1325
1326
1327 /*
1328 * software prototypes -- in more palatable form.
1329 *
1330 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
1331 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
1332 */
1333 struct soft_segment_descriptor gdt_segs[] = {
1334 /* GNULL_SEL 0 Null Descriptor */
1335 { 0x0, /* segment base address */
1336 0x0, /* length */
1337 0, /* segment type */
1338 0, /* segment descriptor priority level */
1339 0, /* segment descriptor present */
1340 0, 0,
1341 0, /* default 32 vs 16 bit size */
1342 0 /* limit granularity (byte/page units)*/ },
1343 /* GPRIV_SEL 1 SMP Per-Processor Private Data Descriptor */
1344 { 0x0, /* segment base address */
1345 0xfffff, /* length - all address space */
1346 SDT_MEMRWA, /* segment type */
1347 0, /* segment descriptor priority level */
1348 1, /* segment descriptor present */
1349 0, 0,
1350 1, /* default 32 vs 16 bit size */
1351 1 /* limit granularity (byte/page units)*/ },
1352 /* GUFS_SEL 2 %fs Descriptor for user */
1353 { 0x0, /* segment base address */
1354 0xfffff, /* length - all address space */
1355 SDT_MEMRWA, /* segment type */
1356 SEL_UPL, /* segment descriptor priority level */
1357 1, /* segment descriptor present */
1358 0, 0,
1359 1, /* default 32 vs 16 bit size */
1360 1 /* limit granularity (byte/page units)*/ },
1361 /* GUGS_SEL 3 %gs Descriptor for user */
1362 { 0x0, /* segment base address */
1363 0xfffff, /* length - all address space */
1364 SDT_MEMRWA, /* segment type */
1365 SEL_UPL, /* segment descriptor priority level */
1366 1, /* segment descriptor present */
1367 0, 0,
1368 1, /* default 32 vs 16 bit size */
1369 1 /* limit granularity (byte/page units)*/ },
1370 /* GCODE_SEL 4 Code Descriptor for kernel */
1371 { 0x0, /* segment base address */
1372 0xfffff, /* length - all address space */
1373 SDT_MEMERA, /* segment type */
1374 0, /* segment descriptor priority level */
1375 1, /* segment descriptor present */
1376 0, 0,
1377 1, /* default 32 vs 16 bit size */
1378 1 /* limit granularity (byte/page units)*/ },
1379 /* GDATA_SEL 5 Data Descriptor for kernel */
1380 { 0x0, /* segment base address */
1381 0xfffff, /* length - all address space */
1382 SDT_MEMRWA, /* segment type */
1383 0, /* segment descriptor priority level */
1384 1, /* segment descriptor present */
1385 0, 0,
1386 1, /* default 32 vs 16 bit size */
1387 1 /* limit granularity (byte/page units)*/ },
1388 /* GUCODE_SEL 6 Code Descriptor for user */
1389 { 0x0, /* segment base address */
1390 0xfffff, /* length - all address space */
1391 SDT_MEMERA, /* segment type */
1392 SEL_UPL, /* segment descriptor priority level */
1393 1, /* segment descriptor present */
1394 0, 0,
1395 1, /* default 32 vs 16 bit size */
1396 1 /* limit granularity (byte/page units)*/ },
1397 /* GUDATA_SEL 7 Data Descriptor for user */
1398 { 0x0, /* segment base address */
1399 0xfffff, /* length - all address space */
1400 SDT_MEMRWA, /* segment type */
1401 SEL_UPL, /* segment descriptor priority level */
1402 1, /* segment descriptor present */
1403 0, 0,
1404 1, /* default 32 vs 16 bit size */
1405 1 /* limit granularity (byte/page units)*/ },
1406 /* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
1407 { 0x400, /* segment base address */
1408 0xfffff, /* length */
1409 SDT_MEMRWA, /* segment type */
1410 0, /* segment descriptor priority level */
1411 1, /* segment descriptor present */
1412 0, 0,
1413 1, /* default 32 vs 16 bit size */
1414 1 /* limit granularity (byte/page units)*/ },
1415 /* GPROC0_SEL 9 Proc 0 Tss Descriptor */
1416 {
1417 0x0, /* segment base address */
1418 sizeof(struct i386tss)-1,/* length */
1419 SDT_SYS386TSS, /* segment type */
1420 0, /* segment descriptor priority level */
1421 1, /* segment descriptor present */
1422 0, 0,
1423 0, /* unused - default 32 vs 16 bit size */
1424 0 /* limit granularity (byte/page units)*/ },
1425 /* GLDT_SEL 10 LDT Descriptor */
1426 { (int) ldt, /* segment base address */
1427 sizeof(ldt)-1, /* length - all address space */
1428 SDT_SYSLDT, /* segment type */
1429 SEL_UPL, /* segment descriptor priority level */
1430 1, /* segment descriptor present */
1431 0, 0,
1432 0, /* unused - default 32 vs 16 bit size */
1433 0 /* limit granularity (byte/page units)*/ },
1434 /* GUSERLDT_SEL 11 User LDT Descriptor per process */
1435 { (int) ldt, /* segment base address */
1436 (512 * sizeof(union descriptor)-1), /* length */
1437 SDT_SYSLDT, /* segment type */
1438 0, /* segment descriptor priority level */
1439 1, /* segment descriptor present */
1440 0, 0,
1441 0, /* unused - default 32 vs 16 bit size */
1442 0 /* limit granularity (byte/page units)*/ },
1443 /* GPANIC_SEL 12 Panic Tss Descriptor */
1444 { (int) &dblfault_tss, /* segment base address */
1445 sizeof(struct i386tss)-1,/* length - all address space */
1446 SDT_SYS386TSS, /* segment type */
1447 0, /* segment descriptor priority level */
1448 1, /* segment descriptor present */
1449 0, 0,
1450 0, /* unused - default 32 vs 16 bit size */
1451 0 /* limit granularity (byte/page units)*/ },
1452 /* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
1453 { 0, /* segment base address (overwritten) */
1454 0xfffff, /* length */
1455 SDT_MEMERA, /* segment type */
1456 0, /* segment descriptor priority level */
1457 1, /* segment descriptor present */
1458 0, 0,
1459 0, /* default 32 vs 16 bit size */
1460 1 /* limit granularity (byte/page units)*/ },
1461 /* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
1462 { 0, /* segment base address (overwritten) */
1463 0xfffff, /* length */
1464 SDT_MEMERA, /* segment type */
1465 0, /* segment descriptor priority level */
1466 1, /* segment descriptor present */
1467 0, 0,
1468 0, /* default 32 vs 16 bit size */
1469 1 /* limit granularity (byte/page units)*/ },
1470 /* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
1471 { 0, /* segment base address (overwritten) */
1472 0xfffff, /* length */
1473 SDT_MEMRWA, /* segment type */
1474 0, /* segment descriptor priority level */
1475 1, /* segment descriptor present */
1476 0, 0,
1477 1, /* default 32 vs 16 bit size */
1478 1 /* limit granularity (byte/page units)*/ },
1479 /* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
1480 { 0, /* segment base address (overwritten) */
1481 0xfffff, /* length */
1482 SDT_MEMRWA, /* segment type */
1483 0, /* segment descriptor priority level */
1484 1, /* segment descriptor present */
1485 0, 0,
1486 0, /* default 32 vs 16 bit size */
1487 1 /* limit granularity (byte/page units)*/ },
1488 /* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
1489 { 0, /* segment base address (overwritten) */
1490 0xfffff, /* length */
1491 SDT_MEMRWA, /* segment type */
1492 0, /* segment descriptor priority level */
1493 1, /* segment descriptor present */
1494 0, 0,
1495 0, /* default 32 vs 16 bit size */
1496 1 /* limit granularity (byte/page units)*/ },
1497 /* GNDIS_SEL 18 NDIS Descriptor */
1498 { 0x0, /* segment base address */
1499 0x0, /* length */
1500 0, /* segment type */
1501 0, /* segment descriptor priority level */
1502 0, /* segment descriptor present */
1503 0, 0,
1504 0, /* default 32 vs 16 bit size */
1505 0 /* limit granularity (byte/page units)*/ },
1506 };
1507
1508 static struct soft_segment_descriptor ldt_segs[] = {
1509 /* Null Descriptor - overwritten by call gate */
1510 { 0x0, /* segment base address */
1511 0x0, /* length - all address space */
1512 0, /* segment type */
1513 0, /* segment descriptor priority level */
1514 0, /* segment descriptor present */
1515 0, 0,
1516 0, /* default 32 vs 16 bit size */
1517 0 /* limit granularity (byte/page units)*/ },
1518 /* Null Descriptor - overwritten by call gate */
1519 { 0x0, /* segment base address */
1520 0x0, /* length - all address space */
1521 0, /* segment type */
1522 0, /* segment descriptor priority level */
1523 0, /* segment descriptor present */
1524 0, 0,
1525 0, /* default 32 vs 16 bit size */
1526 0 /* limit granularity (byte/page units)*/ },
1527 /* Null Descriptor - overwritten by call gate */
1528 { 0x0, /* segment base address */
1529 0x0, /* length - all address space */
1530 0, /* segment type */
1531 0, /* segment descriptor priority level */
1532 0, /* segment descriptor present */
1533 0, 0,
1534 0, /* default 32 vs 16 bit size */
1535 0 /* limit granularity (byte/page units)*/ },
1536 /* Code Descriptor for user */
1537 { 0x0, /* segment base address */
1538 0xfffff, /* length - all address space */
1539 SDT_MEMERA, /* segment type */
1540 SEL_UPL, /* segment descriptor priority level */
1541 1, /* segment descriptor present */
1542 0, 0,
1543 1, /* default 32 vs 16 bit size */
1544 1 /* limit granularity (byte/page units)*/ },
1545 /* Null Descriptor - overwritten by call gate */
1546 { 0x0, /* segment base address */
1547 0x0, /* length - all address space */
1548 0, /* segment type */
1549 0, /* segment descriptor priority level */
1550 0, /* segment descriptor present */
1551 0, 0,
1552 0, /* default 32 vs 16 bit size */
1553 0 /* limit granularity (byte/page units)*/ },
1554 /* Data Descriptor for user */
1555 { 0x0, /* segment base address */
1556 0xfffff, /* length - all address space */
1557 SDT_MEMRWA, /* segment type */
1558 SEL_UPL, /* segment descriptor priority level */
1559 1, /* segment descriptor present */
1560 0, 0,
1561 1, /* default 32 vs 16 bit size */
1562 1 /* limit granularity (byte/page units)*/ },
1563 };
1564
1565 void
1566 setidt(idx, func, typ, dpl, selec)
1567 int idx;
1568 inthand_t *func;
1569 int typ;
1570 int dpl;
1571 int selec;
1572 {
1573 struct gate_descriptor *ip;
1574
1575 ip = idt + idx;
1576 ip->gd_looffset = (int)func;
1577 ip->gd_selector = selec;
1578 ip->gd_stkcpy = 0;
1579 ip->gd_xx = 0;
1580 ip->gd_type = typ;
1581 ip->gd_dpl = dpl;
1582 ip->gd_p = 1;
1583 ip->gd_hioffset = ((int)func)>>16 ;
1584 }
1585
1586 extern inthand_t
1587 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1588 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1589 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1590 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1591 IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
1592
1593 #ifdef DDB
1594 /*
1595 * Display the index and function name of any IDT entries that don't use
1596 * the default 'rsvd' entry point.
1597 */
1598 DB_SHOW_COMMAND(idt, db_show_idt)
1599 {
1600 struct gate_descriptor *ip;
1601 int idx;
1602 uintptr_t func;
1603
1604 ip = idt;
1605 for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
1606 func = (ip->gd_hioffset << 16 | ip->gd_looffset);
1607 if (func != (uintptr_t)&IDTVEC(rsvd)) {
1608 db_printf("%3d\t", idx);
1609 db_printsym(func, DB_STGY_PROC);
1610 db_printf("\n");
1611 }
1612 ip++;
1613 }
1614 }
1615
1616 /* Show privileged registers. */
1617 DB_SHOW_COMMAND(sysregs, db_show_sysregs)
1618 {
1619 uint64_t idtr, gdtr;
1620
1621 idtr = ridt();
1622 db_printf("idtr\t0x%08x/%04x\n",
1623 (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
1624 gdtr = rgdt();
1625 db_printf("gdtr\t0x%08x/%04x\n",
1626 (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
1627 db_printf("ldtr\t0x%04x\n", rldt());
1628 db_printf("tr\t0x%04x\n", rtr());
1629 db_printf("cr0\t0x%08x\n", rcr0());
1630 db_printf("cr2\t0x%08x\n", rcr2());
1631 db_printf("cr3\t0x%08x\n", rcr3());
1632 db_printf("cr4\t0x%08x\n", rcr4());
1633 }
1634 #endif
1635
1636 void
1637 sdtossd(sd, ssd)
1638 struct segment_descriptor *sd;
1639 struct soft_segment_descriptor *ssd;
1640 {
1641 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1642 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1643 ssd->ssd_type = sd->sd_type;
1644 ssd->ssd_dpl = sd->sd_dpl;
1645 ssd->ssd_p = sd->sd_p;
1646 ssd->ssd_def32 = sd->sd_def32;
1647 ssd->ssd_gran = sd->sd_gran;
1648 }
1649
1650 /*
1651 * Populate the (physmap) array with base/bound pairs describing the
1652 * available physical memory in the system, then test this memory and
1653 * build the phys_avail array describing the actually-available memory.
1654 *
1655 * If we cannot accurately determine the physical memory map, then use
1656 * value from the 0xE801 call, and failing that, the RTC.
1657 *
1658 * Total memory size may be set by the kernel environment variable
1659 * hw.physmem or the compile-time define MAXMEM.
1660 *
1661 * XXX first should be vm_paddr_t.
1662 */
1663 static void
1664 getmemsize(int first)
1665 {
1666 int i, off, physmap_idx, pa_indx, da_indx;
1667 int hasbrokenint12, has_smap;
1668 u_long physmem_tunable;
1669 u_int extmem;
1670 struct vm86frame vmf;
1671 struct vm86context vmc;
1672 vm_paddr_t pa, physmap[PHYSMAP_SIZE];
1673 pt_entry_t *pte;
1674 struct bios_smap *smap;
1675 quad_t dcons_addr, dcons_size;
1676
1677 has_smap = 0;
1678 #ifdef XBOX
1679 if (arch_i386_is_xbox) {
1680 /*
1681 * We queried the memory size before, so chop off 4MB for
1682 * the framebuffer and inform the OS of this.
1683 */
1684 physmap[0] = 0;
1685 physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE;
1686 physmap_idx = 0;
1687 goto physmap_done;
1688 }
1689 #endif
1690
1691 hasbrokenint12 = 0;
1692 TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
1693 bzero(&vmf, sizeof(vmf));
1694 bzero(physmap, sizeof(physmap));
1695 basemem = 0;
1696
1697 /*
1698 * Some newer BIOSes has broken INT 12H implementation which cause
1699 * kernel panic immediately. In this case, we need to scan SMAP
1700 * with INT 15:E820 first, then determine base memory size.
1701 */
1702 if (hasbrokenint12) {
1703 goto int15e820;
1704 }
1705
1706 /*
1707 * Perform "base memory" related probes & setup
1708 */
1709 vm86_intcall(0x12, &vmf);
1710 basemem = vmf.vmf_ax;
1711 if (basemem > 640) {
1712 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
1713 basemem);
1714 basemem = 640;
1715 }
1716
1717 /*
1718 * XXX if biosbasemem is now < 640, there is a `hole'
1719 * between the end of base memory and the start of
1720 * ISA memory. The hole may be empty or it may
1721 * contain BIOS code or data. Map it read/write so
1722 * that the BIOS can write to it. (Memory from 0 to
1723 * the physical end of the kernel is mapped read-only
1724 * to begin with and then parts of it are remapped.
1725 * The parts that aren't remapped form holes that
1726 * remain read-only and are unused by the kernel.
1727 * The base memory area is below the physical end of
1728 * the kernel and right now forms a read-only hole.
1729 * The part of it from PAGE_SIZE to
1730 * (trunc_page(biosbasemem * 1024) - 1) will be
1731 * remapped and used by the kernel later.)
1732 *
1733 * This code is similar to the code used in
1734 * pmap_mapdev, but since no memory needs to be
1735 * allocated we simply change the mapping.
1736 */
1737 for (pa = trunc_page(basemem * 1024);
1738 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1739 pmap_kenter(KERNBASE + pa, pa);
1740
1741 /*
1742 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
1743 * the vm86 page table so that vm86 can scribble on them using
1744 * the vm86 map too. XXX: why 2 ways for this and only 1 way for
1745 * page 0, at least as initialized here?
1746 */
1747 pte = (pt_entry_t *)vm86paddr;
1748 for (i = basemem / 4; i < 160; i++)
1749 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
1750
1751 int15e820:
1752 /*
1753 * map page 1 R/W into the kernel page table so we can use it
1754 * as a buffer. The kernel will unmap this page later.
1755 */
1756 pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
1757
1758 /*
1759 * get memory map with INT 15:E820
1760 */
1761 vmc.npages = 0;
1762 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
1763 vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
1764
1765 physmap_idx = 0;
1766 vmf.vmf_ebx = 0;
1767 do {
1768 vmf.vmf_eax = 0xE820;
1769 vmf.vmf_edx = SMAP_SIG;
1770 vmf.vmf_ecx = sizeof(struct bios_smap);
1771 i = vm86_datacall(0x15, &vmf, &vmc);
1772 if (i || vmf.vmf_eax != SMAP_SIG)
1773 break;
1774 if (boothowto & RB_VERBOSE)
1775 printf("SMAP type=%02x base=%016llx len=%016llx\n",
1776 smap->type, smap->base, smap->length);
1777 has_smap = 1;
1778
1779 if (smap->type != 0x01)
1780 continue;
1781
1782 if (smap->length == 0)
1783 continue;
1784
1785 #ifndef PAE
1786 if (smap->base >= 0xffffffff) {
1787 printf("%uK of memory above 4GB ignored\n",
1788 (u_int)(smap->length / 1024));
1789 continue;
1790 }
1791 #endif
1792
1793 for (i = 0; i <= physmap_idx; i += 2) {
1794 if (smap->base < physmap[i + 1]) {
1795 if (boothowto & RB_VERBOSE)
1796 printf(
1797 "Overlapping or non-monotonic memory region, ignoring second region\n");
1798 continue;
1799 }
1800 }
1801
1802 if (smap->base == physmap[physmap_idx + 1]) {
1803 physmap[physmap_idx + 1] += smap->length;
1804 continue;
1805 }
1806
1807 physmap_idx += 2;
1808 if (physmap_idx == PHYSMAP_SIZE) {
1809 printf(
1810 "Too many segments in the physical address map, giving up\n");
1811 break;
1812 }
1813 physmap[physmap_idx] = smap->base;
1814 physmap[physmap_idx + 1] = smap->base + smap->length;
1815 } while (vmf.vmf_ebx != 0);
1816
1817 /*
1818 * Perform "base memory" related probes & setup based on SMAP
1819 */
1820 if (basemem == 0) {
1821 for (i = 0; i <= physmap_idx; i += 2) {
1822 if (physmap[i] == 0x00000000) {
1823 basemem = physmap[i + 1] / 1024;
1824 break;
1825 }
1826 }
1827
1828 /*
1829 * XXX this function is horribly organized and has to the same
1830 * things that it does above here.
1831 */
1832 if (basemem == 0)
1833 basemem = 640;
1834 if (basemem > 640) {
1835 printf(
1836 "Preposterous BIOS basemem of %uK, truncating to 640K\n",
1837 basemem);
1838 basemem = 640;
1839 }
1840
1841 /*
1842 * Let vm86 scribble on pages between basemem and
1843 * ISA_HOLE_START, as above.
1844 */
1845 for (pa = trunc_page(basemem * 1024);
1846 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1847 pmap_kenter(KERNBASE + pa, pa);
1848 pte = (pt_entry_t *)vm86paddr;
1849 for (i = basemem / 4; i < 160; i++)
1850 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
1851 }
1852
1853 if (physmap[1] != 0)
1854 goto physmap_done;
1855
1856 /*
1857 * If we failed above, try memory map with INT 15:E801
1858 */
1859 vmf.vmf_ax = 0xE801;
1860 if (vm86_intcall(0x15, &vmf) == 0) {
1861 extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
1862 } else {
1863 #if 0
1864 vmf.vmf_ah = 0x88;
1865 vm86_intcall(0x15, &vmf);
1866 extmem = vmf.vmf_ax;
1867 #else
1868 /*
1869 * Prefer the RTC value for extended memory.
1870 */
1871 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
1872 #endif
1873 }
1874
1875 /*
1876 * Special hack for chipsets that still remap the 384k hole when
1877 * there's 16MB of memory - this really confuses people that
1878 * are trying to use bus mastering ISA controllers with the
1879 * "16MB limit"; they only have 16MB, but the remapping puts
1880 * them beyond the limit.
1881 *
1882 * If extended memory is between 15-16MB (16-17MB phys address range),
1883 * chop it to 15MB.
1884 */
1885 if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
1886 extmem = 15 * 1024;
1887
1888 physmap[0] = 0;
1889 physmap[1] = basemem * 1024;
1890 physmap_idx = 2;
1891 physmap[physmap_idx] = 0x100000;
1892 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
1893
1894 physmap_done:
1895 /*
1896 * Now, physmap contains a map of physical memory.
1897 */
1898
1899 #ifdef SMP
1900 /* make hole for AP bootstrap code */
1901 physmap[1] = mp_bootaddress(physmap[1]);
1902 #endif
1903
1904 /*
1905 * Maxmem isn't the "maximum memory", it's one larger than the
1906 * highest page of the physical address space. It should be
1907 * called something like "Maxphyspage". We may adjust this
1908 * based on ``hw.physmem'' and the results of the memory test.
1909 */
1910 Maxmem = atop(physmap[physmap_idx + 1]);
1911
1912 #ifdef MAXMEM
1913 Maxmem = MAXMEM / 4;
1914 #endif
1915
1916 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
1917 Maxmem = atop(physmem_tunable);
1918
1919 /*
1920 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
1921 * the amount of memory in the system.
1922 */
1923 if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
1924 Maxmem = atop(physmap[physmap_idx + 1]);
1925
1926 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1927 (boothowto & RB_VERBOSE))
1928 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1929
1930 /*
1931 * If Maxmem has been increased beyond what the system has detected,
1932 * extend the last memory segment to the new limit.
1933 */
1934 if (atop(physmap[physmap_idx + 1]) < Maxmem)
1935 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
1936
1937 /* call pmap initialization to make new kernel address space */
1938 pmap_bootstrap(first);
1939
1940 /*
1941 * Size up each available chunk of physical memory.
1942 */
1943 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1944 pa_indx = 0;
1945 da_indx = 1;
1946 phys_avail[pa_indx++] = physmap[0];
1947 phys_avail[pa_indx] = physmap[0];
1948 dump_avail[da_indx] = physmap[0];
1949 pte = CMAP1;
1950
1951 /*
1952 * Get dcons buffer address
1953 */
1954 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
1955 getenv_quad("dcons.size", &dcons_size) == 0)
1956 dcons_addr = 0;
1957
1958 /*
1959 * physmap is in bytes, so when converting to page boundaries,
1960 * round up the start address and round down the end address.
1961 */
1962 for (i = 0; i <= physmap_idx; i += 2) {
1963 vm_paddr_t end;
1964
1965 end = ptoa((vm_paddr_t)Maxmem);
1966 if (physmap[i + 1] < end)
1967 end = trunc_page(physmap[i + 1]);
1968 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1969 int tmp, page_bad, full;
1970 int *ptr = (int *)CADDR1;
1971
1972 full = FALSE;
1973 /*
1974 * block out kernel memory as not available.
1975 */
1976 if (pa >= KERNLOAD && pa < first)
1977 goto do_dump_avail;
1978
1979 /*
1980 * block out dcons buffer
1981 */
1982 if (dcons_addr > 0
1983 && pa >= trunc_page(dcons_addr)
1984 && pa < dcons_addr + dcons_size)
1985 goto do_dump_avail;
1986
1987 page_bad = FALSE;
1988
1989 /*
1990 * map page into kernel: valid, read/write,non-cacheable
1991 */
1992 *pte = pa | PG_V | PG_RW | PG_N;
1993 invltlb();
1994
1995 tmp = *(int *)ptr;
1996 /*
1997 * Test for alternating 1's and 0's
1998 */
1999 *(volatile int *)ptr = 0xaaaaaaaa;
2000 if (*(volatile int *)ptr != 0xaaaaaaaa)
2001 page_bad = TRUE;
2002 /*
2003 * Test for alternating 0's and 1's
2004 */
2005 *(volatile int *)ptr = 0x55555555;
2006 if (*(volatile int *)ptr != 0x55555555)
2007 page_bad = TRUE;
2008 /*
2009 * Test for all 1's
2010 */
2011 *(volatile int *)ptr = 0xffffffff;
2012 if (*(volatile int *)ptr != 0xffffffff)
2013 page_bad = TRUE;
2014 /*
2015 * Test for all 0's
2016 */
2017 *(volatile int *)ptr = 0x0;
2018 if (*(volatile int *)ptr != 0x0)
2019 page_bad = TRUE;
2020 /*
2021 * Restore original value.
2022 */
2023 *(int *)ptr = tmp;
2024
2025 /*
2026 * Adjust array of valid/good pages.
2027 */
2028 if (page_bad == TRUE)
2029 continue;
2030 /*
2031 * If this good page is a continuation of the
2032 * previous set of good pages, then just increase
2033 * the end pointer. Otherwise start a new chunk.
2034 * Note that "end" points one higher than end,
2035 * making the range >= start and < end.
2036 * If we're also doing a speculative memory
2037 * test and we at or past the end, bump up Maxmem
2038 * so that we keep going. The first bad page
2039 * will terminate the loop.
2040 */
2041 if (phys_avail[pa_indx] == pa) {
2042 phys_avail[pa_indx] += PAGE_SIZE;
2043 } else {
2044 pa_indx++;
2045 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
2046 printf(
2047 "Too many holes in the physical address space, giving up\n");
2048 pa_indx--;
2049 full = TRUE;
2050 goto do_dump_avail;
2051 }
2052 phys_avail[pa_indx++] = pa; /* start */
2053 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
2054 }
2055 physmem++;
2056 do_dump_avail:
2057 if (dump_avail[da_indx] == pa) {
2058 dump_avail[da_indx] += PAGE_SIZE;
2059 } else {
2060 da_indx++;
2061 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2062 da_indx--;
2063 goto do_next;
2064 }
2065 dump_avail[da_indx++] = pa; /* start */
2066 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2067 }
2068 do_next:
2069 if (full)
2070 break;
2071 }
2072 }
2073 *pte = 0;
2074 invltlb();
2075
2076 /*
2077 * XXX
2078 * The last chunk must contain at least one page plus the message
2079 * buffer to avoid complicating other code (message buffer address
2080 * calculation, etc.).
2081 */
2082 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2083 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
2084 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2085 phys_avail[pa_indx--] = 0;
2086 phys_avail[pa_indx--] = 0;
2087 }
2088
2089 Maxmem = atop(phys_avail[pa_indx]);
2090
2091 /* Trim off space for the message buffer. */
2092 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
2093
2094 /* Map the message buffer. */
2095 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
2096 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
2097 off);
2098 }
2099
2100 void
2101 init386(first)
2102 int first;
2103 {
2104 struct gate_descriptor *gdp;
2105 int gsel_tss, metadata_missing, x;
2106 struct pcpu *pc;
2107
2108 thread0.td_kstack = proc0kstack;
2109 thread0.td_pcb = (struct pcb *)
2110 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
2111
2112 /*
2113 * This may be done better later if it gets more high level
2114 * components in it. If so just link td->td_proc here.
2115 */
2116 proc_linkup0(&proc0, &thread0);
2117
2118 metadata_missing = 0;
2119 if (bootinfo.bi_modulep) {
2120 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
2121 preload_bootstrap_relocate(KERNBASE);
2122 } else {
2123 metadata_missing = 1;
2124 }
2125 if (envmode == 1)
2126 kern_envp = static_env;
2127 else if (bootinfo.bi_envp)
2128 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
2129
2130 /* Init basic tunables, hz etc */
2131 init_param1();
2132
2133 /*
2134 * Make gdt memory segments. All segments cover the full 4GB
2135 * of address space and permissions are enforced at page level.
2136 */
2137 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
2138 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
2139 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
2140 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
2141 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
2142 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
2143
2144 #ifdef SMP
2145 pc = &SMP_prvspace[0].pcpu;
2146 #else
2147 pc = &__pcpu;
2148 #endif
2149 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
2150 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2151 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2152
2153 for (x = 0; x < NGDT; x++)
2154 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2155
2156 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
2157 r_gdt.rd_base = (int) gdt;
2158 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
2159 lgdt(&r_gdt);
2160
2161 pcpu_init(pc, 0, sizeof(struct pcpu));
2162 PCPU_SET(prvspace, pc);
2163 PCPU_SET(curthread, &thread0);
2164 PCPU_SET(curpcb, thread0.td_pcb);
2165
2166 /*
2167 * Initialize mutexes.
2168 *
2169 * icu_lock: in order to allow an interrupt to occur in a critical
2170 * section, to set pcpu->ipending (etc...) properly, we
2171 * must be able to get the icu lock, so it can't be
2172 * under witness.
2173 */
2174 mutex_init();
2175 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2176
2177 /* make ldt memory segments */
2178 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2179 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2180 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2181 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2182
2183 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2184 lldt(_default_ldt);
2185 PCPU_SET(currentldt, _default_ldt);
2186
2187 /* exceptions */
2188 for (x = 0; x < NIDT; x++)
2189 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2190 GSEL(GCODE_SEL, SEL_KPL));
2191 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2192 GSEL(GCODE_SEL, SEL_KPL));
2193 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2194 GSEL(GCODE_SEL, SEL_KPL));
2195 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2196 GSEL(GCODE_SEL, SEL_KPL));
2197 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2198 GSEL(GCODE_SEL, SEL_KPL));
2199 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2200 GSEL(GCODE_SEL, SEL_KPL));
2201 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2202 GSEL(GCODE_SEL, SEL_KPL));
2203 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2204 GSEL(GCODE_SEL, SEL_KPL));
2205 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL
2206 , GSEL(GCODE_SEL, SEL_KPL));
2207 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2208 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2209 GSEL(GCODE_SEL, SEL_KPL));
2210 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2211 GSEL(GCODE_SEL, SEL_KPL));
2212 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2213 GSEL(GCODE_SEL, SEL_KPL));
2214 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2215 GSEL(GCODE_SEL, SEL_KPL));
2216 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2217 GSEL(GCODE_SEL, SEL_KPL));
2218 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2219 GSEL(GCODE_SEL, SEL_KPL));
2220 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2221 GSEL(GCODE_SEL, SEL_KPL));
2222 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2223 GSEL(GCODE_SEL, SEL_KPL));
2224 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2225 GSEL(GCODE_SEL, SEL_KPL));
2226 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2227 GSEL(GCODE_SEL, SEL_KPL));
2228 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2229 GSEL(GCODE_SEL, SEL_KPL));
2230
2231 r_idt.rd_limit = sizeof(idt0) - 1;
2232 r_idt.rd_base = (int) idt;
2233 lidt(&r_idt);
2234
2235 #ifdef XBOX
2236 /*
2237 * The following code queries the PCI ID of 0:0:0. For the XBOX,
2238 * This should be 0x10de / 0x02a5.
2239 *
2240 * This is exactly what Linux does.
2241 */
2242 outl(0xcf8, 0x80000000);
2243 if (inl(0xcfc) == 0x02a510de) {
2244 arch_i386_is_xbox = 1;
2245 pic16l_setled(XBOX_LED_GREEN);
2246
2247 /*
2248 * We are an XBOX, but we may have either 64MB or 128MB of
2249 * memory. The PCI host bridge should be programmed for this,
2250 * so we just query it.
2251 */
2252 outl(0xcf8, 0x80000084);
2253 arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
2254 }
2255 #endif /* XBOX */
2256
2257 /*
2258 * Initialize the i8254 before the console so that console
2259 * initialization can use DELAY().
2260 */
2261 i8254_init();
2262
2263 /*
2264 * Initialize the console before we print anything out.
2265 */
2266 cninit();
2267
2268 if (metadata_missing)
2269 printf("WARNING: loader(8) metadata is missing!\n");
2270
2271 #ifdef DEV_ISA
2272 elcr_probe();
2273 atpic_startup();
2274 #endif
2275
2276 #ifdef DDB
2277 ksym_start = bootinfo.bi_symtab;
2278 ksym_end = bootinfo.bi_esymtab;
2279 #endif
2280
2281 kdb_init();
2282
2283 #ifdef KDB
2284 if (boothowto & RB_KDB)
2285 kdb_enter("Boot flags requested debugger");
2286 #endif
2287
2288 finishidentcpu(); /* Final stage of CPU initialization */
2289 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2290 GSEL(GCODE_SEL, SEL_KPL));
2291 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2292 GSEL(GCODE_SEL, SEL_KPL));
2293 initializecpu(); /* Initialize CPU registers */
2294
2295 /* make an initial tss so cpu can get interrupt stack on syscall! */
2296 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2297 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2298 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
2299 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2300 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2301 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
2302 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
2303 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
2304 ltr(gsel_tss);
2305
2306 /* pointer to selector slot for %fs/%gs */
2307 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
2308
2309 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2310 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2311 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2312 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2313 #ifdef PAE
2314 dblfault_tss.tss_cr3 = (int)IdlePDPT;
2315 #else
2316 dblfault_tss.tss_cr3 = (int)IdlePTD;
2317 #endif
2318 dblfault_tss.tss_eip = (int)dblfault_handler;
2319 dblfault_tss.tss_eflags = PSL_KERNEL;
2320 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2321 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2322 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2323 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2324 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2325
2326 vm86_initialize();
2327 getmemsize(first);
2328 init_param2(physmem);
2329
2330 /* now running on new page tables, configured,and u/iom is accessible */
2331
2332 msgbufinit(msgbufp, MSGBUF_SIZE);
2333
2334 /* make a call gate to reenter kernel with */
2335 gdp = &ldt[LSYS5CALLS_SEL].gd;
2336
2337 x = (int) &IDTVEC(lcall_syscall);
2338 gdp->gd_looffset = x;
2339 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
2340 gdp->gd_stkcpy = 1;
2341 gdp->gd_type = SDT_SYS386CGT;
2342 gdp->gd_dpl = SEL_UPL;
2343 gdp->gd_p = 1;
2344 gdp->gd_hioffset = x >> 16;
2345
2346 /* XXX does this work? */
2347 /* XXX yes! */
2348 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2349 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
2350
2351 /* transfer to user mode */
2352
2353 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
2354 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
2355
2356 /* setup proc 0's pcb */
2357 thread0.td_pcb->pcb_flags = 0;
2358 #ifdef PAE
2359 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
2360 #else
2361 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2362 #endif
2363 thread0.td_pcb->pcb_ext = 0;
2364 thread0.td_frame = &proc0_tf;
2365 }
2366
2367 void
2368 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
2369 {
2370
2371 pcpu->pc_acpi_id = 0xffffffff;
2372 }
2373
2374 void
2375 spinlock_enter(void)
2376 {
2377 struct thread *td;
2378
2379 td = curthread;
2380 if (td->td_md.md_spinlock_count == 0)
2381 td->td_md.md_saved_flags = intr_disable();
2382 td->td_md.md_spinlock_count++;
2383 critical_enter();
2384 }
2385
2386 void
2387 spinlock_exit(void)
2388 {
2389 struct thread *td;
2390
2391 td = curthread;
2392 critical_exit();
2393 td->td_md.md_spinlock_count--;
2394 if (td->td_md.md_spinlock_count == 0)
2395 intr_restore(td->td_md.md_saved_flags);
2396 }
2397
2398 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2399 static void f00f_hack(void *unused);
2400 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL)
2401
2402 static void
2403 f00f_hack(void *unused)
2404 {
2405 struct gate_descriptor *new_idt;
2406 vm_offset_t tmp;
2407
2408 if (!has_f00f_bug)
2409 return;
2410
2411 GIANT_REQUIRED;
2412
2413 printf("Intel Pentium detected, installing workaround for F00F bug\n");
2414
2415 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
2416 if (tmp == 0)
2417 panic("kmem_alloc returned 0");
2418
2419 /* Put the problematic entry (#6) at the end of the lower page. */
2420 new_idt = (struct gate_descriptor*)
2421 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
2422 bcopy(idt, new_idt, sizeof(idt0));
2423 r_idt.rd_base = (u_int)new_idt;
2424 lidt(&r_idt);
2425 idt = new_idt;
2426 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
2427 VM_PROT_READ, FALSE) != KERN_SUCCESS)
2428 panic("vm_map_protect failed");
2429 }
2430 #endif /* defined(I586_CPU) && !NO_F00F_HACK */
2431
2432 /*
2433 * Construct a PCB from a trapframe. This is called from kdb_trap() where
2434 * we want to start a backtrace from the function that caused us to enter
2435 * the debugger. We have the context in the trapframe, but base the trace
2436 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
2437 * enough for a backtrace.
2438 */
2439 void
2440 makectx(struct trapframe *tf, struct pcb *pcb)
2441 {
2442
2443 pcb->pcb_edi = tf->tf_edi;
2444 pcb->pcb_esi = tf->tf_esi;
2445 pcb->pcb_ebp = tf->tf_ebp;
2446 pcb->pcb_ebx = tf->tf_ebx;
2447 pcb->pcb_eip = tf->tf_eip;
2448 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
2449 }
2450
2451 int
2452 ptrace_set_pc(struct thread *td, u_long addr)
2453 {
2454
2455 td->td_frame->tf_eip = addr;
2456 return (0);
2457 }
2458
2459 int
2460 ptrace_single_step(struct thread *td)
2461 {
2462 td->td_frame->tf_eflags |= PSL_T;
2463 return (0);
2464 }
2465
2466 int
2467 ptrace_clear_single_step(struct thread *td)
2468 {
2469 td->td_frame->tf_eflags &= ~PSL_T;
2470 return (0);
2471 }
2472
2473 int
2474 fill_regs(struct thread *td, struct reg *regs)
2475 {
2476 struct pcb *pcb;
2477 struct trapframe *tp;
2478
2479 tp = td->td_frame;
2480 pcb = td->td_pcb;
2481 regs->r_fs = tp->tf_fs;
2482 regs->r_es = tp->tf_es;
2483 regs->r_ds = tp->tf_ds;
2484 regs->r_edi = tp->tf_edi;
2485 regs->r_esi = tp->tf_esi;
2486 regs->r_ebp = tp->tf_ebp;
2487 regs->r_ebx = tp->tf_ebx;
2488 regs->r_edx = tp->tf_edx;
2489 regs->r_ecx = tp->tf_ecx;
2490 regs->r_eax = tp->tf_eax;
2491 regs->r_eip = tp->tf_eip;
2492 regs->r_cs = tp->tf_cs;
2493 regs->r_eflags = tp->tf_eflags;
2494 regs->r_esp = tp->tf_esp;
2495 regs->r_ss = tp->tf_ss;
2496 regs->r_gs = pcb->pcb_gs;
2497 return (0);
2498 }
2499
2500 int
2501 set_regs(struct thread *td, struct reg *regs)
2502 {
2503 struct pcb *pcb;
2504 struct trapframe *tp;
2505
2506 tp = td->td_frame;
2507 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
2508 !CS_SECURE(regs->r_cs))
2509 return (EINVAL);
2510 pcb = td->td_pcb;
2511 tp->tf_fs = regs->r_fs;
2512 tp->tf_es = regs->r_es;
2513 tp->tf_ds = regs->r_ds;
2514 tp->tf_edi = regs->r_edi;
2515 tp->tf_esi = regs->r_esi;
2516 tp->tf_ebp = regs->r_ebp;
2517 tp->tf_ebx = regs->r_ebx;
2518 tp->tf_edx = regs->r_edx;
2519 tp->tf_ecx = regs->r_ecx;
2520 tp->tf_eax = regs->r_eax;
2521 tp->tf_eip = regs->r_eip;
2522 tp->tf_cs = regs->r_cs;
2523 tp->tf_eflags = regs->r_eflags;
2524 tp->tf_esp = regs->r_esp;
2525 tp->tf_ss = regs->r_ss;
2526 pcb->pcb_gs = regs->r_gs;
2527 return (0);
2528 }
2529
2530 #ifdef CPU_ENABLE_SSE
2531 static void
2532 fill_fpregs_xmm(sv_xmm, sv_87)
2533 struct savexmm *sv_xmm;
2534 struct save87 *sv_87;
2535 {
2536 register struct env87 *penv_87 = &sv_87->sv_env;
2537 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2538 int i;
2539
2540 bzero(sv_87, sizeof(*sv_87));
2541
2542 /* FPU control/status */
2543 penv_87->en_cw = penv_xmm->en_cw;
2544 penv_87->en_sw = penv_xmm->en_sw;
2545 penv_87->en_tw = penv_xmm->en_tw;
2546 penv_87->en_fip = penv_xmm->en_fip;
2547 penv_87->en_fcs = penv_xmm->en_fcs;
2548 penv_87->en_opcode = penv_xmm->en_opcode;
2549 penv_87->en_foo = penv_xmm->en_foo;
2550 penv_87->en_fos = penv_xmm->en_fos;
2551
2552 /* FPU registers */
2553 for (i = 0; i < 8; ++i)
2554 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
2555 }
2556
2557 static void
2558 set_fpregs_xmm(sv_87, sv_xmm)
2559 struct save87 *sv_87;
2560 struct savexmm *sv_xmm;
2561 {
2562 register struct env87 *penv_87 = &sv_87->sv_env;
2563 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2564 int i;
2565
2566 /* FPU control/status */
2567 penv_xmm->en_cw = penv_87->en_cw;
2568 penv_xmm->en_sw = penv_87->en_sw;
2569 penv_xmm->en_tw = penv_87->en_tw;
2570 penv_xmm->en_fip = penv_87->en_fip;
2571 penv_xmm->en_fcs = penv_87->en_fcs;
2572 penv_xmm->en_opcode = penv_87->en_opcode;
2573 penv_xmm->en_foo = penv_87->en_foo;
2574 penv_xmm->en_fos = penv_87->en_fos;
2575
2576 /* FPU registers */
2577 for (i = 0; i < 8; ++i)
2578 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
2579 }
2580 #endif /* CPU_ENABLE_SSE */
2581
2582 int
2583 fill_fpregs(struct thread *td, struct fpreg *fpregs)
2584 {
2585 #ifdef CPU_ENABLE_SSE
2586 if (cpu_fxsr) {
2587 fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
2588 (struct save87 *)fpregs);
2589 return (0);
2590 }
2591 #endif /* CPU_ENABLE_SSE */
2592 bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
2593 return (0);
2594 }
2595
2596 int
2597 set_fpregs(struct thread *td, struct fpreg *fpregs)
2598 {
2599 #ifdef CPU_ENABLE_SSE
2600 if (cpu_fxsr) {
2601 set_fpregs_xmm((struct save87 *)fpregs,
2602 &td->td_pcb->pcb_save.sv_xmm);
2603 return (0);
2604 }
2605 #endif /* CPU_ENABLE_SSE */
2606 bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
2607 return (0);
2608 }
2609
2610 /*
2611 * Get machine context.
2612 */
2613 int
2614 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
2615 {
2616 struct trapframe *tp;
2617
2618 tp = td->td_frame;
2619
2620 PROC_LOCK(curthread->td_proc);
2621 mcp->mc_onstack = sigonstack(tp->tf_esp);
2622 PROC_UNLOCK(curthread->td_proc);
2623 mcp->mc_gs = td->td_pcb->pcb_gs;
2624 mcp->mc_fs = tp->tf_fs;
2625 mcp->mc_es = tp->tf_es;
2626 mcp->mc_ds = tp->tf_ds;
2627 mcp->mc_edi = tp->tf_edi;
2628 mcp->mc_esi = tp->tf_esi;
2629 mcp->mc_ebp = tp->tf_ebp;
2630 mcp->mc_isp = tp->tf_isp;
2631 mcp->mc_eflags = tp->tf_eflags;
2632 if (flags & GET_MC_CLEAR_RET) {
2633 mcp->mc_eax = 0;
2634 mcp->mc_edx = 0;
2635 mcp->mc_eflags &= ~PSL_C;
2636 } else {
2637 mcp->mc_eax = tp->tf_eax;
2638 mcp->mc_edx = tp->tf_edx;
2639 }
2640 mcp->mc_ebx = tp->tf_ebx;
2641 mcp->mc_ecx = tp->tf_ecx;
2642 mcp->mc_eip = tp->tf_eip;
2643 mcp->mc_cs = tp->tf_cs;
2644 mcp->mc_esp = tp->tf_esp;
2645 mcp->mc_ss = tp->tf_ss;
2646 mcp->mc_len = sizeof(*mcp);
2647 get_fpcontext(td, mcp);
2648 return (0);
2649 }
2650
2651 /*
2652 * Set machine context.
2653 *
2654 * However, we don't set any but the user modifiable flags, and we won't
2655 * touch the cs selector.
2656 */
2657 int
2658 set_mcontext(struct thread *td, const mcontext_t *mcp)
2659 {
2660 struct trapframe *tp;
2661 int eflags, ret;
2662
2663 tp = td->td_frame;
2664 if (mcp->mc_len != sizeof(*mcp))
2665 return (EINVAL);
2666 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
2667 (tp->tf_eflags & ~PSL_USERCHANGE);
2668 if ((ret = set_fpcontext(td, mcp)) == 0) {
2669 tp->tf_fs = mcp->mc_fs;
2670 tp->tf_es = mcp->mc_es;
2671 tp->tf_ds = mcp->mc_ds;
2672 tp->tf_edi = mcp->mc_edi;
2673 tp->tf_esi = mcp->mc_esi;
2674 tp->tf_ebp = mcp->mc_ebp;
2675 tp->tf_ebx = mcp->mc_ebx;
2676 tp->tf_edx = mcp->mc_edx;
2677 tp->tf_ecx = mcp->mc_ecx;
2678 tp->tf_eax = mcp->mc_eax;
2679 tp->tf_eip = mcp->mc_eip;
2680 tp->tf_eflags = eflags;
2681 tp->tf_esp = mcp->mc_esp;
2682 tp->tf_ss = mcp->mc_ss;
2683 td->td_pcb->pcb_gs = mcp->mc_gs;
2684 ret = 0;
2685 }
2686 return (ret);
2687 }
2688
2689 static void
2690 get_fpcontext(struct thread *td, mcontext_t *mcp)
2691 {
2692 #ifndef DEV_NPX
2693 mcp->mc_fpformat = _MC_FPFMT_NODEV;
2694 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
2695 #else
2696 union savefpu *addr;
2697
2698 /*
2699 * XXX mc_fpstate might be misaligned, since its declaration is not
2700 * unportabilized using __attribute__((aligned(16))) like the
2701 * declaration of struct savemm, and anyway, alignment doesn't work
2702 * for auto variables since we don't use gcc's pessimal stack
2703 * alignment. Work around this by abusing the spare fields after
2704 * mcp->mc_fpstate.
2705 *
2706 * XXX unpessimize most cases by only aligning when fxsave might be
2707 * called, although this requires knowing too much about
2708 * npxgetregs()'s internals.
2709 */
2710 addr = (union savefpu *)&mcp->mc_fpstate;
2711 if (td == PCPU_GET(fpcurthread) &&
2712 #ifdef CPU_ENABLE_SSE
2713 cpu_fxsr &&
2714 #endif
2715 ((uintptr_t)(void *)addr & 0xF)) {
2716 do
2717 addr = (void *)((char *)addr + 4);
2718 while ((uintptr_t)(void *)addr & 0xF);
2719 }
2720 mcp->mc_ownedfp = npxgetregs(td, addr);
2721 if (addr != (union savefpu *)&mcp->mc_fpstate) {
2722 bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
2723 bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
2724 }
2725 mcp->mc_fpformat = npxformat();
2726 #endif
2727 }
2728
2729 static int
2730 set_fpcontext(struct thread *td, const mcontext_t *mcp)
2731 {
2732 union savefpu *addr;
2733
2734 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
2735 return (0);
2736 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
2737 mcp->mc_fpformat != _MC_FPFMT_XMM)
2738 return (EINVAL);
2739 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
2740 /* We don't care what state is left in the FPU or PCB. */
2741 fpstate_drop(td);
2742 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
2743 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
2744 /* XXX align as above. */
2745 addr = (union savefpu *)&mcp->mc_fpstate;
2746 if (td == PCPU_GET(fpcurthread) &&
2747 #ifdef CPU_ENABLE_SSE
2748 cpu_fxsr &&
2749 #endif
2750 ((uintptr_t)(void *)addr & 0xF)) {
2751 do
2752 addr = (void *)((char *)addr + 4);
2753 while ((uintptr_t)(void *)addr & 0xF);
2754 bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
2755 }
2756 #ifdef DEV_NPX
2757 #ifdef CPU_ENABLE_SSE
2758 if (cpu_fxsr)
2759 addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
2760 #endif
2761 /*
2762 * XXX we violate the dubious requirement that npxsetregs()
2763 * be called with interrupts disabled.
2764 */
2765 npxsetregs(td, addr);
2766 #endif
2767 /*
2768 * Don't bother putting things back where they were in the
2769 * misaligned case, since we know that the caller won't use
2770 * them again.
2771 */
2772 } else
2773 return (EINVAL);
2774 return (0);
2775 }
2776
2777 static void
2778 fpstate_drop(struct thread *td)
2779 {
2780 register_t s;
2781
2782 s = intr_disable();
2783 #ifdef DEV_NPX
2784 if (PCPU_GET(fpcurthread) == td)
2785 npxdrop();
2786 #endif
2787 /*
2788 * XXX force a full drop of the npx. The above only drops it if we
2789 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
2790 *
2791 * XXX I don't much like npxgetregs()'s semantics of doing a full
2792 * drop. Dropping only to the pcb matches fnsave's behaviour.
2793 * We only need to drop to !PCB_INITDONE in sendsig(). But
2794 * sendsig() is the only caller of npxgetregs()... perhaps we just
2795 * have too many layers.
2796 */
2797 curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
2798 intr_restore(s);
2799 }
2800
2801 int
2802 fill_dbregs(struct thread *td, struct dbreg *dbregs)
2803 {
2804 struct pcb *pcb;
2805
2806 if (td == NULL) {
2807 dbregs->dr[0] = rdr0();
2808 dbregs->dr[1] = rdr1();
2809 dbregs->dr[2] = rdr2();
2810 dbregs->dr[3] = rdr3();
2811 dbregs->dr[4] = rdr4();
2812 dbregs->dr[5] = rdr5();
2813 dbregs->dr[6] = rdr6();
2814 dbregs->dr[7] = rdr7();
2815 } else {
2816 pcb = td->td_pcb;
2817 dbregs->dr[0] = pcb->pcb_dr0;
2818 dbregs->dr[1] = pcb->pcb_dr1;
2819 dbregs->dr[2] = pcb->pcb_dr2;
2820 dbregs->dr[3] = pcb->pcb_dr3;
2821 dbregs->dr[4] = 0;
2822 dbregs->dr[5] = 0;
2823 dbregs->dr[6] = pcb->pcb_dr6;
2824 dbregs->dr[7] = pcb->pcb_dr7;
2825 }
2826 return (0);
2827 }
2828
2829 int
2830 set_dbregs(struct thread *td, struct dbreg *dbregs)
2831 {
2832 struct pcb *pcb;
2833 int i;
2834
2835 if (td == NULL) {
2836 load_dr0(dbregs->dr[0]);
2837 load_dr1(dbregs->dr[1]);
2838 load_dr2(dbregs->dr[2]);
2839 load_dr3(dbregs->dr[3]);
2840 load_dr4(dbregs->dr[4]);
2841 load_dr5(dbregs->dr[5]);
2842 load_dr6(dbregs->dr[6]);
2843 load_dr7(dbregs->dr[7]);
2844 } else {
2845 /*
2846 * Don't let an illegal value for dr7 get set. Specifically,
2847 * check for undefined settings. Setting these bit patterns
2848 * result in undefined behaviour and can lead to an unexpected
2849 * TRCTRAP.
2850 */
2851 for (i = 0; i < 4; i++) {
2852 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
2853 return (EINVAL);
2854 if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
2855 return (EINVAL);
2856 }
2857
2858 pcb = td->td_pcb;
2859
2860 /*
2861 * Don't let a process set a breakpoint that is not within the
2862 * process's address space. If a process could do this, it
2863 * could halt the system by setting a breakpoint in the kernel
2864 * (if ddb was enabled). Thus, we need to check to make sure
2865 * that no breakpoints are being enabled for addresses outside
2866 * process's address space.
2867 *
2868 * XXX - what about when the watched area of the user's
2869 * address space is written into from within the kernel
2870 * ... wouldn't that still cause a breakpoint to be generated
2871 * from within kernel mode?
2872 */
2873
2874 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
2875 /* dr0 is enabled */
2876 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
2877 return (EINVAL);
2878 }
2879
2880 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
2881 /* dr1 is enabled */
2882 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
2883 return (EINVAL);
2884 }
2885
2886 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
2887 /* dr2 is enabled */
2888 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
2889 return (EINVAL);
2890 }
2891
2892 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
2893 /* dr3 is enabled */
2894 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
2895 return (EINVAL);
2896 }
2897
2898 pcb->pcb_dr0 = dbregs->dr[0];
2899 pcb->pcb_dr1 = dbregs->dr[1];
2900 pcb->pcb_dr2 = dbregs->dr[2];
2901 pcb->pcb_dr3 = dbregs->dr[3];
2902 pcb->pcb_dr6 = dbregs->dr[6];
2903 pcb->pcb_dr7 = dbregs->dr[7];
2904
2905 pcb->pcb_flags |= PCB_DBREGS;
2906 }
2907
2908 return (0);
2909 }
2910
2911 /*
2912 * Return > 0 if a hardware breakpoint has been hit, and the
2913 * breakpoint was in user space. Return 0, otherwise.
2914 */
2915 int
2916 user_dbreg_trap(void)
2917 {
2918 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
2919 u_int32_t bp; /* breakpoint bits extracted from dr6 */
2920 int nbp; /* number of breakpoints that triggered */
2921 caddr_t addr[4]; /* breakpoint addresses */
2922 int i;
2923
2924 dr7 = rdr7();
2925 if ((dr7 & 0x000000ff) == 0) {
2926 /*
2927 * all GE and LE bits in the dr7 register are zero,
2928 * thus the trap couldn't have been caused by the
2929 * hardware debug registers
2930 */
2931 return 0;
2932 }
2933
2934 nbp = 0;
2935 dr6 = rdr6();
2936 bp = dr6 & 0x0000000f;
2937
2938 if (!bp) {
2939 /*
2940 * None of the breakpoint bits are set meaning this
2941 * trap was not caused by any of the debug registers
2942 */
2943 return 0;
2944 }
2945
2946 /*
2947 * at least one of the breakpoints were hit, check to see
2948 * which ones and if any of them are user space addresses
2949 */
2950
2951 if (bp & 0x01) {
2952 addr[nbp++] = (caddr_t)rdr0();
2953 }
2954 if (bp & 0x02) {
2955 addr[nbp++] = (caddr_t)rdr1();
2956 }
2957 if (bp & 0x04) {
2958 addr[nbp++] = (caddr_t)rdr2();
2959 }
2960 if (bp & 0x08) {
2961 addr[nbp++] = (caddr_t)rdr3();
2962 }
2963
2964 for (i = 0; i < nbp; i++) {
2965 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
2966 /*
2967 * addr[i] is in user space
2968 */
2969 return nbp;
2970 }
2971 }
2972
2973 /*
2974 * None of the breakpoints are in user space.
2975 */
2976 return 0;
2977 }
2978
2979 #ifndef DEV_APIC
2980 #include <machine/apicvar.h>
2981
2982 /*
2983 * Provide stub functions so that the MADT APIC enumerator in the acpi
2984 * kernel module will link against a kernel without 'device apic'.
2985 *
2986 * XXX - This is a gross hack.
2987 */
2988 void
2989 apic_register_enumerator(struct apic_enumerator *enumerator)
2990 {
2991 }
2992
2993 void *
2994 ioapic_create(vm_paddr_t addr, int32_t apic_id, int intbase)
2995 {
2996 return (NULL);
2997 }
2998
2999 int
3000 ioapic_disable_pin(void *cookie, u_int pin)
3001 {
3002 return (ENXIO);
3003 }
3004
3005 int
3006 ioapic_get_vector(void *cookie, u_int pin)
3007 {
3008 return (-1);
3009 }
3010
3011 void
3012 ioapic_register(void *cookie)
3013 {
3014 }
3015
3016 int
3017 ioapic_remap_vector(void *cookie, u_int pin, int vector)
3018 {
3019 return (ENXIO);
3020 }
3021
3022 int
3023 ioapic_set_extint(void *cookie, u_int pin)
3024 {
3025 return (ENXIO);
3026 }
3027
3028 int
3029 ioapic_set_nmi(void *cookie, u_int pin)
3030 {
3031 return (ENXIO);
3032 }
3033
3034 int
3035 ioapic_set_polarity(void *cookie, u_int pin, enum intr_polarity pol)
3036 {
3037 return (ENXIO);
3038 }
3039
3040 int
3041 ioapic_set_triggermode(void *cookie, u_int pin, enum intr_trigger trigger)
3042 {
3043 return (ENXIO);
3044 }
3045
3046 void
3047 lapic_create(u_int apic_id, int boot_cpu)
3048 {
3049 }
3050
3051 void
3052 lapic_init(vm_paddr_t addr)
3053 {
3054 }
3055
3056 int
3057 lapic_set_lvt_mode(u_int apic_id, u_int lvt, u_int32_t mode)
3058 {
3059 return (ENXIO);
3060 }
3061
3062 int
3063 lapic_set_lvt_polarity(u_int apic_id, u_int lvt, enum intr_polarity pol)
3064 {
3065 return (ENXIO);
3066 }
3067
3068 int
3069 lapic_set_lvt_triggermode(u_int apic_id, u_int lvt, enum intr_trigger trigger)
3070 {
3071 return (ENXIO);
3072 }
3073 #endif
3074
3075 #ifdef KDB
3076
3077 /*
3078 * Provide inb() and outb() as functions. They are normally only
3079 * available as macros calling inlined functions, thus cannot be
3080 * called from the debugger.
3081 *
3082 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
3083 */
3084
3085 #undef inb
3086 #undef outb
3087
3088 /* silence compiler warnings */
3089 u_char inb(u_int);
3090 void outb(u_int, u_char);
3091
3092 u_char
3093 inb(u_int port)
3094 {
3095 u_char data;
3096 /*
3097 * We use %%dx and not %1 here because i/o is done at %dx and not at
3098 * %edx, while gcc generates inferior code (movw instead of movl)
3099 * if we tell it to load (u_short) port.
3100 */
3101 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
3102 return (data);
3103 }
3104
3105 void
3106 outb(u_int port, u_char data)
3107 {
3108 u_char al;
3109 /*
3110 * Use an unnecessary assignment to help gcc's register allocator.
3111 * This make a large difference for gcc-1.40 and a tiny difference
3112 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for
3113 * best results. gcc-2.6.0 can't handle this.
3114 */
3115 al = data;
3116 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
3117 }
3118
3119 #endif /* KDB */
Cache object: cdd8ab0f39ddb7100190579fe2c271a8
|