1 /*-
2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * William Jolitz.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
38 */
39
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42
43 #include "opt_apic.h"
44 #include "opt_atalk.h"
45 #include "opt_compat.h"
46 #include "opt_cpu.h"
47 #include "opt_ddb.h"
48 #include "opt_inet.h"
49 #include "opt_ipx.h"
50 #include "opt_isa.h"
51 #include "opt_kstack_pages.h"
52 #include "opt_maxmem.h"
53 #include "opt_msgbuf.h"
54 #include "opt_npx.h"
55 #include "opt_perfmon.h"
56 #include "opt_xbox.h"
57
58 #include <sys/param.h>
59 #include <sys/proc.h>
60 #include <sys/systm.h>
61 #include <sys/bio.h>
62 #include <sys/buf.h>
63 #include <sys/bus.h>
64 #include <sys/callout.h>
65 #include <sys/clock.h>
66 #include <sys/cons.h>
67 #include <sys/cpu.h>
68 #include <sys/eventhandler.h>
69 #include <sys/exec.h>
70 #include <sys/imgact.h>
71 #include <sys/kdb.h>
72 #include <sys/kernel.h>
73 #include <sys/ktr.h>
74 #include <sys/linker.h>
75 #include <sys/lock.h>
76 #include <sys/malloc.h>
77 #include <sys/memrange.h>
78 #include <sys/msgbuf.h>
79 #include <sys/mutex.h>
80 #include <sys/pcpu.h>
81 #include <sys/ptrace.h>
82 #include <sys/reboot.h>
83 #include <sys/sched.h>
84 #include <sys/signalvar.h>
85 #include <sys/sysctl.h>
86 #include <sys/sysent.h>
87 #include <sys/sysproto.h>
88 #include <sys/ucontext.h>
89 #include <sys/vmmeter.h>
90
91 #include <vm/vm.h>
92 #include <vm/vm_extern.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_pager.h>
98 #include <vm/vm_param.h>
99
100 #ifdef DDB
101 #ifndef KDB
102 #error KDB must be enabled in order for DDB to work!
103 #endif
104 #include <ddb/ddb.h>
105 #include <ddb/db_sym.h>
106 #endif
107
108 #include <isa/rtc.h>
109
110 #include <net/netisr.h>
111
112 #include <machine/bootinfo.h>
113 #include <machine/clock.h>
114 #include <machine/cpu.h>
115 #include <machine/cputypes.h>
116 #include <machine/intr_machdep.h>
117 #include <machine/md_var.h>
118 #include <machine/metadata.h>
119 #include <machine/pc/bios.h>
120 #include <machine/pcb.h>
121 #include <machine/pcb_ext.h>
122 #include <machine/proc.h>
123 #include <machine/reg.h>
124 #include <machine/sigframe.h>
125 #include <machine/specialreg.h>
126 #include <machine/vm86.h>
127 #ifdef PERFMON
128 #include <machine/perfmon.h>
129 #endif
130 #ifdef SMP
131 #include <machine/smp.h>
132 #endif
133
134 #ifdef DEV_ISA
135 #include <i386/isa/icu.h>
136 #endif
137
138 #ifdef XBOX
139 #include <machine/xbox.h>
140
141 int arch_i386_is_xbox = 0;
142 uint32_t arch_i386_xbox_memsize = 0;
143 #endif
144
145 /* Sanity check for __curthread() */
146 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
147
148 extern void init386(int first);
149 extern void dblfault_handler(void);
150
151 extern void printcpuinfo(void); /* XXX header file */
152 extern void finishidentcpu(void);
153 extern void panicifcpuunsupported(void);
154 extern void initializecpu(void);
155
156 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
157 #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
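/*
 * A minimal sketch of how these two checks are applied when a sigreturn()
 * variant below restores a user-supplied context (new_cs and new_eflags are
 * placeholders for the values taken from that context): the new %cs must be
 * a user-privilege selector, and the new eflags may differ from the live
 * ones only in the PSL_USERCHANGE bits.
 *
 *	if (!EFL_SECURE(new_eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF))
 *		return (EINVAL);
 *	if (!CS_SECURE(new_cs))
 *		return (EINVAL);	(after posting SIGBUS)
 */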
158
159 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
160 #define CPU_ENABLE_SSE
161 #endif
162
163 static void cpu_startup(void *);
164 static void fpstate_drop(struct thread *td);
165 static void get_fpcontext(struct thread *td, mcontext_t *mcp);
166 static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
167 #ifdef CPU_ENABLE_SSE
168 static void set_fpregs_xmm(struct save87 *, struct savexmm *);
169 static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
170 #endif /* CPU_ENABLE_SSE */
171 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
172
173 #ifdef DDB
174 extern vm_offset_t ksym_start, ksym_end;
175 #endif
176
177 /* Intel ICH registers */
178 #define ICH_PMBASE 0x400
179 #define ICH_SMI_EN (ICH_PMBASE + 0x30)
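/*
 * Bit 3 (0x8) of SMI_EN is the LEGACY_USB_EN bit that cpu_startup() clears
 * below on MacBooks; the rest of the bit layout is chipset specific, so
 * consult the ICH datasheet before touching other bits.
 */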
180
181 int _udatasel, _ucodesel;
182 u_int basemem;
183
184 int cold = 1;
185
186 #ifdef COMPAT_43
187 static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
188 #endif
189 #ifdef COMPAT_FREEBSD4
190 static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
191 #endif
192
193 long Maxmem = 0;
194 long realmem = 0;
195
196 #ifdef PAE
197 FEATURE(pae, "Physical Address Extensions");
198 #endif
199
200 /*
201 * The number of PHYSMAP entries must be one less than the number of
202 * PHYSSEG entries because the PHYSMAP entry that spans the largest
203 * physical address that is accessible by ISA DMA is split into two
204 * PHYSSEG entries.
205 */
206 #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
207
208 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
209 vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
210
211 /* must be 2 less so 0 0 can signal end of chunks */
212 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
213 #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
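/*
 * Both arrays hold base/end pairs and are terminated by that pair of zeroes,
 * so consumers can walk them with a loop roughly like the one cpu_startup()
 * uses for its verbose listing:
 *
 *	for (indx = 0; phys_avail[indx + 1] != 0; indx += 2)
 *		... use phys_avail[indx] .. phys_avail[indx + 1] - 1 ...
 */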
214
215 struct kva_md_info kmi;
216
217 static struct trapframe proc0_tf;
218 struct pcpu __pcpu[MAXCPU];
219
220 struct mtx icu_lock;
221
222 struct mem_range_softc mem_range_softc;
223
224 static void
225 cpu_startup(dummy)
226 void *dummy;
227 {
228 char *sysenv;
229
230 /*
231 * On MacBooks, we need to prevent the legacy USB circuit from
232 * generating an SMI# because this can cause several problems,
233 * namely: incorrect CPU frequency detection and failure to
234 * start the APs.
235 * We do this by disabling a bit in the SMI_EN (SMI Control and
236 * Enable register) of the Intel ICH LPC Interface Bridge.
237 */
238 sysenv = getenv("smbios.system.product");
239 if (sysenv != NULL) {
240 if (strncmp(sysenv, "MacBook", 7) == 0) {
241 if (bootverbose)
242 printf("Disabling LEGACY_USB_EN bit on "
243 "Intel ICH.\n");
244 outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
245 }
246 freeenv(sysenv);
247 }
248
249 /*
250 * Good {morning,afternoon,evening,night}.
251 */
252 startrtclock();
253 printcpuinfo();
254 panicifcpuunsupported();
255 #ifdef PERFMON
256 perfmon_init();
257 #endif
258 printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem),
259 ptoa((uintmax_t)Maxmem) / 1048576);
260 realmem = Maxmem;
261 /*
262 * Display any holes after the first chunk of extended memory.
263 */
264 if (bootverbose) {
265 int indx;
266
267 printf("Physical memory chunk(s):\n");
268 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
269 vm_paddr_t size;
270
271 size = phys_avail[indx + 1] - phys_avail[indx];
272 printf(
273 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
274 (uintmax_t)phys_avail[indx],
275 (uintmax_t)phys_avail[indx + 1] - 1,
276 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
277 }
278 }
279
280 vm_ksubmap_init(&kmi);
281
282 printf("avail memory = %ju (%ju MB)\n",
283 ptoa((uintmax_t)cnt.v_free_count),
284 ptoa((uintmax_t)cnt.v_free_count) / 1048576);
285
286 /*
287 * Set up buffers, so they can be used to read disk labels.
288 */
289 bufinit();
290 vm_pager_bufferinit();
291
292 cpu_setregs();
293 }
294
295 /*
296 * Send an interrupt to process.
297 *
298 * Stack is set up to allow sigcode stored
299 * at top to call routine, followed by kcall
300 * to sigreturn routine below. After sigreturn
301 * resets the signal mask, the stack, and the
302 * frame pointer, it returns to the user
303 * specified pc, psl.
304 */
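/*
 * A sketch of the handler prototypes these frames are laid out for, inferred
 * from the argument lists assembled below rather than copied from a header:
 *
 *	void handler(int sig, int code, struct sigcontext *scp);  old style
 *	void handler(int sig, siginfo_t *si, ucontext_t *ucp);    SA_SIGINFO
 *
 * sf_signum, sf_arg2/sf_siginfo and sf_scp/sf_ucontext correspond to those
 * three arguments.
 */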
305 #ifdef COMPAT_43
306 static void
307 osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
308 {
309 struct osigframe sf, *fp;
310 struct proc *p;
311 struct thread *td;
312 struct sigacts *psp;
313 struct trapframe *regs;
314 int sig;
315 int oonstack;
316
317 td = curthread;
318 p = td->td_proc;
319 PROC_LOCK_ASSERT(p, MA_OWNED);
320 sig = ksi->ksi_signo;
321 psp = p->p_sigacts;
322 mtx_assert(&psp->ps_mtx, MA_OWNED);
323 regs = td->td_frame;
324 oonstack = sigonstack(regs->tf_esp);
325
326 /* Allocate space for the signal handler context. */
327 if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
328 SIGISMEMBER(psp->ps_sigonstack, sig)) {
329 fp = (struct osigframe *)(td->td_sigstk.ss_sp +
330 td->td_sigstk.ss_size - sizeof(struct osigframe));
331 #if defined(COMPAT_43)
332 td->td_sigstk.ss_flags |= SS_ONSTACK;
333 #endif
334 } else
335 fp = (struct osigframe *)regs->tf_esp - 1;
336
337 /* Translate the signal if appropriate. */
338 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
339 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
340
341 /* Build the argument list for the signal handler. */
342 sf.sf_signum = sig;
343 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
344 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
345 /* Signal handler installed with SA_SIGINFO. */
346 sf.sf_arg2 = (register_t)&fp->sf_siginfo;
347 sf.sf_siginfo.si_signo = sig;
348 sf.sf_siginfo.si_code = ksi->ksi_code;
349 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
350 } else {
351 /* Old FreeBSD-style arguments. */
352 sf.sf_arg2 = ksi->ksi_code;
353 sf.sf_addr = (register_t)ksi->ksi_addr;
354 sf.sf_ahu.sf_handler = catcher;
355 }
356 mtx_unlock(&psp->ps_mtx);
357 PROC_UNLOCK(p);
358
359 /* Save most if not all of trap frame. */
360 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
361 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
362 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
363 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
364 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
365 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
366 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
367 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
368 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
369 sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
370 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
371 sf.sf_siginfo.si_sc.sc_gs = rgs();
372 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;
373
374 /* Build the signal context to be used by osigreturn(). */
375 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
376 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
377 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
378 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
379 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
380 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
381 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
382 sf.sf_siginfo.si_sc.sc_err = regs->tf_err;
383
384 /*
385 * If we're a vm86 process, we want to save the segment registers.
386 * We also change eflags to be our emulated eflags, not the actual
387 * eflags.
388 */
389 if (regs->tf_eflags & PSL_VM) {
390 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
391 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
392 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
393
394 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
395 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
396 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
397 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;
398
399 if (vm86->vm86_has_vme == 0)
400 sf.sf_siginfo.si_sc.sc_ps =
401 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
402 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
403
404 /* See sendsig() for comments. */
405 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
406 }
407
408 /*
409 * Copy the sigframe out to the user's stack.
410 */
411 if (copyout(&sf, fp, sizeof(*fp)) != 0) {
412 #ifdef DEBUG
413 printf("process %ld has trashed its stack\n", (long)p->p_pid);
414 #endif
415 PROC_LOCK(p);
416 sigexit(td, SIGILL);
417 }
418
419 regs->tf_esp = (int)fp;
420 regs->tf_eip = PS_STRINGS - szosigcode;
421 regs->tf_eflags &= ~(PSL_T | PSL_D);
422 regs->tf_cs = _ucodesel;
423 regs->tf_ds = _udatasel;
424 regs->tf_es = _udatasel;
425 regs->tf_fs = _udatasel;
426 load_gs(_udatasel);
427 regs->tf_ss = _udatasel;
428 PROC_LOCK(p);
429 mtx_lock(&psp->ps_mtx);
430 }
431 #endif /* COMPAT_43 */
432
433 #ifdef COMPAT_FREEBSD4
434 static void
435 freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
436 {
437 struct sigframe4 sf, *sfp;
438 struct proc *p;
439 struct thread *td;
440 struct sigacts *psp;
441 struct trapframe *regs;
442 int sig;
443 int oonstack;
444
445 td = curthread;
446 p = td->td_proc;
447 PROC_LOCK_ASSERT(p, MA_OWNED);
448 sig = ksi->ksi_signo;
449 psp = p->p_sigacts;
450 mtx_assert(&psp->ps_mtx, MA_OWNED);
451 regs = td->td_frame;
452 oonstack = sigonstack(regs->tf_esp);
453
454 /* Save user context. */
455 bzero(&sf, sizeof(sf));
456 sf.sf_uc.uc_sigmask = *mask;
457 sf.sf_uc.uc_stack = td->td_sigstk;
458 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
459 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
460 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
461 sf.sf_uc.uc_mcontext.mc_gs = rgs();
462 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
463
464 /* Allocate space for the signal handler context. */
465 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
466 SIGISMEMBER(psp->ps_sigonstack, sig)) {
467 sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
468 td->td_sigstk.ss_size - sizeof(struct sigframe4));
469 #if defined(COMPAT_43)
470 td->td_sigstk.ss_flags |= SS_ONSTACK;
471 #endif
472 } else
473 sfp = (struct sigframe4 *)regs->tf_esp - 1;
474
475 /* Translate the signal if appropriate. */
476 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
477 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
478
479 /* Build the argument list for the signal handler. */
480 sf.sf_signum = sig;
481 sf.sf_ucontext = (register_t)&sfp->sf_uc;
482 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
483 /* Signal handler installed with SA_SIGINFO. */
484 sf.sf_siginfo = (register_t)&sfp->sf_si;
485 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
486
487 /* Fill in POSIX parts */
488 sf.sf_si.si_signo = sig;
489 sf.sf_si.si_code = ksi->ksi_code;
490 sf.sf_si.si_addr = ksi->ksi_addr;
491 } else {
492 /* Old FreeBSD-style arguments. */
493 sf.sf_siginfo = ksi->ksi_code;
494 sf.sf_addr = (register_t)ksi->ksi_addr;
495 sf.sf_ahu.sf_handler = catcher;
496 }
497 mtx_unlock(&psp->ps_mtx);
498 PROC_UNLOCK(p);
499
500 /*
501 * If we're a vm86 process, we want to save the segment registers.
502 * We also change eflags to be our emulated eflags, not the actual
503 * eflags.
504 */
505 if (regs->tf_eflags & PSL_VM) {
506 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
507 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
508
509 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
510 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
511 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
512 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
513
514 if (vm86->vm86_has_vme == 0)
515 sf.sf_uc.uc_mcontext.mc_eflags =
516 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
517 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
518
519 /*
520 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
521 * syscalls made by the signal handler. This just avoids
522 * wasting time for our lazy fixup of such faults. PSL_NT
523 * does nothing in vm86 mode, but vm86 programs can set it
524 * almost legitimately in probes for old cpu types.
525 */
526 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
527 }
528
529 /*
530 * Copy the sigframe out to the user's stack.
531 */
532 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
533 #ifdef DEBUG
534 printf("process %ld has trashed its stack\n", (long)p->p_pid);
535 #endif
536 PROC_LOCK(p);
537 sigexit(td, SIGILL);
538 }
539
540 regs->tf_esp = (int)sfp;
541 regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
542 regs->tf_eflags &= ~(PSL_T | PSL_D);
543 regs->tf_cs = _ucodesel;
544 regs->tf_ds = _udatasel;
545 regs->tf_es = _udatasel;
546 regs->tf_fs = _udatasel;
547 regs->tf_ss = _udatasel;
548 PROC_LOCK(p);
549 mtx_lock(&psp->ps_mtx);
550 }
551 #endif /* COMPAT_FREEBSD4 */
552
553 void
554 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
555 {
556 struct sigframe sf, *sfp;
557 struct proc *p;
558 struct thread *td;
559 struct sigacts *psp;
560 char *sp;
561 struct trapframe *regs;
562 int sig;
563 int oonstack;
564
565 td = curthread;
566 p = td->td_proc;
567 PROC_LOCK_ASSERT(p, MA_OWNED);
568 sig = ksi->ksi_signo;
569 psp = p->p_sigacts;
570 mtx_assert(&psp->ps_mtx, MA_OWNED);
571 #ifdef COMPAT_FREEBSD4
572 if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
573 freebsd4_sendsig(catcher, ksi, mask);
574 return;
575 }
576 #endif
577 #ifdef COMPAT_43
578 if (SIGISMEMBER(psp->ps_osigset, sig)) {
579 osendsig(catcher, ksi, mask);
580 return;
581 }
582 #endif
583 regs = td->td_frame;
584 oonstack = sigonstack(regs->tf_esp);
585
586 /* Save user context. */
587 bzero(&sf, sizeof(sf));
588 sf.sf_uc.uc_sigmask = *mask;
589 sf.sf_uc.uc_stack = td->td_sigstk;
590 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
591 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
592 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
593 sf.sf_uc.uc_mcontext.mc_gs = rgs();
594 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
595 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
596 get_fpcontext(td, &sf.sf_uc.uc_mcontext);
597 fpstate_drop(td);
598
599 /* Allocate space for the signal handler context. */
600 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
601 SIGISMEMBER(psp->ps_sigonstack, sig)) {
602 sp = td->td_sigstk.ss_sp +
603 td->td_sigstk.ss_size - sizeof(struct sigframe);
604 #if defined(COMPAT_43)
605 td->td_sigstk.ss_flags |= SS_ONSTACK;
606 #endif
607 } else
608 sp = (char *)regs->tf_esp - sizeof(struct sigframe);
609 /* Align to 16 bytes. */
610 sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
611
612 /* Translate the signal if appropriate. */
613 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
614 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
615
616 /* Build the argument list for the signal handler. */
617 sf.sf_signum = sig;
618 sf.sf_ucontext = (register_t)&sfp->sf_uc;
619 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
620 /* Signal handler installed with SA_SIGINFO. */
621 sf.sf_siginfo = (register_t)&sfp->sf_si;
622 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
623
624 /* Fill in POSIX parts */
625 sf.sf_si = ksi->ksi_info;
626 sf.sf_si.si_signo = sig; /* maybe a translated signal */
627 } else {
628 /* Old FreeBSD-style arguments. */
629 sf.sf_siginfo = ksi->ksi_code;
630 sf.sf_addr = (register_t)ksi->ksi_addr;
631 sf.sf_ahu.sf_handler = catcher;
632 }
633 mtx_unlock(&psp->ps_mtx);
634 PROC_UNLOCK(p);
635
636 /*
637 * If we're a vm86 process, we want to save the segment registers.
638 * We also change eflags to be our emulated eflags, not the actual
639 * eflags.
640 */
641 if (regs->tf_eflags & PSL_VM) {
642 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
643 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
644
645 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
646 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
647 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
648 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
649
650 if (vm86->vm86_has_vme == 0)
651 sf.sf_uc.uc_mcontext.mc_eflags =
652 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
653 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
654
655 /*
656 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
657 * syscalls made by the signal handler. This just avoids
658 * wasting time for our lazy fixup of such faults. PSL_NT
659 * does nothing in vm86 mode, but vm86 programs can set it
660 * almost legitimately in probes for old cpu types.
661 */
662 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
663 }
664
665 /*
666 * Copy the sigframe out to the user's stack.
667 */
668 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
669 #ifdef DEBUG
670 printf("process %ld has trashed its stack\n", (long)p->p_pid);
671 #endif
672 PROC_LOCK(p);
673 sigexit(td, SIGILL);
674 }
675
676 regs->tf_esp = (int)sfp;
677 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
678 regs->tf_eflags &= ~(PSL_T | PSL_D);
679 regs->tf_cs = _ucodesel;
680 regs->tf_ds = _udatasel;
681 regs->tf_es = _udatasel;
682 regs->tf_fs = _udatasel;
683 regs->tf_ss = _udatasel;
684 PROC_LOCK(p);
685 mtx_lock(&psp->ps_mtx);
686 }
687
688 /*
689 * System call to cleanup state after a signal
690 * has been taken. Reset signal mask and
691 * stack state from context left by sendsig (above).
692 * Return to previous pc and psl as specified by
693 * context left by sendsig. Check carefully to
694 * make sure that the user has not modified the
695 * state to gain improper privileges.
696 *
697 * MPSAFE
698 */
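/*
 * Roughly, the user-side sequence that lands here looks like:
 *
 *	handler(sig, ...);		called through the sigcode trampoline
 *	sigreturn(&saved_context);	trampoline then issues the syscall
 *
 * Everything restored below was written out by a sendsig() variant and may
 * have been modified by the process in the meantime, hence the checks that
 * follow.
 */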
699 #ifdef COMPAT_43
700 int
701 osigreturn(td, uap)
702 struct thread *td;
703 struct osigreturn_args /* {
704 struct osigcontext *sigcntxp;
705 } */ *uap;
706 {
707 struct osigcontext sc;
708 struct trapframe *regs;
709 struct osigcontext *scp;
710 struct proc *p = td->td_proc;
711 int eflags, error;
712 ksiginfo_t ksi;
713
714 regs = td->td_frame;
715 error = copyin(uap->sigcntxp, &sc, sizeof(sc));
716 if (error != 0)
717 return (error);
718 scp = ≻
719 eflags = scp->sc_ps;
720 if (eflags & PSL_VM) {
721 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
722 struct vm86_kernel *vm86;
723
724 /*
725 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
726 * set up the vm86 area, and we can't enter vm86 mode.
727 */
728 if (td->td_pcb->pcb_ext == 0)
729 return (EINVAL);
730 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
731 if (vm86->vm86_inited == 0)
732 return (EINVAL);
733
734 /* Go back to user mode if both flags are set. */
735 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
736 ksiginfo_init_trap(&ksi);
737 ksi.ksi_signo = SIGBUS;
738 ksi.ksi_code = BUS_OBJERR;
739 ksi.ksi_addr = (void *)regs->tf_eip;
740 trapsignal(td, &ksi);
741 }
742
743 if (vm86->vm86_has_vme) {
744 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
745 (eflags & VME_USERCHANGE) | PSL_VM;
746 } else {
747 vm86->vm86_eflags = eflags; /* save VIF, VIP */
748 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
749 (eflags & VM_USERCHANGE) | PSL_VM;
750 }
751 tf->tf_vm86_ds = scp->sc_ds;
752 tf->tf_vm86_es = scp->sc_es;
753 tf->tf_vm86_fs = scp->sc_fs;
754 tf->tf_vm86_gs = scp->sc_gs;
755 tf->tf_ds = _udatasel;
756 tf->tf_es = _udatasel;
757 tf->tf_fs = _udatasel;
758 } else {
759 /*
760 * Don't allow users to change privileged or reserved flags.
761 */
762 /*
763 * XXX do allow users to change the privileged flag PSL_RF.
764 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
765 * should sometimes set it there too. tf_eflags is kept in
766 * the signal context during signal handling and there is no
767 * other place to remember it, so the PSL_RF bit may be
768 * corrupted by the signal handler without us knowing.
769 * Corruption of the PSL_RF bit at worst causes one more or
770 * one less debugger trap, so allowing it is fairly harmless.
771 */
772 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
773 return (EINVAL);
774 }
775
776 /*
777 * Don't allow users to load a valid privileged %cs. Let the
778 * hardware check for invalid selectors, excess privilege in
779 * other selectors, invalid %eip's and invalid %esp's.
780 */
781 if (!CS_SECURE(scp->sc_cs)) {
782 ksiginfo_init_trap(&ksi);
783 ksi.ksi_signo = SIGBUS;
784 ksi.ksi_code = BUS_OBJERR;
785 ksi.ksi_trapno = T_PROTFLT;
786 ksi.ksi_addr = (void *)regs->tf_eip;
787 trapsignal(td, &ksi);
788 return (EINVAL);
789 }
790 regs->tf_ds = scp->sc_ds;
791 regs->tf_es = scp->sc_es;
792 regs->tf_fs = scp->sc_fs;
793 }
794
795 /* Restore remaining registers. */
796 regs->tf_eax = scp->sc_eax;
797 regs->tf_ebx = scp->sc_ebx;
798 regs->tf_ecx = scp->sc_ecx;
799 regs->tf_edx = scp->sc_edx;
800 regs->tf_esi = scp->sc_esi;
801 regs->tf_edi = scp->sc_edi;
802 regs->tf_cs = scp->sc_cs;
803 regs->tf_ss = scp->sc_ss;
804 regs->tf_isp = scp->sc_isp;
805 regs->tf_ebp = scp->sc_fp;
806 regs->tf_esp = scp->sc_sp;
807 regs->tf_eip = scp->sc_pc;
808 regs->tf_eflags = eflags;
809
810 PROC_LOCK(p);
811 #if defined(COMPAT_43)
812 if (scp->sc_onstack & 1)
813 td->td_sigstk.ss_flags |= SS_ONSTACK;
814 else
815 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
816 #endif
817 SIGSETOLD(td->td_sigmask, scp->sc_mask);
818 SIG_CANTMASK(td->td_sigmask);
819 signotify(td);
820 PROC_UNLOCK(p);
821 return (EJUSTRETURN);
822 }
823 #endif /* COMPAT_43 */
824
825 #ifdef COMPAT_FREEBSD4
826 /*
827 * MPSAFE
828 */
829 int
830 freebsd4_sigreturn(td, uap)
831 struct thread *td;
832 struct freebsd4_sigreturn_args /* {
833 const ucontext4 *sigcntxp;
834 } */ *uap;
835 {
836 struct ucontext4 uc;
837 struct proc *p = td->td_proc;
838 struct trapframe *regs;
839 const struct ucontext4 *ucp;
840 int cs, eflags, error;
841 ksiginfo_t ksi;
842
843 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
844 if (error != 0)
845 return (error);
846 ucp = &uc;
847 regs = td->td_frame;
848 eflags = ucp->uc_mcontext.mc_eflags;
849 if (eflags & PSL_VM) {
850 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
851 struct vm86_kernel *vm86;
852
853 /*
854 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
855 * set up the vm86 area, and we can't enter vm86 mode.
856 */
857 if (td->td_pcb->pcb_ext == 0)
858 return (EINVAL);
859 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
860 if (vm86->vm86_inited == 0)
861 return (EINVAL);
862
863 /* Go back to user mode if both flags are set. */
864 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
865 ksiginfo_init_trap(&ksi);
866 ksi.ksi_signo = SIGBUS;
867 ksi.ksi_code = BUS_OBJERR;
868 ksi.ksi_addr = (void *)regs->tf_eip;
869 trapsignal(td, &ksi);
870 }
871 if (vm86->vm86_has_vme) {
872 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
873 (eflags & VME_USERCHANGE) | PSL_VM;
874 } else {
875 vm86->vm86_eflags = eflags; /* save VIF, VIP */
876 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
877 (eflags & VM_USERCHANGE) | PSL_VM;
878 }
879 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
880 tf->tf_eflags = eflags;
881 tf->tf_vm86_ds = tf->tf_ds;
882 tf->tf_vm86_es = tf->tf_es;
883 tf->tf_vm86_fs = tf->tf_fs;
884 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
885 tf->tf_ds = _udatasel;
886 tf->tf_es = _udatasel;
887 tf->tf_fs = _udatasel;
888 } else {
889 /*
890 * Don't allow users to change privileged or reserved flags.
891 */
892 /*
893 * XXX do allow users to change the privileged flag PSL_RF.
894 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
895 * should sometimes set it there too. tf_eflags is kept in
896 * the signal context during signal handling and there is no
897 * other place to remember it, so the PSL_RF bit may be
898 * corrupted by the signal handler without us knowing.
899 * Corruption of the PSL_RF bit at worst causes one more or
900 * one less debugger trap, so allowing it is fairly harmless.
901 */
902 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
903 printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
904 return (EINVAL);
905 }
906
907 /*
908 * Don't allow users to load a valid privileged %cs. Let the
909 * hardware check for invalid selectors, excess privilege in
910 * other selectors, invalid %eip's and invalid %esp's.
911 */
912 cs = ucp->uc_mcontext.mc_cs;
913 if (!CS_SECURE(cs)) {
914 printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
915 ksiginfo_init_trap(&ksi);
916 ksi.ksi_signo = SIGBUS;
917 ksi.ksi_code = BUS_OBJERR;
918 ksi.ksi_trapno = T_PROTFLT;
919 ksi.ksi_addr = (void *)regs->tf_eip;
920 trapsignal(td, &ksi);
921 return (EINVAL);
922 }
923
924 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
925 }
926
927 PROC_LOCK(p);
928 #if defined(COMPAT_43)
929 if (ucp->uc_mcontext.mc_onstack & 1)
930 td->td_sigstk.ss_flags |= SS_ONSTACK;
931 else
932 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
933 #endif
934
935 td->td_sigmask = ucp->uc_sigmask;
936 SIG_CANTMASK(td->td_sigmask);
937 signotify(td);
938 PROC_UNLOCK(p);
939 return (EJUSTRETURN);
940 }
941 #endif /* COMPAT_FREEBSD4 */
942
943 /*
944 * MPSAFE
945 */
946 int
947 sigreturn(td, uap)
948 struct thread *td;
949 struct sigreturn_args /* {
950 const struct __ucontext *sigcntxp;
951 } */ *uap;
952 {
953 ucontext_t uc;
954 struct proc *p = td->td_proc;
955 struct trapframe *regs;
956 const ucontext_t *ucp;
957 int cs, eflags, error, ret;
958 ksiginfo_t ksi;
959
960 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
961 if (error != 0)
962 return (error);
963 ucp = &uc;
964 regs = td->td_frame;
965 eflags = ucp->uc_mcontext.mc_eflags;
966 if (eflags & PSL_VM) {
967 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
968 struct vm86_kernel *vm86;
969
970 /*
971 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
972 * set up the vm86 area, and we can't enter vm86 mode.
973 */
974 if (td->td_pcb->pcb_ext == 0)
975 return (EINVAL);
976 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
977 if (vm86->vm86_inited == 0)
978 return (EINVAL);
979
980 /* Go back to user mode if both flags are set. */
981 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
982 ksiginfo_init_trap(&ksi);
983 ksi.ksi_signo = SIGBUS;
984 ksi.ksi_code = BUS_OBJERR;
985 ksi.ksi_addr = (void *)regs->tf_eip;
986 trapsignal(td, &ksi);
987 }
988
989 if (vm86->vm86_has_vme) {
990 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
991 (eflags & VME_USERCHANGE) | PSL_VM;
992 } else {
993 vm86->vm86_eflags = eflags; /* save VIF, VIP */
994 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
995 (eflags & VM_USERCHANGE) | PSL_VM;
996 }
997 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
998 tf->tf_eflags = eflags;
999 tf->tf_vm86_ds = tf->tf_ds;
1000 tf->tf_vm86_es = tf->tf_es;
1001 tf->tf_vm86_fs = tf->tf_fs;
1002 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
1003 tf->tf_ds = _udatasel;
1004 tf->tf_es = _udatasel;
1005 tf->tf_fs = _udatasel;
1006 } else {
1007 /*
1008 * Don't allow users to change privileged or reserved flags.
1009 */
1010 /*
1011 * XXX do allow users to change the privileged flag PSL_RF.
1012 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
1013 * should sometimes set it there too. tf_eflags is kept in
1014 * the signal context during signal handling and there is no
1015 * other place to remember it, so the PSL_RF bit may be
1016 * corrupted by the signal handler without us knowing.
1017 * Corruption of the PSL_RF bit at worst causes one more or
1018 * one less debugger trap, so allowing it is fairly harmless.
1019 */
1020 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
1021 printf("sigreturn: eflags = 0x%x\n", eflags);
1022 return (EINVAL);
1023 }
1024
1025 /*
1026 * Don't allow users to load a valid privileged %cs. Let the
1027 * hardware check for invalid selectors, excess privilege in
1028 * other selectors, invalid %eip's and invalid %esp's.
1029 */
1030 cs = ucp->uc_mcontext.mc_cs;
1031 if (!CS_SECURE(cs)) {
1032 printf("sigreturn: cs = 0x%x\n", cs);
1033 ksiginfo_init_trap(&ksi);
1034 ksi.ksi_signo = SIGBUS;
1035 ksi.ksi_code = BUS_OBJERR;
1036 ksi.ksi_trapno = T_PROTFLT;
1037 ksi.ksi_addr = (void *)regs->tf_eip;
1038 trapsignal(td, &ksi);
1039 return (EINVAL);
1040 }
1041
1042 ret = set_fpcontext(td, &ucp->uc_mcontext);
1043 if (ret != 0)
1044 return (ret);
1045 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
1046 }
1047
1048 PROC_LOCK(p);
1049 #if defined(COMPAT_43)
1050 if (ucp->uc_mcontext.mc_onstack & 1)
1051 td->td_sigstk.ss_flags |= SS_ONSTACK;
1052 else
1053 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
1054 #endif
1055
1056 td->td_sigmask = ucp->uc_sigmask;
1057 SIG_CANTMASK(td->td_sigmask);
1058 signotify(td);
1059 PROC_UNLOCK(p);
1060 return (EJUSTRETURN);
1061 }
1062
1063 /*
1064 * Machine dependent boot() routine
1065 *
1066 * I haven't seen anything to put here yet
1067 * Possibly some stuff might be grafted back here from boot()
1068 */
1069 void
1070 cpu_boot(int howto)
1071 {
1072 }
1073
1074 /* Get current clock frequency for the given cpu id. */
1075 int
1076 cpu_est_clockrate(int cpu_id, uint64_t *rate)
1077 {
1078 register_t reg;
1079 uint64_t tsc1, tsc2;
1080
1081 if (pcpu_find(cpu_id) == NULL || rate == NULL)
1082 return (EINVAL);
1083 if (!tsc_present)
1084 return (EOPNOTSUPP);
1085
1086 /* If we're booting, trust the rate calibrated moments ago. */
1087 if (cold) {
1088 *rate = tsc_freq;
1089 return (0);
1090 }
1091
1092 #ifdef SMP
1093 /* Schedule ourselves on the indicated cpu. */
1094 thread_lock(curthread);
1095 sched_bind(curthread, cpu_id);
1096 thread_unlock(curthread);
1097 #endif
1098
1099 /* Calibrate by measuring a short delay. */
1100 reg = intr_disable();
1101 tsc1 = rdtsc();
1102 DELAY(1000);
1103 tsc2 = rdtsc();
1104 intr_restore(reg);
1105
1106 #ifdef SMP
1107 thread_lock(curthread);
1108 sched_unbind(curthread);
1109 thread_unlock(curthread);
1110 #endif
1111
1112 /*
1113 * Calculate the difference in readings, convert to Hz (DELAY(1000)
1114 * runs for one millisecond), and subtract 0.5% of the total; empirical
1115 * testing has shown that DELAY()'s overhead is approximately this value.
1116 */
1117 tsc2 -= tsc1;
1118 *rate = tsc2 * 1000 - tsc2 * 5;
1119 return (0);
1120 }
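/*
 * Worked example for the estimate above: if the TSC advances by 2,000,000
 * ticks during DELAY(1000) (roughly one millisecond), the raw rate is
 * 2,000,000 * 1000 = 2.0 GHz, and the 0.5% correction
 * (2,000,000 * 5 = 10,000,000) brings the reported rate down to about
 * 1.99 GHz.
 */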
1121
1122 /*
1123 * Shutdown the CPU as much as possible
1124 */
1125 void
1126 cpu_halt(void)
1127 {
1128 for (;;)
1129 __asm__ ("hlt");
1130 }
1131
1132 /*
1133 * Hook to idle the CPU when possible. In the SMP case we default to
1134 * off because a halted cpu will not currently pick up a new thread in the
1135 * run queue until the next timer tick. If turned on this will result in
1136 * approximately a 4.2% loss in real time performance in buildworld tests
1137 * (but improves user and sys times oddly enough), and saves approximately
1138 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
1139 *
1140 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
1141 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
1142 * Then we can have our cake and eat it too.
1143 *
1144 * XXX I'm turning it on for SMP as well by default for now. It seems to
1145 * help lock contention somewhat, and this is critical for HTT. -Peter
1146 */
1147 static int cpu_idle_hlt = 1;
1148 TUNABLE_INT("machdep.cpu_idle_hlt", &cpu_idle_hlt);
1149 SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
1150 &cpu_idle_hlt, 0, "Idle loop HLT enable");
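/*
 * The knob can be set both as a loader tunable and at run time, e.g.
 * machdep.cpu_idle_hlt="0" in loader.conf or "sysctl machdep.cpu_idle_hlt=0"
 * on a running system, in which case cpu_idle() simply returns and the idle
 * thread spins instead of halting.
 */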
1151
1152 static void
1153 cpu_idle_default(void)
1154 {
1155 /*
1156 * we must absolutely guarantee that hlt is the
1157 * absolute next instruction after sti or we
1158 * introduce a timing window.
1159 */
1160 __asm __volatile("sti; hlt");
1161 }
1162
1163 /*
1164 * Note that we have to be careful here to avoid a race between checking
1165 * sched_runnable() and actually halting. If we don't do this, we may waste
1166 * the time between calling hlt and the next interrupt even though there
1167 * is a runnable process.
1168 */
1169 void
1170 cpu_idle(void)
1171 {
1172
1173 #ifdef SMP
1174 if (mp_grab_cpu_hlt())
1175 return;
1176 #endif
1177
1178 if (cpu_idle_hlt) {
1179 disable_intr();
1180 if (sched_runnable())
1181 enable_intr();
1182 else
1183 (*cpu_idle_hook)();
1184 }
1185 }
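/*
 * Sketch of the race the disable_intr()/sched_runnable() ordering above
 * avoids.  If the check were done with interrupts enabled:
 *
 *	if (!sched_runnable())		an interrupt arrives here and makes
 *		hlt			a thread runnable, yet the hlt still
 *					sleeps until the next interrupt
 *
 * With interrupts disabled across the check, any such wakeup stays pending
 * and wakes the "sti; hlt" pair in cpu_idle_default() immediately instead of
 * being lost for a full tick.
 */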
1186
1187 /* Other subsystems (e.g., ACPI) can hook this later. */
1188 void (*cpu_idle_hook)(void) = cpu_idle_default;
1189
1190 /*
1191 * Clear registers on exec
1192 */
1193 void
1194 exec_setregs(td, entry, stack, ps_strings)
1195 struct thread *td;
1196 u_long entry;
1197 u_long stack;
1198 u_long ps_strings;
1199 {
1200 struct trapframe *regs = td->td_frame;
1201 struct pcb *pcb = td->td_pcb;
1202
1203 /* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
1204 pcb->pcb_gs = _udatasel;
1205 load_gs(_udatasel);
1206
1207 mtx_lock_spin(&dt_lock);
1208 if (td->td_proc->p_md.md_ldt)
1209 user_ldt_free(td);
1210 else
1211 mtx_unlock_spin(&dt_lock);
1212
1213 bzero((char *)regs, sizeof(struct trapframe));
1214 regs->tf_eip = entry;
1215 regs->tf_esp = stack;
1216 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
1217 regs->tf_ss = _udatasel;
1218 regs->tf_ds = _udatasel;
1219 regs->tf_es = _udatasel;
1220 regs->tf_fs = _udatasel;
1221 regs->tf_cs = _ucodesel;
1222
1223 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */
1224 regs->tf_ebx = ps_strings;
1225
1226 /*
1227 * Reset the hardware debug registers if they were in use.
1228 * They won't have any meaning for the newly exec'd process.
1229 */
1230 if (pcb->pcb_flags & PCB_DBREGS) {
1231 pcb->pcb_dr0 = 0;
1232 pcb->pcb_dr1 = 0;
1233 pcb->pcb_dr2 = 0;
1234 pcb->pcb_dr3 = 0;
1235 pcb->pcb_dr6 = 0;
1236 pcb->pcb_dr7 = 0;
1237 if (pcb == PCPU_GET(curpcb)) {
1238 /*
1239 * Clear the debug registers on the running
1240 * CPU, otherwise they will end up affecting
1241 * the next process we switch to.
1242 */
1243 reset_dbregs();
1244 }
1245 pcb->pcb_flags &= ~PCB_DBREGS;
1246 }
1247
1248 /*
1249 * Initialize the math emulator (if any) for the current process.
1250 * Actually, just clear the bit that says that the emulator has
1251 * been initialized. Initialization is delayed until the process
1252 * traps to the emulator (if it is done at all) mainly because
1253 * emulators don't provide an entry point for initialization.
1254 */
1255 td->td_pcb->pcb_flags &= ~FP_SOFTFP;
1256
1257 /*
1258 * Drop the FP state if we hold it, so that the process gets a
1259 * clean FP state if it uses the FPU again.
1260 */
1261 fpstate_drop(td);
1262
1263 /*
1264 * XXX - Linux emulator
1265 * Make sure edx is 0x0 on entry. Linux binaries depend
1266 * on it.
1267 */
1268 td->td_retval[1] = 0;
1269 }
1270
1271 void
1272 cpu_setregs(void)
1273 {
1274 unsigned int cr0;
1275
1276 cr0 = rcr0();
1277
1278 /*
1279 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
1280 *
1281 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
1282 * instructions. We must set the CR0_MP bit and use the CR0_TS
1283 * bit to control the trap, because setting the CR0_EM bit does
1284 * not cause WAIT instructions to trap. It's important to trap
1285 * WAIT instructions - otherwise the "wait" variants of no-wait
1286 * control instructions would degenerate to the "no-wait" variants
1287 * after FP context switches but work correctly otherwise. It's
1288 * particularly important to trap WAITs when there is no NPX -
1289 * otherwise the "wait" variants would always degenerate.
1290 *
1291 * Try setting CR0_NE to get correct error reporting on 486DX's.
1292 * Setting it should fail or do nothing on lesser processors.
1293 */
1294 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
1295 load_cr0(cr0);
1296 load_gs(_udatasel);
1297 }
1298
1299 u_long bootdev; /* not a struct cdev *- encoding is different */
1300 SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
1301 CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");
1302
1303 /*
1304 * Initialize 386 and configure to run kernel
1305 */
1306
1307 /*
1308 * Initialize segments & interrupt table
1309 */
1310
1311 int _default_ldt;
1312 union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */
1313 static struct gate_descriptor idt0[NIDT];
1314 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
1315 union descriptor ldt[NLDT]; /* local descriptor table */
1316 struct region_descriptor r_gdt, r_idt; /* table descriptors */
1317 struct mtx dt_lock; /* lock for GDT and LDT */
1318
1319 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
1320 extern int has_f00f_bug;
1321 #endif
1322
1323 static struct i386tss dblfault_tss;
1324 static char dblfault_stack[PAGE_SIZE];
1325
1326 extern vm_offset_t proc0kstack;
1327
1328
1329 /*
1330 * software prototypes -- in more palatable form.
1331 *
1332 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
1333 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
1334 */
1335 struct soft_segment_descriptor gdt_segs[] = {
1336 /* GNULL_SEL 0 Null Descriptor */
1337 { 0x0, /* segment base address */
1338 0x0, /* length */
1339 0, /* segment type */
1340 0, /* segment descriptor priority level */
1341 0, /* segment descriptor present */
1342 0, 0,
1343 0, /* default 32 vs 16 bit size */
1344 0 /* limit granularity (byte/page units)*/ },
1345 /* GPRIV_SEL 1 SMP Per-Processor Private Data Descriptor */
1346 { 0x0, /* segment base address */
1347 0xfffff, /* length - all address space */
1348 SDT_MEMRWA, /* segment type */
1349 0, /* segment descriptor priority level */
1350 1, /* segment descriptor present */
1351 0, 0,
1352 1, /* default 32 vs 16 bit size */
1353 1 /* limit granularity (byte/page units)*/ },
1354 /* GUFS_SEL 2 %fs Descriptor for user */
1355 { 0x0, /* segment base address */
1356 0xfffff, /* length - all address space */
1357 SDT_MEMRWA, /* segment type */
1358 SEL_UPL, /* segment descriptor priority level */
1359 1, /* segment descriptor present */
1360 0, 0,
1361 1, /* default 32 vs 16 bit size */
1362 1 /* limit granularity (byte/page units)*/ },
1363 /* GUGS_SEL 3 %gs Descriptor for user */
1364 { 0x0, /* segment base address */
1365 0xfffff, /* length - all address space */
1366 SDT_MEMRWA, /* segment type */
1367 SEL_UPL, /* segment descriptor priority level */
1368 1, /* segment descriptor present */
1369 0, 0,
1370 1, /* default 32 vs 16 bit size */
1371 1 /* limit granularity (byte/page units)*/ },
1372 /* GCODE_SEL 4 Code Descriptor for kernel */
1373 { 0x0, /* segment base address */
1374 0xfffff, /* length - all address space */
1375 SDT_MEMERA, /* segment type */
1376 0, /* segment descriptor priority level */
1377 1, /* segment descriptor present */
1378 0, 0,
1379 1, /* default 32 vs 16 bit size */
1380 1 /* limit granularity (byte/page units)*/ },
1381 /* GDATA_SEL 5 Data Descriptor for kernel */
1382 { 0x0, /* segment base address */
1383 0xfffff, /* length - all address space */
1384 SDT_MEMRWA, /* segment type */
1385 0, /* segment descriptor priority level */
1386 1, /* segment descriptor present */
1387 0, 0,
1388 1, /* default 32 vs 16 bit size */
1389 1 /* limit granularity (byte/page units)*/ },
1390 /* GUCODE_SEL 6 Code Descriptor for user */
1391 { 0x0, /* segment base address */
1392 0xfffff, /* length - all address space */
1393 SDT_MEMERA, /* segment type */
1394 SEL_UPL, /* segment descriptor priority level */
1395 1, /* segment descriptor present */
1396 0, 0,
1397 1, /* default 32 vs 16 bit size */
1398 1 /* limit granularity (byte/page units)*/ },
1399 /* GUDATA_SEL 7 Data Descriptor for user */
1400 { 0x0, /* segment base address */
1401 0xfffff, /* length - all address space */
1402 SDT_MEMRWA, /* segment type */
1403 SEL_UPL, /* segment descriptor priority level */
1404 1, /* segment descriptor present */
1405 0, 0,
1406 1, /* default 32 vs 16 bit size */
1407 1 /* limit granularity (byte/page units)*/ },
1408 /* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
1409 { 0x400, /* segment base address */
1410 0xfffff, /* length */
1411 SDT_MEMRWA, /* segment type */
1412 0, /* segment descriptor priority level */
1413 1, /* segment descriptor present */
1414 0, 0,
1415 1, /* default 32 vs 16 bit size */
1416 1 /* limit granularity (byte/page units)*/ },
1417 /* GPROC0_SEL 9 Proc 0 Tss Descriptor */
1418 {
1419 0x0, /* segment base address */
1420 sizeof(struct i386tss)-1,/* length */
1421 SDT_SYS386TSS, /* segment type */
1422 0, /* segment descriptor priority level */
1423 1, /* segment descriptor present */
1424 0, 0,
1425 0, /* unused - default 32 vs 16 bit size */
1426 0 /* limit granularity (byte/page units)*/ },
1427 /* GLDT_SEL 10 LDT Descriptor */
1428 { (int) ldt, /* segment base address */
1429 sizeof(ldt)-1, /* length - all address space */
1430 SDT_SYSLDT, /* segment type */
1431 SEL_UPL, /* segment descriptor priority level */
1432 1, /* segment descriptor present */
1433 0, 0,
1434 0, /* unused - default 32 vs 16 bit size */
1435 0 /* limit granularity (byte/page units)*/ },
1436 /* GUSERLDT_SEL 11 User LDT Descriptor per process */
1437 { (int) ldt, /* segment base address */
1438 (512 * sizeof(union descriptor)-1), /* length */
1439 SDT_SYSLDT, /* segment type */
1440 0, /* segment descriptor priority level */
1441 1, /* segment descriptor present */
1442 0, 0,
1443 0, /* unused - default 32 vs 16 bit size */
1444 0 /* limit granularity (byte/page units)*/ },
1445 /* GPANIC_SEL 12 Panic Tss Descriptor */
1446 { (int) &dblfault_tss, /* segment base address */
1447 sizeof(struct i386tss)-1,/* length - all address space */
1448 SDT_SYS386TSS, /* segment type */
1449 0, /* segment descriptor priority level */
1450 1, /* segment descriptor present */
1451 0, 0,
1452 0, /* unused - default 32 vs 16 bit size */
1453 0 /* limit granularity (byte/page units)*/ },
1454 /* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
1455 { 0, /* segment base address (overwritten) */
1456 0xfffff, /* length */
1457 SDT_MEMERA, /* segment type */
1458 0, /* segment descriptor priority level */
1459 1, /* segment descriptor present */
1460 0, 0,
1461 0, /* default 32 vs 16 bit size */
1462 1 /* limit granularity (byte/page units)*/ },
1463 /* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
1464 { 0, /* segment base address (overwritten) */
1465 0xfffff, /* length */
1466 SDT_MEMERA, /* segment type */
1467 0, /* segment descriptor priority level */
1468 1, /* segment descriptor present */
1469 0, 0,
1470 0, /* default 32 vs 16 bit size */
1471 1 /* limit granularity (byte/page units)*/ },
1472 /* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
1473 { 0, /* segment base address (overwritten) */
1474 0xfffff, /* length */
1475 SDT_MEMRWA, /* segment type */
1476 0, /* segment descriptor priority level */
1477 1, /* segment descriptor present */
1478 0, 0,
1479 1, /* default 32 vs 16 bit size */
1480 1 /* limit granularity (byte/page units)*/ },
1481 /* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
1482 { 0, /* segment base address (overwritten) */
1483 0xfffff, /* length */
1484 SDT_MEMRWA, /* segment type */
1485 0, /* segment descriptor priority level */
1486 1, /* segment descriptor present */
1487 0, 0,
1488 0, /* default 32 vs 16 bit size */
1489 1 /* limit granularity (byte/page units)*/ },
1490 /* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
1491 { 0, /* segment base address (overwritten) */
1492 0xfffff, /* length */
1493 SDT_MEMRWA, /* segment type */
1494 0, /* segment descriptor priority level */
1495 1, /* segment descriptor present */
1496 0, 0,
1497 0, /* default 32 vs 16 bit size */
1498 1 /* limit granularity (byte/page units)*/ },
1499 /* GNDIS_SEL 18 NDIS Descriptor */
1500 { 0x0, /* segment base address */
1501 0x0, /* length */
1502 0, /* segment type */
1503 0, /* segment descriptor priority level */
1504 0, /* segment descriptor present */
1505 0, 0,
1506 0, /* default 32 vs 16 bit size */
1507 0 /* limit granularity (byte/page units)*/ },
1508 };
1509
1510 static struct soft_segment_descriptor ldt_segs[] = {
1511 /* Null Descriptor - overwritten by call gate */
1512 { 0x0, /* segment base address */
1513 0x0, /* length - all address space */
1514 0, /* segment type */
1515 0, /* segment descriptor priority level */
1516 0, /* segment descriptor present */
1517 0, 0,
1518 0, /* default 32 vs 16 bit size */
1519 0 /* limit granularity (byte/page units)*/ },
1520 /* Null Descriptor - overwritten by call gate */
1521 { 0x0, /* segment base address */
1522 0x0, /* length - all address space */
1523 0, /* segment type */
1524 0, /* segment descriptor priority level */
1525 0, /* segment descriptor present */
1526 0, 0,
1527 0, /* default 32 vs 16 bit size */
1528 0 /* limit granularity (byte/page units)*/ },
1529 /* Null Descriptor - overwritten by call gate */
1530 { 0x0, /* segment base address */
1531 0x0, /* length - all address space */
1532 0, /* segment type */
1533 0, /* segment descriptor priority level */
1534 0, /* segment descriptor present */
1535 0, 0,
1536 0, /* default 32 vs 16 bit size */
1537 0 /* limit granularity (byte/page units)*/ },
1538 /* Code Descriptor for user */
1539 { 0x0, /* segment base address */
1540 0xfffff, /* length - all address space */
1541 SDT_MEMERA, /* segment type */
1542 SEL_UPL, /* segment descriptor priority level */
1543 1, /* segment descriptor present */
1544 0, 0,
1545 1, /* default 32 vs 16 bit size */
1546 1 /* limit granularity (byte/page units)*/ },
1547 /* Null Descriptor - overwritten by call gate */
1548 { 0x0, /* segment base address */
1549 0x0, /* length - all address space */
1550 0, /* segment type */
1551 0, /* segment descriptor priority level */
1552 0, /* segment descriptor present */
1553 0, 0,
1554 0, /* default 32 vs 16 bit size */
1555 0 /* limit granularity (byte/page units)*/ },
1556 /* Data Descriptor for user */
1557 { 0x0, /* segment base address */
1558 0xfffff, /* length - all address space */
1559 SDT_MEMRWA, /* segment type */
1560 SEL_UPL, /* segment descriptor priority level */
1561 1, /* segment descriptor present */
1562 0, 0,
1563 1, /* default 32 vs 16 bit size */
1564 1 /* limit granularity (byte/page units)*/ },
1565 };
1566
1567 void
1568 setidt(idx, func, typ, dpl, selec)
1569 int idx;
1570 inthand_t *func;
1571 int typ;
1572 int dpl;
1573 int selec;
1574 {
1575 struct gate_descriptor *ip;
1576
1577 ip = idt + idx;
1578 ip->gd_looffset = (int)func;
1579 ip->gd_selector = selec;
1580 ip->gd_stkcpy = 0;
1581 ip->gd_xx = 0;
1582 ip->gd_type = typ;
1583 ip->gd_dpl = dpl;
1584 ip->gd_p = 1;
1585 ip->gd_hioffset = ((int)func)>>16 ;
1586 }
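/*
 * Typical use, assuming the IDT_*, SDT_* and selector macros from
 * <machine/segments.h>: installing the divide-error trap gate would look
 * something like
 *
 *	setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
 *	    GSEL(GCODE_SEL, SEL_KPL));
 *
 * i.e. vector number, handler, gate type, descriptor privilege level and the
 * code segment selector the handler will run with.
 */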
1587
1588 extern inthand_t
1589 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1590 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1591 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1592 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1593 IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
1594
1595 #ifdef DDB
1596 /*
1597 * Display the index and function name of any IDT entries that don't use
1598 * the default 'rsvd' entry point.
1599 */
1600 DB_SHOW_COMMAND(idt, db_show_idt)
1601 {
1602 struct gate_descriptor *ip;
1603 int idx;
1604 uintptr_t func;
1605
1606 ip = idt;
1607 for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
1608 func = (ip->gd_hioffset << 16 | ip->gd_looffset);
1609 if (func != (uintptr_t)&IDTVEC(rsvd)) {
1610 db_printf("%3d\t", idx);
1611 db_printsym(func, DB_STGY_PROC);
1612 db_printf("\n");
1613 }
1614 ip++;
1615 }
1616 }
1617
1618 /* Show privileged registers. */
1619 DB_SHOW_COMMAND(sysregs, db_show_sysregs)
1620 {
1621 uint64_t idtr, gdtr;
1622
1623 idtr = ridt();
1624 db_printf("idtr\t0x%08x/%04x\n",
1625 (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
1626 gdtr = rgdt();
1627 db_printf("gdtr\t0x%08x/%04x\n",
1628 (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
1629 db_printf("ldtr\t0x%04x\n", rldt());
1630 db_printf("tr\t0x%04x\n", rtr());
1631 db_printf("cr0\t0x%08x\n", rcr0());
1632 db_printf("cr2\t0x%08x\n", rcr2());
1633 db_printf("cr3\t0x%08x\n", rcr3());
1634 db_printf("cr4\t0x%08x\n", rcr4());
1635 }
1636 #endif
1637
1638 void
1639 sdtossd(sd, ssd)
1640 struct segment_descriptor *sd;
1641 struct soft_segment_descriptor *ssd;
1642 {
1643 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1644 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1645 ssd->ssd_type = sd->sd_type;
1646 ssd->ssd_dpl = sd->sd_dpl;
1647 ssd->ssd_p = sd->sd_p;
1648 ssd->ssd_def32 = sd->sd_def32;
1649 ssd->ssd_gran = sd->sd_gran;
1650 }
1651
1652 static int
1653 add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
1654 {
1655 int i, physmap_idx;
1656
1657 physmap_idx = *physmap_idxp;
1658
1659 if (boothowto & RB_VERBOSE)
1660 printf("SMAP type=%02x base=%016llx len=%016llx\n",
1661 smap->type, smap->base, smap->length);
1662
1663 if (smap->type != SMAP_TYPE_MEMORY)
1664 return (1);
1665
1666 if (smap->length == 0)
1667 return (1);
1668
1669 #ifndef PAE
1670 if (smap->base >= 0xffffffff) {
1671 printf("%uK of memory above 4GB ignored\n",
1672 (u_int)(smap->length / 1024));
1673 return (1);
1674 }
1675 #endif
1676
1677 for (i = 0; i <= physmap_idx; i += 2) {
1678 if (smap->base < physmap[i + 1]) {
1679 if (boothowto & RB_VERBOSE)
1680 printf(
1681 "Overlapping or non-monotonic memory region, ignoring second region\n");
1682 return (1);
1683 }
1684 }
1685
1686 if (smap->base == physmap[physmap_idx + 1]) {
1687 physmap[physmap_idx + 1] += smap->length;
1688 return (1);
1689 }
1690
1691 physmap_idx += 2;
1692 *physmap_idxp = physmap_idx;
1693 if (physmap_idx == PHYSMAP_SIZE) {
1694 printf(
1695 "Too many segments in the physical address map, giving up\n");
1696 return (0);
1697 }
1698 physmap[physmap_idx] = smap->base;
1699 physmap[physmap_idx + 1] = smap->base + smap->length;
1700 return (1);
1701 }
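/*
 * Worked example of the merging above.  Suppose the BIOS reports
 * (type 01 being SMAP_TYPE_MEMORY):
 *
 *	base=0x0000000000000000 len=0x000000000009fc00 type=01
 *	base=0x000000000009fc00 len=0x0000000000000400 type=02
 *	base=0x0000000000100000 len=0x000000003fef0000 type=01
 *
 * The type 02 entry is skipped and the usable entries end up as the two
 * physmap[] pairs [0, 0x9fc00) and [0x100000, 0x3fff0000).  An entry whose
 * base equals the current physmap[physmap_idx + 1] would instead just extend
 * the existing pair.
 */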
1702
1703 /*
1704 * Populate the (physmap) array with base/bound pairs describing the
1705 * available physical memory in the system, then test this memory and
1706 * build the phys_avail array describing the actually-available memory.
1707 *
1708 * If we cannot accurately determine the physical memory map, then use
1709 * value from the 0xE801 call, and failing that, the RTC.
1710 *
1711 * Total memory size may be set by the kernel environment variable
1712 * hw.physmem or the compile-time define MAXMEM.
1713 *
1714 * XXX first should be vm_paddr_t.
1715 */
1716 static void
1717 getmemsize(int first)
1718 {
1719 int i, off, physmap_idx, pa_indx, da_indx;
1720 int hasbrokenint12, has_smap;
1721 u_long physmem_tunable;
1722 u_int extmem;
1723 struct vm86frame vmf;
1724 struct vm86context vmc;
1725 vm_paddr_t pa, physmap[PHYSMAP_SIZE];
1726 pt_entry_t *pte;
1727 struct bios_smap *smap, *smapbase, *smapend;
1728 u_int32_t smapsize;
1729 quad_t dcons_addr, dcons_size;
1730 caddr_t kmdp;
1731
1732 has_smap = 0;
1733 #ifdef XBOX
1734 if (arch_i386_is_xbox) {
1735 /*
1736 * We queried the memory size before, so chop off 4MB for
1737 * the framebuffer and inform the OS of this.
1738 */
1739 physmap[0] = 0;
1740 physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE;
1741 physmap_idx = 0;
1742 goto physmap_done;
1743 }
1744 #endif
1745
1746 hasbrokenint12 = 0;
1747 TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
1748 bzero(&vmf, sizeof(vmf));
1749 bzero(physmap, sizeof(physmap));
1750 basemem = 0;
1751
1752 /*
1753 * Some newer BIOSes have a broken INT 12H implementation which causes
1754 * an immediate kernel panic. In this case, we need to scan the SMAP
1755 * with INT 15:E820 first, then determine the base memory size.
1756 */
1757 if (hasbrokenint12) {
1758 goto int15e820;
1759 }
1760
1761 /*
1762 * Perform "base memory" related probes & setup
1763 */
1764 vm86_intcall(0x12, &vmf);
1765 basemem = vmf.vmf_ax;
1766 if (basemem > 640) {
1767 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
1768 basemem);
1769 basemem = 640;
1770 }
1771
1772 /*
1773 * XXX if biosbasemem is now < 640, there is a `hole'
1774 * between the end of base memory and the start of
1775 * ISA memory. The hole may be empty or it may
1776 * contain BIOS code or data. Map it read/write so
1777 * that the BIOS can write to it. (Memory from 0 to
1778 * the physical end of the kernel is mapped read-only
1779 * to begin with and then parts of it are remapped.
1780 * The parts that aren't remapped form holes that
1781 * remain read-only and are unused by the kernel.
1782 * The base memory area is below the physical end of
1783 * the kernel and right now forms a read-only hole.
1784 * The part of it from PAGE_SIZE to
1785 * (trunc_page(biosbasemem * 1024) - 1) will be
1786 * remapped and used by the kernel later.)
1787 *
1788 * This code is similar to the code used in
1789 * pmap_mapdev, but since no memory needs to be
1790 * allocated we simply change the mapping.
1791 */
1792 for (pa = trunc_page(basemem * 1024);
1793 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1794 pmap_kenter(KERNBASE + pa, pa);
1795
1796 /*
1797 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
1798 * the vm86 page table so that vm86 can scribble on them using
1799 * the vm86 map too. XXX: why 2 ways for this and only 1 way for
1800 * page 0, at least as initialized here?
1801 */
1802 pte = (pt_entry_t *)vm86paddr;
1803 for (i = basemem / 4; i < 160; i++)
1804 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
1805
1806 int15e820:
1807 /*
1808 * Fetch the memory map with INT 15:E820. First, check to see
1809 * if the loader supplied it and use that if so. Otherwise,
1810 * use vm86 to invoke the BIOS call directly.
1811 */
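/*
 * Editorial note, a hedged summary of the E820 protocol as it is
 * commonly documented: each call returns one entry -- a 64-bit base,
 * a 64-bit length and a type -- in the buffer at ES:DI, with EAX
 * echoing the 'SMAP' signature and EBX holding a continuation value
 * that becomes zero after the last entry.  That is why the vm86
 * fallback below loops on vmf_ebx and re-arms EAX/EDX/ECX before
 * every call.
 */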
1812 physmap_idx = 0;
1813 smapbase = NULL;
1814 kmdp = preload_search_by_type("elf kernel");
1815 if (kmdp == NULL)
1816 kmdp = preload_search_by_type("elf32 kernel");
1817 if (kmdp != NULL)
1818 smapbase = (struct bios_smap *)preload_search_info(kmdp,
1819 MODINFO_METADATA | MODINFOMD_SMAP);
1820 if (smapbase != NULL) {
1821 /* subr_module.c says:
1822 * "Consumer may safely assume that size value precedes data."
1823 * ie: an int32_t immediately precedes smap.
1824 */
1825 smapsize = *((u_int32_t *)smapbase - 1);
1826 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
1827 has_smap = 1;
1828
1829 for (smap = smapbase; smap < smapend; smap++)
1830 if (!add_smap_entry(smap, physmap, &physmap_idx))
1831 break;
1832 } else {
1833 /*
1834 * map page 1 R/W into the kernel page table so we can use it
1835 * as a buffer. The kernel will unmap this page later.
1836 */
1837 pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
1838 vmc.npages = 0;
1839 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE +
1840 (1 << PAGE_SHIFT));
1841 vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
1842
1843 vmf.vmf_ebx = 0;
1844 do {
1845 vmf.vmf_eax = 0xE820;
1846 vmf.vmf_edx = SMAP_SIG;
1847 vmf.vmf_ecx = sizeof(struct bios_smap);
1848 i = vm86_datacall(0x15, &vmf, &vmc);
1849 if (i || vmf.vmf_eax != SMAP_SIG)
1850 break;
1851 has_smap = 1;
1852 if (!add_smap_entry(smap, physmap, &physmap_idx))
1853 break;
1854 } while (vmf.vmf_ebx != 0);
1855 }
1856
1857 /*
1858 * Perform "base memory" related probes & setup based on SMAP
1859 */
1860 if (basemem == 0) {
1861 for (i = 0; i <= physmap_idx; i += 2) {
1862 if (physmap[i] == 0x00000000) {
1863 basemem = physmap[i + 1] / 1024;
1864 break;
1865 }
1866 }
1867
1868 /*
1869 * XXX this function is horribly organized and has to do the same
1870 * things here that it does above.
1871 */
1872 if (basemem == 0)
1873 basemem = 640;
1874 if (basemem > 640) {
1875 printf(
1876 "Preposterous BIOS basemem of %uK, truncating to 640K\n",
1877 basemem);
1878 basemem = 640;
1879 }
1880
1881 /*
1882 * Let vm86 scribble on pages between basemem and
1883 * ISA_HOLE_START, as above.
1884 */
1885 for (pa = trunc_page(basemem * 1024);
1886 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1887 pmap_kenter(KERNBASE + pa, pa);
1888 pte = (pt_entry_t *)vm86paddr;
1889 for (i = basemem / 4; i < 160; i++)
1890 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
1891 }
1892
1893 if (physmap[1] != 0)
1894 goto physmap_done;
1895
1896 /*
1897 * If we failed above, try memory map with INT 15:E801
1898 */
1899 vmf.vmf_ax = 0xE801;
1900 if (vm86_intcall(0x15, &vmf) == 0) {
1901 extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
1902 } else {
1903 #if 0
1904 vmf.vmf_ah = 0x88;
1905 vm86_intcall(0x15, &vmf);
1906 extmem = vmf.vmf_ax;
1907 #else
1908 /*
1909 * Prefer the RTC value for extended memory.
1910 */
1911 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
1912 #endif
1913 }
1914
1915 /*
1916 * Special hack for chipsets that still remap the 384k hole when
1917 * there's 16MB of memory - this really confuses people that
1918 * are trying to use bus mastering ISA controllers with the
1919 * "16MB limit"; they only have 16MB, but the remapping puts
1920 * them beyond the limit.
1921 *
1922 * If extended memory is between 15-16MB (16-17MB phys address range),
1923 * chop it to 15MB.
1924 */
1925 if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
1926 extmem = 15 * 1024;
1927
1928 physmap[0] = 0;
1929 physmap[1] = basemem * 1024;
1930 physmap_idx = 2;
1931 physmap[physmap_idx] = 0x100000;
1932 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
1933
1934 physmap_done:
1935 /*
1936 * Now, physmap contains a map of physical memory.
1937 */
1938
1939 #ifdef SMP
1940 /* make hole for AP bootstrap code */
1941 physmap[1] = mp_bootaddress(physmap[1]);
1942 #endif
1943
1944 /*
1945 * Maxmem isn't the "maximum memory", it's one larger than the
1946 * highest page of the physical address space. It should be
1947 * called something like "Maxphyspage". We may adjust this
1948 * based on ``hw.physmem'' and the results of the memory test.
1949 */
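/*
 * Illustrative example (editorial): with 4K pages atop() is just a
 * right shift by PAGE_SHIFT, so a map whose last segment ends at
 * 0x20000000 (512MB) gives Maxmem == 0x20000 pages.
 */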
1950 Maxmem = atop(physmap[physmap_idx + 1]);
1951
1952 #ifdef MAXMEM
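/*
 * Editorial note: the MAXMEM option is given in kilobytes, so with
 * 4K pages dividing by 4 converts it to a page count.
 */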
1953 Maxmem = MAXMEM / 4;
1954 #endif
1955
1956 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
1957 Maxmem = atop(physmem_tunable);
1958
1959 /*
1960 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
1961 * the amount of memory in the system.
1962 */
1963 if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
1964 Maxmem = atop(physmap[physmap_idx + 1]);
1965
1966 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1967 (boothowto & RB_VERBOSE))
1968 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1969
1970 /*
1971 * If Maxmem has been increased beyond what the system has detected,
1972 * extend the last memory segment to the new limit.
1973 */
1974 if (atop(physmap[physmap_idx + 1]) < Maxmem)
1975 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
1976
1977 /* call pmap initialization to make new kernel address space */
1978 pmap_bootstrap(first);
1979
1980 /*
1981 * Size up each available chunk of physical memory.
1982 */
1983 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1984 pa_indx = 0;
1985 da_indx = 1;
1986 phys_avail[pa_indx++] = physmap[0];
1987 phys_avail[pa_indx] = physmap[0];
1988 dump_avail[da_indx] = physmap[0];
1989 pte = CMAP1;
1990
1991 /*
1992 * Get dcons buffer address
1993 */
1994 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
1995 getenv_quad("dcons.size", &dcons_size) == 0)
1996 dcons_addr = 0;
1997
1998 /*
1999 * physmap is in bytes, so when converting to page boundaries,
2000 * round up the start address and round down the end address.
2001 */
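/*
 * Illustrative example (editorial): a segment [0x100123, 0x3ffff00)
 * shrinks to the page-aligned range [0x101000, 0x3fff000) before the
 * per-page test below.
 */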
2002 for (i = 0; i <= physmap_idx; i += 2) {
2003 vm_paddr_t end;
2004
2005 end = ptoa((vm_paddr_t)Maxmem);
2006 if (physmap[i + 1] < end)
2007 end = trunc_page(physmap[i + 1]);
2008 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
2009 int tmp, page_bad, full;
2010 int *ptr = (int *)CADDR1;
2011
2012 full = FALSE;
2013 /*
2014 * block out kernel memory as not available.
2015 */
2016 if (pa >= KERNLOAD && pa < first)
2017 goto do_dump_avail;
2018
2019 /*
2020 * block out dcons buffer
2021 */
2022 if (dcons_addr > 0
2023 && pa >= trunc_page(dcons_addr)
2024 && pa < dcons_addr + dcons_size)
2025 goto do_dump_avail;
2026
2027 page_bad = FALSE;
2028
2029 /*
2030 * map page into kernel: valid, read/write, non-cacheable
2031 */
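/*
 * (Editorial note: PG_N matters here -- the pattern writes below
 * must reach the actual DRAM rather than the cache for the test to
 * be meaningful.)
 */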
2032 *pte = pa | PG_V | PG_RW | PG_N;
2033 invltlb();
2034
2035 tmp = *(int *)ptr;
2036 /*
2037 * Test for alternating 1's and 0's
2038 */
2039 *(volatile int *)ptr = 0xaaaaaaaa;
2040 if (*(volatile int *)ptr != 0xaaaaaaaa)
2041 page_bad = TRUE;
2042 /*
2043 * Test for alternating 0's and 1's
2044 */
2045 *(volatile int *)ptr = 0x55555555;
2046 if (*(volatile int *)ptr != 0x55555555)
2047 page_bad = TRUE;
2048 /*
2049 * Test for all 1's
2050 */
2051 *(volatile int *)ptr = 0xffffffff;
2052 if (*(volatile int *)ptr != 0xffffffff)
2053 page_bad = TRUE;
2054 /*
2055 * Test for all 0's
2056 */
2057 *(volatile int *)ptr = 0x0;
2058 if (*(volatile int *)ptr != 0x0)
2059 page_bad = TRUE;
2060 /*
2061 * Restore original value.
2062 */
2063 *(int *)ptr = tmp;
2064
2065 /*
2066 * Adjust array of valid/good pages.
2067 */
2068 if (page_bad == TRUE)
2069 continue;
2070 /*
2071 * If this good page is a continuation of the
2072 * previous set of good pages, then just increase
2073 * the end pointer. Otherwise start a new chunk.
2074 * Note that "end" points one past the last valid
2075 * address, making the range >= start and < end.
2076 * If we're also doing a speculative memory
2077 * test and we're at or past the end, bump up Maxmem
2078 * so that we keep going. The first bad page
2079 * will terminate the loop.
2080 */
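/*
 * Editorial sketch of the result: phys_avail[] ends up holding
 * start/end pairs of tested-good memory, for example
 *   phys_avail[0] = 0x00001000   phys_avail[1] = 0x0009f000
 *   phys_avail[2] = 0x00400000   phys_avail[3] = 0x1ffff000
 * dump_avail[] is built the same way but also keeps the ranges
 * skipped above (kernel text/data and the dcons buffer).
 */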
2081 if (phys_avail[pa_indx] == pa) {
2082 phys_avail[pa_indx] += PAGE_SIZE;
2083 } else {
2084 pa_indx++;
2085 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
2086 printf(
2087 "Too many holes in the physical address space, giving up\n");
2088 pa_indx--;
2089 full = TRUE;
2090 goto do_dump_avail;
2091 }
2092 phys_avail[pa_indx++] = pa; /* start */
2093 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
2094 }
2095 physmem++;
2096 do_dump_avail:
2097 if (dump_avail[da_indx] == pa) {
2098 dump_avail[da_indx] += PAGE_SIZE;
2099 } else {
2100 da_indx++;
2101 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2102 da_indx--;
2103 goto do_next;
2104 }
2105 dump_avail[da_indx++] = pa; /* start */
2106 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2107 }
2108 do_next:
2109 if (full)
2110 break;
2111 }
2112 }
2113 *pte = 0;
2114 invltlb();
2115
2116 /*
2117 * XXX
2118 * The last chunk must contain at least one page plus the message
2119 * buffer to avoid complicating other code (message buffer address
2120 * calculation, etc.).
2121 */
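/*
 * (Editorial restatement: if the final phys_avail chunk cannot hold
 * a spare page plus the rounded-up message buffer, it is discarded
 * and the previous chunk becomes the final one; the buffer is then
 * carved off the end of whichever chunk survives.)
 */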
2122 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2123 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
2124 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2125 phys_avail[pa_indx--] = 0;
2126 phys_avail[pa_indx--] = 0;
2127 }
2128
2129 Maxmem = atop(phys_avail[pa_indx]);
2130
2131 /* Trim off space for the message buffer. */
2132 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
2133
2134 /* Map the message buffer. */
2135 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
2136 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
2137 off);
2138 }
2139
2140 void
2141 init386(first)
2142 int first;
2143 {
2144 struct gate_descriptor *gdp;
2145 int gsel_tss, metadata_missing, x;
2146 struct pcpu *pc;
2147
2148 thread0.td_kstack = proc0kstack;
2149 thread0.td_pcb = (struct pcb *)
2150 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
2151
2152 /*
2153 * This may be done better later if it gets more high level
2154 * components in it. If so, just link td->td_proc here.
2155 */
2156 proc_linkup0(&proc0, &thread0);
2157
2158 metadata_missing = 0;
2159 if (bootinfo.bi_modulep) {
2160 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
2161 preload_bootstrap_relocate(KERNBASE);
2162 } else {
2163 metadata_missing = 1;
2164 }
2165 if (envmode == 1)
2166 kern_envp = static_env;
2167 else if (bootinfo.bi_envp)
2168 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
2169
2170 /* Init basic tunables, hz etc */
2171 init_param1();
2172
2173 /*
2174 * Make gdt memory segments. All segments cover the full 4GB
2175 * of address space and permissions are enforced at page level.
2176 */
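/*
 * Editorial note: atop(0 - 1) == 0xfffff pages; together with the
 * descriptors' page-granularity bit this expresses a 4GB "flat"
 * limit, so protection comes from the page tables rather than from
 * segment limits.  GPRIV_SEL below additionally gets a per-CPU base
 * so that %fs-relative accesses reach the pcpu area.
 */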
2177 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
2178 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
2179 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
2180 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
2181 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
2182 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
2183
2184 pc = &__pcpu[0];
2185 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
2186 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2187 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2188
2189 for (x = 0; x < NGDT; x++)
2190 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2191
2192 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
2193 r_gdt.rd_base = (int) gdt;
2194 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
2195 lgdt(&r_gdt);
2196
2197 pcpu_init(pc, 0, sizeof(struct pcpu));
2198 PCPU_SET(prvspace, pc);
2199 PCPU_SET(curthread, &thread0);
2200 PCPU_SET(curpcb, thread0.td_pcb);
2201
2202 /*
2203 * Initialize mutexes.
2204 *
2205 * icu_lock: in order to allow an interrupt to occur in a critical
2206 * section, to set pcpu->ipending (etc...) properly, we
2207 * must be able to get the icu lock, so it can't be
2208 * under witness.
2209 */
2210 mutex_init();
2211 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2212
2213 /* make ldt memory segments */
2214 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2215 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2216 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2217 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2218
2219 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2220 lldt(_default_ldt);
2221 PCPU_SET(currentldt, _default_ldt);
2222
2223 /* exceptions */
2224 for (x = 0; x < NIDT; x++)
2225 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2226 GSEL(GCODE_SEL, SEL_KPL));
2227 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2228 GSEL(GCODE_SEL, SEL_KPL));
2229 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2230 GSEL(GCODE_SEL, SEL_KPL));
2231 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2232 GSEL(GCODE_SEL, SEL_KPL));
2233 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2234 GSEL(GCODE_SEL, SEL_KPL));
2235 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2236 GSEL(GCODE_SEL, SEL_KPL));
2237 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2238 GSEL(GCODE_SEL, SEL_KPL));
2239 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2240 GSEL(GCODE_SEL, SEL_KPL));
2241 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
2242 GSEL(GCODE_SEL, SEL_KPL));
2243 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2244 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2245 GSEL(GCODE_SEL, SEL_KPL));
2246 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2247 GSEL(GCODE_SEL, SEL_KPL));
2248 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2249 GSEL(GCODE_SEL, SEL_KPL));
2250 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2251 GSEL(GCODE_SEL, SEL_KPL));
2252 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2253 GSEL(GCODE_SEL, SEL_KPL));
2254 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2255 GSEL(GCODE_SEL, SEL_KPL));
2256 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2257 GSEL(GCODE_SEL, SEL_KPL));
2258 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2259 GSEL(GCODE_SEL, SEL_KPL));
2260 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2261 GSEL(GCODE_SEL, SEL_KPL));
2262 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2263 GSEL(GCODE_SEL, SEL_KPL));
2264 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2265 GSEL(GCODE_SEL, SEL_KPL));
2266
2267 r_idt.rd_limit = sizeof(idt0) - 1;
2268 r_idt.rd_base = (int) idt;
2269 lidt(&r_idt);
2270
2271 #ifdef XBOX
2272 /*
2273 * The following code queries the PCI ID of 0:0:0. For the XBOX,
2274 * this should be 0x10de / 0x02a5.
2275 *
2276 * This is exactly what Linux does.
2277 */
2278 outl(0xcf8, 0x80000000);
2279 if (inl(0xcfc) == 0x02a510de) {
2280 arch_i386_is_xbox = 1;
2281 pic16l_setled(XBOX_LED_GREEN);
2282
2283 /*
2284 * We are an XBOX, but we may have either 64MB or 128MB of
2285 * memory. The PCI host bridge should be programmed for this,
2286 * so we just query it.
2287 */
2288 outl(0xcf8, 0x80000084);
2289 arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
2290 }
2291 #endif /* XBOX */
2292
2293 /*
2294 * Initialize the i8254 before the console so that console
2295 * initialization can use DELAY().
2296 */
2297 i8254_init();
2298
2299 /*
2300 * Initialize the console before we print anything out.
2301 */
2302 cninit();
2303
2304 if (metadata_missing)
2305 printf("WARNING: loader(8) metadata is missing!\n");
2306
2307 #ifdef DEV_ISA
2308 elcr_probe();
2309 atpic_startup();
2310 #endif
2311
2312 #ifdef DDB
2313 ksym_start = bootinfo.bi_symtab;
2314 ksym_end = bootinfo.bi_esymtab;
2315 #endif
2316
2317 kdb_init();
2318
2319 #ifdef KDB
2320 if (boothowto & RB_KDB)
2321 kdb_enter_why(KDB_WHY_BOOTFLAGS,
2322 "Boot flags requested debugger");
2323 #endif
2324
2325 finishidentcpu(); /* Final stage of CPU initialization */
2326 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2327 GSEL(GCODE_SEL, SEL_KPL));
2328 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2329 GSEL(GCODE_SEL, SEL_KPL));
2330 initializecpu(); /* Initialize CPU registers */
2331
2332 /* make an initial tss so cpu can get interrupt stack on syscall! */
2333 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
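/*
 * (Editorial note: the 16 bytes correspond to the four extra 32-bit
 * segment-register slots a vm86 trapframe carries beyond a normal
 * one, so esp0 leaves room to extend the frame in place.)
 */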
2334 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2335 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
2336 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2337 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2338 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
2339 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
2340 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
2341 ltr(gsel_tss);
2342
2343 /* pointer to selector slot for %fs/%gs */
2344 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
2345
2346 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2347 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2348 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2349 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2350 #ifdef PAE
2351 dblfault_tss.tss_cr3 = (int)IdlePDPT;
2352 #else
2353 dblfault_tss.tss_cr3 = (int)IdlePTD;
2354 #endif
2355 dblfault_tss.tss_eip = (int)dblfault_handler;
2356 dblfault_tss.tss_eflags = PSL_KERNEL;
2357 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2358 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2359 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2360 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2361 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2362
2363 vm86_initialize();
2364 getmemsize(first);
2365 init_param2(physmem);
2366
2367 /* now running on new page tables, configured, and u/iom is accessible */
2368
2369 msgbufinit(msgbufp, MSGBUF_SIZE);
2370
2371 /* make a call gate to reenter kernel with */
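/*
 * Editorial note: this is the LDT call gate behind the historical
 * "lcall $7,$0" system call convention (selector 7 selects LDT
 * entry 0 at user privilege).  The copies into LBSDICALLS_SEL and
 * LSOL26CALLS_SEL below give BSD/OS and Solaris/x86 binaries the
 * same entry point.
 */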
2372 gdp = &ldt[LSYS5CALLS_SEL].gd;
2373
2374 x = (int) &IDTVEC(lcall_syscall);
2375 gdp->gd_looffset = x;
2376 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
2377 gdp->gd_stkcpy = 1;
2378 gdp->gd_type = SDT_SYS386CGT;
2379 gdp->gd_dpl = SEL_UPL;
2380 gdp->gd_p = 1;
2381 gdp->gd_hioffset = x >> 16;
2382
2383 /* XXX does this work? */
2384 /* XXX yes! */
2385 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2386 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
2387
2388 /* transfer to user mode */
2389
2390 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
2391 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
2392
2393 /* setup proc 0's pcb */
2394 thread0.td_pcb->pcb_flags = 0;
2395 #ifdef PAE
2396 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
2397 #else
2398 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2399 #endif
2400 thread0.td_pcb->pcb_ext = 0;
2401 thread0.td_frame = &proc0_tf;
2402 }
2403
2404 void
2405 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
2406 {
2407
2408 pcpu->pc_acpi_id = 0xffffffff;
2409 }
2410
2411 void
2412 spinlock_enter(void)
2413 {
2414 struct thread *td;
2415
2416 td = curthread;
2417 if (td->td_md.md_spinlock_count == 0)
2418 td->td_md.md_saved_flags = intr_disable();
2419 td->td_md.md_spinlock_count++;
2420 critical_enter();
2421 }
2422
2423 void
2424 spinlock_exit(void)
2425 {
2426 struct thread *td;
2427
2428 td = curthread;
2429 critical_exit();
2430 td->td_md.md_spinlock_count--;
2431 if (td->td_md.md_spinlock_count == 0)
2432 intr_restore(td->td_md.md_saved_flags);
2433 }
2434
2435 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2436 static void f00f_hack(void *unused);
2437 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
2438
2439 static void
2440 f00f_hack(void *unused)
2441 {
2442 struct gate_descriptor *new_idt;
2443 vm_offset_t tmp;
2444
2445 if (!has_f00f_bug)
2446 return;
2447
2448 GIANT_REQUIRED;
2449
2450 printf("Intel Pentium detected, installing workaround for F00F bug\n");
2451
2452 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
2453 if (tmp == 0)
2454 panic("kmem_alloc returned 0");
2455
2456 /* Put the problematic entry (#6) at the end of the lower page. */
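/*
 * (Editorial note: vector 6 is the invalid-opcode (#UD) exception,
 * the one raised by the F00F instruction sequence.)
 */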
2457 new_idt = (struct gate_descriptor*)
2458 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
2459 bcopy(idt, new_idt, sizeof(idt0));
2460 r_idt.rd_base = (u_int)new_idt;
2461 lidt(&r_idt);
2462 idt = new_idt;
2463 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
2464 VM_PROT_READ, FALSE) != KERN_SUCCESS)
2465 panic("vm_map_protect failed");
2466 }
2467 #endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
2468
2469 /*
2470 * Construct a PCB from a trapframe. This is called from kdb_trap() where
2471 * we want to start a backtrace from the function that caused us to enter
2472 * the debugger. We have the context in the trapframe, but base the trace
2473 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
2474 * enough for a backtrace.
2475 */
2476 void
2477 makectx(struct trapframe *tf, struct pcb *pcb)
2478 {
2479
2480 pcb->pcb_edi = tf->tf_edi;
2481 pcb->pcb_esi = tf->tf_esi;
2482 pcb->pcb_ebp = tf->tf_ebp;
2483 pcb->pcb_ebx = tf->tf_ebx;
2484 pcb->pcb_eip = tf->tf_eip;
2485 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
2486 }
2487
2488 int
2489 ptrace_set_pc(struct thread *td, u_long addr)
2490 {
2491
2492 td->td_frame->tf_eip = addr;
2493 return (0);
2494 }
2495
2496 int
2497 ptrace_single_step(struct thread *td)
2498 {
2499 td->td_frame->tf_eflags |= PSL_T;
2500 return (0);
2501 }
2502
2503 int
2504 ptrace_clear_single_step(struct thread *td)
2505 {
2506 td->td_frame->tf_eflags &= ~PSL_T;
2507 return (0);
2508 }
2509
2510 int
2511 fill_regs(struct thread *td, struct reg *regs)
2512 {
2513 struct pcb *pcb;
2514 struct trapframe *tp;
2515
2516 tp = td->td_frame;
2517 pcb = td->td_pcb;
2518 regs->r_fs = tp->tf_fs;
2519 regs->r_es = tp->tf_es;
2520 regs->r_ds = tp->tf_ds;
2521 regs->r_edi = tp->tf_edi;
2522 regs->r_esi = tp->tf_esi;
2523 regs->r_ebp = tp->tf_ebp;
2524 regs->r_ebx = tp->tf_ebx;
2525 regs->r_edx = tp->tf_edx;
2526 regs->r_ecx = tp->tf_ecx;
2527 regs->r_eax = tp->tf_eax;
2528 regs->r_eip = tp->tf_eip;
2529 regs->r_cs = tp->tf_cs;
2530 regs->r_eflags = tp->tf_eflags;
2531 regs->r_esp = tp->tf_esp;
2532 regs->r_ss = tp->tf_ss;
2533 regs->r_gs = pcb->pcb_gs;
2534 return (0);
2535 }
2536
2537 int
2538 set_regs(struct thread *td, struct reg *regs)
2539 {
2540 struct pcb *pcb;
2541 struct trapframe *tp;
2542
2543 tp = td->td_frame;
2544 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
2545 !CS_SECURE(regs->r_cs))
2546 return (EINVAL);
2547 pcb = td->td_pcb;
2548 tp->tf_fs = regs->r_fs;
2549 tp->tf_es = regs->r_es;
2550 tp->tf_ds = regs->r_ds;
2551 tp->tf_edi = regs->r_edi;
2552 tp->tf_esi = regs->r_esi;
2553 tp->tf_ebp = regs->r_ebp;
2554 tp->tf_ebx = regs->r_ebx;
2555 tp->tf_edx = regs->r_edx;
2556 tp->tf_ecx = regs->r_ecx;
2557 tp->tf_eax = regs->r_eax;
2558 tp->tf_eip = regs->r_eip;
2559 tp->tf_cs = regs->r_cs;
2560 tp->tf_eflags = regs->r_eflags;
2561 tp->tf_esp = regs->r_esp;
2562 tp->tf_ss = regs->r_ss;
2563 pcb->pcb_gs = regs->r_gs;
2564 return (0);
2565 }
2566
2567 #ifdef CPU_ENABLE_SSE
2568 static void
2569 fill_fpregs_xmm(sv_xmm, sv_87)
2570 struct savexmm *sv_xmm;
2571 struct save87 *sv_87;
2572 {
2573 register struct env87 *penv_87 = &sv_87->sv_env;
2574 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2575 int i;
2576
2577 bzero(sv_87, sizeof(*sv_87));
2578
2579 /* FPU control/status */
2580 penv_87->en_cw = penv_xmm->en_cw;
2581 penv_87->en_sw = penv_xmm->en_sw;
2582 penv_87->en_tw = penv_xmm->en_tw;
2583 penv_87->en_fip = penv_xmm->en_fip;
2584 penv_87->en_fcs = penv_xmm->en_fcs;
2585 penv_87->en_opcode = penv_xmm->en_opcode;
2586 penv_87->en_foo = penv_xmm->en_foo;
2587 penv_87->en_fos = penv_xmm->en_fos;
2588
2589 /* FPU registers */
2590 for (i = 0; i < 8; ++i)
2591 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
2592 }
2593
2594 static void
2595 set_fpregs_xmm(sv_87, sv_xmm)
2596 struct save87 *sv_87;
2597 struct savexmm *sv_xmm;
2598 {
2599 register struct env87 *penv_87 = &sv_87->sv_env;
2600 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2601 int i;
2602
2603 /* FPU control/status */
2604 penv_xmm->en_cw = penv_87->en_cw;
2605 penv_xmm->en_sw = penv_87->en_sw;
2606 penv_xmm->en_tw = penv_87->en_tw;
2607 penv_xmm->en_fip = penv_87->en_fip;
2608 penv_xmm->en_fcs = penv_87->en_fcs;
2609 penv_xmm->en_opcode = penv_87->en_opcode;
2610 penv_xmm->en_foo = penv_87->en_foo;
2611 penv_xmm->en_fos = penv_87->en_fos;
2612
2613 /* FPU registers */
2614 for (i = 0; i < 8; ++i)
2615 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
2616 }
2617 #endif /* CPU_ENABLE_SSE */
2618
2619 int
2620 fill_fpregs(struct thread *td, struct fpreg *fpregs)
2621 {
2622 #ifdef CPU_ENABLE_SSE
2623 if (cpu_fxsr) {
2624 fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
2625 (struct save87 *)fpregs);
2626 return (0);
2627 }
2628 #endif /* CPU_ENABLE_SSE */
2629 bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
2630 return (0);
2631 }
2632
2633 int
2634 set_fpregs(struct thread *td, struct fpreg *fpregs)
2635 {
2636 #ifdef CPU_ENABLE_SSE
2637 if (cpu_fxsr) {
2638 set_fpregs_xmm((struct save87 *)fpregs,
2639 &td->td_pcb->pcb_save.sv_xmm);
2640 return (0);
2641 }
2642 #endif /* CPU_ENABLE_SSE */
2643 bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
2644 return (0);
2645 }
2646
2647 /*
2648 * Get machine context.
2649 */
2650 int
2651 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
2652 {
2653 struct trapframe *tp;
2654
2655 tp = td->td_frame;
2656
2657 PROC_LOCK(curthread->td_proc);
2658 mcp->mc_onstack = sigonstack(tp->tf_esp);
2659 PROC_UNLOCK(curthread->td_proc);
2660 mcp->mc_gs = td->td_pcb->pcb_gs;
2661 mcp->mc_fs = tp->tf_fs;
2662 mcp->mc_es = tp->tf_es;
2663 mcp->mc_ds = tp->tf_ds;
2664 mcp->mc_edi = tp->tf_edi;
2665 mcp->mc_esi = tp->tf_esi;
2666 mcp->mc_ebp = tp->tf_ebp;
2667 mcp->mc_isp = tp->tf_isp;
2668 mcp->mc_eflags = tp->tf_eflags;
2669 if (flags & GET_MC_CLEAR_RET) {
2670 mcp->mc_eax = 0;
2671 mcp->mc_edx = 0;
2672 mcp->mc_eflags &= ~PSL_C;
2673 } else {
2674 mcp->mc_eax = tp->tf_eax;
2675 mcp->mc_edx = tp->tf_edx;
2676 }
2677 mcp->mc_ebx = tp->tf_ebx;
2678 mcp->mc_ecx = tp->tf_ecx;
2679 mcp->mc_eip = tp->tf_eip;
2680 mcp->mc_cs = tp->tf_cs;
2681 mcp->mc_esp = tp->tf_esp;
2682 mcp->mc_ss = tp->tf_ss;
2683 mcp->mc_len = sizeof(*mcp);
2684 get_fpcontext(td, mcp);
2685 return (0);
2686 }
2687
2688 /*
2689 * Set machine context.
2690 *
2691 * However, we don't set any but the user modifiable flags, and we won't
2692 * touch the cs selector.
2693 */
2694 int
2695 set_mcontext(struct thread *td, const mcontext_t *mcp)
2696 {
2697 struct trapframe *tp;
2698 int eflags, ret;
2699
2700 tp = td->td_frame;
2701 if (mcp->mc_len != sizeof(*mcp))
2702 return (EINVAL);
2703 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
2704 (tp->tf_eflags & ~PSL_USERCHANGE);
2705 if ((ret = set_fpcontext(td, mcp)) == 0) {
2706 tp->tf_fs = mcp->mc_fs;
2707 tp->tf_es = mcp->mc_es;
2708 tp->tf_ds = mcp->mc_ds;
2709 tp->tf_edi = mcp->mc_edi;
2710 tp->tf_esi = mcp->mc_esi;
2711 tp->tf_ebp = mcp->mc_ebp;
2712 tp->tf_ebx = mcp->mc_ebx;
2713 tp->tf_edx = mcp->mc_edx;
2714 tp->tf_ecx = mcp->mc_ecx;
2715 tp->tf_eax = mcp->mc_eax;
2716 tp->tf_eip = mcp->mc_eip;
2717 tp->tf_eflags = eflags;
2718 tp->tf_esp = mcp->mc_esp;
2719 tp->tf_ss = mcp->mc_ss;
2720 td->td_pcb->pcb_gs = mcp->mc_gs;
2721 ret = 0;
2722 }
2723 return (ret);
2724 }
2725
2726 static void
2727 get_fpcontext(struct thread *td, mcontext_t *mcp)
2728 {
2729 #ifndef DEV_NPX
2730 mcp->mc_fpformat = _MC_FPFMT_NODEV;
2731 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
2732 #else
2733 union savefpu *addr;
2734
2735 /*
2736 * XXX mc_fpstate might be misaligned, since its declaration is not
2737 * unportabilized using __attribute__((aligned(16))) like the
2738 * declaration of struct savemm, and anyway, alignment doesn't work
2739 * for auto variables since we don't use gcc's pessimal stack
2740 * alignment. Work around this by abusing the spare fields after
2741 * mcp->mc_fpstate.
2742 *
2743 * XXX unpessimize most cases by only aligning when fxsave might be
2744 * called, although this requires knowing too much about
2745 * npxgetregs()'s internals.
2746 */
2747 addr = (union savefpu *)&mcp->mc_fpstate;
2748 if (td == PCPU_GET(fpcurthread) &&
2749 #ifdef CPU_ENABLE_SSE
2750 cpu_fxsr &&
2751 #endif
2752 ((uintptr_t)(void *)addr & 0xF)) {
2753 do
2754 addr = (void *)((char *)addr + 4);
2755 while ((uintptr_t)(void *)addr & 0xF);
2756 }
2757 mcp->mc_ownedfp = npxgetregs(td, addr);
2758 if (addr != (union savefpu *)&mcp->mc_fpstate) {
2759 bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
2760 bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
2761 }
2762 mcp->mc_fpformat = npxformat();
2763 #endif
2764 }
2765
2766 static int
2767 set_fpcontext(struct thread *td, const mcontext_t *mcp)
2768 {
2769 union savefpu *addr;
2770
2771 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
2772 return (0);
2773 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
2774 mcp->mc_fpformat != _MC_FPFMT_XMM)
2775 return (EINVAL);
2776 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
2777 /* We don't care what state is left in the FPU or PCB. */
2778 fpstate_drop(td);
2779 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
2780 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
2781 /* XXX align as above. */
2782 addr = (union savefpu *)&mcp->mc_fpstate;
2783 if (td == PCPU_GET(fpcurthread) &&
2784 #ifdef CPU_ENABLE_SSE
2785 cpu_fxsr &&
2786 #endif
2787 ((uintptr_t)(void *)addr & 0xF)) {
2788 do
2789 addr = (void *)((char *)addr + 4);
2790 while ((uintptr_t)(void *)addr & 0xF);
2791 bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
2792 }
2793 #ifdef DEV_NPX
2794 #ifdef CPU_ENABLE_SSE
2795 if (cpu_fxsr)
2796 addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
2797 #endif
2798 /*
2799 * XXX we violate the dubious requirement that npxsetregs()
2800 * be called with interrupts disabled.
2801 */
2802 npxsetregs(td, addr);
2803 #endif
2804 /*
2805 * Don't bother putting things back where they were in the
2806 * misaligned case, since we know that the caller won't use
2807 * them again.
2808 */
2809 } else
2810 return (EINVAL);
2811 return (0);
2812 }
2813
2814 static void
2815 fpstate_drop(struct thread *td)
2816 {
2817 register_t s;
2818
2819 s = intr_disable();
2820 #ifdef DEV_NPX
2821 if (PCPU_GET(fpcurthread) == td)
2822 npxdrop();
2823 #endif
2824 /*
2825 * XXX force a full drop of the npx. The above only drops it if we
2826 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
2827 *
2828 * XXX I don't much like npxgetregs()'s semantics of doing a full
2829 * drop. Dropping only to the pcb matches fnsave's behaviour.
2830 * We only need to drop to !PCB_INITDONE in sendsig(). But
2831 * sendsig() is the only caller of npxgetregs()... perhaps we just
2832 * have too many layers.
2833 */
2834 curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
2835 intr_restore(s);
2836 }
2837
2838 int
2839 fill_dbregs(struct thread *td, struct dbreg *dbregs)
2840 {
2841 struct pcb *pcb;
2842
2843 if (td == NULL) {
2844 dbregs->dr[0] = rdr0();
2845 dbregs->dr[1] = rdr1();
2846 dbregs->dr[2] = rdr2();
2847 dbregs->dr[3] = rdr3();
2848 dbregs->dr[4] = rdr4();
2849 dbregs->dr[5] = rdr5();
2850 dbregs->dr[6] = rdr6();
2851 dbregs->dr[7] = rdr7();
2852 } else {
2853 pcb = td->td_pcb;
2854 dbregs->dr[0] = pcb->pcb_dr0;
2855 dbregs->dr[1] = pcb->pcb_dr1;
2856 dbregs->dr[2] = pcb->pcb_dr2;
2857 dbregs->dr[3] = pcb->pcb_dr3;
2858 dbregs->dr[4] = 0;
2859 dbregs->dr[5] = 0;
2860 dbregs->dr[6] = pcb->pcb_dr6;
2861 dbregs->dr[7] = pcb->pcb_dr7;
2862 }
2863 return (0);
2864 }
2865
2866 int
2867 set_dbregs(struct thread *td, struct dbreg *dbregs)
2868 {
2869 struct pcb *pcb;
2870 int i;
2871
2872 if (td == NULL) {
2873 load_dr0(dbregs->dr[0]);
2874 load_dr1(dbregs->dr[1]);
2875 load_dr2(dbregs->dr[2]);
2876 load_dr3(dbregs->dr[3]);
2877 load_dr4(dbregs->dr[4]);
2878 load_dr5(dbregs->dr[5]);
2879 load_dr6(dbregs->dr[6]);
2880 load_dr7(dbregs->dr[7]);
2881 } else {
2882 /*
2883 * Don't let an illegal value for dr7 get set. Specifically,
2884 * check for undefined settings. Setting these bit patterns
2885 * results in undefined behaviour and can lead to an unexpected
2886 * TRCTRAP.
2887 */
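/*
 * (Editorial note: per the usual DR7 documentation, the 0x02
 * access-type encoding is reserved for I/O breakpoints, which these
 * kernels do not enable, and the 0x02 length encoding is undefined
 * on IA-32; hence the rejections below.)
 */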
2888 for (i = 0; i < 4; i++) {
2889 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
2890 return (EINVAL);
2891 if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
2892 return (EINVAL);
2893 }
2894
2895 pcb = td->td_pcb;
2896
2897 /*
2898 * Don't let a process set a breakpoint that is not within the
2899 * process's address space. If a process could do this, it
2900 * could halt the system by setting a breakpoint in the kernel
2901 * (if ddb was enabled). Thus, we need to check to make sure
2902 * that no breakpoints are being enabled for addresses outside
2903 * process's address space.
2904 *
2905 * XXX - what about when the watched area of the user's
2906 * address space is written into from within the kernel
2907 * ... wouldn't that still cause a breakpoint to be generated
2908 * from within kernel mode?
2909 */
2910
2911 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
2912 /* dr0 is enabled */
2913 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
2914 return (EINVAL);
2915 }
2916
2917 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
2918 /* dr1 is enabled */
2919 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
2920 return (EINVAL);
2921 }
2922
2923 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
2924 /* dr2 is enabled */
2925 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
2926 return (EINVAL);
2927 }
2928
2929 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
2930 /* dr3 is enabled */
2931 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
2932 return (EINVAL);
2933 }
2934
2935 pcb->pcb_dr0 = dbregs->dr[0];
2936 pcb->pcb_dr1 = dbregs->dr[1];
2937 pcb->pcb_dr2 = dbregs->dr[2];
2938 pcb->pcb_dr3 = dbregs->dr[3];
2939 pcb->pcb_dr6 = dbregs->dr[6];
2940 pcb->pcb_dr7 = dbregs->dr[7];
2941
2942 pcb->pcb_flags |= PCB_DBREGS;
2943 }
2944
2945 return (0);
2946 }
2947
2948 /*
2949 * Return > 0 if a hardware breakpoint has been hit, and the
2950 * breakpoint was in user space. Return 0 otherwise.
2951 */
2952 int
2953 user_dbreg_trap(void)
2954 {
2955 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
2956 u_int32_t bp; /* breakpoint bits extracted from dr6 */
2957 int nbp; /* number of breakpoints that triggered */
2958 caddr_t addr[4]; /* breakpoint addresses */
2959 int i;
2960
2961 dr7 = rdr7();
2962 if ((dr7 & 0x000000ff) == 0) {
2963 /*
2964 * none of the breakpoint enable bits (L0-L3, G0-G3) in dr7 are set,
2965 * thus the trap couldn't have been caused by the
2966 * hardware debug registers
2967 */
2968 return 0;
2969 }
2970
2971 nbp = 0;
2972 dr6 = rdr6();
2973 bp = dr6 & 0x0000000f;
2974
2975 if (!bp) {
2976 /*
2977 * None of the breakpoint bits are set, meaning this
2978 * trap was not caused by any of the debug registers
2979 */
2980 return 0;
2981 }
2982
2983 /*
2984 * at least one of the breakpoints was hit; check to see
2985 * which ones, and whether any of them are user space addresses
2986 */
2987
2988 if (bp & 0x01) {
2989 addr[nbp++] = (caddr_t)rdr0();
2990 }
2991 if (bp & 0x02) {
2992 addr[nbp++] = (caddr_t)rdr1();
2993 }
2994 if (bp & 0x04) {
2995 addr[nbp++] = (caddr_t)rdr2();
2996 }
2997 if (bp & 0x08) {
2998 addr[nbp++] = (caddr_t)rdr3();
2999 }
3000
3001 for (i = 0; i < nbp; i++) {
3002 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
3003 /*
3004 * addr[i] is in user space
3005 */
3006 return nbp;
3007 }
3008 }
3009
3010 /*
3011 * None of the breakpoints are in user space.
3012 */
3013 return 0;
3014 }
3015
3016 #ifndef DEV_APIC
3017 #include <machine/apicvar.h>
3018
3019 /*
3020 * Provide stub functions so that the MADT APIC enumerator in the acpi
3021 * kernel module will link against a kernel without 'device apic'.
3022 *
3023 * XXX - This is a gross hack.
3024 */
3025 void
3026 apic_register_enumerator(struct apic_enumerator *enumerator)
3027 {
3028 }
3029
3030 void *
3031 ioapic_create(vm_paddr_t addr, int32_t apic_id, int intbase)
3032 {
3033 return (NULL);
3034 }
3035
3036 int
3037 ioapic_disable_pin(void *cookie, u_int pin)
3038 {
3039 return (ENXIO);
3040 }
3041
3042 int
3043 ioapic_get_vector(void *cookie, u_int pin)
3044 {
3045 return (-1);
3046 }
3047
3048 void
3049 ioapic_register(void *cookie)
3050 {
3051 }
3052
3053 int
3054 ioapic_remap_vector(void *cookie, u_int pin, int vector)
3055 {
3056 return (ENXIO);
3057 }
3058
3059 int
3060 ioapic_set_extint(void *cookie, u_int pin)
3061 {
3062 return (ENXIO);
3063 }
3064
3065 int
3066 ioapic_set_nmi(void *cookie, u_int pin)
3067 {
3068 return (ENXIO);
3069 }
3070
3071 int
3072 ioapic_set_polarity(void *cookie, u_int pin, enum intr_polarity pol)
3073 {
3074 return (ENXIO);
3075 }
3076
3077 int
3078 ioapic_set_triggermode(void *cookie, u_int pin, enum intr_trigger trigger)
3079 {
3080 return (ENXIO);
3081 }
3082
3083 void
3084 lapic_create(u_int apic_id, int boot_cpu)
3085 {
3086 }
3087
3088 void
3089 lapic_init(vm_paddr_t addr)
3090 {
3091 }
3092
3093 int
3094 lapic_set_lvt_mode(u_int apic_id, u_int lvt, u_int32_t mode)
3095 {
3096 return (ENXIO);
3097 }
3098
3099 int
3100 lapic_set_lvt_polarity(u_int apic_id, u_int lvt, enum intr_polarity pol)
3101 {
3102 return (ENXIO);
3103 }
3104
3105 int
3106 lapic_set_lvt_triggermode(u_int apic_id, u_int lvt, enum intr_trigger trigger)
3107 {
3108 return (ENXIO);
3109 }
3110 #endif
3111
3112 #ifdef KDB
3113
3114 /*
3115 * Provide inb() and outb() as functions. They are normally only
3116 * available as macros calling inlined functions, thus cannot be
3117 * called from the debugger.
3118 *
3119 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
3120 */
3121
3122 #undef inb
3123 #undef outb
3124
3125 /* silence compiler warnings */
3126 u_char inb(u_int);
3127 void outb(u_int, u_char);
3128
3129 u_char
3130 inb(u_int port)
3131 {
3132 u_char data;
3133 /*
3134 * We use %%dx and not %1 here because i/o is done at %dx and not at
3135 * %edx, while gcc generates inferior code (movw instead of movl)
3136 * if we tell it to load (u_short) port.
3137 */
3138 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
3139 return (data);
3140 }
3141
3142 void
3143 outb(u_int port, u_char data)
3144 {
3145 u_char al;
3146 /*
3147 * Use an unnecessary assignment to help gcc's register allocator.
3148 * This makes a large difference for gcc-1.40 and a tiny difference
3149 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for
3150 * best results. gcc-2.6.0 can't handle this.
3151 */
3152 al = data;
3153 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
3154 }
3155
3156 #endif /* KDB */