/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_apic.h"
#include "opt_atalk.h"
#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
#include "opt_xbox.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP
#include <sys/smp.h>
#endif
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#include <isa/rtc.h>

#include <net/netisr.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/vm86.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/smp.h>
#endif

#ifdef DEV_APIC
#include <machine/apicvar.h>
#endif

#ifdef DEV_ISA
#include <x86/isa/icu.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>

int arch_i386_is_xbox = 0;
uint32_t arch_i386_xbox_memsize = 0;
#endif

#ifdef XEN
/* XEN includes */
#include <machine/xen/xen-os.h>
#include <xen/hypervisor.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>
#include <xen/xen_intr.h>

void Xhypervisor_callback(void);
void failsafe_callback(void);

extern trap_info_t trap_table[];
struct proc_ldt default_proc_ldt;
extern int init_first;
int running_xen = 1;
extern unsigned long physfree;
#endif /* XEN */

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern void init386(int first);
extern void dblfault_handler(void);

extern void printcpuinfo(void);	/* XXX header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
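/*
 * Illustrative reading of the checks above (added for clarity):
 * EFL_SECURE() passes only when every bit that differs between the new
 * and old eflags lies within PSL_USERCHANGE; e.g. toggling PSL_C is
 * allowed, while an attempt to raise IOPL makes the xor intersect the
 * privileged mask and the check fails.  CS_SECURE() likewise insists
 * that a restored %cs selector carries user privilege (RPL == SEL_UPL).
 */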

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static int  set_fpcontext(struct thread *td, const mcontext_t *mcp);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

/* Intel ICH registers */
#define	ICH_PMBASE	0x400
#define	ICH_SMI_EN	(ICH_PMBASE + 0x30)

int	_udatasel, _ucodesel;
u_int	basemem;

int cold = 1;

#ifdef COMPAT_43
static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif
#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif

long Maxmem = 0;
long realmem = 0;

#ifdef PAE
FEATURE(pae, "Physical Address Extensions");
#endif

/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
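/*
 * Worked example (values for illustration only): a PHYSMAP chunk
 * [0x100000, 0x2000000) straddles the 16MB ISA DMA limit, so the VM
 * system records it as two PHYSSEG entries, [0x100000, 0x1000000) and
 * [0x1000000, 0x2000000); this is why one fewer PHYSMAP entry than
 * PHYSSEG entries is budgeted above.
 */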

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* The end index is two less than the array size so a 0,0 pair can terminate the chunk list. */
#define	PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define	DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
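/*
 * Layout sketch: phys_avail[] holds { start0, end0, start1, end1, ... }
 * pairs, and a consumer walks it as in the illustrative loop below
 * (use_range() is a hypothetical consumer, not a real function);
 * keeping the final two slots zeroed is what makes the 0,0 pair a
 * reliable terminator.
 *
 *	for (i = 0; phys_avail[i + 1] != 0; i += 2)
 *		use_range(phys_avail[i], phys_avail[i + 1]);
 */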

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

struct mtx icu_lock;

struct mem_range_softc mem_range_softc;

static void
cpu_startup(dummy)
	void *dummy;
{
	uintmax_t memsize;
	char *sysenv;

	/*
	 * On MacBooks, we need to disallow the legacy USB circuit to
	 * generate an SMI# because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable register) of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook4,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	realmem = Maxmem;

	/*
	 * Display physical memory if SMBIOS reports reasonable amount.
	 */
	memsize = 0;
	sysenv = getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
		freeenv(sysenv);
	}
	if (memsize < ptoa((uintmax_t)cnt.v_free_count))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
#ifndef XEN
	cpu_setregs();
#endif

	/*
	 * Add BSP as an interrupt target.
	 */
	intr_add_cpu(0);
}

/*
 * Send an interrupt to a process.
 *
 * The stack is set up so that the sigcode stored at its top calls the
 * handler routine, followed by a kernel call to the sigreturn routine
 * below.  After sigreturn resets the signal mask, the stack, and the
 * frame pointer, it returns to the user-specified pc and psl.
 */
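/*
 * Rough picture of the resulting user stack (sketch only; the stack
 * grows down):
 *
 *	fp/sfp -> saved context + handler arguments (struct *sigframe)
 *	          ...original user stack below the frame...
 *
 * The handler returns through the sigcode trampoline, which traps back
 * into the kernel via the matching sigreturn(2) variant.
 */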
#ifdef COMPAT_43
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
		sf.sf_addr = 0;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	if (p->p_sysent->sv_sigcode_base != 0) {
		regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
		    szosigcode;
	} else {
		/* a.out sysentvec does not use shared page */
		regs->tf_eip = p->p_sysent->sv_psstrings - szosigcode;
	}
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
	    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	bzero(sf.sf_uc.uc_mcontext.__spare__,
	    sizeof(sf.sf_uc.uc_mcontext.__spare__));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
	    szfreebsd4_sigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif	/* COMPAT_FREEBSD4 */

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	struct segment_descriptor *sdp;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext);
	fpstate_drop(td);
	/*
	 * Unconditionally fill the fsbase and gsbase into the mcontext.
	 */
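	/*
	 * Layout note (added for clarity): an i386 segment descriptor
	 * stores its 32-bit base split across sd_lobase (low 24 bits)
	 * and sd_hibase (high 8 bits), so the code below reassembles
	 * the full base as (sd_hibase << 24) | sd_lobase.
	 */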
	sdp = &td->td_pcb->pcb_fsd;
	sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sf.sf_uc.uc_mcontext.mc_flags = 0;
	bzero(sf.sf_uc.uc_mcontext.mc_spare2,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare2));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_esp - sizeof(struct sigframe);
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
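	/*
	 * E.g. (illustrative address only): sp == 0xbfbfe7fc masks
	 * down to sfp == 0xbfbfe7f0, rounding the frame start down to
	 * a 16 byte boundary.
	 */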

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = p->p_sysent->sv_sigcode_base;
	if (regs->tf_eip == 0)
		regs->tf_eip = p->p_sysent->sv_psstrings - szsigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
#ifdef COMPAT_43
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	int eflags, error;
	ksiginfo_t ksi;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
	    SIGPROCMASK_OLD);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * MPSAFE
 */
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
{
	struct ucontext4 uc;
	struct trapframe *regs;
	struct ucontext4 *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			uprintf("pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}
#endif	/* COMPAT_FREEBSD4 */

/*
 * MPSAFE
 */
int
sys_sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct trapframe *regs;
	ucontext_t *ucp;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	uint64_t tsc1, tsc2;
	uint64_t acnt, mcnt, perf;
	register_t reg;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);

	/*
	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9) based logic fails.
	 */
	if (tsc_is_invariant && !tsc_perf_stat)
		return (EOPNOTSUPP);

#ifdef SMP
	if (smp_cpus > 1) {
		/* Schedule ourselves on the indicated cpu. */
		thread_lock(curthread);
		sched_bind(curthread, cpu_id);
		thread_unlock(curthread);
	}
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		tsc1 = rdtsc();
		DELAY(1000);
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		tsc2 = rdtsc();
		intr_restore(reg);
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
	} else {
		tsc1 = rdtsc();
		DELAY(1000);
		tsc2 = rdtsc();
		intr_restore(reg);
		*rate = (tsc2 - tsc1) * 1000;
	}
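	/*
	 * Sketch of the math above (added for clarity): DELAY(1000)
	 * waits roughly 1ms, so (tsc2 - tsc1) * 1000 converts the TSC
	 * delta to Hz.  On the invariant-TSC path the fixed-rate delta
	 * is further scaled by acnt/mcnt (the APERF/MPERF ratio of
	 * actual to maximum performance) to estimate the current
	 * effective core clock.
	 */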

#ifdef SMP
	if (smp_cpus > 1) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}
#endif

	return (0);
}

#ifdef XEN

void
cpu_halt(void)
{
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}

int scheduler_running;

static void
cpu_idle_hlt(int busy)
{

	scheduler_running = 1;
	enable_intr();
	idle_block();
}

#else
/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

#endif

void (*cpu_idle_hook)(void) = NULL;	/* ACPI idle hook. */
static int	cpu_ident_amdc1e = 0;	/* AMD C1E supported. */
static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");

#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2
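/*
 * The per-CPU monitorbuf word doubles as the MWAIT wakeup channel: an
 * idle CPU parks in cpu_idle_mwait() monitoring it, and
 * cpu_idle_wakeup() (below) stores STATE_RUNNING into it, which
 * satisfies the monitor and wakes the CPU without an IPI.
 */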

static void
cpu_idle_acpi(int busy)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook();
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}

#ifndef XEN
static void
cpu_idle_hlt(int busy)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;
	/*
	 * We must absolutely guarantee that hlt is the next instruction
	 * after sti or we introduce a timing window.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}
#endif

/*
 * MWAIT cpu power states.  Lower 4 bits are sub-states.
 */
#define	MWAIT_C0	0xf0
#define	MWAIT_C1	0x00
#define	MWAIT_C2	0x10
#define	MWAIT_C3	0x20
#define	MWAIT_C4	0x30

static void
cpu_idle_mwait(int busy)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_MWAIT;
	if (!sched_runnable()) {
		cpu_monitor(state, 0, 0);
		if (*state == STATE_MWAIT)
			cpu_mwait(0, MWAIT_C1);
	}
	*state = STATE_RUNNING;
}

static void
cpu_idle_spin(int busy)
{
	int *state;
	int i;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_RUNNING;
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}
}

/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * Reference:
 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 *   #32559 revision 3.00+
 */
#define	MSR_AMDK8_IPM		0xc0010055
#define	AMDK8_SMIONCMPHALT	(1ULL << 27)
#define	AMDK8_C1EONCMPHALT	(1ULL << 28)
#define	AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)

static void
cpu_probe_amdc1e(void)
{

	/*
	 * Detect the presence of C1E capability mostly on latest
	 * dual-cores (or future) k8 family.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
	}
}

#ifdef XEN
void (*cpu_idle_fn)(int) = cpu_idle_hlt;
#else
void (*cpu_idle_fn)(int) = cpu_idle_acpi;
#endif

void
cpu_idle(int busy)
{
#ifndef XEN
	uint64_t msr;
#endif

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
#if defined(MP_WATCHDOG) && !defined(XEN)
	ap_watchdog(PCPU_GET(cpuid));
#endif
#ifndef XEN
	/* If we are busy - try to use fast methods. */
	if (busy) {
		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
			cpu_idle_mwait(busy);
			goto out;
		}
	}
#endif

	/* If we have time - switch timers into idle mode. */
	if (!busy) {
		critical_enter();
		cpu_idleclock();
	}

#ifndef XEN
	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
	}
#endif

	/* Call main idle method. */
	cpu_idle_fn(busy);

	/* Switch timers back into active mode. */
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
#ifndef XEN
out:
#endif
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{
	struct pcpu *pcpu;
	int *state;

	pcpu = pcpu_find(cpu);
	state = (int *)pcpu->pc_monitorbuf;
	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
	if (*state == STATE_SLEEPING)
		return (0);
	if (*state == STATE_MWAIT)
		*state = STATE_RUNNING;
	return (1);
}

/*
 * Ordered by speed/power consumption.
 */
struct {
	void	*id_fn;
	char	*id_name;
} idle_tbl[] = {
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_hlt, "hlt" },
	{ cpu_idle_acpi, "acpi" },
	{ NULL, NULL }
};

static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");

static int
idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	int error;
	char *p;
	int i;

	p = "unknown";
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		if (strcmp(idle_tbl[i].id_name, buf))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");

uint64_t (*atomic_load_acq_64)(volatile uint64_t *) =
    atomic_load_acq_64_i386;
void (*atomic_store_rel_64)(volatile uint64_t *, uint64_t) =
    atomic_store_rel_64_i386;

static void
cpu_probe_cmpxchg8b(void)
{

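	/*
	 * The RISE mP6 is said to implement cmpxchg8b without
	 * advertising CPUID_CX8, hence the explicit vendor check
	 * alongside the feature bit.
	 */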
	if ((cpu_feature & CPUID_CX8) != 0 ||
	    cpu_vendor_id == CPU_VENDOR_RISE) {
		atomic_load_acq_64 = atomic_load_acq_64_i586;
		atomic_store_rel_64 = atomic_store_rel_64_i586;
	}
}

/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pc->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = imgp->entry_addr;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = imgp->ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == curpcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;
	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();

	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

u_long bootdev;		/* not a struct cdev *- encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;

#ifdef XEN
union descriptor *gdt;
union descriptor *ldt;
#else
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#endif
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */
struct mtx dt_lock;			/* lock for GDT and LDT */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern vm_offset_t proc0kstack;

/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUFS_SEL	2 %fs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUGS_SEL	3 %gs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUCODE_SEL	6 Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUDATA_SEL	7 Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	.ssd_base = 0x400,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
#ifndef XEN
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GLDT_SEL	10 LDT Descriptor */
{	.ssd_base = (int) ldt,
	.ssd_limit = sizeof(ldt)-1,
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	.ssd_base = (int) ldt,
	.ssd_limit = (512 * sizeof(union descriptor)-1),
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	.ssd_base = (int) &dblfault_tss,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GNDIS_SEL	18 NDIS Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
#endif /* !XEN */
};

static struct soft_segment_descriptor ldt_segs[] = {
/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func)>>16;
}
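/*
 * Example of the offset split above (illustrative address only):
 * func == 0xc0a1b2c3 is stored as gd_looffset == 0xb2c3 and
 * gd_hioffset == 0xc0a1; the CPU reassembles the 32-bit handler entry
 * point when the gate is used.
 */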
1913
1914 extern inthand_t
1915 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1916 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1917 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1918 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1919 IDTVEC(xmm),
1920 #ifdef KDTRACE_HOOKS
1921 IDTVEC(dtrace_ret),
1922 #endif
1923 IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
1924
1925 #ifdef DDB
1926 /*
1927 * Display the index and function name of any IDT entries that don't use
1928 * the default 'rsvd' entry point.
1929 */
1930 DB_SHOW_COMMAND(idt, db_show_idt)
1931 {
1932 struct gate_descriptor *ip;
1933 int idx;
1934 uintptr_t func;
1935
1936 ip = idt;
1937 for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
1938 func = (ip->gd_hioffset << 16 | ip->gd_looffset);
1939 if (func != (uintptr_t)&IDTVEC(rsvd)) {
1940 db_printf("%3d\t", idx);
1941 db_printsym(func, DB_STGY_PROC);
1942 db_printf("\n");
1943 }
1944 ip++;
1945 }
1946 }

/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
{
	uint64_t idtr, gdtr;

	idtr = ridt();
	db_printf("idtr\t0x%08x/%04x\n",
	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
	gdtr = rgdt();
	db_printf("gdtr\t0x%08x/%04x\n",
	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
	db_printf("ldtr\t0x%04x\n", rldt());
	db_printf("tr\t0x%04x\n", rtr());
	db_printf("cr0\t0x%08x\n", rcr0());
	db_printf("cr2\t0x%08x\n", rcr2());
	db_printf("cr3\t0x%08x\n", rcr3());
	db_printf("cr4\t0x%08x\n", rcr4());
	if (amd_feature & (AMDID_NX | AMDID_LM))
		db_printf("EFER\t0x%016llx\n", rdmsr(MSR_EFER));
	if (cpu_feature2 & (CPUID2_VMX | CPUID2_SMX))
		db_printf("FEATURES_CTL\t0x%016llx\n",
		    rdmsr(MSR_IA32_FEATURE_CONTROL));
	if ((cpu_vendor_id == CPU_VENDOR_INTEL ||
	    cpu_vendor_id == CPU_VENDOR_AMD) && CPUID_TO_FAMILY(cpu_id) >= 6)
		db_printf("DEBUG_CTL\t0x%016llx\n", rdmsr(MSR_DEBUGCTLMSR));
	if (cpu_feature & CPUID_PAT)
		db_printf("PAT\t0x%016llx\n", rdmsr(MSR_PAT));
}

DB_SHOW_COMMAND(dbregs, db_show_dbregs)
{

	db_printf("dr0\t0x%08x\n", rdr0());
	db_printf("dr1\t0x%08x\n", rdr1());
	db_printf("dr2\t0x%08x\n", rdr2());
	db_printf("dr3\t0x%08x\n", rdr3());
	db_printf("dr6\t0x%08x\n", rdr6());
	db_printf("dr7\t0x%08x\n", rdr7());
}
#endif

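/*
 * Unpack a hardware segment descriptor into the software form.  In the
 * hardware layout the 32-bit base is split 24/8 and the 20-bit limit
 * 16/4 across the descriptor, which is what the shifts below undo.
 */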
void
sdtossd(struct segment_descriptor *sd, struct soft_segment_descriptor *ssd)
{

	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

#ifndef XEN
static int
add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
{
	int i, insert_idx, physmap_idx;

	physmap_idx = *physmap_idxp;

	if (boothowto & RB_VERBOSE)
		printf("SMAP type=%02x base=%016llx len=%016llx\n",
		    smap->type, smap->base, smap->length);

	if (smap->type != SMAP_TYPE_MEMORY)
		return (1);

	if (smap->length == 0)
		return (1);

#ifndef PAE
	if (smap->base > 0xffffffff) {
		printf("%uK of memory above 4GB ignored\n",
		    (u_int)(smap->length / 1024));
		return (1);
	}
#endif

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = physmap_idx + 2;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (smap->base < physmap[i + 1]) {
			if (smap->base + smap->length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= physmap_idx &&
	    smap->base + smap->length == physmap[insert_idx]) {
		physmap[insert_idx] = smap->base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && smap->base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += smap->length;
		return (1);
	}

	physmap_idx += 2;
	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = smap->base;
	physmap[insert_idx + 1] = smap->base + smap->length;
	return (1);
}
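/*
 * Illustrative physmap layout (hypothetical values): after the entries
 * [0, 0x9f000) and [0x100000, 0x7ff0000) have been added, physmap[]
 * holds base/end pairs
 *
 *	physmap[0] = 0x0        physmap[1] = 0x9f000
 *	physmap[2] = 0x100000   physmap[3] = 0x7ff0000
 *
 * and physmap_idx (2) indexes the base of the last pair.  Adjacent
 * regions are coalesced by the prepend/append checks above.
 */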

static void
basemem_setup(void)
{
	vm_paddr_t pa;
	pt_entry_t *pte;
	int i;

	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
		    basemem);
		basemem = 640;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is a `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	    pa < ISA_HOLE_START; pa += PAGE_SIZE)
		pmap_kenter(KERNBASE + pa, pa);

	/*
	 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
	 * the vm86 page table so that vm86 can scribble on them using
	 * the vm86 map too.  XXX: why 2 ways for this and only 1 way for
	 * page 0, at least as initialized here?
	 */
	pte = (pt_entry_t *)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
}
#endif

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the BIOS 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(int first)
{
	int has_smap, off, physmap_idx, pa_indx, da_indx;
	u_long physmem_tunable, memtest;
	vm_paddr_t physmap[PHYSMAP_SIZE];
	pt_entry_t *pte;
	quad_t dcons_addr, dcons_size;
#ifndef XEN
	int hasbrokenint12, i;
	u_int extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_paddr_t pa;
	struct bios_smap *smap, *smapbase, *smapend;
	u_int32_t smapsize;
	caddr_t kmdp;
#endif

	has_smap = 0;
#if defined(XEN)
	Maxmem = xen_start_info->nr_pages - init_first;
	physmem = Maxmem;
	basemem = 0;
	physmap[0] = init_first << PAGE_SHIFT;
	physmap[1] = ptoa(Maxmem) - round_page(msgbufsize);
	physmap_idx = 0;
#else
#ifdef XBOX
	if (arch_i386_is_xbox) {
		/*
		 * We queried the memory size before, so chop off 4MB for
		 * the framebuffer and inform the OS of this.
		 */
		physmap[0] = 0;
		physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE;
		physmap_idx = 0;
		goto physmap_done;
	}
#endif
	bzero(&vmf, sizeof(vmf));
	bzero(physmap, sizeof(physmap));
	basemem = 0;

	/*
	 * Check if the loader supplied an SMAP memory map.  If so,
	 * use that and do not make any VM86 calls.
	 */
	physmap_idx = 0;
	smapbase = NULL;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf32 kernel");
	if (kmdp != NULL)
		smapbase = (struct bios_smap *)preload_search_info(kmdp,
		    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase != NULL) {
		/*
		 * subr_module.c says:
		 * "Consumer may safely assume that size value precedes data."
		 * i.e., an int32_t immediately precedes the SMAP.
		 */
		smapsize = *((u_int32_t *)smapbase - 1);
		smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
		has_smap = 1;

		for (smap = smapbase; smap < smapend; smap++)
			if (!add_smap_entry(smap, physmap, &physmap_idx))
				break;
		goto have_smap;
	}

	/*
	 * Some newer BIOSes have a broken INT 12H implementation
	 * which causes a kernel panic immediately.  In this case, we
	 * need to use the SMAP to determine the base memory size.
	 */
	hasbrokenint12 = 0;
	TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
	if (hasbrokenint12 == 0) {
		/* Use INT12 to determine base memory size. */
		vm86_intcall(0x12, &vmf);
		basemem = vmf.vmf_ax;
		basemem_setup();
	}

	/*
	 * Fetch the memory map with INT 15:E820.  Map page 1 R/W into
	 * the kernel page table so we can use it as a buffer.  The
	 * kernel will unmap this page later.
	 */
	pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		has_smap = 1;
		if (!add_smap_entry(smap, physmap, &physmap_idx))
			break;
	} while (vmf.vmf_ebx != 0);

have_smap:
	/*
	 * If we didn't fetch the "base memory" size from INT12,
	 * figure it out from the SMAP (or just guess).
	 */
	if (basemem == 0) {
		for (i = 0; i <= physmap_idx; i += 2) {
			if (physmap[i] == 0x00000000) {
				basemem = physmap[i + 1] / 1024;
				break;
			}
		}

		/* XXX: If we couldn't find basemem from SMAP, just guess. */
		if (basemem == 0)
			basemem = 640;
		basemem_setup();
	}

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed to find an SMAP, figure out the extended
	 * memory size.  We will then build a simple memory map with
	 * two segments, one for "base memory" and the second for
	 * "extended memory".  Note that "extended memory" starts at a
	 * physical address of 1MB and that both basemem and extmem
	 * are in units of 1KB.
	 *
	 * First, try to fetch the extended memory size via INT 15:E801.
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
		/*
		 * If INT15:E801 fails, this is our last-ditch effort
		 * to determine the extended memory size.  Currently
		 * we prefer the RTC value over INT15:88.
		 */
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
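	/*
	 * Illustrative values: with basemem = 640 and extmem = 15360
	 * (15MB), the map built above is two segments, [0, 640K) and
	 * [1M, 16M).
	 */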

physmap_done:
#endif
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1]);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
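	/* MAXMEM is specified in kilobytes; Maxmem counts 4K pages. */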
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	/*
	 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
	 * the amount of memory in the system.
	 */
	if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
		Maxmem = atop(physmap[physmap_idx + 1]);

	/*
	 * By default enable the memory test on real hardware, and disable
	 * it if we appear to be running in a VM.  This avoids touching all
	 * pages unnecessarily, which doesn't matter on real hardware but is
	 * bad for shared VM hosts.  The tunable name is kept general so
	 * that the code could eventually do more than just disable the test.
	 */
	memtest = (vm_guest > VM_GUEST_NO) ? 0 : 1;
	TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];
	pte = CMAP3;

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;
#ifndef XEN
	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr = (int *)CADDR3;

			full = FALSE;
			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= KERNLOAD && pa < first)
				goto do_dump_avail;

			/*
			 * block out dcons buffer
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = FALSE;
			if (memtest == 0)
				goto skip_memtest;
			/*
			 * map page into kernel: valid, read/write, non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = TRUE;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = TRUE;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = TRUE;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = TRUE;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;
skip_memtest:
			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that the stored "end" address points one
			 * past the last byte, making the range >= start
			 * and < end.
			 * If we're also doing a speculative memory
			 * test and we are at or past the end, bump up
			 * Maxmem so that we keep going.  The first bad
			 * page will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = TRUE;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == DUMP_AVAIL_ARRAY_END) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa;	/* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	*pte = 0;
	invltlb();
#else
	phys_avail[0] = physfree;
	phys_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;
	dump_avail[0] = 0;
	dump_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;

#endif

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(msgbufsize) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(msgbufsize);

	/* Map the message buffer. */
	for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
		    off);

	PT_UPDATES_FLUSH();
}

#ifdef XEN
#define MTOPSIZE (1<<(14 + PAGE_SHIFT))

void
init386(int first)
{
	unsigned long gdtmachpfn;
	int error, gsel_tss, metadata_missing, x, pa;
	size_t kstack0_sz;
	struct pcpu *pc;
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)Xhypervisor_callback },
	};
	struct callback_register failsafe = {
		.type = CALLBACKTYPE_failsafe,
		.address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback },
	};

	thread0.td_kstack = proc0kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
	kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack + kstack0_sz) - 1;

	/*
	 * This may be done better later if it gets more high-level
	 * components in it.  If so, just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

	metadata_missing = 0;
	if (xen_start_info->mod_start) {
		preload_metadata = (caddr_t)xen_start_info->mod_start;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		metadata_missing = 1;
	}
	if (envmode == 1)
		kern_envp = static_env;
	else if ((caddr_t)xen_start_info->cmd_line)
		kern_envp = xen_setbootenv((caddr_t)xen_start_info->cmd_line);

	boothowto |= xen_boothowto(kern_envp);

	/* Init basic tunables, hz etc */
	init_param1();

	/*
	 * Xen occupies a portion of the upper virtual address space.
	 * At its base it manages an array mapping machine page frames
	 * to physical page frames, hence we need to be able to
	 * access 4GB - (64MB - 4MB + 64k).
	 */
	gdt_segs[GPRIV_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GUFS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GUGS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GUCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GUDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GBIOSLOWMEM_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);

	pc = &__pcpu[0];
	gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

	PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V | PG_RW);
	bzero(gdt, PAGE_SIZE);
	for (x = 0; x < NGDT; x++)
		ssdtosd(&gdt_segs[x], &gdt[x].sd);

	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);

	gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
	PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V);
	PANIC_IF(HYPERVISOR_set_gdt(&gdtmachpfn, 512) != 0);
	lgdt(&r_gdt);
	gdtset = 1;

	if ((error = HYPERVISOR_set_trap_table(trap_table)) != 0) {
		panic("set_trap_table failed - error %d\n", error);
	}

	error = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
	if (error == 0)
		error = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
#if	CONFIG_XEN_COMPAT <= 0x030002
	if (error == -ENOXENSYS)
		HYPERVISOR_set_callbacks(GSEL(GCODE_SEL, SEL_KPL),
		    (unsigned long)Xhypervisor_callback,
		    GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback);
#endif
	pcpu_init(pc, 0, sizeof(struct pcpu));
	for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
		pmap_kenter(pa + KERNBASE, pa);
	dpcpu_init((void *)(first + KERNBASE), 0);
	first += DPCPU_SIZE;
	physfree += DPCPU_SIZE;
	init_first += DPCPU_SIZE / PAGE_SIZE;

	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	PCPU_SET(curpcb, thread0.td_pcb);

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);

	/* make ldt memory segments */
	PT_SET_MA(ldt, xpmap_ptom(VTOP(ldt)) | PG_V | PG_RW);
	bzero(ldt, PAGE_SIZE);
	ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	default_proc_ldt.ldt_base = (caddr_t)ldt;
	default_proc_ldt.ldt_len = 6;
	_default_ldt = (int)&default_proc_ldt;
	PCPU_SET(currentldt, _default_ldt);
	PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW);
	xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));

#if defined(XEN_PRIVILEGED)
	/*
	 * Initialize the i8254 before the console so that console
	 * initialization can use DELAY().
	 */
	i8254_init();
#endif

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

#ifdef DEV_ISA
#ifdef DEV_ATPIC
	elcr_probe();
	atpic_startup();
#else
	/* Reset and mask the atpics and leave them shut down. */
	atpic_reset();

	/*
	 * Point the ICU spurious interrupt vectors at the APIC spurious
	 * interrupt handler.
	 */
	setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
#endif
#endif

#ifdef DDB
	ksym_start = bootinfo.bi_symtab;
	ksym_end = bootinfo.bi_esymtab;
#endif

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
	    kstack0_sz - sizeof(struct pcb) - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL),
	    PCPU_GET(common_tss.tss_esp0));

	/* pointer to selector slot for %fs/%gs */
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
#ifdef PAE
	dblfault_tss.tss_cr3 = (int)IdlePDPT;
#else
	dblfault_tss.tss_cr3 = (int)IdlePTD;
#endif
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);
	init_param2(physmem);

	/* now running on new page tables, configured, and u/iom is accessible */

	msgbufinit(msgbufp, msgbufsize);
	/* transfer to user mode */

	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
#ifdef PAE
	thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
#else
	thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
#endif
	thread0.td_pcb->pcb_ext = 0;
	thread0.td_frame = &proc0_tf;
	thread0.td_pcb->pcb_fsd = PCPU_GET(fsgs_gdt)[0];
	thread0.td_pcb->pcb_gsd = PCPU_GET(fsgs_gdt)[1];

	cpu_probe_amdc1e();
	cpu_probe_cmpxchg8b();
}

#else
void
init386(int first)
{
	struct gate_descriptor *gdp;
	int gsel_tss, metadata_missing, x, pa;
	size_t kstack0_sz;
	struct pcpu *pc;

	thread0.td_kstack = proc0kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
	kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack + kstack0_sz) - 1;

	/*
	 * This may be done better later if it gets more high-level
	 * components in it.  If so, just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

	metadata_missing = 0;
	if (bootinfo.bi_modulep) {
		preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		metadata_missing = 1;
	}
	if (envmode == 1)
		kern_envp = static_env;
	else if (bootinfo.bi_envp)
		kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;

	/* Init basic tunables, hz etc */
	init_param1();

	/*
	 * Make gdt memory segments.  All segments cover the full 4GB
	 * of address space and permissions are enforced at page level.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);

	pc = &__pcpu[0];
	gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

	for (x = 0; x < NGDT; x++)
		ssdtosd(&gdt_segs[x], &gdt[x].sd);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) gdt;
	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
	lgdt(&r_gdt);

	pcpu_init(pc, 0, sizeof(struct pcpu));
	for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
		pmap_kenter(pa + KERNBASE, pa);
	dpcpu_init((void *)(first + KERNBASE), 0);
	first += DPCPU_SIZE;
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	PCPU_SET(curpcb, thread0.td_pcb);

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);

	/* make ldt memory segments */
	ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
#ifdef KDTRACE_HOOKS
	setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
#endif

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);

#ifdef XBOX
	/*
	 * The following code queries the PCI ID of 0:0:0.  For the XBOX,
	 * this should be 0x10de / 0x02a5.
	 *
	 * This is exactly what Linux does.
	 */
	outl(0xcf8, 0x80000000);
	if (inl(0xcfc) == 0x02a510de) {
		arch_i386_is_xbox = 1;
		pic16l_setled(XBOX_LED_GREEN);

		/*
		 * We are an XBOX, but we may have either 64MB or 128MB of
		 * memory.  The PCI host bridge should be programmed for this,
		 * so we just query it.
		 */
		outl(0xcf8, 0x80000084);
		arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
	}
#endif /* XBOX */

	/*
	 * Initialize the i8254 before the console so that console
	 * initialization can use DELAY().
	 */
	i8254_init();

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

#ifdef DEV_ISA
#ifdef DEV_ATPIC
	elcr_probe();
	atpic_startup();
#else
	/* Reset and mask the atpics and leave them shut down. */
	atpic_reset();

	/*
	 * Point the ICU spurious interrupt vectors at the APIC spurious
	 * interrupt handler.
	 */
	setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
#endif
#endif

#ifdef DDB
	ksym_start = bootinfo.bi_symtab;
	ksym_end = bootinfo.bi_esymtab;
#endif

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
	    kstack0_sz - sizeof(struct pcb) - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);

	/* pointer to selector slot for %fs/%gs */
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
#ifdef PAE
	dblfault_tss.tss_cr3 = (int)IdlePDPT;
#else
	dblfault_tss.tss_cr3 = (int)IdlePTD;
#endif
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);
	init_param2(physmem);

	/* now running on new page tables, configured, and u/iom is accessible */

	msgbufinit(msgbufp, msgbufsize);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;

	/* XXX does this work? */
	/* XXX yes! */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
#ifdef PAE
	thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
#else
	thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
#endif
	thread0.td_pcb->pcb_ext = 0;
	thread0.td_frame = &proc0_tf;

	cpu_probe_amdc1e();
	cpu_probe_cmpxchg8b();
}
#endif

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}

void
spinlock_enter(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		flags = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_flags = flags;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	critical_exit();
	flags = td->td_md.md_saved_flags;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(flags);
}
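/*
 * Illustrative nesting behaviour of the pair above: the first
 * spinlock_enter() on a thread disables interrupts and saves the flags;
 * nested calls only bump the count, and the saved flags are restored
 * once the count drops back to zero:
 *
 *	spinlock_enter();	// count 0 -> 1, interrupts disabled
 *	spinlock_enter();	// count 1 -> 2
 *	spinlock_exit();	// count 2 -> 1
 *	spinlock_exit();	// count 1 -> 0, saved flags restored
 */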

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	GIANT_REQUIRED;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");

	/* Put the problematic entry (#6) at the end of the lower page. */
	new_idt = (struct gate_descriptor*)
	    (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (u_int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
}
#endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_edi = tf->tf_edi;
	pcb->pcb_esi = tf->tf_esi;
	pcb->pcb_ebp = tf->tf_ebp;
	pcb->pcb_ebx = tf->tf_ebx;
	pcb->pcb_eip = tf->tf_eip;
	/* Kernel-mode traps do not push %esp/%ss; back off those 8 bytes. */
	pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
	pcb->pcb_gs = rgs();
}

int
ptrace_set_pc(struct thread *td, u_long addr)
{

	td->td_frame->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{
	td->td_frame->tf_eflags |= PSL_T;
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	td->td_frame->tf_eflags &= ~PSL_T;
	return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	pcb = td->td_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (fill_frame_regs(tp, regs));
}

int
fill_frame_regs(struct trapframe *tp, struct reg *regs)
{
	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	pcb = td->td_pcb;
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

#ifdef CPU_ENABLE_SSE
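/*
 * Translate between the FXSAVE (savexmm) and legacy FNSAVE (save87)
 * layouts.  Only the x87 state the two formats share is converted;
 * the SSE portion of the FXSAVE image has no legacy representation
 * and is simply not carried across.
 */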
static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	bzero(sv_87, sizeof(*sv_87));

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
}

static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
}
#endif /* CPU_ENABLE_SSE */

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{

	KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
	    P_SHOULDSTOP(td->td_proc),
	    ("not suspended thread %p", td));
#ifdef DEV_NPX
	npxgetregs(td);
#else
	bzero(fpregs, sizeof(*fpregs));
#endif
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		fill_fpregs_xmm(&td->td_pcb->pcb_user_save.sv_xmm,
		    (struct save87 *)fpregs);
	else
#endif /* CPU_ENABLE_SSE */
		bcopy(&td->td_pcb->pcb_user_save.sv_87, fpregs,
		    sizeof(*fpregs));
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{

#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		set_fpregs_xmm((struct save87 *)fpregs,
		    &td->td_pcb->pcb_user_save.sv_xmm);
	else
#endif /* CPU_ENABLE_SSE */
		bcopy(fpregs, &td->td_pcb->pcb_user_save.sv_87,
		    sizeof(*fpregs));
#ifdef DEV_NPX
	npxuserinited(td);
#endif
	return (0);
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct trapframe *tp;
	struct segment_descriptor *sdp;

	tp = td->td_frame;

	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_esp);
	PROC_UNLOCK(curthread->td_proc);
	mcp->mc_gs = td->td_pcb->pcb_gs;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_es = tp->tf_es;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_edi = tp->tf_edi;
	mcp->mc_esi = tp->tf_esi;
	mcp->mc_ebp = tp->tf_ebp;
	mcp->mc_isp = tp->tf_isp;
	mcp->mc_eflags = tp->tf_eflags;
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_eax = 0;
		mcp->mc_edx = 0;
		mcp->mc_eflags &= ~PSL_C;
	} else {
		mcp->mc_eax = tp->tf_eax;
		mcp->mc_edx = tp->tf_edx;
	}
	mcp->mc_ebx = tp->tf_ebx;
	mcp->mc_ecx = tp->tf_ecx;
	mcp->mc_eip = tp->tf_eip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_esp = tp->tf_esp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp);
	sdp = &td->td_pcb->pcb_fsd;
	mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
	mcp->mc_flags = 0;
	bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
	return (0);
}

/*
 * Set machine context.
 *
 * We only set the user-modifiable flags, and we never touch the
 * %cs selector.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tp;
	int eflags, ret;

	tp = td->td_frame;
	if (mcp->mc_len != sizeof(*mcp))
		return (EINVAL);
	eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
	    (tp->tf_eflags & ~PSL_USERCHANGE);
	if ((ret = set_fpcontext(td, mcp)) == 0) {
		tp->tf_fs = mcp->mc_fs;
		tp->tf_es = mcp->mc_es;
		tp->tf_ds = mcp->mc_ds;
		tp->tf_edi = mcp->mc_edi;
		tp->tf_esi = mcp->mc_esi;
		tp->tf_ebp = mcp->mc_ebp;
		tp->tf_ebx = mcp->mc_ebx;
		tp->tf_edx = mcp->mc_edx;
		tp->tf_ecx = mcp->mc_ecx;
		tp->tf_eax = mcp->mc_eax;
		tp->tf_eip = mcp->mc_eip;
		tp->tf_eflags = eflags;
		tp->tf_esp = mcp->mc_esp;
		tp->tf_ss = mcp->mc_ss;
		td->td_pcb->pcb_gs = mcp->mc_gs;
		ret = 0;
	}
	return (ret);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{

#ifndef DEV_NPX
	mcp->mc_fpformat = _MC_FPFMT_NODEV;
	mcp->mc_ownedfp = _MC_FPOWNED_NONE;
	bzero(mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
#else
	mcp->mc_ownedfp = npxgetregs(td);
	bcopy(&td->td_pcb->pcb_user_save, &mcp->mc_fpstate[0],
	    sizeof(mcp->mc_fpstate));
	mcp->mc_fpformat = npxformat();
#endif
}

static int
set_fpcontext(struct thread *td, const mcontext_t *mcp)
{

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
	    mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
#ifdef DEV_NPX
#ifdef CPU_ENABLE_SSE
		if (cpu_fxsr)
			((union savefpu *)&mcp->mc_fpstate)->sv_xmm.sv_env.
			    en_mxcsr &= cpu_mxcsr_mask;
#endif
		npxsetregs(td, (union savefpu *)&mcp->mc_fpstate);
#endif
	} else
		return (EINVAL);
	return (0);
}

static void
fpstate_drop(struct thread *td)
{

	KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
	critical_enter();
#ifdef DEV_NPX
	if (PCPU_GET(fpcurthread) == td)
		npxdrop();
#endif
	/*
	 * XXX force a full drop of the npx.  The above only drops it if we
	 * owned it.  npxgetregs() has the same bug in the !cpu_fxsr case.
	 *
	 * XXX I don't much like npxgetregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of npxgetregs()... perhaps we just
	 * have too many layers.
	 */
	curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
	    PCB_NPXUSERINITDONE);
	critical_exit();
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[4] = rdr4();
		dbregs->dr[5] = rdr5();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[4] = 0;
		dbregs->dr[5] = 0;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr4(dbregs->dr[4]);
		load_dr5(dbregs->dr[5]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP.
		 */
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
		}
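		/*
		 * For reference: DR7 encodes, per breakpoint, a 2-bit
		 * access type and a 2-bit length.  The 0x02 patterns
		 * rejected above are the ones the architecture leaves
		 * undefined here: access 10 (I/O breakpoints, which
		 * require CR4.DE) and length 10 (8-byte, not defined
		 * for this class of CPU).
		 */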

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within the
		 * process's address space.  If a process could do this, it
		 * could halt the system by setting a breakpoint in the kernel
		 * (if ddb was enabled).  Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * the process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * None of the enable bits (L0-L3, G0-G3) in the dr7
		 * register are set, thus the trap couldn't have been
		 * caused by the hardware debug registers.
		 */
		return (0);
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers.
		 */
		return (0);
	}

	/*
	 * At least one of the breakpoints was hit; check which ones
	 * and whether any of them are user space addresses.
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return (nbp);
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return (0);
}

#ifdef KDB

/*
 * Provide inb() and outb() as functions.  They are normally only available as
 * inline functions, thus cannot be called from the debugger.
 */

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);

u_char
inb_(u_short port)
{
	return (inb(port));
}

void
outb_(u_short port, u_char data)
{
	outb(port, data);
}

#endif /* KDB */