/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.1/sys/i386/i386/machdep.c 235796 2012-05-22 17:44:01Z iwasaki $");

#include "opt_apic.h"
#include "opt_atalk.h"
#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
#include "opt_xbox.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP
#include <sys/smp.h>
#endif
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#include <isa/rtc.h>

#include <net/netisr.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/vm86.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/smp.h>
#endif

#ifdef DEV_APIC
#include <machine/apicvar.h>
#endif

#ifdef DEV_ISA
#include <x86/isa/icu.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>

int arch_i386_is_xbox = 0;
uint32_t arch_i386_xbox_memsize = 0;
#endif

#ifdef XEN
/* XEN includes */
#include <machine/xen/xen-os.h>
#include <xen/hypervisor.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>
#include <xen/xen_intr.h>

void Xhypervisor_callback(void);
void failsafe_callback(void);

extern trap_info_t trap_table[];
struct proc_ldt default_proc_ldt;
extern int init_first;
int running_xen = 1;
extern unsigned long physfree;
#endif /* XEN */

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern void init386(int first);
extern void dblfault_handler(void);

extern void printcpuinfo(void);	/* XXX header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);

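/*
 * CS_SECURE() accepts only code selectors whose privilege level is
 * SEL_UPL, and EFL_SECURE() accepts only eflags changes confined to the
 * PSL_USERCHANGE bits, so a user-supplied context cannot raise privilege.
 */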
#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static int  set_fpcontext(struct thread *td, const mcontext_t *mcp);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

/* Intel ICH registers */
#define ICH_PMBASE	0x400
#define ICH_SMI_EN	(ICH_PMBASE + 0x30)

int	_udatasel, _ucodesel;
u_int	basemem;

int cold = 1;

#ifdef COMPAT_43
static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif
#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif

long Maxmem = 0;
long realmem = 0;

#ifdef PAE
FEATURE(pae, "Physical Address Extensions");
#endif

/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

struct mtx icu_lock;

static void
cpu_startup(void *dummy)
{
	uintmax_t memsize;
	char *sysenv;

	/*
	 * On MacBooks, we need to disallow the legacy USB circuit to
	 * generate an SMI# because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable register) of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	realmem = Maxmem;

	/*
	 * Display physical memory if SMBIOS reports reasonable amount.
	 */
	memsize = 0;
	sysenv = getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
		freeenv(sysenv);
	}
	if (memsize < ptoa((uintmax_t)cnt.v_free_count))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory  = %ju (%ju MB)\n", memsize, memsize >> 20);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
#ifndef XEN
	cpu_setregs();
#endif

	/*
	 * Add BSP as an interrupt target.
	 */
	intr_add_cpu(0);
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
#ifdef COMPAT_43
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
		sf.sf_addr = 0;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
	    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	bzero(sf.sf_uc.uc_mcontext.__spare__,
	    sizeof(sf.sf_uc.uc_mcontext.__spare__));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif	/* COMPAT_FREEBSD4 */

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	struct segment_descriptor *sdp;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext);
	fpstate_drop(td);
	/*
	 * Unconditionally fill the fsbase and gsbase into the mcontext.
	 */
	sdp = &td->td_pcb->pcb_fsd;
	sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sf.sf_uc.uc_mcontext.mc_flags = 0;
	bzero(sf.sf_uc.uc_mcontext.mc_spare2,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare2));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_esp - sizeof(struct sigframe);
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
#ifdef COMPAT_43
int
osigreturn(struct thread *td,
    struct osigreturn_args /* { struct osigcontext *sigcntxp; } */ *uap)
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	int eflags, error;
	ksiginfo_t ksi;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
	    SIGPROCMASK_OLD);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * MPSAFE
 */
int
freebsd4_sigreturn(struct thread *td,
    struct freebsd4_sigreturn_args /* { const ucontext4 *sigcntxp; } */ *uap)
{
	struct ucontext4 uc;
	struct trapframe *regs;
	struct ucontext4 *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			uprintf("pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}
#endif	/* COMPAT_FREEBSD4 */

/*
 * MPSAFE
 */
int
sys_sigreturn(struct thread *td,
    struct sigreturn_args /* { const struct __ucontext *sigcntxp; } */ *uap)
{
	ucontext_t uc;
	struct trapframe *regs;
	ucontext_t *ucp;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	uint64_t tsc1, tsc2;
	uint64_t acnt, mcnt, perf;
	register_t reg;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);

	/*
	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9) based logic fails.
	 */
	if (tsc_is_invariant && !tsc_perf_stat)
		return (EOPNOTSUPP);

#ifdef SMP
	if (smp_cpus > 1) {
		/* Schedule ourselves on the indicated cpu. */
		thread_lock(curthread);
		sched_bind(curthread, cpu_id);
		thread_unlock(curthread);
	}
#endif

	/* Calibrate by measuring a short delay. */
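	/*
	 * DELAY(1000) waits roughly one millisecond, so the TSC delta
	 * scaled by 1000 approximates cycles per second.  With an
	 * invariant TSC, the APERF/MPERF ratio rescales the result by the
	 * ratio of actual to reference cycles over the same interval.
	 */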
	reg = intr_disable();
	if (tsc_is_invariant) {
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		tsc1 = rdtsc();
		DELAY(1000);
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		tsc2 = rdtsc();
		intr_restore(reg);
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
	} else {
		tsc1 = rdtsc();
		DELAY(1000);
		tsc2 = rdtsc();
		intr_restore(reg);
		*rate = (tsc2 - tsc1) * 1000;
	}

#ifdef SMP
	if (smp_cpus > 1) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}
#endif

	return (0);
}

#ifdef XEN

void
cpu_halt(void)
{
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}

int scheduler_running;

static void
cpu_idle_hlt(int busy)
{

	scheduler_running = 1;
	enable_intr();
	idle_block();
}

#else
/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

#endif

void (*cpu_idle_hook)(void) = NULL;	/* ACPI idle hook. */
static int	cpu_ident_amdc1e = 0;	/* AMD C1E supported. */
static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");

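/*
 * Each CPU publishes its idle state in the first word of its pcpu
 * monitorbuf, so cpu_idle_wakeup() can tell whether a plain store is
 * enough to leave MWAIT or whether an IPI is required.
 */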
#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2

static void
cpu_idle_acpi(int busy)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook();
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}

#ifndef XEN
static void
cpu_idle_hlt(int busy)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;
	/*
	 * We must absolutely guarantee that hlt is the next instruction
	 * after sti or we introduce a timing window.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}
#endif

/*
 * MWAIT cpu power states.  Lower 4 bits are sub-states.
 */
#define	MWAIT_C0	0xf0
#define	MWAIT_C1	0x00
#define	MWAIT_C2	0x10
#define	MWAIT_C3	0x20
#define	MWAIT_C4	0x30

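/*
 * cpu_monitor() arms the monitor on the state word before MWAIT; the
 * re-check of *state afterwards catches a cpu_idle_wakeup() store that
 * landed first, and any later store wakes the MWAIT itself.
 */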
static void
cpu_idle_mwait(int busy)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_MWAIT;
	if (!sched_runnable()) {
		cpu_monitor(state, 0, 0);
		if (*state == STATE_MWAIT)
			cpu_mwait(0, MWAIT_C1);
	}
	*state = STATE_RUNNING;
}

static void
cpu_idle_spin(int busy)
{
	int *state;
	int i;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_RUNNING;
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}
}

/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * Reference:
 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 *   #32559 revision 3.00+
 */
#define	MSR_AMDK8_IPM		0xc0010055
#define	AMDK8_SMIONCMPHALT	(1ULL << 27)
#define	AMDK8_C1EONCMPHALT	(1ULL << 28)
#define	AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)

static void
cpu_probe_amdc1e(void)
{

	/*
	 * Detect the presence of C1E capability mostly on latest
	 * dual-cores (or future) k8 family.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
	}
}

#ifdef XEN
void (*cpu_idle_fn)(int) = cpu_idle_hlt;
#else
void (*cpu_idle_fn)(int) = cpu_idle_acpi;
#endif

void
cpu_idle(int busy)
{
#ifndef XEN
	uint64_t msr;
#endif

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
#if defined(MP_WATCHDOG) && !defined(XEN)
	ap_watchdog(PCPU_GET(cpuid));
#endif
#ifndef XEN
	/* If we are busy - try to use fast methods. */
	if (busy) {
		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
			cpu_idle_mwait(busy);
			goto out;
		}
	}
#endif

	/* If we have time - switch timers into idle mode. */
	if (!busy) {
		critical_enter();
		cpu_idleclock();
	}

#ifndef XEN
	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_ident_amdc1e && cpu_disable_deep_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
	}
#endif

	/* Call main idle method. */
	cpu_idle_fn(busy);

	/* Switch timers back into active mode. */
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
#ifndef XEN
out:
#endif
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{
	struct pcpu *pcpu;
	int *state;

	pcpu = pcpu_find(cpu);
	state = (int *)pcpu->pc_monitorbuf;
	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
	if (*state == STATE_SLEEPING)
		return (0);
	if (*state == STATE_MWAIT)
		*state = STATE_RUNNING;
	return (1);
}

/*
 * Ordered by speed/power consumption.
 */
struct {
	void	*id_fn;
	char	*id_name;
} idle_tbl[] = {
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_hlt, "hlt" },
	{ cpu_idle_acpi, "acpi" },
	{ NULL, NULL }
};

static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");

static int
idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	int error;
	char *p;
	int i;

	p = "unknown";
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		if (strcmp(idle_tbl[i].id_name, buf))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");

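/*
 * 64-bit atomics start out pointing at the i386 (cmpxchg8b-free)
 * implementations and are switched to the i586 versions once
 * cpu_probe_cmpxchg8b() determines the instruction is usable.
 */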
uint64_t (*atomic_load_acq_64)(volatile uint64_t *) =
    atomic_load_acq_64_i386;
void (*atomic_store_rel_64)(volatile uint64_t *, uint64_t) =
    atomic_store_rel_64_i386;

static void
cpu_probe_cmpxchg8b(void)
{

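	/*
	 * CPUID_CX8 advertises cmpxchg8b; Rise CPUs are special-cased
	 * here because they implement the instruction without setting
	 * the feature bit.
	 */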
	if ((cpu_feature & CPUID_CX8) != 0 ||
	    cpu_vendor_id == CPU_VENDOR_RISE) {
		atomic_load_acq_64 = atomic_load_acq_64_i586;
		atomic_store_rel_64 = atomic_store_rel_64_i586;
	}
}

/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pc->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = imgp->entry_addr;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = imgp->ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;
	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure %edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();

	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

u_long bootdev;		/* not a struct cdev *- encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;

#ifdef XEN
union descriptor *gdt;
union descriptor *ldt;
#else
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#endif
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */
struct mtx dt_lock;			/* lock for GDT and LDT */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

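/*
 * The double fault handler runs via a task gate on its own TSS with a
 * private stack, since the faulting thread's stack can no longer be
 * trusted.
 */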
static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern vm_offset_t proc0kstack;

/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
	/* GNULL_SEL	0 Null Descriptor */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0x0,
	  .ssd_type = 0,
	  .ssd_dpl = SEL_KPL,
	  .ssd_p = 0,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_KPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GUFS_SEL	2 %fs Descriptor for user */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GUGS_SEL	3 %gs Descriptor for user */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GCODE_SEL	4 Code Descriptor for kernel */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMERA,
	  .ssd_dpl = SEL_KPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GDATA_SEL	5 Data Descriptor for kernel */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_KPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GUCODE_SEL	6 Code Descriptor for user */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMERA,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GUDATA_SEL	7 Data Descriptor for user */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
	{ .ssd_base = 0x400,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_KPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
#ifndef XEN
	/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
	{ .ssd_base = 0x0,
	  .ssd_limit = sizeof(struct i386tss)-1,
	  .ssd_type = SDT_SYS386TSS,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* GLDT_SEL	10 LDT Descriptor */
	{ .ssd_base = (int) ldt,
	  .ssd_limit = sizeof(ldt)-1,
	  .ssd_type = SDT_SYSLDT,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* GUSERLDT_SEL	11 User LDT Descriptor per process */
	{ .ssd_base = (int) ldt,
	  .ssd_limit = (512 * sizeof(union descriptor)-1),
	  .ssd_type = SDT_SYSLDT,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* GPANIC_SEL	12 Panic Tss Descriptor */
	{ .ssd_base = (int) &dblfault_tss,
	  .ssd_limit = sizeof(struct i386tss)-1,
	  .ssd_type = SDT_SYS386TSS,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
	{ .ssd_base = 0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMERA,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 1 },
	/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
	{ .ssd_base = 0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMERA,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 1 },
	/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
	{ .ssd_base = 0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
	{ .ssd_base = 0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 1 },
	/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
	{ .ssd_base = 0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 1 },
	/* GNDIS_SEL	18 NDIS Descriptor */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0x0,
	  .ssd_type = 0,
	  .ssd_dpl = 0,
	  .ssd_p = 0,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
#endif /* !XEN */
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0x0,
	  .ssd_type = 0,
	  .ssd_dpl = 0,
	  .ssd_p = 0,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* Null Descriptor - overwritten by call gate */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0x0,
	  .ssd_type = 0,
	  .ssd_dpl = 0,
	  .ssd_p = 0,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* Null Descriptor - overwritten by call gate */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0x0,
	  .ssd_type = 0,
	  .ssd_dpl = 0,
	  .ssd_p = 0,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* Code Descriptor for user */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMERA,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* Null Descriptor - overwritten by call gate */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0x0,
	  .ssd_type = 0,
	  .ssd_dpl = 0,
	  .ssd_p = 0,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* Data Descriptor for user */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
};

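/*
 * Fill in an IDT slot: install the handler offset and selector, set the
 * gate type and privilege level, and mark the descriptor present.
 */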
void
setidt(int idx, inthand_t *func, int typ, int dpl, int selec)
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func) >> 16;
}

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm),
#ifdef KDTRACE_HOOKS
	IDTVEC(dtrace_ret),
#endif
	IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = (ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}
}

/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
{
	uint64_t idtr, gdtr;

	idtr = ridt();
	db_printf("idtr\t0x%08x/%04x\n",
	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
	gdtr = rgdt();
	db_printf("gdtr\t0x%08x/%04x\n",
	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
	db_printf("ldtr\t0x%04x\n", rldt());
	db_printf("tr\t0x%04x\n", rtr());
	db_printf("cr0\t0x%08x\n", rcr0());
	db_printf("cr2\t0x%08x\n", rcr2());
	db_printf("cr3\t0x%08x\n", rcr3());
	db_printf("cr4\t0x%08x\n", rcr4());
}
#endif

1984 void
1985 sdtossd(sd, ssd)
1986 struct segment_descriptor *sd;
1987 struct soft_segment_descriptor *ssd;
1988 {
1989 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1990 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1991 ssd->ssd_type = sd->sd_type;
1992 ssd->ssd_dpl = sd->sd_dpl;
1993 ssd->ssd_p = sd->sd_p;
1994 ssd->ssd_def32 = sd->sd_def32;
1995 ssd->ssd_gran = sd->sd_gran;
1996 }
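/*
 * For reference, the inverse packing (done by ssdtosd()) splits a
 * 32-bit base into sd_lobase = base & 0xffffff and sd_hibase =
 * base >> 24, and a 20-bit limit into sd_lolimit = limit & 0xffff
 * and sd_hilimit = limit >> 16, matching the shifts above.
 */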
1997
1998 #ifndef XEN
1999 static int
2000 add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
2001 {
2002 int i, insert_idx, physmap_idx;
2003
2004 physmap_idx = *physmap_idxp;
2005
2006 if (boothowto & RB_VERBOSE)
2007 printf("SMAP type=%02x base=%016llx len=%016llx\n",
2008 smap->type, smap->base, smap->length);
2009
2010 if (smap->type != SMAP_TYPE_MEMORY)
2011 return (1);
2012
2013 if (smap->length == 0)
2014 return (1);
2015
2016 #ifndef PAE
2017 if (smap->base > 0xffffffff) {
2018 printf("%uK of memory above 4GB ignored\n",
2019 (u_int)(smap->length / 1024));
2020 return (1);
2021 }
2022 #endif
2023
2024 /*
2025 * Find insertion point while checking for overlap. Start off by
2026 * assuming the new entry will be added to the end.
2027 */
2028 insert_idx = physmap_idx + 2;
2029 for (i = 0; i <= physmap_idx; i += 2) {
2030 if (smap->base < physmap[i + 1]) {
2031 if (smap->base + smap->length <= physmap[i]) {
2032 insert_idx = i;
2033 break;
2034 }
2035 if (boothowto & RB_VERBOSE)
2036 printf(
2037 "Overlapping memory regions, ignoring second region\n");
2038 return (1);
2039 }
2040 }
2041
2042 /* See if we can prepend to the next entry. */
2043 if (insert_idx <= physmap_idx &&
2044 smap->base + smap->length == physmap[insert_idx]) {
2045 physmap[insert_idx] = smap->base;
2046 return (1);
2047 }
2048
2049 /* See if we can append to the previous entry. */
2050 if (insert_idx > 0 && smap->base == physmap[insert_idx - 1]) {
2051 physmap[insert_idx - 1] += smap->length;
2052 return (1);
2053 }
2054
2055 physmap_idx += 2;
2056 *physmap_idxp = physmap_idx;
2057 if (physmap_idx == PHYSMAP_SIZE) {
2058 printf(
2059 "Too many segments in the physical address map, giving up\n");
2060 return (0);
2061 }
2062
2063 /*
2064 * Move the last 'N' entries down to make room for the new
2065 * entry if needed.
2066 */
2067 for (i = physmap_idx; i > insert_idx; i -= 2) {
2068 physmap[i] = physmap[i - 2];
2069 physmap[i + 1] = physmap[i - 1];
2070 }
2071
2072 /* Insert the new entry. */
2073 physmap[insert_idx] = smap->base;
2074 physmap[insert_idx + 1] = smap->base + smap->length;
2075 return (1);
2076 }
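/*
 * Worked example (hypothetical values): starting from the zeroed
 * physmap, an SMAP entry {base 0, len 0x9f000} merges into the
 * initial pair via the "append" case, giving {0x0, 0x9f000} with
 * physmap_idx still 0.  A second entry {base 0x100000, len
 * 0x3ff00000} is disjoint and lands at the end, giving
 * {0x0, 0x9f000, 0x100000, 0x40000000} and physmap_idx == 2.
 * A third entry starting exactly at 0x40000000 would again be
 * merged by the "append to the previous entry" case.
 */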
2077
2078 static void
2079 basemem_setup(void)
2080 {
2081 vm_paddr_t pa;
2082 pt_entry_t *pte;
2083 int i;
2084
2085 if (basemem > 640) {
2086 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
2087 basemem);
2088 basemem = 640;
2089 }
2090
2091 /*
2092 * XXX if biosbasemem is now < 640, there is a `hole'
2093 * between the end of base memory and the start of
2094 * ISA memory. The hole may be empty or it may
2095 * contain BIOS code or data. Map it read/write so
2096 * that the BIOS can write to it. (Memory from 0 to
2097 * the physical end of the kernel is mapped read-only
2098 * to begin with and then parts of it are remapped.
2099 * The parts that aren't remapped form holes that
2100 * remain read-only and are unused by the kernel.
2101 * The base memory area is below the physical end of
2102 * the kernel and right now forms a read-only hole.
2103 * The part of it from PAGE_SIZE to
2104 * (trunc_page(biosbasemem * 1024) - 1) will be
2105 * remapped and used by the kernel later.)
2106 *
2107 * This code is similar to the code used in
2108 * pmap_mapdev, but since no memory needs to be
2109 * allocated we simply change the mapping.
2110 */
2111 for (pa = trunc_page(basemem * 1024);
2112 pa < ISA_HOLE_START; pa += PAGE_SIZE)
2113 pmap_kenter(KERNBASE + pa, pa);
2114
2115 /*
2116 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
2117 * the vm86 page table so that vm86 can scribble on them using
2118 * the vm86 map too. XXX: why 2 ways for this and only 1 way for
2119 * page 0, at least as initialized here?
2120 */
2121 pte = (pt_entry_t *)vm86paddr;
2122 for (i = basemem / 4; i < 160; i++)
2123 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
2124 }
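/*
 * For illustration (hypothetical BIOS value): with basemem == 639,
 * the pmap_kenter() loop remaps only the page at 0x9f000 and the
 * vm86 loop fills PTE 159 alone, since ISA_HOLE_START (0xa0000) is
 * page 160; with basemem == 640 neither loop has any work to do.
 */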
2125 #endif
2126
2127 /*
2128 * Populate the (physmap) array with base/bound pairs describing the
2129 * available physical memory in the system, then test this memory and
2130 * build the phys_avail array describing the actually-available memory.
2131 *
2132 * If we cannot accurately determine the physical memory map, then use
2133 * the value from the 0xE801 call and, failing that, the RTC.
2134 *
2135 * Total memory size may be set by the kernel environment variable
2136 * hw.physmem or the compile-time define MAXMEM.
2137 *
2138 * XXX first should be vm_paddr_t.
2139 */
2140 static void
2141 getmemsize(int first)
2142 {
2143 int has_smap, off, physmap_idx, pa_indx, da_indx;
2144 u_long physmem_tunable, memtest;
2145 vm_paddr_t physmap[PHYSMAP_SIZE];
2146 pt_entry_t *pte;
2147 quad_t dcons_addr, dcons_size;
2148 #ifndef XEN
2149 int hasbrokenint12, i;
2150 u_int extmem;
2151 struct vm86frame vmf;
2152 struct vm86context vmc;
2153 vm_paddr_t pa;
2154 struct bios_smap *smap, *smapbase, *smapend;
2155 u_int32_t smapsize;
2156 caddr_t kmdp;
2157 #endif
2158
2159 has_smap = 0;
2160 #if defined(XEN)
2161 Maxmem = xen_start_info->nr_pages - init_first;
2162 physmem = Maxmem;
2163 basemem = 0;
2164 physmap[0] = init_first << PAGE_SHIFT;
2165 physmap[1] = ptoa(Maxmem) - round_page(msgbufsize);
2166 physmap_idx = 0;
2167 #else
2168 #ifdef XBOX
2169 if (arch_i386_is_xbox) {
2170 /*
2171 * We queried the memory size before, so chop off 4MB for
2172 * the framebuffer and inform the OS of this.
2173 */
2174 physmap[0] = 0;
2175 physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE;
2176 physmap_idx = 0;
2177 goto physmap_done;
2178 }
2179 #endif
2180 bzero(&vmf, sizeof(vmf));
2181 bzero(physmap, sizeof(physmap));
2182 basemem = 0;
2183
2184 /*
2185 * Check if the loader supplied an SMAP memory map. If so,
2186 * use that and do not make any VM86 calls.
2187 */
2188 physmap_idx = 0;
2189 smapbase = NULL;
2190 kmdp = preload_search_by_type("elf kernel");
2191 if (kmdp == NULL)
2192 kmdp = preload_search_by_type("elf32 kernel");
2193 if (kmdp != NULL)
2194 smapbase = (struct bios_smap *)preload_search_info(kmdp,
2195 MODINFO_METADATA | MODINFOMD_SMAP);
2196 if (smapbase != NULL) {
2197 /*
2198 * subr_module.c says:
2199 * "Consumer may safely assume that size value precedes data."
2200 * ie: an int32_t immediately precedes SMAP.
2201 */
2202 smapsize = *((u_int32_t *)smapbase - 1);
2203 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
2204 has_smap = 1;
2205
2206 for (smap = smapbase; smap < smapend; smap++)
2207 if (!add_smap_entry(smap, physmap, &physmap_idx))
2208 break;
2209 goto have_smap;
2210 }
2211
2212 /*
2213 * Some newer BIOSes have a broken INT 12H implementation
2214 * which causes a kernel panic immediately. In this case, we
2215 * need to use the SMAP to determine the base memory size.
2216 */
2217 hasbrokenint12 = 0;
2218 TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
2219 if (hasbrokenint12 == 0) {
2220 /* Use INT12 to determine base memory size. */
2221 vm86_intcall(0x12, &vmf);
2222 basemem = vmf.vmf_ax;
2223 basemem_setup();
2224 }
2225
2226 /*
2227 * Fetch the memory map with INT 15:E820. Map page 1 R/W into
2228 * the kernel page table so we can use it as a buffer. The
2229 * kernel will unmap this page later.
2230 */
2231 pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
2232 vmc.npages = 0;
2233 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
2234 vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
2235
2236 vmf.vmf_ebx = 0;
2237 do {
2238 vmf.vmf_eax = 0xE820;
2239 vmf.vmf_edx = SMAP_SIG;
2240 vmf.vmf_ecx = sizeof(struct bios_smap);
2241 i = vm86_datacall(0x15, &vmf, &vmc);
2242 if (i || vmf.vmf_eax != SMAP_SIG)
2243 break;
2244 has_smap = 1;
2245 if (!add_smap_entry(smap, physmap, &physmap_idx))
2246 break;
2247 } while (vmf.vmf_ebx != 0);
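	/*
	 * For reference, each iteration above fills one struct bios_smap
	 * (64-bit base, 64-bit length, 32-bit type), e.g. a hypothetical
	 * {base 0x100000, length 0x3ff00000, type SMAP_TYPE_MEMORY} for
	 * ordinary RAM at 1MB.  %ebx carries the BIOS continuation token
	 * between calls and reads back as zero after the final entry.
	 */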
2248
2249 have_smap:
2250 /*
2251 * If we didn't fetch the "base memory" size from INT12,
2252 * figure it out from the SMAP (or just guess).
2253 */
2254 if (basemem == 0) {
2255 for (i = 0; i <= physmap_idx; i += 2) {
2256 if (physmap[i] == 0x00000000) {
2257 basemem = physmap[i + 1] / 1024;
2258 break;
2259 }
2260 }
2261
2262 /* XXX: If we couldn't find basemem from SMAP, just guess. */
2263 if (basemem == 0)
2264 basemem = 640;
2265 basemem_setup();
2266 }
2267
2268 if (physmap[1] != 0)
2269 goto physmap_done;
2270
2271 /*
2272 * If we failed to find an SMAP, figure out the extended
2273 * memory size. We will then build a simple memory map with
2274 * two segments, one for "base memory" and the second for
2275 * "extended memory". Note that "extended memory" starts at a
2276 * physical address of 1MB and that both basemem and extmem
2277 * are in units of 1KB.
2278 *
2279 * First, try to fetch the extended memory size via INT 15:E801.
2280 */
2281 vmf.vmf_ax = 0xE801;
2282 if (vm86_intcall(0x15, &vmf) == 0) {
2283 extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
2284 } else {
2285 /*
2286 * If INT15:E801 fails, this is our last ditch effort
2287 * to determine the extended memory size. Currently
2288 * we prefer the RTC value over INT15:88.
2289 */
2290 #if 0
2291 vmf.vmf_ah = 0x88;
2292 vm86_intcall(0x15, &vmf);
2293 extmem = vmf.vmf_ax;
2294 #else
2295 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
2296 #endif
2297 }
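	/*
	 * Example of the E801 arithmetic (hypothetical values): %cx counts
	 * 1KB blocks between 1MB and 16MB (at most 0x3c00 == 15360) and
	 * %dx counts 64KB blocks above 16MB, so vmf_cx == 0x3c00 and
	 * vmf_dx == 0x300 yield extmem = 15360 + 768 * 64 = 64512KB,
	 * i.e. 63MB of extended memory on a 64MB machine.
	 */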
2298
2299 /*
2300 * Special hack for chipsets that still remap the 384k hole when
2301 * there's 16MB of memory - this really confuses people that
2302 * are trying to use bus mastering ISA controllers with the
2303 * "16MB limit"; they only have 16MB, but the remapping puts
2304 * them beyond the limit.
2305 *
2306 * If extended memory is between 15-16MB (16-17MB phys address range),
2307 * chop it to 15MB.
2308 */
2309 if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
2310 extmem = 15 * 1024;
2311
2312 physmap[0] = 0;
2313 physmap[1] = basemem * 1024;
2314 physmap_idx = 2;
2315 physmap[physmap_idx] = 0x100000;
2316 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
2317
2318 physmap_done:
2319 #endif
2320 /*
2321 * Now, physmap contains a map of physical memory.
2322 */
2323
2324 #ifdef SMP
2325 /* make hole for AP bootstrap code */
2326 physmap[1] = mp_bootaddress(physmap[1]);
2327 #endif
2328
2329 /*
2330 * Maxmem isn't the "maximum memory", it's one larger than the
2331 * highest page of the physical address space. It should be
2332 * called something like "Maxphyspage". We may adjust this
2333 * based on ``hw.physmem'' and the results of the memory test.
2334 */
2335 Maxmem = atop(physmap[physmap_idx + 1]);
2336
2337 #ifdef MAXMEM
2338 Maxmem = MAXMEM / 4;
2339 #endif
2340
2341 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
2342 Maxmem = atop(physmem_tunable);
2343
2344 /*
2345 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
2346 * the amount of memory in the system.
2347 */
2348 if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
2349 Maxmem = atop(physmap[physmap_idx + 1]);
2350
2351 /*
2352 * By default enable the memory test on real hardware, and disable
2353 * it if we appear to be running in a VM. This avoids touching all
2354 * pages unnecessarily, which doesn't matter on real hardware but is
2355 * bad for shared VM hosts. Use a general name so that
2356 * one could eventually do more with the code than just disable it.
2357 */
2358 memtest = (vm_guest > VM_GUEST_NO) ? 0 : 1;
2359 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
2360
2361 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
2362 (boothowto & RB_VERBOSE))
2363 printf("Physical memory use set to %ldK\n", Maxmem * 4);
2364
2365 /*
2366 * If Maxmem has been increased beyond what the system has detected,
2367 * extend the last memory segment to the new limit.
2368 */
2369 if (atop(physmap[physmap_idx + 1]) < Maxmem)
2370 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
2371
2372 /* call pmap initialization to make new kernel address space */
2373 pmap_bootstrap(first);
2374
2375 /*
2376 * Size up each available chunk of physical memory.
2377 */
2378 physmap[0] = PAGE_SIZE; /* mask off page 0 */
2379 pa_indx = 0;
2380 da_indx = 1;
2381 phys_avail[pa_indx++] = physmap[0];
2382 phys_avail[pa_indx] = physmap[0];
2383 dump_avail[da_indx] = physmap[0];
2384 pte = CMAP1;
2385
2386 /*
2387 * Get dcons buffer address
2388 */
2389 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
2390 getenv_quad("dcons.size", &dcons_size) == 0)
2391 dcons_addr = 0;
2392
2393 #ifndef XEN
2394 /*
2395 * physmap is in bytes, so when converting to page boundaries,
2396 * round up the start address and round down the end address.
2397 */
2398 for (i = 0; i <= physmap_idx; i += 2) {
2399 vm_paddr_t end;
2400
2401 end = ptoa((vm_paddr_t)Maxmem);
2402 if (physmap[i + 1] < end)
2403 end = trunc_page(physmap[i + 1]);
2404 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
2405 int tmp, page_bad, full;
2406 int *ptr = (int *)CADDR1;
2407
2408 full = FALSE;
2409 /*
2410 * block out kernel memory as not available.
2411 */
2412 if (pa >= KERNLOAD && pa < first)
2413 goto do_dump_avail;
2414
2415 /*
2416 * block out dcons buffer
2417 */
2418 if (dcons_addr > 0
2419 && pa >= trunc_page(dcons_addr)
2420 && pa < dcons_addr + dcons_size)
2421 goto do_dump_avail;
2422
2423 page_bad = FALSE;
2424 if (memtest == 0)
2425 goto skip_memtest;
2426
2427 /*
2428 * map page into kernel: valid, read/write, non-cacheable
2429 */
2430 *pte = pa | PG_V | PG_RW | PG_N;
2431 invltlb();
2432
2433 tmp = *(int *)ptr;
2434 /*
2435 * Test for alternating 1's and 0's
2436 */
2437 *(volatile int *)ptr = 0xaaaaaaaa;
2438 if (*(volatile int *)ptr != 0xaaaaaaaa)
2439 page_bad = TRUE;
2440 /*
2441 * Test for alternating 0's and 1's
2442 */
2443 *(volatile int *)ptr = 0x55555555;
2444 if (*(volatile int *)ptr != 0x55555555)
2445 page_bad = TRUE;
2446 /*
2447 * Test for all 1's
2448 */
2449 *(volatile int *)ptr = 0xffffffff;
2450 if (*(volatile int *)ptr != 0xffffffff)
2451 page_bad = TRUE;
2452 /*
2453 * Test for all 0's
2454 */
2455 *(volatile int *)ptr = 0x0;
2456 if (*(volatile int *)ptr != 0x0)
2457 page_bad = TRUE;
2458 /*
2459 * Restore original value.
2460 */
2461 *(int *)ptr = tmp;
2462
2463 skip_memtest:
2464 /*
2465 * Adjust array of valid/good pages.
2466 */
2467 if (page_bad == TRUE)
2468 continue;
2469 /*
2470 * If this good page is a continuation of the
2471 * previous set of good pages, then just increase
2472 * the end pointer. Otherwise start a new chunk.
2473 * Note that "end" is an exclusive bound, making
2474 * the range >= start and < end.
2475 * If we're also doing a speculative memory
2476 * test and we're at or past the end, bump up Maxmem
2477 * so that we keep going. The first bad page
2478 * will terminate the loop.
2479 */
2480 if (phys_avail[pa_indx] == pa) {
2481 phys_avail[pa_indx] += PAGE_SIZE;
2482 } else {
2483 pa_indx++;
2484 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
2485 printf(
2486 "Too many holes in the physical address space, giving up\n");
2487 pa_indx--;
2488 full = TRUE;
2489 goto do_dump_avail;
2490 }
2491 phys_avail[pa_indx++] = pa; /* start */
2492 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
2493 }
2494 physmem++;
2495 do_dump_avail:
2496 if (dump_avail[da_indx] == pa) {
2497 dump_avail[da_indx] += PAGE_SIZE;
2498 } else {
2499 da_indx++;
2500 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2501 da_indx--;
2502 goto do_next;
2503 }
2504 dump_avail[da_indx++] = pa; /* start */
2505 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2506 }
2507 do_next:
2508 if (full)
2509 break;
2510 }
2511 }
2512 *pte = 0;
2513 invltlb();
2514 #else
2515 phys_avail[0] = physfree;
2516 phys_avail[1] = xen_start_info->nr_pages * PAGE_SIZE;
2517 dump_avail[0] = 0;
2518 dump_avail[1] = xen_start_info->nr_pages * PAGE_SIZE;
2519
2520 #endif
2521
2522 /*
2523 * XXX
2524 * The last chunk must contain at least one page plus the message
2525 * buffer to avoid complicating other code (message buffer address
2526 * calculation, etc.).
2527 */
2528 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2529 round_page(msgbufsize) >= phys_avail[pa_indx]) {
2530 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2531 phys_avail[pa_indx--] = 0;
2532 phys_avail[pa_indx--] = 0;
2533 }
2534
2535 Maxmem = atop(phys_avail[pa_indx]);
2536
2537 /* Trim off space for the message buffer. */
2538 phys_avail[pa_indx] -= round_page(msgbufsize);
2539
2540 /* Map the message buffer. */
2541 for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
2542 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
2543 off);
2544
2545 PT_UPDATES_FLUSH();
2546 }
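/*
 * On return, phys_avail[] holds {inclusive start, exclusive end}
 * pairs of usable physical addresses, e.g. on a hypothetical
 * machine:
 *
 *	phys_avail[0] = 0x1000		phys_avail[1] = 0x9f000
 *	phys_avail[2] = 0x400000	phys_avail[3] = 0x3fff0000
 *
 * with page 0 masked off and the message buffer already trimmed
 * from the end of the last chunk.
 */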
2547
2548 #ifdef XEN
2549 #define MTOPSIZE (1 << (14 + PAGE_SHIFT))
2550
2551 void
2552 init386(first)
2553 int first;
2554 {
2555 unsigned long gdtmachpfn;
2556 int error, gsel_tss, metadata_missing, x, pa;
2557 size_t kstack0_sz;
2558 struct pcpu *pc;
2559 struct callback_register event = {
2560 .type = CALLBACKTYPE_event,
2561 .address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)Xhypervisor_callback },
2562 };
2563 struct callback_register failsafe = {
2564 .type = CALLBACKTYPE_failsafe,
2565 .address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback },
2566 };
2567
2568 thread0.td_kstack = proc0kstack;
2569 thread0.td_kstack_pages = KSTACK_PAGES;
2570 kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
2571 thread0.td_pcb = (struct pcb *)(thread0.td_kstack + kstack0_sz) - 1;
2572
2573 /*
2574 * This may be done better later if it gets more high level
2575 * components in it. If so just link td->td_proc here.
2576 */
2577 proc_linkup0(&proc0, &thread0);
2578
2579 metadata_missing = 0;
2580 if (xen_start_info->mod_start) {
2581 preload_metadata = (caddr_t)xen_start_info->mod_start;
2582 preload_bootstrap_relocate(KERNBASE);
2583 } else {
2584 metadata_missing = 1;
2585 }
2586 if (envmode == 1)
2587 kern_envp = static_env;
2588 else if ((caddr_t)xen_start_info->cmd_line)
2589 kern_envp = xen_setbootenv((caddr_t)xen_start_info->cmd_line);
2590
2591 boothowto |= xen_boothowto(kern_envp);
2592
2593 /* Init basic tunables, hz etc */
2594 init_param1();
2595
2596 /*
2597 * XEN occupies a portion of the upper virtual address space.
2598 * At its base it manages an array mapping machine page frames
2599 * to physical page frames - hence we need to be able to
2600 * access 4GB - (64MB - 4MB + 64k)
2601 */
2602 gdt_segs[GPRIV_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2603 gdt_segs[GUFS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2604 gdt_segs[GUGS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2605 gdt_segs[GCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2606 gdt_segs[GDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2607 gdt_segs[GUCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2608 gdt_segs[GUDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2609 gdt_segs[GBIOSLOWMEM_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2610
2611 pc = &__pcpu[0];
2612 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2613 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2614
2615 PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V | PG_RW);
2616 bzero(gdt, PAGE_SIZE);
2617 for (x = 0; x < NGDT; x++)
2618 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2619
2620 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
2621
2622 gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
2623 PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V);
2624 PANIC_IF(HYPERVISOR_set_gdt(&gdtmachpfn, 512) != 0);
2625 lgdt(&r_gdt);
2626 gdtset = 1;
2627
2628 if ((error = HYPERVISOR_set_trap_table(trap_table)) != 0) {
2629 panic("set_trap_table failed - error %d\n", error);
2630 }
2631
2632 error = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
2633 if (error == 0)
2634 error = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
2635 #if CONFIG_XEN_COMPAT <= 0x030002
2636 if (error == -ENOXENSYS)
2637 HYPERVISOR_set_callbacks(GSEL(GCODE_SEL, SEL_KPL),
2638 (unsigned long)Xhypervisor_callback,
2639 GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback);
2640 #endif
2641 pcpu_init(pc, 0, sizeof(struct pcpu));
2642 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
2643 pmap_kenter(pa + KERNBASE, pa);
2644 dpcpu_init((void *)(first + KERNBASE), 0);
2645 first += DPCPU_SIZE;
2646 physfree += DPCPU_SIZE;
2647 init_first += DPCPU_SIZE / PAGE_SIZE;
2648
2649 PCPU_SET(prvspace, pc);
2650 PCPU_SET(curthread, &thread0);
2651 PCPU_SET(curpcb, thread0.td_pcb);
2652
2653 /*
2654 * Initialize mutexes.
2655 *
2656 * icu_lock: in order to allow an interrupt to occur in a critical
2657 * section, to set pcpu->ipending (etc...) properly, we
2658 * must be able to get the icu lock, so it can't be
2659 * under witness.
2660 */
2661 mutex_init();
2662 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2663
2664 /* make ldt memory segments */
2665 PT_SET_MA(ldt, xpmap_ptom(VTOP(ldt)) | PG_V | PG_RW);
2666 bzero(ldt, PAGE_SIZE);
2667 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2668 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2669 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2670 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2671
2672 default_proc_ldt.ldt_base = (caddr_t)ldt;
2673 default_proc_ldt.ldt_len = 6;
2674 _default_ldt = (int)&default_proc_ldt;
2675 PCPU_SET(currentldt, _default_ldt);
2676 PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW);
2677 xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));
2678
2679 #if defined(XEN_PRIVILEGED)
2680 /*
2681 * Initialize the i8254 before the console so that console
2682 * initialization can use DELAY().
2683 */
2684 i8254_init();
2685 #endif
2686
2687 /*
2688 * Initialize the console before we print anything out.
2689 */
2690 cninit();
2691
2692 if (metadata_missing)
2693 printf("WARNING: loader(8) metadata is missing!\n");
2694
2695 #ifdef DEV_ISA
2696 #ifdef DEV_ATPIC
2697 elcr_probe();
2698 atpic_startup();
2699 #else
2700 /* Reset and mask the atpics and leave them shut down. */
2701 atpic_reset();
2702
2703 /*
2704 * Point the ICU spurious interrupt vectors at the APIC spurious
2705 * interrupt handler.
2706 */
2707 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
2708 GSEL(GCODE_SEL, SEL_KPL));
2709 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
2710 GSEL(GCODE_SEL, SEL_KPL));
2711 #endif
2712 #endif
2713
2714 #ifdef DDB
2715 ksym_start = bootinfo.bi_symtab;
2716 ksym_end = bootinfo.bi_esymtab;
2717 #endif
2718
2719 kdb_init();
2720
2721 #ifdef KDB
2722 if (boothowto & RB_KDB)
2723 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
2724 #endif
2725
2726 finishidentcpu(); /* Final stage of CPU initialization */
2727 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2728 GSEL(GCODE_SEL, SEL_KPL));
2729 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2730 GSEL(GCODE_SEL, SEL_KPL));
2731 initializecpu(); /* Initialize CPU registers */
2732
2733 /* make an initial tss so cpu can get interrupt stack on syscall! */
2734 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2735 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2736 kstack0_sz - sizeof(struct pcb) - 16);
2737 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2738 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2739 HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL),
2740 PCPU_GET(common_tss.tss_esp0));
2741
2742 /* pointer to selector slot for %fs/%gs */
2743 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
2744
2745 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2746 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2747 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2748 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2749 #ifdef PAE
2750 dblfault_tss.tss_cr3 = (int)IdlePDPT;
2751 #else
2752 dblfault_tss.tss_cr3 = (int)IdlePTD;
2753 #endif
2754 dblfault_tss.tss_eip = (int)dblfault_handler;
2755 dblfault_tss.tss_eflags = PSL_KERNEL;
2756 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2757 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2758 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2759 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2760 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2761
2762 vm86_initialize();
2763 getmemsize(first);
2764 init_param2(physmem);
2765
2766 /* now running on new page tables, configured, and u/iom is accessible */
2767
2768 msgbufinit(msgbufp, msgbufsize);
2769 /* transfer to user mode */
2770
2771 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
2772 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
2773
2774 /* setup proc 0's pcb */
2775 thread0.td_pcb->pcb_flags = 0;
2776 #ifdef PAE
2777 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
2778 #else
2779 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2780 #endif
2781 thread0.td_pcb->pcb_ext = 0;
2782 thread0.td_frame = &proc0_tf;
2783 thread0.td_pcb->pcb_fsd = PCPU_GET(fsgs_gdt)[0];
2784 thread0.td_pcb->pcb_gsd = PCPU_GET(fsgs_gdt)[1];
2785
2786 cpu_probe_amdc1e();
2787 cpu_probe_cmpxchg8b();
2788 }
2789
2790 #else
2791 void
2792 init386(first)
2793 int first;
2794 {
2795 struct gate_descriptor *gdp;
2796 int gsel_tss, metadata_missing, x, pa;
2797 size_t kstack0_sz;
2798 struct pcpu *pc;
2799
2800 thread0.td_kstack = proc0kstack;
2801 thread0.td_kstack_pages = KSTACK_PAGES;
2802 kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
2803 thread0.td_pcb = (struct pcb *)(thread0.td_kstack + kstack0_sz) - 1;
2804
2805 /*
2806 * This may be done better later if it gets more high level
2807 * components in it. If so just link td->td_proc here.
2808 */
2809 proc_linkup0(&proc0, &thread0);
2810
2811 metadata_missing = 0;
2812 if (bootinfo.bi_modulep) {
2813 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
2814 preload_bootstrap_relocate(KERNBASE);
2815 } else {
2816 metadata_missing = 1;
2817 }
2818 if (envmode == 1)
2819 kern_envp = static_env;
2820 else if (bootinfo.bi_envp)
2821 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
2822
2823 /* Init basic tunables, hz etc */
2824 init_param1();
2825
2826 /*
2827 * Make gdt memory segments. All segments cover the full 4GB
2828 * of address space and permissions are enforced at page level.
2829 */
2830 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
2831 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
2832 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
2833 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
2834 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
2835 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
2836
2837 pc = &__pcpu[0];
2838 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
2839 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2840 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2841
2842 for (x = 0; x < NGDT; x++)
2843 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2844
2845 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
2846 r_gdt.rd_base = (int) gdt;
2847 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
2848 lgdt(&r_gdt);
2849
2850 pcpu_init(pc, 0, sizeof(struct pcpu));
2851 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
2852 pmap_kenter(pa + KERNBASE, pa);
2853 dpcpu_init((void *)(first + KERNBASE), 0);
2854 first += DPCPU_SIZE;
2855 PCPU_SET(prvspace, pc);
2856 PCPU_SET(curthread, &thread0);
2857 PCPU_SET(curpcb, thread0.td_pcb);
2858
2859 /*
2860 * Initialize mutexes.
2861 *
2862 * icu_lock: in order to allow an interrupt to occur in a critical
2863 * section, to set pcpu->ipending (etc...) properly, we
2864 * must be able to get the icu lock, so it can't be
2865 * under witness.
2866 */
2867 mutex_init();
2868 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2869
2870 /* make ldt memory segments */
2871 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2872 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2873 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2874 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2875
2876 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2877 lldt(_default_ldt);
2878 PCPU_SET(currentldt, _default_ldt);
2879
2880 /* exceptions */
2881 for (x = 0; x < NIDT; x++)
2882 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2883 GSEL(GCODE_SEL, SEL_KPL));
2884 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2885 GSEL(GCODE_SEL, SEL_KPL));
2886 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2887 GSEL(GCODE_SEL, SEL_KPL));
2888 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2889 GSEL(GCODE_SEL, SEL_KPL));
2890 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2891 GSEL(GCODE_SEL, SEL_KPL));
2892 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2893 GSEL(GCODE_SEL, SEL_KPL));
2894 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2895 GSEL(GCODE_SEL, SEL_KPL));
2896 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2897 GSEL(GCODE_SEL, SEL_KPL));
2898 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
2899 GSEL(GCODE_SEL, SEL_KPL));
2900 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2901 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2902 GSEL(GCODE_SEL, SEL_KPL));
2903 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2904 GSEL(GCODE_SEL, SEL_KPL));
2905 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2906 GSEL(GCODE_SEL, SEL_KPL));
2907 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2908 GSEL(GCODE_SEL, SEL_KPL));
2909 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2910 GSEL(GCODE_SEL, SEL_KPL));
2911 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2912 GSEL(GCODE_SEL, SEL_KPL));
2913 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2914 GSEL(GCODE_SEL, SEL_KPL));
2915 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2916 GSEL(GCODE_SEL, SEL_KPL));
2917 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2918 GSEL(GCODE_SEL, SEL_KPL));
2919 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2920 GSEL(GCODE_SEL, SEL_KPL));
2921 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2922 GSEL(GCODE_SEL, SEL_KPL));
2923 #ifdef KDTRACE_HOOKS
2924 setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYS386TGT, SEL_UPL,
2925 GSEL(GCODE_SEL, SEL_KPL));
2926 #endif
2927
2928 r_idt.rd_limit = sizeof(idt0) - 1;
2929 r_idt.rd_base = (int) idt;
2930 lidt(&r_idt);
2931
2932 #ifdef XBOX
2933 /*
2934 * The following code queries the PCI ID of 0:0:0. For the XBOX,
2935 * this should be 0x10de / 0x02a5.
2936 *
2937 * This is exactly what Linux does.
2938 */
2939 outl(0xcf8, 0x80000000);
2940 if (inl(0xcfc) == 0x02a510de) {
2941 arch_i386_is_xbox = 1;
2942 pic16l_setled(XBOX_LED_GREEN);
2943
2944 /*
2945 * We are an XBOX, but we may have either 64MB or 128MB of
2946 * memory. The PCI host bridge should be programmed for this,
2947 * so we just query it.
2948 */
2949 outl(0xcf8, 0x80000084);
2950 arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
2951 }
2952 #endif /* XBOX */
2953
2954 /*
2955 * Initialize the i8254 before the console so that console
2956 * initialization can use DELAY().
2957 */
2958 i8254_init();
2959
2960 /*
2961 * Initialize the console before we print anything out.
2962 */
2963 cninit();
2964
2965 if (metadata_missing)
2966 printf("WARNING: loader(8) metadata is missing!\n");
2967
2968 #ifdef DEV_ISA
2969 #ifdef DEV_ATPIC
2970 elcr_probe();
2971 atpic_startup();
2972 #else
2973 /* Reset and mask the atpics and leave them shut down. */
2974 atpic_reset();
2975
2976 /*
2977 * Point the ICU spurious interrupt vectors at the APIC spurious
2978 * interrupt handler.
2979 */
2980 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
2981 GSEL(GCODE_SEL, SEL_KPL));
2982 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
2983 GSEL(GCODE_SEL, SEL_KPL));
2984 #endif
2985 #endif
2986
2987 #ifdef DDB
2988 ksym_start = bootinfo.bi_symtab;
2989 ksym_end = bootinfo.bi_esymtab;
2990 #endif
2991
2992 kdb_init();
2993
2994 #ifdef KDB
2995 if (boothowto & RB_KDB)
2996 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
2997 #endif
2998
2999 finishidentcpu(); /* Final stage of CPU initialization */
3000 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
3001 GSEL(GCODE_SEL, SEL_KPL));
3002 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
3003 GSEL(GCODE_SEL, SEL_KPL));
3004 initializecpu(); /* Initialize CPU registers */
3005
3006 /* make an initial tss so cpu can get interrupt stack on syscall! */
3007 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
3008 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
3009 kstack0_sz - sizeof(struct pcb) - 16);
3010 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
3011 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
3012 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
3013 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
3014 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
3015 ltr(gsel_tss);
3016
3017 /* pointer to selector slot for %fs/%gs */
3018 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
3019
3020 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
3021 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
3022 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
3023 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
3024 #ifdef PAE
3025 dblfault_tss.tss_cr3 = (int)IdlePDPT;
3026 #else
3027 dblfault_tss.tss_cr3 = (int)IdlePTD;
3028 #endif
3029 dblfault_tss.tss_eip = (int)dblfault_handler;
3030 dblfault_tss.tss_eflags = PSL_KERNEL;
3031 dblfault_tss.tss_ds = dblfault_tss.tss_es =
3032 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
3033 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
3034 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
3035 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
3036
3037 vm86_initialize();
3038 getmemsize(first);
3039 init_param2(physmem);
3040
3041 /* now running on new page tables, configured, and u/iom is accessible */
3042
3043 msgbufinit(msgbufp, msgbufsize);
3044
3045 /* make a call gate to reenter kernel with */
3046 gdp = &ldt[LSYS5CALLS_SEL].gd;
3047
3048 x = (int) &IDTVEC(lcall_syscall);
3049 gdp->gd_looffset = x;
3050 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
3051 gdp->gd_stkcpy = 1;
3052 gdp->gd_type = SDT_SYS386CGT;
3053 gdp->gd_dpl = SEL_UPL;
3054 gdp->gd_p = 1;
3055 gdp->gd_hioffset = x >> 16;
3056
3057 /* XXX does this work? */
3058 /* XXX yes! */
3059 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
3060 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
3061
3062 /* transfer to user mode */
3063
3064 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
3065 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
3066
3067 /* setup proc 0's pcb */
3068 thread0.td_pcb->pcb_flags = 0;
3069 #ifdef PAE
3070 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
3071 #else
3072 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
3073 #endif
3074 thread0.td_pcb->pcb_ext = 0;
3075 thread0.td_frame = &proc0_tf;
3076
3077 cpu_probe_amdc1e();
3078 cpu_probe_cmpxchg8b();
3079 }
3080 #endif
3081
3082 void
3083 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
3084 {
3085
3086 pcpu->pc_acpi_id = 0xffffffff;
3087 }
3088
3089 void
3090 spinlock_enter(void)
3091 {
3092 struct thread *td;
3093 register_t flags;
3094
3095 td = curthread;
3096 if (td->td_md.md_spinlock_count == 0) {
3097 flags = intr_disable();
3098 td->td_md.md_spinlock_count = 1;
3099 td->td_md.md_saved_flags = flags;
3100 } else
3101 td->td_md.md_spinlock_count++;
3102 critical_enter();
3103 }
3104
3105 void
3106 spinlock_exit(void)
3107 {
3108 struct thread *td;
3109 register_t flags;
3110
3111 td = curthread;
3112 critical_exit();
3113 flags = td->td_md.md_saved_flags;
3114 td->td_md.md_spinlock_count--;
3115 if (td->td_md.md_spinlock_count == 0)
3116 intr_restore(flags);
3117 }
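/*
 * Usage sketch: the enter/exit pair nests, and interrupts are only
 * re-enabled when the outermost exit drops md_spinlock_count back
 * to zero:
 *
 *	spinlock_enter();	-- disables interrupts, count 0 -> 1
 *	spinlock_enter();	-- count 1 -> 2, saved flags untouched
 *	spinlock_exit();	-- count 2 -> 1
 *	spinlock_exit();	-- count 1 -> 0, restores saved flags
 */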
3118
3119 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
3120 static void f00f_hack(void *unused);
3121 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
3122
3123 static void
3124 f00f_hack(void *unused)
3125 {
3126 struct gate_descriptor *new_idt;
3127 vm_offset_t tmp;
3128
3129 if (!has_f00f_bug)
3130 return;
3131
3132 GIANT_REQUIRED;
3133
3134 printf("Intel Pentium detected, installing workaround for F00F bug\n");
3135
3136 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
3137 if (tmp == 0)
3138 panic("kmem_alloc returned 0");
3139
3140 /* Put the problematic entry (#6) at the end of the lower page. */
3141 new_idt = (struct gate_descriptor*)
3142 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
3143 bcopy(idt, new_idt, sizeof(idt0));
3144 r_idt.rd_base = (u_int)new_idt;
3145 lidt(&r_idt);
3146 idt = new_idt;
3147 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
3148 VM_PROT_READ, FALSE) != KERN_SUCCESS)
3149 panic("vm_map_protect failed");
3150 }
3151 #endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
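/*
 * Layout note: gate descriptors are 8 bytes, so placing new_idt
 * 7 * 8 == 56 bytes before the page boundary puts entry #6 in the
 * last 8 bytes of the read-only lower page.  The F00F lockup's
 * locked IDT access then takes an ordinary page fault instead of
 * wedging the CPU, which the trap code can recognize and handle.
 */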
3152
3153 /*
3154 * Construct a PCB from a trapframe. This is called from kdb_trap() where
3155 * we want to start a backtrace from the function that caused us to enter
3156 * the debugger. We have the context in the trapframe, but base the trace
3157 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
3158 * enough for a backtrace.
3159 */
3160 void
3161 makectx(struct trapframe *tf, struct pcb *pcb)
3162 {
3163
3164 pcb->pcb_edi = tf->tf_edi;
3165 pcb->pcb_esi = tf->tf_esi;
3166 pcb->pcb_ebp = tf->tf_ebp;
3167 pcb->pcb_ebx = tf->tf_ebx;
3168 pcb->pcb_eip = tf->tf_eip;
3169 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
3170 }
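/*
 * The (int)(tf + 1) - 8 case above reconstructs %esp for traps taken
 * in kernel mode: no ring transition occurred, so the CPU pushed no
 * tf_esp/tf_ss, and the pre-trap stack pointer is the end of the
 * frame minus those two unused 4-byte slots.
 */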
3171
3172 int
3173 ptrace_set_pc(struct thread *td, u_long addr)
3174 {
3175
3176 td->td_frame->tf_eip = addr;
3177 return (0);
3178 }
3179
3180 int
3181 ptrace_single_step(struct thread *td)
3182 {
3183 td->td_frame->tf_eflags |= PSL_T;
3184 return (0);
3185 }
3186
3187 int
3188 ptrace_clear_single_step(struct thread *td)
3189 {
3190 td->td_frame->tf_eflags &= ~PSL_T;
3191 return (0);
3192 }
3193
3194 int
3195 fill_regs(struct thread *td, struct reg *regs)
3196 {
3197 struct pcb *pcb;
3198 struct trapframe *tp;
3199
3200 tp = td->td_frame;
3201 pcb = td->td_pcb;
3202 regs->r_gs = pcb->pcb_gs;
3203 return (fill_frame_regs(tp, regs));
3204 }
3205
3206 int
3207 fill_frame_regs(struct trapframe *tp, struct reg *regs)
3208 {
3209 regs->r_fs = tp->tf_fs;
3210 regs->r_es = tp->tf_es;
3211 regs->r_ds = tp->tf_ds;
3212 regs->r_edi = tp->tf_edi;
3213 regs->r_esi = tp->tf_esi;
3214 regs->r_ebp = tp->tf_ebp;
3215 regs->r_ebx = tp->tf_ebx;
3216 regs->r_edx = tp->tf_edx;
3217 regs->r_ecx = tp->tf_ecx;
3218 regs->r_eax = tp->tf_eax;
3219 regs->r_eip = tp->tf_eip;
3220 regs->r_cs = tp->tf_cs;
3221 regs->r_eflags = tp->tf_eflags;
3222 regs->r_esp = tp->tf_esp;
3223 regs->r_ss = tp->tf_ss;
3224 return (0);
3225 }
3226
3227 int
3228 set_regs(struct thread *td, struct reg *regs)
3229 {
3230 struct pcb *pcb;
3231 struct trapframe *tp;
3232
3233 tp = td->td_frame;
3234 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
3235 !CS_SECURE(regs->r_cs))
3236 return (EINVAL);
3237 pcb = td->td_pcb;
3238 tp->tf_fs = regs->r_fs;
3239 tp->tf_es = regs->r_es;
3240 tp->tf_ds = regs->r_ds;
3241 tp->tf_edi = regs->r_edi;
3242 tp->tf_esi = regs->r_esi;
3243 tp->tf_ebp = regs->r_ebp;
3244 tp->tf_ebx = regs->r_ebx;
3245 tp->tf_edx = regs->r_edx;
3246 tp->tf_ecx = regs->r_ecx;
3247 tp->tf_eax = regs->r_eax;
3248 tp->tf_eip = regs->r_eip;
3249 tp->tf_cs = regs->r_cs;
3250 tp->tf_eflags = regs->r_eflags;
3251 tp->tf_esp = regs->r_esp;
3252 tp->tf_ss = regs->r_ss;
3253 pcb->pcb_gs = regs->r_gs;
3254 return (0);
3255 }
3256
3257 #ifdef CPU_ENABLE_SSE
3258 static void
3259 fill_fpregs_xmm(sv_xmm, sv_87)
3260 struct savexmm *sv_xmm;
3261 struct save87 *sv_87;
3262 {
3263 register struct env87 *penv_87 = &sv_87->sv_env;
3264 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
3265 int i;
3266
3267 bzero(sv_87, sizeof(*sv_87));
3268
3269 /* FPU control/status */
3270 penv_87->en_cw = penv_xmm->en_cw;
3271 penv_87->en_sw = penv_xmm->en_sw;
3272 penv_87->en_tw = penv_xmm->en_tw;
3273 penv_87->en_fip = penv_xmm->en_fip;
3274 penv_87->en_fcs = penv_xmm->en_fcs;
3275 penv_87->en_opcode = penv_xmm->en_opcode;
3276 penv_87->en_foo = penv_xmm->en_foo;
3277 penv_87->en_fos = penv_xmm->en_fos;
3278
3279 /* FPU registers */
3280 for (i = 0; i < 8; ++i)
3281 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
3282 }
3283
3284 static void
3285 set_fpregs_xmm(sv_87, sv_xmm)
3286 struct save87 *sv_87;
3287 struct savexmm *sv_xmm;
3288 {
3289 register struct env87 *penv_87 = &sv_87->sv_env;
3290 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
3291 int i;
3292
3293 /* FPU control/status */
3294 penv_xmm->en_cw = penv_87->en_cw;
3295 penv_xmm->en_sw = penv_87->en_sw;
3296 penv_xmm->en_tw = penv_87->en_tw;
3297 penv_xmm->en_fip = penv_87->en_fip;
3298 penv_xmm->en_fcs = penv_87->en_fcs;
3299 penv_xmm->en_opcode = penv_87->en_opcode;
3300 penv_xmm->en_foo = penv_87->en_foo;
3301 penv_xmm->en_fos = penv_87->en_fos;
3302
3303 /* FPU registers */
3304 for (i = 0; i < 8; ++i)
3305 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
3306 }
3307 #endif /* CPU_ENABLE_SSE */
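/*
 * Note the deliberate asymmetry above: fill_fpregs_xmm() zeroes the
 * whole save87 before converting, since the XMM state has no home in
 * the old layout, while set_fpregs_xmm() leaves the remaining XMM
 * fields of the destination untouched.
 */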
3308
3309 int
3310 fill_fpregs(struct thread *td, struct fpreg *fpregs)
3311 {
3312
3313 KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
3314 P_SHOULDSTOP(td->td_proc),
3315 ("not suspended thread %p", td));
3316 #ifdef DEV_NPX
3317 npxgetregs(td);
3318 #else
3319 bzero(fpregs, sizeof(*fpregs));
3320 #endif
3321 #ifdef CPU_ENABLE_SSE
3322 if (cpu_fxsr)
3323 fill_fpregs_xmm(&td->td_pcb->pcb_user_save.sv_xmm,
3324 (struct save87 *)fpregs);
3325 else
3326 #endif /* CPU_ENABLE_SSE */
3327 bcopy(&td->td_pcb->pcb_user_save.sv_87, fpregs,
3328 sizeof(*fpregs));
3329 return (0);
3330 }
3331
3332 int
3333 set_fpregs(struct thread *td, struct fpreg *fpregs)
3334 {
3335
3336 #ifdef CPU_ENABLE_SSE
3337 if (cpu_fxsr)
3338 set_fpregs_xmm((struct save87 *)fpregs,
3339 &td->td_pcb->pcb_user_save.sv_xmm);
3340 else
3341 #endif /* CPU_ENABLE_SSE */
3342 bcopy(fpregs, &td->td_pcb->pcb_user_save.sv_87,
3343 sizeof(*fpregs));
3344 #ifdef DEV_NPX
3345 npxuserinited(td);
3346 #endif
3347 return (0);
3348 }
3349
3350 /*
3351 * Get machine context.
3352 */
3353 int
3354 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
3355 {
3356 struct trapframe *tp;
3357 struct segment_descriptor *sdp;
3358
3359 tp = td->td_frame;
3360
3361 PROC_LOCK(curthread->td_proc);
3362 mcp->mc_onstack = sigonstack(tp->tf_esp);
3363 PROC_UNLOCK(curthread->td_proc);
3364 mcp->mc_gs = td->td_pcb->pcb_gs;
3365 mcp->mc_fs = tp->tf_fs;
3366 mcp->mc_es = tp->tf_es;
3367 mcp->mc_ds = tp->tf_ds;
3368 mcp->mc_edi = tp->tf_edi;
3369 mcp->mc_esi = tp->tf_esi;
3370 mcp->mc_ebp = tp->tf_ebp;
3371 mcp->mc_isp = tp->tf_isp;
3372 mcp->mc_eflags = tp->tf_eflags;
3373 if (flags & GET_MC_CLEAR_RET) {
3374 mcp->mc_eax = 0;
3375 mcp->mc_edx = 0;
3376 mcp->mc_eflags &= ~PSL_C;
3377 } else {
3378 mcp->mc_eax = tp->tf_eax;
3379 mcp->mc_edx = tp->tf_edx;
3380 }
3381 mcp->mc_ebx = tp->tf_ebx;
3382 mcp->mc_ecx = tp->tf_ecx;
3383 mcp->mc_eip = tp->tf_eip;
3384 mcp->mc_cs = tp->tf_cs;
3385 mcp->mc_esp = tp->tf_esp;
3386 mcp->mc_ss = tp->tf_ss;
3387 mcp->mc_len = sizeof(*mcp);
3388 get_fpcontext(td, mcp);
3389 sdp = &td->td_pcb->pcb_fsd;
3390 mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
3391 sdp = &td->td_pcb->pcb_gsd;
3392 mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
3393 mcp->mc_flags = 0;
3394 bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
3395 return (0);
3396 }
3397
3398 /*
3399 * Set machine context.
3400 *
3401 * Note that we set only the user-modifiable flags, and we won't
3402 * touch the cs selector.
3403 */
3404 int
3405 set_mcontext(struct thread *td, const mcontext_t *mcp)
3406 {
3407 struct trapframe *tp;
3408 int eflags, ret;
3409
3410 tp = td->td_frame;
3411 if (mcp->mc_len != sizeof(*mcp))
3412 return (EINVAL);
3413 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
3414 (tp->tf_eflags & ~PSL_USERCHANGE);
3415 if ((ret = set_fpcontext(td, mcp)) == 0) {
3416 tp->tf_fs = mcp->mc_fs;
3417 tp->tf_es = mcp->mc_es;
3418 tp->tf_ds = mcp->mc_ds;
3419 tp->tf_edi = mcp->mc_edi;
3420 tp->tf_esi = mcp->mc_esi;
3421 tp->tf_ebp = mcp->mc_ebp;
3422 tp->tf_ebx = mcp->mc_ebx;
3423 tp->tf_edx = mcp->mc_edx;
3424 tp->tf_ecx = mcp->mc_ecx;
3425 tp->tf_eax = mcp->mc_eax;
3426 tp->tf_eip = mcp->mc_eip;
3427 tp->tf_eflags = eflags;
3428 tp->tf_esp = mcp->mc_esp;
3429 tp->tf_ss = mcp->mc_ss;
3430 td->td_pcb->pcb_gs = mcp->mc_gs;
3431 ret = 0;
3432 }
3433 return (ret);
3434 }
3435
3436 static void
3437 get_fpcontext(struct thread *td, mcontext_t *mcp)
3438 {
3439
3440 #ifndef DEV_NPX
3441 mcp->mc_fpformat = _MC_FPFMT_NODEV;
3442 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
3443 bzero(mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
3444 #else
3445 mcp->mc_ownedfp = npxgetregs(td);
3446 bcopy(&td->td_pcb->pcb_user_save, &mcp->mc_fpstate,
3447 sizeof(mcp->mc_fpstate));
3448 mcp->mc_fpformat = npxformat();
3449 #endif
3450 }
3451
3452 static int
3453 set_fpcontext(struct thread *td, const mcontext_t *mcp)
3454 {
3455
3456 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
3457 return (0);
3458 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
3459 mcp->mc_fpformat != _MC_FPFMT_XMM)
3460 return (EINVAL);
3461 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
3462 /* We don't care what state is left in the FPU or PCB. */
3463 fpstate_drop(td);
3464 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
3465 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
3466 #ifdef DEV_NPX
3467 #ifdef CPU_ENABLE_SSE
3468 if (cpu_fxsr)
3469 ((union savefpu *)&mcp->mc_fpstate)->sv_xmm.sv_env.
3470 en_mxcsr &= cpu_mxcsr_mask;
3471 #endif
3472 npxsetregs(td, (union savefpu *)&mcp->mc_fpstate);
3473 #endif
3474 } else
3475 return (EINVAL);
3476 return (0);
3477 }
3478
3479 static void
3480 fpstate_drop(struct thread *td)
3481 {
3482
3483 KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
3484 critical_enter();
3485 #ifdef DEV_NPX
3486 if (PCPU_GET(fpcurthread) == td)
3487 npxdrop();
3488 #endif
3489 /*
3490 * XXX force a full drop of the npx. The above only drops it if we
3491 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
3492 *
3493 * XXX I don't much like npxgetregs()'s semantics of doing a full
3494 * drop. Dropping only to the pcb matches fnsave's behaviour.
3495 * We only need to drop to !PCB_INITDONE in sendsig(). But
3496 * sendsig() is the only caller of npxgetregs()... perhaps we just
3497 * have too many layers.
3498 */
3499 curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
3500 PCB_NPXUSERINITDONE);
3501 critical_exit();
3502 }
3503
3504 int
3505 fill_dbregs(struct thread *td, struct dbreg *dbregs)
3506 {
3507 struct pcb *pcb;
3508
3509 if (td == NULL) {
3510 dbregs->dr[0] = rdr0();
3511 dbregs->dr[1] = rdr1();
3512 dbregs->dr[2] = rdr2();
3513 dbregs->dr[3] = rdr3();
3514 dbregs->dr[4] = rdr4();
3515 dbregs->dr[5] = rdr5();
3516 dbregs->dr[6] = rdr6();
3517 dbregs->dr[7] = rdr7();
3518 } else {
3519 pcb = td->td_pcb;
3520 dbregs->dr[0] = pcb->pcb_dr0;
3521 dbregs->dr[1] = pcb->pcb_dr1;
3522 dbregs->dr[2] = pcb->pcb_dr2;
3523 dbregs->dr[3] = pcb->pcb_dr3;
3524 dbregs->dr[4] = 0;
3525 dbregs->dr[5] = 0;
3526 dbregs->dr[6] = pcb->pcb_dr6;
3527 dbregs->dr[7] = pcb->pcb_dr7;
3528 }
3529 return (0);
3530 }
3531
3532 int
3533 set_dbregs(struct thread *td, struct dbreg *dbregs)
3534 {
3535 struct pcb *pcb;
3536 int i;
3537
3538 if (td == NULL) {
3539 load_dr0(dbregs->dr[0]);
3540 load_dr1(dbregs->dr[1]);
3541 load_dr2(dbregs->dr[2]);
3542 load_dr3(dbregs->dr[3]);
3543 load_dr4(dbregs->dr[4]);
3544 load_dr5(dbregs->dr[5]);
3545 load_dr6(dbregs->dr[6]);
3546 load_dr7(dbregs->dr[7]);
3547 } else {
3548 /*
3549 * Don't let an illegal value for dr7 get set. Specifically,
3550 * check for undefined settings. Setting these bit patterns
3551 * results in undefined behaviour and can lead to an unexpected
3552 * TRCTRAP.
3553 */
3554 for (i = 0; i < 4; i++) {
3555 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
3556 return (EINVAL);
3557 if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
3558 return (EINVAL);
3559 }
3560
3561 pcb = td->td_pcb;
3562
3563 /*
3564 * Don't let a process set a breakpoint that is not within the
3565 * process's address space. If a process could do this, it
3566 * could halt the system by setting a breakpoint in the kernel
3567 * (if ddb was enabled). Thus, we need to check to make sure
3568 * that no breakpoints are being enabled for addresses outside the
3569 * process's address space.
3570 *
3571 * XXX - what about when the watched area of the user's
3572 * address space is written into from within the kernel
3573 * ... wouldn't that still cause a breakpoint to be generated
3574 * from within kernel mode?
3575 */
3576
3577 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
3578 /* dr0 is enabled */
3579 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
3580 return (EINVAL);
3581 }
3582
3583 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
3584 /* dr1 is enabled */
3585 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
3586 return (EINVAL);
3587 }
3588
3589 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
3590 /* dr2 is enabled */
3591 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
3592 return (EINVAL);
3593 }
3594
3595 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
3596 /* dr3 is enabled */
3597 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
3598 return (EINVAL);
3599 }
3600
3601 pcb->pcb_dr0 = dbregs->dr[0];
3602 pcb->pcb_dr1 = dbregs->dr[1];
3603 pcb->pcb_dr2 = dbregs->dr[2];
3604 pcb->pcb_dr3 = dbregs->dr[3];
3605 pcb->pcb_dr6 = dbregs->dr[6];
3606 pcb->pcb_dr7 = dbregs->dr[7];
3607
3608 pcb->pcb_flags |= PCB_DBREGS;
3609 }
3610
3611 return (0);
3612 }
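/*
 * For illustration: DBREG_DR7_ACCESS(d, i) extracts bits 16 + 4i
 * and 17 + 4i, and DBREG_DR7_LEN(d, i) the two bits above those, so
 * a hypothetical dr[7] of 0x00020001 (breakpoint 0 locally enabled
 * with the reserved access type 0x02) is rejected with EINVAL by
 * the loop above.
 */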
3613
3614 /*
3615 * Return > 0 if a hardware breakpoint has been hit, and the
3616 * breakpoint was in user space. Return 0 otherwise.
3617 */
3618 int
3619 user_dbreg_trap(void)
3620 {
3621 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
3622 u_int32_t bp; /* breakpoint bits extracted from dr6 */
3623 int nbp; /* number of breakpoints that triggered */
3624 caddr_t addr[4]; /* breakpoint addresses */
3625 int i;
3626
3627 dr7 = rdr7();
3628 if ((dr7 & 0x000000ff) == 0) {
3629 /*
3630 * none of the local or global enable bits in the
3631 * dr7 register are set, thus the trap couldn't have
3632 * been caused by the hardware debug registers
3633 */
3634 return 0;
3635 }
3636
3637 nbp = 0;
3638 dr6 = rdr6();
3639 bp = dr6 & 0x0000000f;
3640
3641 if (!bp) {
3642 /*
3643 * None of the breakpoint bits are set, meaning this
3644 * trap was not caused by any of the debug registers
3645 */
3646 return 0;
3647 }
3648
3649 /*
3650 * at least one of the breakpoints was hit; check to see
3651 * which ones, and whether any of them are user space addresses
3652 */
3653
3654 if (bp & 0x01) {
3655 addr[nbp++] = (caddr_t)rdr0();
3656 }
3657 if (bp & 0x02) {
3658 addr[nbp++] = (caddr_t)rdr1();
3659 }
3660 if (bp & 0x04) {
3661 addr[nbp++] = (caddr_t)rdr2();
3662 }
3663 if (bp & 0x08) {
3664 addr[nbp++] = (caddr_t)rdr3();
3665 }
3666
3667 for (i = 0; i < nbp; i++) {
3668 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
3669 /*
3670 * addr[i] is in user space
3671 */
3672 return nbp;
3673 }
3674 }
3675
3676 /*
3677 * None of the breakpoints are in user space.
3678 */
3679 return 0;
3680 }
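/*
 * Example: a hypothetical dr6 of 0x5 means breakpoints 0 and 2
 * fired; addr[] then holds rdr0() and rdr2() with nbp == 2, and the
 * trap is reported as a user breakpoint only if at least one of
 * those addresses lies below VM_MAXUSER_ADDRESS.
 */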
3681
3682 #ifdef KDB
3683
3684 /*
3685 * Provide inb() and outb() as functions. They are normally only available as
3686 * inline functions and thus cannot be called from the debugger.
3687 */
3688
3689 /* silence compiler warnings */
3690 u_char inb_(u_short);
3691 void outb_(u_short, u_char);
3692
3693 u_char
3694 inb_(u_short port)
3695 {
3696 return inb(port);
3697 }
3698
3699 void
3700 outb_(u_short port, u_char data)
3701 {
3702 outb(port, data);
3703 }
3704
3705 #endif /* KDB */