/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.4/sys/i386/i386/machdep.c 239881 2012-08-29 20:50:01Z jhb $");

#include "opt_apic.h"
#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
#include "opt_xbox.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#include <isa/rtc.h>

#include <net/netisr.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/vm86.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/smp.h>
#endif

#ifdef DEV_ISA
#include <i386/isa/icu.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>

int arch_i386_is_xbox = 0;
uint32_t arch_i386_xbox_memsize = 0;
#endif

#ifdef XEN
/* XEN includes */
#include <machine/xen/xen-os.h>
#include <xen/hypervisor.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>
#include <xen/xen_intr.h>

void Xhypervisor_callback(void);
void failsafe_callback(void);

extern trap_info_t trap_table[];
struct proc_ldt default_proc_ldt;
extern int init_first;
int running_xen = 1;
extern unsigned long physfree;
#endif /* XEN */

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
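/*
 * Illustrative note (not in the original source): __curthread() is
 * typically implemented as a single "movl %fs:0,%reg" load, which is
 * only correct if pc_curthread really is the first pcpu field -- hence
 * the assertion above.
 */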

extern void init386(int first);
extern void dblfault_handler(void);

extern void printcpuinfo(void);		/* XXX header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
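/*
 * Illustrative note (not in the original source): CS_SECURE() accepts a
 * %cs selector only if its privilege level is SEL_UPL (user, ring 3);
 * EFL_SECURE() accepts a new eflags value only if every bit outside
 * PSL_USERCHANGE is identical to the old value.
 */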

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static int  set_fpcontext(struct thread *td, const mcontext_t *mcp);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

/* Intel ICH registers */
#define ICH_PMBASE	0x400
#define ICH_SMI_EN	(ICH_PMBASE + 0x30)

int	_udatasel, _ucodesel;
u_int	basemem;

int cold = 1;

#ifdef COMPAT_43
static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif
#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif

long Maxmem = 0;
long realmem = 0;

#ifdef PAE
FEATURE(pae, "Physical Address Extensions");
#endif

/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
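/*
 * Worked example (illustrative values): if VM_PHYSSEG_MAX were 17,
 * PHYSMAP_SIZE would be 32, i.e. room for 16 base/end pairs; splitting
 * the pair that straddles the ISA DMA limit then yields at most 17
 * PHYSSEG entries, matching the rule described above.
 */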

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

struct mtx icu_lock;

struct mem_range_softc mem_range_softc;

static void
cpu_startup(dummy)
	void *dummy;
{
	uintmax_t memsize;
	char *sysenv;

	/*
	 * On MacBooks, we need to prevent the legacy USB circuit from
	 * generating an SMI#, because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable register) of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	realmem = Maxmem;

	/*
	 * Display physical memory if SMBIOS reports a reasonable amount.
	 */
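	/*
	 * Illustrative note: smbios.memory.enabled is expressed in
	 * kilobytes, so the "<< 10" below converts it to bytes, e.g.
	 * "2097152" (2 GB in kB) becomes 2147483648.
	 */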
	memsize = 0;
	sysenv = getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
		freeenv(sysenv);
	}
	if (memsize < ptoa((uintmax_t)cnt.v_free_count))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
#ifndef XEN
	cpu_setregs();
#endif
}

/*
 * Send an interrupt to a process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
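/*
 * Sketch of the resulting user setup (illustrative): the new %esp points
 * at a struct (o)sigframe holding the handler arguments and the saved
 * context, while the new %eip points at the sigcode trampoline copied
 * out just below PS_STRINGS, which invokes the handler and then calls
 * sigreturn() to restore the saved context.
 */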
#ifdef COMPAT_43
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
		sf.sf_addr = 0;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
	    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	bzero(sf.sf_uc.uc_mcontext.__spare__,
	    sizeof(sf.sf_uc.uc_mcontext.__spare__));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_FREEBSD4 */

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	struct segment_descriptor *sdp;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext);
	fpstate_drop(td);
	/*
	 * Unconditionally fill the fsbase and gsbase into the mcontext.
	 */
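	/*
	 * Illustrative note: a segment descriptor splits its 32-bit base
	 * across sd_lobase (bits 23:0) and sd_hibase (bits 31:24), so the
	 * shift-and-or below reassembles the flat base address.
	 */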
	sdp = &td->td_pcb->pcb_fsd;
	sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sf.sf_uc.uc_mcontext.mc_flags = 0;
	bzero(sf.sf_uc.uc_mcontext.mc_spare2,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare2));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_esp - sizeof(struct sigframe);
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
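	/* e.g. (illustrative) sp == 0xbfbfedf7 above yields sfp == 0xbfbfedf0. */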

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
#ifdef COMPAT_43
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	int eflags, error;
	ksiginfo_t ksi;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
	    SIGPROCMASK_OLD);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * MPSAFE
 */
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
{
	struct ucontext4 uc;
	struct trapframe *regs;
	struct ucontext4 *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			uprintf("pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}
#endif /* COMPAT_FREEBSD4 */

/*
 * MPSAFE
 */
int
sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct trapframe *regs;
	ucontext_t *ucp;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	register_t reg;
	uint64_t tsc1, tsc2;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if (!tsc_present)
		return (EOPNOTSUPP);

	/* If we're booting, trust the rate calibrated moments ago. */
	if (cold) {
		*rate = tsc_freq;
		return (0);
	}

#ifdef SMP
	/* Schedule ourselves on the indicated cpu. */
	thread_lock(curthread);
	sched_bind(curthread, cpu_id);
	thread_unlock(curthread);
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	tsc1 = rdtsc();
	DELAY(1000);
	tsc2 = rdtsc();
	intr_restore(reg);

#ifdef SMP
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
#endif

	/*
	 * Calculate the difference in readings, convert to Hz, and
	 * subtract 0.5% of the total.  Empirical testing has shown that
	 * overhead in DELAY() works out to approximately this value.
	 */
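	/*
	 * Worked example (illustrative): DELAY(1000) spins for ~1 ms, so
	 * tsc2 - tsc1 is ticks per millisecond; multiplying by 1000 gives
	 * Hz, and tsc2 * 5 is exactly 0.5% of tsc2 * 1000.
	 */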
	tsc2 -= tsc1;
	*rate = tsc2 * 1000 - tsc2 * 5;
	return (0);
}

#ifdef XEN

void
cpu_halt(void)
{
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}

int scheduler_running;

static void
cpu_idle_hlt(int busy)
{

	scheduler_running = 1;
	enable_intr();
	idle_block();
}

#else
/*
 * Shut down the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		halt();
}

#endif

void (*cpu_idle_hook)(void) = NULL;	/* ACPI idle hook. */
static int	cpu_ident_amdc1e = 0;	/* AMD C1E supported. */
static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");

#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2

static void
cpu_idle_acpi(int busy)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	/* See comments in cpu_idle_hlt(). */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook();
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}

#ifndef XEN
static void
cpu_idle_hlt(int busy)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	/*
	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption.  If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found.  Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti.  This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt.  Finally, note that on
	 * x86 this works because sti sets IF at once but defers
	 * interrupt recognition until the instruction following it
	 * has begun, so a pending interrupt is only delivered once
	 * hlt is executing and can therefore wake it.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}
#endif

/*
 * MWAIT cpu power states.  Lower 4 bits are sub-states.
 */
#define	MWAIT_C0	0xf0
#define	MWAIT_C1	0x00
#define	MWAIT_C2	0x10
#define	MWAIT_C3	0x20
#define	MWAIT_C4	0x30
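/*
 * Illustrative note: the MWAIT hint passed in %eax encodes
 * (target C-state - 1) mod 16 in bits 7:4, which is why C0 is 0xf0,
 * C1 is 0x00, and C2 is 0x10 above; bits 3:0 select a sub-state.
 */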

static void
cpu_idle_mwait(int busy)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_MWAIT;

	/* See comments in cpu_idle_hlt(). */
	disable_intr();
	if (sched_runnable()) {
		enable_intr();
		*state = STATE_RUNNING;
		return;
	}
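	/*
	 * Arm the monitor on our per-CPU state word, then re-check it:
	 * if cpu_idle_wakeup() on another CPU has already flipped it to
	 * STATE_RUNNING, skip the mwait and just re-enable interrupts.
	 */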
	cpu_monitor(state, 0, 0);
	if (*state == STATE_MWAIT)
		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
	else
		enable_intr();
	*state = STATE_RUNNING;
}

static void
cpu_idle_spin(int busy)
{
	int *state;
	int i;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_RUNNING;

	/*
	 * The sched_runnable() check is racy, but the loop below re-tests
	 * it on every iteration, so missing it once has little impact if
	 * any (and is much better than not checking at all).
	 */
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}
}

/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * Reference:
 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 *   #32559 revision 3.00+
 */
#define	MSR_AMDK8_IPM		0xc0010055
#define	AMDK8_SMIONCMPHALT	(1ULL << 27)
#define	AMDK8_C1EONCMPHALT	(1ULL << 28)
#define	AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)
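/*
 * Illustrative note: AMDK8_CMPHALT works out to 0x18000000ULL, so the
 * single wrmsr() in cpu_idle() below clears both bits at once.
 */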

static void
cpu_probe_amdc1e(void)
{

	/*
	 * Detect C1E capability, which is present mostly on recent
	 * dual-core (and later) K8-family CPUs.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
	}
}

#ifdef XEN
void (*cpu_idle_fn)(int) = cpu_idle_hlt;
#else
void (*cpu_idle_fn)(int) = cpu_idle_acpi;
#endif

void
cpu_idle(int busy)
{
#ifndef XEN
	uint64_t msr;
#endif

#if defined(SMP) && !defined(XEN)
	if (mp_grab_cpu_hlt())
		return;
#endif
#ifndef XEN
	/* If we are busy - try to use fast methods. */
	if (busy) {
		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
			cpu_idle_mwait(busy);
			return;
		}
	}

	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_ident_amdc1e) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
	}
#endif

	/* Call main idle method. */
	cpu_idle_fn(busy);
}

int
cpu_idle_wakeup(int cpu)
{
	struct pcpu *pcpu;
	int *state;

	pcpu = pcpu_find(cpu);
	state = (int *)pcpu->pc_monitorbuf;
	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
	if (*state == STATE_SLEEPING)
		return (0);
	if (*state == STATE_MWAIT)
		*state = STATE_RUNNING;
	return (1);
}

/*
 * Ordered by speed/power consumption.
 */
struct {
	void	*id_fn;
	char	*id_name;
} idle_tbl[] = {
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_hlt, "hlt" },
	{ cpu_idle_acpi, "acpi" },
	{ NULL, NULL }
};

static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		p += sprintf(p, "%s, ", idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");

static int
idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	int error;
	char *p;
	int i;

	p = "unknown";
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		if (strcmp(idle_tbl[i].id_name, buf))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");
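/*
 * Usage (illustrative session):
 *
 *	# sysctl machdep.idle_available
 *	machdep.idle_available: spin, mwait, hlt, acpi,
 *	# sysctl machdep.idle=hlt
 *
 * Selecting "mwait" or "acpi" fails with EINVAL when the CPU lacks
 * MONITOR/MWAIT support or no ACPI idle hook is installed.
 */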

/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(td, entry, stack, ps_strings)
	struct thread *td;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;
	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);
	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();

	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

u_long bootdev;		/* not a struct cdev *- encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;

#ifdef XEN
union descriptor *gdt;
union descriptor *ldt;
#else
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#endif
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */
struct mtx dt_lock;			/* lock for GDT and LDT */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern vm_offset_t proc0kstack;

/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
	/* GNULL_SEL	0 Null Descriptor */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0x0,
	  .ssd_type = 0,
	  .ssd_dpl = SEL_KPL,
	  .ssd_p = 0,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_KPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GUFS_SEL	2 %fs Descriptor for user */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GUGS_SEL	3 %gs Descriptor for user */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GCODE_SEL	4 Code Descriptor for kernel */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMERA,
	  .ssd_dpl = SEL_KPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GDATA_SEL	5 Data Descriptor for kernel */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_KPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GUCODE_SEL	6 Code Descriptor for user */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMERA,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GUDATA_SEL	7 Data Descriptor for user */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
	{ .ssd_base = 0x400,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_KPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
#ifndef XEN
	/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
	{ .ssd_base = 0x0,
	  .ssd_limit = sizeof(struct i386tss)-1,
	  .ssd_type = SDT_SYS386TSS,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* GLDT_SEL	10 LDT Descriptor */
	{ .ssd_base = (int) ldt,
	  .ssd_limit = sizeof(ldt)-1,
	  .ssd_type = SDT_SYSLDT,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* GUSERLDT_SEL	11 User LDT Descriptor per process */
	{ .ssd_base = (int) ldt,
	  .ssd_limit = (512 * sizeof(union descriptor)-1),
	  .ssd_type = SDT_SYSLDT,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* GPANIC_SEL	12 Panic Tss Descriptor */
	{ .ssd_base = (int) &dblfault_tss,
	  .ssd_limit = sizeof(struct i386tss)-1,
	  .ssd_type = SDT_SYS386TSS,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
	{ .ssd_base = 0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMERA,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 1 },
	/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
	{ .ssd_base = 0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMERA,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 1 },
	/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
	{ .ssd_base = 0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
	{ .ssd_base = 0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 1 },
	/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
	{ .ssd_base = 0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = 0,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 1 },
	/* GNDIS_SEL	18 NDIS Descriptor */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0x0,
	  .ssd_type = 0,
	  .ssd_dpl = 0,
	  .ssd_p = 0,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
#endif /* !XEN */
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0x0,
	  .ssd_type = 0,
	  .ssd_dpl = 0,
	  .ssd_p = 0,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* Null Descriptor - overwritten by call gate */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0x0,
	  .ssd_type = 0,
	  .ssd_dpl = 0,
	  .ssd_p = 0,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* Null Descriptor - overwritten by call gate */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0x0,
	  .ssd_type = 0,
	  .ssd_dpl = 0,
	  .ssd_p = 0,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* Code Descriptor for user */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMERA,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
	/* Null Descriptor - overwritten by call gate */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0x0,
	  .ssd_type = 0,
	  .ssd_dpl = 0,
	  .ssd_p = 0,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 0,
	  .ssd_gran = 0 },
	/* Data Descriptor for user */
	{ .ssd_base = 0x0,
	  .ssd_limit = 0xfffff,
	  .ssd_type = SDT_MEMRWA,
	  .ssd_dpl = SEL_UPL,
	  .ssd_p = 1,
	  .ssd_xx = 0, .ssd_xx1 = 0,
	  .ssd_def32 = 1,
	  .ssd_gran = 1 },
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func) >> 16;
}
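/*
 * Typical use (illustrative; the actual gate types are chosen in
 * init386()):
 *
 *	setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
 *	    GSEL(GCODE_SEL, SEL_KPL));
 *
 * installs the divide-error handler as a kernel-only 386 trap gate.
 */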

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm),
#ifdef KDTRACE_HOOKS
	IDTVEC(dtrace_ret),
#endif
	IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = (ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}
}
1935
1936 /* Show privileged registers. */
1937 DB_SHOW_COMMAND(sysregs, db_show_sysregs)
1938 {
1939 uint64_t idtr, gdtr;
1940
1941 idtr = ridt();
1942 db_printf("idtr\t0x%08x/%04x\n",
1943 (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
1944 gdtr = rgdt();
1945 db_printf("gdtr\t0x%08x/%04x\n",
1946 (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
1947 db_printf("ldtr\t0x%04x\n", rldt());
1948 db_printf("tr\t0x%04x\n", rtr());
1949 db_printf("cr0\t0x%08x\n", rcr0());
1950 db_printf("cr2\t0x%08x\n", rcr2());
1951 db_printf("cr3\t0x%08x\n", rcr3());
1952 db_printf("cr4\t0x%08x\n", rcr4());
1953 }
1954 #endif
1955
1956 void
1957 sdtossd(sd, ssd)
1958 struct segment_descriptor *sd;
1959 struct soft_segment_descriptor *ssd;
1960 {
1961 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1962 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1963 ssd->ssd_type = sd->sd_type;
1964 ssd->ssd_dpl = sd->sd_dpl;
1965 ssd->ssd_p = sd->sd_p;
1966 ssd->ssd_def32 = sd->sd_def32;
1967 ssd->ssd_gran = sd->sd_gran;
1968 }
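/*
 * Illustrative sketch (not from the original file): the inverse of the
 * base reconstruction in sdtossd() above, showing how a 32-bit base is
 * split into the 24-bit sd_lobase and 8-bit sd_hibase descriptor fields.
 * Hypothetical helper, kept under #if 0 so it is never compiled.
 */
#if 0
static void
example_split_base(u_int base, struct segment_descriptor *sd)
{

	sd->sd_lobase = base & 0xffffff;	/* low 24 bits */
	sd->sd_hibase = base >> 24;		/* high 8 bits */
}
#endif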
1969
1970 #ifndef XEN
1971 static int
1972 add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
1973 {
1974 int i, insert_idx, physmap_idx;
1975
1976 physmap_idx = *physmap_idxp;
1977
1978 if (boothowto & RB_VERBOSE)
1979 printf("SMAP type=%02x base=%016llx len=%016llx\n",
1980 smap->type, smap->base, smap->length);
1981
1982 if (smap->type != SMAP_TYPE_MEMORY)
1983 return (1);
1984
1985 if (smap->length == 0)
1986 return (1);
1987
1988 #ifndef PAE
1989 if (smap->base > 0xffffffff) {
1990 printf("%uK of memory above 4GB ignored\n",
1991 (u_int)(smap->length / 1024));
1992 return (1);
1993 }
1994 #endif
1995
1996 /*
1997 * Find insertion point while checking for overlap. Start off by
1998 * assuming the new entry will be added to the end.
1999 */
2000 insert_idx = physmap_idx + 2;
2001 for (i = 0; i <= physmap_idx; i += 2) {
2002 if (smap->base < physmap[i + 1]) {
2003 if (smap->base + smap->length <= physmap[i]) {
2004 insert_idx = i;
2005 break;
2006 }
2007 if (boothowto & RB_VERBOSE)
2008 printf(
2009 "Overlapping memory regions, ignoring second region\n");
2010 return (1);
2011 }
2012 }
2013
2014 /* See if we can prepend to the next entry. */
2015 if (insert_idx <= physmap_idx &&
2016 smap->base + smap->length == physmap[insert_idx]) {
2017 physmap[insert_idx] = smap->base;
2018 return (1);
2019 }
2020
2021 /* See if we can append to the previous entry. */
2022 if (insert_idx > 0 && smap->base == physmap[insert_idx - 1]) {
2023 physmap[insert_idx - 1] += smap->length;
2024 return (1);
2025 }
2026
2027 physmap_idx += 2;
2028 *physmap_idxp = physmap_idx;
2029 if (physmap_idx == PHYSMAP_SIZE) {
2030 printf(
2031 "Too many segments in the physical address map, giving up\n");
2032 return (0);
2033 }
2034
2035 /*
2036 * Move the last 'N' entries down to make room for the new
2037 * entry if needed.
2038 */
2039 for (i = physmap_idx; i > insert_idx; i -= 2) {
2040 physmap[i] = physmap[i - 2];
2041 physmap[i + 1] = physmap[i - 1];
2042 }
2043
2044 /* Insert the new entry. */
2045 physmap[insert_idx] = smap->base;
2046 physmap[insert_idx + 1] = smap->base + smap->length;
2047 return (1);
2048 }
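/*
 * Illustrative sketch (not from the original file): physmap[] stores
 * base/bound pairs, so pair i occupies slots [2i] and [2i + 1] and
 * physmap_idx points at the base slot of the last pair.  The toy below
 * mirrors the append-merge case of add_smap_entry() on a caller-supplied
 * array; names are hypothetical and the block is kept under #if 0.
 */
#if 0
static void
example_physmap_append(vm_paddr_t *map, int *idxp, vm_paddr_t base,
    vm_paddr_t length)
{

	/* Assumes map[*idxp] and map[*idxp + 1] form the last pair. */
	if (map[*idxp + 1] == base) {
		/* New region starts where the last one ends: merge. */
		map[*idxp + 1] += length;
	} else {
		/* Otherwise start a new base/bound pair. */
		*idxp += 2;
		map[*idxp] = base;
		map[*idxp + 1] = base + length;
	}
}
#endif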
2049
2050 static void
2051 basemem_setup(void)
2052 {
2053 vm_paddr_t pa;
2054 pt_entry_t *pte;
2055 int i;
2056
2057 if (basemem > 640) {
2058 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
2059 basemem);
2060 basemem = 640;
2061 }
2062
2063 /*
2064 * XXX if biosbasemem is now < 640, there is a `hole'
2065 * between the end of base memory and the start of
2066 * ISA memory. The hole may be empty or it may
2067 * contain BIOS code or data. Map it read/write so
2068 * that the BIOS can write to it. (Memory from 0 to
2069 * the physical end of the kernel is mapped read-only
2070 * to begin with and then parts of it are remapped.
2071 * The parts that aren't remapped form holes that
2072 * remain read-only and are unused by the kernel.
2073 * The base memory area is below the physical end of
2074 * the kernel and right now forms a read-only hole.
2075 * The part of it from PAGE_SIZE to
2076 * (trunc_page(biosbasemem * 1024) - 1) will be
2077 * remapped and used by the kernel later.)
2078 *
2079 * This code is similar to the code used in
2080 * pmap_mapdev, but since no memory needs to be
2081 * allocated we simply change the mapping.
2082 */
2083 for (pa = trunc_page(basemem * 1024);
2084 pa < ISA_HOLE_START; pa += PAGE_SIZE)
2085 pmap_kenter(KERNBASE + pa, pa);
2086
2087 /*
2088 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
2089 * the vm86 page table so that vm86 can scribble on them using
2090 * the vm86 map too. XXX: why 2 ways for this and only 1 way for
2091 * page 0, at least as initialized here?
2092 */
2093 pte = (pt_entry_t *)vm86paddr;
2094 for (i = basemem / 4; i < 160; i++)
2095 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
2096 }
2097 #endif
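/*
 * Illustrative sketch (not from the original file): the PTEs written into
 * the vm86 page table by basemem_setup() are identity mappings, i.e. the
 * frame number is the loop index itself with the valid, writable and user
 * bits OR'd in.  Hypothetical helper, kept under #if 0.
 */
#if 0
static pt_entry_t
example_vm86_pte(int i)
{

	/* page i maps physical address i * PAGE_SIZE, user-writable */
	return ((i << PAGE_SHIFT) | PG_V | PG_RW | PG_U);
}
#endif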
2098
2099 /*
2100 * Populate the (physmap) array with base/bound pairs describing the
2101 * available physical memory in the system, then test this memory and
2102 * build the phys_avail array describing the actually-available memory.
2103 *
2104 * If we cannot accurately determine the physical memory map, then use
2105 * the value from the 0xE801 call and, failing that, the RTC.
2106 *
2107 * Total memory size may be set by the kernel environment variable
2108 * hw.physmem or the compile-time define MAXMEM.
2109 *
2110 * XXX first should be vm_paddr_t.
2111 */
2112 static void
2113 getmemsize(int first)
2114 {
2115 int has_smap, off, physmap_idx, pa_indx, da_indx;
2116 u_long physmem_tunable, memtest;
2117 vm_paddr_t physmap[PHYSMAP_SIZE];
2118 pt_entry_t *pte;
2119 quad_t dcons_addr, dcons_size;
2120 #ifndef XEN
2121 int hasbrokenint12, i;
2122 u_int extmem;
2123 struct vm86frame vmf;
2124 struct vm86context vmc;
2125 vm_paddr_t pa;
2126 struct bios_smap *smap, *smapbase, *smapend;
2127 u_int32_t smapsize;
2128 caddr_t kmdp;
2129 #endif
2130
2131 has_smap = 0;
2132 #if defined(XEN)
2133 Maxmem = xen_start_info->nr_pages - init_first;
2134 physmem = Maxmem;
2135 basemem = 0;
2136 physmap[0] = init_first << PAGE_SHIFT;
2137 physmap[1] = ptoa(Maxmem) - round_page(msgbufsize);
2138 physmap_idx = 0;
2139 #else
2140 #ifdef XBOX
2141 if (arch_i386_is_xbox) {
2142 /*
2143 * We queried the memory size before, so chop off 4MB for
2144 * the framebuffer and inform the OS of this.
2145 */
2146 physmap[0] = 0;
2147 physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE;
2148 physmap_idx = 0;
2149 goto physmap_done;
2150 }
2151 #endif
2152 bzero(&vmf, sizeof(vmf));
2153 bzero(physmap, sizeof(physmap));
2154 basemem = 0;
2155
2156 /*
2157 * Check if the loader supplied an SMAP memory map. If so,
2158 * use that and do not make any VM86 calls.
2159 */
2160 physmap_idx = 0;
2161 smapbase = NULL;
2162 kmdp = preload_search_by_type("elf kernel");
2163 if (kmdp == NULL)
2164 kmdp = preload_search_by_type("elf32 kernel");
2165 if (kmdp != NULL)
2166 smapbase = (struct bios_smap *)preload_search_info(kmdp,
2167 MODINFO_METADATA | MODINFOMD_SMAP);
2168 if (smapbase != NULL) {
2169 /*
2170 * subr_module.c says:
2171 * "Consumer may safely assume that size value precedes data."
2172 * i.e., a u_int32_t size field immediately precedes the SMAP data.
2173 */
2174 smapsize = *((u_int32_t *)smapbase - 1);
2175 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
2176 has_smap = 1;
2177
2178 for (smap = smapbase; smap < smapend; smap++)
2179 if (!add_smap_entry(smap, physmap, &physmap_idx))
2180 break;
2181 goto have_smap;
2182 }
2183
2184 /*
2185 * Some newer BIOSes have a broken INT 12H implementation
2186 * which causes a kernel panic immediately. In this case, we
2187 * need to use the SMAP to determine the base memory size.
2188 */
2189 hasbrokenint12 = 0;
2190 TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
2191 if (hasbrokenint12 == 0) {
2192 /* Use INT12 to determine base memory size. */
2193 vm86_intcall(0x12, &vmf);
2194 basemem = vmf.vmf_ax;
2195 basemem_setup();
2196 }
2197
2198 /*
2199 * Fetch the memory map with INT 15:E820. Map page 1 R/W into
2200 * the kernel page table so we can use it as a buffer. The
2201 * kernel will unmap this page later.
2202 */
2203 pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
2204 vmc.npages = 0;
2205 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
2206 vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
2207
2208 vmf.vmf_ebx = 0;
2209 do {
2210 vmf.vmf_eax = 0xE820;
2211 vmf.vmf_edx = SMAP_SIG;
2212 vmf.vmf_ecx = sizeof(struct bios_smap);
2213 i = vm86_datacall(0x15, &vmf, &vmc);
2214 if (i || vmf.vmf_eax != SMAP_SIG)
2215 break;
2216 has_smap = 1;
2217 if (!add_smap_entry(smap, physmap, &physmap_idx))
2218 break;
2219 } while (vmf.vmf_ebx != 0);
2220
2221 have_smap:
2222 /*
2223 * If we didn't fetch the "base memory" size from INT12,
2224 * figure it out from the SMAP (or just guess).
2225 */
2226 if (basemem == 0) {
2227 for (i = 0; i <= physmap_idx; i += 2) {
2228 if (physmap[i] == 0x00000000) {
2229 basemem = physmap[i + 1] / 1024;
2230 break;
2231 }
2232 }
2233
2234 /* XXX: If we couldn't find basemem from SMAP, just guess. */
2235 if (basemem == 0)
2236 basemem = 640;
2237 basemem_setup();
2238 }
2239
2240 if (physmap[1] != 0)
2241 goto physmap_done;
2242
2243 /*
2244 * If we failed to find an SMAP, figure out the extended
2245 * memory size. We will then build a simple memory map with
2246 * two segments, one for "base memory" and the second for
2247 * "extended memory". Note that "extended memory" starts at a
2248 * physical address of 1MB and that both basemem and extmem
2249 * are in units of 1KB.
2250 *
2251 * First, try to fetch the extended memory size via INT 15:E801.
2252 */
2253 vmf.vmf_ax = 0xE801;
2254 if (vm86_intcall(0x15, &vmf) == 0) {
2255 extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
2256 } else {
2257 /*
2258 * If INT15:E801 fails, this is our last ditch effort
2259 * to determine the extended memory size. Currently
2260 * we prefer the RTC value over INT15:88.
2261 */
2262 #if 0
2263 vmf.vmf_ah = 0x88;
2264 vm86_intcall(0x15, &vmf);
2265 extmem = vmf.vmf_ax;
2266 #else
2267 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
2268 #endif
2269 }
2270
2271 /*
2272 * Special hack for chipsets that still remap the 384k hole when
2273 * there's 16MB of memory - this really confuses people who
2274 * are trying to use bus mastering ISA controllers with the
2275 * "16MB limit"; they only have 16MB, but the remapping puts
2276 * them beyond the limit.
2277 *
2278 * If extended memory is between 15-16MB (16-17MB phys address range),
2279 * chop it to 15MB.
2280 */
2281 if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
2282 extmem = 15 * 1024;
2283
2284 physmap[0] = 0;
2285 physmap[1] = basemem * 1024;
2286 physmap_idx = 2;
2287 physmap[physmap_idx] = 0x100000;
2288 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
2289
2290 physmap_done:
2291 #endif
2292 /*
2293 * Now, physmap contains a map of physical memory.
2294 */
2295
2296 #ifdef SMP
2297 /* make hole for AP bootstrap code */
2298 physmap[1] = mp_bootaddress(physmap[1]);
2299 #endif
2300
2301 /*
2302 * Maxmem isn't the "maximum memory"; it's one larger than the
2303 * highest page of the physical address space. It should be
2304 * called something like "Maxphyspage". We may adjust this
2305 * based on ``hw.physmem'' and the results of the memory test.
2306 */
2307 Maxmem = atop(physmap[physmap_idx + 1]);
2308
2309 #ifdef MAXMEM
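	/* MAXMEM is given in kilobytes; dividing by 4 yields 4KB pages. */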
2310 Maxmem = MAXMEM / 4;
2311 #endif
2312
2313 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
2314 Maxmem = atop(physmem_tunable);
2315
2316 /*
2317 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
2318 * the amount of memory in the system.
2319 */
2320 if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
2321 Maxmem = atop(physmap[physmap_idx + 1]);
2322
2323 /*
2324 * By default enable the memory test on real hardware, and disable
2325 * it if we appear to be running in a VM. This avoids touching all
2326 * pages unnecessarily, which doesn't matter on real hardware but is
2327 * bad for shared VM hosts. Use a general name so that
2328 * one could eventually do more with the code than just disable it.
2329 */
2330 memtest = (vm_guest > VM_GUEST_NO) ? 0 : 1;
2331 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
2332
2333 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
2334 (boothowto & RB_VERBOSE))
2335 printf("Physical memory use set to %ldK\n", Maxmem * 4);
2336
2337 /*
2338 * If Maxmem has been increased beyond what the system has detected,
2339 * extend the last memory segment to the new limit.
2340 */
2341 if (atop(physmap[physmap_idx + 1]) < Maxmem)
2342 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
2343
2344 /* call pmap initialization to make new kernel address space */
2345 pmap_bootstrap(first);
2346
2347 /*
2348 * Size up each available chunk of physical memory.
2349 */
2350 physmap[0] = PAGE_SIZE; /* mask off page 0 */
2351 pa_indx = 0;
2352 da_indx = 1;
2353 phys_avail[pa_indx++] = physmap[0];
2354 phys_avail[pa_indx] = physmap[0];
2355 dump_avail[da_indx] = physmap[0];
2356 pte = CMAP1;
2357
2358 /*
2359 * Get dcons buffer address
2360 */
2361 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
2362 getenv_quad("dcons.size", &dcons_size) == 0)
2363 dcons_addr = 0;
2364
2365 #ifndef XEN
2366 /*
2367 * physmap is in bytes, so when converting to page boundaries,
2368 * round up the start address and round down the end address.
2369 */
2370 for (i = 0; i <= physmap_idx; i += 2) {
2371 vm_paddr_t end;
2372
2373 end = ptoa((vm_paddr_t)Maxmem);
2374 if (physmap[i + 1] < end)
2375 end = trunc_page(physmap[i + 1]);
2376 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
2377 int tmp, page_bad, full;
2378 int *ptr = (int *)CADDR1;
2379
2380 full = FALSE;
2381 /*
2382 * block out kernel memory as not available.
2383 */
2384 if (pa >= KERNLOAD && pa < first)
2385 goto do_dump_avail;
2386
2387 /*
2388 * block out dcons buffer
2389 */
2390 if (dcons_addr > 0
2391 && pa >= trunc_page(dcons_addr)
2392 && pa < dcons_addr + dcons_size)
2393 goto do_dump_avail;
2394
2395 page_bad = FALSE;
2396 if (memtest == 0)
2397 goto skip_memtest;
2398
2399 /*
2400 * map page into kernel: valid, read/write, non-cacheable
2401 */
2402 *pte = pa | PG_V | PG_RW | PG_N;
2403 invltlb();
2404
2405 tmp = *(int *)ptr;
2406 /*
2407 * Test for alternating 1's and 0's
2408 */
2409 *(volatile int *)ptr = 0xaaaaaaaa;
2410 if (*(volatile int *)ptr != 0xaaaaaaaa)
2411 page_bad = TRUE;
2412 /*
2413 * Test for alternating 0's and 1's
2414 */
2415 *(volatile int *)ptr = 0x55555555;
2416 if (*(volatile int *)ptr != 0x55555555)
2417 page_bad = TRUE;
2418 /*
2419 * Test for all 1's
2420 */
2421 *(volatile int *)ptr = 0xffffffff;
2422 if (*(volatile int *)ptr != 0xffffffff)
2423 page_bad = TRUE;
2424 /*
2425 * Test for all 0's
2426 */
2427 *(volatile int *)ptr = 0x0;
2428 if (*(volatile int *)ptr != 0x0)
2429 page_bad = TRUE;
2430 /*
2431 * Restore original value.
2432 */
2433 *(int *)ptr = tmp;
2434
2435 skip_memtest:
2436 /*
2437 * Adjust array of valid/good pages.
2438 */
2439 if (page_bad == TRUE)
2440 continue;
2441 /*
2442 * If this good page is a continuation of the
2443 * previous set of good pages, then just increase
2444 * the end pointer. Otherwise start a new chunk.
2445 * Note that the stored "end" points one past the
2446 * last valid byte, making the range >= start and < end.
2447 * If we're also doing a speculative memory
2448 * test and we are at or past the end, bump up Maxmem
2449 * so that we keep going. The first bad page
2450 * will terminate the loop.
2451 */
2452 if (phys_avail[pa_indx] == pa) {
2453 phys_avail[pa_indx] += PAGE_SIZE;
2454 } else {
2455 pa_indx++;
2456 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
2457 printf(
2458 "Too many holes in the physical address space, giving up\n");
2459 pa_indx--;
2460 full = TRUE;
2461 goto do_dump_avail;
2462 }
2463 phys_avail[pa_indx++] = pa; /* start */
2464 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
2465 }
2466 physmem++;
2467 do_dump_avail:
2468 if (dump_avail[da_indx] == pa) {
2469 dump_avail[da_indx] += PAGE_SIZE;
2470 } else {
2471 da_indx++;
2472 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2473 da_indx--;
2474 goto do_next;
2475 }
2476 dump_avail[da_indx++] = pa; /* start */
2477 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2478 }
2479 do_next:
2480 if (full)
2481 break;
2482 }
2483 }
2484 *pte = 0;
2485 invltlb();
2486 #else
2487 phys_avail[0] = physfree;
2488 phys_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;
2489 dump_avail[0] = 0;
2490 dump_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;
2491
2492 #endif
2493
2494 /*
2495 * XXX
2496 * The last chunk must contain at least one page plus the message
2497 * buffer to avoid complicating other code (message buffer address
2498 * calculation, etc.).
2499 */
2500 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2501 round_page(msgbufsize) >= phys_avail[pa_indx]) {
2502 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2503 phys_avail[pa_indx--] = 0;
2504 phys_avail[pa_indx--] = 0;
2505 }
2506
2507 Maxmem = atop(phys_avail[pa_indx]);
2508
2509 /* Trim off space for the message buffer. */
2510 phys_avail[pa_indx] -= round_page(msgbufsize);
2511
2512 /* Map the message buffer. */
2513 for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
2514 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
2515 off);
2516
2517 PT_UPDATES_FLUSH();
2518 }
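/*
 * Illustrative sketch (not from the original file): getmemsize() reserves
 * the message buffer by lowering the bound of the last phys_avail chunk,
 * so the buffer occupies the highest page-rounded region of usable RAM.
 * Hypothetical helper, kept under #if 0.
 */
#if 0
static vm_paddr_t
example_msgbuf_base(vm_paddr_t chunk_end, u_long msgbuf_bytes)
{

	/* the buffer starts here; pages above it leave phys_avail */
	return (chunk_end - round_page(msgbuf_bytes));
}
#endif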
2519
2520 #ifdef XEN
2521 #define MTOPSIZE (1<<(14 + PAGE_SHIFT))
2522
2523 void
2524 init386(first)
2525 int first;
2526 {
2527 unsigned long gdtmachpfn;
2528 int error, gsel_tss, metadata_missing, x, pa;
2529 size_t kstack0_sz;
2530 struct pcpu *pc;
2531 struct callback_register event = {
2532 .type = CALLBACKTYPE_event,
2533 .address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)Xhypervisor_callback },
2534 };
2535 struct callback_register failsafe = {
2536 .type = CALLBACKTYPE_failsafe,
2537 .address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback },
2538 };
2539
2540 thread0.td_kstack = proc0kstack;
2541 thread0.td_kstack_pages = KSTACK_PAGES;
2542 kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
2543 thread0.td_pcb = (struct pcb *)(thread0.td_kstack + kstack0_sz) - 1;
2544
2545 /*
2546 * This may be done better later if it gets more high level
2547 * components in it. If so, just link td->td_proc here.
2548 */
2549 proc_linkup0(&proc0, &thread0);
2550
2551 metadata_missing = 0;
2552 if (xen_start_info->mod_start) {
2553 preload_metadata = (caddr_t)xen_start_info->mod_start;
2554 preload_bootstrap_relocate(KERNBASE);
2555 } else {
2556 metadata_missing = 1;
2557 }
2558 if (envmode == 1)
2559 kern_envp = static_env;
2560 else if ((caddr_t)xen_start_info->cmd_line)
2561 kern_envp = xen_setbootenv((caddr_t)xen_start_info->cmd_line);
2562
2563 boothowto |= xen_boothowto(kern_envp);
2564
2565 /* Init basic tunables, hz etc */
2566 init_param1();
2567
2568 /*
2569 * XEN occupies a portion of the upper virtual address space.
2570 * At its base it manages an array mapping machine page frames
2571 * to physical page frames - hence we need to be able to
2572 * access 4GB - (64MB - 4MB + 64k)
2573 */
2574 gdt_segs[GPRIV_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2575 gdt_segs[GUFS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2576 gdt_segs[GUGS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2577 gdt_segs[GCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2578 gdt_segs[GDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2579 gdt_segs[GUCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2580 gdt_segs[GUDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2581 gdt_segs[GBIOSLOWMEM_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2582
2583 pc = &__pcpu[0];
2584 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2585 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2586
2587 PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V | PG_RW);
2588 bzero(gdt, PAGE_SIZE);
2589 for (x = 0; x < NGDT; x++)
2590 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2591
2592 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
2593
2594 gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
2595 PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V);
2596 PANIC_IF(HYPERVISOR_set_gdt(&gdtmachpfn, 512) != 0);
2597 lgdt(&r_gdt);
2598 gdtset = 1;
2599
2600 if ((error = HYPERVISOR_set_trap_table(trap_table)) != 0) {
2601 panic("set_trap_table failed - error %d\n", error);
2602 }
2603
2604 error = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
2605 if (error == 0)
2606 error = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
2607 #if CONFIG_XEN_COMPAT <= 0x030002
2608 if (error == -ENOXENSYS)
2609 HYPERVISOR_set_callbacks(GSEL(GCODE_SEL, SEL_KPL),
2610 (unsigned long)Xhypervisor_callback,
2611 GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback);
2612 #endif
2613 pcpu_init(pc, 0, sizeof(struct pcpu));
2614 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
2615 pmap_kenter(pa + KERNBASE, pa);
2616 dpcpu_init((void *)(first + KERNBASE), 0);
2617 first += DPCPU_SIZE;
2618 physfree += DPCPU_SIZE;
2619 init_first += DPCPU_SIZE / PAGE_SIZE;
2620
2621 PCPU_SET(prvspace, pc);
2622 PCPU_SET(curthread, &thread0);
2623 PCPU_SET(curpcb, thread0.td_pcb);
2624
2625 /*
2626 * Initialize mutexes.
2627 *
2628 * icu_lock: in order to allow an interrupt to occur in a critical
2629 * section, to set pcpu->ipending (etc...) properly, we
2630 * must be able to get the icu lock, so it can't be
2631 * under witness.
2632 */
2633 mutex_init();
2634 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2635
2636 /* make ldt memory segments */
2637 PT_SET_MA(ldt, xpmap_ptom(VTOP(ldt)) | PG_V | PG_RW);
2638 bzero(ldt, PAGE_SIZE);
2639 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2640 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2641 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2642 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2643
2644 default_proc_ldt.ldt_base = (caddr_t)ldt;
2645 default_proc_ldt.ldt_len = 6;
2646 _default_ldt = (int)&default_proc_ldt;
2647 PCPU_SET(currentldt, _default_ldt);
2648 PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW);
2649 xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));
2650
2651 #if defined(XEN_PRIVILEGED)
2652 /*
2653 * Initialize the i8254 before the console so that console
2654 * initialization can use DELAY().
2655 */
2656 i8254_init();
2657 #endif
2658
2659 /*
2660 * Initialize the console before we print anything out.
2661 */
2662 cninit();
2663
2664 if (metadata_missing)
2665 printf("WARNING: loader(8) metadata is missing!\n");
2666
2667 #ifdef DEV_ISA
2668 elcr_probe();
2669 atpic_startup();
2670 #endif
2671
2672 #ifdef DDB
2673 ksym_start = bootinfo.bi_symtab;
2674 ksym_end = bootinfo.bi_esymtab;
2675 #endif
2676
2677 kdb_init();
2678
2679 #ifdef KDB
2680 if (boothowto & RB_KDB)
2681 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
2682 #endif
2683
2684 finishidentcpu(); /* Final stage of CPU initialization */
2685 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2686 GSEL(GCODE_SEL, SEL_KPL));
2687 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2688 GSEL(GCODE_SEL, SEL_KPL));
2689 initializecpu(); /* Initialize CPU registers */
2690
2691 /* make an initial tss so cpu can get interrupt stack on syscall! */
2692 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2693 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2694 kstack0_sz - sizeof(struct pcb) - 16);
2695 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2696 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2697 HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL),
2698 PCPU_GET(common_tss.tss_esp0));
2699
2700 /* pointer to selector slot for %fs/%gs */
2701 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
2702
2703 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2704 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2705 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2706 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2707 #ifdef PAE
2708 dblfault_tss.tss_cr3 = (int)IdlePDPT;
2709 #else
2710 dblfault_tss.tss_cr3 = (int)IdlePTD;
2711 #endif
2712 dblfault_tss.tss_eip = (int)dblfault_handler;
2713 dblfault_tss.tss_eflags = PSL_KERNEL;
2714 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2715 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2716 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2717 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2718 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2719
2720 vm86_initialize();
2721 getmemsize(first);
2722 init_param2(physmem);
2723
2724 /* now running on new page tables, configured, and u/iom is accessible */
2725
2726 msgbufinit(msgbufp, msgbufsize);
2727 /* transfer to user mode */
2728
2729 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
2730 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
2731
2732 /* setup proc 0's pcb */
2733 thread0.td_pcb->pcb_flags = 0;
2734 #ifdef PAE
2735 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
2736 #else
2737 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2738 #endif
2739 thread0.td_pcb->pcb_ext = 0;
2740 thread0.td_frame = &proc0_tf;
2741 thread0.td_pcb->pcb_fsd = PCPU_GET(fsgs_gdt)[0];
2742 thread0.td_pcb->pcb_gsd = PCPU_GET(fsgs_gdt)[1];
2743
2744 cpu_probe_amdc1e();
2745 }
2746
2747 #else
2748 void
2749 init386(first)
2750 int first;
2751 {
2752 struct gate_descriptor *gdp;
2753 int gsel_tss, metadata_missing, x, pa;
2754 size_t kstack0_sz;
2755 struct pcpu *pc;
2756
2757 thread0.td_kstack = proc0kstack;
2758 thread0.td_kstack_pages = KSTACK_PAGES;
2759 kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
2760 thread0.td_pcb = (struct pcb *)(thread0.td_kstack + kstack0_sz) - 1;
2761
2762 /*
2763 * This may be done better later if it gets more high level
2764 * components in it. If so, just link td->td_proc here.
2765 */
2766 proc_linkup0(&proc0, &thread0);
2767
2768 metadata_missing = 0;
2769 if (bootinfo.bi_modulep) {
2770 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
2771 preload_bootstrap_relocate(KERNBASE);
2772 } else {
2773 metadata_missing = 1;
2774 }
2775 if (envmode == 1)
2776 kern_envp = static_env;
2777 else if (bootinfo.bi_envp)
2778 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
2779
2780 /* Init basic tunables, hz etc */
2781 init_param1();
2782
2783 /*
2784 * Make gdt memory segments. All segments cover the full 4GB
2785 * of address space and permissions are enforced at page level.
2786 */
2787 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
2788 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
2789 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
2790 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
2791 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
2792 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
2793
2794 pc = &__pcpu[0];
2795 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
2796 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2797 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2798
2799 for (x = 0; x < NGDT; x++)
2800 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2801
2802 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
2803 r_gdt.rd_base = (int) gdt;
2804 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
2805 lgdt(&r_gdt);
2806
2807 pcpu_init(pc, 0, sizeof(struct pcpu));
2808 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
2809 pmap_kenter(pa + KERNBASE, pa);
2810 dpcpu_init((void *)(first + KERNBASE), 0);
2811 first += DPCPU_SIZE;
2812 PCPU_SET(prvspace, pc);
2813 PCPU_SET(curthread, &thread0);
2814 PCPU_SET(curpcb, thread0.td_pcb);
2815
2816 /*
2817 * Initialize mutexes.
2818 *
2819 * icu_lock: in order to allow an interrupt to occur in a critical
2820 * section, to set pcpu->ipending (etc...) properly, we
2821 * must be able to get the icu lock, so it can't be
2822 * under witness.
2823 */
2824 mutex_init();
2825 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2826
2827 /* make ldt memory segments */
2828 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2829 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2830 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2831 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2832
2833 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2834 lldt(_default_ldt);
2835 PCPU_SET(currentldt, _default_ldt);
2836
2837 /* exceptions */
2838 for (x = 0; x < NIDT; x++)
2839 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2840 GSEL(GCODE_SEL, SEL_KPL));
2841 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2842 GSEL(GCODE_SEL, SEL_KPL));
2843 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2844 GSEL(GCODE_SEL, SEL_KPL));
2845 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2846 GSEL(GCODE_SEL, SEL_KPL));
2847 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2848 GSEL(GCODE_SEL, SEL_KPL));
2849 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2850 GSEL(GCODE_SEL, SEL_KPL));
2851 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2852 GSEL(GCODE_SEL, SEL_KPL));
2853 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2854 GSEL(GCODE_SEL, SEL_KPL));
2855 	setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
2856 	    GSEL(GCODE_SEL, SEL_KPL));
2857 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2858 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2859 GSEL(GCODE_SEL, SEL_KPL));
2860 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2861 GSEL(GCODE_SEL, SEL_KPL));
2862 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2863 GSEL(GCODE_SEL, SEL_KPL));
2864 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2865 GSEL(GCODE_SEL, SEL_KPL));
2866 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2867 GSEL(GCODE_SEL, SEL_KPL));
2868 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2869 GSEL(GCODE_SEL, SEL_KPL));
2870 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2871 GSEL(GCODE_SEL, SEL_KPL));
2872 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2873 GSEL(GCODE_SEL, SEL_KPL));
2874 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2875 GSEL(GCODE_SEL, SEL_KPL));
2876 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2877 GSEL(GCODE_SEL, SEL_KPL));
2878 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2879 GSEL(GCODE_SEL, SEL_KPL));
2880 #ifdef KDTRACE_HOOKS
2881 setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYS386TGT, SEL_UPL,
2882 GSEL(GCODE_SEL, SEL_KPL));
2883 #endif
2884
2885 r_idt.rd_limit = sizeof(idt0) - 1;
2886 r_idt.rd_base = (int) idt;
2887 lidt(&r_idt);
2888
2889 #ifdef XBOX
2890 /*
2891 * The following code queries the PCI ID of 0:0:0. For the XBOX,
2892 * this should be 0x10de / 0x02a5.
2893 *
2894 * This is exactly what Linux does.
2895 */
2896 outl(0xcf8, 0x80000000);
2897 if (inl(0xcfc) == 0x02a510de) {
2898 arch_i386_is_xbox = 1;
2899 pic16l_setled(XBOX_LED_GREEN);
2900
2901 /*
2902 * We are an XBOX, but we may have either 64MB or 128MB of
2903 * memory. The PCI host bridge should be programmed for this,
2904 * so we just query it.
2905 */
2906 outl(0xcf8, 0x80000084);
2907 arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
2908 }
2909 #endif /* XBOX */
2910
2911 /*
2912 * Initialize the i8254 before the console so that console
2913 * initialization can use DELAY().
2914 */
2915 i8254_init();
2916
2917 /*
2918 * Initialize the console before we print anything out.
2919 */
2920 cninit();
2921
2922 if (metadata_missing)
2923 printf("WARNING: loader(8) metadata is missing!\n");
2924
2925 #ifdef DEV_ISA
2926 elcr_probe();
2927 atpic_startup();
2928 #endif
2929
2930 #ifdef DDB
2931 ksym_start = bootinfo.bi_symtab;
2932 ksym_end = bootinfo.bi_esymtab;
2933 #endif
2934
2935 kdb_init();
2936
2937 #ifdef KDB
2938 if (boothowto & RB_KDB)
2939 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
2940 #endif
2941
2942 finishidentcpu(); /* Final stage of CPU initialization */
2943 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2944 GSEL(GCODE_SEL, SEL_KPL));
2945 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2946 GSEL(GCODE_SEL, SEL_KPL));
2947 initializecpu(); /* Initialize CPU registers */
2948
2949 /* make an initial tss so cpu can get interrupt stack on syscall! */
2950 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2951 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2952 kstack0_sz - sizeof(struct pcb) - 16);
2953 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2954 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2955 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
2956 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
2957 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
2958 ltr(gsel_tss);
2959
2960 /* pointer to selector slot for %fs/%gs */
2961 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
2962
2963 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2964 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2965 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2966 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2967 #ifdef PAE
2968 dblfault_tss.tss_cr3 = (int)IdlePDPT;
2969 #else
2970 dblfault_tss.tss_cr3 = (int)IdlePTD;
2971 #endif
2972 dblfault_tss.tss_eip = (int)dblfault_handler;
2973 dblfault_tss.tss_eflags = PSL_KERNEL;
2974 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2975 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2976 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2977 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2978 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2979
2980 vm86_initialize();
2981 getmemsize(first);
2982 init_param2(physmem);
2983
2984 /* now running on new page tables, configured, and u/iom is accessible */
2985
2986 msgbufinit(msgbufp, msgbufsize);
2987
2988 /* make a call gate to reenter kernel with */
2989 gdp = &ldt[LSYS5CALLS_SEL].gd;
2990
2991 x = (int) &IDTVEC(lcall_syscall);
2992 gdp->gd_looffset = x;
2993 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
2994 gdp->gd_stkcpy = 1;
2995 gdp->gd_type = SDT_SYS386CGT;
2996 gdp->gd_dpl = SEL_UPL;
2997 gdp->gd_p = 1;
2998 gdp->gd_hioffset = x >> 16;
2999
3000 /* XXX does this work? */
3001 /* XXX yes! */
3002 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
3003 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
3004
3005 /* transfer to user mode */
3006
3007 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
3008 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
3009
3010 /* setup proc 0's pcb */
3011 thread0.td_pcb->pcb_flags = 0;
3012 #ifdef PAE
3013 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
3014 #else
3015 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
3016 #endif
3017 thread0.td_pcb->pcb_ext = 0;
3018 thread0.td_frame = &proc0_tf;
3019
3020 cpu_probe_amdc1e();
3021 }
3022 #endif
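/*
 * Illustrative sketch (not from the original file): the GSEL() values used
 * throughout init386() above are ordinary x86 segment selectors, i.e. a
 * GDT index shifted left 3 bits with the requested privilege level in the
 * low 2 bits (and the table-indicator bit clear for the GDT).
 * Hypothetical helper, kept under #if 0.
 */
#if 0
static u_int
example_selector(u_int gdt_index, u_int rpl)
{

	return ((gdt_index << 3) | rpl);
}
#endif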
3023
3024 void
3025 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
3026 {
3027
3028 pcpu->pc_acpi_id = 0xffffffff;
3029 }
3030
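/*
 * spinlock_enter()/spinlock_exit() maintain a per-thread nesting count:
 * only the outermost enter disables interrupts (saving the flags) and
 * only the matching outermost exit restores them, so nested spinlock
 * sections stay cheap.
 */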
3031 void
3032 spinlock_enter(void)
3033 {
3034 struct thread *td;
3035 register_t flags;
3036
3037 td = curthread;
3038 if (td->td_md.md_spinlock_count == 0) {
3039 flags = intr_disable();
3040 td->td_md.md_spinlock_count = 1;
3041 td->td_md.md_saved_flags = flags;
3042 } else
3043 td->td_md.md_spinlock_count++;
3044 critical_enter();
3045 }
3046
3047 void
3048 spinlock_exit(void)
3049 {
3050 struct thread *td;
3051 register_t flags;
3052
3053 td = curthread;
3054 critical_exit();
3055 flags = td->td_md.md_saved_flags;
3056 td->td_md.md_spinlock_count--;
3057 if (td->td_md.md_spinlock_count == 0)
3058 intr_restore(flags);
3059 }
3060
3061 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
3062 static void f00f_hack(void *unused);
3063 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
3064
3065 static void
3066 f00f_hack(void *unused)
3067 {
3068 struct gate_descriptor *new_idt;
3069 vm_offset_t tmp;
3070
3071 if (!has_f00f_bug)
3072 return;
3073
3074 GIANT_REQUIRED;
3075
3076 printf("Intel Pentium detected, installing workaround for F00F bug\n");
3077
3078 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
3079 if (tmp == 0)
3080 panic("kmem_alloc returned 0");
3081
3082 /* Put the problematic entry (#6) at the end of the lower page. */
3083 new_idt = (struct gate_descriptor*)
3084 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
3085 bcopy(idt, new_idt, sizeof(idt0));
3086 r_idt.rd_base = (u_int)new_idt;
3087 lidt(&r_idt);
3088 idt = new_idt;
3089 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
3090 VM_PROT_READ, FALSE) != KERN_SUCCESS)
3091 panic("vm_map_protect failed");
3092 }
3093 #endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
3094
3095 /*
3096 * Construct a PCB from a trapframe. This is called from kdb_trap() where
3097 * we want to start a backtrace from the function that caused us to enter
3098 * the debugger. We have the context in the trapframe, but base the trace
3099 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
3100 * enough for a backtrace.
3101 */
3102 void
3103 makectx(struct trapframe *tf, struct pcb *pcb)
3104 {
3105
3106 pcb->pcb_edi = tf->tf_edi;
3107 pcb->pcb_esi = tf->tf_esi;
3108 pcb->pcb_ebp = tf->tf_ebp;
3109 pcb->pcb_ebx = tf->tf_ebx;
3110 pcb->pcb_eip = tf->tf_eip;
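	/*
	 * A trap from kernel mode pushes no esp/ss, so the frame is two
	 * words shorter and the stack pointer at trap time sits where
	 * those missing words would have been, i.e. (tf + 1) - 8 bytes.
	 */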
3111 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
3112 }
3113
3114 int
3115 ptrace_set_pc(struct thread *td, u_long addr)
3116 {
3117
3118 td->td_frame->tf_eip = addr;
3119 return (0);
3120 }
3121
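/*
 * PSL_T is the eflags trap flag: while it is set the CPU raises a debug
 * exception after every instruction, which is what implements the
 * single-stepping below.
 */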
3122 int
3123 ptrace_single_step(struct thread *td)
3124 {
3125 td->td_frame->tf_eflags |= PSL_T;
3126 return (0);
3127 }
3128
3129 int
3130 ptrace_clear_single_step(struct thread *td)
3131 {
3132 td->td_frame->tf_eflags &= ~PSL_T;
3133 return (0);
3134 }
3135
3136 int
3137 fill_regs(struct thread *td, struct reg *regs)
3138 {
3139 struct pcb *pcb;
3140 struct trapframe *tp;
3141
3142 tp = td->td_frame;
3143 pcb = td->td_pcb;
3144 regs->r_gs = pcb->pcb_gs;
3145 return (fill_frame_regs(tp, regs));
3146 }
3147
3148 int
3149 fill_frame_regs(struct trapframe *tp, struct reg *regs)
3150 {
3151 regs->r_fs = tp->tf_fs;
3152 regs->r_es = tp->tf_es;
3153 regs->r_ds = tp->tf_ds;
3154 regs->r_edi = tp->tf_edi;
3155 regs->r_esi = tp->tf_esi;
3156 regs->r_ebp = tp->tf_ebp;
3157 regs->r_ebx = tp->tf_ebx;
3158 regs->r_edx = tp->tf_edx;
3159 regs->r_ecx = tp->tf_ecx;
3160 regs->r_eax = tp->tf_eax;
3161 regs->r_eip = tp->tf_eip;
3162 regs->r_cs = tp->tf_cs;
3163 regs->r_eflags = tp->tf_eflags;
3164 regs->r_esp = tp->tf_esp;
3165 regs->r_ss = tp->tf_ss;
3166 return (0);
3167 }
3168
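/*
 * EFL_SECURE() and CS_SECURE() below reject eflags changes outside the
 * user-modifiable bits and any code selector that is not an unprivileged
 * user selector, so a debugger cannot escalate the traced process.
 */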
3169 int
3170 set_regs(struct thread *td, struct reg *regs)
3171 {
3172 struct pcb *pcb;
3173 struct trapframe *tp;
3174
3175 tp = td->td_frame;
3176 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
3177 !CS_SECURE(regs->r_cs))
3178 return (EINVAL);
3179 pcb = td->td_pcb;
3180 tp->tf_fs = regs->r_fs;
3181 tp->tf_es = regs->r_es;
3182 tp->tf_ds = regs->r_ds;
3183 tp->tf_edi = regs->r_edi;
3184 tp->tf_esi = regs->r_esi;
3185 tp->tf_ebp = regs->r_ebp;
3186 tp->tf_ebx = regs->r_ebx;
3187 tp->tf_edx = regs->r_edx;
3188 tp->tf_ecx = regs->r_ecx;
3189 tp->tf_eax = regs->r_eax;
3190 tp->tf_eip = regs->r_eip;
3191 tp->tf_cs = regs->r_cs;
3192 tp->tf_eflags = regs->r_eflags;
3193 tp->tf_esp = regs->r_esp;
3194 tp->tf_ss = regs->r_ss;
3195 pcb->pcb_gs = regs->r_gs;
3196 return (0);
3197 }
3198
3199 #ifdef CPU_ENABLE_SSE
3200 static void
3201 fill_fpregs_xmm(sv_xmm, sv_87)
3202 struct savexmm *sv_xmm;
3203 struct save87 *sv_87;
3204 {
3205 register struct env87 *penv_87 = &sv_87->sv_env;
3206 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
3207 int i;
3208
3209 bzero(sv_87, sizeof(*sv_87));
3210
3211 /* FPU control/status */
3212 penv_87->en_cw = penv_xmm->en_cw;
3213 penv_87->en_sw = penv_xmm->en_sw;
3214 penv_87->en_tw = penv_xmm->en_tw;
3215 penv_87->en_fip = penv_xmm->en_fip;
3216 penv_87->en_fcs = penv_xmm->en_fcs;
3217 penv_87->en_opcode = penv_xmm->en_opcode;
3218 penv_87->en_foo = penv_xmm->en_foo;
3219 penv_87->en_fos = penv_xmm->en_fos;
3220
3221 /* FPU registers */
3222 for (i = 0; i < 8; ++i)
3223 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
3224 }
3225
3226 static void
3227 set_fpregs_xmm(sv_87, sv_xmm)
3228 struct save87 *sv_87;
3229 struct savexmm *sv_xmm;
3230 {
3231 register struct env87 *penv_87 = &sv_87->sv_env;
3232 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
3233 int i;
3234
3235 /* FPU control/status */
3236 penv_xmm->en_cw = penv_87->en_cw;
3237 penv_xmm->en_sw = penv_87->en_sw;
3238 penv_xmm->en_tw = penv_87->en_tw;
3239 penv_xmm->en_fip = penv_87->en_fip;
3240 penv_xmm->en_fcs = penv_87->en_fcs;
3241 penv_xmm->en_opcode = penv_87->en_opcode;
3242 penv_xmm->en_foo = penv_87->en_foo;
3243 penv_xmm->en_fos = penv_87->en_fos;
3244
3245 /* FPU registers */
3246 for (i = 0; i < 8; ++i)
3247 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
3248 }
3249 #endif /* CPU_ENABLE_SSE */
3250
3251 int
3252 fill_fpregs(struct thread *td, struct fpreg *fpregs)
3253 {
3254
3255 KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
3256 P_SHOULDSTOP(td->td_proc),
3257 ("not suspended thread %p", td));
3258 #ifdef DEV_NPX
3259 npxgetregs(td);
3260 #else
3261 bzero(fpregs, sizeof(*fpregs));
3262 #endif
3263 #ifdef CPU_ENABLE_SSE
3264 if (cpu_fxsr)
3265 fill_fpregs_xmm(&td->td_pcb->pcb_user_save.sv_xmm,
3266 (struct save87 *)fpregs);
3267 else
3268 #endif /* CPU_ENABLE_SSE */
3269 bcopy(&td->td_pcb->pcb_user_save.sv_87, fpregs,
3270 sizeof(*fpregs));
3271 return (0);
3272 }
3273
3274 int
3275 set_fpregs(struct thread *td, struct fpreg *fpregs)
3276 {
3277
3278 #ifdef CPU_ENABLE_SSE
3279 if (cpu_fxsr)
3280 set_fpregs_xmm((struct save87 *)fpregs,
3281 &td->td_pcb->pcb_user_save.sv_xmm);
3282 else
3283 #endif /* CPU_ENABLE_SSE */
3284 bcopy(fpregs, &td->td_pcb->pcb_user_save.sv_87,
3285 sizeof(*fpregs));
3286 #ifdef DEV_NPX
3287 npxuserinited(td);
3288 #endif
3289 return (0);
3290 }
3291
3292 /*
3293 * Get machine context.
3294 */
3295 int
3296 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
3297 {
3298 struct trapframe *tp;
3299 struct segment_descriptor *sdp;
3300
3301 tp = td->td_frame;
3302
3303 PROC_LOCK(curthread->td_proc);
3304 mcp->mc_onstack = sigonstack(tp->tf_esp);
3305 PROC_UNLOCK(curthread->td_proc);
3306 mcp->mc_gs = td->td_pcb->pcb_gs;
3307 mcp->mc_fs = tp->tf_fs;
3308 mcp->mc_es = tp->tf_es;
3309 mcp->mc_ds = tp->tf_ds;
3310 mcp->mc_edi = tp->tf_edi;
3311 mcp->mc_esi = tp->tf_esi;
3312 mcp->mc_ebp = tp->tf_ebp;
3313 mcp->mc_isp = tp->tf_isp;
3314 mcp->mc_eflags = tp->tf_eflags;
3315 if (flags & GET_MC_CLEAR_RET) {
3316 mcp->mc_eax = 0;
3317 mcp->mc_edx = 0;
3318 mcp->mc_eflags &= ~PSL_C;
3319 } else {
3320 mcp->mc_eax = tp->tf_eax;
3321 mcp->mc_edx = tp->tf_edx;
3322 }
3323 mcp->mc_ebx = tp->tf_ebx;
3324 mcp->mc_ecx = tp->tf_ecx;
3325 mcp->mc_eip = tp->tf_eip;
3326 mcp->mc_cs = tp->tf_cs;
3327 mcp->mc_esp = tp->tf_esp;
3328 mcp->mc_ss = tp->tf_ss;
3329 mcp->mc_len = sizeof(*mcp);
3330 get_fpcontext(td, mcp);
3331 sdp = &td->td_pcb->pcb_fsd;
3332 mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
3333 sdp = &td->td_pcb->pcb_gsd;
3334 mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
3335 mcp->mc_flags = 0;
3336 bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
3337 return (0);
3338 }
3339
3340 /*
3341 * Set machine context.
3342 *
3343 * However, we don't set any but the user-modifiable flags, and we won't
3344 * touch the cs selector.
3345 */
3346 int
3347 set_mcontext(struct thread *td, const mcontext_t *mcp)
3348 {
3349 struct trapframe *tp;
3350 int eflags, ret;
3351
3352 tp = td->td_frame;
3353 if (mcp->mc_len != sizeof(*mcp))
3354 return (EINVAL);
3355 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
3356 (tp->tf_eflags & ~PSL_USERCHANGE);
3357 if ((ret = set_fpcontext(td, mcp)) == 0) {
3358 tp->tf_fs = mcp->mc_fs;
3359 tp->tf_es = mcp->mc_es;
3360 tp->tf_ds = mcp->mc_ds;
3361 tp->tf_edi = mcp->mc_edi;
3362 tp->tf_esi = mcp->mc_esi;
3363 tp->tf_ebp = mcp->mc_ebp;
3364 tp->tf_ebx = mcp->mc_ebx;
3365 tp->tf_edx = mcp->mc_edx;
3366 tp->tf_ecx = mcp->mc_ecx;
3367 tp->tf_eax = mcp->mc_eax;
3368 tp->tf_eip = mcp->mc_eip;
3369 tp->tf_eflags = eflags;
3370 tp->tf_esp = mcp->mc_esp;
3371 tp->tf_ss = mcp->mc_ss;
3372 td->td_pcb->pcb_gs = mcp->mc_gs;
3373 ret = 0;
3374 }
3375 return (ret);
3376 }
3377
3378 static void
3379 get_fpcontext(struct thread *td, mcontext_t *mcp)
3380 {
3381
3382 #ifndef DEV_NPX
3383 mcp->mc_fpformat = _MC_FPFMT_NODEV;
3384 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
3385 bzero(mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
3386 #else
3387 mcp->mc_ownedfp = npxgetregs(td);
3388 bcopy(&td->td_pcb->pcb_user_save, &mcp->mc_fpstate,
3389 sizeof(mcp->mc_fpstate));
3390 mcp->mc_fpformat = npxformat();
3391 #endif
3392 }
3393
3394 static int
3395 set_fpcontext(struct thread *td, const mcontext_t *mcp)
3396 {
3397
3398 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
3399 return (0);
3400 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
3401 mcp->mc_fpformat != _MC_FPFMT_XMM)
3402 return (EINVAL);
3403 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
3404 /* We don't care what state is left in the FPU or PCB. */
3405 fpstate_drop(td);
3406 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
3407 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
3408 #ifdef DEV_NPX
3409 #ifdef CPU_ENABLE_SSE
3410 if (cpu_fxsr)
3411 ((union savefpu *)&mcp->mc_fpstate)->sv_xmm.sv_env.
3412 en_mxcsr &= cpu_mxcsr_mask;
3413 #endif
3414 npxsetregs(td, (union savefpu *)&mcp->mc_fpstate);
3415 #endif
3416 } else
3417 return (EINVAL);
3418 return (0);
3419 }
3420
3421 static void
3422 fpstate_drop(struct thread *td)
3423 {
3424
3425 KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
3426 critical_enter();
3427 #ifdef DEV_NPX
3428 if (PCPU_GET(fpcurthread) == td)
3429 npxdrop();
3430 #endif
3431 /*
3432 * XXX force a full drop of the npx. The above only drops it if we
3433 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
3434 *
3435 * XXX I don't much like npxgetregs()'s semantics of doing a full
3436 * drop. Dropping only to the pcb matches fnsave's behaviour.
3437 * We only need to drop to !PCB_INITDONE in sendsig(). But
3438 * sendsig() is the only caller of npxgetregs()... perhaps we just
3439 * have too many layers.
3440 */
3441 curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
3442 PCB_NPXUSERINITDONE);
3443 critical_exit();
3444 }
3445
3446 int
3447 fill_dbregs(struct thread *td, struct dbreg *dbregs)
3448 {
3449 struct pcb *pcb;
3450
3451 if (td == NULL) {
3452 dbregs->dr[0] = rdr0();
3453 dbregs->dr[1] = rdr1();
3454 dbregs->dr[2] = rdr2();
3455 dbregs->dr[3] = rdr3();
3456 dbregs->dr[4] = rdr4();
3457 dbregs->dr[5] = rdr5();
3458 dbregs->dr[6] = rdr6();
3459 dbregs->dr[7] = rdr7();
3460 } else {
3461 pcb = td->td_pcb;
3462 dbregs->dr[0] = pcb->pcb_dr0;
3463 dbregs->dr[1] = pcb->pcb_dr1;
3464 dbregs->dr[2] = pcb->pcb_dr2;
3465 dbregs->dr[3] = pcb->pcb_dr3;
3466 dbregs->dr[4] = 0;
3467 dbregs->dr[5] = 0;
3468 dbregs->dr[6] = pcb->pcb_dr6;
3469 dbregs->dr[7] = pcb->pcb_dr7;
3470 }
3471 return (0);
3472 }
3473
3474 int
3475 set_dbregs(struct thread *td, struct dbreg *dbregs)
3476 {
3477 struct pcb *pcb;
3478 int i;
3479
3480 if (td == NULL) {
3481 load_dr0(dbregs->dr[0]);
3482 load_dr1(dbregs->dr[1]);
3483 load_dr2(dbregs->dr[2]);
3484 load_dr3(dbregs->dr[3]);
3485 load_dr4(dbregs->dr[4]);
3486 load_dr5(dbregs->dr[5]);
3487 load_dr6(dbregs->dr[6]);
3488 load_dr7(dbregs->dr[7]);
3489 } else {
3490 /*
3491 * Don't let an illegal value for dr7 get set. Specifically,
3492 * check for undefined settings. Setting these bit patterns
3493 * results in undefined behaviour and can lead to an unexpected
3494 * TRCTRAP.
3495 */
3496 for (i = 0; i < 4; i++) {
3497 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
3498 return (EINVAL);
3499 if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
3500 return (EINVAL);
3501 }
3502
3503 pcb = td->td_pcb;
3504
3505 /*
3506 * Don't let a process set a breakpoint that is not within the
3507 * process's address space. If a process could do this, it
3508 * could halt the system by setting a breakpoint in the kernel
3509 * (if ddb was enabled). Thus, we need to check to make sure
3510 * that no breakpoints are being enabled for addresses outside
3511 * the process's address space.
3512 *
3513 * XXX - what about when the watched area of the user's
3514 * address space is written into from within the kernel
3515 * ... wouldn't that still cause a breakpoint to be generated
3516 * from within kernel mode?
3517 */
3518
3519 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
3520 /* dr0 is enabled */
3521 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
3522 return (EINVAL);
3523 }
3524
3525 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
3526 /* dr1 is enabled */
3527 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
3528 return (EINVAL);
3529 }
3530
3531 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
3532 /* dr2 is enabled */
3533 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
3534 return (EINVAL);
3535 }
3536
3537 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
3538 /* dr3 is enabled */
3539 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
3540 return (EINVAL);
3541 }
3542
3543 pcb->pcb_dr0 = dbregs->dr[0];
3544 pcb->pcb_dr1 = dbregs->dr[1];
3545 pcb->pcb_dr2 = dbregs->dr[2];
3546 pcb->pcb_dr3 = dbregs->dr[3];
3547 pcb->pcb_dr6 = dbregs->dr[6];
3548 pcb->pcb_dr7 = dbregs->dr[7];
3549
3550 pcb->pcb_flags |= PCB_DBREGS;
3551 }
3552
3553 return (0);
3554 }
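/*
 * Illustrative sketch (not from the original file): the dr7 sanity check
 * in set_dbregs() rejects, for each of the four breakpoints, the access
 * type and length encodings (both 0x02) that the kernel treats as
 * undefined.  Hypothetical helper, kept under #if 0.
 */
#if 0
static int
example_dr7_ok(u_int dr7)
{
	int i;

	for (i = 0; i < 4; i++)
		if (DBREG_DR7_ACCESS(dr7, i) == 0x02 ||
		    DBREG_DR7_LEN(dr7, i) == 0x02)
			return (0);	/* undefined encoding */
	return (1);
}
#endif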
3555
3556 /*
3557 * Return > 0 if a hardware breakpoint has been hit, and the
3558 * breakpoint was in user space. Return 0, otherwise.
3559 */
3560 int
3561 user_dbreg_trap(void)
3562 {
3563 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
3564 u_int32_t bp; /* breakpoint bits extracted from dr6 */
3565 int nbp; /* number of breakpoints that triggered */
3566 caddr_t addr[4]; /* breakpoint addresses */
3567 int i;
3568
3569 dr7 = rdr7();
3570 if ((dr7 & 0x000000ff) == 0) {
3571 /*
3572 * all of the Ln/Gn enable bits in the dr7 register are zero,
3573 * thus the trap couldn't have been caused by the
3574 * hardware debug registers
3575 */
3576 return 0;
3577 }
3578
3579 nbp = 0;
3580 dr6 = rdr6();
3581 bp = dr6 & 0x0000000f;
3582
3583 if (!bp) {
3584 /*
3585 * None of the breakpoint bits are set, meaning this
3586 * trap was not caused by any of the debug registers
3587 */
3588 return 0;
3589 }
3590
3591 /*
3592 * At least one of the breakpoints was hit; check to see
3593 * which ones and whether any of them are user space addresses.
3594 */
3595
3596 if (bp & 0x01) {
3597 addr[nbp++] = (caddr_t)rdr0();
3598 }
3599 if (bp & 0x02) {
3600 addr[nbp++] = (caddr_t)rdr1();
3601 }
3602 if (bp & 0x04) {
3603 addr[nbp++] = (caddr_t)rdr2();
3604 }
3605 if (bp & 0x08) {
3606 addr[nbp++] = (caddr_t)rdr3();
3607 }
3608
3609 for (i = 0; i < nbp; i++) {
3610 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
3611 /*
3612 * addr[i] is in user space
3613 */
3614 return nbp;
3615 }
3616 }
3617
3618 /*
3619 * None of the breakpoints are in user space.
3620 */
3621 return 0;
3622 }
3623
3624 #ifndef DEV_APIC
3625 #include <machine/apicvar.h>
3626
3627 /*
3628 * Provide stub functions so that the MADT APIC enumerator in the acpi
3629 * kernel module will link against a kernel without 'device apic'.
3630 *
3631 * XXX - This is a gross hack.
3632 */
3633 void
3634 apic_register_enumerator(struct apic_enumerator *enumerator)
3635 {
3636 }
3637
3638 void *
3639 ioapic_create(vm_paddr_t addr, int32_t apic_id, int intbase)
3640 {
3641 return (NULL);
3642 }
3643
3644 int
3645 ioapic_disable_pin(void *cookie, u_int pin)
3646 {
3647 return (ENXIO);
3648 }
3649
3650 int
3651 ioapic_get_vector(void *cookie, u_int pin)
3652 {
3653 return (-1);
3654 }
3655
3656 void
3657 ioapic_register(void *cookie)
3658 {
3659 }
3660
3661 int
3662 ioapic_remap_vector(void *cookie, u_int pin, int vector)
3663 {
3664 return (ENXIO);
3665 }
3666
3667 int
3668 ioapic_set_extint(void *cookie, u_int pin)
3669 {
3670 return (ENXIO);
3671 }
3672
3673 int
3674 ioapic_set_nmi(void *cookie, u_int pin)
3675 {
3676 return (ENXIO);
3677 }
3678
3679 int
3680 ioapic_set_polarity(void *cookie, u_int pin, enum intr_polarity pol)
3681 {
3682 return (ENXIO);
3683 }
3684
3685 int
3686 ioapic_set_triggermode(void *cookie, u_int pin, enum intr_trigger trigger)
3687 {
3688 return (ENXIO);
3689 }
3690
3691 void
3692 lapic_create(u_int apic_id, int boot_cpu)
3693 {
3694 }
3695
3696 void
3697 lapic_init(vm_paddr_t addr)
3698 {
3699 }
3700
3701 int
3702 lapic_set_lvt_mode(u_int apic_id, u_int lvt, u_int32_t mode)
3703 {
3704 return (ENXIO);
3705 }
3706
3707 int
3708 lapic_set_lvt_polarity(u_int apic_id, u_int lvt, enum intr_polarity pol)
3709 {
3710 return (ENXIO);
3711 }
3712
3713 int
3714 lapic_set_lvt_triggermode(u_int apic_id, u_int lvt, enum intr_trigger trigger)
3715 {
3716 return (ENXIO);
3717 }
3718 #endif
3719
3720 #ifdef KDB
3721
3722 /*
3723 * Provide inb() and outb() as functions. They are normally only available as
3724 * inline functions and thus cannot be called from the debugger.
3725 */
3726
3727 /* silence compiler warnings */
3728 u_char inb_(u_short);
3729 void outb_(u_short, u_char);
3730
3731 u_char
3732 inb_(u_short port)
3733 {
3734 return inb(port);
3735 }
3736
3737 void
3738 outb_(u_short port, u_char data)
3739 {
3740 outb(port, data);
3741 }
3742
3743 #endif /* KDB */