1 /*-
2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * William Jolitz.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
38 */
39
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD: releng/6.3/sys/i386/i386/machdep.c 174792 2007-12-19 21:05:43Z rpaulo $");
42
43 #include "opt_apic.h"
44 #include "opt_atalk.h"
45 #include "opt_compat.h"
46 #include "opt_cpu.h"
47 #include "opt_ddb.h"
48 #include "opt_inet.h"
49 #include "opt_ipx.h"
50 #include "opt_isa.h"
51 #include "opt_kstack_pages.h"
52 #include "opt_maxmem.h"
53 #include "opt_msgbuf.h"
54 #include "opt_npx.h"
55 #include "opt_perfmon.h"
56 #include "opt_xbox.h"
57
58 #include <sys/param.h>
59 #include <sys/proc.h>
60 #include <sys/systm.h>
61 #include <sys/bio.h>
62 #include <sys/buf.h>
63 #include <sys/bus.h>
64 #include <sys/callout.h>
65 #include <sys/cons.h>
66 #include <sys/cpu.h>
67 #include <sys/eventhandler.h>
68 #include <sys/exec.h>
69 #include <sys/imgact.h>
70 #include <sys/kdb.h>
71 #include <sys/kernel.h>
72 #include <sys/ktr.h>
73 #include <sys/linker.h>
74 #include <sys/lock.h>
75 #include <sys/malloc.h>
76 #include <sys/memrange.h>
77 #include <sys/msgbuf.h>
78 #include <sys/mutex.h>
79 #include <sys/pcpu.h>
80 #include <sys/ptrace.h>
81 #include <sys/reboot.h>
82 #include <sys/sched.h>
83 #include <sys/signalvar.h>
84 #include <sys/sysctl.h>
85 #include <sys/sysent.h>
86 #include <sys/sysproto.h>
87 #include <sys/ucontext.h>
88 #include <sys/vmmeter.h>
89
90 #include <vm/vm.h>
91 #include <vm/vm_extern.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_map.h>
95 #include <vm/vm_object.h>
96 #include <vm/vm_pager.h>
97 #include <vm/vm_param.h>
98
99 #ifdef DDB
100 #ifndef KDB
101 #error KDB must be enabled in order for DDB to work!
102 #endif
103 #include <ddb/ddb.h>
104 #include <ddb/db_sym.h>
105 #endif
106
107 #include <isa/rtc.h>
108
109 #include <net/netisr.h>
110
111 #include <machine/bootinfo.h>
112 #include <machine/clock.h>
113 #include <machine/cpu.h>
114 #include <machine/cputypes.h>
115 #include <machine/intr_machdep.h>
116 #include <machine/md_var.h>
117 #include <machine/pc/bios.h>
118 #include <machine/pcb.h>
119 #include <machine/pcb_ext.h>
120 #include <machine/proc.h>
121 #include <machine/reg.h>
122 #include <machine/sigframe.h>
123 #include <machine/specialreg.h>
124 #include <machine/vm86.h>
125 #ifdef PERFMON
126 #include <machine/perfmon.h>
127 #endif
128 #ifdef SMP
129 #include <machine/privatespace.h>
130 #include <machine/smp.h>
131 #endif
132
133 #ifdef DEV_ISA
134 #include <i386/isa/icu.h>
135 #endif
136
137 #ifdef XBOX
138 #include <machine/xbox.h>
139
140 int arch_i386_is_xbox = 0;
141 uint32_t arch_i386_xbox_memsize = 0;
142 #endif
143
144 /* Sanity check for __curthread() */
145 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
146
147 extern void init386(int first);
148 extern void dblfault_handler(void);
149
150 extern void printcpuinfo(void); /* XXX header file */
151 extern void finishidentcpu(void);
152 extern void panicifcpuunsupported(void);
153 extern void initializecpu(void);
154
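/*
 * CS_SECURE() checks that a %cs value taken from a user-supplied context
 * is a user-privilege selector; EFL_SECURE() checks that only the
 * user-changeable eflags bits differ from the currently saved ones.
 */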
155 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
156 #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
157
158 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
159 #define CPU_ENABLE_SSE
160 #endif
161
162 static void cpu_startup(void *);
163 static void fpstate_drop(struct thread *td);
164 static void get_fpcontext(struct thread *td, mcontext_t *mcp);
165 static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
166 #ifdef CPU_ENABLE_SSE
167 static void set_fpregs_xmm(struct save87 *, struct savexmm *);
168 static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
169 #endif /* CPU_ENABLE_SSE */
170 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
171
172 #ifdef DDB
173 extern vm_offset_t ksym_start, ksym_end;
174 #endif
175
176 /* Intel ICH registers */
177 #define ICH_PMBASE 0x400
178 #define ICH_SMI_EN (ICH_PMBASE + 0x30)
179
180 int _udatasel, _ucodesel;	/* user data and code segment selectors */
181 u_int basemem;			/* amount of base (conventional) memory, in KB */
182
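/* Nonzero while the kernel is still booting. */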
183 int cold = 1;
184
185 #ifdef COMPAT_43
186 static void osendsig(sig_t catcher, int sig, sigset_t *mask, u_long code);
187 #endif
188 #ifdef COMPAT_FREEBSD4
189 static void freebsd4_sendsig(sig_t catcher, int sig, sigset_t *mask,
190 u_long code);
191 #endif
192
193 long Maxmem = 0;
194 long realmem = 0;
195
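/* Room for 16 base/bound pairs describing physical memory regions (see getmemsize()). */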
196 #define PHYSMAP_SIZE (2 * 16)
197
198 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
199 vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
200
201 /* must be 2 less so a 0,0 pair can signal the end of the chunks */
202 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
203 #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
204
205 struct kva_md_info kmi;
206
207 static struct trapframe proc0_tf;
208 #ifndef SMP
209 static struct pcpu __pcpu;
210 #endif
211
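/* Spin lock protecting access to the interrupt controller registers. */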
212 struct mtx icu_lock;
213
214 struct mem_range_softc mem_range_softc;
215
216 static void
217 cpu_startup(dummy)
218 void *dummy;
219 {
220 char *sysenv;
221
222 /*
223 * On MacBooks, we need to prevent the legacy USB circuit from
224 * generating an SMI#, because this can cause several problems,
225 * namely: incorrect CPU frequency detection and failure to
226 * start the APs.
227 * We do this by clearing a bit in the SMI_EN (SMI Control and
228 * Enable) register of the Intel ICH LPC Interface Bridge.
229 */
230 sysenv = getenv("smbios.system.product");
231 if (sysenv != NULL) {
232 if (strncmp(sysenv, "MacBook", 7) == 0) {
233 if (bootverbose)
234 printf("Disabling LEGACY_USB_EN bit on "
235 "Intel ICH.\n");
236 outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
237 }
238 freeenv(sysenv);
239 }
240
241 /*
242 * Good {morning,afternoon,evening,night}.
243 */
244 startrtclock();
245 printcpuinfo();
246 panicifcpuunsupported();
247 #ifdef PERFMON
248 perfmon_init();
249 #endif
250 printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem),
251 ptoa((uintmax_t)Maxmem) / 1048576);
252 realmem = Maxmem;
253 /*
254 * Display any holes after the first chunk of extended memory.
255 */
256 if (bootverbose) {
257 int indx;
258
259 printf("Physical memory chunk(s):\n");
260 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
261 vm_paddr_t size;
262
263 size = phys_avail[indx + 1] - phys_avail[indx];
264 printf(
265 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
266 (uintmax_t)phys_avail[indx],
267 (uintmax_t)phys_avail[indx + 1] - 1,
268 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
269 }
270 }
271
272 vm_ksubmap_init(&kmi);
273
274 printf("avail memory = %ju (%ju MB)\n",
275 ptoa((uintmax_t)cnt.v_free_count),
276 ptoa((uintmax_t)cnt.v_free_count) / 1048576);
277
278 /*
279 * Set up buffers, so they can be used to read disk labels.
280 */
281 bufinit();
282 vm_pager_bufferinit();
283
284 cpu_setregs();
285 }
286
287 /*
288 * Send an interrupt to a process.
289 *
290 * The stack is set up so that the sigcode stored at the top can call
291 * the handler routine, followed by a kernel call to the sigreturn
292 * routine below. After sigreturn resets the signal mask, the stack,
293 * and the frame pointer, it returns to the user-specified pc and psl.
296 */
297 #ifdef COMPAT_43
298 static void
299 osendsig(catcher, sig, mask, code)
300 sig_t catcher;
301 int sig;
302 sigset_t *mask;
303 u_long code;
304 {
305 struct osigframe sf, *fp;
306 struct proc *p;
307 struct thread *td;
308 struct sigacts *psp;
309 struct trapframe *regs;
310 int oonstack;
311
312 td = curthread;
313 p = td->td_proc;
314 PROC_LOCK_ASSERT(p, MA_OWNED);
315 psp = p->p_sigacts;
316 mtx_assert(&psp->ps_mtx, MA_OWNED);
317 regs = td->td_frame;
318 oonstack = sigonstack(regs->tf_esp);
319
320 /* Allocate space for the signal handler context. */
321 if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
322 SIGISMEMBER(psp->ps_sigonstack, sig)) {
323 fp = (struct osigframe *)(td->td_sigstk.ss_sp +
324 td->td_sigstk.ss_size - sizeof(struct osigframe));
325 #if defined(COMPAT_43)
326 td->td_sigstk.ss_flags |= SS_ONSTACK;
327 #endif
328 } else
329 fp = (struct osigframe *)regs->tf_esp - 1;
330
331 /* Translate the signal if appropriate. */
332 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
333 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
334
335 /* Build the argument list for the signal handler. */
336 sf.sf_signum = sig;
337 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
338 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
339 /* Signal handler installed with SA_SIGINFO. */
340 sf.sf_arg2 = (register_t)&fp->sf_siginfo;
341 sf.sf_siginfo.si_signo = sig;
342 sf.sf_siginfo.si_code = code;
343 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
344 } else {
345 /* Old FreeBSD-style arguments. */
346 sf.sf_arg2 = code;
347 sf.sf_addr = td->td_md.md_fault_addr;
348 sf.sf_ahu.sf_handler = catcher;
349 }
350 mtx_unlock(&psp->ps_mtx);
351 PROC_UNLOCK(p);
352
353 /* Save most if not all of trap frame. */
354 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
355 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
356 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
357 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
358 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
359 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
360 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
361 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
362 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
363 sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
364 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
365 sf.sf_siginfo.si_sc.sc_gs = rgs();
366 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;
367
368 /* Build the signal context to be used by osigreturn(). */
369 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
370 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
371 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
372 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
373 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
374 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
375 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
376 sf.sf_siginfo.si_sc.sc_err = regs->tf_err;
377
378 /*
379 * If we're a vm86 process, we want to save the segment registers.
380 * We also change eflags to be our emulated eflags, not the actual
381 * eflags.
382 */
383 if (regs->tf_eflags & PSL_VM) {
384 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
385 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
386 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
387
388 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
389 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
390 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
391 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;
392
393 if (vm86->vm86_has_vme == 0)
394 sf.sf_siginfo.si_sc.sc_ps =
395 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
396 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
397
398 /* See sendsig() for comments. */
399 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
400 }
401
402 /*
403 * Copy the sigframe out to the user's stack.
404 */
405 if (copyout(&sf, fp, sizeof(*fp)) != 0) {
406 #ifdef DEBUG
407 printf("process %ld has trashed its stack\n", (long)p->p_pid);
408 #endif
409 PROC_LOCK(p);
410 sigexit(td, SIGILL);
411 }
412
413 regs->tf_esp = (int)fp;
414 regs->tf_eip = PS_STRINGS - szosigcode;
415 regs->tf_eflags &= ~PSL_T;
416 regs->tf_cs = _ucodesel;
417 regs->tf_ds = _udatasel;
418 regs->tf_es = _udatasel;
419 regs->tf_fs = _udatasel;
420 load_gs(_udatasel);
421 regs->tf_ss = _udatasel;
422 PROC_LOCK(p);
423 mtx_lock(&psp->ps_mtx);
424 }
425 #endif /* COMPAT_43 */
426
427 #ifdef COMPAT_FREEBSD4
428 static void
429 freebsd4_sendsig(catcher, sig, mask, code)
430 sig_t catcher;
431 int sig;
432 sigset_t *mask;
433 u_long code;
434 {
435 struct sigframe4 sf, *sfp;
436 struct proc *p;
437 struct thread *td;
438 struct sigacts *psp;
439 struct trapframe *regs;
440 int oonstack;
441
442 td = curthread;
443 p = td->td_proc;
444 PROC_LOCK_ASSERT(p, MA_OWNED);
445 psp = p->p_sigacts;
446 mtx_assert(&psp->ps_mtx, MA_OWNED);
447 regs = td->td_frame;
448 oonstack = sigonstack(regs->tf_esp);
449
450 /* Save user context. */
451 bzero(&sf, sizeof(sf));
452 sf.sf_uc.uc_sigmask = *mask;
453 sf.sf_uc.uc_stack = td->td_sigstk;
454 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
455 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
456 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
457 sf.sf_uc.uc_mcontext.mc_gs = rgs();
458 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
459
460 /* Allocate space for the signal handler context. */
461 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
462 SIGISMEMBER(psp->ps_sigonstack, sig)) {
463 sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
464 td->td_sigstk.ss_size - sizeof(struct sigframe4));
465 #if defined(COMPAT_43)
466 td->td_sigstk.ss_flags |= SS_ONSTACK;
467 #endif
468 } else
469 sfp = (struct sigframe4 *)regs->tf_esp - 1;
470
471 /* Translate the signal if appropriate. */
472 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
473 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
474
475 /* Build the argument list for the signal handler. */
476 sf.sf_signum = sig;
477 sf.sf_ucontext = (register_t)&sfp->sf_uc;
478 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
479 /* Signal handler installed with SA_SIGINFO. */
480 sf.sf_siginfo = (register_t)&sfp->sf_si;
481 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
482
483 /* Fill in POSIX parts */
484 sf.sf_si.si_signo = sig;
485 sf.sf_si.si_code = code;
486 sf.sf_si.si_addr = (void *)td->td_md.md_fault_addr;
487 } else {
488 /* Old FreeBSD-style arguments. */
489 sf.sf_siginfo = code;
490 sf.sf_addr = td->td_md.md_fault_addr;
491 sf.sf_ahu.sf_handler = catcher;
492 }
493 mtx_unlock(&psp->ps_mtx);
494 PROC_UNLOCK(p);
495
496 /*
497 * If we're a vm86 process, we want to save the segment registers.
498 * We also change eflags to be our emulated eflags, not the actual
499 * eflags.
500 */
501 if (regs->tf_eflags & PSL_VM) {
502 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
503 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
504
505 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
506 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
507 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
508 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
509
510 if (vm86->vm86_has_vme == 0)
511 sf.sf_uc.uc_mcontext.mc_eflags =
512 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
513 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
514
515 /*
516 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
517 * syscalls made by the signal handler. This just avoids
518 * wasting time for our lazy fixup of such faults. PSL_NT
519 * does nothing in vm86 mode, but vm86 programs can set it
520 * almost legitimately in probes for old cpu types.
521 */
522 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
523 }
524
525 /*
526 * Copy the sigframe out to the user's stack.
527 */
528 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
529 #ifdef DEBUG
530 printf("process %ld has trashed its stack\n", (long)p->p_pid);
531 #endif
532 PROC_LOCK(p);
533 sigexit(td, SIGILL);
534 }
535
536 regs->tf_esp = (int)sfp;
537 regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
538 regs->tf_eflags &= ~PSL_T;
539 regs->tf_cs = _ucodesel;
540 regs->tf_ds = _udatasel;
541 regs->tf_es = _udatasel;
542 regs->tf_fs = _udatasel;
543 regs->tf_ss = _udatasel;
544 PROC_LOCK(p);
545 mtx_lock(&psp->ps_mtx);
546 }
547 #endif /* COMPAT_FREEBSD4 */
548
549 void
550 sendsig(catcher, sig, mask, code)
551 sig_t catcher;
552 int sig;
553 sigset_t *mask;
554 u_long code;
555 {
556 struct sigframe sf, *sfp;
557 struct proc *p;
558 struct thread *td;
559 struct sigacts *psp;
560 char *sp;
561 struct trapframe *regs;
562 int oonstack;
563
564 td = curthread;
565 p = td->td_proc;
566 PROC_LOCK_ASSERT(p, MA_OWNED);
567 psp = p->p_sigacts;
568 mtx_assert(&psp->ps_mtx, MA_OWNED);
569 #ifdef COMPAT_FREEBSD4
570 if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
571 freebsd4_sendsig(catcher, sig, mask, code);
572 return;
573 }
574 #endif
575 #ifdef COMPAT_43
576 if (SIGISMEMBER(psp->ps_osigset, sig)) {
577 osendsig(catcher, sig, mask, code);
578 return;
579 }
580 #endif
581 regs = td->td_frame;
582 oonstack = sigonstack(regs->tf_esp);
583
584 /* Save user context. */
585 bzero(&sf, sizeof(sf));
586 sf.sf_uc.uc_sigmask = *mask;
587 sf.sf_uc.uc_stack = td->td_sigstk;
588 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
589 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
590 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
591 sf.sf_uc.uc_mcontext.mc_gs = rgs();
592 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
593 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
594 get_fpcontext(td, &sf.sf_uc.uc_mcontext);
595 fpstate_drop(td);
596
597 /* Allocate space for the signal handler context. */
598 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
599 SIGISMEMBER(psp->ps_sigonstack, sig)) {
600 sp = td->td_sigstk.ss_sp +
601 td->td_sigstk.ss_size - sizeof(struct sigframe);
602 #if defined(COMPAT_43)
603 td->td_sigstk.ss_flags |= SS_ONSTACK;
604 #endif
605 } else
606 sp = (char *)regs->tf_esp - sizeof(struct sigframe);
607 /* Align to 16 bytes. */
608 sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
609
610 /* Translate the signal if appropriate. */
611 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
612 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
613
614 /* Build the argument list for the signal handler. */
615 sf.sf_signum = sig;
616 sf.sf_ucontext = (register_t)&sfp->sf_uc;
617 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
618 /* Signal handler installed with SA_SIGINFO. */
619 sf.sf_siginfo = (register_t)&sfp->sf_si;
620 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
621
622 /* Fill in POSIX parts */
623 sf.sf_si.si_signo = sig;
624 sf.sf_si.si_code = code;
625 sf.sf_si.si_addr = (void *)td->td_md.md_fault_addr;
626 } else {
627 /* Old FreeBSD-style arguments. */
628 sf.sf_siginfo = code;
629 sf.sf_addr = td->td_md.md_fault_addr;
630 sf.sf_ahu.sf_handler = catcher;
631 }
632 mtx_unlock(&psp->ps_mtx);
633 PROC_UNLOCK(p);
634
635 /*
636 * If we're a vm86 process, we want to save the segment registers.
637 * We also change eflags to be our emulated eflags, not the actual
638 * eflags.
639 */
640 if (regs->tf_eflags & PSL_VM) {
641 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
642 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
643
644 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
645 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
646 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
647 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
648
649 if (vm86->vm86_has_vme == 0)
650 sf.sf_uc.uc_mcontext.mc_eflags =
651 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
652 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
653
654 /*
655 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
656 * syscalls made by the signal handler. This just avoids
657 * wasting time for our lazy fixup of such faults. PSL_NT
658 * does nothing in vm86 mode, but vm86 programs can set it
659 * almost legitimately in probes for old cpu types.
660 */
661 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
662 }
663
664 /*
665 * Copy the sigframe out to the user's stack.
666 */
667 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
668 #ifdef DEBUG
669 printf("process %ld has trashed its stack\n", (long)p->p_pid);
670 #endif
671 PROC_LOCK(p);
672 sigexit(td, SIGILL);
673 }
674
675 regs->tf_esp = (int)sfp;
676 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
677 regs->tf_eflags &= ~PSL_T;
678 regs->tf_cs = _ucodesel;
679 regs->tf_ds = _udatasel;
680 regs->tf_es = _udatasel;
681 regs->tf_fs = _udatasel;
682 regs->tf_ss = _udatasel;
683 PROC_LOCK(p);
684 mtx_lock(&psp->ps_mtx);
685 }
686
687 /*
688 * Build siginfo_t for SA thread
689 */
690 void
691 cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
692 {
693 struct proc *p;
694 struct thread *td;
695
696 td = curthread;
697 p = td->td_proc;
698 PROC_LOCK_ASSERT(p, MA_OWNED);
699
700 bzero(si, sizeof(*si));
701 si->si_signo = sig;
702 si->si_code = code;
703 si->si_addr = (void *)td->td_md.md_fault_addr;
704 /* XXXKSE fill other fields */
705 }
706
707 /*
708 * System call to cleanup state after a signal
709 * has been taken. Reset signal mask and
710 * stack state from context left by sendsig (above).
711 * Return to previous pc and psl as specified by
712 * context left by sendsig. Check carefully to
713 * make sure that the user has not modified the
714 * state to gain improper privileges.
715 *
716 * MPSAFE
717 */
718 #ifdef COMPAT_43
719 int
720 osigreturn(td, uap)
721 struct thread *td;
722 struct osigreturn_args /* {
723 struct osigcontext *sigcntxp;
724 } */ *uap;
725 {
726 struct osigcontext sc;
727 struct trapframe *regs;
728 struct osigcontext *scp;
729 struct proc *p = td->td_proc;
730 int eflags, error;
731
732 regs = td->td_frame;
733 error = copyin(uap->sigcntxp, &sc, sizeof(sc));
734 if (error != 0)
735 return (error);
736 scp = ≻
737 eflags = scp->sc_ps;
738 if (eflags & PSL_VM) {
739 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
740 struct vm86_kernel *vm86;
741
742 /*
743 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
744 * set up the vm86 area, and we can't enter vm86 mode.
745 */
746 if (td->td_pcb->pcb_ext == 0)
747 return (EINVAL);
748 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
749 if (vm86->vm86_inited == 0)
750 return (EINVAL);
751
752 /* Go back to user mode if both flags are set. */
753 if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
754 trapsignal(td, SIGBUS, 0);
755
756 if (vm86->vm86_has_vme) {
757 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
758 (eflags & VME_USERCHANGE) | PSL_VM;
759 } else {
760 vm86->vm86_eflags = eflags; /* save VIF, VIP */
761 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
762 (eflags & VM_USERCHANGE) | PSL_VM;
763 }
764 tf->tf_vm86_ds = scp->sc_ds;
765 tf->tf_vm86_es = scp->sc_es;
766 tf->tf_vm86_fs = scp->sc_fs;
767 tf->tf_vm86_gs = scp->sc_gs;
768 tf->tf_ds = _udatasel;
769 tf->tf_es = _udatasel;
770 tf->tf_fs = _udatasel;
771 } else {
772 /*
773 * Don't allow users to change privileged or reserved flags.
774 */
775 /*
776 * XXX do allow users to change the privileged flag PSL_RF.
777 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
778 * should sometimes set it there too. tf_eflags is kept in
779 * the signal context during signal handling and there is no
780 * other place to remember it, so the PSL_RF bit may be
781 * corrupted by the signal handler without us knowing.
782 * Corruption of the PSL_RF bit at worst causes one more or
783 * one less debugger trap, so allowing it is fairly harmless.
784 */
785 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
786 return (EINVAL);
787 }
788
789 /*
790 * Don't allow users to load a valid privileged %cs. Let the
791 * hardware check for invalid selectors, excess privilege in
792 * other selectors, invalid %eip's and invalid %esp's.
793 */
794 if (!CS_SECURE(scp->sc_cs)) {
795 trapsignal(td, SIGBUS, T_PROTFLT);
796 return (EINVAL);
797 }
798 regs->tf_ds = scp->sc_ds;
799 regs->tf_es = scp->sc_es;
800 regs->tf_fs = scp->sc_fs;
801 }
802
803 /* Restore remaining registers. */
804 regs->tf_eax = scp->sc_eax;
805 regs->tf_ebx = scp->sc_ebx;
806 regs->tf_ecx = scp->sc_ecx;
807 regs->tf_edx = scp->sc_edx;
808 regs->tf_esi = scp->sc_esi;
809 regs->tf_edi = scp->sc_edi;
810 regs->tf_cs = scp->sc_cs;
811 regs->tf_ss = scp->sc_ss;
812 regs->tf_isp = scp->sc_isp;
813 regs->tf_ebp = scp->sc_fp;
814 regs->tf_esp = scp->sc_sp;
815 regs->tf_eip = scp->sc_pc;
816 regs->tf_eflags = eflags;
817
818 PROC_LOCK(p);
819 #if defined(COMPAT_43)
820 if (scp->sc_onstack & 1)
821 td->td_sigstk.ss_flags |= SS_ONSTACK;
822 else
823 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
824 #endif
825 SIGSETOLD(td->td_sigmask, scp->sc_mask);
826 SIG_CANTMASK(td->td_sigmask);
827 signotify(td);
828 PROC_UNLOCK(p);
829 return (EJUSTRETURN);
830 }
831 #endif /* COMPAT_43 */
832
833 #ifdef COMPAT_FREEBSD4
834 /*
835 * MPSAFE
836 */
837 int
838 freebsd4_sigreturn(td, uap)
839 struct thread *td;
840 struct freebsd4_sigreturn_args /* {
841 const ucontext4 *sigcntxp;
842 } */ *uap;
843 {
844 struct ucontext4 uc;
845 struct proc *p = td->td_proc;
846 struct trapframe *regs;
847 const struct ucontext4 *ucp;
848 int cs, eflags, error;
849
850 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
851 if (error != 0)
852 return (error);
853 ucp = &uc;
854 regs = td->td_frame;
855 eflags = ucp->uc_mcontext.mc_eflags;
856 if (eflags & PSL_VM) {
857 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
858 struct vm86_kernel *vm86;
859
860 /*
861 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
862 * set up the vm86 area, and we can't enter vm86 mode.
863 */
864 if (td->td_pcb->pcb_ext == 0)
865 return (EINVAL);
866 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
867 if (vm86->vm86_inited == 0)
868 return (EINVAL);
869
870 /* Go back to user mode if both flags are set. */
871 if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
872 trapsignal(td, SIGBUS, 0);
873
874 if (vm86->vm86_has_vme) {
875 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
876 (eflags & VME_USERCHANGE) | PSL_VM;
877 } else {
878 vm86->vm86_eflags = eflags; /* save VIF, VIP */
879 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
880 (eflags & VM_USERCHANGE) | PSL_VM;
881 }
882 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
883 tf->tf_eflags = eflags;
884 tf->tf_vm86_ds = tf->tf_ds;
885 tf->tf_vm86_es = tf->tf_es;
886 tf->tf_vm86_fs = tf->tf_fs;
887 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
888 tf->tf_ds = _udatasel;
889 tf->tf_es = _udatasel;
890 tf->tf_fs = _udatasel;
891 } else {
892 /*
893 * Don't allow users to change privileged or reserved flags.
894 */
895 /*
896 * XXX do allow users to change the privileged flag PSL_RF.
897 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
898 * should sometimes set it there too. tf_eflags is kept in
899 * the signal context during signal handling and there is no
900 * other place to remember it, so the PSL_RF bit may be
901 * corrupted by the signal handler without us knowing.
902 * Corruption of the PSL_RF bit at worst causes one more or
903 * one less debugger trap, so allowing it is fairly harmless.
904 */
905 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
906 printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
907 return (EINVAL);
908 }
909
910 /*
911 * Don't allow users to load a valid privileged %cs. Let the
912 * hardware check for invalid selectors, excess privilege in
913 * other selectors, invalid %eip's and invalid %esp's.
914 */
915 cs = ucp->uc_mcontext.mc_cs;
916 if (!CS_SECURE(cs)) {
917 printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
918 trapsignal(td, SIGBUS, T_PROTFLT);
919 return (EINVAL);
920 }
921
922 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
923 }
924
925 PROC_LOCK(p);
926 #if defined(COMPAT_43)
927 if (ucp->uc_mcontext.mc_onstack & 1)
928 td->td_sigstk.ss_flags |= SS_ONSTACK;
929 else
930 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
931 #endif
932
933 td->td_sigmask = ucp->uc_sigmask;
934 SIG_CANTMASK(td->td_sigmask);
935 signotify(td);
936 PROC_UNLOCK(p);
937 return (EJUSTRETURN);
938 }
939 #endif /* COMPAT_FREEBSD4 */
940
941 /*
942 * MPSAFE
943 */
944 int
945 sigreturn(td, uap)
946 struct thread *td;
947 struct sigreturn_args /* {
948 const __ucontext *sigcntxp;
949 } */ *uap;
950 {
951 ucontext_t uc;
952 struct proc *p = td->td_proc;
953 struct trapframe *regs;
954 const ucontext_t *ucp;
955 int cs, eflags, error, ret;
956
957 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
958 if (error != 0)
959 return (error);
960 ucp = &uc;
961 regs = td->td_frame;
962 eflags = ucp->uc_mcontext.mc_eflags;
963 if (eflags & PSL_VM) {
964 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
965 struct vm86_kernel *vm86;
966
967 /*
968 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
969 * set up the vm86 area, and we can't enter vm86 mode.
970 */
971 if (td->td_pcb->pcb_ext == 0)
972 return (EINVAL);
973 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
974 if (vm86->vm86_inited == 0)
975 return (EINVAL);
976
977 /* Go back to user mode if both flags are set. */
978 if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
979 trapsignal(td, SIGBUS, 0);
980
981 if (vm86->vm86_has_vme) {
982 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
983 (eflags & VME_USERCHANGE) | PSL_VM;
984 } else {
985 vm86->vm86_eflags = eflags; /* save VIF, VIP */
986 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
987 (eflags & VM_USERCHANGE) | PSL_VM;
988 }
989 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
990 tf->tf_eflags = eflags;
991 tf->tf_vm86_ds = tf->tf_ds;
992 tf->tf_vm86_es = tf->tf_es;
993 tf->tf_vm86_fs = tf->tf_fs;
994 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
995 tf->tf_ds = _udatasel;
996 tf->tf_es = _udatasel;
997 tf->tf_fs = _udatasel;
998 } else {
999 /*
1000 * Don't allow users to change privileged or reserved flags.
1001 */
1002 /*
1003 * XXX do allow users to change the privileged flag PSL_RF.
1004 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
1005 * should sometimes set it there too. tf_eflags is kept in
1006 * the signal context during signal handling and there is no
1007 * other place to remember it, so the PSL_RF bit may be
1008 * corrupted by the signal handler without us knowing.
1009 * Corruption of the PSL_RF bit at worst causes one more or
1010 * one less debugger trap, so allowing it is fairly harmless.
1011 */
1012 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
1013 printf("sigreturn: eflags = 0x%x\n", eflags);
1014 return (EINVAL);
1015 }
1016
1017 /*
1018 * Don't allow users to load a valid privileged %cs. Let the
1019 * hardware check for invalid selectors, excess privilege in
1020 * other selectors, invalid %eip's and invalid %esp's.
1021 */
1022 cs = ucp->uc_mcontext.mc_cs;
1023 if (!CS_SECURE(cs)) {
1024 printf("sigreturn: cs = 0x%x\n", cs);
1025 trapsignal(td, SIGBUS, T_PROTFLT);
1026 return (EINVAL);
1027 }
1028
1029 ret = set_fpcontext(td, &ucp->uc_mcontext);
1030 if (ret != 0)
1031 return (ret);
1032 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
1033 }
1034
1035 PROC_LOCK(p);
1036 #if defined(COMPAT_43)
1037 if (ucp->uc_mcontext.mc_onstack & 1)
1038 td->td_sigstk.ss_flags |= SS_ONSTACK;
1039 else
1040 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
1041 #endif
1042
1043 td->td_sigmask = ucp->uc_sigmask;
1044 SIG_CANTMASK(td->td_sigmask);
1045 signotify(td);
1046 PROC_UNLOCK(p);
1047 return (EJUSTRETURN);
1048 }
1049
1050 /*
1051 * Machine dependent boot() routine
1052 *
1053 * I haven't seen anything to put here yet
1054 * Possibly some stuff might be grafted back here from boot()
1055 */
1056 void
1057 cpu_boot(int howto)
1058 {
1059 }
1060
1061 /* Get current clock frequency for the given cpu id. */
1062 int
1063 cpu_est_clockrate(int cpu_id, uint64_t *rate)
1064 {
1065 register_t reg;
1066 uint64_t tsc1, tsc2;
1067
1068 if (pcpu_find(cpu_id) == NULL || rate == NULL)
1069 return (EINVAL);
1070 if (!tsc_present)
1071 return (EOPNOTSUPP);
1072
1073 /* If we're booting, trust the rate calibrated moments ago. */
1074 if (cold) {
1075 *rate = tsc_freq;
1076 return (0);
1077 }
1078
1079 #ifdef SMP
1080 /* Schedule ourselves on the indicated cpu. */
1081 mtx_lock_spin(&sched_lock);
1082 sched_bind(curthread, cpu_id);
1083 mtx_unlock_spin(&sched_lock);
1084 #endif
1085
1086 /* Calibrate by measuring a short delay. */
1087 reg = intr_disable();
1088 tsc1 = rdtsc();
1089 DELAY(1000);
1090 tsc2 = rdtsc();
1091 intr_restore(reg);
1092
1093 #ifdef SMP
1094 mtx_lock_spin(&sched_lock);
1095 sched_unbind(curthread);
1096 mtx_unlock_spin(&sched_lock);
1097 #endif
1098
1099 /*
1100 * Calculate the difference in readings over the 1000 us delay,
1101 * scale it to ticks per second, and subtract 0.5% of the total;
1102 * empirically, the overhead in DELAY() works out to about this much.
1103 */
1104 tsc2 -= tsc1;
1105 *rate = tsc2 * 1000 - tsc2 * 5;
1106 return (0);
1107 }
1108
1109 /*
1110 * Shutdown the CPU as much as possible
1111 */
1112 void
1113 cpu_halt(void)
1114 {
1115 for (;;)
1116 __asm__ ("hlt");
1117 }
1118
1119 /*
1120 * Hook to idle the CPU when possible. In the SMP case we default to
1121 * off because a halted cpu will not currently pick up a new thread in the
1122 * run queue until the next timer tick. If turned on this will result in
1123 * approximately a 4.2% loss in real time performance in buildworld tests
1124 * (but improves user and sys times oddly enough), and saves approximately
1125 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
1126 *
1127 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
1128 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
1129 * Then we can have our cake and eat it too.
1130 *
1131 * XXX I'm turning it on for SMP as well by default for now. It seems to
1132 * help lock contention somewhat, and this is critical for HTT. -Peter
1133 */
1134 static int cpu_idle_hlt = 1;
1135 SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
1136 &cpu_idle_hlt, 0, "Idle loop HLT enable");
1137
1138 static void
1139 cpu_idle_default(void)
1140 {
1141 /*
1142 * We must guarantee that hlt is the very next instruction after
1143 * sti, or we introduce a timing window.
1145 */
1146 __asm __volatile("sti; hlt");
1147 }
1148
1149 /*
1150 * Note that we have to be careful here to avoid a race between checking
1151 * sched_runnable() and actually halting. If we don't do this, we may waste
1152 * the time between calling hlt and the next interrupt even though there
1153 * is a runnable process.
1154 */
1155 void
1156 cpu_idle(void)
1157 {
1158
1159 #ifdef SMP
1160 if (mp_grab_cpu_hlt())
1161 return;
1162 #endif
1163
1164 if (cpu_idle_hlt) {
1165 disable_intr();
1166 if (sched_runnable())
1167 enable_intr();
1168 else
1169 (*cpu_idle_hook)();
1170 }
1171 }
1172
1173 /* Other subsystems (e.g., ACPI) can hook this later. */
1174 void (*cpu_idle_hook)(void) = cpu_idle_default;
1175
1176 /*
1177 * Clear registers on exec
1178 */
1179 void
1180 exec_setregs(td, entry, stack, ps_strings)
1181 struct thread *td;
1182 u_long entry;
1183 u_long stack;
1184 u_long ps_strings;
1185 {
1186 struct trapframe *regs = td->td_frame;
1187 struct pcb *pcb = td->td_pcb;
1188
1189 /* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
1190 pcb->pcb_gs = _udatasel;
1191 load_gs(_udatasel);
1192
1193 if (td->td_proc->p_md.md_ldt)
1194 user_ldt_free(td);
1195
1196 bzero((char *)regs, sizeof(struct trapframe));
1197 regs->tf_eip = entry;
1198 regs->tf_esp = stack;
1199 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
1200 regs->tf_ss = _udatasel;
1201 regs->tf_ds = _udatasel;
1202 regs->tf_es = _udatasel;
1203 regs->tf_fs = _udatasel;
1204 regs->tf_cs = _ucodesel;
1205
1206 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */
1207 regs->tf_ebx = ps_strings;
1208
1209 /*
1210 * Reset the hardware debug registers if they were in use.
1211 * They won't have any meaning for the newly exec'd process.
1212 */
1213 if (pcb->pcb_flags & PCB_DBREGS) {
1214 pcb->pcb_dr0 = 0;
1215 pcb->pcb_dr1 = 0;
1216 pcb->pcb_dr2 = 0;
1217 pcb->pcb_dr3 = 0;
1218 pcb->pcb_dr6 = 0;
1219 pcb->pcb_dr7 = 0;
1220 if (pcb == PCPU_GET(curpcb)) {
1221 /*
1222 * Clear the debug registers on the running
1223 * CPU, otherwise they will end up affecting
1224 * the next process we switch to.
1225 */
1226 reset_dbregs();
1227 }
1228 pcb->pcb_flags &= ~PCB_DBREGS;
1229 }
1230
1231 /*
1232 * Initialize the math emulator (if any) for the current process.
1233 * Actually, just clear the bit that says that the emulator has
1234 * been initialized. Initialization is delayed until the process
1235 * traps to the emulator (if it is done at all) mainly because
1236 * emulators don't provide an entry point for initialization.
1237 */
1238 td->td_pcb->pcb_flags &= ~FP_SOFTFP;
1239
1240 /*
1241 * Drop the FP state if we hold it, so that the process gets a
1242 * clean FP state if it uses the FPU again.
1243 */
1244 fpstate_drop(td);
1245
1246 /*
1247 * XXX - Linux emulator
1248 * Make sure edx is 0x0 on entry. Linux binaries depend
1249 * on it.
1250 */
1251 td->td_retval[1] = 0;
1252 }
1253
1254 void
1255 cpu_setregs(void)
1256 {
1257 unsigned int cr0;
1258
1259 cr0 = rcr0();
1260
1261 /*
1262 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
1263 *
1264 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
1265 * instructions. We must set the CR0_MP bit and use the CR0_TS
1266 * bit to control the trap, because setting the CR0_EM bit does
1267 * not cause WAIT instructions to trap. It's important to trap
1268 * WAIT instructions - otherwise the "wait" variants of no-wait
1269 * control instructions would degenerate to the "no-wait" variants
1270 * after FP context switches but work correctly otherwise. It's
1271 * particularly important to trap WAITs when there is no NPX -
1272 * otherwise the "wait" variants would always degenerate.
1273 *
1274 * Try setting CR0_NE to get correct error reporting on 486DX's.
1275 * Setting it should fail or do nothing on lesser processors.
1276 */
1277 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
1278 load_cr0(cr0);
1279 load_gs(_udatasel);
1280 }
1281
1282 static int
1283 sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
1284 {
1285 int error;
1286 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
1287 req);
1288 if (!error && req->newptr)
1289 resettodr();
1290 return (error);
1291 }
1292
1293 SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
1294 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");
1295
1296 SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
1297 CTLFLAG_RW, &disable_rtc_set, 0, "");
1298
1299 SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
1300 CTLFLAG_RD, &bootinfo, bootinfo, "");
1301
1302 SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
1303 CTLFLAG_RW, &wall_cmos_clock, 0, "");
1304
1305 u_long bootdev; /* not a struct cdev *- encoding is different */
1306 SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
1307 CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");
1308
1309 /*
1310 * Initialize 386 and configure to run kernel
1311 */
1312
1313 /*
1314 * Initialize segments & interrupt table
1315 */
1316
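/* Selector for the default (shared) LDT. */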
1317 int _default_ldt;
1318 union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */
1319 static struct gate_descriptor idt0[NIDT];
1320 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
1321 union descriptor ldt[NLDT]; /* local descriptor table */
1322 struct region_descriptor r_gdt, r_idt; /* table descriptors */
1323
1324 int private_tss; /* flag indicating private tss */
1325
1326 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
1327 extern int has_f00f_bug;
1328 #endif
1329
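/* TSS and private stack used to handle double faults (see GPANIC_SEL below). */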
1330 static struct i386tss dblfault_tss;
1331 static char dblfault_stack[PAGE_SIZE];
1332
1333 extern vm_offset_t proc0kstack;
1334
1335
1336 /*
1337 * software prototypes -- in more palatable form.
1338 *
1339 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
1340 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
1341 */
1342 struct soft_segment_descriptor gdt_segs[] = {
1343 /* GNULL_SEL 0 Null Descriptor */
1344 { 0x0, /* segment base address */
1345 0x0, /* length */
1346 0, /* segment type */
1347 0, /* segment descriptor priority level */
1348 0, /* segment descriptor present */
1349 0, 0,
1350 0, /* default 32 vs 16 bit size */
1351 0 /* limit granularity (byte/page units)*/ },
1352 /* GPRIV_SEL 1 SMP Per-Processor Private Data Descriptor */
1353 { 0x0, /* segment base address */
1354 0xfffff, /* length - all address space */
1355 SDT_MEMRWA, /* segment type */
1356 0, /* segment descriptor priority level */
1357 1, /* segment descriptor present */
1358 0, 0,
1359 1, /* default 32 vs 16 bit size */
1360 1 /* limit granularity (byte/page units)*/ },
1361 /* GUFS_SEL 2 %fs Descriptor for user */
1362 { 0x0, /* segment base address */
1363 0xfffff, /* length - all address space */
1364 SDT_MEMRWA, /* segment type */
1365 SEL_UPL, /* segment descriptor priority level */
1366 1, /* segment descriptor present */
1367 0, 0,
1368 1, /* default 32 vs 16 bit size */
1369 1 /* limit granularity (byte/page units)*/ },
1370 /* GUGS_SEL 3 %gs Descriptor for user */
1371 { 0x0, /* segment base address */
1372 0xfffff, /* length - all address space */
1373 SDT_MEMRWA, /* segment type */
1374 SEL_UPL, /* segment descriptor priority level */
1375 1, /* segment descriptor present */
1376 0, 0,
1377 1, /* default 32 vs 16 bit size */
1378 1 /* limit granularity (byte/page units)*/ },
1379 /* GCODE_SEL 4 Code Descriptor for kernel */
1380 { 0x0, /* segment base address */
1381 0xfffff, /* length - all address space */
1382 SDT_MEMERA, /* segment type */
1383 0, /* segment descriptor priority level */
1384 1, /* segment descriptor present */
1385 0, 0,
1386 1, /* default 32 vs 16 bit size */
1387 1 /* limit granularity (byte/page units)*/ },
1388 /* GDATA_SEL 5 Data Descriptor for kernel */
1389 { 0x0, /* segment base address */
1390 0xfffff, /* length - all address space */
1391 SDT_MEMRWA, /* segment type */
1392 0, /* segment descriptor priority level */
1393 1, /* segment descriptor present */
1394 0, 0,
1395 1, /* default 32 vs 16 bit size */
1396 1 /* limit granularity (byte/page units)*/ },
1397 /* GUCODE_SEL 6 Code Descriptor for user */
1398 { 0x0, /* segment base address */
1399 0xfffff, /* length - all address space */
1400 SDT_MEMERA, /* segment type */
1401 SEL_UPL, /* segment descriptor priority level */
1402 1, /* segment descriptor present */
1403 0, 0,
1404 1, /* default 32 vs 16 bit size */
1405 1 /* limit granularity (byte/page units)*/ },
1406 /* GUDATA_SEL 7 Data Descriptor for user */
1407 { 0x0, /* segment base address */
1408 0xfffff, /* length - all address space */
1409 SDT_MEMRWA, /* segment type */
1410 SEL_UPL, /* segment descriptor priority level */
1411 1, /* segment descriptor present */
1412 0, 0,
1413 1, /* default 32 vs 16 bit size */
1414 1 /* limit granularity (byte/page units)*/ },
1415 /* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
1416 { 0x400, /* segment base address */
1417 0xfffff, /* length */
1418 SDT_MEMRWA, /* segment type */
1419 0, /* segment descriptor priority level */
1420 1, /* segment descriptor present */
1421 0, 0,
1422 1, /* default 32 vs 16 bit size */
1423 1 /* limit granularity (byte/page units)*/ },
1424 /* GPROC0_SEL 9 Proc 0 Tss Descriptor */
1425 {
1426 0x0, /* segment base address */
1427 sizeof(struct i386tss)-1,/* length */
1428 SDT_SYS386TSS, /* segment type */
1429 0, /* segment descriptor priority level */
1430 1, /* segment descriptor present */
1431 0, 0,
1432 0, /* unused - default 32 vs 16 bit size */
1433 0 /* limit granularity (byte/page units)*/ },
1434 /* GLDT_SEL 10 LDT Descriptor */
1435 { (int) ldt, /* segment base address */
1436 sizeof(ldt)-1, /* length - all address space */
1437 SDT_SYSLDT, /* segment type */
1438 SEL_UPL, /* segment descriptor priority level */
1439 1, /* segment descriptor present */
1440 0, 0,
1441 0, /* unused - default 32 vs 16 bit size */
1442 0 /* limit granularity (byte/page units)*/ },
1443 /* GUSERLDT_SEL 11 User LDT Descriptor per process */
1444 { (int) ldt, /* segment base address */
1445 (512 * sizeof(union descriptor)-1), /* length */
1446 SDT_SYSLDT, /* segment type */
1447 0, /* segment descriptor priority level */
1448 1, /* segment descriptor present */
1449 0, 0,
1450 0, /* unused - default 32 vs 16 bit size */
1451 0 /* limit granularity (byte/page units)*/ },
1452 /* GPANIC_SEL 12 Panic Tss Descriptor */
1453 { (int) &dblfault_tss, /* segment base address */
1454 sizeof(struct i386tss)-1,/* length - all address space */
1455 SDT_SYS386TSS, /* segment type */
1456 0, /* segment descriptor priority level */
1457 1, /* segment descriptor present */
1458 0, 0,
1459 0, /* unused - default 32 vs 16 bit size */
1460 0 /* limit granularity (byte/page units)*/ },
1461 /* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
1462 { 0, /* segment base address (overwritten) */
1463 0xfffff, /* length */
1464 SDT_MEMERA, /* segment type */
1465 0, /* segment descriptor priority level */
1466 1, /* segment descriptor present */
1467 0, 0,
1468 0, /* default 32 vs 16 bit size */
1469 1 /* limit granularity (byte/page units)*/ },
1470 /* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
1471 { 0, /* segment base address (overwritten) */
1472 0xfffff, /* length */
1473 SDT_MEMERA, /* segment type */
1474 0, /* segment descriptor priority level */
1475 1, /* segment descriptor present */
1476 0, 0,
1477 0, /* default 32 vs 16 bit size */
1478 1 /* limit granularity (byte/page units)*/ },
1479 /* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
1480 { 0, /* segment base address (overwritten) */
1481 0xfffff, /* length */
1482 SDT_MEMRWA, /* segment type */
1483 0, /* segment descriptor priority level */
1484 1, /* segment descriptor present */
1485 0, 0,
1486 1, /* default 32 vs 16 bit size */
1487 1 /* limit granularity (byte/page units)*/ },
1488 /* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
1489 { 0, /* segment base address (overwritten) */
1490 0xfffff, /* length */
1491 SDT_MEMRWA, /* segment type */
1492 0, /* segment descriptor priority level */
1493 1, /* segment descriptor present */
1494 0, 0,
1495 0, /* default 32 vs 16 bit size */
1496 1 /* limit granularity (byte/page units)*/ },
1497 /* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
1498 { 0, /* segment base address (overwritten) */
1499 0xfffff, /* length */
1500 SDT_MEMRWA, /* segment type */
1501 0, /* segment descriptor priority level */
1502 1, /* segment descriptor present */
1503 0, 0,
1504 0, /* default 32 vs 16 bit size */
1505 1 /* limit granularity (byte/page units)*/ },
1506 /* GNDIS_SEL 18 NDIS Descriptor */
1507 { 0x0, /* segment base address */
1508 0x0, /* length */
1509 0, /* segment type */
1510 0, /* segment descriptor priority level */
1511 0, /* segment descriptor present */
1512 0, 0,
1513 0, /* default 32 vs 16 bit size */
1514 0 /* limit granularity (byte/page units)*/ },
1515 };
1516
1517 static struct soft_segment_descriptor ldt_segs[] = {
1518 /* Null Descriptor - overwritten by call gate */
1519 { 0x0, /* segment base address */
1520 0x0, /* length - all address space */
1521 0, /* segment type */
1522 0, /* segment descriptor priority level */
1523 0, /* segment descriptor present */
1524 0, 0,
1525 0, /* default 32 vs 16 bit size */
1526 0 /* limit granularity (byte/page units)*/ },
1527 /* Null Descriptor - overwritten by call gate */
1528 { 0x0, /* segment base address */
1529 0x0, /* length - all address space */
1530 0, /* segment type */
1531 0, /* segment descriptor priority level */
1532 0, /* segment descriptor present */
1533 0, 0,
1534 0, /* default 32 vs 16 bit size */
1535 0 /* limit granularity (byte/page units)*/ },
1536 /* Null Descriptor - overwritten by call gate */
1537 { 0x0, /* segment base address */
1538 0x0, /* length - all address space */
1539 0, /* segment type */
1540 0, /* segment descriptor priority level */
1541 0, /* segment descriptor present */
1542 0, 0,
1543 0, /* default 32 vs 16 bit size */
1544 0 /* limit granularity (byte/page units)*/ },
1545 /* Code Descriptor for user */
1546 { 0x0, /* segment base address */
1547 0xfffff, /* length - all address space */
1548 SDT_MEMERA, /* segment type */
1549 SEL_UPL, /* segment descriptor priority level */
1550 1, /* segment descriptor present */
1551 0, 0,
1552 1, /* default 32 vs 16 bit size */
1553 1 /* limit granularity (byte/page units)*/ },
1554 /* Null Descriptor - overwritten by call gate */
1555 { 0x0, /* segment base address */
1556 0x0, /* length - all address space */
1557 0, /* segment type */
1558 0, /* segment descriptor priority level */
1559 0, /* segment descriptor present */
1560 0, 0,
1561 0, /* default 32 vs 16 bit size */
1562 0 /* limit granularity (byte/page units)*/ },
1563 /* Data Descriptor for user */
1564 { 0x0, /* segment base address */
1565 0xfffff, /* length - all address space */
1566 SDT_MEMRWA, /* segment type */
1567 SEL_UPL, /* segment descriptor priority level */
1568 1, /* segment descriptor present */
1569 0, 0,
1570 1, /* default 32 vs 16 bit size */
1571 1 /* limit granularity (byte/page units)*/ },
1572 };
1573
1574 void
1575 setidt(idx, func, typ, dpl, selec)
1576 int idx;
1577 inthand_t *func;
1578 int typ;
1579 int dpl;
1580 int selec;
1581 {
1582 struct gate_descriptor *ip;
1583
1584 ip = idt + idx;
1585 ip->gd_looffset = (int)func;
1586 ip->gd_selector = selec;
1587 ip->gd_stkcpy = 0;
1588 ip->gd_xx = 0;
1589 ip->gd_type = typ;
1590 ip->gd_dpl = dpl;
1591 ip->gd_p = 1;
1592 ip->gd_hioffset = ((int)func)>>16 ;
1593 }
1594
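/* IDTVEC(foo) names the low-level interrupt/trap entry point Xfoo. */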
1595 #define IDTVEC(name) __CONCAT(X,name)
1596
1597 extern inthand_t
1598 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1599 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1600 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1601 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1602 IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
1603
1604 #ifdef DDB
1605 /*
1606 * Display the index and function name of any IDT entries that don't use
1607 * the default 'rsvd' entry point.
1608 */
1609 DB_SHOW_COMMAND(idt, db_show_idt)
1610 {
1611 struct gate_descriptor *ip;
1612 int idx, quit;
1613 uintptr_t func;
1614
1615 ip = idt;
1616 db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
1617 for (idx = 0, quit = 0; idx < NIDT; idx++) {
1618 func = (ip->gd_hioffset << 16 | ip->gd_looffset);
1619 if (func != (uintptr_t)&IDTVEC(rsvd)) {
1620 db_printf("%3d\t", idx);
1621 db_printsym(func, DB_STGY_PROC);
1622 db_printf("\n");
1623 }
1624 ip++;
1625 }
1626 }
1627 #endif
1628
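/* Convert a hardware segment descriptor to the software form used above. */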
1629 void
1630 sdtossd(sd, ssd)
1631 struct segment_descriptor *sd;
1632 struct soft_segment_descriptor *ssd;
1633 {
1634 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1635 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1636 ssd->ssd_type = sd->sd_type;
1637 ssd->ssd_dpl = sd->sd_dpl;
1638 ssd->ssd_p = sd->sd_p;
1639 ssd->ssd_def32 = sd->sd_def32;
1640 ssd->ssd_gran = sd->sd_gran;
1641 }
1642
1643 /*
1644 * Populate the (physmap) array with base/bound pairs describing the
1645 * available physical memory in the system, then test this memory and
1646 * build the phys_avail array describing the actually-available memory.
1647 *
1648 * If we cannot accurately determine the physical memory map, then use
1649 * the value from the 0xE801 call, and failing that, the RTC.
1650 *
1651 * Total memory size may be set by the kernel environment variable
1652 * hw.physmem or the compile-time define MAXMEM.
1653 *
1654 * XXX first should be vm_paddr_t.
1655 */
1656 static void
1657 getmemsize(int first)
1658 {
1659 int i, physmap_idx, pa_indx, da_indx;
1660 int hasbrokenint12, has_smap;
1661 u_long physmem_tunable;
1662 u_int extmem;
1663 struct vm86frame vmf;
1664 struct vm86context vmc;
1665 vm_paddr_t pa, physmap[PHYSMAP_SIZE];
1666 pt_entry_t *pte;
1667 struct bios_smap *smap;
1668 quad_t dcons_addr, dcons_size;
1669
1670 #ifdef XBOX
1671 if (arch_i386_is_xbox) {
1672 /*
1673 * We queried the memory size before, so chop off 4MB for
1674 * the framebuffer and inform the OS of this.
1675 */
1676 physmap[0] = 0;
1677 physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE;
1678 physmap_idx = 0;
1679 has_smap = 0;
1680 goto physmap_done;
1681 }
1682 #endif
1683
1684 hasbrokenint12 = 0;
1685 TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
1686 bzero(&vmf, sizeof(vmf));
1687 bzero(physmap, sizeof(physmap));
1688 basemem = 0;
1689 has_smap = 0;
1690
1691 /*
1692 * Some newer BIOSes have a broken INT 12H implementation that causes
1693 * an immediate kernel panic. In this case, we need to scan the SMAP
1694 * with INT 15:E820 first, then determine the base memory size.
1695 */
1696 if (hasbrokenint12) {
1697 goto int15e820;
1698 }
1699
1700 /*
1701 * Perform "base memory" related probes & setup
1702 */
1703 vm86_intcall(0x12, &vmf);
1704 basemem = vmf.vmf_ax;
1705 if (basemem > 640) {
1706 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
1707 basemem);
1708 basemem = 640;
1709 }
1710
1711 /*
1712 * XXX if basemem is now < 640, there is a `hole'
1713 * between the end of base memory and the start of
1714 * ISA memory. The hole may be empty or it may
1715 * contain BIOS code or data. Map it read/write so
1716 * that the BIOS can write to it. (Memory from 0 to
1717 * the physical end of the kernel is mapped read-only
1718 * to begin with and then parts of it are remapped.
1719 * The parts that aren't remapped form holes that
1720 * remain read-only and are unused by the kernel.
1721 * The base memory area is below the physical end of
1722 * the kernel and right now forms a read-only hole.
1723 * The part of it from PAGE_SIZE to
1724 * (trunc_page(basemem * 1024) - 1) will be
1725 * remapped and used by the kernel later.)
1726 *
1727 * This code is similar to the code used in
1728 * pmap_mapdev, but since no memory needs to be
1729 * allocated we simply change the mapping.
1730 */
1731 for (pa = trunc_page(basemem * 1024);
1732 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1733 pmap_kenter(KERNBASE + pa, pa);
1734
1735 /*
1736 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
1737 * the vm86 page table so that vm86 can scribble on them using
1738 * the vm86 map too. XXX: why 2 ways for this and only 1 way for
1739 * page 0, at least as initialized here?
1740 */
1741 pte = (pt_entry_t *)vm86paddr;
1742 for (i = basemem / 4; i < 160; i++)
1743 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
1744
1745 int15e820:
1746 /*
1747 * map page 1 R/W into the kernel page table so we can use it
1748 * as a buffer. The kernel will unmap this page later.
1749 */
1750 pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
1751
1752 /*
1753 * get memory map with INT 15:E820
1754 */
1755 vmc.npages = 0;
1756 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
1757 vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
1758
1759 physmap_idx = 0;
1760 vmf.vmf_ebx = 0;
1761 do {
1762 vmf.vmf_eax = 0xE820;
1763 vmf.vmf_edx = SMAP_SIG;
1764 vmf.vmf_ecx = sizeof(struct bios_smap);
1765 i = vm86_datacall(0x15, &vmf, &vmc);
1766 if (i || vmf.vmf_eax != SMAP_SIG)
1767 break;
1768 if (boothowto & RB_VERBOSE)
1769 printf("SMAP type=%02x base=%016llx len=%016llx\n",
1770 smap->type, smap->base, smap->length);
1771 has_smap = 1;
1772
1773 if (smap->type != 0x01)
1774 continue;
1775
1776 if (smap->length == 0)
1777 continue;
1778
1779 #ifndef PAE
1780 if (smap->base >= 0xffffffff) {
1781 printf("%uK of memory above 4GB ignored\n",
1782 (u_int)(smap->length / 1024));
1783 continue;
1784 }
1785 #endif
1786
1787 for (i = 0; i <= physmap_idx; i += 2)
1788 if (smap->base < physmap[i + 1])
1789 break;
1790 if (i <= physmap_idx) {
1791 if (boothowto & RB_VERBOSE)
1792 printf(
1793 "Overlapping or non-monotonic memory region, ignoring second region\n");
1794 continue;
1795 }
1796 if (smap->base == physmap[physmap_idx + 1]) {
1797 physmap[physmap_idx + 1] += smap->length;
1798 continue;
1799 }
1800
1801 physmap_idx += 2;
1802 if (physmap_idx == PHYSMAP_SIZE) {
1803 printf(
1804 "Too many segments in the physical address map, giving up\n");
1805 break;
1806 }
1807 physmap[physmap_idx] = smap->base;
1808 physmap[physmap_idx + 1] = smap->base + smap->length;
1809 } while (vmf.vmf_ebx != 0);
1810
1811 /*
1812 * Perform "base memory" related probes & setup based on SMAP
1813 */
1814 if (basemem == 0) {
1815 for (i = 0; i <= physmap_idx; i += 2) {
1816 if (physmap[i] == 0x00000000) {
1817 basemem = physmap[i + 1] / 1024;
1818 break;
1819 }
1820 }
1821
		/*
		 * XXX this function is horribly organized and has to do the
		 * same things here that it does above.
		 */
1826 if (basemem == 0)
1827 basemem = 640;
1828 if (basemem > 640) {
1829 printf(
1830 "Preposterous BIOS basemem of %uK, truncating to 640K\n",
1831 basemem);
1832 basemem = 640;
1833 }
1834
1835 /*
1836 * Let vm86 scribble on pages between basemem and
1837 * ISA_HOLE_START, as above.
1838 */
1839 for (pa = trunc_page(basemem * 1024);
1840 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1841 pmap_kenter(KERNBASE + pa, pa);
1842 pte = (pt_entry_t *)vm86paddr;
1843 for (i = basemem / 4; i < 160; i++)
1844 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
1845 }
1846
1847 if (physmap[1] != 0)
1848 goto physmap_done;
1849
1850 /*
1851 * If we failed above, try memory map with INT 15:E801
1852 */
1853 vmf.vmf_ax = 0xE801;
1854 if (vm86_intcall(0x15, &vmf) == 0) {
1855 extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
1856 } else {
1857 #if 0
1858 vmf.vmf_ah = 0x88;
1859 vm86_intcall(0x15, &vmf);
1860 extmem = vmf.vmf_ax;
1861 #else
1862 /*
1863 * Prefer the RTC value for extended memory.
1864 */
1865 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
1866 #endif
1867 }
1868
1869 /*
1870 * Special hack for chipsets that still remap the 384k hole when
1871 * there's 16MB of memory - this really confuses people that
1872 * are trying to use bus mastering ISA controllers with the
1873 * "16MB limit"; they only have 16MB, but the remapping puts
1874 * them beyond the limit.
1875 *
1876 * If extended memory is between 15-16MB (16-17MB phys address range),
1877 * chop it to 15MB.
1878 */
1879 if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
1880 extmem = 15 * 1024;
1881
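	/*
	 * Build a two-segment map by hand: conventional memory from 0 to
	 * basemem, and extended memory starting at 1MB.
	 */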
1882 physmap[0] = 0;
1883 physmap[1] = basemem * 1024;
1884 physmap_idx = 2;
1885 physmap[physmap_idx] = 0x100000;
1886 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
1887
1888 physmap_done:
1889 /*
1890 * Now, physmap contains a map of physical memory.
1891 */
1892
1893 #ifdef SMP
1894 /* make hole for AP bootstrap code */
1895 physmap[1] = mp_bootaddress(physmap[1]);
1896 #endif
1897
1898 /*
1899 * Maxmem isn't the "maximum memory", it's one larger than the
1900 * highest page of the physical address space. It should be
1901 * called something like "Maxphyspage". We may adjust this
1902 * based on ``hw.physmem'' and the results of the memory test.
1903 */
1904 Maxmem = atop(physmap[physmap_idx + 1]);
1905
1906 #ifdef MAXMEM
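	/* The MAXMEM option is given in kilobytes; convert it to 4K pages. */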
1907 Maxmem = MAXMEM / 4;
1908 #endif
1909
1910 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
1911 Maxmem = atop(physmem_tunable);
1912
1913 /*
1914 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
1915 * the amount of memory in the system.
1916 */
1917 if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
1918 Maxmem = atop(physmap[physmap_idx + 1]);
1919
1920 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1921 (boothowto & RB_VERBOSE))
1922 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1923
1924 /*
1925 * If Maxmem has been increased beyond what the system has detected,
1926 * extend the last memory segment to the new limit.
1927 */
1928 if (atop(physmap[physmap_idx + 1]) < Maxmem)
1929 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
1930
1931 /* call pmap initialization to make new kernel address space */
1932 pmap_bootstrap(first, 0);
1933
1934 /*
1935 * Size up each available chunk of physical memory.
1936 */
1937 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1938 pa_indx = 0;
1939 da_indx = 1;
1940 phys_avail[pa_indx++] = physmap[0];
1941 phys_avail[pa_indx] = physmap[0];
1942 dump_avail[da_indx] = physmap[0];
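	/*
	 * CMAP1/CADDR1 are a spare PTE and its virtual address, used below
	 * to map each candidate page in turn for the memory test.
	 */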
1943 pte = CMAP1;
1944
1945 /*
1946 * Get dcons buffer address
1947 */
1948 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
1949 getenv_quad("dcons.size", &dcons_size) == 0)
1950 dcons_addr = 0;
1951
1952 /*
1953 * physmap is in bytes, so when converting to page boundaries,
1954 * round up the start address and round down the end address.
1955 */
1956 for (i = 0; i <= physmap_idx; i += 2) {
1957 vm_paddr_t end;
1958
1959 end = ptoa((vm_paddr_t)Maxmem);
1960 if (physmap[i + 1] < end)
1961 end = trunc_page(physmap[i + 1]);
1962 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1963 int tmp, page_bad, full;
1964 int *ptr = (int *)CADDR1;
1965
1966 full = FALSE;
1967 /*
1968 * block out kernel memory as not available.
1969 */
1970 if (pa >= KERNLOAD && pa < first)
1971 goto do_dump_avail;
1972
1973 /*
1974 * block out dcons buffer
1975 */
1976 if (dcons_addr > 0
1977 && pa >= trunc_page(dcons_addr)
1978 && pa < dcons_addr + dcons_size)
1979 goto do_dump_avail;
1980
1981 page_bad = FALSE;
1982
			/*
			 * Map the page into the kernel: valid, read/write,
			 * non-cacheable.
			 */
1986 *pte = pa | PG_V | PG_RW | PG_N;
1987 invltlb();
1988
1989 tmp = *(int *)ptr;
1990 /*
1991 * Test for alternating 1's and 0's
1992 */
1993 *(volatile int *)ptr = 0xaaaaaaaa;
1994 if (*(volatile int *)ptr != 0xaaaaaaaa)
1995 page_bad = TRUE;
1996 /*
1997 * Test for alternating 0's and 1's
1998 */
1999 *(volatile int *)ptr = 0x55555555;
2000 if (*(volatile int *)ptr != 0x55555555)
2001 page_bad = TRUE;
2002 /*
2003 * Test for all 1's
2004 */
2005 *(volatile int *)ptr = 0xffffffff;
2006 if (*(volatile int *)ptr != 0xffffffff)
2007 page_bad = TRUE;
2008 /*
2009 * Test for all 0's
2010 */
2011 *(volatile int *)ptr = 0x0;
2012 if (*(volatile int *)ptr != 0x0)
2013 page_bad = TRUE;
2014 /*
2015 * Restore original value.
2016 */
2017 *(int *)ptr = tmp;
2018
2019 /*
2020 * Adjust array of valid/good pages.
2021 */
2022 if (page_bad == TRUE)
2023 continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that the recorded end points one past the
			 * last byte, making the range >= start and < end.
			 * If we're also doing a speculative memory test and
			 * we're at or past the end, bump up Maxmem so that
			 * we keep going.  The first bad page will terminate
			 * the loop.
			 */
2035 if (phys_avail[pa_indx] == pa) {
2036 phys_avail[pa_indx] += PAGE_SIZE;
2037 } else {
2038 pa_indx++;
2039 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
2040 printf(
2041 "Too many holes in the physical address space, giving up\n");
2042 pa_indx--;
2043 full = TRUE;
2044 goto do_dump_avail;
2045 }
2046 phys_avail[pa_indx++] = pa; /* start */
2047 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
2048 }
2049 physmem++;
2050 do_dump_avail:
2051 if (dump_avail[da_indx] == pa) {
2052 dump_avail[da_indx] += PAGE_SIZE;
2053 } else {
2054 da_indx++;
2055 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2056 da_indx--;
2057 goto do_next;
2058 }
2059 dump_avail[da_indx++] = pa; /* start */
2060 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2061 }
2062 do_next:
2063 if (full)
2064 break;
2065 }
2066 }
2067 *pte = 0;
2068 invltlb();
2069
2070 /*
2071 * XXX
2072 * The last chunk must contain at least one page plus the message
2073 * buffer to avoid complicating other code (message buffer address
2074 * calculation, etc.).
2075 */
2076 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2077 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
2078 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2079 phys_avail[pa_indx--] = 0;
2080 phys_avail[pa_indx--] = 0;
2081 }
2082
2083 Maxmem = atop(phys_avail[pa_indx]);
2084
2085 /* Trim off space for the message buffer. */
2086 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
2087
2088 avail_end = phys_avail[pa_indx];
2089 }
2090
2091 void
2092 init386(first)
2093 int first;
2094 {
2095 struct gate_descriptor *gdp;
2096 int gsel_tss, metadata_missing, off, x;
2097 struct pcpu *pc;
2098
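	/* proc0's PCB lives at the top of its kernel stack. */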
2099 thread0.td_kstack = proc0kstack;
2100 thread0.td_pcb = (struct pcb *)
2101 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
2102
	/*
	 * This may be done better later if more high-level components end
	 * up in it.  If so, just link td->td_proc here.
	 */
2107 proc_linkup(&proc0, &ksegrp0, &thread0);
2108
2109 metadata_missing = 0;
2110 if (bootinfo.bi_modulep) {
2111 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
2112 preload_bootstrap_relocate(KERNBASE);
2113 } else {
2114 metadata_missing = 1;
2115 }
2116 if (envmode == 1)
2117 kern_envp = static_env;
2118 else if (bootinfo.bi_envp)
2119 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
2120
2121 /* Init basic tunables, hz etc */
2122 init_param1();
2123
2124 /*
2125 * Make gdt memory segments. All segments cover the full 4GB
2126 * of address space and permissions are enforced at page level.
2127 */
2128 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
2129 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
2130 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
2131 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
2132 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
2133 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
2134
2135 #ifdef SMP
2136 pc = &SMP_prvspace[0].pcpu;
2137 #else
2138 pc = &__pcpu;
2139 #endif
2140 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
2141 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2142 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2143
2144 for (x = 0; x < NGDT; x++)
2145 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2146
2147 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
2148 r_gdt.rd_base = (int) gdt;
2149 lgdt(&r_gdt);
2150
2151 pcpu_init(pc, 0, sizeof(struct pcpu));
2152 PCPU_SET(prvspace, pc);
2153 PCPU_SET(curthread, &thread0);
2154 PCPU_SET(curpcb, thread0.td_pcb);
2155
2156 /*
2157 * Initialize mutexes.
2158 *
2159 * icu_lock: in order to allow an interrupt to occur in a critical
2160 * section, to set pcpu->ipending (etc...) properly, we
2161 * must be able to get the icu lock, so it can't be
2162 * under witness.
2163 */
2164 mutex_init();
2165 mtx_init(&clock_lock, "clk", NULL, MTX_SPIN);
2166 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
2167
2168 /* make ldt memory segments */
2169 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2170 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2171 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2172 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2173
2174 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2175 lldt(_default_ldt);
2176 PCPU_SET(currentldt, _default_ldt);
2177
2178 /* exceptions */
2179 for (x = 0; x < NIDT; x++)
2180 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2181 GSEL(GCODE_SEL, SEL_KPL));
2182 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2183 GSEL(GCODE_SEL, SEL_KPL));
2184 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2185 GSEL(GCODE_SEL, SEL_KPL));
2186 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2187 GSEL(GCODE_SEL, SEL_KPL));
2188 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2189 GSEL(GCODE_SEL, SEL_KPL));
2190 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2191 GSEL(GCODE_SEL, SEL_KPL));
2192 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2193 GSEL(GCODE_SEL, SEL_KPL));
2194 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2195 GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
2198 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2199 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2200 GSEL(GCODE_SEL, SEL_KPL));
2201 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2202 GSEL(GCODE_SEL, SEL_KPL));
2203 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2204 GSEL(GCODE_SEL, SEL_KPL));
2205 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2206 GSEL(GCODE_SEL, SEL_KPL));
2207 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2208 GSEL(GCODE_SEL, SEL_KPL));
2209 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2210 GSEL(GCODE_SEL, SEL_KPL));
2211 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2212 GSEL(GCODE_SEL, SEL_KPL));
2213 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2214 GSEL(GCODE_SEL, SEL_KPL));
2215 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2216 GSEL(GCODE_SEL, SEL_KPL));
2217 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2218 GSEL(GCODE_SEL, SEL_KPL));
2219 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2220 GSEL(GCODE_SEL, SEL_KPL));
2221
2222 r_idt.rd_limit = sizeof(idt0) - 1;
2223 r_idt.rd_base = (int) idt;
2224 lidt(&r_idt);
2225
2226 #ifdef XBOX
	/*
	 * The following code queries the PCI ID of device 0:0:0. On the
	 * XBOX this should be 0x10de / 0x02a5.
	 *
	 * This is exactly what Linux does.
	 */
2233 outl(0xcf8, 0x80000000);
2234 if (inl(0xcfc) == 0x02a510de) {
2235 arch_i386_is_xbox = 1;
2236 pic16l_setled(XBOX_LED_GREEN);
2237
2238 /*
2239 * We are an XBOX, but we may have either 64MB or 128MB of
2240 * memory. The PCI host bridge should be programmed for this,
2241 * so we just query it.
2242 */
2243 outl (0xcf8, 0x80000084);
2244 arch_i386_xbox_memsize = (inl (0xcfc) == 0x7FFFFFF) ? 128 : 64;
2245 }
2246 #endif /* XBOX */
2247
2248 /*
2249 * Initialize the console before we print anything out.
2250 */
2251 cninit();
2252
2253 if (metadata_missing)
2254 printf("WARNING: loader(8) metadata is missing!\n");
2255
2256 #ifdef DEV_ISA
2257 elcr_probe();
2258 atpic_startup();
2259 #endif
2260
2261 #ifdef DDB
2262 ksym_start = bootinfo.bi_symtab;
2263 ksym_end = bootinfo.bi_esymtab;
2264 #endif
2265
2266 kdb_init();
2267
2268 #ifdef KDB
2269 if (boothowto & RB_KDB)
2270 kdb_enter("Boot flags requested debugger");
2271 #endif
2272
2273 finishidentcpu(); /* Final stage of CPU initialization */
2274 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2275 GSEL(GCODE_SEL, SEL_KPL));
2276 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2277 GSEL(GCODE_SEL, SEL_KPL));
2278 initializecpu(); /* Initialize CPU registers */
2279
2280 /* make an initial tss so cpu can get interrupt stack on syscall! */
2281 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2282 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2283 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
2284 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2285 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2286 private_tss = 0;
2287 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
2288 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
2289 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
2290 ltr(gsel_tss);
2291
2292 /* pointer to selector slot for %fs/%gs */
2293 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
2294
2295 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2296 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2297 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2298 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2299 #ifdef PAE
2300 dblfault_tss.tss_cr3 = (int)IdlePDPT;
2301 #else
2302 dblfault_tss.tss_cr3 = (int)IdlePTD;
2303 #endif
2304 dblfault_tss.tss_eip = (int)dblfault_handler;
2305 dblfault_tss.tss_eflags = PSL_KERNEL;
2306 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2307 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2308 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2309 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2310 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2311
2312 vm86_initialize();
2313 getmemsize(first);
2314 init_param2(physmem);
2315
	/* now running on new page tables, configured, and u/iom is accessible */
2317
2318 /* Map the message buffer. */
2319 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
2320 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
2321
2322 msgbufinit(msgbufp, MSGBUF_SIZE);
2323
2324 /* make a call gate to reenter kernel with */
2325 gdp = &ldt[LSYS5CALLS_SEL].gd;
2326
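	/*
	 * The 32-bit handler address is split across the gate's 16-bit low
	 * and high offset fields; gd_stkcpy = 1 makes the CPU copy one
	 * longword from the user stack during the privilege transition.
	 */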
2327 x = (int) &IDTVEC(lcall_syscall);
2328 gdp->gd_looffset = x;
2329 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
2330 gdp->gd_stkcpy = 1;
2331 gdp->gd_type = SDT_SYS386CGT;
2332 gdp->gd_dpl = SEL_UPL;
2333 gdp->gd_p = 1;
2334 gdp->gd_hioffset = x >> 16;
2335
2336 /* XXX does this work? */
2337 /* XXX yes! */
2338 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2339 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
2340
2341 /* transfer to user mode */
2342
2343 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
2344 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
2345
2346 /* setup proc 0's pcb */
2347 thread0.td_pcb->pcb_flags = 0; /* XXXKSE */
2348 #ifdef PAE
2349 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
2350 #else
2351 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2352 #endif
2353 thread0.td_pcb->pcb_ext = 0;
2354 thread0.td_frame = &proc0_tf;
2355 }
2356
2357 void
2358 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
2359 {
2360
2361 pcpu->pc_acpi_id = 0xffffffff;
2362 }
2363
2364 void
2365 spinlock_enter(void)
2366 {
2367 struct thread *td;
2368
2369 td = curthread;
2370 if (td->td_md.md_spinlock_count == 0)
2371 td->td_md.md_saved_flags = intr_disable();
2372 td->td_md.md_spinlock_count++;
2373 critical_enter();
2374 }
2375
2376 void
2377 spinlock_exit(void)
2378 {
2379 struct thread *td;
2380
2381 td = curthread;
2382 critical_exit();
2383 td->td_md.md_spinlock_count--;
2384 if (td->td_md.md_spinlock_count == 0)
2385 intr_restore(td->td_md.md_saved_flags);
2386 }
2387
2388 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2389 static void f00f_hack(void *unused);
2390 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL)
2391
2392 static void
2393 f00f_hack(void *unused)
2394 {
2395 struct gate_descriptor *new_idt;
2396 vm_offset_t tmp;
2397
2398 if (!has_f00f_bug)
2399 return;
2400
2401 GIANT_REQUIRED;
2402
2403 printf("Intel Pentium detected, installing workaround for F00F bug\n");
2404
2405 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
2406 if (tmp == 0)
2407 panic("kmem_alloc returned 0");
2408
2409 /* Put the problematic entry (#6) at the end of the lower page. */
2410 new_idt = (struct gate_descriptor*)
2411 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
2412 bcopy(idt, new_idt, sizeof(idt0));
2413 r_idt.rd_base = (u_int)new_idt;
2414 lidt(&r_idt);
2415 idt = new_idt;
2416 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
2417 VM_PROT_READ, FALSE) != KERN_SUCCESS)
2418 panic("vm_map_protect failed");
2419 }
2420 #endif /* defined(I586_CPU) && !NO_F00F_HACK */
2421
2422 /*
2423 * Construct a PCB from a trapframe. This is called from kdb_trap() where
2424 * we want to start a backtrace from the function that caused us to enter
2425 * the debugger. We have the context in the trapframe, but base the trace
2426 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
2427 * enough for a backtrace.
2428 */
2429 void
2430 makectx(struct trapframe *tf, struct pcb *pcb)
2431 {
2432
2433 pcb->pcb_edi = tf->tf_edi;
2434 pcb->pcb_esi = tf->tf_esi;
2435 pcb->pcb_ebp = tf->tf_ebp;
2436 pcb->pcb_ebx = tf->tf_ebx;
2437 pcb->pcb_eip = tf->tf_eip;
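	/*
	 * If the trap came from kernel mode, %esp and %ss were not pushed;
	 * the stack pointer at trap time is then the end of the frame minus
	 * the two unsaved words.
	 */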
2438 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
2439 }
2440
2441 int
2442 ptrace_set_pc(struct thread *td, u_long addr)
2443 {
2444
2445 td->td_frame->tf_eip = addr;
2446 return (0);
2447 }
2448
2449 int
2450 ptrace_single_step(struct thread *td)
2451 {
2452 td->td_frame->tf_eflags |= PSL_T;
2453 return (0);
2454 }
2455
2456 int
2457 ptrace_clear_single_step(struct thread *td)
2458 {
2459 td->td_frame->tf_eflags &= ~PSL_T;
2460 return (0);
2461 }
2462
2463 int
2464 fill_regs(struct thread *td, struct reg *regs)
2465 {
2466 struct pcb *pcb;
2467 struct trapframe *tp;
2468
2469 tp = td->td_frame;
2470 pcb = td->td_pcb;
2471 regs->r_fs = tp->tf_fs;
2472 regs->r_es = tp->tf_es;
2473 regs->r_ds = tp->tf_ds;
2474 regs->r_edi = tp->tf_edi;
2475 regs->r_esi = tp->tf_esi;
2476 regs->r_ebp = tp->tf_ebp;
2477 regs->r_ebx = tp->tf_ebx;
2478 regs->r_edx = tp->tf_edx;
2479 regs->r_ecx = tp->tf_ecx;
2480 regs->r_eax = tp->tf_eax;
2481 regs->r_eip = tp->tf_eip;
2482 regs->r_cs = tp->tf_cs;
2483 regs->r_eflags = tp->tf_eflags;
2484 regs->r_esp = tp->tf_esp;
2485 regs->r_ss = tp->tf_ss;
2486 regs->r_gs = pcb->pcb_gs;
2487 return (0);
2488 }
2489
2490 int
2491 set_regs(struct thread *td, struct reg *regs)
2492 {
2493 struct pcb *pcb;
2494 struct trapframe *tp;
2495
2496 tp = td->td_frame;
2497 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
2498 !CS_SECURE(regs->r_cs))
2499 return (EINVAL);
2500 pcb = td->td_pcb;
2501 tp->tf_fs = regs->r_fs;
2502 tp->tf_es = regs->r_es;
2503 tp->tf_ds = regs->r_ds;
2504 tp->tf_edi = regs->r_edi;
2505 tp->tf_esi = regs->r_esi;
2506 tp->tf_ebp = regs->r_ebp;
2507 tp->tf_ebx = regs->r_ebx;
2508 tp->tf_edx = regs->r_edx;
2509 tp->tf_ecx = regs->r_ecx;
2510 tp->tf_eax = regs->r_eax;
2511 tp->tf_eip = regs->r_eip;
2512 tp->tf_cs = regs->r_cs;
2513 tp->tf_eflags = regs->r_eflags;
2514 tp->tf_esp = regs->r_esp;
2515 tp->tf_ss = regs->r_ss;
2516 pcb->pcb_gs = regs->r_gs;
2517 return (0);
2518 }
2519
2520 #ifdef CPU_ENABLE_SSE
2521 static void
2522 fill_fpregs_xmm(sv_xmm, sv_87)
2523 struct savexmm *sv_xmm;
2524 struct save87 *sv_87;
2525 {
2526 register struct env87 *penv_87 = &sv_87->sv_env;
2527 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2528 int i;
2529
2530 bzero(sv_87, sizeof(*sv_87));
2531
2532 /* FPU control/status */
2533 penv_87->en_cw = penv_xmm->en_cw;
2534 penv_87->en_sw = penv_xmm->en_sw;
2535 penv_87->en_tw = penv_xmm->en_tw;
2536 penv_87->en_fip = penv_xmm->en_fip;
2537 penv_87->en_fcs = penv_xmm->en_fcs;
2538 penv_87->en_opcode = penv_xmm->en_opcode;
2539 penv_87->en_foo = penv_xmm->en_foo;
2540 penv_87->en_fos = penv_xmm->en_fos;
2541
2542 /* FPU registers */
2543 for (i = 0; i < 8; ++i)
2544 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
2545 }
2546
2547 static void
2548 set_fpregs_xmm(sv_87, sv_xmm)
2549 struct save87 *sv_87;
2550 struct savexmm *sv_xmm;
2551 {
2552 register struct env87 *penv_87 = &sv_87->sv_env;
2553 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2554 int i;
2555
2556 /* FPU control/status */
2557 penv_xmm->en_cw = penv_87->en_cw;
2558 penv_xmm->en_sw = penv_87->en_sw;
2559 penv_xmm->en_tw = penv_87->en_tw;
2560 penv_xmm->en_fip = penv_87->en_fip;
2561 penv_xmm->en_fcs = penv_87->en_fcs;
2562 penv_xmm->en_opcode = penv_87->en_opcode;
2563 penv_xmm->en_foo = penv_87->en_foo;
2564 penv_xmm->en_fos = penv_87->en_fos;
2565
2566 /* FPU registers */
2567 for (i = 0; i < 8; ++i)
2568 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
2569 }
2570 #endif /* CPU_ENABLE_SSE */
2571
2572 int
2573 fill_fpregs(struct thread *td, struct fpreg *fpregs)
2574 {
2575 #ifdef CPU_ENABLE_SSE
2576 if (cpu_fxsr) {
2577 fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
2578 (struct save87 *)fpregs);
2579 return (0);
2580 }
2581 #endif /* CPU_ENABLE_SSE */
2582 bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
2583 return (0);
2584 }
2585
2586 int
2587 set_fpregs(struct thread *td, struct fpreg *fpregs)
2588 {
2589 #ifdef CPU_ENABLE_SSE
2590 if (cpu_fxsr) {
2591 set_fpregs_xmm((struct save87 *)fpregs,
2592 &td->td_pcb->pcb_save.sv_xmm);
2593 return (0);
2594 }
2595 #endif /* CPU_ENABLE_SSE */
2596 bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
2597 return (0);
2598 }
2599
2600 /*
2601 * Get machine context.
2602 */
2603 int
2604 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
2605 {
2606 struct trapframe *tp;
2607
2608 tp = td->td_frame;
2609
2610 PROC_LOCK(curthread->td_proc);
2611 mcp->mc_onstack = sigonstack(tp->tf_esp);
2612 PROC_UNLOCK(curthread->td_proc);
2613 mcp->mc_gs = td->td_pcb->pcb_gs;
2614 mcp->mc_fs = tp->tf_fs;
2615 mcp->mc_es = tp->tf_es;
2616 mcp->mc_ds = tp->tf_ds;
2617 mcp->mc_edi = tp->tf_edi;
2618 mcp->mc_esi = tp->tf_esi;
2619 mcp->mc_ebp = tp->tf_ebp;
2620 mcp->mc_isp = tp->tf_isp;
2621 mcp->mc_eflags = tp->tf_eflags;
2622 if (flags & GET_MC_CLEAR_RET) {
2623 mcp->mc_eax = 0;
2624 mcp->mc_edx = 0;
2625 mcp->mc_eflags &= ~PSL_C;
2626 } else {
2627 mcp->mc_eax = tp->tf_eax;
2628 mcp->mc_edx = tp->tf_edx;
2629 }
2630 mcp->mc_ebx = tp->tf_ebx;
2631 mcp->mc_ecx = tp->tf_ecx;
2632 mcp->mc_eip = tp->tf_eip;
2633 mcp->mc_cs = tp->tf_cs;
2634 mcp->mc_esp = tp->tf_esp;
2635 mcp->mc_ss = tp->tf_ss;
2636 mcp->mc_len = sizeof(*mcp);
2637 get_fpcontext(td, mcp);
2638 return (0);
2639 }
2640
2641 /*
2642 * Set machine context.
2643 *
2644 * However, we don't set any but the user modifiable flags, and we won't
2645 * touch the cs selector.
2646 */
2647 int
2648 set_mcontext(struct thread *td, const mcontext_t *mcp)
2649 {
2650 struct trapframe *tp;
2651 int eflags, ret;
2652
2653 tp = td->td_frame;
2654 if (mcp->mc_len != sizeof(*mcp))
2655 return (EINVAL);
2656 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
2657 (tp->tf_eflags & ~PSL_USERCHANGE);
2658 if ((ret = set_fpcontext(td, mcp)) == 0) {
2659 tp->tf_fs = mcp->mc_fs;
2660 tp->tf_es = mcp->mc_es;
2661 tp->tf_ds = mcp->mc_ds;
2662 tp->tf_edi = mcp->mc_edi;
2663 tp->tf_esi = mcp->mc_esi;
2664 tp->tf_ebp = mcp->mc_ebp;
2665 tp->tf_ebx = mcp->mc_ebx;
2666 tp->tf_edx = mcp->mc_edx;
2667 tp->tf_ecx = mcp->mc_ecx;
2668 tp->tf_eax = mcp->mc_eax;
2669 tp->tf_eip = mcp->mc_eip;
2670 tp->tf_eflags = eflags;
2671 tp->tf_esp = mcp->mc_esp;
2672 tp->tf_ss = mcp->mc_ss;
2673 td->td_pcb->pcb_gs = mcp->mc_gs;
2674 ret = 0;
2675 }
2676 return (ret);
2677 }
2678
2679 static void
2680 get_fpcontext(struct thread *td, mcontext_t *mcp)
2681 {
2682 #ifndef DEV_NPX
2683 mcp->mc_fpformat = _MC_FPFMT_NODEV;
2684 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
2685 #else
2686 union savefpu *addr;
2687
2688 /*
2689 * XXX mc_fpstate might be misaligned, since its declaration is not
2690 * unportabilized using __attribute__((aligned(16))) like the
2691 * declaration of struct savemm, and anyway, alignment doesn't work
2692 * for auto variables since we don't use gcc's pessimal stack
2693 * alignment. Work around this by abusing the spare fields after
2694 * mcp->mc_fpstate.
2695 *
2696 * XXX unpessimize most cases by only aligning when fxsave might be
2697 * called, although this requires knowing too much about
2698 * npxgetregs()'s internals.
2699 */
2700 addr = (union savefpu *)&mcp->mc_fpstate;
2701 if (td == PCPU_GET(fpcurthread) &&
2702 #ifdef CPU_ENABLE_SSE
2703 cpu_fxsr &&
2704 #endif
2705 ((uintptr_t)(void *)addr & 0xF)) {
2706 do
2707 addr = (void *)((char *)addr + 4);
2708 while ((uintptr_t)(void *)addr & 0xF);
2709 }
2710 mcp->mc_ownedfp = npxgetregs(td, addr);
2711 if (addr != (union savefpu *)&mcp->mc_fpstate) {
2712 bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
2713 bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
2714 }
2715 mcp->mc_fpformat = npxformat();
2716 #endif
2717 }
2718
2719 static int
2720 set_fpcontext(struct thread *td, const mcontext_t *mcp)
2721 {
2722 union savefpu *addr;
2723
2724 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
2725 return (0);
2726 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
2727 mcp->mc_fpformat != _MC_FPFMT_XMM)
2728 return (EINVAL);
2729 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
2730 /* We don't care what state is left in the FPU or PCB. */
2731 fpstate_drop(td);
2732 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
2733 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
2734 /* XXX align as above. */
2735 addr = (union savefpu *)&mcp->mc_fpstate;
2736 if (td == PCPU_GET(fpcurthread) &&
2737 #ifdef CPU_ENABLE_SSE
2738 cpu_fxsr &&
2739 #endif
2740 ((uintptr_t)(void *)addr & 0xF)) {
2741 do
2742 addr = (void *)((char *)addr + 4);
2743 while ((uintptr_t)(void *)addr & 0xF);
2744 bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
2745 }
2746 #ifdef DEV_NPX
2747 #ifdef CPU_ENABLE_SSE
2748 if (cpu_fxsr)
2749 addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
2750 #endif
2751 /*
2752 * XXX we violate the dubious requirement that npxsetregs()
2753 * be called with interrupts disabled.
2754 */
2755 npxsetregs(td, addr);
2756 #endif
2757 /*
2758 * Don't bother putting things back where they were in the
2759 * misaligned case, since we know that the caller won't use
2760 * them again.
2761 */
2762 } else
2763 return (EINVAL);
2764 return (0);
2765 }
2766
2767 static void
2768 fpstate_drop(struct thread *td)
2769 {
2770 register_t s;
2771
2772 s = intr_disable();
2773 #ifdef DEV_NPX
2774 if (PCPU_GET(fpcurthread) == td)
2775 npxdrop();
2776 #endif
2777 /*
2778 * XXX force a full drop of the npx. The above only drops it if we
2779 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
2780 *
2781 * XXX I don't much like npxgetregs()'s semantics of doing a full
2782 * drop. Dropping only to the pcb matches fnsave's behaviour.
2783 * We only need to drop to !PCB_INITDONE in sendsig(). But
2784 * sendsig() is the only caller of npxgetregs()... perhaps we just
2785 * have too many layers.
2786 */
2787 curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
2788 intr_restore(s);
2789 }
2790
2791 int
2792 fill_dbregs(struct thread *td, struct dbreg *dbregs)
2793 {
2794 struct pcb *pcb;
2795
2796 if (td == NULL) {
2797 dbregs->dr[0] = rdr0();
2798 dbregs->dr[1] = rdr1();
2799 dbregs->dr[2] = rdr2();
2800 dbregs->dr[3] = rdr3();
2801 dbregs->dr[4] = rdr4();
2802 dbregs->dr[5] = rdr5();
2803 dbregs->dr[6] = rdr6();
2804 dbregs->dr[7] = rdr7();
2805 } else {
2806 pcb = td->td_pcb;
2807 dbregs->dr[0] = pcb->pcb_dr0;
2808 dbregs->dr[1] = pcb->pcb_dr1;
2809 dbregs->dr[2] = pcb->pcb_dr2;
2810 dbregs->dr[3] = pcb->pcb_dr3;
2811 dbregs->dr[4] = 0;
2812 dbregs->dr[5] = 0;
2813 dbregs->dr[6] = pcb->pcb_dr6;
2814 dbregs->dr[7] = pcb->pcb_dr7;
2815 }
2816 return (0);
2817 }
2818
2819 int
2820 set_dbregs(struct thread *td, struct dbreg *dbregs)
2821 {
2822 struct pcb *pcb;
2823 int i;
2824 u_int32_t mask1, mask2;
2825
2826 if (td == NULL) {
2827 load_dr0(dbregs->dr[0]);
2828 load_dr1(dbregs->dr[1]);
2829 load_dr2(dbregs->dr[2]);
2830 load_dr3(dbregs->dr[3]);
2831 load_dr4(dbregs->dr[4]);
2832 load_dr5(dbregs->dr[5]);
2833 load_dr6(dbregs->dr[6]);
2834 load_dr7(dbregs->dr[7]);
2835 } else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP.
		 */
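		/*
		 * Bits 16-31 of %dr7 hold the 2-bit R/W and LEN fields for
		 * the four breakpoints; the pattern 10b is undefined for LEN
		 * and selects I/O breakpoints for R/W (which requires
		 * CR4.DE), so reject it in either field.
		 */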
2842 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8;
2843 i++, mask1 <<= 2, mask2 <<= 2)
2844 if ((dbregs->dr[7] & mask1) == mask2)
2845 return (EINVAL);
2846
2847 pcb = td->td_pcb;
2848
2849 /*
2850 * Don't let a process set a breakpoint that is not within the
2851 * process's address space. If a process could do this, it
2852 * could halt the system by setting a breakpoint in the kernel
2853 * (if ddb was enabled). Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * the process's address space.
2856 *
2857 * XXX - what about when the watched area of the user's
2858 * address space is written into from within the kernel
2859 * ... wouldn't that still cause a breakpoint to be generated
2860 * from within kernel mode?
2861 */
2862
2863 if (dbregs->dr[7] & 0x3) {
2864 /* dr0 is enabled */
2865 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
2866 return (EINVAL);
2867 }
2868
2869 if (dbregs->dr[7] & (0x3<<2)) {
2870 /* dr1 is enabled */
2871 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
2872 return (EINVAL);
2873 }
2874
2875 if (dbregs->dr[7] & (0x3<<4)) {
2876 /* dr2 is enabled */
2877 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
2878 return (EINVAL);
2879 }
2880
2881 if (dbregs->dr[7] & (0x3<<6)) {
2882 /* dr3 is enabled */
2883 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
2884 return (EINVAL);
2885 }
2886
2887 pcb->pcb_dr0 = dbregs->dr[0];
2888 pcb->pcb_dr1 = dbregs->dr[1];
2889 pcb->pcb_dr2 = dbregs->dr[2];
2890 pcb->pcb_dr3 = dbregs->dr[3];
2891 pcb->pcb_dr6 = dbregs->dr[6];
2892 pcb->pcb_dr7 = dbregs->dr[7];
2893
2894 pcb->pcb_flags |= PCB_DBREGS;
2895 }
2896
2897 return (0);
2898 }
2899
/*
 * Return > 0 if a hardware breakpoint has been hit and the breakpoint
 * was in user space.  Return 0 otherwise.
 */
2904 int
2905 user_dbreg_trap(void)
2906 {
2907 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
2908 u_int32_t bp; /* breakpoint bits extracted from dr6 */
2909 int nbp; /* number of breakpoints that triggered */
2910 caddr_t addr[4]; /* breakpoint addresses */
2911 int i;
2912
2913 dr7 = rdr7();
2914 if ((dr7 & 0x000000ff) == 0) {
		/*
		 * None of the local or global breakpoint enable bits in the
		 * dr7 register are set, thus the trap couldn't have been
		 * caused by the hardware debug registers.
		 */
2920 return 0;
2921 }
2922
2923 nbp = 0;
2924 dr6 = rdr6();
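	/* The low four bits of %dr6 (B0-B3) report which breakpoints fired. */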
2925 bp = dr6 & 0x0000000f;
2926
2927 if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers.
		 */
2932 return 0;
2933 }
2934
	/*
	 * At least one of the breakpoints was hit; check to see which ones
	 * and whether any of them are user space addresses.
	 */
2939
2940 if (bp & 0x01) {
2941 addr[nbp++] = (caddr_t)rdr0();
2942 }
2943 if (bp & 0x02) {
2944 addr[nbp++] = (caddr_t)rdr1();
2945 }
2946 if (bp & 0x04) {
2947 addr[nbp++] = (caddr_t)rdr2();
2948 }
2949 if (bp & 0x08) {
2950 addr[nbp++] = (caddr_t)rdr3();
2951 }
2952
	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
2956 /*
2957 * addr[i] is in user space
2958 */
2959 return nbp;
2960 }
2961 }
2962
2963 /*
2964 * None of the breakpoints are in user space.
2965 */
2966 return 0;
2967 }
2968
2969 #ifndef DEV_APIC
2970 #include <machine/apicvar.h>
2971
2972 /*
2973 * Provide stub functions so that the MADT APIC enumerator in the acpi
2974 * kernel module will link against a kernel without 'device apic'.
2975 *
2976 * XXX - This is a gross hack.
2977 */
2978 void
2979 apic_register_enumerator(struct apic_enumerator *enumerator)
2980 {
2981 }
2982
2983 void *
2984 ioapic_create(uintptr_t addr, int32_t id, int intbase)
2985 {
2986 return (NULL);
2987 }
2988
2989 int
2990 ioapic_disable_pin(void *cookie, u_int pin)
2991 {
2992 return (ENXIO);
2993 }
2994
2995 int
2996 ioapic_get_vector(void *cookie, u_int pin)
2997 {
2998 return (-1);
2999 }
3000
3001 void
3002 ioapic_register(void *cookie)
3003 {
3004 }
3005
3006 int
3007 ioapic_remap_vector(void *cookie, u_int pin, int vector)
3008 {
3009 return (ENXIO);
3010 }
3011
3012 int
3013 ioapic_set_extint(void *cookie, u_int pin)
3014 {
3015 return (ENXIO);
3016 }
3017
3018 int
3019 ioapic_set_nmi(void *cookie, u_int pin)
3020 {
3021 return (ENXIO);
3022 }
3023
3024 int
3025 ioapic_set_polarity(void *cookie, u_int pin, enum intr_polarity pol)
3026 {
3027 return (ENXIO);
3028 }
3029
3030 int
3031 ioapic_set_triggermode(void *cookie, u_int pin, enum intr_trigger trigger)
3032 {
3033 return (ENXIO);
3034 }
3035
3036 void
3037 lapic_create(u_int apic_id, int boot_cpu)
3038 {
3039 }
3040
3041 void
3042 lapic_init(uintptr_t addr)
3043 {
3044 }
3045
3046 int
3047 lapic_set_lvt_mode(u_int apic_id, u_int lvt, u_int32_t mode)
3048 {
3049 return (ENXIO);
3050 }
3051
3052 int
3053 lapic_set_lvt_polarity(u_int apic_id, u_int lvt, enum intr_polarity pol)
3054 {
3055 return (ENXIO);
3056 }
3057
3058 int
3059 lapic_set_lvt_triggermode(u_int apic_id, u_int lvt, enum intr_trigger trigger)
3060 {
3061 return (ENXIO);
3062 }
3063 #endif
3064
3065 #ifdef KDB
3066
3067 /*
3068 * Provide inb() and outb() as functions. They are normally only
3069 * available as macros calling inlined functions, thus cannot be
3070 * called from the debugger.
3071 *
3072 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
3073 */
3074
3075 #undef inb
3076 #undef outb
3077
3078 /* silence compiler warnings */
3079 u_char inb(u_int);
3080 void outb(u_int, u_char);
3081
3082 u_char
3083 inb(u_int port)
3084 {
3085 u_char data;
3086 /*
3087 * We use %%dx and not %1 here because i/o is done at %dx and not at
3088 * %edx, while gcc generates inferior code (movw instead of movl)
3089 * if we tell it to load (u_short) port.
3090 */
3091 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
3092 return (data);
3093 }
3094
3095 void
3096 outb(u_int port, u_char data)
3097 {
3098 u_char al;
3099 /*
3100 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
3102 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for
3103 * best results. gcc-2.6.0 can't handle this.
3104 */
3105 al = data;
3106 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
3107 }
3108
3109 #endif /* KDB */