/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/7.4/sys/i386/i386/machdep.c 213746 2010-10-12 19:28:33Z jhb $");

#include "opt_apic.h"
#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/clock.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#include <isa/rtc.h>

#include <net/netisr.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/vm86.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/smp.h>
#endif

#ifdef DEV_ISA
#include <i386/isa/icu.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>

int arch_i386_is_xbox = 0;
uint32_t arch_i386_xbox_memsize = 0;
#endif

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
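/*
 * (Why offset zero matters: on i386 the per-CPU data is reached through
 * %fs, and __curthread() is expected to fetch pc_curthread with a single
 * %fs:0 load, so the field must remain first in struct pcpu.)
 */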

extern void init386(int first);
extern void dblfault_handler(void);

extern void printcpuinfo(void);	/* XXX header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
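
/*
 * CS_SECURE() and EFL_SECURE() vet user-supplied signal contexts in the
 * *sigreturn() paths below: the former rejects any %cs that does not have
 * user privilege, and the latter insists that only the PSL_USERCHANGE
 * bits of eflags differ from the previous values.
 */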

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

/* Intel ICH registers */
#define ICH_PMBASE	0x400
#define ICH_SMI_EN	(ICH_PMBASE + 0x30)

int	_udatasel, _ucodesel;
u_int	basemem;

int cold = 1;

#ifdef COMPAT_43
static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif
#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif

long Maxmem = 0;
long realmem = 0;

#ifdef PAE
FEATURE(pae, "Physical Address Extensions");
#endif

/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so that a pair of zeroes can signal the end of the chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

struct mtx icu_lock;

struct mem_range_softc mem_range_softc;

static void
cpu_startup(dummy)
	void *dummy;
{
	char *sysenv;

	/*
	 * On MacBooks, we need to disallow the legacy USB circuit to
	 * generate an SMI# because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable register) of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook", 7) == 0) {
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem),
	    ptoa((uintmax_t)Maxmem) / 1048576);
	realmem = Maxmem;
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	cpu_setregs();
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
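/*
 * A rough sketch of the resulting user stack (the exact frame layout
 * varies between osendsig(), freebsd4_sendsig() and sendsig()):
 *
 *	high addresses	PS_STRINGS
 *			sigcode trampoline (pc = PS_STRINGS - szsigcode)
 *			...
 *	sfp ->		struct sigframe (handler args + saved context)
 *	low addresses	handler stack grows down from here
 *
 * The trampoline regains control when the handler returns and issues
 * the sigreturn(2) system call to reload the saved context.
 */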
#ifdef COMPAT_43
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif	/* COMPAT_FREEBSD4 */

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext);
	fpstate_drop(td);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_esp - sizeof(struct sigframe);
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
#ifdef COMPAT_43
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	struct proc *p = td->td_proc;
	int eflags, error;
	ksiginfo_t ksi;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

	PROC_LOCK(p);
#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	SIGSETOLD(td->td_sigmask, scp->sc_mask);
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * MPSAFE
 */
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
{
	struct ucontext4 uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	const struct ucontext4 *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

	PROC_LOCK(p);
#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	td->td_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}
#endif	/* COMPAT_FREEBSD4 */

/*
 * MPSAFE
 */
int
sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	const ucontext_t *ucp;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("sigreturn: cs = 0x%x\n", cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

	PROC_LOCK(p);
#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	td->td_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	register_t reg;
	uint64_t tsc1, tsc2;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if (!tsc_present)
		return (EOPNOTSUPP);

	/* If we're booting, trust the rate calibrated moments ago. */
	if (cold) {
		*rate = tsc_freq;
		return (0);
	}

#ifdef SMP
	/* Schedule ourselves on the indicated cpu. */
	thread_lock(curthread);
	sched_bind(curthread, cpu_id);
	thread_unlock(curthread);
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	tsc1 = rdtsc();
	DELAY(1000);
	tsc2 = rdtsc();
	intr_restore(reg);

#ifdef SMP
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
#endif

	/*
	 * Calculate the difference in readings, convert to a frequency
	 * in Hz, and subtract 0.5% of the total.  Empirical testing has
	 * shown that overhead in DELAY() works out to approximately
	 * this value.
	 */
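	/*
	 * (A sketch of the arithmetic: DELAY(1000) waits roughly 1000
	 * microseconds, i.e. 1 ms, so tsc2 ticks/ms * 1000 yields
	 * ticks/s, and tsc2 * 5 is 0.5% of that product.)
	 */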
	tsc2 -= tsc1;
	*rate = tsc2 * 1000 - tsc2 * 5;
	return (0);
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

/*
 * Hook to idle the CPU when possible.  In the SMP case we default to
 * off because a halted cpu will not currently pick up a new thread in the
 * run queue until the next timer tick.  If turned on this will result in
 * approximately a 4.2% loss in real time performance in buildworld tests
 * (but improves user and sys times oddly enough), and saves approximately
 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
 *
 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
 * Then we can have our cake and eat it too.
 *
 * XXX I'm turning it on for SMP as well by default for now.  It seems to
 * help lock contention somewhat, and this is critical for HTT.  -Peter
 */
static int	cpu_idle_hlt = 1;
TUNABLE_INT("machdep.cpu_idle_hlt", &cpu_idle_hlt);
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");
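
/*
 * The knob can be flipped at runtime, e.g.:
 *
 *	sysctl machdep.cpu_idle_hlt=0
 *
 * or preset from loader.conf via the machdep.cpu_idle_hlt tunable.
 */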

static void
cpu_idle_default(void)
{
	/*
	 * We must guarantee that hlt is exactly the next instruction
	 * after sti; sti keeps interrupts inhibited for one more
	 * instruction, so no interrupt can slip into the window and
	 * leave us halted with work already pending.
	 */
	__asm __volatile("sti; hlt");
}

/*
 * Note that we have to be careful here to avoid a race between checking
 * sched_runnable() and actually halting.  If we don't do this, we may waste
 * the time between calling hlt and the next interrupt even though there
 * is a runnable process.
 */
void
cpu_idle(void)
{

#ifdef SMP
	if (mp_grab_cpu_hlt())
		return;
#endif

	if (cpu_idle_hlt) {
		disable_intr();
		if (sched_runnable())
			enable_intr();
		else
			(*cpu_idle_hook)();
	}
}

/* Other subsystems (e.g., ACPI) can hook this later. */
void (*cpu_idle_hook)(void) = cpu_idle_default;

/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(td, entry, stack, ps_strings)
	struct thread *td;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;
	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();

	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

u_long bootdev;		/* not a struct cdev *- encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */
struct mtx dt_lock;			/* lock for GDT and LDT */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern vm_offset_t proc0kstack;


/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUFS_SEL	2 %fs Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUGS_SEL	3 %gs Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	4 Code Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	5 Data Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUCODE_SEL	6 Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUDATA_SEL	7 Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	0x400,			/* segment base address */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{	0x0,			/* segment base address */
	sizeof(struct i386tss)-1,/* length  */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GLDT_SEL	10 LDT Descriptor */
{	(int) ldt,		/* segment base address  */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	(int) ldt,		/* segment base address  */
	(512 * sizeof(union descriptor)-1),		/* length */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	(int) &dblfault_tss,	/* segment base address  */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
{	0,			/* segment base address (overwritten)  */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
{	0,			/* segment base address (overwritten)  */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GNDIS_SEL	18 NDIS Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
};
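
/*
 * (These soft descriptors are converted to hardware format during
 * startup; a sketch of what init386() does with the usual ssdtosd()
 * helper:
 *
 *	for (x = 0; x < NGDT; x++)
 *		ssdtosd(&gdt_segs[x], &gdt[x].sd);
 * )
 */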

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func) >> 16;
}
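
/*
 * Typical usage during IDT setup elsewhere in this file (a sketch;
 * the gate type and privilege level vary per vector, and IDT_DE is
 * assumed to come from <machine/segments.h>):
 *
 *	setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
 *	    GSEL(GCODE_SEL, SEL_KPL));
 */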

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = (ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}
}

/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
{
	uint64_t idtr, gdtr;

	idtr = ridt();
	db_printf("idtr\t0x%08x/%04x\n",
	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
	gdtr = rgdt();
	db_printf("gdtr\t0x%08x/%04x\n",
	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
	db_printf("ldtr\t0x%04x\n", rldt());
	db_printf("tr\t0x%04x\n", rtr());
	db_printf("cr0\t0x%08x\n", rcr0());
	db_printf("cr2\t0x%08x\n", rcr2());
	db_printf("cr3\t0x%08x\n", rcr3());
	db_printf("cr4\t0x%08x\n", rcr4());
}
#endif

void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{
	ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type = sd->sd_type;
	ssd->ssd_dpl = sd->sd_dpl;
	ssd->ssd_p = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran = sd->sd_gran;
}

static int
add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
{
	int i, insert_idx, physmap_idx;

	physmap_idx = *physmap_idxp;

	if (boothowto & RB_VERBOSE)
		printf("SMAP type=%02x base=%016llx len=%016llx\n",
		    smap->type, smap->base, smap->length);

	if (smap->type != SMAP_TYPE_MEMORY)
		return (1);

	if (smap->length == 0)
		return (1);

#ifndef PAE
	if (smap->base >= 0xffffffff) {
		printf("%uK of memory above 4GB ignored\n",
		    (u_int)(smap->length / 1024));
		return (1);
	}
#endif

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = physmap_idx + 2;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (smap->base < physmap[i + 1]) {
			if (smap->base + smap->length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= physmap_idx &&
	    smap->base + smap->length == physmap[insert_idx]) {
		physmap[insert_idx] = smap->base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && smap->base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += smap->length;
		return (1);
	}

	physmap_idx += 2;
	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = smap->base;
	physmap[insert_idx + 1] = smap->base + smap->length;
	return (1);
}
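
/*
 * On return, physmap[] holds sorted, non-overlapping {base, end}
 * pairs.  For example, a hypothetical machine with 640K of base
 * memory and 2GB total might end up with
 * { 0x0, 0xa0000, 0x100000, 0x80000000 }.
 */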
1732
1733 static void
1734 basemem_setup(void)
1735 {
1736 vm_paddr_t pa;
1737 pt_entry_t *pte;
1738 int i;
1739
1740 if (basemem > 640) {
1741 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
1742 basemem);
1743 basemem = 640;
1744 }
1745
1746 /*
1747 * XXX if biosbasemem is now < 640, there is a `hole'
1748 * between the end of base memory and the start of
1749 * ISA memory. The hole may be empty or it may
1750 * contain BIOS code or data. Map it read/write so
1751 * that the BIOS can write to it. (Memory from 0 to
1752 * the physical end of the kernel is mapped read-only
1753 * to begin with and then parts of it are remapped.
1754 * The parts that aren't remapped form holes that
1755 * remain read-only and are unused by the kernel.
1756 * The base memory area is below the physical end of
1757 * the kernel and right now forms a read-only hole.
1758 * The part of it from PAGE_SIZE to
1759 * (trunc_page(biosbasemem * 1024) - 1) will be
1760 * remapped and used by the kernel later.)
1761 *
1762 * This code is similar to the code used in
1763 * pmap_mapdev, but since no memory needs to be
1764 * allocated we simply change the mapping.
1765 */
1766 for (pa = trunc_page(basemem * 1024);
1767 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1768 pmap_kenter(KERNBASE + pa, pa);
1769
1770 /*
1771 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
1772 * the vm86 page table so that vm86 can scribble on them using
1773 * the vm86 map too. XXX: why 2 ways for this and only 1 way for
1774 * page 0, at least as initialized here?
1775 */
1776 pte = (pt_entry_t *)vm86paddr;
1777 for (i = basemem / 4; i < 160; i++)
1778 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
1779 }
1780
1781 /*
1782 * Populate the (physmap) array with base/bound pairs describing the
1783 * available physical memory in the system, then test this memory and
1784 * build the phys_avail array describing the actually-available memory.
1785 *
1786 * If we cannot accurately determine the physical memory map, then use
1787 * the value from the 0xE801 call, and failing that, the RTC.
1788 *
1789 * Total memory size may be set by the kernel environment variable
1790 * hw.physmem or the compile-time define MAXMEM.
1791 *
1792 * XXX first should be vm_paddr_t.
1793 */
1794 static void
1795 getmemsize(int first)
1796 {
1797 int has_smap, off, physmap_idx, pa_indx, da_indx;
1798 u_long physmem_tunable;
1799 vm_paddr_t physmap[PHYSMAP_SIZE];
1800 pt_entry_t *pte;
1801 quad_t dcons_addr, dcons_size;
1802 int hasbrokenint12, i;
1803 u_int extmem;
1804 struct vm86frame vmf;
1805 struct vm86context vmc;
1806 vm_paddr_t pa;
1807 struct bios_smap *smap, *smapbase, *smapend;
1808 u_int32_t smapsize;
1809 caddr_t kmdp;
1810
1811 has_smap = 0;
1812 #ifdef XBOX
1813 if (arch_i386_is_xbox) {
1814 /*
1815 * We queried the memory size before, so chop off 4MB for
1816 * the framebuffer and inform the OS of this.
1817 */
1818 physmap[0] = 0;
1819 physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE;
1820 physmap_idx = 0;
1821 goto physmap_done;
1822 }
1823 #endif
1824 bzero(&vmf, sizeof(vmf));
1825 bzero(physmap, sizeof(physmap));
1826 basemem = 0;
1827
1828 /*
1829 * Check if the loader supplied an SMAP memory map. If so,
1830 * use that and do not make any VM86 calls.
1831 */
1832 physmap_idx = 0;
1833 smapbase = NULL;
1834 kmdp = preload_search_by_type("elf kernel");
1835 if (kmdp == NULL)
1836 kmdp = preload_search_by_type("elf32 kernel");
1837 if (kmdp != NULL)
1838 smapbase = (struct bios_smap *)preload_search_info(kmdp,
1839 MODINFO_METADATA | MODINFOMD_SMAP);
1840 if (smapbase != NULL) {
1841 /*
1842 * subr_module.c says:
1843 * "Consumer may safely assume that size value precedes data."
1844 * ie: an int32_t immediately precedes SMAP.
1845 */
1846 smapsize = *((u_int32_t *)smapbase - 1);
1847 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
1848 has_smap = 1;
1849
1850 for (smap = smapbase; smap < smapend; smap++)
1851 if (!add_smap_entry(smap, physmap, &physmap_idx))
1852 break;
1853 goto have_smap;
1854 }
1855
1856 /*
1857 * Some newer BIOSes have a broken INT 12H implementation
1858 * which causes a kernel panic immediately. In this case, we
1859 * need to use the SMAP to determine the base memory size.
1860 */
1861 hasbrokenint12 = 0;
1862 TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
1863 if (hasbrokenint12 == 0) {
1864 /* Use INT12 to determine base memory size. */
1865 vm86_intcall(0x12, &vmf);
1866 basemem = vmf.vmf_ax;
1867 basemem_setup();
1868 }
1869
1870 /*
1871 * Fetch the memory map with INT 15:E820. Map page 1 R/W into
1872 * the kernel page table so we can use it as a buffer. The
1873 * kernel will unmap this page later.
1874 */
1875 pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
1876 vmc.npages = 0;
1877 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
1878 vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
1879
1880 vmf.vmf_ebx = 0;
1881 do {
1882 vmf.vmf_eax = 0xE820;
1883 vmf.vmf_edx = SMAP_SIG;
1884 vmf.vmf_ecx = sizeof(struct bios_smap);
1885 i = vm86_datacall(0x15, &vmf, &vmc);
1886 if (i || vmf.vmf_eax != SMAP_SIG)
1887 break;
1888 has_smap = 1;
1889 if (!add_smap_entry(smap, physmap, &physmap_idx))
1890 break;
1891 } while (vmf.vmf_ebx != 0);
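/*
 * Editorial note: each E820 record filled in above follows the layout
 * of struct bios_smap from <machine/pc/bios.h>, reproduced below for
 * reference; SMAP_SIG is the ASCII signature 'SMAP' (0x534d4150) used
 * to validate every call.
 */
#if 0
struct bios_smap {
	u_int64_t base;		/* starting physical address */
	u_int64_t length;	/* length of this segment in bytes */
	u_int32_t type;		/* region type; 1 == usable RAM */
} __packed;
#endif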
1892
1893 have_smap:
1894 /*
1895 * If we didn't fetch the "base memory" size from INT12,
1896 * figure it out from the SMAP (or just guess).
1897 */
1898 if (basemem == 0) {
1899 for (i = 0; i <= physmap_idx; i += 2) {
1900 if (physmap[i] == 0x00000000) {
1901 basemem = physmap[i + 1] / 1024;
1902 break;
1903 }
1904 }
1905
1906 /* XXX: If we couldn't find basemem from SMAP, just guess. */
1907 if (basemem == 0)
1908 basemem = 640;
1909 basemem_setup();
1910 }
1911
1912 if (physmap[1] != 0)
1913 goto physmap_done;
1914
1915 /*
1916 * If we failed to find an SMAP, figure out the extended
1917 * memory size. We will then build a simple memory map with
1918 * two segments, one for "base memory" and the second for
1919 * "extended memory". Note that "extended memory" starts at a
1920 * physical address of 1MB and that both basemem and extmem
1921 * are in units of 1KB.
1922 *
1923 * First, try to fetch the extended memory size via INT 15:E801.
1924 */
1925 vmf.vmf_ax = 0xE801;
1926 if (vm86_intcall(0x15, &vmf) == 0) {
1927 extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
1928 } else {
1929 /*
1930 * If INT15:E801 fails, this is our last ditch effort
1931 * to determine the extended memory size. Currently
1932 * we prefer the RTC value over INT15:88.
1933 */
1934 #if 0
1935 vmf.vmf_ah = 0x88;
1936 vm86_intcall(0x15, &vmf);
1937 extmem = vmf.vmf_ax;
1938 #else
1939 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
1940 #endif
1941 }
1942
1943 /*
1944 * Special hack for chipsets that still remap the 384k hole when
1945 * there's 16MB of memory - this really confuses people that
1946 * are trying to use bus mastering ISA controllers with the
1947 * "16MB limit"; they only have 16MB, but the remapping puts
1948 * them beyond the limit.
1949 *
1950 * If extended memory is between 15-16MB (16-17MB phys address range),
1951 * chop it to 15MB.
1952 */
1953 if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
1954 extmem = 15 * 1024;
1955
1956 physmap[0] = 0;
1957 physmap[1] = basemem * 1024;
1958 physmap_idx = 2;
1959 physmap[physmap_idx] = 0x100000;
1960 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
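/*
 * Editorial sketch: with 639K of base memory and 63MB of extended
 * memory the fallback map built above would be
 *
 * physmap[0] = 0x00000000; physmap[1] = 0x0009fc00; (base)
 * physmap[2] = 0x00100000; physmap[3] = 0x04000000; (extended)
 */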
1961
1962 physmap_done:
1963 /*
1964 * Now, physmap contains a map of physical memory.
1965 */
1966
1967 #ifdef SMP
1968 /* make hole for AP bootstrap code */
1969 physmap[1] = mp_bootaddress(physmap[1]);
1970 #endif
1971
1972 /*
1973 * Maxmem isn't the "maximum memory", it's one larger than the
1974 * highest page of the physical address space. It should be
1975 * called something like "Maxphyspage". We may adjust this
1976 * based on ``hw.physmem'' and the results of the memory test.
1977 */
1978 Maxmem = atop(physmap[physmap_idx + 1]);
1979
1980 #ifdef MAXMEM
1981 Maxmem = MAXMEM / 4;
1982 #endif
1983
1984 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
1985 Maxmem = atop(physmem_tunable);
1986
1987 /*
1988 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
1989 * the amount of memory in the system.
1990 */
1991 if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
1992 Maxmem = atop(physmap[physmap_idx + 1]);
1993
1994 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1995 (boothowto & RB_VERBOSE))
1996 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1997
1998 /*
1999 * If Maxmem has been increased beyond what the system has detected,
2000 * extend the last memory segment to the new limit.
2001 */
2002 if (atop(physmap[physmap_idx + 1]) < Maxmem)
2003 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
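/*
 * Editorial note: atop() and ptoa() simply shift by PAGE_SHIFT, so
 * with 4KB pages Maxmem * 4 (used by the verbose printf above) is the
 * size in kilobytes, and the compile-time MAXMEM option, which is
 * given in kilobytes, becomes a page count via MAXMEM / 4.
 */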
2004
2005 /* call pmap initialization to make new kernel address space */
2006 pmap_bootstrap(first);
2007
2008 /*
2009 * Size up each available chunk of physical memory.
2010 */
2011 physmap[0] = PAGE_SIZE; /* mask off page 0 */
2012 pa_indx = 0;
2013 da_indx = 1;
2014 phys_avail[pa_indx++] = physmap[0];
2015 phys_avail[pa_indx] = physmap[0];
2016 dump_avail[da_indx] = physmap[0];
2017 pte = CMAP1;
2018
2019 /*
2020 * Get dcons buffer address
2021 */
2022 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
2023 getenv_quad("dcons.size", &dcons_size) == 0)
2024 dcons_addr = 0;
2025
2026 /*
2027 * physmap is in bytes, so when converting to page boundaries,
2028 * round up the start address and round down the end address.
2029 */
2030 for (i = 0; i <= physmap_idx; i += 2) {
2031 vm_paddr_t end;
2032
2033 end = ptoa((vm_paddr_t)Maxmem);
2034 if (physmap[i + 1] < end)
2035 end = trunc_page(physmap[i + 1]);
2036 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
2037 int tmp, page_bad, full;
2038 int *ptr = (int *)CADDR1;
2039
2040 full = FALSE;
2041 /*
2042 * block out kernel memory as not available.
2043 */
2044 if (pa >= KERNLOAD && pa < first)
2045 goto do_dump_avail;
2046
2047 /*
2048 * block out dcons buffer
2049 */
2050 if (dcons_addr > 0
2051 && pa >= trunc_page(dcons_addr)
2052 && pa < dcons_addr + dcons_size)
2053 goto do_dump_avail;
2054
2055 page_bad = FALSE;
2056
2057 /*
2058 * map page into kernel: valid, read/write, non-cacheable
2059 */
2060 *pte = pa | PG_V | PG_RW | PG_N;
2061 invltlb();
2062
2063 tmp = *(int *)ptr;
2064 /*
2065 * Test for alternating 1's and 0's
2066 */
2067 *(volatile int *)ptr = 0xaaaaaaaa;
2068 if (*(volatile int *)ptr != 0xaaaaaaaa)
2069 page_bad = TRUE;
2070 /*
2071 * Test for alternating 0's and 1's
2072 */
2073 *(volatile int *)ptr = 0x55555555;
2074 if (*(volatile int *)ptr != 0x55555555)
2075 page_bad = TRUE;
2076 /*
2077 * Test for all 1's
2078 */
2079 *(volatile int *)ptr = 0xffffffff;
2080 if (*(volatile int *)ptr != 0xffffffff)
2081 page_bad = TRUE;
2082 /*
2083 * Test for all 0's
2084 */
2085 *(volatile int *)ptr = 0x0;
2086 if (*(volatile int *)ptr != 0x0)
2087 page_bad = TRUE;
2088 /*
2089 * Restore original value.
2090 */
2091 *(int *)ptr = tmp;
2092
2093 /*
2094 * Adjust array of valid/good pages.
2095 */
2096 if (page_bad == TRUE)
2097 continue;
2098 /*
2099 * If this good page is a continuation of the
2100 * previous set of good pages, then just increase
2101 * the end pointer. Otherwise start a new chunk.
2102 * Note that "end" is exclusive: it points one byte
2103 * past the last valid address, making the range
2104 * >= start and < end. If we're also doing a speculative
2105 * memory test and we are at or past the end, bump up Maxmem
2106 * so that we keep going. The first bad page
2107 * will terminate the loop.
2108 */
2109 if (phys_avail[pa_indx] == pa) {
2110 phys_avail[pa_indx] += PAGE_SIZE;
2111 } else {
2112 pa_indx++;
2113 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
2114 printf(
2115 "Too many holes in the physical address space, giving up\n");
2116 pa_indx--;
2117 full = TRUE;
2118 goto do_dump_avail;
2119 }
2120 phys_avail[pa_indx++] = pa; /* start */
2121 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
2122 }
2123 physmem++;
2124 do_dump_avail:
2125 if (dump_avail[da_indx] == pa) {
2126 dump_avail[da_indx] += PAGE_SIZE;
2127 } else {
2128 da_indx++;
2129 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2130 da_indx--;
2131 goto do_next;
2132 }
2133 dump_avail[da_indx++] = pa; /* start */
2134 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2135 }
2136 do_next:
2137 if (full)
2138 break;
2139 }
2140 }
2141 *pte = 0;
2142 invltlb();
2143
2144 /*
2145 * XXX
2146 * The last chunk must contain at least one page plus the message
2147 * buffer to avoid complicating other code (message buffer address
2148 * calculation, etc.).
2149 */
2150 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2151 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
2152 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2153 phys_avail[pa_indx--] = 0;
2154 phys_avail[pa_indx--] = 0;
2155 }
2156
2157 Maxmem = atop(phys_avail[pa_indx]);
2158
2159 /* Trim off space for the message buffer. */
2160 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
2161
2162 /* Map the message buffer. */
2163 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
2164 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
2165 off);
2166 }
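/*
 * Editorial sketch: on return phys_avail[] holds zero-terminated
 * start/end pairs of page-aligned RAM that survived the test above,
 * e.g.
 *
 * phys_avail[0] = 0x1000; phys_avail[1] = 0x9f000;
 * phys_avail[2] = 0x400000; phys_avail[3] = 0x3ff7f000;
 * phys_avail[4] = 0; (terminator)
 *
 * A consumer would walk it as in the illustrative loop below.
 */
#if 0
vm_paddr_t start, end;
int n;

for (n = 0; phys_avail[n + 1] != 0; n += 2) {
	start = phys_avail[n];
	end = phys_avail[n + 1];
	printf("usable: %jx-%jx\n", (uintmax_t)start, (uintmax_t)end);
}
#endif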
2167
2168 void
2169 init386(first)
2170 int first;
2171 {
2172 struct gate_descriptor *gdp;
2173 int gsel_tss, metadata_missing, x;
2174 struct pcpu *pc;
2175
2176 thread0.td_kstack = proc0kstack;
2177 thread0.td_pcb = (struct pcb *)
2178 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
2179
2180 /*
2181 * This may be done better later if it gets more high level
2182 * components in it. If so, just link td->td_proc here.
2183 */
2184 proc_linkup0(&proc0, &thread0);
2185
2186 metadata_missing = 0;
2187 if (bootinfo.bi_modulep) {
2188 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
2189 preload_bootstrap_relocate(KERNBASE);
2190 } else {
2191 metadata_missing = 1;
2192 }
2193 if (envmode == 1)
2194 kern_envp = static_env;
2195 else if (bootinfo.bi_envp)
2196 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
2197
2198 /* Init basic tunables, hz etc */
2199 init_param1();
2200
2201 /*
2202 * Make gdt memory segments. All segments cover the full 4GB
2203 * of address space and permissions are enforced at page level.
2204 */
2205 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
2206 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
2207 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
2208 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
2209 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
2210 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
2211
2212 pc = &__pcpu[0];
2213 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
2214 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2215 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2216
2217 for (x = 0; x < NGDT; x++)
2218 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2219
2220 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
2221 r_gdt.rd_base = (int) gdt;
2222 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
2223 lgdt(&r_gdt);
2224
2225 pcpu_init(pc, 0, sizeof(struct pcpu));
2226 PCPU_SET(prvspace, pc);
2227 PCPU_SET(curthread, &thread0);
2228 PCPU_SET(curpcb, thread0.td_pcb);
2229
2230 /*
2231 * Initialize mutexes.
2232 *
2233 * icu_lock: in order to allow an interrupt to occur in a critical
2234 * section, to set pcpu->ipending (etc...) properly, we
2235 * must be able to get the icu lock, so it can't be
2236 * under witness.
2237 */
2238 mutex_init();
2239 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2240
2241 /* make ldt memory segments */
2242 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2243 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2244 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2245 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2246
2247 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2248 lldt(_default_ldt);
2249 PCPU_SET(currentldt, _default_ldt);
2250
2251 /* exceptions */
2252 for (x = 0; x < NIDT; x++)
2253 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2254 GSEL(GCODE_SEL, SEL_KPL));
2255 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2256 GSEL(GCODE_SEL, SEL_KPL));
2257 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2258 GSEL(GCODE_SEL, SEL_KPL));
2259 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2260 GSEL(GCODE_SEL, SEL_KPL));
2261 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2262 GSEL(GCODE_SEL, SEL_KPL));
2263 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2264 GSEL(GCODE_SEL, SEL_KPL));
2265 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2266 GSEL(GCODE_SEL, SEL_KPL));
2267 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2268 GSEL(GCODE_SEL, SEL_KPL));
2269 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
2270 GSEL(GCODE_SEL, SEL_KPL));
2271 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2272 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2273 GSEL(GCODE_SEL, SEL_KPL));
2274 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2275 GSEL(GCODE_SEL, SEL_KPL));
2276 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2277 GSEL(GCODE_SEL, SEL_KPL));
2278 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2279 GSEL(GCODE_SEL, SEL_KPL));
2280 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2281 GSEL(GCODE_SEL, SEL_KPL));
2282 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2283 GSEL(GCODE_SEL, SEL_KPL));
2284 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2285 GSEL(GCODE_SEL, SEL_KPL));
2286 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2287 GSEL(GCODE_SEL, SEL_KPL));
2288 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2289 GSEL(GCODE_SEL, SEL_KPL));
2290 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2291 GSEL(GCODE_SEL, SEL_KPL));
2292 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2293 GSEL(GCODE_SEL, SEL_KPL));
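/*
 * Editorial note: SDT_SYS386IGT entries are interrupt gates (the CPU
 * clears PSL_I on entry) while SDT_SYS386TGT entries are trap gates
 * (the interrupt flag is left alone); the few entries installed with
 * SEL_UPL (breakpoint, overflow, int 0x80) are the ones userland is
 * allowed to raise directly with the INT instruction.
 */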
2294
2295 r_idt.rd_limit = sizeof(idt0) - 1;
2296 r_idt.rd_base = (int) idt;
2297 lidt(&r_idt);
2298
2299 #ifdef XBOX
2300 /*
2301 * The following code queries the PCI ID of 0:0:0. For the XBOX,
2302 * this should be 0x10de / 0x02a5.
2303 *
2304 * This is exactly what Linux does.
2305 */
2306 outl(0xcf8, 0x80000000);
2307 if (inl(0xcfc) == 0x02a510de) {
2308 arch_i386_is_xbox = 1;
2309 pic16l_setled(XBOX_LED_GREEN);
2310
2311 /*
2312 * We are an XBOX, but we may have either 64MB or 128MB of
2313 * memory. The PCI host bridge should be programmed for this,
2314 * so we just query it.
2315 */
2316 outl(0xcf8, 0x80000084);
2317 arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
2318 }
2319 #endif /* XBOX */
2320
2321 /*
2322 * Initialize the i8254 before the console so that console
2323 * initialization can use DELAY().
2324 */
2325 i8254_init();
2326
2327 /*
2328 * Initialize the console before we print anything out.
2329 */
2330 cninit();
2331
2332 if (metadata_missing)
2333 printf("WARNING: loader(8) metadata is missing!\n");
2334
2335 #ifdef DEV_ISA
2336 elcr_probe();
2337 atpic_startup();
2338 #endif
2339
2340 #ifdef DDB
2341 ksym_start = bootinfo.bi_symtab;
2342 ksym_end = bootinfo.bi_esymtab;
2343 #endif
2344
2345 kdb_init();
2346
2347 #ifdef KDB
2348 if (boothowto & RB_KDB)
2349 kdb_enter_why(KDB_WHY_BOOTFLAGS,
2350 "Boot flags requested debugger");
2351 #endif
2352
2353 finishidentcpu(); /* Final stage of CPU initialization */
2354 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2355 GSEL(GCODE_SEL, SEL_KPL));
2356 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2357 GSEL(GCODE_SEL, SEL_KPL));
2358 initializecpu(); /* Initialize CPU registers */
2359
2360 /* make an initial tss so cpu can get interrupt stack on syscall! */
2361 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2362 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2363 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
2364 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2365 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2366 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
2367 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
2368 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
2369 ltr(gsel_tss);
2370
2371 /* pointer to selector slot for %fs/%gs */
2372 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
2373
2374 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2375 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2376 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2377 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2378 #ifdef PAE
2379 dblfault_tss.tss_cr3 = (int)IdlePDPT;
2380 #else
2381 dblfault_tss.tss_cr3 = (int)IdlePTD;
2382 #endif
2383 dblfault_tss.tss_eip = (int)dblfault_handler;
2384 dblfault_tss.tss_eflags = PSL_KERNEL;
2385 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2386 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2387 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2388 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2389 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
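/*
 * Editorial note: IDT_DF was installed above as a task gate
 * (SDT_SYSTASKGT) through GPANIC_SEL, so a double fault switches to
 * this statically initialized TSS and its private dblfault_stack
 * instead of trusting the (possibly corrupt) faulting stack.
 */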
2390
2391 vm86_initialize();
2392 getmemsize(first);
2393 init_param2(physmem);
2394
2395 /* now running on new page tables, configured, and u/iom is accessible */
2396
2397 msgbufinit(msgbufp, MSGBUF_SIZE);
2398
2399 /* make a call gate to reenter kernel with */
2400 gdp = &ldt[LSYS5CALLS_SEL].gd;
2401
2402 x = (int) &IDTVEC(lcall_syscall);
2403 gdp->gd_looffset = x;
2404 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
2405 gdp->gd_stkcpy = 1;
2406 gdp->gd_type = SDT_SYS386CGT;
2407 gdp->gd_dpl = SEL_UPL;
2408 gdp->gd_p = 1;
2409 gdp->gd_hioffset = x >> 16;
2410
2411 /* XXX does this work? */
2412 /* XXX yes! */
2413 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2414 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
2415
2416 /* transfer to user mode */
2417
2418 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
2419 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
2420
2421 /* setup proc 0's pcb */
2422 thread0.td_pcb->pcb_flags = 0;
2423 #ifdef PAE
2424 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
2425 #else
2426 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2427 #endif
2428 thread0.td_pcb->pcb_ext = 0;
2429 thread0.td_frame = &proc0_tf;
2430 }
2431
2432 void
2433 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
2434 {
2435
2436 pcpu->pc_acpi_id = 0xffffffff;
2437 }
2438
2439 void
2440 spinlock_enter(void)
2441 {
2442 struct thread *td;
2443
2444 td = curthread;
2445 if (td->td_md.md_spinlock_count == 0)
2446 td->td_md.md_saved_flags = intr_disable();
2447 td->td_md.md_spinlock_count++;
2448 critical_enter();
2449 }
2450
2451 void
2452 spinlock_exit(void)
2453 {
2454 struct thread *td;
2455
2456 td = curthread;
2457 critical_exit();
2458 td->td_md.md_spinlock_count--;
2459 if (td->td_md.md_spinlock_count == 0)
2460 intr_restore(td->td_md.md_saved_flags);
2461 }
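/*
 * Editorial sketch of the intended pairing: the first spinlock_enter()
 * on a thread saves the flags and disables interrupts; nested calls
 * only bump the count, and the saved flags are restored when the count
 * returns to zero.
 */
#if 0
spinlock_enter();	/* interrupts off, flags saved */
spinlock_enter();	/* nested: count == 2, no flag change */
spinlock_exit();	/* count == 1, interrupts still off */
spinlock_exit();	/* count == 0, saved flags restored */
#endif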
2462
2463 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2464 static void f00f_hack(void *unused);
2465 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
2466
2467 static void
2468 f00f_hack(void *unused)
2469 {
2470 struct gate_descriptor *new_idt;
2471 vm_offset_t tmp;
2472
2473 if (!has_f00f_bug)
2474 return;
2475
2476 GIANT_REQUIRED;
2477
2478 printf("Intel Pentium detected, installing workaround for F00F bug\n");
2479
2480 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
2481 if (tmp == 0)
2482 panic("kmem_alloc returned 0");
2483
2484 /* Put the problematic entry (#6) at the end of the lower page. */
2485 new_idt = (struct gate_descriptor*)
2486 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
2487 bcopy(idt, new_idt, sizeof(idt0));
2488 r_idt.rd_base = (u_int)new_idt;
2489 lidt(&r_idt);
2490 idt = new_idt;
2491 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
2492 VM_PROT_READ, FALSE) != KERN_SUCCESS)
2493 panic("vm_map_protect failed");
2494 }
2495 #endif /* defined(I586_CPU) && !NO_F00F_HACK */
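/*
 * Editorial note on the arithmetic above: IDT entries are 8 bytes, so
 * placing new_idt at (tmp + PAGE_SIZE - 7 * 8) leaves exactly entries
 * #0-#6 on the protected lower page, with the invalid-opcode entry
 * (#6) ending at the page boundary; per the published workaround this
 * turns the F00F lockup into a fault the kernel can intercept.
 */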
2496
2497 /*
2498 * Construct a PCB from a trapframe. This is called from kdb_trap() where
2499 * we want to start a backtrace from the function that caused us to enter
2500 * the debugger. We have the context in the trapframe, but base the trace
2501 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
2502 * enough for a backtrace.
2503 */
2504 void
2505 makectx(struct trapframe *tf, struct pcb *pcb)
2506 {
2507
2508 pcb->pcb_edi = tf->tf_edi;
2509 pcb->pcb_esi = tf->tf_esi;
2510 pcb->pcb_ebp = tf->tf_ebp;
2511 pcb->pcb_ebx = tf->tf_ebx;
2512 pcb->pcb_eip = tf->tf_eip;
2513 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
2514 }
2515
2516 int
2517 ptrace_set_pc(struct thread *td, u_long addr)
2518 {
2519
2520 td->td_frame->tf_eip = addr;
2521 return (0);
2522 }
2523
2524 int
2525 ptrace_single_step(struct thread *td)
2526 {
2527 td->td_frame->tf_eflags |= PSL_T;
2528 return (0);
2529 }
2530
2531 int
2532 ptrace_clear_single_step(struct thread *td)
2533 {
2534 td->td_frame->tf_eflags &= ~PSL_T;
2535 return (0);
2536 }
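/*
 * Editorial sketch: these hooks implement the machine-dependent side
 * of ptrace(2) single-stepping by toggling the trace flag (PSL_T). A
 * userland debugger would drive them roughly as below (illustrative
 * only, not kernel code).
 */
#if 0
ptrace(PT_STEP, pid, (caddr_t)1, 0);	/* sets PSL_T via the hook above */
waitpid(pid, &status, 0);		/* child stops after one instruction */
#endif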
2537
2538 int
2539 fill_regs(struct thread *td, struct reg *regs)
2540 {
2541 struct pcb *pcb;
2542 struct trapframe *tp;
2543
2544 tp = td->td_frame;
2545 pcb = td->td_pcb;
2546 regs->r_fs = tp->tf_fs;
2547 regs->r_es = tp->tf_es;
2548 regs->r_ds = tp->tf_ds;
2549 regs->r_edi = tp->tf_edi;
2550 regs->r_esi = tp->tf_esi;
2551 regs->r_ebp = tp->tf_ebp;
2552 regs->r_ebx = tp->tf_ebx;
2553 regs->r_edx = tp->tf_edx;
2554 regs->r_ecx = tp->tf_ecx;
2555 regs->r_eax = tp->tf_eax;
2556 regs->r_eip = tp->tf_eip;
2557 regs->r_cs = tp->tf_cs;
2558 regs->r_eflags = tp->tf_eflags;
2559 regs->r_esp = tp->tf_esp;
2560 regs->r_ss = tp->tf_ss;
2561 regs->r_gs = pcb->pcb_gs;
2562 return (0);
2563 }
2564
2565 int
2566 set_regs(struct thread *td, struct reg *regs)
2567 {
2568 struct pcb *pcb;
2569 struct trapframe *tp;
2570
2571 tp = td->td_frame;
2572 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
2573 !CS_SECURE(regs->r_cs))
2574 return (EINVAL);
2575 pcb = td->td_pcb;
2576 tp->tf_fs = regs->r_fs;
2577 tp->tf_es = regs->r_es;
2578 tp->tf_ds = regs->r_ds;
2579 tp->tf_edi = regs->r_edi;
2580 tp->tf_esi = regs->r_esi;
2581 tp->tf_ebp = regs->r_ebp;
2582 tp->tf_ebx = regs->r_ebx;
2583 tp->tf_edx = regs->r_edx;
2584 tp->tf_ecx = regs->r_ecx;
2585 tp->tf_eax = regs->r_eax;
2586 tp->tf_eip = regs->r_eip;
2587 tp->tf_cs = regs->r_cs;
2588 tp->tf_eflags = regs->r_eflags;
2589 tp->tf_esp = regs->r_esp;
2590 tp->tf_ss = regs->r_ss;
2591 pcb->pcb_gs = regs->r_gs;
2592 return (0);
2593 }
2594
2595 #ifdef CPU_ENABLE_SSE
2596 static void
2597 fill_fpregs_xmm(sv_xmm, sv_87)
2598 struct savexmm *sv_xmm;
2599 struct save87 *sv_87;
2600 {
2601 register struct env87 *penv_87 = &sv_87->sv_env;
2602 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2603 int i;
2604
2605 bzero(sv_87, sizeof(*sv_87));
2606
2607 /* FPU control/status */
2608 penv_87->en_cw = penv_xmm->en_cw;
2609 penv_87->en_sw = penv_xmm->en_sw;
2610 penv_87->en_tw = penv_xmm->en_tw;
2611 penv_87->en_fip = penv_xmm->en_fip;
2612 penv_87->en_fcs = penv_xmm->en_fcs;
2613 penv_87->en_opcode = penv_xmm->en_opcode;
2614 penv_87->en_foo = penv_xmm->en_foo;
2615 penv_87->en_fos = penv_xmm->en_fos;
2616
2617 /* FPU registers */
2618 for (i = 0; i < 8; ++i)
2619 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
2620 }
2621
2622 static void
2623 set_fpregs_xmm(sv_87, sv_xmm)
2624 struct save87 *sv_87;
2625 struct savexmm *sv_xmm;
2626 {
2627 register struct env87 *penv_87 = &sv_87->sv_env;
2628 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2629 int i;
2630
2631 /* FPU control/status */
2632 penv_xmm->en_cw = penv_87->en_cw;
2633 penv_xmm->en_sw = penv_87->en_sw;
2634 penv_xmm->en_tw = penv_87->en_tw;
2635 penv_xmm->en_fip = penv_87->en_fip;
2636 penv_xmm->en_fcs = penv_87->en_fcs;
2637 penv_xmm->en_opcode = penv_87->en_opcode;
2638 penv_xmm->en_foo = penv_87->en_foo;
2639 penv_xmm->en_fos = penv_87->en_fos;
2640
2641 /* FPU registers */
2642 for (i = 0; i < 8; ++i)
2643 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
2644 }
2645 #endif /* CPU_ENABLE_SSE */
2646
2647 int
2648 fill_fpregs(struct thread *td, struct fpreg *fpregs)
2649 {
2650 #ifdef CPU_ENABLE_SSE
2651 if (cpu_fxsr) {
2652 fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
2653 (struct save87 *)fpregs);
2654 return (0);
2655 }
2656 #endif /* CPU_ENABLE_SSE */
2657 bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
2658 return (0);
2659 }
2660
2661 int
2662 set_fpregs(struct thread *td, struct fpreg *fpregs)
2663 {
2664 #ifdef CPU_ENABLE_SSE
2665 if (cpu_fxsr) {
2666 set_fpregs_xmm((struct save87 *)fpregs,
2667 &td->td_pcb->pcb_save.sv_xmm);
2668 return (0);
2669 }
2670 #endif /* CPU_ENABLE_SSE */
2671 bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
2672 return (0);
2673 }
2674
2675 /*
2676 * Get machine context.
2677 */
2678 int
2679 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
2680 {
2681 struct trapframe *tp;
2682
2683 tp = td->td_frame;
2684
2685 PROC_LOCK(curthread->td_proc);
2686 mcp->mc_onstack = sigonstack(tp->tf_esp);
2687 PROC_UNLOCK(curthread->td_proc);
2688 mcp->mc_gs = td->td_pcb->pcb_gs;
2689 mcp->mc_fs = tp->tf_fs;
2690 mcp->mc_es = tp->tf_es;
2691 mcp->mc_ds = tp->tf_ds;
2692 mcp->mc_edi = tp->tf_edi;
2693 mcp->mc_esi = tp->tf_esi;
2694 mcp->mc_ebp = tp->tf_ebp;
2695 mcp->mc_isp = tp->tf_isp;
2696 mcp->mc_eflags = tp->tf_eflags;
2697 if (flags & GET_MC_CLEAR_RET) {
2698 mcp->mc_eax = 0;
2699 mcp->mc_edx = 0;
2700 mcp->mc_eflags &= ~PSL_C;
2701 } else {
2702 mcp->mc_eax = tp->tf_eax;
2703 mcp->mc_edx = tp->tf_edx;
2704 }
2705 mcp->mc_ebx = tp->tf_ebx;
2706 mcp->mc_ecx = tp->tf_ecx;
2707 mcp->mc_eip = tp->tf_eip;
2708 mcp->mc_cs = tp->tf_cs;
2709 mcp->mc_esp = tp->tf_esp;
2710 mcp->mc_ss = tp->tf_ss;
2711 mcp->mc_len = sizeof(*mcp);
2712 get_fpcontext(td, mcp);
2713 return (0);
2714 }
2715
2716 /*
2717 * Set machine context.
2718 *
2719 * However, we don't set any but the user modifiable flags, and we won't
2720 * touch the cs selector.
2721 */
2722 int
2723 set_mcontext(struct thread *td, const mcontext_t *mcp)
2724 {
2725 struct trapframe *tp;
2726 int eflags, ret;
2727
2728 tp = td->td_frame;
2729 if (mcp->mc_len != sizeof(*mcp))
2730 return (EINVAL);
2731 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
2732 (tp->tf_eflags & ~PSL_USERCHANGE);
2733 if ((ret = set_fpcontext(td, mcp)) == 0) {
2734 tp->tf_fs = mcp->mc_fs;
2735 tp->tf_es = mcp->mc_es;
2736 tp->tf_ds = mcp->mc_ds;
2737 tp->tf_edi = mcp->mc_edi;
2738 tp->tf_esi = mcp->mc_esi;
2739 tp->tf_ebp = mcp->mc_ebp;
2740 tp->tf_ebx = mcp->mc_ebx;
2741 tp->tf_edx = mcp->mc_edx;
2742 tp->tf_ecx = mcp->mc_ecx;
2743 tp->tf_eax = mcp->mc_eax;
2744 tp->tf_eip = mcp->mc_eip;
2745 tp->tf_eflags = eflags;
2746 tp->tf_esp = mcp->mc_esp;
2747 tp->tf_ss = mcp->mc_ss;
2748 td->td_pcb->pcb_gs = mcp->mc_gs;
2749 ret = 0;
2750 }
2751 return (ret);
2752 }
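/*
 * Editorial note: these two functions are the kernel half of
 * getcontext(2)/setcontext(2); mc_len doubles as a sanity/version
 * check, which is why a context of the wrong size is rejected with
 * EINVAL. Illustrative userland round-trip:
 */
#if 0
volatile int done = 0;
ucontext_t uc;

getcontext(&uc);
if (!done) {
	done = 1;
	setcontext(&uc);	/* resumes just after getcontext() */
}
#endif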
2753
2754 static void
2755 get_fpcontext(struct thread *td, mcontext_t *mcp)
2756 {
2757 #ifndef DEV_NPX
2758 mcp->mc_fpformat = _MC_FPFMT_NODEV;
2759 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
2760 #else
2761 union savefpu *addr;
2762
2763 /*
2764 * XXX mc_fpstate might be misaligned, since its declaration is not
2765 * unportabilized using __attribute__((aligned(16))) like the
2766 * declaration of struct savemm, and anyway, alignment doesn't work
2767 * for auto variables since we don't use gcc's pessimal stack
2768 * alignment. Work around this by abusing the spare fields after
2769 * mcp->mc_fpstate.
2770 *
2771 * XXX unpessimize most cases by only aligning when fxsave might be
2772 * called, although this requires knowing too much about
2773 * npxgetregs()'s internals.
2774 */
2775 addr = (union savefpu *)&mcp->mc_fpstate;
2776 if (td == PCPU_GET(fpcurthread) &&
2777 #ifdef CPU_ENABLE_SSE
2778 cpu_fxsr &&
2779 #endif
2780 ((uintptr_t)(void *)addr & 0xF)) {
2781 do
2782 addr = (void *)((char *)addr + 4);
2783 while ((uintptr_t)(void *)addr & 0xF);
2784 }
2785 mcp->mc_ownedfp = npxgetregs(td, addr);
2786 if (addr != (union savefpu *)&mcp->mc_fpstate) {
2787 bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
2788 bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
2789 }
2790 mcp->mc_fpformat = npxformat();
2791 #endif
2792 }
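/*
 * Editorial note: since mc_fpstate is at least 4-byte aligned, the
 * 4-byte stepping loop above is equivalent to rounding up to the next
 * 16-byte boundary, i.e.
 *
 * addr = (union savefpu *)(((uintptr_t)addr + 0xF) & ~(uintptr_t)0xF);
 *
 * the spare fields following mc_fpstate provide the slack bytes.
 */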
2793
2794 static int
2795 set_fpcontext(struct thread *td, const mcontext_t *mcp)
2796 {
2797 union savefpu *addr;
2798
2799 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
2800 return (0);
2801 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
2802 mcp->mc_fpformat != _MC_FPFMT_XMM)
2803 return (EINVAL);
2804 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
2805 /* We don't care what state is left in the FPU or PCB. */
2806 fpstate_drop(td);
2807 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
2808 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
2809 /* XXX align as above. */
2810 addr = (union savefpu *)&mcp->mc_fpstate;
2811 if (td == PCPU_GET(fpcurthread) &&
2812 #ifdef CPU_ENABLE_SSE
2813 cpu_fxsr &&
2814 #endif
2815 ((uintptr_t)(void *)addr & 0xF)) {
2816 do
2817 addr = (void *)((char *)addr + 4);
2818 while ((uintptr_t)(void *)addr & 0xF);
2819 bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
2820 }
2821 #ifdef DEV_NPX
2822 #ifdef CPU_ENABLE_SSE
2823 if (cpu_fxsr)
2824 addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
2825 #endif
2826 /*
2827 * XXX we violate the dubious requirement that npxsetregs()
2828 * be called with interrupts disabled.
2829 */
2830 npxsetregs(td, addr);
2831 #endif
2832 /*
2833 * Don't bother putting things back where they were in the
2834 * misaligned case, since we know that the caller won't use
2835 * them again.
2836 */
2837 } else
2838 return (EINVAL);
2839 return (0);
2840 }
2841
2842 static void
2843 fpstate_drop(struct thread *td)
2844 {
2845 register_t s;
2846
2847 s = intr_disable();
2848 #ifdef DEV_NPX
2849 if (PCPU_GET(fpcurthread) == td)
2850 npxdrop();
2851 #endif
2852 /*
2853 * XXX force a full drop of the npx. The above only drops it if we
2854 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
2855 *
2856 * XXX I don't much like npxgetregs()'s semantics of doing a full
2857 * drop. Dropping only to the pcb matches fnsave's behaviour.
2858 * We only need to drop to !PCB_INITDONE in sendsig(). But
2859 * sendsig() is the only caller of npxgetregs()... perhaps we just
2860 * have too many layers.
2861 */
2862 curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
2863 intr_restore(s);
2864 }
2865
2866 int
2867 fill_dbregs(struct thread *td, struct dbreg *dbregs)
2868 {
2869 struct pcb *pcb;
2870
2871 if (td == NULL) {
2872 dbregs->dr[0] = rdr0();
2873 dbregs->dr[1] = rdr1();
2874 dbregs->dr[2] = rdr2();
2875 dbregs->dr[3] = rdr3();
2876 dbregs->dr[4] = rdr4();
2877 dbregs->dr[5] = rdr5();
2878 dbregs->dr[6] = rdr6();
2879 dbregs->dr[7] = rdr7();
2880 } else {
2881 pcb = td->td_pcb;
2882 dbregs->dr[0] = pcb->pcb_dr0;
2883 dbregs->dr[1] = pcb->pcb_dr1;
2884 dbregs->dr[2] = pcb->pcb_dr2;
2885 dbregs->dr[3] = pcb->pcb_dr3;
2886 dbregs->dr[4] = 0;
2887 dbregs->dr[5] = 0;
2888 dbregs->dr[6] = pcb->pcb_dr6;
2889 dbregs->dr[7] = pcb->pcb_dr7;
2890 }
2891 return (0);
2892 }
2893
2894 int
2895 set_dbregs(struct thread *td, struct dbreg *dbregs)
2896 {
2897 struct pcb *pcb;
2898 int i;
2899
2900 if (td == NULL) {
2901 load_dr0(dbregs->dr[0]);
2902 load_dr1(dbregs->dr[1]);
2903 load_dr2(dbregs->dr[2]);
2904 load_dr3(dbregs->dr[3]);
2905 load_dr4(dbregs->dr[4]);
2906 load_dr5(dbregs->dr[5]);
2907 load_dr6(dbregs->dr[6]);
2908 load_dr7(dbregs->dr[7]);
2909 } else {
2910 /*
2911 * Don't let an illegal value for dr7 get set. Specifically,
2912 * check for undefined settings. Setting these bit patterns
2913 * results in undefined behaviour and can lead to an unexpected
2914 * TRCTRAP.
2915 */
2916 for (i = 0; i < 4; i++) {
2917 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
2918 return (EINVAL);
2919 if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
2920 return (EINVAL);
2921 }
2922
2923 pcb = td->td_pcb;
2924
2925 /*
2926 * Don't let a process set a breakpoint that is not within the
2927 * process's address space. If a process could do this, it
2928 * could halt the system by setting a breakpoint in the kernel
2929 * (if ddb was enabled). Thus, we need to check to make sure
2930 * that no breakpoints are being enabled for addresses outside
2931 * the process's address space.
2932 *
2933 * XXX - what about when the watched area of the user's
2934 * address space is written into from within the kernel
2935 * ... wouldn't that still cause a breakpoint to be generated
2936 * from within kernel mode?
2937 */
2938
2939 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
2940 /* dr0 is enabled */
2941 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
2942 return (EINVAL);
2943 }
2944
2945 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
2946 /* dr1 is enabled */
2947 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
2948 return (EINVAL);
2949 }
2950
2951 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
2952 /* dr2 is enabled */
2953 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
2954 return (EINVAL);
2955 }
2956
2957 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
2958 /* dr3 is enabled */
2959 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
2960 return (EINVAL);
2961 }
2962
2963 pcb->pcb_dr0 = dbregs->dr[0];
2964 pcb->pcb_dr1 = dbregs->dr[1];
2965 pcb->pcb_dr2 = dbregs->dr[2];
2966 pcb->pcb_dr3 = dbregs->dr[3];
2967 pcb->pcb_dr6 = dbregs->dr[6];
2968 pcb->pcb_dr7 = dbregs->dr[7];
2969
2970 pcb->pcb_flags |= PCB_DBREGS;
2971 }
2972
2973 return (0);
2974 }
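/*
 * Editorial reference for the dr7 checks above (Intel debug register
 * layout): bits 0-7 hold the L0/G0..L3/G3 enable pairs tested by
 * DBREG_DR7_ENABLED(), and for breakpoint n, bits 16+4n..19+4n hold a
 * 2-bit access type followed by a 2-bit length. The value 0x02 is an
 * undefined encoding for both fields, hence the EINVAL above.
 */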
2975
2976 /*
2977 * Return > 0 if a hardware breakpoint has been hit, and the
2978 * breakpoint was in user space. Return 0, otherwise.
2979 */
2980 int
2981 user_dbreg_trap(void)
2982 {
2983 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
2984 u_int32_t bp; /* breakpoint bits extracted from dr6 */
2985 int nbp; /* number of breakpoints that triggered */
2986 caddr_t addr[4]; /* breakpoint addresses */
2987 int i;
2988
2989 dr7 = rdr7();
2990 if ((dr7 & 0x000000ff) == 0) {
2991 /*
2992 * all of the local-enable and global-enable bits in
2993 * the dr7 register are zero, thus the trap couldn't
2994 * have been caused by the hardware debug registers
2995 */
2996 return 0;
2997 }
2998
2999 nbp = 0;
3000 dr6 = rdr6();
3001 bp = dr6 & 0x0000000f;
3002
3003 if (!bp) {
3004 /*
3005 * None of the breakpoint bits are set, meaning this
3006 * trap was not caused by any of the debug registers
3007 */
3008 return 0;
3009 }
3010
3011 /*
3012 * At least one of the breakpoints was hit; check to see
3013 * which ones, and whether any of them are user space addresses
3014 */
3015
3016 if (bp & 0x01) {
3017 addr[nbp++] = (caddr_t)rdr0();
3018 }
3019 if (bp & 0x02) {
3020 addr[nbp++] = (caddr_t)rdr1();
3021 }
3022 if (bp & 0x04) {
3023 addr[nbp++] = (caddr_t)rdr2();
3024 }
3025 if (bp & 0x08) {
3026 addr[nbp++] = (caddr_t)rdr3();
3027 }
3028
3029 for (i = 0; i < nbp; i++) {
3030 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
3031 /*
3032 * addr[i] is in user space
3033 */
3034 return nbp;
3035 }
3036 }
3037
3038 /*
3039 * None of the breakpoints are in user space.
3040 */
3041 return 0;
3042 }
3043
3044 #ifndef DEV_APIC
3045 #include <machine/apicvar.h>
3046
3047 /*
3048 * Provide stub functions so that the MADT APIC enumerator in the acpi
3049 * kernel module will link against a kernel without 'device apic'.
3050 *
3051 * XXX - This is a gross hack.
3052 */
3053 void
3054 apic_register_enumerator(struct apic_enumerator *enumerator)
3055 {
3056 }
3057
3058 void *
3059 ioapic_create(vm_paddr_t addr, int32_t apic_id, int intbase)
3060 {
3061 return (NULL);
3062 }
3063
3064 int
3065 ioapic_disable_pin(void *cookie, u_int pin)
3066 {
3067 return (ENXIO);
3068 }
3069
3070 int
3071 ioapic_get_vector(void *cookie, u_int pin)
3072 {
3073 return (-1);
3074 }
3075
3076 void
3077 ioapic_register(void *cookie)
3078 {
3079 }
3080
3081 int
3082 ioapic_remap_vector(void *cookie, u_int pin, int vector)
3083 {
3084 return (ENXIO);
3085 }
3086
3087 int
3088 ioapic_set_extint(void *cookie, u_int pin)
3089 {
3090 return (ENXIO);
3091 }
3092
3093 int
3094 ioapic_set_nmi(void *cookie, u_int pin)
3095 {
3096 return (ENXIO);
3097 }
3098
3099 int
3100 ioapic_set_polarity(void *cookie, u_int pin, enum intr_polarity pol)
3101 {
3102 return (ENXIO);
3103 }
3104
3105 int
3106 ioapic_set_triggermode(void *cookie, u_int pin, enum intr_trigger trigger)
3107 {
3108 return (ENXIO);
3109 }
3110
3111 void
3112 lapic_create(u_int apic_id, int boot_cpu)
3113 {
3114 }
3115
3116 void
3117 lapic_init(vm_paddr_t addr)
3118 {
3119 }
3120
3121 int
3122 lapic_set_lvt_mode(u_int apic_id, u_int lvt, u_int32_t mode)
3123 {
3124 return (ENXIO);
3125 }
3126
3127 int
3128 lapic_set_lvt_polarity(u_int apic_id, u_int lvt, enum intr_polarity pol)
3129 {
3130 return (ENXIO);
3131 }
3132
3133 int
3134 lapic_set_lvt_triggermode(u_int apic_id, u_int lvt, enum intr_trigger trigger)
3135 {
3136 return (ENXIO);
3137 }
3138 #endif
3139
3140 #ifdef KDB
3141
3142 /*
3143 * Provide inb() and outb() as functions. They are normally only
3144 * available as macros calling inlined functions, thus cannot be
3145 * called from the debugger.
3146 *
3147 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
3148 */
3149
3150 #undef inb
3151 #undef outb
3152
3153 /* silence compiler warnings */
3154 u_char inb(u_int);
3155 void outb(u_int, u_char);
3156
3157 u_char
3158 inb(u_int port)
3159 {
3160 u_char data;
3161 /*
3162 * We use %%dx and not %1 here because i/o is done at %dx and not at
3163 * %edx, while gcc generates inferior code (movw instead of movl)
3164 * if we tell it to load (u_short) port.
3165 */
3166 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
3167 return (data);
3168 }
3169
3170 void
3171 outb(u_int port, u_char data)
3172 {
3173 u_char al;
3174 /*
3175 * Use an unnecessary assignment to help gcc's register allocator.
3176 * This makes a large difference for gcc-1.40 and a tiny difference
3177 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for
3178 * best results. gcc-2.6.0 can't handle this.
3179 */
3180 al = data;
3181 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
3182 }
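/*
 * Editorial sketch: with inb()/outb() available as real symbols, I/O
 * ports can be poked from the ddb prompt, e.g. (illustrative only;
 * port 0x80 is the traditional POST diagnostic port):
 *
 * db> call outb(0x80, 0xff)
 * db> call inb(0x80)
 */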
3183
3184 #endif /* KDB */