/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/i386/i386/machdep.c 225617 2011-09-16 13:58:51Z kmacy $");

#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
#include "opt_xbox.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP
#include <sys/smp.h>
#endif
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#include <isa/rtc.h>

#include <net/netisr.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/vm86.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/smp.h>
#endif

#ifdef DEV_ISA
#include <x86/isa/icu.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>

int arch_i386_is_xbox = 0;
uint32_t arch_i386_xbox_memsize = 0;
#endif

#ifdef XEN
/* XEN includes */
#include <machine/xen/xen-os.h>
#include <xen/hypervisor.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>
#include <xen/xen_intr.h>

void Xhypervisor_callback(void);
void failsafe_callback(void);

extern trap_info_t trap_table[];
struct proc_ldt default_proc_ldt;
extern int init_first;
int running_xen = 1;
extern unsigned long physfree;
#endif /* XEN */

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern void init386(int first);
extern void dblfault_handler(void);

extern void printcpuinfo(void);	/* XXX header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

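/*
 * A user-supplied %cs is acceptable only if it selects user privilege,
 * and a user-supplied eflags may differ from the kernel-saved value
 * only in the PSL_USERCHANGE bits.
 */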
#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static int  set_fpcontext(struct thread *td, const mcontext_t *mcp);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

/* Intel ICH registers */
#define ICH_PMBASE	0x400
#define ICH_SMI_EN	(ICH_PMBASE + 0x30)

int	_udatasel, _ucodesel;
u_int	basemem;

int cold = 1;

#ifdef COMPAT_43
static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif
#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif

long Maxmem = 0;
long realmem = 0;

#ifdef PAE
FEATURE(pae, "Physical Address Extensions");
#endif

/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so that a pair of zeroes can signal the end of the chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

struct mtx icu_lock;

static void
cpu_startup(void *dummy)
{
	uintmax_t memsize;
	char *sysenv;

	/*
	 * On MacBooks, we have to prevent the legacy USB circuit from
	 * generating an SMI#, because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by clearing a bit in the SMI_EN (SMI Control and
	 * Enable) register of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	realmem = Maxmem;

	/*
	 * Display physical memory if SMBIOS reports a reasonable amount.
	 */
	memsize = 0;
	sysenv = getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
		freeenv(sysenv);
	}
	if (memsize < ptoa((uintmax_t)cnt.v_free_count))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
#ifndef XEN
	cpu_setregs();
#endif
}

/*
 * Send an interrupt to a process.
 *
 * The stack is set up to allow the sigcode stored
 * at its top to call the handler routine, followed by a
 * kernel call to the sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user-specified pc and psl.
 */
#ifdef COMPAT_43
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
		sf.sf_addr = 0;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
	    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	bzero(sf.sf_uc.uc_mcontext.__spare__,
	    sizeof(sf.sf_uc.uc_mcontext.__spare__));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif	/* COMPAT_FREEBSD4 */

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	struct segment_descriptor *sdp;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext);
	fpstate_drop(td);
	/*
	 * Unconditionally fill the fsbase and gsbase into the mcontext.
	 */
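	/*
	 * The descriptor stores the segment base split into a 24-bit
	 * low part and an 8-bit high part; reassemble it below.
	 */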
	sdp = &td->td_pcb->pcb_fsd;
	sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	bzero(sf.sf_uc.uc_mcontext.mc_spare1,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare1));
	bzero(sf.sf_uc.uc_mcontext.mc_spare2,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare2));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_esp - sizeof(struct sigframe);
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
#ifdef COMPAT_43
int
osigreturn(struct thread *td, struct osigreturn_args /* {
	struct osigcontext *sigcntxp;
} */ *uap)
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	int eflags, error;
	ksiginfo_t ksi;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
	    SIGPROCMASK_OLD);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * MPSAFE
 */
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args /* {
	const ucontext4 *sigcntxp;
} */ *uap)
{
	struct ucontext4 uc;
	struct trapframe *regs;
	struct ucontext4 *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			uprintf("pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}
#endif	/* COMPAT_FREEBSD4 */

/*
 * MPSAFE
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args /* {
	const struct __ucontext *sigcntxp;
} */ *uap)
{
	ucontext_t uc;
	struct trapframe *regs;
	ucontext_t *ucp;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	uint64_t tsc1, tsc2;
	uint64_t acnt, mcnt, perf;
	register_t reg;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);

	/*
	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9)-based logic fails.
	 */
	if (tsc_is_invariant && !tsc_perf_stat)
		return (EOPNOTSUPP);

#ifdef SMP
	if (smp_cpus > 1) {
		/* Schedule ourselves on the indicated cpu. */
		thread_lock(curthread);
		sched_bind(curthread, cpu_id);
		thread_unlock(curthread);
	}
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		tsc1 = rdtsc();
		DELAY(1000);
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		tsc2 = rdtsc();
		intr_restore(reg);
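		/*
		 * The TSC advanced at its invariant rate over the ~1 ms
		 * DELAY window; scale the delta to one second (x1000) and
		 * by the effective/nominal frequency ratio (APERF/MPERF)
		 * to estimate the current clock rate.
		 */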
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
	} else {
		tsc1 = rdtsc();
		DELAY(1000);
		tsc2 = rdtsc();
		intr_restore(reg);
		*rate = (tsc2 - tsc1) * 1000;
	}

#ifdef SMP
	if (smp_cpus > 1) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}
#endif

	return (0);
}

#ifdef XEN

void
cpu_halt(void)
{
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}

int scheduler_running;

static void
cpu_idle_hlt(int busy)
{

	scheduler_running = 1;
	enable_intr();
	idle_block();
}

#else
/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

#endif

void (*cpu_idle_hook)(void) = NULL;	/* ACPI idle hook. */
static int	cpu_ident_amdc1e = 0;	/* AMD C1E supported. */
static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");

#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2

static void
cpu_idle_acpi(int busy)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook();
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}

#ifndef XEN
static void
cpu_idle_hlt(int busy)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;
	/*
	 * We must absolutely guarantee that hlt is the next instruction
	 * after sti or we introduce a timing window.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}
#endif

/*
 * MWAIT cpu power states.  Lower 4 bits are sub-states.
 */
#define	MWAIT_C0	0xf0
#define	MWAIT_C1	0x00
#define	MWAIT_C2	0x10
#define	MWAIT_C3	0x20
#define	MWAIT_C4	0x30

static void
cpu_idle_mwait(int busy)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_MWAIT;
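	/*
	 * MONITOR is armed on the per-CPU state word, so a remote store
	 * of STATE_RUNNING (see cpu_idle_wakeup()) terminates the MWAIT.
	 */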
	if (!sched_runnable()) {
		cpu_monitor(state, 0, 0);
		if (*state == STATE_MWAIT)
			cpu_mwait(0, MWAIT_C1);
	}
	*state = STATE_RUNNING;
}

static void
cpu_idle_spin(int busy)
{
	int *state;
	int i;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_RUNNING;
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}
}

/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * Reference:
 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 *   #32559 revision 3.00+
 */
#define	MSR_AMDK8_IPM		0xc0010055
#define	AMDK8_SMIONCMPHALT	(1ULL << 27)
#define	AMDK8_C1EONCMPHALT	(1ULL << 28)
#define	AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)

static void
cpu_probe_amdc1e(void)
{

	/*
	 * Detect the presence of the C1E capability, mostly on recent
	 * dual-core (and later) K8-family CPUs.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
	}
}

#ifdef XEN
void (*cpu_idle_fn)(int) = cpu_idle_hlt;
#else
void (*cpu_idle_fn)(int) = cpu_idle_acpi;
#endif

void
cpu_idle(int busy)
{
#ifndef XEN
	uint64_t msr;
#endif

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
#if defined(MP_WATCHDOG) && !defined(XEN)
	ap_watchdog(PCPU_GET(cpuid));
#endif
#ifndef XEN
	/* If we are busy - try to use fast methods. */
	if (busy) {
		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
			cpu_idle_mwait(busy);
			goto out;
		}
	}
#endif

	/* If we have time - switch timers into idle mode. */
	if (!busy) {
		critical_enter();
		cpu_idleclock();
	}

#ifndef XEN
	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_ident_amdc1e && cpu_disable_deep_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
	}
#endif

	/* Call main idle method. */
	cpu_idle_fn(busy);

	/* Switch timers back into active mode. */
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
#ifndef XEN
out:
#endif
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{
	struct pcpu *pcpu;
	int *state;

	pcpu = pcpu_find(cpu);
	state = (int *)pcpu->pc_monitorbuf;
	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
	if (*state == STATE_SLEEPING)
		return (0);
	if (*state == STATE_MWAIT)
		*state = STATE_RUNNING;
	return (1);
}

/*
 * Ordered by speed/power consumption.
 */
struct {
	void	*id_fn;
	char	*id_name;
} idle_tbl[] = {
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_hlt, "hlt" },
	{ cpu_idle_acpi, "acpi" },
	{ NULL, NULL }
};

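/*
 * Report the idle methods usable on this CPU: "mwait" only with CPUID
 * MONITOR/MWAIT support, "acpi" only once an ACPI idle hook is installed.
 */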
static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");

static int
idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	int error;
	char *p;
	int i;

	p = "unknown";
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		if (strcmp(idle_tbl[i].id_name, buf))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");

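/*
 * 64-bit atomic loads and stores default to the cmpxchg8b-free i386
 * implementations until cpu_probe_cmpxchg8b() upgrades them.
 */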
uint64_t (*atomic_load_acq_64)(volatile uint64_t *) =
    atomic_load_acq_64_i386;
void (*atomic_store_rel_64)(volatile uint64_t *, uint64_t) =
    atomic_store_rel_64_i386;

static void
cpu_probe_cmpxchg8b(void)
{

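	/*
	 * Rise CPUs reportedly implement cmpxchg8b without advertising
	 * CPUID_CX8, so treat that vendor as capable as well.
	 */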
	if ((cpu_feature & CPUID_CX8) != 0 ||
	    cpu_vendor_id == CPU_VENDOR_RISE) {
		atomic_load_acq_64 = atomic_load_acq_64_i586;
		atomic_store_rel_64 = atomic_store_rel_64_i586;
	}
}

/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pc->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = imgp->entry_addr;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = imgp->ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;
	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure %edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();

	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

u_long bootdev;		/* not a struct cdev *- encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;

#ifdef XEN
union descriptor *gdt;
union descriptor *ldt;
#else
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#endif
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */
struct mtx dt_lock;			/* lock for GDT and LDT */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

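/*
 * The double fault handler runs on its own TSS with a private stack,
 * since the faulting thread's kernel stack may itself be the problem.
 */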
static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern vm_offset_t proc0kstack;

/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUFS_SEL	2 %fs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUGS_SEL	3 %gs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUCODE_SEL	6 Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUDATA_SEL	7 Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	.ssd_base = 0x400,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
#ifndef XEN
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GLDT_SEL	10 LDT Descriptor */
{	.ssd_base = (int) ldt,
	.ssd_limit = sizeof(ldt)-1,
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	.ssd_base = (int) ldt,
	.ssd_limit = (512 * sizeof(union descriptor)-1),
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	.ssd_base = (int) &dblfault_tss,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GNDIS_SEL	18 NDIS Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
#endif /* !XEN */
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
};

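/*
 * Fill in an IDT gate descriptor; the handler offset is split across
 * the low and high words of the gate.
 */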
1900 void
1901 setidt(idx, func, typ, dpl, selec)
1902 int idx;
1903 inthand_t *func;
1904 int typ;
1905 int dpl;
1906 int selec;
1907 {
1908 struct gate_descriptor *ip;
1909
1910 ip = idt + idx;
1911 ip->gd_looffset = (int)func;
1912 ip->gd_selector = selec;
1913 ip->gd_stkcpy = 0;
1914 ip->gd_xx = 0;
1915 ip->gd_type = typ;
1916 ip->gd_dpl = dpl;
1917 ip->gd_p = 1;
1918 ip->gd_hioffset = ((int)func)>>16 ;
1919 }
1920
1921 extern inthand_t
1922 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1923 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1924 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1925 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1926 IDTVEC(xmm),
1927 #ifdef KDTRACE_HOOKS
1928 IDTVEC(dtrace_ret),
1929 #endif
1930 IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = (ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}
}

/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
{
	uint64_t idtr, gdtr;

	idtr = ridt();
	db_printf("idtr\t0x%08x/%04x\n",
	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
	gdtr = rgdt();
	db_printf("gdtr\t0x%08x/%04x\n",
	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
	db_printf("ldtr\t0x%04x\n", rldt());
	db_printf("tr\t0x%04x\n", rtr());
	db_printf("cr0\t0x%08x\n", rcr0());
	db_printf("cr2\t0x%08x\n", rcr2());
	db_printf("cr3\t0x%08x\n", rcr3());
	db_printf("cr4\t0x%08x\n", rcr4());
}
#endif

void
sdtossd(struct segment_descriptor *sd, struct soft_segment_descriptor *ssd)
{

	ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type = sd->sd_type;
	ssd->ssd_dpl = sd->sd_dpl;
	ssd->ssd_p = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran = sd->sd_gran;
}
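
/*
 * sdtossd() is the inverse of ssdtosd(): the hardware descriptor keeps
 * the base split as sd_hibase (upper 8 bits) and sd_lobase (lower 24
 * bits), so, illustratively, sd_hibase = 0x12 and sd_lobase = 0x345678
 * reassemble to ssd_base = 0x12345678.
 */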

#ifndef XEN
static int
add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
{
	int i, insert_idx, physmap_idx;

	physmap_idx = *physmap_idxp;

	if (boothowto & RB_VERBOSE)
		printf("SMAP type=%02x base=%016llx len=%016llx\n",
		    smap->type, smap->base, smap->length);

	if (smap->type != SMAP_TYPE_MEMORY)
		return (1);

	if (smap->length == 0)
		return (1);

#ifndef PAE
	if (smap->base > 0xffffffff) {
		printf("%uK of memory above 4GB ignored\n",
		    (u_int)(smap->length / 1024));
		return (1);
	}
#endif

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = physmap_idx + 2;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (smap->base < physmap[i + 1]) {
			if (smap->base + smap->length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= physmap_idx &&
	    smap->base + smap->length == physmap[insert_idx]) {
		physmap[insert_idx] = smap->base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && smap->base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += smap->length;
		return (1);
	}

	physmap_idx += 2;
	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = smap->base;
	physmap[insert_idx + 1] = smap->base + smap->length;
	return (1);
}
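
/*
 * Worked example (values illustrative): physmap[] stores base/bound
 * pairs, so inserting { base 0x0, len 0x9f000 } followed by
 * { base 0x100000, len 0x3ff00000 } leaves:
 *
 *	physmap[0] = 0x00000000		physmap[1] = 0x0009f000
 *	physmap[2] = 0x00100000		physmap[3] = 0x40000000
 *
 * A later entry starting at 0x40000000 would simply be coalesced into
 * the second pair by the "append" case above.
 */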

static void
basemem_setup(void)
{
	vm_paddr_t pa;
	pt_entry_t *pte;
	int i;

	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
		    basemem);
		basemem = 640;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is a `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE)
		pmap_kenter(KERNBASE + pa, pa);

	/*
	 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
	 * the vm86 page table so that vm86 can scribble on them using
	 * the vm86 map too.  XXX: why 2 ways for this and only 1 way for
	 * page 0, at least as initialized here?
	 */
	pte = (pt_entry_t *)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
}
#endif

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(int first)
{
	int has_smap, off, physmap_idx, pa_indx, da_indx;
	u_long physmem_tunable, memtest;
	vm_paddr_t physmap[PHYSMAP_SIZE];
	pt_entry_t *pte;
	quad_t dcons_addr, dcons_size;
#ifndef XEN
	int hasbrokenint12, i;
	u_int extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_paddr_t pa;
	struct bios_smap *smap, *smapbase, *smapend;
	u_int32_t smapsize;
	caddr_t kmdp;
#endif

	has_smap = 0;
#if defined(XEN)
	Maxmem = xen_start_info->nr_pages - init_first;
	physmem = Maxmem;
	basemem = 0;
	physmap[0] = init_first << PAGE_SHIFT;
	physmap[1] = ptoa(Maxmem) - round_page(msgbufsize);
	physmap_idx = 0;
#else
#ifdef XBOX
	if (arch_i386_is_xbox) {
		/*
		 * We queried the memory size before, so chop off 4MB for
		 * the framebuffer and inform the OS of this.
		 */
		physmap[0] = 0;
		physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) -
		    XBOX_FB_SIZE;
		physmap_idx = 0;
		goto physmap_done;
	}
#endif
	bzero(&vmf, sizeof(vmf));
	bzero(physmap, sizeof(physmap));
	basemem = 0;

	/*
	 * Check if the loader supplied an SMAP memory map.  If so,
	 * use that and do not make any VM86 calls.
	 */
	physmap_idx = 0;
	smapbase = NULL;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf32 kernel");
	if (kmdp != NULL)
		smapbase = (struct bios_smap *)preload_search_info(kmdp,
		    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase != NULL) {
		/*
		 * subr_module.c says:
		 * "Consumer may safely assume that size value precedes data."
		 * i.e., an int32_t immediately precedes the SMAP data.
		 */
		smapsize = *((u_int32_t *)smapbase - 1);
		smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
		has_smap = 1;

		for (smap = smapbase; smap < smapend; smap++)
			if (!add_smap_entry(smap, physmap, &physmap_idx))
				break;
		goto have_smap;
	}

	/*
	 * Some newer BIOSes have a broken INT 12H implementation
	 * which causes a kernel panic immediately.  In this case, we
	 * need to use the SMAP to determine the base memory size.
	 */
	hasbrokenint12 = 0;
	TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
	if (hasbrokenint12 == 0) {
		/* Use INT12 to determine base memory size. */
		vm86_intcall(0x12, &vmf);
		basemem = vmf.vmf_ax;
		basemem_setup();
	}

	/*
	 * Fetch the memory map with INT 15:E820.  Map page 1 R/W into
	 * the kernel page table so we can use it as a buffer.  The
	 * kernel will unmap this page later.
	 */
	pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		has_smap = 1;
		if (!add_smap_entry(smap, physmap, &physmap_idx))
			break;
	} while (vmf.vmf_ebx != 0);
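
	/*
	 * For reference, a typical BIOS returns entries such as (values
	 * illustrative only):
	 *
	 *	SMAP type=01 base=0000000000000000 len=000000000009fc00
	 *	SMAP type=02 base=000000000009fc00 len=0000000000000400
	 *	SMAP type=01 base=0000000000100000 len=000000003fef0000
	 *
	 * where type 1 (SMAP_TYPE_MEMORY) is usable RAM; add_smap_entry()
	 * rejects everything else.
	 */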

have_smap:
	/*
	 * If we didn't fetch the "base memory" size from INT12,
	 * figure it out from the SMAP (or just guess).
	 */
	if (basemem == 0) {
		for (i = 0; i <= physmap_idx; i += 2) {
			if (physmap[i] == 0x00000000) {
				basemem = physmap[i + 1] / 1024;
				break;
			}
		}

		/* XXX: If we couldn't find basemem from SMAP, just guess. */
		if (basemem == 0)
			basemem = 640;
		basemem_setup();
	}

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed to find an SMAP, figure out the extended
	 * memory size.  We will then build a simple memory map with
	 * two segments, one for "base memory" and the second for
	 * "extended memory".  Note that "extended memory" starts at a
	 * physical address of 1MB and that both basemem and extmem
	 * are in units of 1KB.
	 *
	 * First, try to fetch the extended memory size via INT 15:E801.
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
		/*
		 * If INT15:E801 fails, this is our last ditch effort
		 * to determine the extended memory size.  Currently
		 * we prefer the RTC value over INT15:88.
		 */
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
#endif
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1]);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	/*
	 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
	 * the amount of memory in the system.
	 */
	if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
		Maxmem = atop(physmap[physmap_idx + 1]);

	/*
	 * By default keep the memtest enabled.  Use a general name so that
	 * one could eventually do more with the code than just disable it.
	 */
	memtest = 1;
	TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];
	pte = CMAP1;

	/*
	 * Get dcons buffer address.
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;

#ifndef XEN
	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr = (int *)CADDR1;

			full = FALSE;
			/*
			 * Block out kernel memory as not available.
			 */
			if (pa >= KERNLOAD && pa < first)
				goto do_dump_avail;

			/*
			 * Block out the dcons buffer.
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = FALSE;
			if (memtest == 0)
				goto skip_memtest;

			/*
			 * Map page into kernel: valid, read/write,
			 * non-cacheable.
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = TRUE;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = TRUE;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = TRUE;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = TRUE;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

skip_memtest:
			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that the recorded "end" points one page
			 * past the last valid page, making the range
			 * >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we are at or past the end, bump up
			 * Maxmem so that we keep going.  The first bad
			 * page will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = TRUE;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == DUMP_AVAIL_ARRAY_END) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa;	/* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	*pte = 0;
	invltlb();
#else
	phys_avail[0] = physfree;
	phys_avail[1] = xen_start_info->nr_pages * PAGE_SIZE;
	dump_avail[0] = 0;
	dump_avail[1] = xen_start_info->nr_pages * PAGE_SIZE;
#endif

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(msgbufsize) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(msgbufsize);

	/* Map the message buffer. */
	for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
		    off);

	PT_UPDATES_FLUSH();
}
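
/*
 * For orientation (illustrative values, not guaranteed): after
 * getmemsize() returns, phys_avail[] holds base/bound pairs just like
 * physmap[], e.g.
 *
 *	phys_avail[0] = 0x1000		(page 0 is masked off above)
 *	phys_avail[1] = 0x9f000		(end of usable base memory)
 *	phys_avail[2] = first		(bootstrap memory is skipped)
 *	phys_avail[3] = top of RAM less round_page(msgbufsize)
 *
 * with the message buffer mapped in the pages trimmed from the last
 * chunk.
 */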

#ifdef XEN
#define MTOPSIZE (1 << (14 + PAGE_SHIFT))

void
init386(int first)
{
	unsigned long gdtmachpfn;
	int error, gsel_tss, metadata_missing, x, pa;
	size_t kstack0_sz;
	struct pcpu *pc;
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = {GSEL(GCODE_SEL, SEL_KPL),
		    (unsigned long)Xhypervisor_callback },
	};
	struct callback_register failsafe = {
		.type = CALLBACKTYPE_failsafe,
		.address = {GSEL(GCODE_SEL, SEL_KPL),
		    (unsigned long)failsafe_callback },
	};

	thread0.td_kstack = proc0kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
	kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack + kstack0_sz) - 1;

	/*
	 * This may be done better later if it gets more high level
	 * components in it.  If so just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

	metadata_missing = 0;
	if (xen_start_info->mod_start) {
		preload_metadata = (caddr_t)xen_start_info->mod_start;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		metadata_missing = 1;
	}
	if (envmode == 1)
		kern_envp = static_env;
	else if ((caddr_t)xen_start_info->cmd_line)
		kern_envp = xen_setbootenv((caddr_t)xen_start_info->cmd_line);

	boothowto |= xen_boothowto(kern_envp);

	/* Init basic tunables, hz etc */
	init_param1();

	/*
	 * XEN occupies a portion of the upper virtual address space.
	 * At its base it manages an array mapping machine page frames
	 * to physical page frames, hence we need to be able to
	 * access 4GB - (64MB - 4MB + 64KB).
	 */
	gdt_segs[GPRIV_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GUFS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GUGS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GUCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GUDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
	gdt_segs[GBIOSLOWMEM_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START +
	    MTOPSIZE);

	pc = &__pcpu[0];
	gdt_segs[GPRIV_SEL].ssd_base = (int)pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int)&pc->pc_common_tss;

	PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V | PG_RW);
	bzero(gdt, PAGE_SIZE);
	for (x = 0; x < NGDT; x++)
		ssdtosd(&gdt_segs[x], &gdt[x].sd);

	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);

	gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
	PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V);
	PANIC_IF(HYPERVISOR_set_gdt(&gdtmachpfn, 512) != 0);
	lgdt(&r_gdt);
	gdtset = 1;

	if ((error = HYPERVISOR_set_trap_table(trap_table)) != 0) {
		panic("set_trap_table failed - error %d\n", error);
	}

	error = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
	if (error == 0)
		error = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
#if CONFIG_XEN_COMPAT <= 0x030002
	if (error == -ENOXENSYS)
		HYPERVISOR_set_callbacks(GSEL(GCODE_SEL, SEL_KPL),
		    (unsigned long)Xhypervisor_callback,
		    GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback);
#endif
	pcpu_init(pc, 0, sizeof(struct pcpu));
	for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
		pmap_kenter(pa + KERNBASE, pa);
	dpcpu_init((void *)(first + KERNBASE), 0);
	first += DPCPU_SIZE;
	physfree += DPCPU_SIZE;
	init_first += DPCPU_SIZE / PAGE_SIZE;

	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	PCPU_SET(curpcb, thread0.td_pcb);

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL,
	    MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);

	/* make ldt memory segments */
	PT_SET_MA(ldt, xpmap_ptom(VTOP(ldt)) | PG_V | PG_RW);
	bzero(ldt, PAGE_SIZE);
	ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	default_proc_ldt.ldt_base = (caddr_t)ldt;
	default_proc_ldt.ldt_len = 6;
	_default_ldt = (int)&default_proc_ldt;
	PCPU_SET(currentldt, _default_ldt);
	PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW);
	xen_set_ldt((unsigned long)ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));

#if defined(XEN_PRIVILEGED)
	/*
	 * Initialize the i8254 before the console so that console
	 * initialization can use DELAY().
	 */
	i8254_init();
#endif

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

#ifdef DEV_ISA
	elcr_probe();
	atpic_startup();
#endif

#ifdef DDB
	ksym_start = bootinfo.bi_symtab;
	ksym_end = bootinfo.bi_esymtab;
#endif

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
	    kstack0_sz - sizeof(struct pcb) - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL),
	    PCPU_GET(common_tss.tss_esp0));

	/* pointer to selector slot for %fs/%gs */
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
#ifdef PAE
	dblfault_tss.tss_cr3 = (int)IdlePDPT;
#else
	dblfault_tss.tss_cr3 = (int)IdlePTD;
#endif
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);
	init_param2(physmem);

	/* now running on new page tables, configured, and u/iom is accessible */

	msgbufinit(msgbufp, msgbufsize);
	/* transfer to user mode */

	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
#ifdef PAE
	thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
#else
	thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
#endif
	thread0.td_pcb->pcb_ext = 0;
	thread0.td_frame = &proc0_tf;
	thread0.td_pcb->pcb_fsd = PCPU_GET(fsgs_gdt)[0];
	thread0.td_pcb->pcb_gsd = PCPU_GET(fsgs_gdt)[1];

	cpu_probe_amdc1e();
	cpu_probe_cmpxchg8b();
}

#else
void
init386(int first)
{
	struct gate_descriptor *gdp;
	int gsel_tss, metadata_missing, x, pa;
	size_t kstack0_sz;
	struct pcpu *pc;

	thread0.td_kstack = proc0kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
	kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack + kstack0_sz) - 1;

	/*
	 * This may be done better later if it gets more high level
	 * components in it.  If so just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

	metadata_missing = 0;
	if (bootinfo.bi_modulep) {
		preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		metadata_missing = 1;
	}
	if (envmode == 1)
		kern_envp = static_env;
	else if (bootinfo.bi_envp)
		kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;

	/* Init basic tunables, hz etc */
	init_param1();

	/*
	 * Make gdt memory segments.  All segments cover the full 4GB
	 * of address space and permissions are enforced at page level.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);

	pc = &__pcpu[0];
	gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GPRIV_SEL].ssd_base = (int)pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int)&pc->pc_common_tss;

	for (x = 0; x < NGDT; x++)
		ssdtosd(&gdt_segs[x], &gdt[x].sd);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int)gdt;
	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
	lgdt(&r_gdt);

	pcpu_init(pc, 0, sizeof(struct pcpu));
	for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
		pmap_kenter(pa + KERNBASE, pa);
	dpcpu_init((void *)(first + KERNBASE), 0);
	first += DPCPU_SIZE;
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	PCPU_SET(curpcb, thread0.td_pcb);

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL,
	    MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);

	/* make ldt memory segments */
	ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
#ifdef KDTRACE_HOOKS
	setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
#endif

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int)idt;
	lidt(&r_idt);

#ifdef XBOX
	/*
	 * The following code queries the PCI ID of 0:0:0.  For the XBOX,
	 * this should be 0x10de / 0x02a5.
	 *
	 * This is exactly what Linux does.
	 */
	outl(0xcf8, 0x80000000);
	if (inl(0xcfc) == 0x02a510de) {
		arch_i386_is_xbox = 1;
		pic16l_setled(XBOX_LED_GREEN);

		/*
		 * We are an XBOX, but we may have either 64MB or 128MB of
		 * memory.  The PCI host bridge should be programmed for this,
		 * so we just query it.
		 */
		outl(0xcf8, 0x80000084);
		arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
	}
#endif /* XBOX */

	/*
	 * Initialize the i8254 before the console so that console
	 * initialization can use DELAY().
	 */
	i8254_init();

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

#ifdef DEV_ISA
	elcr_probe();
	atpic_startup();
#endif

#ifdef DDB
	ksym_start = bootinfo.bi_symtab;
	ksym_end = bootinfo.bi_esymtab;
#endif

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
	    kstack0_sz - sizeof(struct pcb) - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof(struct i386tss)) << 16);
	ltr(gsel_tss);

	/* pointer to selector slot for %fs/%gs */
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
#ifdef PAE
	dblfault_tss.tss_cr3 = (int)IdlePDPT;
#else
	dblfault_tss.tss_cr3 = (int)IdlePTD;
#endif
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);
	init_param2(physmem);

	/* now running on new page tables, configured, and u/iom is accessible */

	msgbufinit(msgbufp, msgbufsize);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int)&IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;

	/* XXX does this work? */
	/* XXX yes! */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
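
	/*
	 * An illustrative sketch of how userland enters through this
	 * gate: the traditional SysV syscall path executes a far call
	 * through LDT slot 0, e.g.
	 *
	 *	lcall	$7, $0		-- selector 7 = LDT slot 0, RPL 3
	 *
	 * which vectors through the SDT_SYS386CGT descriptor built
	 * above into IDTVEC(lcall_syscall).
	 */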

	/* transfer to user mode */

	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
#ifdef PAE
	thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
#else
	thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
#endif
	thread0.td_pcb->pcb_ext = 0;
	thread0.td_frame = &proc0_tf;

	cpu_probe_amdc1e();
	cpu_probe_cmpxchg8b();
}
#endif

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}

void
spinlock_enter(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		flags = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_flags = flags;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	critical_exit();
	flags = td->td_md.md_saved_flags;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(flags);
}
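
/*
 * The count-based recursion above makes nested use safe; illustrating
 * the interrupt-state handling:
 *
 *	spinlock_enter();	-- interrupts disabled, count 0 -> 1
 *	spinlock_enter();	-- count 1 -> 2, saved flags untouched
 *	spinlock_exit();	-- count 2 -> 1
 *	spinlock_exit();	-- count 1 -> 0, interrupts restored
 *
 * Only the outermost enter saves the flags and only the matching
 * outermost exit restores them.
 */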

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	GIANT_REQUIRED;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");

	/* Put the problematic entry (#6) at the end of the lower page. */
	new_idt = (struct gate_descriptor *)
	    (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (u_int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
}
#endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
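
/*
 * Layout check for the arithmetic above (illustrative): gate
 * descriptors are 8 bytes, so placing new_idt at
 * tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor) puts entries
 * 0-6 in the last 56 bytes of the first page; the problematic entry #6
 * therefore ends exactly at the page boundary, which is what the
 * published F00F workaround requires.
 */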

/*
 * Construct a PCB from a trapframe.  This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger.  We have the context in the trapframe, but base the trace
 * on the PCB.  The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_edi = tf->tf_edi;
	pcb->pcb_esi = tf->tf_esi;
	pcb->pcb_ebp = tf->tf_ebp;
	pcb->pcb_ebx = tf->tf_ebx;
	pcb->pcb_eip = tf->tf_eip;
	pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
}

int
ptrace_set_pc(struct thread *td, u_long addr)
{

	td->td_frame->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{

	td->td_frame->tf_eflags |= PSL_T;
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

	td->td_frame->tf_eflags &= ~PSL_T;
	return (0);
}
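
/*
 * PSL_T is the hardware trap flag: with it set the CPU delivers a
 * debug exception after each instruction, which the kernel turns into
 * a stop that the tracing process observes.  An illustrative debugger
 * call that ends up in ptrace_single_step() above:
 *
 *	ptrace(PT_STEP, pid, (caddr_t)1, 0);
 */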

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	pcb = td->td_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (fill_frame_regs(tp, regs));
}

int
fill_frame_regs(struct trapframe *tp, struct reg *regs)
{

	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	pcb = td->td_pcb;
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

#ifdef CPU_ENABLE_SSE
static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	bzero(sv_87, sizeof(*sv_87));

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
}

static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
}
#endif /* CPU_ENABLE_SSE */
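
/*
 * The converters above exist because fxsave (struct savexmm) and
 * fnsave (struct save87) lay out the same x87 control words and
 * register stack differently; fill_fpregs() and set_fpregs() below use
 * them so that ptrace(2) consumers always see the classic save87
 * layout no matter whether cpu_fxsr is set.
 */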

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{

	KASSERT(td == curthread || TD_IS_SUSPENDED(td),
	    ("not suspended thread %p", td));
#ifdef DEV_NPX
	npxgetregs(td);
#else
	bzero(fpregs, sizeof(*fpregs));
#endif
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		fill_fpregs_xmm(&td->td_pcb->pcb_user_save.sv_xmm,
		    (struct save87 *)fpregs);
	else
#endif /* CPU_ENABLE_SSE */
		bcopy(&td->td_pcb->pcb_user_save.sv_87, fpregs,
		    sizeof(*fpregs));
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{

#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		set_fpregs_xmm((struct save87 *)fpregs,
		    &td->td_pcb->pcb_user_save.sv_xmm);
	else
#endif /* CPU_ENABLE_SSE */
		bcopy(fpregs, &td->td_pcb->pcb_user_save.sv_87,
		    sizeof(*fpregs));
#ifdef DEV_NPX
	npxuserinited(td);
#endif
	return (0);
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct trapframe *tp;
	struct segment_descriptor *sdp;

	tp = td->td_frame;

	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_esp);
	PROC_UNLOCK(curthread->td_proc);
	mcp->mc_gs = td->td_pcb->pcb_gs;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_es = tp->tf_es;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_edi = tp->tf_edi;
	mcp->mc_esi = tp->tf_esi;
	mcp->mc_ebp = tp->tf_ebp;
	mcp->mc_isp = tp->tf_isp;
	mcp->mc_eflags = tp->tf_eflags;
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_eax = 0;
		mcp->mc_edx = 0;
		mcp->mc_eflags &= ~PSL_C;
	} else {
		mcp->mc_eax = tp->tf_eax;
		mcp->mc_edx = tp->tf_edx;
	}
	mcp->mc_ebx = tp->tf_ebx;
	mcp->mc_ecx = tp->tf_ecx;
	mcp->mc_eip = tp->tf_eip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_esp = tp->tf_esp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp);
	sdp = &td->td_pcb->pcb_fsd;
	mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
	bzero(mcp->mc_spare1, sizeof(mcp->mc_spare1));
	bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
	return (0);
}
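
/*
 * get_mcontext() and set_mcontext() are the machine-dependent half of
 * getcontext(2)/setcontext(2); an illustrative userland round trip:
 *
 *	ucontext_t uc;
 *	getcontext(&uc);		-- filled in via get_mcontext()
 *	uc.uc_mcontext.mc_eip = ...;	-- tweak the saved context
 *	setcontext(&uc);		-- validated via set_mcontext()
 *
 * GET_MC_CLEAR_RET is used on the syscall path so a saved context does
 * not replay the interrupted syscall's return value.
 */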

/*
 * Set machine context.
 *
 * Note that we only set the user-modifiable flags, and we never touch
 * the %cs selector.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tp;
	int eflags, ret;

	tp = td->td_frame;
	if (mcp->mc_len != sizeof(*mcp))
		return (EINVAL);
	eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
	    (tp->tf_eflags & ~PSL_USERCHANGE);
	if ((ret = set_fpcontext(td, mcp)) == 0) {
		tp->tf_fs = mcp->mc_fs;
		tp->tf_es = mcp->mc_es;
		tp->tf_ds = mcp->mc_ds;
		tp->tf_edi = mcp->mc_edi;
		tp->tf_esi = mcp->mc_esi;
		tp->tf_ebp = mcp->mc_ebp;
		tp->tf_ebx = mcp->mc_ebx;
		tp->tf_edx = mcp->mc_edx;
		tp->tf_ecx = mcp->mc_ecx;
		tp->tf_eax = mcp->mc_eax;
		tp->tf_eip = mcp->mc_eip;
		tp->tf_eflags = eflags;
		tp->tf_esp = mcp->mc_esp;
		tp->tf_ss = mcp->mc_ss;
		td->td_pcb->pcb_gs = mcp->mc_gs;
		ret = 0;
	}
	return (ret);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{

#ifndef DEV_NPX
	mcp->mc_fpformat = _MC_FPFMT_NODEV;
	mcp->mc_ownedfp = _MC_FPOWNED_NONE;
	bzero(mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
#else
	mcp->mc_ownedfp = npxgetregs(td);
	bcopy(&td->td_pcb->pcb_user_save, &mcp->mc_fpstate,
	    sizeof(mcp->mc_fpstate));
	mcp->mc_fpformat = npxformat();
#endif
}

static int
set_fpcontext(struct thread *td, const mcontext_t *mcp)
{

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
	    mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
#ifdef DEV_NPX
#ifdef CPU_ENABLE_SSE
		if (cpu_fxsr)
			((union savefpu *)&mcp->mc_fpstate)->sv_xmm.sv_env.
			    en_mxcsr &= cpu_mxcsr_mask;
#endif
		npxsetregs(td, (union savefpu *)&mcp->mc_fpstate);
#endif
	} else
		return (EINVAL);
	return (0);
}

static void
fpstate_drop(struct thread *td)
{

	KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
	critical_enter();
#ifdef DEV_NPX
	if (PCPU_GET(fpcurthread) == td)
		npxdrop();
#endif
	/*
	 * XXX force a full drop of the npx.  The above only drops it if we
	 * owned it.  npxgetregs() has the same bug in the !cpu_fxsr case.
	 *
	 * XXX I don't much like npxgetregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of npxgetregs()... perhaps we just
	 * have too many layers.
	 */
	curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
	    PCB_NPXUSERINITDONE);
	critical_exit();
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[4] = rdr4();
		dbregs->dr[5] = rdr5();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[4] = 0;
		dbregs->dr[5] = 0;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr4(dbregs->dr[4]);
		load_dr5(dbregs->dr[5]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP.
		 */
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
		}

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within the
		 * process's address space.  If a process could do this, it
		 * could halt the system by setting a breakpoint in the kernel
		 * (if ddb was enabled).  Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * the process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}
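
/*
 * For reference, dr7 packs two enable bits per breakpoint in its low
 * byte and a 4-bit access/length pair per breakpoint from bit 16 up,
 * which is what the DBREG_DR7_ACCESS()/DBREG_DR7_LEN() checks above
 * decode.  An illustrative request for a 4-byte write watchpoint in
 * slot 0, assuming the DBREG_DR7_SET() constructor and its companion
 * constants from <machine/reg.h>:
 *
 *	dbregs->dr[0] = (u_int)watch_addr;	-- hypothetical address
 *	dbregs->dr[7] = DBREG_DR7_SET(0, DBREG_DR7_LEN_4,
 *	    DBREG_DR7_WRONLY, DBREG_DR7_LOCAL_ENABLE);
 */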

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * All local and global enable bits in the dr7 register
		 * are zero, thus the trap couldn't have been caused by
		 * the hardware debug registers.
		 */
		return (0);
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers.
		 */
		return (0);
	}

	/*
	 * At least one of the breakpoints was hit; check to see
	 * which ones and whether any of them are user space addresses.
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return (nbp);
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return (0);
}

#ifdef KDB

/*
 * Provide inb() and outb() as functions.  They are normally only available as
 * inline functions, thus cannot be called from the debugger.
 */

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);

u_char
inb_(u_short port)
{
	return inb(port);
}

void
outb_(u_short port, u_char data)
{
	outb(port, data);
}
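
/*
 * From the ddb prompt these can be called directly, e.g. to read the
 * keyboard controller status port (port number illustrative):
 *
 *	db> call inb_(0x64)
 *
 * which the inline inb()/outb() variants cannot do.
 */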

#endif /* KDB */