1 /*-
2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * William Jolitz.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
38 */
39
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42
43 #include "opt_atalk.h"
44 #include "opt_compat.h"
45 #include "opt_cpu.h"
46 #include "opt_ddb.h"
47 #include "opt_inet.h"
48 #include "opt_ipx.h"
49 #include "opt_isa.h"
50 #include "opt_kstack_pages.h"
51 #include "opt_maxmem.h"
52 #include "opt_msgbuf.h"
53 #include "opt_npx.h"
54 #include "opt_perfmon.h"
55
56 #include <sys/param.h>
57 #include <sys/proc.h>
58 #include <sys/systm.h>
59 #include <sys/bio.h>
60 #include <sys/buf.h>
61 #include <sys/bus.h>
62 #include <sys/callout.h>
63 #include <sys/cons.h>
64 #include <sys/cpu.h>
65 #include <sys/eventhandler.h>
66 #include <sys/exec.h>
67 #include <sys/imgact.h>
68 #include <sys/kdb.h>
69 #include <sys/kernel.h>
70 #include <sys/ktr.h>
71 #include <sys/linker.h>
72 #include <sys/lock.h>
73 #include <sys/malloc.h>
74 #include <sys/memrange.h>
75 #include <sys/msgbuf.h>
76 #include <sys/mutex.h>
77 #include <sys/pcpu.h>
78 #include <sys/ptrace.h>
79 #include <sys/reboot.h>
80 #include <sys/sched.h>
81 #include <sys/signalvar.h>
82 #include <sys/sysctl.h>
83 #include <sys/sysent.h>
84 #include <sys/sysproto.h>
85 #include <sys/ucontext.h>
86 #include <sys/vmmeter.h>
87
88 #include <vm/vm.h>
89 #include <vm/vm_extern.h>
90 #include <vm/vm_kern.h>
91 #include <vm/vm_page.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_pager.h>
95 #include <vm/vm_param.h>
96
97 #ifdef DDB
98 #ifndef KDB
99 #error KDB must be enabled in order for DDB to work!
100 #endif
101 #include <ddb/ddb.h>
102 #include <ddb/db_sym.h>
103 #endif
104
105 #include <pc98/pc98/pc98_machdep.h>
106
107 #include <net/netisr.h>
108
109 #include <machine/bootinfo.h>
110 #include <machine/clock.h>
111 #include <machine/cpu.h>
112 #include <machine/cputypes.h>
113 #include <machine/intr_machdep.h>
114 #include <machine/md_var.h>
115 #include <machine/pc/bios.h>
116 #include <machine/pcb.h>
117 #include <machine/pcb_ext.h>
118 #include <machine/proc.h>
119 #include <machine/reg.h>
120 #include <machine/sigframe.h>
121 #include <machine/specialreg.h>
122 #include <machine/vm86.h>
123 #ifdef PERFMON
124 #include <machine/perfmon.h>
125 #endif
126 #ifdef SMP
127 #include <machine/smp.h>
128 #endif
129
130 #ifdef DEV_ISA
131 #include <i386/isa/icu.h>
132 #endif
133
134 /* Sanity check for __curthread() */
135 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
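/*
* Illustrative sketch (not part of the build): with pc_curthread at
* offset 0, the __curthread() inline can fetch the current thread with
* a single instruction through the per-CPU %fs segment, roughly
*
*	movl	%fs:0, %eax
*
* which is why this offset is asserted here.
*/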
136
137 extern void init386(int first);
138 extern void dblfault_handler(void);
139
140 extern void printcpuinfo(void); /* XXX header file */
141 extern void finishidentcpu(void);
142 extern void panicifcpuunsupported(void);
143 extern void initializecpu(void);
144
145 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
146 #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
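/*
* Informal reading of the checks above (descriptive only): CS_SECURE()
* accepts a %cs selector only if its privilege level is user (SEL_UPL),
* and EFL_SECURE() accepts a new eflags value only if every bit that
* differs from the old value lies within PSL_USERCHANGE, i.e. only
* user-modifiable flag bits may change.
*/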
147
148 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
149 #define CPU_ENABLE_SSE
150 #endif
151
152 static void cpu_startup(void *);
153 static void fpstate_drop(struct thread *td);
154 static void get_fpcontext(struct thread *td, mcontext_t *mcp);
155 static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
156 #ifdef CPU_ENABLE_SSE
157 static void set_fpregs_xmm(struct save87 *, struct savexmm *);
158 static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
159 #endif /* CPU_ENABLE_SSE */
160 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
161
162 int need_pre_dma_flush; /* If 1, use wbinvd before DMA transfer. */
163 int need_post_dma_flush; /* If 1, use invd after DMA transfer. */
164
165 #ifdef DDB
166 extern vm_offset_t ksym_start, ksym_end;
167 #endif
168
169 int _udatasel, _ucodesel;
170 u_int basemem;
171
172 static int ispc98 = 1;
173 SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");
174
175 int cold = 1;
176
177 #ifdef COMPAT_43
178 static void osendsig(sig_t catcher, int sig, sigset_t *mask, u_long code);
179 #endif
180 #ifdef COMPAT_FREEBSD4
181 static void freebsd4_sendsig(sig_t catcher, int sig, sigset_t *mask,
182 u_long code);
183 #endif
184
185 long Maxmem = 0;
186 long realmem = 0;
187
188 #define PHYSMAP_SIZE (2 * 16)
189
190 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
191 vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
192
193 /* must be 2 less so that a 0/0 pair can signal the end of the chunks */
194 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
195 #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
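/*
* Layout note (illustrative): phys_avail[] and dump_avail[] hold
* start/end pairs, e.g.
*
*	phys_avail[0] = start0; phys_avail[1] = end0;
*	phys_avail[2] = start1; phys_avail[3] = end1;
*	...			followed by a 0, 0 pair
*
* so consumers iterate while phys_avail[indx + 1] != 0, as
* cpu_startup() does below.
*/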
196
197 struct kva_md_info kmi;
198
199 static struct trapframe proc0_tf;
200 struct pcpu __pcpu[MAXCPU];
201
202 struct mtx icu_lock;
203
204 struct mem_range_softc mem_range_softc;
205
206 static void
207 cpu_startup(dummy)
208 void *dummy;
209 {
210 /*
211 * Good {morning,afternoon,evening,night}.
212 */
213 startrtclock();
214 printcpuinfo();
215 panicifcpuunsupported();
216 #ifdef PERFMON
217 perfmon_init();
218 #endif
219 printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem),
220 ptoa((uintmax_t)Maxmem) / 1048576);
221 realmem = Maxmem;
222 /*
223 * Display any holes after the first chunk of extended memory.
224 */
225 if (bootverbose) {
226 int indx;
227
228 printf("Physical memory chunk(s):\n");
229 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
230 vm_paddr_t size;
231
232 size = phys_avail[indx + 1] - phys_avail[indx];
233 printf(
234 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
235 (uintmax_t)phys_avail[indx],
236 (uintmax_t)phys_avail[indx + 1] - 1,
237 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
238 }
239 }
240
241 vm_ksubmap_init(&kmi);
242
243 printf("avail memory = %ju (%ju MB)\n",
244 ptoa((uintmax_t)cnt.v_free_count),
245 ptoa((uintmax_t)cnt.v_free_count) / 1048576);
246
247 /*
248 * Set up buffers, so they can be used to read disk labels.
249 */
250 bufinit();
251 vm_pager_bufferinit();
252
253 cpu_setregs();
254 }
255
256 /*
257 * Send an interrupt (signal) to a process.
258 *
259 * Stack is set up to allow sigcode stored
260 * at top to call routine, followed by kcall
261 * to sigreturn routine below. After sigreturn
262 * resets the signal mask, the stack, and the
263 * frame pointer, it returns to the
264 * user-specified pc and psl.
265 */
266 #ifdef COMPAT_43
267 static void
268 osendsig(catcher, sig, mask, code)
269 sig_t catcher;
270 int sig;
271 sigset_t *mask;
272 u_long code;
273 {
274 struct osigframe sf, *fp;
275 struct proc *p;
276 struct thread *td;
277 struct sigacts *psp;
278 struct trapframe *regs;
279 int oonstack;
280
281 td = curthread;
282 p = td->td_proc;
283 PROC_LOCK_ASSERT(p, MA_OWNED);
284 psp = p->p_sigacts;
285 mtx_assert(&psp->ps_mtx, MA_OWNED);
286 regs = td->td_frame;
287 oonstack = sigonstack(regs->tf_esp);
288
289 /* Allocate space for the signal handler context. */
290 if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
291 SIGISMEMBER(psp->ps_sigonstack, sig)) {
292 fp = (struct osigframe *)(td->td_sigstk.ss_sp +
293 td->td_sigstk.ss_size - sizeof(struct osigframe));
294 #if defined(COMPAT_43)
295 td->td_sigstk.ss_flags |= SS_ONSTACK;
296 #endif
297 } else
298 fp = (struct osigframe *)regs->tf_esp - 1;
299
300 /* Translate the signal if appropriate. */
301 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
302 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
303
304 /* Build the argument list for the signal handler. */
305 sf.sf_signum = sig;
306 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
307 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
308 /* Signal handler installed with SA_SIGINFO. */
309 sf.sf_arg2 = (register_t)&fp->sf_siginfo;
310 sf.sf_siginfo.si_signo = sig;
311 sf.sf_siginfo.si_code = code;
312 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
313 } else {
314 /* Old FreeBSD-style arguments. */
315 sf.sf_arg2 = code;
316 sf.sf_addr = td->td_md.md_fault_addr;
317 sf.sf_ahu.sf_handler = catcher;
318 }
319 mtx_unlock(&psp->ps_mtx);
320 PROC_UNLOCK(p);
321
322 /* Save most if not all of trap frame. */
323 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
324 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
325 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
326 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
327 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
328 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
329 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
330 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
331 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
332 sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
333 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
334 sf.sf_siginfo.si_sc.sc_gs = rgs();
335 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;
336
337 /* Build the signal context to be used by osigreturn(). */
338 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
339 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
340 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
341 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
342 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
343 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
344 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
345 sf.sf_siginfo.si_sc.sc_err = regs->tf_err;
346
347 /*
348 * If we're a vm86 process, we want to save the segment registers.
349 * We also change eflags to be our emulated eflags, not the actual
350 * eflags.
351 */
352 if (regs->tf_eflags & PSL_VM) {
353 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
354 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
355 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
356
357 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
358 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
359 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
360 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;
361
362 if (vm86->vm86_has_vme == 0)
363 sf.sf_siginfo.si_sc.sc_ps =
364 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
365 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
366
367 /* See sendsig() for comments. */
368 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
369 }
370
371 /*
372 * Copy the sigframe out to the user's stack.
373 */
374 if (copyout(&sf, fp, sizeof(*fp)) != 0) {
375 #ifdef DEBUG
376 printf("process %ld has trashed its stack\n", (long)p->p_pid);
377 #endif
378 PROC_LOCK(p);
379 sigexit(td, SIGILL);
380 }
381
382 regs->tf_esp = (int)fp;
383 regs->tf_eip = PS_STRINGS - szosigcode;
384 regs->tf_eflags &= ~(PSL_T | PSL_D);
385 regs->tf_cs = _ucodesel;
386 regs->tf_ds = _udatasel;
387 regs->tf_es = _udatasel;
388 regs->tf_fs = _udatasel;
389 load_gs(_udatasel);
390 regs->tf_ss = _udatasel;
391 PROC_LOCK(p);
392 mtx_lock(&psp->ps_mtx);
393 }
394 #endif /* COMPAT_43 */
395
396 #ifdef COMPAT_FREEBSD4
397 static void
398 freebsd4_sendsig(catcher, sig, mask, code)
399 sig_t catcher;
400 int sig;
401 sigset_t *mask;
402 u_long code;
403 {
404 struct sigframe4 sf, *sfp;
405 struct proc *p;
406 struct thread *td;
407 struct sigacts *psp;
408 struct trapframe *regs;
409 int oonstack;
410
411 td = curthread;
412 p = td->td_proc;
413 PROC_LOCK_ASSERT(p, MA_OWNED);
414 psp = p->p_sigacts;
415 mtx_assert(&psp->ps_mtx, MA_OWNED);
416 regs = td->td_frame;
417 oonstack = sigonstack(regs->tf_esp);
418
419 /* Save user context. */
420 bzero(&sf, sizeof(sf));
421 sf.sf_uc.uc_sigmask = *mask;
422 sf.sf_uc.uc_stack = td->td_sigstk;
423 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
424 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
425 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
426 sf.sf_uc.uc_mcontext.mc_gs = rgs();
427 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
428
429 /* Allocate space for the signal handler context. */
430 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
431 SIGISMEMBER(psp->ps_sigonstack, sig)) {
432 sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
433 td->td_sigstk.ss_size - sizeof(struct sigframe4));
434 #if defined(COMPAT_43)
435 td->td_sigstk.ss_flags |= SS_ONSTACK;
436 #endif
437 } else
438 sfp = (struct sigframe4 *)regs->tf_esp - 1;
439
440 /* Translate the signal if appropriate. */
441 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
442 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
443
444 /* Build the argument list for the signal handler. */
445 sf.sf_signum = sig;
446 sf.sf_ucontext = (register_t)&sfp->sf_uc;
447 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
448 /* Signal handler installed with SA_SIGINFO. */
449 sf.sf_siginfo = (register_t)&sfp->sf_si;
450 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
451
452 /* Fill in POSIX parts */
453 sf.sf_si.si_signo = sig;
454 sf.sf_si.si_code = code;
455 sf.sf_si.si_addr = (void *)td->td_md.md_fault_addr;
456 } else {
457 /* Old FreeBSD-style arguments. */
458 sf.sf_siginfo = code;
459 sf.sf_addr = td->td_md.md_fault_addr;
460 sf.sf_ahu.sf_handler = catcher;
461 }
462 mtx_unlock(&psp->ps_mtx);
463 PROC_UNLOCK(p);
464
465 /*
466 * If we're a vm86 process, we want to save the segment registers.
467 * We also change eflags to be our emulated eflags, not the actual
468 * eflags.
469 */
470 if (regs->tf_eflags & PSL_VM) {
471 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
472 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
473
474 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
475 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
476 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
477 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
478
479 if (vm86->vm86_has_vme == 0)
480 sf.sf_uc.uc_mcontext.mc_eflags =
481 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
482 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
483
484 /*
485 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
486 * syscalls made by the signal handler. This just avoids
487 * wasting time for our lazy fixup of such faults. PSL_NT
488 * does nothing in vm86 mode, but vm86 programs can set it
489 * almost legitimately in probes for old cpu types.
490 */
491 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
492 }
493
494 /*
495 * Copy the sigframe out to the user's stack.
496 */
497 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
498 #ifdef DEBUG
499 printf("process %ld has trashed its stack\n", (long)p->p_pid);
500 #endif
501 PROC_LOCK(p);
502 sigexit(td, SIGILL);
503 }
504
505 regs->tf_esp = (int)sfp;
506 regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
507 regs->tf_eflags &= ~(PSL_T | PSL_D);
508 regs->tf_cs = _ucodesel;
509 regs->tf_ds = _udatasel;
510 regs->tf_es = _udatasel;
511 regs->tf_fs = _udatasel;
512 regs->tf_ss = _udatasel;
513 PROC_LOCK(p);
514 mtx_lock(&psp->ps_mtx);
515 }
516 #endif /* COMPAT_FREEBSD4 */
517
518 void
519 sendsig(catcher, sig, mask, code)
520 sig_t catcher;
521 int sig;
522 sigset_t *mask;
523 u_long code;
524 {
525 struct sigframe sf, *sfp;
526 struct proc *p;
527 struct thread *td;
528 struct sigacts *psp;
529 char *sp;
530 struct trapframe *regs;
531 int oonstack;
532
533 td = curthread;
534 p = td->td_proc;
535 PROC_LOCK_ASSERT(p, MA_OWNED);
536 psp = p->p_sigacts;
537 mtx_assert(&psp->ps_mtx, MA_OWNED);
538 #ifdef COMPAT_FREEBSD4
539 if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
540 freebsd4_sendsig(catcher, sig, mask, code);
541 return;
542 }
543 #endif
544 #ifdef COMPAT_43
545 if (SIGISMEMBER(psp->ps_osigset, sig)) {
546 osendsig(catcher, sig, mask, code);
547 return;
548 }
549 #endif
550 regs = td->td_frame;
551 oonstack = sigonstack(regs->tf_esp);
552
553 /* Save user context. */
554 bzero(&sf, sizeof(sf));
555 sf.sf_uc.uc_sigmask = *mask;
556 sf.sf_uc.uc_stack = td->td_sigstk;
557 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
558 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
559 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
560 sf.sf_uc.uc_mcontext.mc_gs = rgs();
561 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
562 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
563 get_fpcontext(td, &sf.sf_uc.uc_mcontext);
564 fpstate_drop(td);
565
566 /* Allocate space for the signal handler context. */
567 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
568 SIGISMEMBER(psp->ps_sigonstack, sig)) {
569 sp = td->td_sigstk.ss_sp +
570 td->td_sigstk.ss_size - sizeof(struct sigframe);
571 #if defined(COMPAT_43)
572 td->td_sigstk.ss_flags |= SS_ONSTACK;
573 #endif
574 } else
575 sp = (char *)regs->tf_esp - sizeof(struct sigframe);
576 /* Align to 16 bytes. */
577 sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
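/*
* Worked example (illustrative values): masking with ~0xF rounds the
* frame pointer down to a 16-byte boundary, e.g. sp == 0xbfbfe7fc
* yields sfp == 0xbfbfe7f0.  Rounding down keeps sfp within the space
* reserved above (sp was already lowered by sizeof(struct sigframe)).
*/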
578
579 /* Translate the signal if appropriate. */
580 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
581 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
582
583 /* Build the argument list for the signal handler. */
584 sf.sf_signum = sig;
585 sf.sf_ucontext = (register_t)&sfp->sf_uc;
586 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
587 /* Signal handler installed with SA_SIGINFO. */
588 sf.sf_siginfo = (register_t)&sfp->sf_si;
589 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
590
591 /* Fill in POSIX parts */
592 sf.sf_si.si_signo = sig;
593 sf.sf_si.si_code = code;
594 sf.sf_si.si_addr = (void *)td->td_md.md_fault_addr;
595 } else {
596 /* Old FreeBSD-style arguments. */
597 sf.sf_siginfo = code;
598 sf.sf_addr = td->td_md.md_fault_addr;
599 sf.sf_ahu.sf_handler = catcher;
600 }
601 mtx_unlock(&psp->ps_mtx);
602 PROC_UNLOCK(p);
603
604 /*
605 * If we're a vm86 process, we want to save the segment registers.
606 * We also change eflags to be our emulated eflags, not the actual
607 * eflags.
608 */
609 if (regs->tf_eflags & PSL_VM) {
610 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
611 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
612
613 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
614 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
615 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
616 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
617
618 if (vm86->vm86_has_vme == 0)
619 sf.sf_uc.uc_mcontext.mc_eflags =
620 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
621 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
622
623 /*
624 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
625 * syscalls made by the signal handler. This just avoids
626 * wasting time for our lazy fixup of such faults. PSL_NT
627 * does nothing in vm86 mode, but vm86 programs can set it
628 * almost legitimately in probes for old cpu types.
629 */
630 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
631 }
632
633 /*
634 * Copy the sigframe out to the user's stack.
635 */
636 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
637 #ifdef DEBUG
638 printf("process %ld has trashed its stack\n", (long)p->p_pid);
639 #endif
640 PROC_LOCK(p);
641 sigexit(td, SIGILL);
642 }
643
644 regs->tf_esp = (int)sfp;
645 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
646 regs->tf_eflags &= ~(PSL_T | PSL_D);
647 regs->tf_cs = _ucodesel;
648 regs->tf_ds = _udatasel;
649 regs->tf_es = _udatasel;
650 regs->tf_fs = _udatasel;
651 regs->tf_ss = _udatasel;
652 PROC_LOCK(p);
653 mtx_lock(&psp->ps_mtx);
654 }
655
656 /*
657 * Build siginfo_t for SA thread
658 */
659 void
660 cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
661 {
662 struct proc *p;
663 struct thread *td;
664
665 td = curthread;
666 p = td->td_proc;
667 PROC_LOCK_ASSERT(p, MA_OWNED);
668
669 bzero(si, sizeof(*si));
670 si->si_signo = sig;
671 si->si_code = code;
672 si->si_addr = (void *)td->td_md.md_fault_addr;
673 /* XXXKSE fill other fields */
674 }
675
676 /*
677 * System call to cleanup state after a signal
678 * has been taken. Reset signal mask and
679 * stack state from context left by sendsig (above).
680 * Return to previous pc and psl as specified by
681 * context left by sendsig. Check carefully to
682 * make sure that the user has not modified the
683 * state to gain improper privileges.
684 *
685 * MPSAFE
686 */
687 #ifdef COMPAT_43
688 int
689 osigreturn(td, uap)
690 struct thread *td;
691 struct osigreturn_args /* {
692 struct osigcontext *sigcntxp;
693 } */ *uap;
694 {
695 struct osigcontext sc;
696 struct trapframe *regs;
697 struct osigcontext *scp;
698 struct proc *p = td->td_proc;
699 int eflags, error;
700
701 regs = td->td_frame;
702 error = copyin(uap->sigcntxp, &sc, sizeof(sc));
703 if (error != 0)
704 return (error);
705 scp = ≻
706 eflags = scp->sc_ps;
707 if (eflags & PSL_VM) {
708 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
709 struct vm86_kernel *vm86;
710
711 /*
712 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
713 * set up the vm86 area, and we can't enter vm86 mode.
714 */
715 if (td->td_pcb->pcb_ext == 0)
716 return (EINVAL);
717 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
718 if (vm86->vm86_inited == 0)
719 return (EINVAL);
720
721 /* Go back to user mode if both flags are set. */
722 if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
723 trapsignal(td, SIGBUS, 0);
724
725 if (vm86->vm86_has_vme) {
726 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
727 (eflags & VME_USERCHANGE) | PSL_VM;
728 } else {
729 vm86->vm86_eflags = eflags; /* save VIF, VIP */
730 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
731 (eflags & VM_USERCHANGE) | PSL_VM;
732 }
733 tf->tf_vm86_ds = scp->sc_ds;
734 tf->tf_vm86_es = scp->sc_es;
735 tf->tf_vm86_fs = scp->sc_fs;
736 tf->tf_vm86_gs = scp->sc_gs;
737 tf->tf_ds = _udatasel;
738 tf->tf_es = _udatasel;
739 tf->tf_fs = _udatasel;
740 } else {
741 /*
742 * Don't allow users to change privileged or reserved flags.
743 */
744 /*
745 * XXX do allow users to change the privileged flag PSL_RF.
746 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
747 * should sometimes set it there too. tf_eflags is kept in
748 * the signal context during signal handling and there is no
749 * other place to remember it, so the PSL_RF bit may be
750 * corrupted by the signal handler without us knowing.
751 * Corruption of the PSL_RF bit at worst causes one more or
752 * one less debugger trap, so allowing it is fairly harmless.
753 */
754 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
755 return (EINVAL);
756 }
757
758 /*
759 * Don't allow users to load a valid privileged %cs. Let the
760 * hardware check for invalid selectors, excess privilege in
761 * other selectors, invalid %eip's and invalid %esp's.
762 */
763 if (!CS_SECURE(scp->sc_cs)) {
764 trapsignal(td, SIGBUS, T_PROTFLT);
765 return (EINVAL);
766 }
767 regs->tf_ds = scp->sc_ds;
768 regs->tf_es = scp->sc_es;
769 regs->tf_fs = scp->sc_fs;
770 }
771
772 /* Restore remaining registers. */
773 regs->tf_eax = scp->sc_eax;
774 regs->tf_ebx = scp->sc_ebx;
775 regs->tf_ecx = scp->sc_ecx;
776 regs->tf_edx = scp->sc_edx;
777 regs->tf_esi = scp->sc_esi;
778 regs->tf_edi = scp->sc_edi;
779 regs->tf_cs = scp->sc_cs;
780 regs->tf_ss = scp->sc_ss;
781 regs->tf_isp = scp->sc_isp;
782 regs->tf_ebp = scp->sc_fp;
783 regs->tf_esp = scp->sc_sp;
784 regs->tf_eip = scp->sc_pc;
785 regs->tf_eflags = eflags;
786
787 PROC_LOCK(p);
788 #if defined(COMPAT_43)
789 if (scp->sc_onstack & 1)
790 td->td_sigstk.ss_flags |= SS_ONSTACK;
791 else
792 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
793 #endif
794 SIGSETOLD(td->td_sigmask, scp->sc_mask);
795 SIG_CANTMASK(td->td_sigmask);
796 signotify(td);
797 PROC_UNLOCK(p);
798 return (EJUSTRETURN);
799 }
800 #endif /* COMPAT_43 */
801
802 #ifdef COMPAT_FREEBSD4
803 /*
804 * MPSAFE
805 */
806 int
807 freebsd4_sigreturn(td, uap)
808 struct thread *td;
809 struct freebsd4_sigreturn_args /* {
810 const ucontext4 *sigcntxp;
811 } */ *uap;
812 {
813 struct ucontext4 uc;
814 struct proc *p = td->td_proc;
815 struct trapframe *regs;
816 const struct ucontext4 *ucp;
817 int cs, eflags, error;
818
819 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
820 if (error != 0)
821 return (error);
822 ucp = &uc;
823 regs = td->td_frame;
824 eflags = ucp->uc_mcontext.mc_eflags;
825 if (eflags & PSL_VM) {
826 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
827 struct vm86_kernel *vm86;
828
829 /*
830 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
831 * set up the vm86 area, and we can't enter vm86 mode.
832 */
833 if (td->td_pcb->pcb_ext == 0)
834 return (EINVAL);
835 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
836 if (vm86->vm86_inited == 0)
837 return (EINVAL);
838
839 /* Go back to user mode if both flags are set. */
840 if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
841 trapsignal(td, SIGBUS, 0);
842
843 if (vm86->vm86_has_vme) {
844 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
845 (eflags & VME_USERCHANGE) | PSL_VM;
846 } else {
847 vm86->vm86_eflags = eflags; /* save VIF, VIP */
848 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
849 (eflags & VM_USERCHANGE) | PSL_VM;
850 }
851 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
852 tf->tf_eflags = eflags;
853 tf->tf_vm86_ds = tf->tf_ds;
854 tf->tf_vm86_es = tf->tf_es;
855 tf->tf_vm86_fs = tf->tf_fs;
856 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
857 tf->tf_ds = _udatasel;
858 tf->tf_es = _udatasel;
859 tf->tf_fs = _udatasel;
860 } else {
861 /*
862 * Don't allow users to change privileged or reserved flags.
863 */
864 /*
865 * XXX do allow users to change the privileged flag PSL_RF.
866 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
867 * should sometimes set it there too. tf_eflags is kept in
868 * the signal context during signal handling and there is no
869 * other place to remember it, so the PSL_RF bit may be
870 * corrupted by the signal handler without us knowing.
871 * Corruption of the PSL_RF bit at worst causes one more or
872 * one less debugger trap, so allowing it is fairly harmless.
873 */
874 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
875 printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
876 return (EINVAL);
877 }
878
879 /*
880 * Don't allow users to load a valid privileged %cs. Let the
881 * hardware check for invalid selectors, excess privilege in
882 * other selectors, invalid %eip's and invalid %esp's.
883 */
884 cs = ucp->uc_mcontext.mc_cs;
885 if (!CS_SECURE(cs)) {
886 printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
887 trapsignal(td, SIGBUS, T_PROTFLT);
888 return (EINVAL);
889 }
890
891 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
892 }
893
894 PROC_LOCK(p);
895 #if defined(COMPAT_43)
896 if (ucp->uc_mcontext.mc_onstack & 1)
897 td->td_sigstk.ss_flags |= SS_ONSTACK;
898 else
899 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
900 #endif
901
902 td->td_sigmask = ucp->uc_sigmask;
903 SIG_CANTMASK(td->td_sigmask);
904 signotify(td);
905 PROC_UNLOCK(p);
906 return (EJUSTRETURN);
907 }
908 #endif /* COMPAT_FREEBSD4 */
909
910 /*
911 * MPSAFE
912 */
913 int
914 sigreturn(td, uap)
915 struct thread *td;
916 struct sigreturn_args /* {
917 const __ucontext *sigcntxp;
918 } */ *uap;
919 {
920 ucontext_t uc;
921 struct proc *p = td->td_proc;
922 struct trapframe *regs;
923 const ucontext_t *ucp;
924 int cs, eflags, error, ret;
925
926 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
927 if (error != 0)
928 return (error);
929 ucp = &uc;
930 regs = td->td_frame;
931 eflags = ucp->uc_mcontext.mc_eflags;
932 if (eflags & PSL_VM) {
933 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
934 struct vm86_kernel *vm86;
935
936 /*
937 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
938 * set up the vm86 area, and we can't enter vm86 mode.
939 */
940 if (td->td_pcb->pcb_ext == 0)
941 return (EINVAL);
942 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
943 if (vm86->vm86_inited == 0)
944 return (EINVAL);
945
946 /* Go back to user mode if both flags are set. */
947 if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
948 trapsignal(td, SIGBUS, 0);
949
950 if (vm86->vm86_has_vme) {
951 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
952 (eflags & VME_USERCHANGE) | PSL_VM;
953 } else {
954 vm86->vm86_eflags = eflags; /* save VIF, VIP */
955 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
956 (eflags & VM_USERCHANGE) | PSL_VM;
957 }
958 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
959 tf->tf_eflags = eflags;
960 tf->tf_vm86_ds = tf->tf_ds;
961 tf->tf_vm86_es = tf->tf_es;
962 tf->tf_vm86_fs = tf->tf_fs;
963 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
964 tf->tf_ds = _udatasel;
965 tf->tf_es = _udatasel;
966 tf->tf_fs = _udatasel;
967 } else {
968 /*
969 * Don't allow users to change privileged or reserved flags.
970 */
971 /*
972 * XXX do allow users to change the privileged flag PSL_RF.
973 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
974 * should sometimes set it there too. tf_eflags is kept in
975 * the signal context during signal handling and there is no
976 * other place to remember it, so the PSL_RF bit may be
977 * corrupted by the signal handler without us knowing.
978 * Corruption of the PSL_RF bit at worst causes one more or
979 * one less debugger trap, so allowing it is fairly harmless.
980 */
981 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
982 printf("sigreturn: eflags = 0x%x\n", eflags);
983 return (EINVAL);
984 }
985
986 /*
987 * Don't allow users to load a valid privileged %cs. Let the
988 * hardware check for invalid selectors, excess privilege in
989 * other selectors, invalid %eip's and invalid %esp's.
990 */
991 cs = ucp->uc_mcontext.mc_cs;
992 if (!CS_SECURE(cs)) {
993 printf("sigreturn: cs = 0x%x\n", cs);
994 trapsignal(td, SIGBUS, T_PROTFLT);
995 return (EINVAL);
996 }
997
998 ret = set_fpcontext(td, &ucp->uc_mcontext);
999 if (ret != 0)
1000 return (ret);
1001 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
1002 }
1003
1004 PROC_LOCK(p);
1005 #if defined(COMPAT_43)
1006 if (ucp->uc_mcontext.mc_onstack & 1)
1007 td->td_sigstk.ss_flags |= SS_ONSTACK;
1008 else
1009 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
1010 #endif
1011
1012 td->td_sigmask = ucp->uc_sigmask;
1013 SIG_CANTMASK(td->td_sigmask);
1014 signotify(td);
1015 PROC_UNLOCK(p);
1016 return (EJUSTRETURN);
1017 }
1018
1019 /*
1020 * Machine dependent boot() routine
1021 *
1022 * I haven't seen anything to put here yet
1023 * Possibly some stuff might be grafted back here from boot()
1024 */
1025 void
1026 cpu_boot(int howto)
1027 {
1028 }
1029
1030 /* Get current clock frequency for the given cpu id. */
1031 int
1032 cpu_est_clockrate(int cpu_id, uint64_t *rate)
1033 {
1034 register_t reg;
1035 uint64_t tsc1, tsc2;
1036
1037 if (pcpu_find(cpu_id) == NULL || rate == NULL)
1038 return (EINVAL);
1039 if (!tsc_present)
1040 return (EOPNOTSUPP);
1041
1042 /* If we're booting, trust the rate calibrated moments ago. */
1043 if (cold) {
1044 *rate = tsc_freq;
1045 return (0);
1046 }
1047
1048 #ifdef SMP
1049 /* Schedule ourselves on the indicated cpu. */
1050 mtx_lock_spin(&sched_lock);
1051 sched_bind(curthread, cpu_id);
1052 mtx_unlock_spin(&sched_lock);
1053 #endif
1054
1055 /* Calibrate by measuring a short delay. */
1056 reg = intr_disable();
1057 tsc1 = rdtsc();
1058 DELAY(1000);
1059 tsc2 = rdtsc();
1060 intr_restore(reg);
1061
1062 #ifdef SMP
1063 mtx_lock_spin(&sched_lock);
1064 sched_unbind(curthread);
1065 mtx_unlock_spin(&sched_lock);
1066 #endif
1067
1068 /*
1069 * Calculate the difference in readings, convert to hertz, and
1070 * subtract 0.5% of the total. Empirical testing has shown that
1071 * overhead in DELAY() works out to approximately this value.
1072 */
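/*
* Worked example (illustrative numbers): DELAY(1000) waits 1000us, so
* if the TSC advanced by tsc2 = 1000000 ticks in that window, then
* tsc2 * 1000 = 1000000000 (1 GHz) and tsc2 * 5 is 0.5% of that,
* giving *rate = 995000000 Hz.
*/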
1073 tsc2 -= tsc1;
1074 *rate = tsc2 * 1000 - tsc2 * 5;
1075 return (0);
1076 }
1077
1078 /*
1079 * Shutdown the CPU as much as possible
1080 */
1081 void
1082 cpu_halt(void)
1083 {
1084 for (;;)
1085 __asm__ ("hlt");
1086 }
1087
1088 /*
1089 * Hook to idle the CPU when possible. In the SMP case we default to
1090 * off because a halted cpu will not currently pick up a new thread in the
1091 * run queue until the next timer tick. If turned on this will result in
1092 * approximately a 4.2% loss in real time performance in buildworld tests
1093 * (but improves user and sys times oddly enough), and saves approximately
1094 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
1095 *
1096 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
1097 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
1098 * Then we can have our cake and eat it too.
1099 *
1100 * XXX I'm turning it on for SMP as well by default for now. It seems to
1101 * help lock contention somewhat, and this is critical for HTT. -Peter
1102 */
1103 static int cpu_idle_hlt = 1;
1104 SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
1105 &cpu_idle_hlt, 0, "Idle loop HLT enable");
1106
1107 static void
1108 cpu_idle_default(void)
1109 {
1110 /*
1111 * We must absolutely guarantee that hlt is the next instruction
1112 * after sti, or we introduce a timing window: sti enables interrupts
1113 * only after the following instruction, so nothing can slip in between.
1114 */
1115 __asm __volatile("sti; hlt");
1116 }
1117
1118 /*
1119 * Note that we have to be careful here to avoid a race between checking
1120 * sched_runnable() and actually halting. If we don't do this, we may waste
1121 * the time between calling hlt and the next interrupt even though there
1122 * is a runnable process.
1123 */
1124 void
1125 cpu_idle(void)
1126 {
1127
1128 #ifdef SMP
1129 if (mp_grab_cpu_hlt())
1130 return;
1131 #endif
1132
1133 if (cpu_idle_hlt) {
1134 disable_intr();
1135 if (sched_runnable())
1136 enable_intr();
1137 else
1138 (*cpu_idle_hook)();
1139 }
1140 }
1141
1142 /* Other subsystems (e.g., ACPI) can hook this later. */
1143 void (*cpu_idle_hook)(void) = cpu_idle_default;
1144
1145 /*
1146 * Clear registers on exec
1147 */
1148 void
1149 exec_setregs(td, entry, stack, ps_strings)
1150 struct thread *td;
1151 u_long entry;
1152 u_long stack;
1153 u_long ps_strings;
1154 {
1155 struct trapframe *regs = td->td_frame;
1156 struct pcb *pcb = td->td_pcb;
1157
1158 /* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
1159 pcb->pcb_gs = _udatasel;
1160 load_gs(_udatasel);
1161
1162 if (td->td_proc->p_md.md_ldt)
1163 user_ldt_free(td);
1164
1165 bzero((char *)regs, sizeof(struct trapframe));
1166 regs->tf_eip = entry;
1167 regs->tf_esp = stack;
1168 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
1169 regs->tf_ss = _udatasel;
1170 regs->tf_ds = _udatasel;
1171 regs->tf_es = _udatasel;
1172 regs->tf_fs = _udatasel;
1173 regs->tf_cs = _ucodesel;
1174
1175 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */
1176 regs->tf_ebx = ps_strings;
1177
1178 /*
1179 * Reset the hardware debug registers if they were in use.
1180 * They won't have any meaning for the newly exec'd process.
1181 */
1182 if (pcb->pcb_flags & PCB_DBREGS) {
1183 pcb->pcb_dr0 = 0;
1184 pcb->pcb_dr1 = 0;
1185 pcb->pcb_dr2 = 0;
1186 pcb->pcb_dr3 = 0;
1187 pcb->pcb_dr6 = 0;
1188 pcb->pcb_dr7 = 0;
1189 if (pcb == PCPU_GET(curpcb)) {
1190 /*
1191 * Clear the debug registers on the running
1192 * CPU, otherwise they will end up affecting
1193 * the next process we switch to.
1194 */
1195 reset_dbregs();
1196 }
1197 pcb->pcb_flags &= ~PCB_DBREGS;
1198 }
1199
1200 /*
1201 * Initialize the math emulator (if any) for the current process.
1202 * Actually, just clear the bit that says that the emulator has
1203 * been initialized. Initialization is delayed until the process
1204 * traps to the emulator (if it is done at all) mainly because
1205 * emulators don't provide an entry point for initialization.
1206 */
1207 td->td_pcb->pcb_flags &= ~FP_SOFTFP;
1208
1209 /*
1210 * Drop the FP state if we hold it, so that the process gets a
1211 * clean FP state if it uses the FPU again.
1212 */
1213 fpstate_drop(td);
1214
1215 /*
1216 * XXX - Linux emulator
1217 * Make sure edx is 0x0 on entry. Linux binaries depend
1218 * on it.
1219 */
1220 td->td_retval[1] = 0;
1221 }
1222
1223 void
1224 cpu_setregs(void)
1225 {
1226 unsigned int cr0;
1227
1228 cr0 = rcr0();
1229
1230 /*
1231 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
1232 *
1233 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
1234 * instructions. We must set the CR0_MP bit and use the CR0_TS
1235 * bit to control the trap, because setting the CR0_EM bit does
1236 * not cause WAIT instructions to trap. It's important to trap
1237 * WAIT instructions - otherwise the "wait" variants of no-wait
1238 * control instructions would degenerate to the "no-wait" variants
1239 * after FP context switches but work correctly otherwise. It's
1240 * particularly important to trap WAITs when there is no NPX -
1241 * otherwise the "wait" variants would always degenerate.
1242 *
1243 * Try setting CR0_NE to get correct error reporting on 486DX's.
1244 * Setting it should fail or do nothing on lesser processors.
1245 */
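/*
* The remaining bits set below (descriptive note): CR0_WP makes the
* kernel honor page-level write protection on supervisor accesses, and
* CR0_AM allows user code to request alignment checking via the EFLAGS
* AC bit.
*/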
1246 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
1247 load_cr0(cr0);
1248 load_gs(_udatasel);
1249 }
1250
1251 static int
1252 sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
1253 {
1254 int error;
1255 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
1256 req);
1257 if (!error && req->newptr)
1258 resettodr();
1259 return (error);
1260 }
1261
1262 SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
1263 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");
1264
1265 SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
1266 CTLFLAG_RW, &disable_rtc_set, 0, "");
1267
1268 SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
1269 CTLFLAG_RD, &bootinfo, bootinfo, "");
1270
1271 SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
1272 CTLFLAG_RW, &wall_cmos_clock, 0, "");
1273
1274 u_long bootdev; /* not a struct cdev *- encoding is different */
1275 SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
1276 CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");
1277
1278 /*
1279 * Initialize 386 and configure to run kernel
1280 */
1281
1282 /*
1283 * Initialize segments & interrupt table
1284 */
1285
1286 int _default_ldt;
1287 union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */
1288 static struct gate_descriptor idt0[NIDT];
1289 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
1290 union descriptor ldt[NLDT]; /* local descriptor table */
1291 struct region_descriptor r_gdt, r_idt; /* table descriptors */
1292
1293 int private_tss; /* flag indicating private tss */
1294
1295 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
1296 extern int has_f00f_bug;
1297 #endif
1298
1299 static struct i386tss dblfault_tss;
1300 static char dblfault_stack[PAGE_SIZE];
1301
1302 extern vm_offset_t proc0kstack;
1303
1304
1305 /*
1306 * software prototypes -- in more palatable form.
1307 *
1308 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
1309 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
1310 */
1311 struct soft_segment_descriptor gdt_segs[] = {
1312 /* GNULL_SEL 0 Null Descriptor */
1313 { 0x0, /* segment base address */
1314 0x0, /* length */
1315 0, /* segment type */
1316 0, /* segment descriptor priority level */
1317 0, /* segment descriptor present */
1318 0, 0,
1319 0, /* default 32 vs 16 bit size */
1320 0 /* limit granularity (byte/page units)*/ },
1321 /* GPRIV_SEL 1 SMP Per-Processor Private Data Descriptor */
1322 { 0x0, /* segment base address */
1323 0xfffff, /* length - all address space */
1324 SDT_MEMRWA, /* segment type */
1325 0, /* segment descriptor priority level */
1326 1, /* segment descriptor present */
1327 0, 0,
1328 1, /* default 32 vs 16 bit size */
1329 1 /* limit granularity (byte/page units)*/ },
1330 /* GUFS_SEL 2 %fs Descriptor for user */
1331 { 0x0, /* segment base address */
1332 0xfffff, /* length - all address space */
1333 SDT_MEMRWA, /* segment type */
1334 SEL_UPL, /* segment descriptor priority level */
1335 1, /* segment descriptor present */
1336 0, 0,
1337 1, /* default 32 vs 16 bit size */
1338 1 /* limit granularity (byte/page units)*/ },
1339 /* GUGS_SEL 3 %gs Descriptor for user */
1340 { 0x0, /* segment base address */
1341 0xfffff, /* length - all address space */
1342 SDT_MEMRWA, /* segment type */
1343 SEL_UPL, /* segment descriptor priority level */
1344 1, /* segment descriptor present */
1345 0, 0,
1346 1, /* default 32 vs 16 bit size */
1347 1 /* limit granularity (byte/page units)*/ },
1348 /* GCODE_SEL 4 Code Descriptor for kernel */
1349 { 0x0, /* segment base address */
1350 0xfffff, /* length - all address space */
1351 SDT_MEMERA, /* segment type */
1352 0, /* segment descriptor priority level */
1353 1, /* segment descriptor present */
1354 0, 0,
1355 1, /* default 32 vs 16 bit size */
1356 1 /* limit granularity (byte/page units)*/ },
1357 /* GDATA_SEL 5 Data Descriptor for kernel */
1358 { 0x0, /* segment base address */
1359 0xfffff, /* length - all address space */
1360 SDT_MEMRWA, /* segment type */
1361 0, /* segment descriptor priority level */
1362 1, /* segment descriptor present */
1363 0, 0,
1364 1, /* default 32 vs 16 bit size */
1365 1 /* limit granularity (byte/page units)*/ },
1366 /* GUCODE_SEL 6 Code Descriptor for user */
1367 { 0x0, /* segment base address */
1368 0xfffff, /* length - all address space */
1369 SDT_MEMERA, /* segment type */
1370 SEL_UPL, /* segment descriptor priority level */
1371 1, /* segment descriptor present */
1372 0, 0,
1373 1, /* default 32 vs 16 bit size */
1374 1 /* limit granularity (byte/page units)*/ },
1375 /* GUDATA_SEL 7 Data Descriptor for user */
1376 { 0x0, /* segment base address */
1377 0xfffff, /* length - all address space */
1378 SDT_MEMRWA, /* segment type */
1379 SEL_UPL, /* segment descriptor priority level */
1380 1, /* segment descriptor present */
1381 0, 0,
1382 1, /* default 32 vs 16 bit size */
1383 1 /* limit granularity (byte/page units)*/ },
1384 /* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
1385 { 0x400, /* segment base address */
1386 0xfffff, /* length */
1387 SDT_MEMRWA, /* segment type */
1388 0, /* segment descriptor priority level */
1389 1, /* segment descriptor present */
1390 0, 0,
1391 1, /* default 32 vs 16 bit size */
1392 1 /* limit granularity (byte/page units)*/ },
1393 /* GPROC0_SEL 9 Proc 0 Tss Descriptor */
1394 {
1395 0x0, /* segment base address */
1396 sizeof(struct i386tss)-1,/* length */
1397 SDT_SYS386TSS, /* segment type */
1398 0, /* segment descriptor priority level */
1399 1, /* segment descriptor present */
1400 0, 0,
1401 0, /* unused - default 32 vs 16 bit size */
1402 0 /* limit granularity (byte/page units)*/ },
1403 /* GLDT_SEL 10 LDT Descriptor */
1404 { (int) ldt, /* segment base address */
1405 sizeof(ldt)-1, /* length - all address space */
1406 SDT_SYSLDT, /* segment type */
1407 SEL_UPL, /* segment descriptor priority level */
1408 1, /* segment descriptor present */
1409 0, 0,
1410 0, /* unused - default 32 vs 16 bit size */
1411 0 /* limit granularity (byte/page units)*/ },
1412 /* GUSERLDT_SEL 11 User LDT Descriptor per process */
1413 { (int) ldt, /* segment base address */
1414 (512 * sizeof(union descriptor)-1), /* length */
1415 SDT_SYSLDT, /* segment type */
1416 0, /* segment descriptor priority level */
1417 1, /* segment descriptor present */
1418 0, 0,
1419 0, /* unused - default 32 vs 16 bit size */
1420 0 /* limit granularity (byte/page units)*/ },
1421 /* GPANIC_SEL 12 Panic Tss Descriptor */
1422 { (int) &dblfault_tss, /* segment base address */
1423 sizeof(struct i386tss)-1,/* length - all address space */
1424 SDT_SYS386TSS, /* segment type */
1425 0, /* segment descriptor priority level */
1426 1, /* segment descriptor present */
1427 0, 0,
1428 0, /* unused - default 32 vs 16 bit size */
1429 0 /* limit granularity (byte/page units)*/ },
1430 /* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
1431 { 0, /* segment base address (overwritten) */
1432 0xfffff, /* length */
1433 SDT_MEMERA, /* segment type */
1434 0, /* segment descriptor priority level */
1435 1, /* segment descriptor present */
1436 0, 0,
1437 0, /* default 32 vs 16 bit size */
1438 1 /* limit granularity (byte/page units)*/ },
1439 /* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
1440 { 0, /* segment base address (overwritten) */
1441 0xfffff, /* length */
1442 SDT_MEMERA, /* segment type */
1443 0, /* segment descriptor priority level */
1444 1, /* segment descriptor present */
1445 0, 0,
1446 0, /* default 32 vs 16 bit size */
1447 1 /* limit granularity (byte/page units)*/ },
1448 /* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
1449 { 0, /* segment base address (overwritten) */
1450 0xfffff, /* length */
1451 SDT_MEMRWA, /* segment type */
1452 0, /* segment descriptor priority level */
1453 1, /* segment descriptor present */
1454 0, 0,
1455 1, /* default 32 vs 16 bit size */
1456 1 /* limit granularity (byte/page units)*/ },
1457 /* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
1458 { 0, /* segment base address (overwritten) */
1459 0xfffff, /* length */
1460 SDT_MEMRWA, /* segment type */
1461 0, /* segment descriptor priority level */
1462 1, /* segment descriptor present */
1463 0, 0,
1464 0, /* default 32 vs 16 bit size */
1465 1 /* limit granularity (byte/page units)*/ },
1466 /* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
1467 { 0, /* segment base address (overwritten) */
1468 0xfffff, /* length */
1469 SDT_MEMRWA, /* segment type */
1470 0, /* segment descriptor priority level */
1471 1, /* segment descriptor present */
1472 0, 0,
1473 0, /* default 32 vs 16 bit size */
1474 1 /* limit granularity (byte/page units)*/ },
1475 /* GNDIS_SEL 18 NDIS Descriptor */
1476 { 0x0, /* segment base address */
1477 0x0, /* length */
1478 0, /* segment type */
1479 0, /* segment descriptor priority level */
1480 0, /* segment descriptor present */
1481 0, 0,
1482 0, /* default 32 vs 16 bit size */
1483 0 /* limit granularity (byte/page units)*/ },
1484 };
1485
1486 static struct soft_segment_descriptor ldt_segs[] = {
1487 /* Null Descriptor - overwritten by call gate */
1488 { 0x0, /* segment base address */
1489 0x0, /* length - all address space */
1490 0, /* segment type */
1491 0, /* segment descriptor priority level */
1492 0, /* segment descriptor present */
1493 0, 0,
1494 0, /* default 32 vs 16 bit size */
1495 0 /* limit granularity (byte/page units)*/ },
1496 /* Null Descriptor - overwritten by call gate */
1497 { 0x0, /* segment base address */
1498 0x0, /* length - all address space */
1499 0, /* segment type */
1500 0, /* segment descriptor priority level */
1501 0, /* segment descriptor present */
1502 0, 0,
1503 0, /* default 32 vs 16 bit size */
1504 0 /* limit granularity (byte/page units)*/ },
1505 /* Null Descriptor - overwritten by call gate */
1506 { 0x0, /* segment base address */
1507 0x0, /* length - all address space */
1508 0, /* segment type */
1509 0, /* segment descriptor priority level */
1510 0, /* segment descriptor present */
1511 0, 0,
1512 0, /* default 32 vs 16 bit size */
1513 0 /* limit granularity (byte/page units)*/ },
1514 /* Code Descriptor for user */
1515 { 0x0, /* segment base address */
1516 0xfffff, /* length - all address space */
1517 SDT_MEMERA, /* segment type */
1518 SEL_UPL, /* segment descriptor priority level */
1519 1, /* segment descriptor present */
1520 0, 0,
1521 1, /* default 32 vs 16 bit size */
1522 1 /* limit granularity (byte/page units)*/ },
1523 /* Null Descriptor - overwritten by call gate */
1524 { 0x0, /* segment base address */
1525 0x0, /* length - all address space */
1526 0, /* segment type */
1527 0, /* segment descriptor priority level */
1528 0, /* segment descriptor present */
1529 0, 0,
1530 0, /* default 32 vs 16 bit size */
1531 0 /* limit granularity (byte/page units)*/ },
1532 /* Data Descriptor for user */
1533 { 0x0, /* segment base address */
1534 0xfffff, /* length - all address space */
1535 SDT_MEMRWA, /* segment type */
1536 SEL_UPL, /* segment descriptor priority level */
1537 1, /* segment descriptor present */
1538 0, 0,
1539 1, /* default 32 vs 16 bit size */
1540 1 /* limit granularity (byte/page units)*/ },
1541 };
1542
1543 void
1544 setidt(idx, func, typ, dpl, selec)
1545 int idx;
1546 inthand_t *func;
1547 int typ;
1548 int dpl;
1549 int selec;
1550 {
1551 struct gate_descriptor *ip;
1552
1553 ip = idt + idx;
1554 ip->gd_looffset = (int)func;
1555 ip->gd_selector = selec;
1556 ip->gd_stkcpy = 0;
1557 ip->gd_xx = 0;
1558 ip->gd_type = typ;
1559 ip->gd_dpl = dpl;
1560 ip->gd_p = 1;
1561 ip->gd_hioffset = ((int)func)>>16 ;
1562 }
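/*
* Usage sketch (illustrative, not an exact call from this section):
* during IDT setup the kernel installs handlers with calls along the
* lines of
*
*	setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
*	    GSEL(GCODE_SEL, SEL_KPL));
*
* i.e. vector number, handler, gate type, DPL and code selector.
*/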
1563
1564 #define IDTVEC(name) __CONCAT(X,name)
1565
1566 extern inthand_t
1567 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1568 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1569 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1570 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1571 IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
1572
1573 #ifdef DDB
1574 /*
1575 * Display the index and function name of any IDT entries that don't use
1576 * the default 'rsvd' entry point.
1577 */
1578 DB_SHOW_COMMAND(idt, db_show_idt)
1579 {
1580 struct gate_descriptor *ip;
1581 int idx, quit;
1582 uintptr_t func;
1583
1584 ip = idt;
1585 db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
1586 for (idx = 0, quit = 0; idx < NIDT; idx++) {
1587 func = (ip->gd_hioffset << 16 | ip->gd_looffset);
1588 if (func != (uintptr_t)&IDTVEC(rsvd)) {
1589 db_printf("%3d\t", idx);
1590 db_printsym(func, DB_STGY_PROC);
1591 db_printf("\n");
1592 }
1593 ip++;
1594 }
1595 }
1596 #endif
1597
1598 void
1599 sdtossd(sd, ssd)
1600 struct segment_descriptor *sd;
1601 struct soft_segment_descriptor *ssd;
1602 {
1603 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1604 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1605 ssd->ssd_type = sd->sd_type;
1606 ssd->ssd_dpl = sd->sd_dpl;
1607 ssd->ssd_p = sd->sd_p;
1608 ssd->ssd_def32 = sd->sd_def32;
1609 ssd->ssd_gran = sd->sd_gran;
1610 }
1611
1612 /*
1613 * Populate the (physmap) array with base/bound pairs describing the
1614 * available physical memory in the system, then test this memory and
1615 * build the phys_avail array describing the actually-available memory.
1616 *
1617 * If we cannot accurately determine the physical memory map, then use
1618 * the value from the 0xE801 call, and failing that, the RTC.
1619 *
1620 * Total memory size may be set by the kernel environment variable
1621 * hw.physmem or the compile-time define MAXMEM.
1622 *
1623 * XXX first should be vm_paddr_t.
1624 */
1625 static void
1626 getmemsize(int first)
1627 {
1628 int i, physmap_idx, pa_indx, da_indx;
1629 int pg_n;
1630 u_long physmem_tunable;
1631 u_int extmem, under16;
1632 vm_paddr_t pa, physmap[PHYSMAP_SIZE];
1633 pt_entry_t *pte;
1634 quad_t dcons_addr, dcons_size;
1635
1636 bzero(physmap, sizeof(physmap));
1637
1638 /* XXX - some EPSON machines can't use PG_N */
1639 pg_n = PG_N;
1640 if (pc98_machine_type & M_EPSON_PC98) {
1641 switch (epson_machine_id) {
1642 #ifdef WB_CACHE
1643 default:
1644 #endif
1645 case EPSON_PC486_HX:
1646 case EPSON_PC486_HG:
1647 case EPSON_PC486_HA:
1648 pg_n = 0;
1649 break;
1650 }
1651 }
1652
1653 /*
1654 * Perform "base memory" related probes & setup
1655 */
1656 under16 = pc98_getmemsize(&basemem, &extmem);
1657 if (basemem > 640) {
1658 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
1659 basemem);
1660 basemem = 640;
1661 }
1662
1663 /*
1664 * XXX if biosbasemem is now < 640, there is a `hole'
1665 * between the end of base memory and the start of
1666 * ISA memory. The hole may be empty or it may
1667 * contain BIOS code or data. Map it read/write so
1668 * that the BIOS can write to it. (Memory from 0 to
1669 * the physical end of the kernel is mapped read-only
1670 * to begin with and then parts of it are remapped.
1671 * The parts that aren't remapped form holes that
1672 * remain read-only and are unused by the kernel.
1673 * The base memory area is below the physical end of
1674 * the kernel and right now forms a read-only hole.
1675 * The part of it from PAGE_SIZE to
1676 * (trunc_page(biosbasemem * 1024) - 1) will be
1677 * remapped and used by the kernel later.)
1678 *
1679 * This code is similar to the code used in
1680 * pmap_mapdev, but since no memory needs to be
1681 * allocated we simply change the mapping.
1682 */
1683 for (pa = trunc_page(basemem * 1024);
1684 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1685 pmap_kenter(KERNBASE + pa, pa);
1686
1687 /*
1688 * if basemem != 640, map pages r/w into vm86 page table so
1689 * that the bios can scribble on it.
1690 */
1691 pte = (pt_entry_t *)vm86paddr;
1692 for (i = basemem / 4; i < 160; i++)
1693 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
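/*
* Descriptive note: with 4KB pages, basemem / 4 is the first page past
* base memory (basemem is in KB) and page 160 corresponds to 640KB, so
* the loop maps the tail of the 0-640KB window writable for the BIOS.
*/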
1694
1695 physmap[0] = 0;
1696 physmap[1] = basemem * 1024;
1697 physmap_idx = 2;
1698 physmap[physmap_idx] = 0x100000;
1699 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
1700
1701 /*
1702 * Now, physmap contains a map of physical memory.
1703 */
1704
1705 #ifdef SMP
1706 /* make hole for AP bootstrap code */
1707 physmap[1] = mp_bootaddress(physmap[1]);
1708 #endif
1709
1710 /*
1711 * Maxmem isn't the "maximum memory"; it's one larger than the
1712 * highest page of the physical address space. It should be
1713 * called something like "Maxphyspage". We may adjust this
1714 * based on ``hw.physmem'' and the results of the memory test.
1715 */
1716 Maxmem = atop(physmap[physmap_idx + 1]);
1717
1718 #ifdef MAXMEM
1719 Maxmem = MAXMEM / 4;
1720 #endif
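/*
* Note (assumes the usual convention that options MAXMEM is given in
* kilobytes): dividing by 4 converts KB to 4KB pages, e.g.
* MAXMEM=262144 (256MB) yields Maxmem = 65536 pages.
*/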
1721
1722 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
1723 Maxmem = atop(physmem_tunable);
1724
1725 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1726 (boothowto & RB_VERBOSE))
1727 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1728
1729 /*
1730 * If Maxmem has been increased beyond what the system has detected,
1731 * extend the last memory segment to the new limit.
1732 */
1733 if (atop(physmap[physmap_idx + 1]) < Maxmem)
1734 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
1735
1736 /*
1737 * We need to split the chunk if Maxmem is larger than 16MB and
1738 * the area below 16MB is not entirely memory, i.e. when
1739 * (1) the system area (15-16MB region) is cut off, or
1740 * (2) the extended memory lies only above 16MB (e.g. Melco "HYPERMEMORY").
1741 */
1742 if ((under16 != 16 * 1024) && (extmem > 15 * 1024)) {
1743 /* The 15M-16M region is cut off, so we need to split the chunk. */
1744 physmap[physmap_idx + 1] = under16 * 1024;
1745 physmap_idx += 2;
1746 physmap[physmap_idx] = 0x1000000;
1747 physmap[physmap_idx + 1] = physmap[2] + extmem * 1024;
1748 }
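/*
* Resulting physmap layout in that case (illustrative):
*
*	physmap[0..1]: 0         .. basemem * 1024  (base memory)
*	physmap[2..3]: 0x100000  .. under16 * 1024  (below the hole)
*	physmap[4..5]: 0x1000000 .. end of extended memory
*/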
1749
1750 /* call pmap initialization to make new kernel address space */
1751 pmap_bootstrap(first, 0);
1752
1753 /*
1754 * Size up each available chunk of physical memory.
1755 */
1756 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1757 pa_indx = 0;
1758 da_indx = 1;
1759 phys_avail[pa_indx++] = physmap[0];
1760 phys_avail[pa_indx] = physmap[0];
1761 dump_avail[da_indx] = physmap[0];
1762 pte = CMAP1;
1763
1764 /*
1765 * Get dcons buffer address
1766 */
1767 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
1768 getenv_quad("dcons.size", &dcons_size) == 0)
1769 dcons_addr = 0;
1770
1771 /*
1772 * physmap is in bytes, so when converting to page boundaries,
1773 * round up the start address and round down the end address.
1774 */
1775 for (i = 0; i <= physmap_idx; i += 2) {
1776 vm_paddr_t end;
1777
1778 end = ptoa((vm_paddr_t)Maxmem);
1779 if (physmap[i + 1] < end)
1780 end = trunc_page(physmap[i + 1]);
1781 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1782 int tmp, page_bad, full;
1783 int *ptr = (int *)CADDR1;
1784
1785 full = FALSE;
1786 /*
1787 * block out kernel memory as not available.
1788 */
1789 if (pa >= KERNLOAD && pa < first)
1790 goto do_dump_avail;
1791
1792 /*
1793 * block out dcons buffer
1794 */
1795 if (dcons_addr > 0
1796 && pa >= trunc_page(dcons_addr)
1797 && pa < dcons_addr + dcons_size)
1798 goto do_dump_avail;
1799
1800 page_bad = FALSE;
1801
			/*
			 * map page into kernel: valid, read/write,
			 * non-cacheable
			 */
1805 *pte = pa | PG_V | PG_RW | pg_n;
1806 invltlb();
1807
1808 tmp = *(int *)ptr;
1809 /*
1810 * Test for alternating 1's and 0's
1811 */
1812 *(volatile int *)ptr = 0xaaaaaaaa;
1813 if (*(volatile int *)ptr != 0xaaaaaaaa)
1814 page_bad = TRUE;
1815 /*
1816 * Test for alternating 0's and 1's
1817 */
1818 *(volatile int *)ptr = 0x55555555;
1819 if (*(volatile int *)ptr != 0x55555555)
1820 page_bad = TRUE;
1821 /*
1822 * Test for all 1's
1823 */
1824 *(volatile int *)ptr = 0xffffffff;
1825 if (*(volatile int *)ptr != 0xffffffff)
1826 page_bad = TRUE;
1827 /*
1828 * Test for all 0's
1829 */
1830 *(volatile int *)ptr = 0x0;
1831 if (*(volatile int *)ptr != 0x0)
1832 page_bad = TRUE;
1833 /*
1834 * Restore original value.
1835 */
1836 *(int *)ptr = tmp;
1837
1838 /*
1839 * Adjust array of valid/good pages.
1840 */
1841 if (page_bad == TRUE)
1842 continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that the "end" entry points one past the
			 * last byte, making the range >= start and < end.
			 * If we're also doing a speculative memory test
			 * and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
1854 if (phys_avail[pa_indx] == pa) {
1855 phys_avail[pa_indx] += PAGE_SIZE;
1856 } else {
1857 pa_indx++;
1858 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
1859 printf(
1860 "Too many holes in the physical address space, giving up\n");
1861 pa_indx--;
1862 full = TRUE;
1863 goto do_dump_avail;
1864 }
1865 phys_avail[pa_indx++] = pa; /* start */
1866 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
1867 }
1868 physmem++;
1869 do_dump_avail:
1870 if (dump_avail[da_indx] == pa) {
1871 dump_avail[da_indx] += PAGE_SIZE;
1872 } else {
1873 da_indx++;
1874 if (da_indx == DUMP_AVAIL_ARRAY_END) {
1875 da_indx--;
1876 goto do_next;
1877 }
1878 dump_avail[da_indx++] = pa; /* start */
1879 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
1880 }
1881 do_next:
1882 if (full)
1883 break;
1884 }
1885 }
1886 *pte = 0;
1887 invltlb();
1888
1889 /*
1890 * XXX
1891 * The last chunk must contain at least one page plus the message
1892 * buffer to avoid complicating other code (message buffer address
1893 * calculation, etc.).
1894 */
1895 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
1896 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
1897 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
1898 phys_avail[pa_indx--] = 0;
1899 phys_avail[pa_indx--] = 0;
1900 }
1901
1902 Maxmem = atop(phys_avail[pa_indx]);
1903
1904 /* Trim off space for the message buffer. */
1905 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
1906
1907 avail_end = phys_avail[pa_indx];
1908 }
1909
1910 void
init386(int first)
{
1914 struct gate_descriptor *gdp;
1915 int gsel_tss, metadata_missing, off, x;
1916 struct pcpu *pc;
1917
1918 thread0.td_kstack = proc0kstack;
1919 thread0.td_pcb = (struct pcb *)
1920 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
1921
	/*
	 * This may be done better later if it gets more high-level
	 * components in it.  If so, just link td->td_proc here.
	 */
1926 proc_linkup(&proc0, &ksegrp0, &thread0);
1927
1928 /*
1929 * Initialize DMAC
1930 */
1931 pc98_init_dmac();
1932
1933 metadata_missing = 0;
1934 if (bootinfo.bi_modulep) {
1935 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
1936 preload_bootstrap_relocate(KERNBASE);
1937 } else {
1938 metadata_missing = 1;
1939 }
1940 if (envmode == 1)
1941 kern_envp = static_env;
1942 else if (bootinfo.bi_envp)
1943 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
1944
	/* Init basic tunables, hz, etc. */
1946 init_param1();
1947
1948 /*
1949 * Make gdt memory segments. All segments cover the full 4GB
1950 * of address space and permissions are enforced at page level.
1951 */
1952 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
1953 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
1954 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
1955 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
1956 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
1957 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
1958
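	/*
	 * Base the private per-CPU segment on CPU 0's pcpu area and the
	 * initial TSS descriptor on the common TSS embedded in it.
	 */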
1959 pc = &__pcpu[0];
1960 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
1961 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
1962 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
1963
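	/*
	 * Convert the software segment descriptors to hardware format
	 * and load the new GDT.
	 */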
1964 for (x = 0; x < NGDT; x++)
1965 ssdtosd(&gdt_segs[x], &gdt[x].sd);
1966
1967 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
1968 r_gdt.rd_base = (int) gdt;
1969 lgdt(&r_gdt);
1970
1971 pcpu_init(pc, 0, sizeof(struct pcpu));
1972 PCPU_SET(prvspace, pc);
1973 PCPU_SET(curthread, &thread0);
1974 PCPU_SET(curpcb, thread0.td_pcb);
1975
1976 /*
1977 * Initialize mutexes.
1978 *
1979 * icu_lock: in order to allow an interrupt to occur in a critical
1980 * section, to set pcpu->ipending (etc...) properly, we
1981 * must be able to get the icu lock, so it can't be
1982 * under witness.
1983 */
1984 mutex_init();
1985 mtx_init(&clock_lock, "clk", NULL, MTX_SPIN);
1986 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
1987
1988 /* make ldt memory segments */
1989 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
1990 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
1991 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
1992 ssdtosd(&ldt_segs[x], &ldt[x].sd);
1993
1994 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
1995 lldt(_default_ldt);
1996 PCPU_SET(currentldt, _default_ldt);
1997
1998 /* exceptions */
1999 for (x = 0; x < NIDT; x++)
2000 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2001 GSEL(GCODE_SEL, SEL_KPL));
2002 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2003 GSEL(GCODE_SEL, SEL_KPL));
2004 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2005 GSEL(GCODE_SEL, SEL_KPL));
2006 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2007 GSEL(GCODE_SEL, SEL_KPL));
2008 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2009 GSEL(GCODE_SEL, SEL_KPL));
2010 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2011 GSEL(GCODE_SEL, SEL_KPL));
2012 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2013 GSEL(GCODE_SEL, SEL_KPL));
2014 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2015 GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
2018 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2019 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2020 GSEL(GCODE_SEL, SEL_KPL));
2021 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2022 GSEL(GCODE_SEL, SEL_KPL));
2023 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2024 GSEL(GCODE_SEL, SEL_KPL));
2025 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2026 GSEL(GCODE_SEL, SEL_KPL));
2027 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2028 GSEL(GCODE_SEL, SEL_KPL));
2029 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2030 GSEL(GCODE_SEL, SEL_KPL));
2031 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2032 GSEL(GCODE_SEL, SEL_KPL));
2033 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2034 GSEL(GCODE_SEL, SEL_KPL));
2035 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2036 GSEL(GCODE_SEL, SEL_KPL));
2037 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2038 GSEL(GCODE_SEL, SEL_KPL));
2039 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2040 GSEL(GCODE_SEL, SEL_KPL));
2041
2042 r_idt.rd_limit = sizeof(idt0) - 1;
2043 r_idt.rd_base = (int) idt;
2044 lidt(&r_idt);
2045
2046 /*
2047 * Initialize the console before we print anything out.
2048 */
2049 cninit();
2050
2051 if (metadata_missing)
2052 printf("WARNING: loader(8) metadata is missing!\n");
2053
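	/* Early setup of the 8259A-compatible (atpic) interrupt controllers. */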
2054 #ifdef DEV_ISA
2055 atpic_startup();
2056 #endif
2057
2058 #ifdef DDB
2059 ksym_start = bootinfo.bi_symtab;
2060 ksym_end = bootinfo.bi_esymtab;
2061 #endif
2062
2063 kdb_init();
2064
2065 #ifdef KDB
2066 if (boothowto & RB_KDB)
2067 kdb_enter("Boot flags requested debugger");
2068 #endif
2069
2070 finishidentcpu(); /* Final stage of CPU initialization */
2071 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2072 GSEL(GCODE_SEL, SEL_KPL));
2073 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2074 GSEL(GCODE_SEL, SEL_KPL));
2075 initializecpu(); /* Initialize CPU registers */
2076
2077 /* make an initial tss so cpu can get interrupt stack on syscall! */
2078 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2079 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2080 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
2081 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2082 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2083 private_tss = 0;
2084 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
2085 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
2086 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
2087 ltr(gsel_tss);
2088
2089 /* pointer to selector slot for %fs/%gs */
2090 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
2091
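	/*
	 * Set up a separate TSS for the double fault handler so that a
	 * double fault switches to its own known-good stack and page
	 * tables.
	 */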
2092 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2093 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2094 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2095 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2096 dblfault_tss.tss_cr3 = (int)IdlePTD;
2097 dblfault_tss.tss_eip = (int)dblfault_handler;
2098 dblfault_tss.tss_eflags = PSL_KERNEL;
2099 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2100 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2101 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2102 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2103 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2104
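	/*
	 * Initialize vm86 support, then size up physical memory and
	 * derive the remaining memory-dependent tunables.
	 */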
2105 vm86_initialize();
2106 getmemsize(first);
2107 init_param2(physmem);
2108
	/* now running on new page tables, configured, and u/iom is accessible */
2110
2111 /* Map the message buffer. */
2112 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
2113 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
2114
2115 msgbufinit(msgbufp, MSGBUF_SIZE);
2116
2117 /* make a call gate to reenter kernel with */
2118 gdp = &ldt[LSYS5CALLS_SEL].gd;
2119
2120 x = (int) &IDTVEC(lcall_syscall);
2121 gdp->gd_looffset = x;
2122 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
2123 gdp->gd_stkcpy = 1;
2124 gdp->gd_type = SDT_SYS386CGT;
2125 gdp->gd_dpl = SEL_UPL;
2126 gdp->gd_p = 1;
2127 gdp->gd_hioffset = x >> 16;
2128
2129 /* XXX does this work? */
2130 /* XXX yes! */
2131 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2132 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
2133
2134 /* transfer to user mode */
2135
2136 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
2137 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
2138
2139 /* setup proc 0's pcb */
2140 thread0.td_pcb->pcb_flags = 0; /* XXXKSE */
2141 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2142 thread0.td_pcb->pcb_ext = 0;
2143 thread0.td_frame = &proc0_tf;
2144 }
2145
2146 void
2147 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
2148 {
2149
2150 }
2151
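/*
 * Spin lock enter/exit helpers: interrupts are disabled on the first
 * nesting level and the saved flags are restored when the nesting count
 * drops back to zero.
 */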
2152 void
2153 spinlock_enter(void)
2154 {
2155 struct thread *td;
2156
2157 td = curthread;
2158 if (td->td_md.md_spinlock_count == 0)
2159 td->td_md.md_saved_flags = intr_disable();
2160 td->td_md.md_spinlock_count++;
2161 critical_enter();
2162 }
2163
2164 void
2165 spinlock_exit(void)
2166 {
2167 struct thread *td;
2168
2169 td = curthread;
2170 critical_exit();
2171 td->td_md.md_spinlock_count--;
2172 if (td->td_md.md_spinlock_count == 0)
2173 intr_restore(td->td_md.md_saved_flags);
2174 }
2175
2176 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2177 static void f00f_hack(void *unused);
2178 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL)
2179
2180 static void
2181 f00f_hack(void *unused)
2182 {
2183 struct gate_descriptor *new_idt;
2184 vm_offset_t tmp;
2185
2186 if (!has_f00f_bug)
2187 return;
2188
2189 GIANT_REQUIRED;
2190
2191 printf("Intel Pentium detected, installing workaround for F00F bug\n");
2192
2193 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
2194 if (tmp == 0)
2195 panic("kmem_alloc returned 0");
2196
2197 /* Put the problematic entry (#6) at the end of the lower page. */
2198 new_idt = (struct gate_descriptor*)
2199 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
2200 bcopy(idt, new_idt, sizeof(idt0));
2201 r_idt.rd_base = (u_int)new_idt;
2202 lidt(&r_idt);
2203 idt = new_idt;
2204 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
2205 VM_PROT_READ, FALSE) != KERN_SUCCESS)
2206 panic("vm_map_protect failed");
2207 }
#endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
2209
2210 /*
2211 * Construct a PCB from a trapframe. This is called from kdb_trap() where
2212 * we want to start a backtrace from the function that caused us to enter
2213 * the debugger. We have the context in the trapframe, but base the trace
2214 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
2215 * enough for a backtrace.
2216 */
2217 void
2218 makectx(struct trapframe *tf, struct pcb *pcb)
2219 {
2220
2221 pcb->pcb_edi = tf->tf_edi;
2222 pcb->pcb_esi = tf->tf_esi;
2223 pcb->pcb_ebp = tf->tf_ebp;
2224 pcb->pcb_ebx = tf->tf_ebx;
2225 pcb->pcb_eip = tf->tf_eip;
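	/*
	 * A trap from user mode pushed the user %esp into the frame; a
	 * trap from kernel mode did not push %esp/%ss, so the kernel
	 * stack pointer at trap time is just below the frame, 8 bytes
	 * short of the full frame size.
	 */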
2226 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
2227 }
2228
2229 int
2230 ptrace_set_pc(struct thread *td, u_long addr)
2231 {
2232
2233 td->td_frame->tf_eip = addr;
2234 return (0);
2235 }
2236
2237 int
2238 ptrace_single_step(struct thread *td)
2239 {
2240 td->td_frame->tf_eflags |= PSL_T;
2241 return (0);
2242 }
2243
2244 int
2245 ptrace_clear_single_step(struct thread *td)
2246 {
2247 td->td_frame->tf_eflags &= ~PSL_T;
2248 return (0);
2249 }
2250
2251 int
2252 fill_regs(struct thread *td, struct reg *regs)
2253 {
2254 struct pcb *pcb;
2255 struct trapframe *tp;
2256
2257 tp = td->td_frame;
2258 pcb = td->td_pcb;
2259 regs->r_fs = tp->tf_fs;
2260 regs->r_es = tp->tf_es;
2261 regs->r_ds = tp->tf_ds;
2262 regs->r_edi = tp->tf_edi;
2263 regs->r_esi = tp->tf_esi;
2264 regs->r_ebp = tp->tf_ebp;
2265 regs->r_ebx = tp->tf_ebx;
2266 regs->r_edx = tp->tf_edx;
2267 regs->r_ecx = tp->tf_ecx;
2268 regs->r_eax = tp->tf_eax;
2269 regs->r_eip = tp->tf_eip;
2270 regs->r_cs = tp->tf_cs;
2271 regs->r_eflags = tp->tf_eflags;
2272 regs->r_esp = tp->tf_esp;
2273 regs->r_ss = tp->tf_ss;
2274 regs->r_gs = pcb->pcb_gs;
2275 return (0);
2276 }
2277
2278 int
2279 set_regs(struct thread *td, struct reg *regs)
2280 {
2281 struct pcb *pcb;
2282 struct trapframe *tp;
2283
2284 tp = td->td_frame;
2285 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
2286 !CS_SECURE(regs->r_cs))
2287 return (EINVAL);
2288 pcb = td->td_pcb;
2289 tp->tf_fs = regs->r_fs;
2290 tp->tf_es = regs->r_es;
2291 tp->tf_ds = regs->r_ds;
2292 tp->tf_edi = regs->r_edi;
2293 tp->tf_esi = regs->r_esi;
2294 tp->tf_ebp = regs->r_ebp;
2295 tp->tf_ebx = regs->r_ebx;
2296 tp->tf_edx = regs->r_edx;
2297 tp->tf_ecx = regs->r_ecx;
2298 tp->tf_eax = regs->r_eax;
2299 tp->tf_eip = regs->r_eip;
2300 tp->tf_cs = regs->r_cs;
2301 tp->tf_eflags = regs->r_eflags;
2302 tp->tf_esp = regs->r_esp;
2303 tp->tf_ss = regs->r_ss;
2304 pcb->pcb_gs = regs->r_gs;
2305 return (0);
2306 }
2307
2308 #ifdef CPU_ENABLE_SSE
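/*
 * Convert an FXSAVE-format (struct savexmm) area into the legacy
 * FNSAVE (struct save87) layout expected by struct fpreg.
 */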
2309 static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
2316 int i;
2317
2318 bzero(sv_87, sizeof(*sv_87));
2319
2320 /* FPU control/status */
2321 penv_87->en_cw = penv_xmm->en_cw;
2322 penv_87->en_sw = penv_xmm->en_sw;
2323 penv_87->en_tw = penv_xmm->en_tw;
2324 penv_87->en_fip = penv_xmm->en_fip;
2325 penv_87->en_fcs = penv_xmm->en_fcs;
2326 penv_87->en_opcode = penv_xmm->en_opcode;
2327 penv_87->en_foo = penv_xmm->en_foo;
2328 penv_87->en_fos = penv_xmm->en_fos;
2329
2330 /* FPU registers */
2331 for (i = 0; i < 8; ++i)
2332 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
2333 }
2334
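/*
 * The reverse conversion: copy a legacy save87 area into an
 * FXSAVE-format savexmm area.
 */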
2335 static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
2342 int i;
2343
2344 /* FPU control/status */
2345 penv_xmm->en_cw = penv_87->en_cw;
2346 penv_xmm->en_sw = penv_87->en_sw;
2347 penv_xmm->en_tw = penv_87->en_tw;
2348 penv_xmm->en_fip = penv_87->en_fip;
2349 penv_xmm->en_fcs = penv_87->en_fcs;
2350 penv_xmm->en_opcode = penv_87->en_opcode;
2351 penv_xmm->en_foo = penv_87->en_foo;
2352 penv_xmm->en_fos = penv_87->en_fos;
2353
2354 /* FPU registers */
2355 for (i = 0; i < 8; ++i)
2356 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
2357 }
2358 #endif /* CPU_ENABLE_SSE */
2359
2360 int
2361 fill_fpregs(struct thread *td, struct fpreg *fpregs)
2362 {
2363 #ifdef CPU_ENABLE_SSE
2364 if (cpu_fxsr) {
2365 fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
2366 (struct save87 *)fpregs);
2367 return (0);
2368 }
2369 #endif /* CPU_ENABLE_SSE */
2370 bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
2371 return (0);
2372 }
2373
2374 int
2375 set_fpregs(struct thread *td, struct fpreg *fpregs)
2376 {
2377 #ifdef CPU_ENABLE_SSE
2378 if (cpu_fxsr) {
2379 set_fpregs_xmm((struct save87 *)fpregs,
2380 &td->td_pcb->pcb_save.sv_xmm);
2381 return (0);
2382 }
2383 #endif /* CPU_ENABLE_SSE */
2384 bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
2385 return (0);
2386 }
2387
2388 /*
2389 * Get machine context.
2390 */
2391 int
2392 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
2393 {
2394 struct trapframe *tp;
2395
2396 tp = td->td_frame;
2397
2398 PROC_LOCK(curthread->td_proc);
2399 mcp->mc_onstack = sigonstack(tp->tf_esp);
2400 PROC_UNLOCK(curthread->td_proc);
2401 mcp->mc_gs = td->td_pcb->pcb_gs;
2402 mcp->mc_fs = tp->tf_fs;
2403 mcp->mc_es = tp->tf_es;
2404 mcp->mc_ds = tp->tf_ds;
2405 mcp->mc_edi = tp->tf_edi;
2406 mcp->mc_esi = tp->tf_esi;
2407 mcp->mc_ebp = tp->tf_ebp;
2408 mcp->mc_isp = tp->tf_isp;
2409 mcp->mc_eflags = tp->tf_eflags;
2410 if (flags & GET_MC_CLEAR_RET) {
2411 mcp->mc_eax = 0;
2412 mcp->mc_edx = 0;
2413 mcp->mc_eflags &= ~PSL_C;
2414 } else {
2415 mcp->mc_eax = tp->tf_eax;
2416 mcp->mc_edx = tp->tf_edx;
2417 }
2418 mcp->mc_ebx = tp->tf_ebx;
2419 mcp->mc_ecx = tp->tf_ecx;
2420 mcp->mc_eip = tp->tf_eip;
2421 mcp->mc_cs = tp->tf_cs;
2422 mcp->mc_esp = tp->tf_esp;
2423 mcp->mc_ss = tp->tf_ss;
2424 mcp->mc_len = sizeof(*mcp);
2425 get_fpcontext(td, mcp);
2426 return (0);
2427 }
2428
2429 /*
2430 * Set machine context.
2431 *
2432 * However, we don't set any but the user modifiable flags, and we won't
2433 * touch the cs selector.
2434 */
2435 int
2436 set_mcontext(struct thread *td, const mcontext_t *mcp)
2437 {
2438 struct trapframe *tp;
2439 int eflags, ret;
2440
2441 tp = td->td_frame;
2442 if (mcp->mc_len != sizeof(*mcp))
2443 return (EINVAL);
2444 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
2445 (tp->tf_eflags & ~PSL_USERCHANGE);
2446 if ((ret = set_fpcontext(td, mcp)) == 0) {
2447 tp->tf_fs = mcp->mc_fs;
2448 tp->tf_es = mcp->mc_es;
2449 tp->tf_ds = mcp->mc_ds;
2450 tp->tf_edi = mcp->mc_edi;
2451 tp->tf_esi = mcp->mc_esi;
2452 tp->tf_ebp = mcp->mc_ebp;
2453 tp->tf_ebx = mcp->mc_ebx;
2454 tp->tf_edx = mcp->mc_edx;
2455 tp->tf_ecx = mcp->mc_ecx;
2456 tp->tf_eax = mcp->mc_eax;
2457 tp->tf_eip = mcp->mc_eip;
2458 tp->tf_eflags = eflags;
2459 tp->tf_esp = mcp->mc_esp;
2460 tp->tf_ss = mcp->mc_ss;
2461 td->td_pcb->pcb_gs = mcp->mc_gs;
2462 ret = 0;
2463 }
2464 return (ret);
2465 }
2466
2467 static void
2468 get_fpcontext(struct thread *td, mcontext_t *mcp)
2469 {
2470 #ifndef DEV_NPX
2471 mcp->mc_fpformat = _MC_FPFMT_NODEV;
2472 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
2473 #else
2474 union savefpu *addr;
2475
2476 /*
2477 * XXX mc_fpstate might be misaligned, since its declaration is not
2478 * unportabilized using __attribute__((aligned(16))) like the
2479 * declaration of struct savemm, and anyway, alignment doesn't work
2480 * for auto variables since we don't use gcc's pessimal stack
2481 * alignment. Work around this by abusing the spare fields after
2482 * mcp->mc_fpstate.
2483 *
2484 * XXX unpessimize most cases by only aligning when fxsave might be
2485 * called, although this requires knowing too much about
2486 * npxgetregs()'s internals.
2487 */
2488 addr = (union savefpu *)&mcp->mc_fpstate;
2489 if (td == PCPU_GET(fpcurthread) &&
2490 #ifdef CPU_ENABLE_SSE
2491 cpu_fxsr &&
2492 #endif
2493 ((uintptr_t)(void *)addr & 0xF)) {
2494 do
2495 addr = (void *)((char *)addr + 4);
2496 while ((uintptr_t)(void *)addr & 0xF);
2497 }
2498 mcp->mc_ownedfp = npxgetregs(td, addr);
2499 if (addr != (union savefpu *)&mcp->mc_fpstate) {
2500 bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
2501 bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
2502 }
2503 mcp->mc_fpformat = npxformat();
2504 #endif
2505 }
2506
2507 static int
2508 set_fpcontext(struct thread *td, const mcontext_t *mcp)
2509 {
2510 union savefpu *addr;
2511
2512 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
2513 return (0);
2514 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
2515 mcp->mc_fpformat != _MC_FPFMT_XMM)
2516 return (EINVAL);
2517 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
2518 /* We don't care what state is left in the FPU or PCB. */
2519 fpstate_drop(td);
2520 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
2521 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
2522 /* XXX align as above. */
2523 addr = (union savefpu *)&mcp->mc_fpstate;
2524 if (td == PCPU_GET(fpcurthread) &&
2525 #ifdef CPU_ENABLE_SSE
2526 cpu_fxsr &&
2527 #endif
2528 ((uintptr_t)(void *)addr & 0xF)) {
2529 do
2530 addr = (void *)((char *)addr + 4);
2531 while ((uintptr_t)(void *)addr & 0xF);
2532 bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
2533 }
2534 #ifdef DEV_NPX
2535 /*
2536 * XXX we violate the dubious requirement that npxsetregs()
2537 * be called with interrupts disabled.
2538 */
2539 npxsetregs(td, addr);
2540 #endif
2541 /*
2542 * Don't bother putting things back where they were in the
2543 * misaligned case, since we know that the caller won't use
2544 * them again.
2545 */
2546 } else
2547 return (EINVAL);
2548 return (0);
2549 }
2550
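/*
 * Drop any FPU state the thread currently owns so that the next FPU
 * access starts from freshly initialized state.
 */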
2551 static void
2552 fpstate_drop(struct thread *td)
2553 {
2554 register_t s;
2555
2556 s = intr_disable();
2557 #ifdef DEV_NPX
2558 if (PCPU_GET(fpcurthread) == td)
2559 npxdrop();
2560 #endif
2561 /*
2562 * XXX force a full drop of the npx. The above only drops it if we
2563 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
2564 *
2565 * XXX I don't much like npxgetregs()'s semantics of doing a full
2566 * drop. Dropping only to the pcb matches fnsave's behaviour.
2567 * We only need to drop to !PCB_INITDONE in sendsig(). But
2568 * sendsig() is the only caller of npxgetregs()... perhaps we just
2569 * have too many layers.
2570 */
2571 curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
2572 intr_restore(s);
2573 }
2574
2575 int
2576 fill_dbregs(struct thread *td, struct dbreg *dbregs)
2577 {
2578 struct pcb *pcb;
2579
2580 if (td == NULL) {
2581 dbregs->dr[0] = rdr0();
2582 dbregs->dr[1] = rdr1();
2583 dbregs->dr[2] = rdr2();
2584 dbregs->dr[3] = rdr3();
2585 dbregs->dr[4] = rdr4();
2586 dbregs->dr[5] = rdr5();
2587 dbregs->dr[6] = rdr6();
2588 dbregs->dr[7] = rdr7();
2589 } else {
2590 pcb = td->td_pcb;
2591 dbregs->dr[0] = pcb->pcb_dr0;
2592 dbregs->dr[1] = pcb->pcb_dr1;
2593 dbregs->dr[2] = pcb->pcb_dr2;
2594 dbregs->dr[3] = pcb->pcb_dr3;
2595 dbregs->dr[4] = 0;
2596 dbregs->dr[5] = 0;
2597 dbregs->dr[6] = pcb->pcb_dr6;
2598 dbregs->dr[7] = pcb->pcb_dr7;
2599 }
2600 return (0);
2601 }
2602
2603 int
2604 set_dbregs(struct thread *td, struct dbreg *dbregs)
2605 {
2606 struct pcb *pcb;
2607 int i;
2608 u_int32_t mask1, mask2;
2609
2610 if (td == NULL) {
2611 load_dr0(dbregs->dr[0]);
2612 load_dr1(dbregs->dr[1]);
2613 load_dr2(dbregs->dr[2]);
2614 load_dr3(dbregs->dr[3]);
2615 load_dr4(dbregs->dr[4]);
2616 load_dr5(dbregs->dr[5]);
2617 load_dr6(dbregs->dr[6]);
2618 load_dr7(dbregs->dr[7]);
2619 } else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP.
		 */
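		/*
		 * The masks walk the two-bit R/W and LEN fields in dr7
		 * bits 16-31, rejecting the 10b encoding, which is
		 * reserved/undefined on these CPUs.
		 */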
2626 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8;
2627 i++, mask1 <<= 2, mask2 <<= 2)
2628 if ((dbregs->dr[7] & mask1) == mask2)
2629 return (EINVAL);
2630
2631 pcb = td->td_pcb;
2632
2633 /*
2634 * Don't let a process set a breakpoint that is not within the
2635 * process's address space. If a process could do this, it
2636 * could halt the system by setting a breakpoint in the kernel
2637 * (if ddb was enabled). Thus, we need to check to make sure
2638 * that no breakpoints are being enabled for addresses outside
		 * the process's address space.
2640 *
2641 * XXX - what about when the watched area of the user's
2642 * address space is written into from within the kernel
2643 * ... wouldn't that still cause a breakpoint to be generated
2644 * from within kernel mode?
2645 */
2646
2647 if (dbregs->dr[7] & 0x3) {
2648 /* dr0 is enabled */
2649 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
2650 return (EINVAL);
2651 }
2652
2653 if (dbregs->dr[7] & (0x3<<2)) {
2654 /* dr1 is enabled */
2655 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
2656 return (EINVAL);
2657 }
2658
2659 if (dbregs->dr[7] & (0x3<<4)) {
2660 /* dr2 is enabled */
2661 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
2662 return (EINVAL);
2663 }
2664
2665 if (dbregs->dr[7] & (0x3<<6)) {
2666 /* dr3 is enabled */
2667 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
2668 return (EINVAL);
2669 }
2670
2671 pcb->pcb_dr0 = dbregs->dr[0];
2672 pcb->pcb_dr1 = dbregs->dr[1];
2673 pcb->pcb_dr2 = dbregs->dr[2];
2674 pcb->pcb_dr3 = dbregs->dr[3];
2675 pcb->pcb_dr6 = dbregs->dr[6];
2676 pcb->pcb_dr7 = dbregs->dr[7];
2677
2678 pcb->pcb_flags |= PCB_DBREGS;
2679 }
2680
2681 return (0);
2682 }
2683
2684 /*
2685 * Return > 0 if a hardware breakpoint has been hit, and the
2686 * breakpoint was in user space. Return 0, otherwise.
2687 */
2688 int
2689 user_dbreg_trap(void)
2690 {
2691 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
2692 u_int32_t bp; /* breakpoint bits extracted from dr6 */
2693 int nbp; /* number of breakpoints that triggered */
2694 caddr_t addr[4]; /* breakpoint addresses */
2695 int i;
2696
2697 dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * None of the local or global enable bits (L0-G3) in
		 * the dr7 register are set, thus the trap couldn't
		 * have been caused by the hardware debug registers.
		 */
		return (0);
	}
2706
2707 nbp = 0;
2708 dr6 = rdr6();
2709 bp = dr6 & 0x0000000f;
2710
	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers.
		 */
		return (0);
	}
2718
	/*
	 * At least one of the breakpoints was hit; check which ones
	 * and whether any of them are user space addresses.
	 */
2723
2724 if (bp & 0x01) {
2725 addr[nbp++] = (caddr_t)rdr0();
2726 }
2727 if (bp & 0x02) {
2728 addr[nbp++] = (caddr_t)rdr1();
2729 }
2730 if (bp & 0x04) {
2731 addr[nbp++] = (caddr_t)rdr2();
2732 }
2733 if (bp & 0x08) {
2734 addr[nbp++] = (caddr_t)rdr3();
2735 }
2736
	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return (nbp);
		}
	}
2746
2747 /*
2748 * None of the breakpoints are in user space.
2749 */
	return (0);
2751 }
2752
2753 #ifdef KDB
2754
2755 /*
2756 * Provide inb() and outb() as functions. They are normally only
2757 * available as macros calling inlined functions, thus cannot be
2758 * called from the debugger.
2759 *
2760 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
2761 */
2762
2763 #undef inb
2764 #undef outb
2765
2766 /* silence compiler warnings */
2767 u_char inb(u_int);
2768 void outb(u_int, u_char);
2769
2770 u_char
2771 inb(u_int port)
2772 {
2773 u_char data;
2774 /*
2775 * We use %%dx and not %1 here because i/o is done at %dx and not at
2776 * %edx, while gcc generates inferior code (movw instead of movl)
2777 * if we tell it to load (u_short) port.
2778 */
2779 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
2780 return (data);
2781 }
2782
2783 void
2784 outb(u_int port, u_char data)
2785 {
2786 u_char al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
2793 al = data;
2794 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
2795 }
2796
2797 #endif /* KDB */