/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: releng/5.1/sys/i386/i386/machdep.c 114983 2003-05-13 20:36:02Z jhb $
 */

#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
#include "opt_swtch.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/callout.h>
#include <sys/msgbuf.h>
#include <sys/sched.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/exec.h>
#include <sys/cons.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/reg.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/bootinfo.h>
#include <machine/md_var.h>
#include <machine/pc/bios.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#include <machine/proc.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/privatespace.h>
#include <machine/smp.h>
#endif

#include <i386/isa/icu.h>
#include <i386/isa/intr_machdep.h>
#include <isa/rtc.h>
#include <machine/vm86.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>

extern void init386(int first);
extern void dblfault_handler(void);

extern void printcpuinfo(void);	/* XXX header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

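/*
 * CS_SECURE() is true when a code selector's privilege level is user
 * (ring 3); EFL_SECURE() is true when two eflags values differ only in
 * bits that user mode is allowed to change.
 */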
#define CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

#if !defined(CPU_ENABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif
#if defined(CPU_DISABLE_SSE)
#undef CPU_ENABLE_SSE
#endif

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static int  set_fpcontext(struct thread *td, const mcontext_t *mcp);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
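/* Register cpu_startup() to run first during the SI_SUB_CPU boot stage. */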
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

int	_udatasel, _ucodesel;
u_int	atdevbase;

#if defined(SWTCH_OPTIM_STATS)
int stupid_switch;
SYSCTL_INT(_debug, OID_AUTO, stupid_switch,
	CTLFLAG_RW, &stupid_switch, 0, "");
int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RW, &swtch_optim_stats, 0, "");
int tlb_flush_count;
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RW, &tlb_flush_count, 0, "");
int lazy_flush_count;
SYSCTL_INT(_debug, OID_AUTO, lazy_flush_count,
	CTLFLAG_RW, &lazy_flush_count, 0, "");
int lazy_flush_fixup;
SYSCTL_INT(_debug, OID_AUTO, lazy_flush_fixup,
	CTLFLAG_RW, &lazy_flush_fixup, 0, "");
#ifdef SMP
int lazy_flush_smpfixup;
SYSCTL_INT(_debug, OID_AUTO, lazy_flush_smpfixup,
	CTLFLAG_RW, &lazy_flush_smpfixup, 0, "");
int lazy_flush_smpipi;
SYSCTL_INT(_debug, OID_AUTO, lazy_flush_smpipi,
	CTLFLAG_RW, &lazy_flush_smpipi, 0, "");
int lazy_flush_smpbadcr3;
SYSCTL_INT(_debug, OID_AUTO, lazy_flush_smpbadcr3,
	CTLFLAG_RW, &lazy_flush_smpbadcr3, 0, "");
int lazy_flush_smpmiss;
SYSCTL_INT(_debug, OID_AUTO, lazy_flush_smpmiss,
	CTLFLAG_RW, &lazy_flush_smpmiss, 0, "");
#endif
#endif
#ifdef LAZY_SWITCH
int lazy_flush_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, lazy_flush_enable,
	CTLFLAG_RW, &lazy_flush_enable, 0, "");
#endif

int cold = 1;

#ifdef COMPAT_43
static void osendsig(sig_t catcher, int sig, sigset_t *mask, u_long code);
#endif
#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, int sig, sigset_t *mask,
    u_long code);
#endif

long Maxmem = 0;

vm_paddr_t phys_avail[10];

/* Must be 2 less so a 0,0 entry pair can mark the end of the chunks. */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

struct kva_md_info kmi;

static struct trapframe proc0_tf;
#ifndef SMP
static struct pcpu __pcpu;
#endif

struct mtx icu_lock;

static void
cpu_startup(dummy)
	void *dummy;
{
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem),
	    ptoa((uintmax_t)Maxmem) / 1048576);
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

#ifndef SMP
	/* For SMP, we delay the cpu_setregs() until after SMP startup. */
	cpu_setregs();
#endif
}

/*
 * Send an interrupt to a process.
 *
 * The stack is set up so that the sigcode stored at
 * its top calls the handler routine, followed by a
 * kernel call to the sigreturn routine below.  After
 * sigreturn resets the signal mask, the stack, and the
 * frame pointer, it returns to the user-specified
 * pc and psl.
 */
#ifdef COMPAT_43
static void
osendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_eflags &= ~PSL_T;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = p->p_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));

	/* Allocate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_err;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
	regs->tf_eflags &= ~PSL_T;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif	/* COMPAT_FREEBSD4 */

void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
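	/*
	 * Dispatch to an older compatibility variant if the handler was
	 * installed under the FreeBSD 4 or 4.3BSD signal ABI.
	 */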
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, sig, mask, code);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, sig, mask, code);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = p->p_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext);
	fpstate_drop(td);

	/* Allocate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct sigframe);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_esp - sizeof(struct sigframe);
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_err;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_eflags &= ~PSL_T;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to clean up state after a signal
 * has been taken.  Reset the signal mask and
 * stack state from the context left by sendsig (above).
 * Return to the previous pc and psl as specified by
 * the context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
#ifdef COMPAT_43
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	struct proc *p = td->td_proc;
	int eflags, error;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(td, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			trapsignal(td, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (scp->sc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	SIGSETOLD(td->td_sigmask, scp->sc_mask);
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * MPSAFE
 */
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
{
	struct ucontext4 uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	const struct ucontext4 *ucp;
	int cs, eflags, error;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(td, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
			trapsignal(td, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (ucp->uc_mcontext.mc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	td->td_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}
#endif	/* COMPAT_FREEBSD4 */

/*
 * MPSAFE
 */
int
sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	const ucontext_t *ucp;
	int cs, eflags, error, ret;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(td, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(td, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}

		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (ucp->uc_mcontext.mc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	td->td_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
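	/* Wait in HLT forever; any stray interrupt just re-enters the loop. */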
	for (;;)
		__asm__ ("hlt");
}

/*
 * Hook to idle the CPU when possible.  In the SMP case we default to
 * off because a halted cpu will not currently pick up a new thread in the
 * run queue until the next timer tick.  If turned on this will result in
 * approximately a 4.2% loss in real time performance in buildworld tests
 * (but improves user and sys times oddly enough), and saves approximately
 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
 *
 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
 * Then we can have our cake and eat it too.
 *
 * XXX I'm turning it on for SMP as well by default for now.  It seems to
 * help lock contention somewhat, and this is critical for HTT. -Peter
 */
static int	cpu_idle_hlt = 1;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");

/*
 * Note that we have to be careful here to avoid a race between checking
 * sched_runnable() and actually halting.  If we don't do this, we may waste
 * the time between calling hlt and the next interrupt even though there
 * is a runnable process.
 */
void
cpu_idle(void)
{

#ifdef SMP
	if (mp_grab_cpu_hlt())
		return;
#endif

	if (cpu_idle_hlt) {
		disable_intr();
		if (sched_runnable()) {
			enable_intr();
		} else {
			/*
			 * We must absolutely guarantee that hlt is the
			 * very next instruction after sti, or else we
			 * introduce a timing window.
			 */
			__asm __volatile("sti; hlt");
		}
	}
}

/*
 * Clear registers on exec
 */
void
exec_setregs(td, entry, stack, ps_strings)
	struct thread *td;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator).  This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise.  It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

	/* Initialize the npx (if any) for the current process. */
	/*
	 * XXX the above load_cr0() also initializes it and is a layering
	 * violation if NPX is configured.  It drops the npx partially
	 * and this would be fatal if we were interrupted now, and decided
	 * to force the state to the pcb, and checked the invariant
	 * (CR0_TS clear) if and only if PCPU_GET(fpcurthread) != NULL).
	 * ALL of this can happen except the check.  The check used to
	 * happen and be fatal later when we didn't complete the drop
	 * before returning to user mode.  This should be fixed properly
	 * soon.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();
#ifdef SMP
	cr0 |= CR0_NE;			/* Done by npxinit() */
#endif
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
#ifndef I386_CPU
	cr0 |= CR0_WP | CR0_AM;
#endif
	load_cr0(cr0);
	load_gs(_udatasel);
}

static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
	    req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
    &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
    CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
    CTLFLAG_RD, &bootinfo, bootinfo, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
    CTLFLAG_RW, &wall_cmos_clock, 0, "");

u_long bootdev;		/* not a dev_t - encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in dev_t format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#ifdef SMP
/* table descriptors - used to load tables by the microprocessor */
struct region_descriptor r_gdt, r_idt;
#endif

int private_tss;			/* flag indicating private tss */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];
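/*
 * The double fault handler runs on its own TSS with this dedicated stack,
 * so it can be taken even when the normal kernel stack is unusable.
 */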

extern struct user *proc0uarea;
extern vm_offset_t proc0kstack;


/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	1 Code Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	2 Data Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPRIV_SEL	3 SMP Per-Processor Private Data Descriptor */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	4 Proc 0 Tss Descriptor */
{	0x0,			/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GLDT_SEL	5 LDT Descriptor */
{	(int) ldt,		/* segment base address */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GUSERLDT_SEL	6 User LDT Descriptor per process */
{	(int) ldt,		/* segment base address */
	(512 * sizeof(union descriptor)-1),	/* length */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GTGATE_SEL	7 Null Descriptor - Placeholder */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	0x400,			/* segment base address */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPANIC_SEL	9 Panic Tss Descriptor */
{	(int) &dblfault_tss,	/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

static struct soft_segment_descriptor ldt_segs[] = {
/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* Code Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* Data Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func) >> 16;
}

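/* IDTVEC(name) expands to the assembler entry symbol, e.g. IDTVEC(div) -> Xdiv. */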
#define IDTVEC(name)	__CONCAT(X,name)

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{
	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

#define PHYSMAP_SIZE	(2 * 8)
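/* physmap[] holds up to 8 (base, end) pairs, two vm_paddr_t entries each. */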

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(int first)
{
	int i, physmap_idx, pa_indx;
	int hasbrokenint12;
	u_int basemem, extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_paddr_t pa, physmap[PHYSMAP_SIZE];
	pt_entry_t *pte;
	char *cp;
	struct bios_smap *smap;

	hasbrokenint12 = 0;
	TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
	bzero(&vmf, sizeof(struct vm86frame));
	bzero(physmap, sizeof(physmap));
	basemem = 0;

	/*
	 * Some newer BIOSes have a broken INT 12H implementation that
	 * causes a kernel panic immediately.  In this case, we need to
	 * scan the SMAP with INT 15:E820 first, then determine the base
	 * memory size.
	 */
	if (hasbrokenint12) {
		goto int15e820;
	}

	/*
	 * Perform "base memory" related probes & setup
	 */
	vm86_intcall(0x12, &vmf);
	basemem = vmf.vmf_ax;
	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
			basemem);
		basemem = 640;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is a `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE)
		pmap_kenter(KERNBASE + pa, pa);

	/*
	 * if basemem != 640, map pages r/w into vm86 page table so
	 * that the bios can scribble on it.
	 */
	pte = (pt_entry_t *)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;

int15e820:
	/*
	 * map page 1 R/W into the kernel page table so we can use it
	 * as a buffer.  The kernel will unmap this page later.
	 */
	pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);

	/*
	 * get memory map with INT 15:E820
	 */
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%016llx len=%016llx\n",
			    smap->type, smap->base, smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

#ifndef PAE
		if (smap->base >= 0xffffffff) {
			printf("%uK of memory above 4GB ignored\n",
			    (u_int)(smap->length / 1024));
			goto next_run;
		}
#endif

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run: ;
	} while (vmf.vmf_ebx != 0);

	/*
	 * Perform "base memory" related probes & setup based on SMAP
	 */
	if (basemem == 0) {
		for (i = 0; i <= physmap_idx; i += 2) {
			if (physmap[i] == 0x00000000) {
				basemem = physmap[i + 1] / 1024;
				break;
			}
		}

		if (basemem == 0) {
			basemem = 640;
		}

		if (basemem > 640) {
			printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
				basemem);
			basemem = 640;
		}

		for (pa = trunc_page(basemem * 1024);
		     pa < ISA_HOLE_START; pa += PAGE_SIZE)
			pmap_kenter(KERNBASE + pa, pa);

		pte = (pt_entry_t *)vm86paddr;
		for (i = basemem / 4; i < 160; i++)
			pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
	}

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed above, try memory map with INT 15:E801
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1] / 1024);

	/* look for the MP hardware - needed for apic addresses */
	i386_mp_probe();
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);
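	/* MAXMEM is specified in kilobytes; Maxmem counts 4K pages. */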
#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	/*
	 * hw.physmem is a size in bytes; we also allow k, m, and g suffixes
	 * for the appropriate modifiers.  This overrides MAXMEM.
	 */
	if ((cp = getenv("hw.physmem")) != NULL) {
		u_int64_t AllowMem, sanity;
		char *ep;

		sanity = AllowMem = strtouq(cp, &ep, 0);
		if ((ep != cp) && (*ep != 0)) {
			switch (*ep) {
			case 'g':
			case 'G':
				AllowMem <<= 10;
				/* FALLTHROUGH */
			case 'm':
			case 'M':
				AllowMem <<= 10;
				/* FALLTHROUGH */
			case 'k':
			case 'K':
				AllowMem <<= 10;
				break;
			default:
				AllowMem = sanity = 0;
			}
1767 if (AllowMem < sanity)
1768 AllowMem = 0;
1769 }
1770 if (AllowMem == 0)
1771 printf("Ignoring invalid memory size of '%s'\n", cp);
1772 else
1773 Maxmem = atop(AllowMem);
1774 freeenv(cp);
1775 }
1776
1777 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1778 (boothowto & RB_VERBOSE))
1779 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1780
1781 /*
1782 * If Maxmem has been increased beyond what the system has detected,
1783 * extend the last memory segment to the new limit.
1784 */
1785 if (atop(physmap[physmap_idx + 1]) < Maxmem)
1786 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
1787
1788 /* call pmap initialization to make new kernel address space */
1789 pmap_bootstrap(first, 0);
1790
1791 /*
1792 * Size up each available chunk of physical memory.
1793 */
1794 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1795 pa_indx = 0;
1796 phys_avail[pa_indx++] = physmap[0];
1797 phys_avail[pa_indx] = physmap[0];
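	/*
	 * phys_avail[] accumulates (start, end) pairs of usable pages;
	 * pa_indx tracks the end entry of the chunk currently being
	 * grown, and the statically-zeroed tail of the array terminates
	 * the list.
	 */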
1798 pte = CMAP1;
1799
1800 /*
1801 * physmap is in bytes, so when converting to page boundaries,
1802 * round up the start address and round down the end address.
1803 */
1804 for (i = 0; i <= physmap_idx; i += 2) {
1805 vm_paddr_t end;
1806
1807 end = ptoa((vm_paddr_t)Maxmem);
1808 if (physmap[i + 1] < end)
1809 end = trunc_page(physmap[i + 1]);
1810 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1811 int tmp, page_bad;
1812 int *ptr = (int *)CADDR1;
1813
1814 /*
1815 * block out kernel memory as not available.
1816 */
1817 if (pa >= 0x100000 && pa < first)
1818 continue;
1819
1820 page_bad = FALSE;
1821
			/*
			 * map page into kernel: valid, read/write, non-cacheable
			 */
1825 *pte = pa | PG_V | PG_RW | PG_N;
1826 invltlb();
1827
1828 tmp = *(int *)ptr;
1829 /*
1830 * Test for alternating 1's and 0's
1831 */
1832 *(volatile int *)ptr = 0xaaaaaaaa;
1833 if (*(volatile int *)ptr != 0xaaaaaaaa) {
1834 page_bad = TRUE;
1835 }
1836 /*
1837 * Test for alternating 0's and 1's
1838 */
1839 *(volatile int *)ptr = 0x55555555;
1840 if (*(volatile int *)ptr != 0x55555555) {
1841 page_bad = TRUE;
1842 }
1843 /*
1844 * Test for all 1's
1845 */
1846 *(volatile int *)ptr = 0xffffffff;
1847 if (*(volatile int *)ptr != 0xffffffff) {
1848 page_bad = TRUE;
1849 }
1850 /*
1851 * Test for all 0's
1852 */
1853 *(volatile int *)ptr = 0x0;
1854 if (*(volatile int *)ptr != 0x0) {
1855 page_bad = TRUE;
1856 }
1857 /*
1858 * Restore original value.
1859 */
1860 *(int *)ptr = tmp;
1861
1862 /*
1863 * Adjust array of valid/good pages.
1864 */
1865 if (page_bad == TRUE) {
1866 continue;
1867 }
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that the recorded "end" points one page
			 * past the last good page, making the range
			 * >= start and < end.
			 * If we're also doing a speculative memory test
			 * and we're at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
1879 if (phys_avail[pa_indx] == pa) {
1880 phys_avail[pa_indx] += PAGE_SIZE;
1881 } else {
1882 pa_indx++;
1883 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
1884 printf(
1885 "Too many holes in the physical address space, giving up\n");
1886 pa_indx--;
1887 break;
1888 }
1889 phys_avail[pa_indx++] = pa; /* start */
1890 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
1891 }
1892 physmem++;
1893 }
1894 }
1895 *pte = 0;
1896 invltlb();
1897
1898 /*
1899 * XXX
1900 * The last chunk must contain at least one page plus the message
1901 * buffer to avoid complicating other code (message buffer address
1902 * calculation, etc.).
1903 */
1904 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
1905 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
1906 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
1907 phys_avail[pa_indx--] = 0;
1908 phys_avail[pa_indx--] = 0;
1909 }
1910
1911 Maxmem = atop(phys_avail[pa_indx]);
1912
1913 /* Trim off space for the message buffer. */
1914 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
1915
1916 avail_end = phys_avail[pa_indx];
1917 }
1918
void
init386(int first)
{
1923 struct gate_descriptor *gdp;
1924 int gsel_tss, metadata_missing, off, x;
1925 #ifndef SMP
	/* table descriptors - used to load tables by the microprocessor */
1927 struct region_descriptor r_gdt, r_idt;
1928 #endif
1929 struct pcpu *pc;
1930
1931 proc0.p_uarea = proc0uarea;
1932 thread0.td_kstack = proc0kstack;
1933 thread0.td_pcb = (struct pcb *)
1934 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
1935 atdevbase = ISA_HOLE_START + KERNBASE;
1936
	/*
	 * This may be done better later if it gets more high-level
	 * components in it.  If so, just link td->td_proc here.
	 */
1941 proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
1942
1943 metadata_missing = 0;
1944 if (bootinfo.bi_modulep) {
1945 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
1946 preload_bootstrap_relocate(KERNBASE);
1947 } else {
1948 metadata_missing = 1;
1949 }
1950 if (envmode == 1)
1951 kern_envp = static_env;
1952 else if (bootinfo.bi_envp)
1953 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
1954
1955 /* Init basic tunables, hz etc */
1956 init_param1();
1957
	/*
	 * Make GDT memory segments.  The code segment goes up to the end
	 * of the page containing etext; the data segment goes to the end
	 * of the address space.
	 */
1963 /*
1964 * XXX text protection is temporarily (?) disabled. The limit was
1965 * i386_btop(round_page(etext)) - 1.
1966 */
1967 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
1968 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
1969 #ifdef SMP
1970 pc = &SMP_prvspace[0].pcpu;
1971 gdt_segs[GPRIV_SEL].ssd_limit =
1972 atop(sizeof(struct privatespace) - 1);
1973 #else
1974 pc = &__pcpu;
1975 gdt_segs[GPRIV_SEL].ssd_limit =
1976 atop(sizeof(struct pcpu) - 1);
1977 #endif
1978 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
1979 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
1980
1981 for (x = 0; x < NGDT; x++)
1982 ssdtosd(&gdt_segs[x], &gdt[x].sd);
1983
1984 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
1985 r_gdt.rd_base = (int) gdt;
1986 lgdt(&r_gdt);
1987
1988 pcpu_init(pc, 0, sizeof(struct pcpu));
1989 PCPU_SET(prvspace, pc);
1990 PCPU_SET(curthread, &thread0);
1991
1992 /*
1993 * Initialize mutexes.
1994 *
1995 * icu_lock: in order to allow an interrupt to occur in a critical
1996 * section, to set pcpu->ipending (etc...) properly, we
1997 * must be able to get the icu lock, so it can't be
1998 * under witness.
1999 */
2000 mutex_init();
2001 mtx_init(&clock_lock, "clk", NULL, MTX_SPIN | MTX_RECURSE);
2002 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
2003
2004 /* make ldt memory segments */
2005 /*
2006 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it
2007 * should be spelled ...MAX_USER...
2008 */
2009 ldt_segs[LUCODE_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
2010 ldt_segs[LUDATA_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
2011 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2012 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2013
2014 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2015 lldt(_default_ldt);
2016 PCPU_SET(currentldt, _default_ldt);
2017
2018 /* exceptions */
2019 for (x = 0; x < NIDT; x++)
2020 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2021 GSEL(GCODE_SEL, SEL_KPL));
2022 setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2023 GSEL(GCODE_SEL, SEL_KPL));
2024 setidt(1, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2025 GSEL(GCODE_SEL, SEL_KPL));
2026 setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL,
2027 GSEL(GCODE_SEL, SEL_KPL));
2028 setidt(3, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2029 GSEL(GCODE_SEL, SEL_KPL));
2030 setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2031 GSEL(GCODE_SEL, SEL_KPL));
2032 setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2033 GSEL(GCODE_SEL, SEL_KPL));
2034 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2035 GSEL(GCODE_SEL, SEL_KPL));
	setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
2038 setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2039 setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2040 GSEL(GCODE_SEL, SEL_KPL));
2041 setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2042 GSEL(GCODE_SEL, SEL_KPL));
2043 setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2044 GSEL(GCODE_SEL, SEL_KPL));
2045 setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2046 GSEL(GCODE_SEL, SEL_KPL));
2047 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2048 GSEL(GCODE_SEL, SEL_KPL));
2049 setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2050 GSEL(GCODE_SEL, SEL_KPL));
2051 setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2052 GSEL(GCODE_SEL, SEL_KPL));
2053 setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2054 GSEL(GCODE_SEL, SEL_KPL));
2055 setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2056 GSEL(GCODE_SEL, SEL_KPL));
2057 setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2058 GSEL(GCODE_SEL, SEL_KPL));
2059 setidt(19, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2060 GSEL(GCODE_SEL, SEL_KPL));
2061 setidt(0x80, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2062 GSEL(GCODE_SEL, SEL_KPL));
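	/*
	 * A note on the gate types above: SDT_SYS386IGT installs an
	 * interrupt gate (interrupts disabled on entry) while
	 * SDT_SYS386TGT installs a trap gate; the SEL_UPL DPL on
	 * vectors 3, 4 and 0x80 is what allows user code to reach
	 * those gates with int.
	 */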
2063
2064 r_idt.rd_limit = sizeof(idt0) - 1;
2065 r_idt.rd_base = (int) idt;
2066 lidt(&r_idt);
2067
2068 /*
2069 * Initialize the console before we print anything out.
2070 */
2071 cninit();
2072
2073 if (metadata_missing)
2074 printf("WARNING: loader(8) metadata is missing!\n");
2075
2076 #ifdef DEV_ISA
2077 isa_defaultirq();
2078 #endif
2079
2080 #ifdef DDB
2081 kdb_init();
2082 if (boothowto & RB_KDB)
2083 Debugger("Boot flags requested debugger");
2084 #endif
2085
2086 finishidentcpu(); /* Final stage of CPU initialization */
2087 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2088 GSEL(GCODE_SEL, SEL_KPL));
2089 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2090 GSEL(GCODE_SEL, SEL_KPL));
2091 initializecpu(); /* Initialize CPU registers */
2092
2093 /* make an initial tss so cpu can get interrupt stack on syscall! */
2094 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2095 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2096 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
2097 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2098 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2099 private_tss = 0;
2100 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
2101 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
2102 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
2103 ltr(gsel_tss);
2104
2105 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2106 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2107 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2108 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2109 #ifdef PAE
2110 dblfault_tss.tss_cr3 = (int)IdlePDPT;
2111 #else
2112 dblfault_tss.tss_cr3 = (int)IdlePTD;
2113 #endif
2114 dblfault_tss.tss_eip = (int)dblfault_handler;
2115 dblfault_tss.tss_eflags = PSL_KERNEL;
2116 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2117 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2118 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2119 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2120 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2121
2122 vm86_initialize();
2123 getmemsize(first);
2124 init_param2(physmem);
2125
	/* now running on new page tables, configured, and u/iom is accessible */
2127
2128 /* Map the message buffer. */
2129 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
2130 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
2131
2132 msgbufinit(msgbufp, MSGBUF_SIZE);
2133
2134 /* make a call gate to reenter kernel with */
2135 gdp = &ldt[LSYS5CALLS_SEL].gd;
2136
2137 x = (int) &IDTVEC(lcall_syscall);
2138 gdp->gd_looffset = x;
2139 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
2140 gdp->gd_stkcpy = 1;
2141 gdp->gd_type = SDT_SYS386CGT;
2142 gdp->gd_dpl = SEL_UPL;
2143 gdp->gd_p = 1;
2144 gdp->gd_hioffset = x >> 16;
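	/*
	 * The 32-bit handler address is split across the gate
	 * descriptor: gd_looffset takes the low 16 bits and
	 * gd_hioffset the high 16 bits, hence storing x once as-is
	 * and once shifted right by 16.
	 */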
2145
2146 /* XXX does this work? */
2147 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2148 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
2149
2150 /* transfer to user mode */
2151
2152 _ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
2153 _udatasel = LSEL(LUDATA_SEL, SEL_UPL);
2154
2155 /* setup proc 0's pcb */
2156 thread0.td_pcb->pcb_flags = 0; /* XXXKSE */
2157 #ifdef PAE
2158 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
2159 #else
2160 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2161 #endif
2162 thread0.td_pcb->pcb_ext = 0;
2163 thread0.td_frame = &proc0_tf;
2164 }
2165
2166 void
2167 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
2168 {
2169 }
2170
2171 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2172 static void f00f_hack(void *unused);
2173 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
2174
static void
f00f_hack(void *unused)
{
2177 struct gate_descriptor *new_idt;
2178 #ifndef SMP
2179 struct region_descriptor r_idt;
2180 #endif
2181 vm_offset_t tmp;
2182
2183 if (!has_f00f_bug)
2184 return;
2185
2186 GIANT_REQUIRED;
2187
2188 printf("Intel Pentium detected, installing workaround for F00F bug\n");
2189
2190 r_idt.rd_limit = sizeof(idt0) - 1;
2191
2192 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
2193 if (tmp == 0)
2194 panic("kmem_alloc returned 0");
2195 if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0)
2196 panic("kmem_alloc returned non-page-aligned memory");
2197 /* Put the first seven entries in the lower page */
2198 new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8));
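	/*
	 * Seven 8-byte descriptors occupy the last 56 bytes of the
	 * first page, so vectors 0-6 end exactly at the page boundary
	 * and the rest of the table lands on the second, writable page.
	 * As this workaround is usually described, keeping the low
	 * vectors on a read-only page turns the F00F lockup into a
	 * page fault that the trap handler can recover from.
	 */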
2199 bcopy(idt, new_idt, sizeof(idt0));
2200 r_idt.rd_base = (int)new_idt;
2201 lidt(&r_idt);
2202 idt = new_idt;
2203 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
2204 VM_PROT_READ, FALSE) != KERN_SUCCESS)
2205 panic("vm_map_protect failed");
2206 return;
2207 }
#endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
2209
2210 int
2211 ptrace_set_pc(struct thread *td, unsigned long addr)
2212 {
2213 td->td_frame->tf_eip = addr;
2214 return (0);
2215 }
2216
2217 int
2218 ptrace_single_step(struct thread *td)
2219 {
2220 td->td_frame->tf_eflags |= PSL_T;
2221 return (0);
2222 }
2223
2224 int
2225 fill_regs(struct thread *td, struct reg *regs)
2226 {
2227 struct pcb *pcb;
2228 struct trapframe *tp;
2229
2230 tp = td->td_frame;
2231 regs->r_fs = tp->tf_fs;
2232 regs->r_es = tp->tf_es;
2233 regs->r_ds = tp->tf_ds;
2234 regs->r_edi = tp->tf_edi;
2235 regs->r_esi = tp->tf_esi;
2236 regs->r_ebp = tp->tf_ebp;
2237 regs->r_ebx = tp->tf_ebx;
2238 regs->r_edx = tp->tf_edx;
2239 regs->r_ecx = tp->tf_ecx;
2240 regs->r_eax = tp->tf_eax;
2241 regs->r_eip = tp->tf_eip;
2242 regs->r_cs = tp->tf_cs;
2243 regs->r_eflags = tp->tf_eflags;
2244 regs->r_esp = tp->tf_esp;
2245 regs->r_ss = tp->tf_ss;
2246 pcb = td->td_pcb;
2247 regs->r_gs = pcb->pcb_gs;
2248 return (0);
2249 }
2250
2251 int
2252 set_regs(struct thread *td, struct reg *regs)
2253 {
2254 struct pcb *pcb;
2255 struct trapframe *tp;
2256
2257 tp = td->td_frame;
2258 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
2259 !CS_SECURE(regs->r_cs))
2260 return (EINVAL);
2261 tp->tf_fs = regs->r_fs;
2262 tp->tf_es = regs->r_es;
2263 tp->tf_ds = regs->r_ds;
2264 tp->tf_edi = regs->r_edi;
2265 tp->tf_esi = regs->r_esi;
2266 tp->tf_ebp = regs->r_ebp;
2267 tp->tf_ebx = regs->r_ebx;
2268 tp->tf_edx = regs->r_edx;
2269 tp->tf_ecx = regs->r_ecx;
2270 tp->tf_eax = regs->r_eax;
2271 tp->tf_eip = regs->r_eip;
2272 tp->tf_cs = regs->r_cs;
2273 tp->tf_eflags = regs->r_eflags;
2274 tp->tf_esp = regs->r_esp;
2275 tp->tf_ss = regs->r_ss;
2276 pcb = td->td_pcb;
2277 pcb->pcb_gs = regs->r_gs;
2278 return (0);
2279 }
2280
2281 #ifdef CPU_ENABLE_SSE
static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
2287 register struct env87 *penv_87 = &sv_87->sv_env;
2288 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2289 int i;
2290
2291 bzero(sv_87, sizeof(*sv_87));
2292
2293 /* FPU control/status */
2294 penv_87->en_cw = penv_xmm->en_cw;
2295 penv_87->en_sw = penv_xmm->en_sw;
2296 penv_87->en_tw = penv_xmm->en_tw;
2297 penv_87->en_fip = penv_xmm->en_fip;
2298 penv_87->en_fcs = penv_xmm->en_fcs;
2299 penv_87->en_opcode = penv_xmm->en_opcode;
2300 penv_87->en_foo = penv_xmm->en_foo;
2301 penv_87->en_fos = penv_xmm->en_fos;
2302
2303 /* FPU registers */
2304 for (i = 0; i < 8; ++i)
2305 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
2306 }
2307
static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
2313 register struct env87 *penv_87 = &sv_87->sv_env;
2314 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2315 int i;
2316
2317 /* FPU control/status */
2318 penv_xmm->en_cw = penv_87->en_cw;
2319 penv_xmm->en_sw = penv_87->en_sw;
2320 penv_xmm->en_tw = penv_87->en_tw;
2321 penv_xmm->en_fip = penv_87->en_fip;
2322 penv_xmm->en_fcs = penv_87->en_fcs;
2323 penv_xmm->en_opcode = penv_87->en_opcode;
2324 penv_xmm->en_foo = penv_87->en_foo;
2325 penv_xmm->en_fos = penv_87->en_fos;
2326
2327 /* FPU registers */
2328 for (i = 0; i < 8; ++i)
2329 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
2330 }
2331 #endif /* CPU_ENABLE_SSE */
2332
2333 int
2334 fill_fpregs(struct thread *td, struct fpreg *fpregs)
2335 {
2336 #ifdef CPU_ENABLE_SSE
2337 if (cpu_fxsr) {
2338 fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
2339 (struct save87 *)fpregs);
2340 return (0);
2341 }
2342 #endif /* CPU_ENABLE_SSE */
2343 bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
2344 return (0);
2345 }
2346
2347 int
2348 set_fpregs(struct thread *td, struct fpreg *fpregs)
2349 {
2350 #ifdef CPU_ENABLE_SSE
2351 if (cpu_fxsr) {
2352 set_fpregs_xmm((struct save87 *)fpregs,
2353 &td->td_pcb->pcb_save.sv_xmm);
2354 return (0);
2355 }
2356 #endif /* CPU_ENABLE_SSE */
2357 bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
2358 return (0);
2359 }
2360
2361 /*
2362 * Get machine context.
2363 */
2364 int
2365 get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
2366 {
2367 struct trapframe *tp;
2368
2369 tp = td->td_frame;
2370
2371 PROC_LOCK(curthread->td_proc);
2372 mcp->mc_onstack = sigonstack(tp->tf_esp);
2373 PROC_UNLOCK(curthread->td_proc);
2374 mcp->mc_gs = td->td_pcb->pcb_gs;
2375 mcp->mc_fs = tp->tf_fs;
2376 mcp->mc_es = tp->tf_es;
2377 mcp->mc_ds = tp->tf_ds;
2378 mcp->mc_edi = tp->tf_edi;
2379 mcp->mc_esi = tp->tf_esi;
2380 mcp->mc_ebp = tp->tf_ebp;
2381 mcp->mc_isp = tp->tf_isp;
2382 if (clear_ret != 0) {
2383 mcp->mc_eax = 0;
2384 mcp->mc_edx = 0;
2385 } else {
2386 mcp->mc_eax = tp->tf_eax;
2387 mcp->mc_edx = tp->tf_edx;
2388 }
2389 mcp->mc_ebx = tp->tf_ebx;
2390 mcp->mc_ecx = tp->tf_ecx;
2391 mcp->mc_eip = tp->tf_eip;
2392 mcp->mc_cs = tp->tf_cs;
2393 mcp->mc_eflags = tp->tf_eflags;
2394 mcp->mc_esp = tp->tf_esp;
2395 mcp->mc_ss = tp->tf_ss;
2396 mcp->mc_len = sizeof(*mcp);
2397 get_fpcontext(td, mcp);
2398 return (0);
2399 }
2400
/*
 * Set machine context.
 *
 * We set only the user-modifiable eflags bits, and we never touch
 * the cs selector.
 */
2407 int
2408 set_mcontext(struct thread *td, const mcontext_t *mcp)
2409 {
2410 struct trapframe *tp;
2411 int eflags, ret;
2412
2413 tp = td->td_frame;
2414 if (mcp->mc_len != sizeof(*mcp))
2415 return (EINVAL);
2416 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
2417 (tp->tf_eflags & ~PSL_USERCHANGE);
2418 if ((ret = set_fpcontext(td, mcp)) == 0) {
2419 tp->tf_fs = mcp->mc_fs;
2420 tp->tf_es = mcp->mc_es;
2421 tp->tf_ds = mcp->mc_ds;
2422 tp->tf_edi = mcp->mc_edi;
2423 tp->tf_esi = mcp->mc_esi;
2424 tp->tf_ebp = mcp->mc_ebp;
2425 tp->tf_ebx = mcp->mc_ebx;
2426 tp->tf_edx = mcp->mc_edx;
2427 tp->tf_ecx = mcp->mc_ecx;
2428 tp->tf_eax = mcp->mc_eax;
2429 tp->tf_eip = mcp->mc_eip;
2430 tp->tf_eflags = eflags;
2431 tp->tf_esp = mcp->mc_esp;
2432 tp->tf_ss = mcp->mc_ss;
2433 td->td_pcb->pcb_gs = mcp->mc_gs;
2434 ret = 0;
2435 }
2436 return (ret);
2437 }
2438
2439 static void
2440 get_fpcontext(struct thread *td, mcontext_t *mcp)
2441 {
2442 #ifndef DEV_NPX
2443 mcp->mc_fpformat = _MC_FPFMT_NODEV;
2444 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
2445 #else
2446 union savefpu *addr;
2447
	/*
	 * XXX mc_fpstate might be misaligned, since its declaration is not
	 * forced to 16-byte alignment with __attribute__((aligned(16)))
	 * the way struct savexmm's is, and in any case such alignment
	 * doesn't help auto variables since we don't use gcc's stack
	 * alignment.  Work around this by abusing the spare fields after
	 * mcp->mc_fpstate.
	 *
	 * XXX most cases could be unpessimized by aligning only when
	 * fxsave might be called, although that requires knowing too much
	 * about npxgetregs()'s internals.
	 */
2460 addr = (union savefpu *)&mcp->mc_fpstate;
2461 if (td == PCPU_GET(fpcurthread) &&
2462 #ifdef CPU_ENABLE_SSE
2463 cpu_fxsr &&
2464 #endif
2465 ((uintptr_t)(void *)addr & 0xF)) {
2466 do
2467 addr = (void *)((char *)addr + 4);
2468 while ((uintptr_t)(void *)addr & 0xF);
2469 }
2470 mcp->mc_ownedfp = npxgetregs(td, addr);
2471 if (addr != (union savefpu *)&mcp->mc_fpstate) {
2472 bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
2473 bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
2474 }
2475 mcp->mc_fpformat = npxformat();
2476 #endif
2477 }
2478
2479 static int
2480 set_fpcontext(struct thread *td, const mcontext_t *mcp)
2481 {
2482 union savefpu *addr;
2483
2484 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
2485 return (0);
2486 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
2487 mcp->mc_fpformat != _MC_FPFMT_XMM)
2488 return (EINVAL);
2489 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
2490 /* We don't care what state is left in the FPU or PCB. */
2491 fpstate_drop(td);
2492 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
2493 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
2494 /* XXX align as above. */
2495 addr = (union savefpu *)&mcp->mc_fpstate;
2496 if (td == PCPU_GET(fpcurthread) &&
2497 #ifdef CPU_ENABLE_SSE
2498 cpu_fxsr &&
2499 #endif
2500 ((uintptr_t)(void *)addr & 0xF)) {
2501 do
2502 addr = (void *)((char *)addr + 4);
2503 while ((uintptr_t)(void *)addr & 0xF);
2504 bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
2505 }
2506 #ifdef DEV_NPX
2507 /*
2508 * XXX we violate the dubious requirement that npxsetregs()
2509 * be called with interrupts disabled.
2510 */
2511 npxsetregs(td, addr);
2512 #endif
2513 /*
2514 * Don't bother putting things back where they were in the
2515 * misaligned case, since we know that the caller won't use
2516 * them again.
2517 */
2518 } else
2519 return (EINVAL);
2520 return (0);
2521 }
2522
2523 static void
2524 fpstate_drop(struct thread *td)
2525 {
2526 register_t s;
2527
2528 s = intr_disable();
2529 #ifdef DEV_NPX
2530 if (PCPU_GET(fpcurthread) == td)
2531 npxdrop();
2532 #endif
2533 /*
2534 * XXX force a full drop of the npx. The above only drops it if we
2535 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
2536 *
2537 * XXX I don't much like npxgetregs()'s semantics of doing a full
2538 * drop. Dropping only to the pcb matches fnsave's behaviour.
2539 * We only need to drop to !PCB_INITDONE in sendsig(). But
2540 * sendsig() is the only caller of npxgetregs()... perhaps we just
2541 * have too many layers.
2542 */
2543 curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
2544 intr_restore(s);
2545 }
2546
2547 int
2548 fill_dbregs(struct thread *td, struct dbreg *dbregs)
2549 {
2550 struct pcb *pcb;
2551
2552 if (td == NULL) {
2553 dbregs->dr[0] = rdr0();
2554 dbregs->dr[1] = rdr1();
2555 dbregs->dr[2] = rdr2();
2556 dbregs->dr[3] = rdr3();
2557 dbregs->dr[4] = rdr4();
2558 dbregs->dr[5] = rdr5();
2559 dbregs->dr[6] = rdr6();
2560 dbregs->dr[7] = rdr7();
2561 } else {
2562 pcb = td->td_pcb;
2563 dbregs->dr[0] = pcb->pcb_dr0;
2564 dbregs->dr[1] = pcb->pcb_dr1;
2565 dbregs->dr[2] = pcb->pcb_dr2;
2566 dbregs->dr[3] = pcb->pcb_dr3;
2567 dbregs->dr[4] = 0;
2568 dbregs->dr[5] = 0;
2569 dbregs->dr[6] = pcb->pcb_dr6;
2570 dbregs->dr[7] = pcb->pcb_dr7;
2571 }
2572 return (0);
2573 }
2574
2575 int
2576 set_dbregs(struct thread *td, struct dbreg *dbregs)
2577 {
2578 struct pcb *pcb;
2579 int i;
2580 u_int32_t mask1, mask2;
2581
2582 if (td == NULL) {
2583 load_dr0(dbregs->dr[0]);
2584 load_dr1(dbregs->dr[1]);
2585 load_dr2(dbregs->dr[2]);
2586 load_dr3(dbregs->dr[3]);
2587 load_dr4(dbregs->dr[4]);
2588 load_dr5(dbregs->dr[5]);
2589 load_dr6(dbregs->dr[6]);
2590 load_dr7(dbregs->dr[7]);
2591 } else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP.
		 */
2598 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8;
2599 i++, mask1 <<= 2, mask2 <<= 2)
2600 if ((dbregs->dr[7] & mask1) == mask2)
2601 return (EINVAL);
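		/*
		 * The loop above walks the eight 2-bit R/W and LEN
		 * fields in the high word of dr7 (bits 16-31) and
		 * rejects a value of 10b in any of them, since that
		 * pattern is undefined here.
		 */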
2602
2603 pcb = td->td_pcb;
2604
		/*
		 * Don't let a process set a breakpoint that is not within
		 * the process's address space.  If a process could do this,
		 * it could halt the system by setting a breakpoint in the
		 * kernel (if ddb was enabled).  Thus, we need to check to
		 * make sure that no breakpoints are being enabled for
		 * addresses outside the process's address space, unless,
		 * perhaps, we were called by uid 0.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */
2619
2620 if (suser(td) != 0) {
2621 if (dbregs->dr[7] & 0x3) {
2622 /* dr0 is enabled */
2623 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
2624 return (EINVAL);
2625 }
2626
2627 if (dbregs->dr[7] & (0x3<<2)) {
2628 /* dr1 is enabled */
2629 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
2630 return (EINVAL);
2631 }
2632
2633 if (dbregs->dr[7] & (0x3<<4)) {
2634 /* dr2 is enabled */
2635 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
2636 return (EINVAL);
2637 }
2638
2639 if (dbregs->dr[7] & (0x3<<6)) {
2640 /* dr3 is enabled */
2641 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
2642 return (EINVAL);
2643 }
2644 }
2645
2646 pcb->pcb_dr0 = dbregs->dr[0];
2647 pcb->pcb_dr1 = dbregs->dr[1];
2648 pcb->pcb_dr2 = dbregs->dr[2];
2649 pcb->pcb_dr3 = dbregs->dr[3];
2650 pcb->pcb_dr6 = dbregs->dr[6];
2651 pcb->pcb_dr7 = dbregs->dr[7];
2652
2653 pcb->pcb_flags |= PCB_DBREGS;
2654 }
2655
2656 return (0);
2657 }
2658
/*
 * Return > 0 if a hardware breakpoint has been hit and the
 * breakpoint was in user space.  Return 0 otherwise.
 */
2663 int
2664 user_dbreg_trap(void)
2665 {
2666 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
2667 u_int32_t bp; /* breakpoint bits extracted from dr6 */
2668 int nbp; /* number of breakpoints that triggered */
2669 caddr_t addr[4]; /* breakpoint addresses */
2670 int i;
2671
2672 dr7 = rdr7();
2673 if ((dr7 & 0x000000ff) == 0) {
		/*
		 * None of the breakpoint enable bits (L0-L3, G0-G3) in
		 * the low byte of dr7 is set, so the trap couldn't have
		 * been caused by the hardware debug registers.
		 */
2679 return 0;
2680 }
2681
2682 nbp = 0;
2683 dr6 = rdr6();
2684 bp = dr6 & 0x0000000f;
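	/* The low four bits of dr6 (B0-B3) record which breakpoints fired. */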
2685
2686 if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers.
		 */
2691 return 0;
2692 }
2693
	/*
	 * At least one of the breakpoints was hit; check which ones
	 * and whether any of them are user space addresses.
	 */
2698
2699 if (bp & 0x01) {
2700 addr[nbp++] = (caddr_t)rdr0();
2701 }
2702 if (bp & 0x02) {
2703 addr[nbp++] = (caddr_t)rdr1();
2704 }
2705 if (bp & 0x04) {
2706 addr[nbp++] = (caddr_t)rdr2();
2707 }
2708 if (bp & 0x08) {
2709 addr[nbp++] = (caddr_t)rdr3();
2710 }
2711
	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
2715 /*
2716 * addr[i] is in user space
2717 */
2718 return nbp;
2719 }
2720 }
2721
2722 /*
2723 * None of the breakpoints are in user space.
2724 */
2725 return 0;
2726 }
2727
2728
2729 #ifndef DDB
2730 void
2731 Debugger(const char *msg)
2732 {
2733 printf("Debugger(\"%s\") called.\n", msg);
2734 }
2735 #endif /* no DDB */
2736
2737 #ifdef DDB
2738
2739 /*
2740 * Provide inb() and outb() as functions. They are normally only
2741 * available as macros calling inlined functions, thus cannot be
2742 * called inside DDB.
2743 *
2744 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
2745 */
2746
2747 #undef inb
2748 #undef outb
2749
2750 /* silence compiler warnings */
2751 u_char inb(u_int);
2752 void outb(u_int, u_char);
2753
2754 u_char
2755 inb(u_int port)
2756 {
2757 u_char data;
2758 /*
2759 * We use %%dx and not %1 here because i/o is done at %dx and not at
2760 * %edx, while gcc generates inferior code (movw instead of movl)
2761 * if we tell it to load (u_short) port.
2762 */
2763 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
2764 return (data);
2765 }
2766
2767 void
2768 outb(u_int port, u_char data)
2769 {
2770 u_char al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
2776 */
2777 al = data;
2778 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
2779 }
2780
2781 #endif /* DDB */