1 /*-
2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * William Jolitz.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
38 */
39
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD: releng/6.1/sys/i386/i386/machdep.c 158164 2006-04-30 05:17:59Z kensmith $");
42
43 #include "opt_apic.h"
44 #include "opt_atalk.h"
45 #include "opt_compat.h"
46 #include "opt_cpu.h"
47 #include "opt_ddb.h"
48 #include "opt_inet.h"
49 #include "opt_ipx.h"
50 #include "opt_isa.h"
51 #include "opt_kstack_pages.h"
52 #include "opt_maxmem.h"
53 #include "opt_msgbuf.h"
54 #include "opt_npx.h"
55 #include "opt_perfmon.h"
56
57 #include <sys/param.h>
58 #include <sys/proc.h>
59 #include <sys/systm.h>
60 #include <sys/bio.h>
61 #include <sys/buf.h>
62 #include <sys/bus.h>
63 #include <sys/callout.h>
64 #include <sys/cons.h>
65 #include <sys/cpu.h>
66 #include <sys/eventhandler.h>
67 #include <sys/exec.h>
68 #include <sys/imgact.h>
69 #include <sys/kdb.h>
70 #include <sys/kernel.h>
71 #include <sys/ktr.h>
72 #include <sys/linker.h>
73 #include <sys/lock.h>
74 #include <sys/malloc.h>
75 #include <sys/memrange.h>
76 #include <sys/msgbuf.h>
77 #include <sys/mutex.h>
78 #include <sys/pcpu.h>
79 #include <sys/ptrace.h>
80 #include <sys/reboot.h>
81 #include <sys/sched.h>
82 #include <sys/signalvar.h>
83 #include <sys/sysctl.h>
84 #include <sys/sysent.h>
85 #include <sys/sysproto.h>
86 #include <sys/ucontext.h>
87 #include <sys/vmmeter.h>
88
89 #include <vm/vm.h>
90 #include <vm/vm_extern.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_page.h>
93 #include <vm/vm_map.h>
94 #include <vm/vm_object.h>
95 #include <vm/vm_pager.h>
96 #include <vm/vm_param.h>
97
98 #ifdef DDB
99 #ifndef KDB
100 #error KDB must be enabled in order for DDB to work!
101 #endif
102 #include <ddb/ddb.h>
103 #include <ddb/db_sym.h>
104 #endif
105
106 #include <isa/rtc.h>
107
108 #include <net/netisr.h>
109
110 #include <machine/bootinfo.h>
111 #include <machine/clock.h>
112 #include <machine/cpu.h>
113 #include <machine/cputypes.h>
114 #include <machine/intr_machdep.h>
115 #include <machine/md_var.h>
116 #include <machine/pc/bios.h>
117 #include <machine/pcb.h>
118 #include <machine/pcb_ext.h>
119 #include <machine/proc.h>
120 #include <machine/reg.h>
121 #include <machine/sigframe.h>
122 #include <machine/specialreg.h>
123 #include <machine/vm86.h>
124 #ifdef PERFMON
125 #include <machine/perfmon.h>
126 #endif
127 #ifdef SMP
128 #include <machine/privatespace.h>
129 #include <machine/smp.h>
130 #endif
131
132 #ifdef DEV_ISA
133 #include <i386/isa/icu.h>
134 #endif
135
136 /* Sanity check for __curthread() */
137 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
138
139 extern void init386(int first);
140 extern void dblfault_handler(void);
141
142 extern void printcpuinfo(void); /* XXX header file */
143 extern void finishidentcpu(void);
144 extern void panicifcpuunsupported(void);
145 extern void initializecpu(void);
146
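/*
 * Checks applied to a user-supplied context in the sigreturn paths:
 * CS_SECURE() rejects any %cs selector that is not at user privilege
 * level, and EFL_SECURE() rejects changes to %eflags bits outside of
 * PSL_USERCHANGE.
 */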
147 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
148 #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
149
150 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
151 #define CPU_ENABLE_SSE
152 #endif
153
154 static void cpu_startup(void *);
155 static void fpstate_drop(struct thread *td);
156 static void get_fpcontext(struct thread *td, mcontext_t *mcp);
157 static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
158 #ifdef CPU_ENABLE_SSE
159 static void set_fpregs_xmm(struct save87 *, struct savexmm *);
160 static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
161 #endif /* CPU_ENABLE_SSE */
162 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
163
164 #ifdef DDB
165 extern vm_offset_t ksym_start, ksym_end;
166 #endif
167
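/* User code and data segment selectors, initialized in init386(). */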
168 int _udatasel, _ucodesel;
169 u_int basemem;
170
171 int cold = 1;
172
173 #ifdef COMPAT_43
174 static void osendsig(sig_t catcher, int sig, sigset_t *mask, u_long code);
175 #endif
176 #ifdef COMPAT_FREEBSD4
177 static void freebsd4_sendsig(sig_t catcher, int sig, sigset_t *mask,
178 u_long code);
179 #endif
180
181 long Maxmem = 0;
182 long realmem = 0;
183
184 vm_paddr_t phys_avail[10];
185 vm_paddr_t dump_avail[10];
186
/* must be 2 less so that a 0/0 pair can signal the end of the chunks */
188 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
189 #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
190
191 struct kva_md_info kmi;
192
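/* Initial trap frame for proc 0, set up in init386(). */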
193 static struct trapframe proc0_tf;
194 #ifndef SMP
195 static struct pcpu __pcpu;
196 #endif
197
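/* Spin lock protecting access to the interrupt controller registers. */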
198 struct mtx icu_lock;
199
200 struct mem_range_softc mem_range_softc;
201
202 static void
203 cpu_startup(dummy)
204 void *dummy;
205 {
206 /*
207 * Good {morning,afternoon,evening,night}.
208 */
209 startrtclock();
210 printcpuinfo();
211 panicifcpuunsupported();
212 #ifdef PERFMON
213 perfmon_init();
214 #endif
215 printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem),
216 ptoa((uintmax_t)Maxmem) / 1048576);
217 realmem = Maxmem;
218 /*
219 * Display any holes after the first chunk of extended memory.
220 */
221 if (bootverbose) {
222 int indx;
223
224 printf("Physical memory chunk(s):\n");
225 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
226 vm_paddr_t size;
227
228 size = phys_avail[indx + 1] - phys_avail[indx];
229 printf(
230 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
231 (uintmax_t)phys_avail[indx],
232 (uintmax_t)phys_avail[indx + 1] - 1,
233 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
234 }
235 }
236
237 vm_ksubmap_init(&kmi);
238
239 printf("avail memory = %ju (%ju MB)\n",
240 ptoa((uintmax_t)cnt.v_free_count),
241 ptoa((uintmax_t)cnt.v_free_count) / 1048576);
242
243 /*
244 * Set up buffers, so they can be used to read disk labels.
245 */
246 bufinit();
247 vm_pager_bufferinit();
248
249 cpu_setregs();
250 }
251
252 /*
 * Send a signal (an "interrupt") to a process.
 *
 * The stack is set up so that the signal trampoline (sigcode) stored at
 * the top of user memory calls the handler, followed by a kernel call
 * to the sigreturn routine below.  After sigreturn resets the signal
 * mask, the stack, and the frame pointer, it returns to the
 * user-specified pc and psl.
261 */
262 #ifdef COMPAT_43
263 static void
264 osendsig(catcher, sig, mask, code)
265 sig_t catcher;
266 int sig;
267 sigset_t *mask;
268 u_long code;
269 {
270 struct osigframe sf, *fp;
271 struct proc *p;
272 struct thread *td;
273 struct sigacts *psp;
274 struct trapframe *regs;
275 int oonstack;
276
277 td = curthread;
278 p = td->td_proc;
279 PROC_LOCK_ASSERT(p, MA_OWNED);
280 psp = p->p_sigacts;
281 mtx_assert(&psp->ps_mtx, MA_OWNED);
282 regs = td->td_frame;
283 oonstack = sigonstack(regs->tf_esp);
284
285 /* Allocate space for the signal handler context. */
286 if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
287 SIGISMEMBER(psp->ps_sigonstack, sig)) {
288 fp = (struct osigframe *)(td->td_sigstk.ss_sp +
289 td->td_sigstk.ss_size - sizeof(struct osigframe));
290 #if defined(COMPAT_43)
291 td->td_sigstk.ss_flags |= SS_ONSTACK;
292 #endif
293 } else
294 fp = (struct osigframe *)regs->tf_esp - 1;
295
296 /* Translate the signal if appropriate. */
297 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
298 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
299
300 /* Build the argument list for the signal handler. */
301 sf.sf_signum = sig;
302 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
303 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
304 /* Signal handler installed with SA_SIGINFO. */
305 sf.sf_arg2 = (register_t)&fp->sf_siginfo;
306 sf.sf_siginfo.si_signo = sig;
307 sf.sf_siginfo.si_code = code;
308 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
309 } else {
310 /* Old FreeBSD-style arguments. */
311 sf.sf_arg2 = code;
312 sf.sf_addr = regs->tf_err;
313 sf.sf_ahu.sf_handler = catcher;
314 }
315 mtx_unlock(&psp->ps_mtx);
316 PROC_UNLOCK(p);
317
318 /* Save most if not all of trap frame. */
319 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
320 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
321 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
322 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
323 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
324 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
325 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
326 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
327 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
328 sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
329 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
330 sf.sf_siginfo.si_sc.sc_gs = rgs();
331 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;
332
333 /* Build the signal context to be used by osigreturn(). */
334 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
335 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
336 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
337 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
338 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
339 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
340 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
341 sf.sf_siginfo.si_sc.sc_err = regs->tf_err;
342
343 /*
344 * If we're a vm86 process, we want to save the segment registers.
345 * We also change eflags to be our emulated eflags, not the actual
346 * eflags.
347 */
348 if (regs->tf_eflags & PSL_VM) {
349 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
350 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
351 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
352
353 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
354 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
355 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
356 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;
357
358 if (vm86->vm86_has_vme == 0)
359 sf.sf_siginfo.si_sc.sc_ps =
360 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
361 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
362
363 /* See sendsig() for comments. */
364 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
365 }
366
367 /*
368 * Copy the sigframe out to the user's stack.
369 */
370 if (copyout(&sf, fp, sizeof(*fp)) != 0) {
371 #ifdef DEBUG
372 printf("process %ld has trashed its stack\n", (long)p->p_pid);
373 #endif
374 PROC_LOCK(p);
375 sigexit(td, SIGILL);
376 }
377
378 regs->tf_esp = (int)fp;
379 regs->tf_eip = PS_STRINGS - szosigcode;
380 regs->tf_eflags &= ~PSL_T;
381 regs->tf_cs = _ucodesel;
382 regs->tf_ds = _udatasel;
383 regs->tf_es = _udatasel;
384 regs->tf_fs = _udatasel;
385 load_gs(_udatasel);
386 regs->tf_ss = _udatasel;
387 PROC_LOCK(p);
388 mtx_lock(&psp->ps_mtx);
389 }
390 #endif /* COMPAT_43 */
391
392 #ifdef COMPAT_FREEBSD4
393 static void
394 freebsd4_sendsig(catcher, sig, mask, code)
395 sig_t catcher;
396 int sig;
397 sigset_t *mask;
398 u_long code;
399 {
400 struct sigframe4 sf, *sfp;
401 struct proc *p;
402 struct thread *td;
403 struct sigacts *psp;
404 struct trapframe *regs;
405 int oonstack;
406
407 td = curthread;
408 p = td->td_proc;
409 PROC_LOCK_ASSERT(p, MA_OWNED);
410 psp = p->p_sigacts;
411 mtx_assert(&psp->ps_mtx, MA_OWNED);
412 regs = td->td_frame;
413 oonstack = sigonstack(regs->tf_esp);
414
415 /* Save user context. */
416 bzero(&sf, sizeof(sf));
417 sf.sf_uc.uc_sigmask = *mask;
418 sf.sf_uc.uc_stack = td->td_sigstk;
419 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
420 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
421 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
422 sf.sf_uc.uc_mcontext.mc_gs = rgs();
423 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
424
425 /* Allocate space for the signal handler context. */
426 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
427 SIGISMEMBER(psp->ps_sigonstack, sig)) {
428 sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
429 td->td_sigstk.ss_size - sizeof(struct sigframe4));
430 #if defined(COMPAT_43)
431 td->td_sigstk.ss_flags |= SS_ONSTACK;
432 #endif
433 } else
434 sfp = (struct sigframe4 *)regs->tf_esp - 1;
435
436 /* Translate the signal if appropriate. */
437 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
438 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
439
440 /* Build the argument list for the signal handler. */
441 sf.sf_signum = sig;
442 sf.sf_ucontext = (register_t)&sfp->sf_uc;
443 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
444 /* Signal handler installed with SA_SIGINFO. */
445 sf.sf_siginfo = (register_t)&sfp->sf_si;
446 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
447
448 /* Fill in POSIX parts */
449 sf.sf_si.si_signo = sig;
450 sf.sf_si.si_code = code;
451 sf.sf_si.si_addr = (void *)regs->tf_err;
452 } else {
453 /* Old FreeBSD-style arguments. */
454 sf.sf_siginfo = code;
455 sf.sf_addr = regs->tf_err;
456 sf.sf_ahu.sf_handler = catcher;
457 }
458 mtx_unlock(&psp->ps_mtx);
459 PROC_UNLOCK(p);
460
461 /*
462 * If we're a vm86 process, we want to save the segment registers.
463 * We also change eflags to be our emulated eflags, not the actual
464 * eflags.
465 */
466 if (regs->tf_eflags & PSL_VM) {
467 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
468 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
469
470 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
471 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
472 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
473 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
474
475 if (vm86->vm86_has_vme == 0)
476 sf.sf_uc.uc_mcontext.mc_eflags =
477 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
478 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
479
480 /*
481 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
482 * syscalls made by the signal handler. This just avoids
483 * wasting time for our lazy fixup of such faults. PSL_NT
484 * does nothing in vm86 mode, but vm86 programs can set it
485 * almost legitimately in probes for old cpu types.
486 */
487 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
488 }
489
490 /*
491 * Copy the sigframe out to the user's stack.
492 */
493 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
494 #ifdef DEBUG
495 printf("process %ld has trashed its stack\n", (long)p->p_pid);
496 #endif
497 PROC_LOCK(p);
498 sigexit(td, SIGILL);
499 }
500
501 regs->tf_esp = (int)sfp;
502 regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
503 regs->tf_eflags &= ~PSL_T;
504 regs->tf_cs = _ucodesel;
505 regs->tf_ds = _udatasel;
506 regs->tf_es = _udatasel;
507 regs->tf_fs = _udatasel;
508 regs->tf_ss = _udatasel;
509 PROC_LOCK(p);
510 mtx_lock(&psp->ps_mtx);
511 }
512 #endif /* COMPAT_FREEBSD4 */
513
514 void
515 sendsig(catcher, sig, mask, code)
516 sig_t catcher;
517 int sig;
518 sigset_t *mask;
519 u_long code;
520 {
521 struct sigframe sf, *sfp;
522 struct proc *p;
523 struct thread *td;
524 struct sigacts *psp;
525 char *sp;
526 struct trapframe *regs;
527 int oonstack;
528
529 td = curthread;
530 p = td->td_proc;
531 PROC_LOCK_ASSERT(p, MA_OWNED);
532 psp = p->p_sigacts;
533 mtx_assert(&psp->ps_mtx, MA_OWNED);
534 #ifdef COMPAT_FREEBSD4
535 if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
536 freebsd4_sendsig(catcher, sig, mask, code);
537 return;
538 }
539 #endif
540 #ifdef COMPAT_43
541 if (SIGISMEMBER(psp->ps_osigset, sig)) {
542 osendsig(catcher, sig, mask, code);
543 return;
544 }
545 #endif
546 regs = td->td_frame;
547 oonstack = sigonstack(regs->tf_esp);
548
549 /* Save user context. */
550 bzero(&sf, sizeof(sf));
551 sf.sf_uc.uc_sigmask = *mask;
552 sf.sf_uc.uc_stack = td->td_sigstk;
553 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
554 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
555 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
556 sf.sf_uc.uc_mcontext.mc_gs = rgs();
557 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
558 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
559 get_fpcontext(td, &sf.sf_uc.uc_mcontext);
560 fpstate_drop(td);
561
562 /* Allocate space for the signal handler context. */
563 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
564 SIGISMEMBER(psp->ps_sigonstack, sig)) {
565 sp = td->td_sigstk.ss_sp +
566 td->td_sigstk.ss_size - sizeof(struct sigframe);
567 #if defined(COMPAT_43)
568 td->td_sigstk.ss_flags |= SS_ONSTACK;
569 #endif
570 } else
571 sp = (char *)regs->tf_esp - sizeof(struct sigframe);
572 /* Align to 16 bytes. */
573 sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
574
575 /* Translate the signal if appropriate. */
576 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
577 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
578
579 /* Build the argument list for the signal handler. */
580 sf.sf_signum = sig;
581 sf.sf_ucontext = (register_t)&sfp->sf_uc;
582 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
583 /* Signal handler installed with SA_SIGINFO. */
584 sf.sf_siginfo = (register_t)&sfp->sf_si;
585 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
586
587 /* Fill in POSIX parts */
588 sf.sf_si.si_signo = sig;
589 sf.sf_si.si_code = code;
590 sf.sf_si.si_addr = (void *)regs->tf_err;
591 } else {
592 /* Old FreeBSD-style arguments. */
593 sf.sf_siginfo = code;
594 sf.sf_addr = regs->tf_err;
595 sf.sf_ahu.sf_handler = catcher;
596 }
597 mtx_unlock(&psp->ps_mtx);
598 PROC_UNLOCK(p);
599
600 /*
601 * If we're a vm86 process, we want to save the segment registers.
602 * We also change eflags to be our emulated eflags, not the actual
603 * eflags.
604 */
605 if (regs->tf_eflags & PSL_VM) {
606 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
607 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
608
609 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
610 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
611 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
612 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
613
614 if (vm86->vm86_has_vme == 0)
615 sf.sf_uc.uc_mcontext.mc_eflags =
616 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
617 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
618
619 /*
620 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
621 * syscalls made by the signal handler. This just avoids
622 * wasting time for our lazy fixup of such faults. PSL_NT
623 * does nothing in vm86 mode, but vm86 programs can set it
624 * almost legitimately in probes for old cpu types.
625 */
626 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
627 }
628
629 /*
630 * Copy the sigframe out to the user's stack.
631 */
632 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
633 #ifdef DEBUG
634 printf("process %ld has trashed its stack\n", (long)p->p_pid);
635 #endif
636 PROC_LOCK(p);
637 sigexit(td, SIGILL);
638 }
639
640 regs->tf_esp = (int)sfp;
641 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
642 regs->tf_eflags &= ~PSL_T;
643 regs->tf_cs = _ucodesel;
644 regs->tf_ds = _udatasel;
645 regs->tf_es = _udatasel;
646 regs->tf_fs = _udatasel;
647 regs->tf_ss = _udatasel;
648 PROC_LOCK(p);
649 mtx_lock(&psp->ps_mtx);
650 }
651
652 /*
653 * Build siginfo_t for SA thread
654 */
655 void
656 cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
657 {
658 struct proc *p;
659 struct thread *td;
660
661 td = curthread;
662 p = td->td_proc;
663 PROC_LOCK_ASSERT(p, MA_OWNED);
664
665 bzero(si, sizeof(*si));
666 si->si_signo = sig;
667 si->si_code = code;
668 si->si_addr = (void *)td->td_frame->tf_err;
669 /* XXXKSE fill other fields */
670 }
671
672 /*
 * System call to clean up state after a signal
674 * has been taken. Reset signal mask and
675 * stack state from context left by sendsig (above).
676 * Return to previous pc and psl as specified by
677 * context left by sendsig. Check carefully to
678 * make sure that the user has not modified the
679 * state to gain improper privileges.
680 *
681 * MPSAFE
682 */
683 #ifdef COMPAT_43
684 int
685 osigreturn(td, uap)
686 struct thread *td;
687 struct osigreturn_args /* {
688 struct osigcontext *sigcntxp;
689 } */ *uap;
690 {
691 struct osigcontext sc;
692 struct trapframe *regs;
693 struct osigcontext *scp;
694 struct proc *p = td->td_proc;
695 int eflags, error;
696
697 regs = td->td_frame;
698 error = copyin(uap->sigcntxp, &sc, sizeof(sc));
699 if (error != 0)
700 return (error);
	scp = &sc;
702 eflags = scp->sc_ps;
703 if (eflags & PSL_VM) {
704 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
705 struct vm86_kernel *vm86;
706
707 /*
708 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
709 * set up the vm86 area, and we can't enter vm86 mode.
710 */
711 if (td->td_pcb->pcb_ext == 0)
712 return (EINVAL);
713 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
714 if (vm86->vm86_inited == 0)
715 return (EINVAL);
716
717 /* Go back to user mode if both flags are set. */
718 if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
719 trapsignal(td, SIGBUS, 0);
720
721 if (vm86->vm86_has_vme) {
722 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
723 (eflags & VME_USERCHANGE) | PSL_VM;
724 } else {
725 vm86->vm86_eflags = eflags; /* save VIF, VIP */
726 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
727 (eflags & VM_USERCHANGE) | PSL_VM;
728 }
729 tf->tf_vm86_ds = scp->sc_ds;
730 tf->tf_vm86_es = scp->sc_es;
731 tf->tf_vm86_fs = scp->sc_fs;
732 tf->tf_vm86_gs = scp->sc_gs;
733 tf->tf_ds = _udatasel;
734 tf->tf_es = _udatasel;
735 tf->tf_fs = _udatasel;
736 } else {
737 /*
738 * Don't allow users to change privileged or reserved flags.
739 */
740 /*
741 * XXX do allow users to change the privileged flag PSL_RF.
742 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
743 * should sometimes set it there too. tf_eflags is kept in
744 * the signal context during signal handling and there is no
745 * other place to remember it, so the PSL_RF bit may be
746 * corrupted by the signal handler without us knowing.
747 * Corruption of the PSL_RF bit at worst causes one more or
748 * one less debugger trap, so allowing it is fairly harmless.
749 */
750 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
751 return (EINVAL);
752 }
753
754 /*
755 * Don't allow users to load a valid privileged %cs. Let the
756 * hardware check for invalid selectors, excess privilege in
757 * other selectors, invalid %eip's and invalid %esp's.
758 */
759 if (!CS_SECURE(scp->sc_cs)) {
760 trapsignal(td, SIGBUS, T_PROTFLT);
761 return (EINVAL);
762 }
763 regs->tf_ds = scp->sc_ds;
764 regs->tf_es = scp->sc_es;
765 regs->tf_fs = scp->sc_fs;
766 }
767
768 /* Restore remaining registers. */
769 regs->tf_eax = scp->sc_eax;
770 regs->tf_ebx = scp->sc_ebx;
771 regs->tf_ecx = scp->sc_ecx;
772 regs->tf_edx = scp->sc_edx;
773 regs->tf_esi = scp->sc_esi;
774 regs->tf_edi = scp->sc_edi;
775 regs->tf_cs = scp->sc_cs;
776 regs->tf_ss = scp->sc_ss;
777 regs->tf_isp = scp->sc_isp;
778 regs->tf_ebp = scp->sc_fp;
779 regs->tf_esp = scp->sc_sp;
780 regs->tf_eip = scp->sc_pc;
781 regs->tf_eflags = eflags;
782
783 PROC_LOCK(p);
784 #if defined(COMPAT_43)
785 if (scp->sc_onstack & 1)
786 td->td_sigstk.ss_flags |= SS_ONSTACK;
787 else
788 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
789 #endif
790 SIGSETOLD(td->td_sigmask, scp->sc_mask);
791 SIG_CANTMASK(td->td_sigmask);
792 signotify(td);
793 PROC_UNLOCK(p);
794 return (EJUSTRETURN);
795 }
796 #endif /* COMPAT_43 */
797
798 #ifdef COMPAT_FREEBSD4
799 /*
800 * MPSAFE
801 */
802 int
803 freebsd4_sigreturn(td, uap)
804 struct thread *td;
805 struct freebsd4_sigreturn_args /* {
806 const ucontext4 *sigcntxp;
807 } */ *uap;
808 {
809 struct ucontext4 uc;
810 struct proc *p = td->td_proc;
811 struct trapframe *regs;
812 const struct ucontext4 *ucp;
813 int cs, eflags, error;
814
815 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
816 if (error != 0)
817 return (error);
818 ucp = &uc;
819 regs = td->td_frame;
820 eflags = ucp->uc_mcontext.mc_eflags;
821 if (eflags & PSL_VM) {
822 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
823 struct vm86_kernel *vm86;
824
825 /*
826 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
827 * set up the vm86 area, and we can't enter vm86 mode.
828 */
829 if (td->td_pcb->pcb_ext == 0)
830 return (EINVAL);
831 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
832 if (vm86->vm86_inited == 0)
833 return (EINVAL);
834
835 /* Go back to user mode if both flags are set. */
836 if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
837 trapsignal(td, SIGBUS, 0);
838
839 if (vm86->vm86_has_vme) {
840 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
841 (eflags & VME_USERCHANGE) | PSL_VM;
842 } else {
843 vm86->vm86_eflags = eflags; /* save VIF, VIP */
844 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
845 (eflags & VM_USERCHANGE) | PSL_VM;
846 }
847 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
848 tf->tf_eflags = eflags;
849 tf->tf_vm86_ds = tf->tf_ds;
850 tf->tf_vm86_es = tf->tf_es;
851 tf->tf_vm86_fs = tf->tf_fs;
852 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
853 tf->tf_ds = _udatasel;
854 tf->tf_es = _udatasel;
855 tf->tf_fs = _udatasel;
856 } else {
857 /*
858 * Don't allow users to change privileged or reserved flags.
859 */
860 /*
861 * XXX do allow users to change the privileged flag PSL_RF.
862 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
863 * should sometimes set it there too. tf_eflags is kept in
864 * the signal context during signal handling and there is no
865 * other place to remember it, so the PSL_RF bit may be
866 * corrupted by the signal handler without us knowing.
867 * Corruption of the PSL_RF bit at worst causes one more or
868 * one less debugger trap, so allowing it is fairly harmless.
869 */
870 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
871 printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
872 return (EINVAL);
873 }
874
875 /*
876 * Don't allow users to load a valid privileged %cs. Let the
877 * hardware check for invalid selectors, excess privilege in
878 * other selectors, invalid %eip's and invalid %esp's.
879 */
880 cs = ucp->uc_mcontext.mc_cs;
881 if (!CS_SECURE(cs)) {
882 printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
883 trapsignal(td, SIGBUS, T_PROTFLT);
884 return (EINVAL);
885 }
886
887 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
888 }
889
890 PROC_LOCK(p);
891 #if defined(COMPAT_43)
892 if (ucp->uc_mcontext.mc_onstack & 1)
893 td->td_sigstk.ss_flags |= SS_ONSTACK;
894 else
895 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
896 #endif
897
898 td->td_sigmask = ucp->uc_sigmask;
899 SIG_CANTMASK(td->td_sigmask);
900 signotify(td);
901 PROC_UNLOCK(p);
902 return (EJUSTRETURN);
903 }
904 #endif /* COMPAT_FREEBSD4 */
905
906 /*
907 * MPSAFE
908 */
909 int
910 sigreturn(td, uap)
911 struct thread *td;
912 struct sigreturn_args /* {
913 const __ucontext *sigcntxp;
914 } */ *uap;
915 {
916 ucontext_t uc;
917 struct proc *p = td->td_proc;
918 struct trapframe *regs;
919 const ucontext_t *ucp;
920 int cs, eflags, error, ret;
921
922 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
923 if (error != 0)
924 return (error);
925 ucp = &uc;
926 regs = td->td_frame;
927 eflags = ucp->uc_mcontext.mc_eflags;
928 if (eflags & PSL_VM) {
929 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
930 struct vm86_kernel *vm86;
931
932 /*
933 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
934 * set up the vm86 area, and we can't enter vm86 mode.
935 */
936 if (td->td_pcb->pcb_ext == 0)
937 return (EINVAL);
938 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
939 if (vm86->vm86_inited == 0)
940 return (EINVAL);
941
942 /* Go back to user mode if both flags are set. */
943 if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
944 trapsignal(td, SIGBUS, 0);
945
946 if (vm86->vm86_has_vme) {
947 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
948 (eflags & VME_USERCHANGE) | PSL_VM;
949 } else {
950 vm86->vm86_eflags = eflags; /* save VIF, VIP */
951 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
952 (eflags & VM_USERCHANGE) | PSL_VM;
953 }
954 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
955 tf->tf_eflags = eflags;
956 tf->tf_vm86_ds = tf->tf_ds;
957 tf->tf_vm86_es = tf->tf_es;
958 tf->tf_vm86_fs = tf->tf_fs;
959 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
960 tf->tf_ds = _udatasel;
961 tf->tf_es = _udatasel;
962 tf->tf_fs = _udatasel;
963 } else {
964 /*
965 * Don't allow users to change privileged or reserved flags.
966 */
967 /*
968 * XXX do allow users to change the privileged flag PSL_RF.
969 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
970 * should sometimes set it there too. tf_eflags is kept in
971 * the signal context during signal handling and there is no
972 * other place to remember it, so the PSL_RF bit may be
973 * corrupted by the signal handler without us knowing.
974 * Corruption of the PSL_RF bit at worst causes one more or
975 * one less debugger trap, so allowing it is fairly harmless.
976 */
977 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
978 printf("sigreturn: eflags = 0x%x\n", eflags);
979 return (EINVAL);
980 }
981
982 /*
983 * Don't allow users to load a valid privileged %cs. Let the
984 * hardware check for invalid selectors, excess privilege in
985 * other selectors, invalid %eip's and invalid %esp's.
986 */
987 cs = ucp->uc_mcontext.mc_cs;
988 if (!CS_SECURE(cs)) {
989 printf("sigreturn: cs = 0x%x\n", cs);
990 trapsignal(td, SIGBUS, T_PROTFLT);
991 return (EINVAL);
992 }
993
994 ret = set_fpcontext(td, &ucp->uc_mcontext);
995 if (ret != 0)
996 return (ret);
997 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
998 }
999
1000 PROC_LOCK(p);
1001 #if defined(COMPAT_43)
1002 if (ucp->uc_mcontext.mc_onstack & 1)
1003 td->td_sigstk.ss_flags |= SS_ONSTACK;
1004 else
1005 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
1006 #endif
1007
1008 td->td_sigmask = ucp->uc_sigmask;
1009 SIG_CANTMASK(td->td_sigmask);
1010 signotify(td);
1011 PROC_UNLOCK(p);
1012 return (EJUSTRETURN);
1013 }
1014
1015 /*
1016 * Machine dependent boot() routine
1017 *
1018 * I haven't seen anything to put here yet
1019 * Possibly some stuff might be grafted back here from boot()
1020 */
1021 void
1022 cpu_boot(int howto)
1023 {
1024 }
1025
1026 /* Get current clock frequency for the given cpu id. */
1027 int
1028 cpu_est_clockrate(int cpu_id, uint64_t *rate)
1029 {
1030 register_t reg;
1031 uint64_t tsc1, tsc2;
1032
1033 if (pcpu_find(cpu_id) == NULL || rate == NULL)
1034 return (EINVAL);
1035 if (!tsc_present)
1036 return (EOPNOTSUPP);
1037
1038 /* If we're booting, trust the rate calibrated moments ago. */
1039 if (cold) {
1040 *rate = tsc_freq;
1041 return (0);
1042 }
1043
1044 #ifdef SMP
1045 /* Schedule ourselves on the indicated cpu. */
1046 mtx_lock_spin(&sched_lock);
1047 sched_bind(curthread, cpu_id);
1048 mtx_unlock_spin(&sched_lock);
1049 #endif
1050
1051 /* Calibrate by measuring a short delay. */
1052 reg = intr_disable();
1053 tsc1 = rdtsc();
1054 DELAY(1000);
1055 tsc2 = rdtsc();
1056 intr_restore(reg);
1057
1058 #ifdef SMP
1059 mtx_lock_spin(&sched_lock);
1060 sched_unbind(curthread);
1061 mtx_unlock_spin(&sched_lock);
1062 #endif
1063
	/*
	 * Calculate the difference in readings, convert to hertz, and
	 * subtract 0.5% of the total.  Empirical testing has shown that
	 * the overhead in DELAY() works out to approximately this value.
	 */
1069 tsc2 -= tsc1;
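	/*
	 * DELAY(1000) waits roughly 1000us, so tsc2 now holds ticks per
	 * millisecond; scaling by 1000 gives ticks per second, and
	 * tsc2 * 5 is 0.5% of that total.
	 */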
1070 *rate = tsc2 * 1000 - tsc2 * 5;
1071 return (0);
1072 }
1073
1074 /*
 * Shut down the CPU as much as possible
1076 */
1077 void
1078 cpu_halt(void)
1079 {
1080 for (;;)
1081 __asm__ ("hlt");
1082 }
1083
1084 /*
1085 * Hook to idle the CPU when possible. In the SMP case we default to
1086 * off because a halted cpu will not currently pick up a new thread in the
1087 * run queue until the next timer tick. If turned on this will result in
1088 * approximately a 4.2% loss in real time performance in buildworld tests
1089 * (but improves user and sys times oddly enough), and saves approximately
1090 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
1091 *
1092 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
1093 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
1094 * Then we can have our cake and eat it too.
1095 *
1096 * XXX I'm turning it on for SMP as well by default for now. It seems to
1097 * help lock contention somewhat, and this is critical for HTT. -Peter
1098 */
1099 static int cpu_idle_hlt = 1;
1100 SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
1101 &cpu_idle_hlt, 0, "Idle loop HLT enable");
1102
1103 static void
1104 cpu_idle_default(void)
1105 {
	/*
	 * We must guarantee that hlt is the very next instruction after
	 * sti, or we introduce a timing window.
	 */
1111 __asm __volatile("sti; hlt");
1112 }
1113
1114 /*
1115 * Note that we have to be careful here to avoid a race between checking
1116 * sched_runnable() and actually halting. If we don't do this, we may waste
1117 * the time between calling hlt and the next interrupt even though there
1118 * is a runnable process.
1119 */
1120 void
1121 cpu_idle(void)
1122 {
1123
1124 #ifdef SMP
1125 if (mp_grab_cpu_hlt())
1126 return;
1127 #endif
1128
1129 if (cpu_idle_hlt) {
1130 disable_intr();
1131 if (sched_runnable())
1132 enable_intr();
1133 else
1134 (*cpu_idle_hook)();
1135 }
1136 }
1137
1138 /* Other subsystems (e.g., ACPI) can hook this later. */
1139 void (*cpu_idle_hook)(void) = cpu_idle_default;
1140
1141 /*
1142 * Clear registers on exec
1143 */
1144 void
1145 exec_setregs(td, entry, stack, ps_strings)
1146 struct thread *td;
1147 u_long entry;
1148 u_long stack;
1149 u_long ps_strings;
1150 {
1151 struct trapframe *regs = td->td_frame;
1152 struct pcb *pcb = td->td_pcb;
1153
	/* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
1155 pcb->pcb_gs = _udatasel;
1156 load_gs(_udatasel);
1157
1158 if (td->td_proc->p_md.md_ldt)
1159 user_ldt_free(td);
1160
1161 bzero((char *)regs, sizeof(struct trapframe));
1162 regs->tf_eip = entry;
1163 regs->tf_esp = stack;
1164 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
1165 regs->tf_ss = _udatasel;
1166 regs->tf_ds = _udatasel;
1167 regs->tf_es = _udatasel;
1168 regs->tf_fs = _udatasel;
1169 regs->tf_cs = _ucodesel;
1170
1171 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */
1172 regs->tf_ebx = ps_strings;
1173
1174 /*
1175 * Reset the hardware debug registers if they were in use.
1176 * They won't have any meaning for the newly exec'd process.
1177 */
1178 if (pcb->pcb_flags & PCB_DBREGS) {
1179 pcb->pcb_dr0 = 0;
1180 pcb->pcb_dr1 = 0;
1181 pcb->pcb_dr2 = 0;
1182 pcb->pcb_dr3 = 0;
1183 pcb->pcb_dr6 = 0;
1184 pcb->pcb_dr7 = 0;
1185 if (pcb == PCPU_GET(curpcb)) {
1186 /*
1187 * Clear the debug registers on the running
1188 * CPU, otherwise they will end up affecting
1189 * the next process we switch to.
1190 */
1191 reset_dbregs();
1192 }
1193 pcb->pcb_flags &= ~PCB_DBREGS;
1194 }
1195
1196 /*
1197 * Initialize the math emulator (if any) for the current process.
1198 * Actually, just clear the bit that says that the emulator has
1199 * been initialized. Initialization is delayed until the process
1200 * traps to the emulator (if it is done at all) mainly because
1201 * emulators don't provide an entry point for initialization.
1202 */
1203 td->td_pcb->pcb_flags &= ~FP_SOFTFP;
1204
1205 /*
1206 * Drop the FP state if we hold it, so that the process gets a
1207 * clean FP state if it uses the FPU again.
1208 */
1209 fpstate_drop(td);
1210
1211 /*
1212 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
1215 */
1216 td->td_retval[1] = 0;
1217 }
1218
1219 void
1220 cpu_setregs(void)
1221 {
1222 unsigned int cr0;
1223
1224 cr0 = rcr0();
1225
1226 /*
1227 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
1228 *
1229 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
1230 * instructions. We must set the CR0_MP bit and use the CR0_TS
1231 * bit to control the trap, because setting the CR0_EM bit does
1232 * not cause WAIT instructions to trap. It's important to trap
1233 * WAIT instructions - otherwise the "wait" variants of no-wait
1234 * control instructions would degenerate to the "no-wait" variants
1235 * after FP context switches but work correctly otherwise. It's
1236 * particularly important to trap WAITs when there is no NPX -
1237 * otherwise the "wait" variants would always degenerate.
1238 *
1239 * Try setting CR0_NE to get correct error reporting on 486DX's.
1240 * Setting it should fail or do nothing on lesser processors.
1241 */
1242 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
1243 load_cr0(cr0);
1244 load_gs(_udatasel);
1245 }
1246
1247 static int
1248 sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
1249 {
1250 int error;
1251 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
1252 req);
1253 if (!error && req->newptr)
1254 resettodr();
1255 return (error);
1256 }
1257
1258 SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
1259 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");
1260
1261 SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
1262 CTLFLAG_RW, &disable_rtc_set, 0, "");
1263
1264 SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
1265 CTLFLAG_RD, &bootinfo, bootinfo, "");
1266
1267 SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
1268 CTLFLAG_RW, &wall_cmos_clock, 0, "");
1269
1270 u_long bootdev; /* not a struct cdev *- encoding is different */
1271 SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
1272 CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");
1273
1274 /*
1275 * Initialize 386 and configure to run kernel
1276 */
1277
1278 /*
1279 * Initialize segments & interrupt table
1280 */
1281
1282 int _default_ldt;
1283 union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */
1284 static struct gate_descriptor idt0[NIDT];
1285 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
1286 union descriptor ldt[NLDT]; /* local descriptor table */
1287 struct region_descriptor r_gdt, r_idt; /* table descriptors */
1288
1289 int private_tss; /* flag indicating private tss */
1290
1291 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
1292 extern int has_f00f_bug;
1293 #endif
1294
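/*
 * Separate TSS and stack used, via a task gate, to field double fault
 * (#DF) exceptions.
 */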
1295 static struct i386tss dblfault_tss;
1296 static char dblfault_stack[PAGE_SIZE];
1297
1298 extern vm_offset_t proc0kstack;
1299
1300
1301 /*
1302 * software prototypes -- in more palatable form.
1303 *
1304 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
1305 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
1306 */
1307 struct soft_segment_descriptor gdt_segs[] = {
1308 /* GNULL_SEL 0 Null Descriptor */
1309 { 0x0, /* segment base address */
1310 0x0, /* length */
1311 0, /* segment type */
1312 0, /* segment descriptor priority level */
1313 0, /* segment descriptor present */
1314 0, 0,
1315 0, /* default 32 vs 16 bit size */
1316 0 /* limit granularity (byte/page units)*/ },
1317 /* GPRIV_SEL 1 SMP Per-Processor Private Data Descriptor */
1318 { 0x0, /* segment base address */
1319 0xfffff, /* length - all address space */
1320 SDT_MEMRWA, /* segment type */
1321 0, /* segment descriptor priority level */
1322 1, /* segment descriptor present */
1323 0, 0,
1324 1, /* default 32 vs 16 bit size */
1325 1 /* limit granularity (byte/page units)*/ },
1326 /* GUFS_SEL 2 %fs Descriptor for user */
1327 { 0x0, /* segment base address */
1328 0xfffff, /* length - all address space */
1329 SDT_MEMRWA, /* segment type */
1330 SEL_UPL, /* segment descriptor priority level */
1331 1, /* segment descriptor present */
1332 0, 0,
1333 1, /* default 32 vs 16 bit size */
1334 1 /* limit granularity (byte/page units)*/ },
1335 /* GUGS_SEL 3 %gs Descriptor for user */
1336 { 0x0, /* segment base address */
1337 0xfffff, /* length - all address space */
1338 SDT_MEMRWA, /* segment type */
1339 SEL_UPL, /* segment descriptor priority level */
1340 1, /* segment descriptor present */
1341 0, 0,
1342 1, /* default 32 vs 16 bit size */
1343 1 /* limit granularity (byte/page units)*/ },
1344 /* GCODE_SEL 4 Code Descriptor for kernel */
1345 { 0x0, /* segment base address */
1346 0xfffff, /* length - all address space */
1347 SDT_MEMERA, /* segment type */
1348 0, /* segment descriptor priority level */
1349 1, /* segment descriptor present */
1350 0, 0,
1351 1, /* default 32 vs 16 bit size */
1352 1 /* limit granularity (byte/page units)*/ },
1353 /* GDATA_SEL 5 Data Descriptor for kernel */
1354 { 0x0, /* segment base address */
1355 0xfffff, /* length - all address space */
1356 SDT_MEMRWA, /* segment type */
1357 0, /* segment descriptor priority level */
1358 1, /* segment descriptor present */
1359 0, 0,
1360 1, /* default 32 vs 16 bit size */
1361 1 /* limit granularity (byte/page units)*/ },
1362 /* GUCODE_SEL 6 Code Descriptor for user */
1363 { 0x0, /* segment base address */
1364 0xfffff, /* length - all address space */
1365 SDT_MEMERA, /* segment type */
1366 SEL_UPL, /* segment descriptor priority level */
1367 1, /* segment descriptor present */
1368 0, 0,
1369 1, /* default 32 vs 16 bit size */
1370 1 /* limit granularity (byte/page units)*/ },
1371 /* GUDATA_SEL 7 Data Descriptor for user */
1372 { 0x0, /* segment base address */
1373 0xfffff, /* length - all address space */
1374 SDT_MEMRWA, /* segment type */
1375 SEL_UPL, /* segment descriptor priority level */
1376 1, /* segment descriptor present */
1377 0, 0,
1378 1, /* default 32 vs 16 bit size */
1379 1 /* limit granularity (byte/page units)*/ },
1380 /* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
1381 { 0x400, /* segment base address */
1382 0xfffff, /* length */
1383 SDT_MEMRWA, /* segment type */
1384 0, /* segment descriptor priority level */
1385 1, /* segment descriptor present */
1386 0, 0,
1387 1, /* default 32 vs 16 bit size */
1388 1 /* limit granularity (byte/page units)*/ },
1389 /* GPROC0_SEL 9 Proc 0 Tss Descriptor */
1390 {
1391 0x0, /* segment base address */
1392 sizeof(struct i386tss)-1,/* length */
1393 SDT_SYS386TSS, /* segment type */
1394 0, /* segment descriptor priority level */
1395 1, /* segment descriptor present */
1396 0, 0,
1397 0, /* unused - default 32 vs 16 bit size */
1398 0 /* limit granularity (byte/page units)*/ },
1399 /* GLDT_SEL 10 LDT Descriptor */
1400 { (int) ldt, /* segment base address */
1401 sizeof(ldt)-1, /* length - all address space */
1402 SDT_SYSLDT, /* segment type */
1403 SEL_UPL, /* segment descriptor priority level */
1404 1, /* segment descriptor present */
1405 0, 0,
1406 0, /* unused - default 32 vs 16 bit size */
1407 0 /* limit granularity (byte/page units)*/ },
1408 /* GUSERLDT_SEL 11 User LDT Descriptor per process */
1409 { (int) ldt, /* segment base address */
1410 (512 * sizeof(union descriptor)-1), /* length */
1411 SDT_SYSLDT, /* segment type */
1412 0, /* segment descriptor priority level */
1413 1, /* segment descriptor present */
1414 0, 0,
1415 0, /* unused - default 32 vs 16 bit size */
1416 0 /* limit granularity (byte/page units)*/ },
1417 /* GPANIC_SEL 12 Panic Tss Descriptor */
1418 { (int) &dblfault_tss, /* segment base address */
1419 sizeof(struct i386tss)-1,/* length - all address space */
1420 SDT_SYS386TSS, /* segment type */
1421 0, /* segment descriptor priority level */
1422 1, /* segment descriptor present */
1423 0, 0,
1424 0, /* unused - default 32 vs 16 bit size */
1425 0 /* limit granularity (byte/page units)*/ },
1426 /* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
1427 { 0, /* segment base address (overwritten) */
1428 0xfffff, /* length */
1429 SDT_MEMERA, /* segment type */
1430 0, /* segment descriptor priority level */
1431 1, /* segment descriptor present */
1432 0, 0,
1433 0, /* default 32 vs 16 bit size */
1434 1 /* limit granularity (byte/page units)*/ },
1435 /* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
1436 { 0, /* segment base address (overwritten) */
1437 0xfffff, /* length */
1438 SDT_MEMERA, /* segment type */
1439 0, /* segment descriptor priority level */
1440 1, /* segment descriptor present */
1441 0, 0,
1442 0, /* default 32 vs 16 bit size */
1443 1 /* limit granularity (byte/page units)*/ },
1444 /* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
1445 { 0, /* segment base address (overwritten) */
1446 0xfffff, /* length */
1447 SDT_MEMRWA, /* segment type */
1448 0, /* segment descriptor priority level */
1449 1, /* segment descriptor present */
1450 0, 0,
1451 1, /* default 32 vs 16 bit size */
1452 1 /* limit granularity (byte/page units)*/ },
1453 /* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
1454 { 0, /* segment base address (overwritten) */
1455 0xfffff, /* length */
1456 SDT_MEMRWA, /* segment type */
1457 0, /* segment descriptor priority level */
1458 1, /* segment descriptor present */
1459 0, 0,
1460 0, /* default 32 vs 16 bit size */
1461 1 /* limit granularity (byte/page units)*/ },
1462 /* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
1463 { 0, /* segment base address (overwritten) */
1464 0xfffff, /* length */
1465 SDT_MEMRWA, /* segment type */
1466 0, /* segment descriptor priority level */
1467 1, /* segment descriptor present */
1468 0, 0,
1469 0, /* default 32 vs 16 bit size */
1470 1 /* limit granularity (byte/page units)*/ },
1471 /* GNDIS_SEL 18 NDIS Descriptor */
1472 { 0x0, /* segment base address */
1473 0x0, /* length */
1474 0, /* segment type */
1475 0, /* segment descriptor priority level */
1476 0, /* segment descriptor present */
1477 0, 0,
1478 0, /* default 32 vs 16 bit size */
1479 0 /* limit granularity (byte/page units)*/ },
1480 };
1481
1482 static struct soft_segment_descriptor ldt_segs[] = {
1483 /* Null Descriptor - overwritten by call gate */
1484 { 0x0, /* segment base address */
1485 0x0, /* length - all address space */
1486 0, /* segment type */
1487 0, /* segment descriptor priority level */
1488 0, /* segment descriptor present */
1489 0, 0,
1490 0, /* default 32 vs 16 bit size */
1491 0 /* limit granularity (byte/page units)*/ },
1492 /* Null Descriptor - overwritten by call gate */
1493 { 0x0, /* segment base address */
1494 0x0, /* length - all address space */
1495 0, /* segment type */
1496 0, /* segment descriptor priority level */
1497 0, /* segment descriptor present */
1498 0, 0,
1499 0, /* default 32 vs 16 bit size */
1500 0 /* limit granularity (byte/page units)*/ },
1501 /* Null Descriptor - overwritten by call gate */
1502 { 0x0, /* segment base address */
1503 0x0, /* length - all address space */
1504 0, /* segment type */
1505 0, /* segment descriptor priority level */
1506 0, /* segment descriptor present */
1507 0, 0,
1508 0, /* default 32 vs 16 bit size */
1509 0 /* limit granularity (byte/page units)*/ },
1510 /* Code Descriptor for user */
1511 { 0x0, /* segment base address */
1512 0xfffff, /* length - all address space */
1513 SDT_MEMERA, /* segment type */
1514 SEL_UPL, /* segment descriptor priority level */
1515 1, /* segment descriptor present */
1516 0, 0,
1517 1, /* default 32 vs 16 bit size */
1518 1 /* limit granularity (byte/page units)*/ },
1519 /* Null Descriptor - overwritten by call gate */
1520 { 0x0, /* segment base address */
1521 0x0, /* length - all address space */
1522 0, /* segment type */
1523 0, /* segment descriptor priority level */
1524 0, /* segment descriptor present */
1525 0, 0,
1526 0, /* default 32 vs 16 bit size */
1527 0 /* limit granularity (byte/page units)*/ },
1528 /* Data Descriptor for user */
1529 { 0x0, /* segment base address */
1530 0xfffff, /* length - all address space */
1531 SDT_MEMRWA, /* segment type */
1532 SEL_UPL, /* segment descriptor priority level */
1533 1, /* segment descriptor present */
1534 0, 0,
1535 1, /* default 32 vs 16 bit size */
1536 1 /* limit granularity (byte/page units)*/ },
1537 };
1538
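/*
 * Install a gate descriptor in the IDT: point entry `idx' at handler
 * `func' with the given gate type, descriptor privilege level and code
 * segment selector.
 */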
1539 void
1540 setidt(idx, func, typ, dpl, selec)
1541 int idx;
1542 inthand_t *func;
1543 int typ;
1544 int dpl;
1545 int selec;
1546 {
1547 struct gate_descriptor *ip;
1548
1549 ip = idt + idx;
1550 ip->gd_looffset = (int)func;
1551 ip->gd_selector = selec;
1552 ip->gd_stkcpy = 0;
1553 ip->gd_xx = 0;
1554 ip->gd_type = typ;
1555 ip->gd_dpl = dpl;
1556 ip->gd_p = 1;
1557 ip->gd_hioffset = ((int)func)>>16 ;
1558 }
1559
1560 #define IDTVEC(name) __CONCAT(X,name)
1561
1562 extern inthand_t
1563 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1564 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1565 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1566 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1567 IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
1568
1569 #ifdef DDB
1570 /*
1571 * Display the index and function name of any IDT entries that don't use
1572 * the default 'rsvd' entry point.
1573 */
1574 DB_SHOW_COMMAND(idt, db_show_idt)
1575 {
1576 struct gate_descriptor *ip;
1577 int idx, quit;
1578 uintptr_t func;
1579
1580 ip = idt;
1581 db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
1582 for (idx = 0, quit = 0; idx < NIDT; idx++) {
1583 func = (ip->gd_hioffset << 16 | ip->gd_looffset);
1584 if (func != (uintptr_t)&IDTVEC(rsvd)) {
1585 db_printf("%3d\t", idx);
1586 db_printsym(func, DB_STGY_PROC);
1587 db_printf("\n");
1588 }
1589 ip++;
1590 }
1591 }
1592 #endif
1593
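/*
 * Convert a hardware segment descriptor into the software form
 * (struct soft_segment_descriptor) used by the rest of this file.
 */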
1594 void
1595 sdtossd(sd, ssd)
1596 struct segment_descriptor *sd;
1597 struct soft_segment_descriptor *ssd;
1598 {
1599 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1600 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1601 ssd->ssd_type = sd->sd_type;
1602 ssd->ssd_dpl = sd->sd_dpl;
1603 ssd->ssd_p = sd->sd_p;
1604 ssd->ssd_def32 = sd->sd_def32;
1605 ssd->ssd_gran = sd->sd_gran;
1606 }
1607
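/* Room in physmap[] for 8 base/bound pairs of physical memory. */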
1608 #define PHYSMAP_SIZE (2 * 8)
1609
1610 /*
1611 * Populate the (physmap) array with base/bound pairs describing the
1612 * available physical memory in the system, then test this memory and
1613 * build the phys_avail array describing the actually-available memory.
1614 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call, and failing that, the RTC.
1617 *
1618 * Total memory size may be set by the kernel environment variable
1619 * hw.physmem or the compile-time define MAXMEM.
1620 *
1621 * XXX first should be vm_paddr_t.
1622 */
1623 static void
1624 getmemsize(int first)
1625 {
1626 int i, physmap_idx, pa_indx, da_indx;
1627 int hasbrokenint12;
1628 u_long physmem_tunable;
1629 u_int extmem;
1630 struct vm86frame vmf;
1631 struct vm86context vmc;
1632 vm_paddr_t pa, physmap[PHYSMAP_SIZE];
1633 pt_entry_t *pte;
1634 struct bios_smap *smap;
1635 quad_t dcons_addr, dcons_size;
1636
1637 hasbrokenint12 = 0;
1638 TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
1639 bzero(&vmf, sizeof(vmf));
1640 bzero(physmap, sizeof(physmap));
1641 basemem = 0;
1642
1643 /*
 * Some newer BIOSes have a broken INT 12H implementation that causes
 * an immediate kernel panic.  In this case, we need to scan the SMAP
 * with INT 15:E820 first, then determine base memory size.
1647 */
1648 if (hasbrokenint12) {
1649 goto int15e820;
1650 }
1651
1652 /*
1653 * Perform "base memory" related probes & setup
1654 */
1655 vm86_intcall(0x12, &vmf);
1656 basemem = vmf.vmf_ax;
1657 if (basemem > 640) {
1658 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
1659 basemem);
1660 basemem = 640;
1661 }
1662
1663 /*
1664 * XXX if biosbasemem is now < 640, there is a `hole'
1665 * between the end of base memory and the start of
1666 * ISA memory. The hole may be empty or it may
1667 * contain BIOS code or data. Map it read/write so
1668 * that the BIOS can write to it. (Memory from 0 to
1669 * the physical end of the kernel is mapped read-only
1670 * to begin with and then parts of it are remapped.
1671 * The parts that aren't remapped form holes that
1672 * remain read-only and are unused by the kernel.
1673 * The base memory area is below the physical end of
1674 * the kernel and right now forms a read-only hole.
1675 * The part of it from PAGE_SIZE to
1676 * (trunc_page(biosbasemem * 1024) - 1) will be
1677 * remapped and used by the kernel later.)
1678 *
1679 * This code is similar to the code used in
1680 * pmap_mapdev, but since no memory needs to be
1681 * allocated we simply change the mapping.
1682 */
1683 for (pa = trunc_page(basemem * 1024);
1684 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1685 pmap_kenter(KERNBASE + pa, pa);
1686
1687 /*
1688 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
1689 * the vm86 page table so that vm86 can scribble on them using
1690 * the vm86 map too. XXX: why 2 ways for this and only 1 way for
1691 * page 0, at least as initialized here?
1692 */
1693 pte = (pt_entry_t *)vm86paddr;
1694 for (i = basemem / 4; i < 160; i++)
1695 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
1696
1697 int15e820:
1698 /*
1699 * map page 1 R/W into the kernel page table so we can use it
1700 * as a buffer. The kernel will unmap this page later.
1701 */
1702 pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
1703
1704 /*
1705 * get memory map with INT 15:E820
1706 */
1707 vmc.npages = 0;
1708 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
1709 vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
1710
1711 physmap_idx = 0;
1712 vmf.vmf_ebx = 0;
1713 do {
1714 vmf.vmf_eax = 0xE820;
1715 vmf.vmf_edx = SMAP_SIG;
1716 vmf.vmf_ecx = sizeof(struct bios_smap);
1717 i = vm86_datacall(0x15, &vmf, &vmc);
1718 if (i || vmf.vmf_eax != SMAP_SIG)
1719 break;
1720 if (boothowto & RB_VERBOSE)
1721 printf("SMAP type=%02x base=%016llx len=%016llx\n",
1722 smap->type, smap->base, smap->length);
1723
1724 if (smap->type != 0x01)
1725 continue;
1726
1727 if (smap->length == 0)
1728 continue;
1729
1730 #ifndef PAE
1731 if (smap->base >= 0xffffffff) {
1732 printf("%uK of memory above 4GB ignored\n",
1733 (u_int)(smap->length / 1024));
1734 continue;
1735 }
1736 #endif
1737
1738 for (i = 0; i <= physmap_idx; i += 2) {
1739 if (smap->base < physmap[i + 1]) {
1740 if (boothowto & RB_VERBOSE)
1741 printf(
1742 "Overlapping or non-montonic memory region, ignoring second region\n");
1743 continue;
1744 }
1745 }
1746
1747 if (smap->base == physmap[physmap_idx + 1]) {
1748 physmap[physmap_idx + 1] += smap->length;
1749 continue;
1750 }
1751
1752 physmap_idx += 2;
1753 if (physmap_idx == PHYSMAP_SIZE) {
1754 printf(
1755 "Too many segments in the physical address map, giving up\n");
1756 break;
1757 }
1758 physmap[physmap_idx] = smap->base;
1759 physmap[physmap_idx + 1] = smap->base + smap->length;
1760 } while (vmf.vmf_ebx != 0);
1761
1762 /*
1763 * Perform "base memory" related probes & setup based on SMAP
1764 */
1765 if (basemem == 0) {
1766 for (i = 0; i <= physmap_idx; i += 2) {
1767 if (physmap[i] == 0x00000000) {
1768 basemem = physmap[i + 1] / 1024;
1769 break;
1770 }
1771 }
1772
1773 /*
		 * XXX this function is horribly organized and has to do the
		 * same things that it does above here.
1776 */
1777 if (basemem == 0)
1778 basemem = 640;
1779 if (basemem > 640) {
1780 printf(
1781 "Preposterous BIOS basemem of %uK, truncating to 640K\n",
1782 basemem);
1783 basemem = 640;
1784 }
1785
1786 /*
1787 * Let vm86 scribble on pages between basemem and
1788 * ISA_HOLE_START, as above.
1789 */
1790 for (pa = trunc_page(basemem * 1024);
1791 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1792 pmap_kenter(KERNBASE + pa, pa);
1793 pte = (pt_entry_t *)vm86paddr;
1794 for (i = basemem / 4; i < 160; i++)
1795 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
1796 }
1797
1798 if (physmap[1] != 0)
1799 goto physmap_done;
1800
1801 /*
1802 * If we failed above, try memory map with INT 15:E801
1803 */
1804 vmf.vmf_ax = 0xE801;
1805 if (vm86_intcall(0x15, &vmf) == 0) {
1806 extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
1807 } else {
1808 #if 0
1809 vmf.vmf_ah = 0x88;
1810 vm86_intcall(0x15, &vmf);
1811 extmem = vmf.vmf_ax;
1812 #else
1813 /*
1814 * Prefer the RTC value for extended memory.
1815 */
1816 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
1817 #endif
1818 }
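	/*
	 * The RTC fallback above reads a 16-bit count of extended memory in
	 * kilobytes, low byte first.  As a worked example, RTC_EXTLO = 0x00
	 * and RTC_EXTHI = 0x3c combine to 0x3c00 = 15360 KB, i.e. 15 MB of
	 * memory above the 1 MB mark.
	 */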
1819
1820 /*
1821 * Special hack for chipsets that still remap the 384k hole when
1822 * there's 16MB of memory - this really confuses people that
1823 * are trying to use bus mastering ISA controllers with the
1824 * "16MB limit"; they only have 16MB, but the remapping puts
1825 * them beyond the limit.
1826 *
1827 * If extended memory is between 15-16MB (16-17MB phys address range),
1828 * chop it to 15MB.
1829 */
1830 if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
1831 extmem = 15 * 1024;
1832
1833 physmap[0] = 0;
1834 physmap[1] = basemem * 1024;
1835 physmap_idx = 2;
1836 physmap[physmap_idx] = 0x100000;
1837 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
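	/*
	 * The fallback map built above has exactly two segments.  For
	 * example, basemem = 640 and extmem = 15 * 1024 would give:
	 *
	 *	physmap[0] = 0         physmap[1] = 0xa0000    (640 KB)
	 *	physmap[2] = 0x100000  physmap[3] = 0x1000000  (1 MB .. 16 MB)
	 */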
1838
1839 physmap_done:
1840 /*
1841 * Now, physmap contains a map of physical memory.
1842 */
1843
1844 #ifdef SMP
1845 /* make hole for AP bootstrap code */
1846 physmap[1] = mp_bootaddress(physmap[1]);
1847 #endif
1848
1849 /*
1850 * Maxmem isn't the "maximum memory", it's one larger than the
1851 * highest page of the physical address space. It should be
1852 * called something like "Maxphyspage". We may adjust this
1853 * based on ``hw.physmem'' and the results of the memory test.
1854 */
1855 Maxmem = atop(physmap[physmap_idx + 1]);
1856
1857 #ifdef MAXMEM
1858 Maxmem = MAXMEM / 4;
1859 #endif
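	/*
	 * MAXMEM is given in kilobytes in the kernel configuration, so
	 * dividing by 4 converts it to 4 KB pages; e.g. "options
	 * MAXMEM=524288" (512 MB) becomes Maxmem = 131072 pages.  The
	 * hw.physmem tunable below is in bytes and is converted with
	 * atop() instead.
	 */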
1860
1861 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
1862 Maxmem = atop(physmem_tunable);
1863
1864 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1865 (boothowto & RB_VERBOSE))
1866 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1867
1868 /*
1869 * If Maxmem has been increased beyond what the system has detected,
1870 * extend the last memory segment to the new limit.
1871 */
1872 if (atop(physmap[physmap_idx + 1]) < Maxmem)
1873 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
1874
1875 /* call pmap initialization to make new kernel address space */
1876 pmap_bootstrap(first, 0);
1877
1878 /*
1879 * Size up each available chunk of physical memory.
1880 */
1881 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1882 pa_indx = 0;
1883 da_indx = 1;
1884 phys_avail[pa_indx++] = physmap[0];
1885 phys_avail[pa_indx] = physmap[0];
1886 dump_avail[da_indx] = physmap[0];
1887 pte = CMAP1;
1888
1889 /*
1890 * Get dcons buffer address
1891 */
1892 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
1893 getenv_quad("dcons.size", &dcons_size) == 0)
1894 dcons_addr = 0;
1895
1896 /*
1897 * physmap is in bytes, so when converting to page boundaries,
1898 * round up the start address and round down the end address.
1899 */
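	/*
	 * For example, a segment starting at 0x0009fc00 begins scanning at
	 * round_page(0x0009fc00) = 0x000a0000, while an end address such as
	 * 0x3fff0000 is already page aligned and is left unchanged by
	 * trunc_page().
	 */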
1900 for (i = 0; i <= physmap_idx; i += 2) {
1901 vm_paddr_t end;
1902
1903 end = ptoa((vm_paddr_t)Maxmem);
1904 if (physmap[i + 1] < end)
1905 end = trunc_page(physmap[i + 1]);
1906 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1907 int tmp, page_bad, full;
1908 int *ptr = (int *)CADDR1;
1909
1910 full = FALSE;
1911 /*
1912 * block out kernel memory as not available.
1913 */
1914 if (pa >= KERNLOAD && pa < first)
1915 goto do_dump_avail;
1916
1917 /*
1918 * block out dcons buffer
1919 */
1920 if (dcons_addr > 0
1921 && pa >= trunc_page(dcons_addr)
1922 && pa < dcons_addr + dcons_size)
1923 goto do_dump_avail;
1924
1925 page_bad = FALSE;
1926
1927 /*
1928 			 * map page into kernel: valid, read/write, non-cacheable
1929 */
1930 *pte = pa | PG_V | PG_RW | PG_N;
1931 invltlb();
1932
1933 tmp = *(int *)ptr;
1934 /*
1935 * Test for alternating 1's and 0's
1936 */
1937 *(volatile int *)ptr = 0xaaaaaaaa;
1938 if (*(volatile int *)ptr != 0xaaaaaaaa)
1939 page_bad = TRUE;
1940 /*
1941 * Test for alternating 0's and 1's
1942 */
1943 *(volatile int *)ptr = 0x55555555;
1944 if (*(volatile int *)ptr != 0x55555555)
1945 page_bad = TRUE;
1946 /*
1947 * Test for all 1's
1948 */
1949 *(volatile int *)ptr = 0xffffffff;
1950 if (*(volatile int *)ptr != 0xffffffff)
1951 page_bad = TRUE;
1952 /*
1953 * Test for all 0's
1954 */
1955 *(volatile int *)ptr = 0x0;
1956 if (*(volatile int *)ptr != 0x0)
1957 page_bad = TRUE;
1958 /*
1959 * Restore original value.
1960 */
1961 *(int *)ptr = tmp;
1962
1963 /*
1964 * Adjust array of valid/good pages.
1965 */
1966 if (page_bad == TRUE)
1967 continue;
1968 /*
1969 * If this good page is a continuation of the
1970 * previous set of good pages, then just increase
1971 * the end pointer. Otherwise start a new chunk.
1972 			 * Note that the stored "end" points one page past the
1973 			 * last good page, making the range >= start and < end.
1974 			 * If we're also doing a speculative memory
1975 			 * test and we are at or past the end, bump up Maxmem
1976 * so that we keep going. The first bad page
1977 * will terminate the loop.
1978 */
1979 if (phys_avail[pa_indx] == pa) {
1980 phys_avail[pa_indx] += PAGE_SIZE;
1981 } else {
1982 pa_indx++;
1983 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
1984 printf(
1985 "Too many holes in the physical address space, giving up\n");
1986 pa_indx--;
1987 full = TRUE;
1988 goto do_dump_avail;
1989 }
1990 phys_avail[pa_indx++] = pa; /* start */
1991 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
1992 }
1993 physmem++;
1994 do_dump_avail:
1995 if (dump_avail[da_indx] == pa) {
1996 dump_avail[da_indx] += PAGE_SIZE;
1997 } else {
1998 da_indx++;
1999 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2000 da_indx--;
2001 goto do_next;
2002 }
2003 dump_avail[da_indx++] = pa; /* start */
2004 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2005 }
2006 do_next:
2007 if (full)
2008 break;
2009 }
2010 }
2011 *pte = 0;
2012 invltlb();
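	/*
	 * The probe above boils down to writing four patterns through the
	 * CMAP1/CADDR1 scratch mapping and reading each one back.  A minimal
	 * sketch of that check, using a hypothetical helper name, might be:
	 *
	 *	static int
	 *	memtest_word(volatile int *p)
	 *	{
	 *		static const int patterns[] =
	 *		    { 0xaaaaaaaa, 0x55555555, 0xffffffff, 0x00000000 };
	 *		int i, saved = *p, bad = 0;
	 *
	 *		for (i = 0; i < 4; i++) {
	 *			*p = patterns[i];
	 *			if (*p != patterns[i])
	 *				bad = 1;
	 *		}
	 *		*p = saved;
	 *		return (bad);
	 *	}
	 */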
2013
2014 /*
2015 * XXX
2016 * The last chunk must contain at least one page plus the message
2017 * buffer to avoid complicating other code (message buffer address
2018 * calculation, etc.).
2019 */
2020 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2021 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
2022 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2023 phys_avail[pa_indx--] = 0;
2024 phys_avail[pa_indx--] = 0;
2025 }
2026
2027 Maxmem = atop(phys_avail[pa_indx]);
2028
2029 /* Trim off space for the message buffer. */
2030 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
2031
2032 avail_end = phys_avail[pa_indx];
2033 }
2034
2035 void
2036 init386(first)
2037 int first;
2038 {
2039 struct gate_descriptor *gdp;
2040 int gsel_tss, metadata_missing, off, x;
2041 struct pcpu *pc;
2042
2043 thread0.td_kstack = proc0kstack;
2044 thread0.td_pcb = (struct pcb *)
2045 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
2046
2047 /*
2048 	 * This may be done better later if it gets more high-level
2049 	 * components in it.  If so, just link td->td_proc here.
2050 */
2051 proc_linkup(&proc0, &ksegrp0, &thread0);
2052
2053 metadata_missing = 0;
2054 if (bootinfo.bi_modulep) {
2055 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
2056 preload_bootstrap_relocate(KERNBASE);
2057 } else {
2058 metadata_missing = 1;
2059 }
2060 if (envmode == 1)
2061 kern_envp = static_env;
2062 else if (bootinfo.bi_envp)
2063 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
2064
2065 /* Init basic tunables, hz etc */
2066 init_param1();
2067
2068 /*
2069 * Make gdt memory segments. All segments cover the full 4GB
2070 * of address space and permissions are enforced at page level.
2071 */
2072 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
2073 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
2074 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
2075 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
2076 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
2077 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
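	/*
	 * atop(0 - 1) == atop(0xffffffff) == 0xfffff, the maximum 20-bit
	 * segment limit; combined with page granularity this describes the
	 * full 4 GB address space, so protection really is left to the page
	 * tables as the comment above says.
	 */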
2078
2079 #ifdef SMP
2080 pc = &SMP_prvspace[0].pcpu;
2081 #else
2082 pc = &__pcpu;
2083 #endif
2084 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
2085 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2086 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2087
2088 for (x = 0; x < NGDT; x++)
2089 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2090
2091 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
2092 r_gdt.rd_base = (int) gdt;
2093 lgdt(&r_gdt);
2094
2095 pcpu_init(pc, 0, sizeof(struct pcpu));
2096 PCPU_SET(prvspace, pc);
2097 PCPU_SET(curthread, &thread0);
2098 PCPU_SET(curpcb, thread0.td_pcb);
2099
2100 /*
2101 * Initialize mutexes.
2102 *
2103 * icu_lock: in order to allow an interrupt to occur in a critical
2104 * section, to set pcpu->ipending (etc...) properly, we
2105 * must be able to get the icu lock, so it can't be
2106 * under witness.
2107 */
2108 mutex_init();
2109 mtx_init(&clock_lock, "clk", NULL, MTX_SPIN);
2110 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
2111
2112 /* make ldt memory segments */
2113 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2114 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2115 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2116 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2117
2118 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2119 lldt(_default_ldt);
2120 PCPU_SET(currentldt, _default_ldt);
2121
2122 /* exceptions */
2123 for (x = 0; x < NIDT; x++)
2124 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2125 GSEL(GCODE_SEL, SEL_KPL));
2126 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2127 GSEL(GCODE_SEL, SEL_KPL));
2128 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2129 GSEL(GCODE_SEL, SEL_KPL));
2130 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2131 GSEL(GCODE_SEL, SEL_KPL));
2132 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2133 GSEL(GCODE_SEL, SEL_KPL));
2134 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2135 GSEL(GCODE_SEL, SEL_KPL));
2136 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2137 GSEL(GCODE_SEL, SEL_KPL));
2138 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2139 GSEL(GCODE_SEL, SEL_KPL));
2140 	setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
2141 	    GSEL(GCODE_SEL, SEL_KPL));
2142 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2143 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2144 GSEL(GCODE_SEL, SEL_KPL));
2145 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2146 GSEL(GCODE_SEL, SEL_KPL));
2147 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2148 GSEL(GCODE_SEL, SEL_KPL));
2149 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2150 GSEL(GCODE_SEL, SEL_KPL));
2151 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2152 GSEL(GCODE_SEL, SEL_KPL));
2153 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2154 GSEL(GCODE_SEL, SEL_KPL));
2155 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2156 GSEL(GCODE_SEL, SEL_KPL));
2157 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2158 GSEL(GCODE_SEL, SEL_KPL));
2159 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2160 GSEL(GCODE_SEL, SEL_KPL));
2161 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2162 GSEL(GCODE_SEL, SEL_KPL));
2163 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2164 GSEL(GCODE_SEL, SEL_KPL));
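	/*
	 * GSEL(sel, rpl) packs a GDT index and a privilege level into a
	 * selector, roughly (sel << 3) | rpl, so every gate above targets
	 * the kernel code segment at ring 0.  The entries for breakpoints,
	 * overflow and int $0x80 pass SEL_UPL as the gate DPL so that user
	 * mode may raise them directly.
	 */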
2165
2166 r_idt.rd_limit = sizeof(idt0) - 1;
2167 r_idt.rd_base = (int) idt;
2168 lidt(&r_idt);
2169
2170 /*
2171 * Initialize the console before we print anything out.
2172 */
2173 cninit();
2174
2175 if (metadata_missing)
2176 printf("WARNING: loader(8) metadata is missing!\n");
2177
2178 #ifdef DEV_ISA
2179 elcr_probe();
2180 atpic_startup();
2181 #endif
2182
2183 #ifdef DDB
2184 ksym_start = bootinfo.bi_symtab;
2185 ksym_end = bootinfo.bi_esymtab;
2186 #endif
2187
2188 kdb_init();
2189
2190 #ifdef KDB
2191 if (boothowto & RB_KDB)
2192 kdb_enter("Boot flags requested debugger");
2193 #endif
2194
2195 finishidentcpu(); /* Final stage of CPU initialization */
2196 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2197 GSEL(GCODE_SEL, SEL_KPL));
2198 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2199 GSEL(GCODE_SEL, SEL_KPL));
2200 initializecpu(); /* Initialize CPU registers */
2201
2202 /* make an initial tss so cpu can get interrupt stack on syscall! */
2203 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2204 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2205 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
2206 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2207 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2208 private_tss = 0;
2209 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
2210 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
2211 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
2212 ltr(gsel_tss);
2213
2214 /* pointer to selector slot for %fs/%gs */
2215 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
2216
2217 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2218 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2219 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2220 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2221 #ifdef PAE
2222 dblfault_tss.tss_cr3 = (int)IdlePDPT;
2223 #else
2224 dblfault_tss.tss_cr3 = (int)IdlePTD;
2225 #endif
2226 dblfault_tss.tss_eip = (int)dblfault_handler;
2227 dblfault_tss.tss_eflags = PSL_KERNEL;
2228 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2229 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2230 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2231 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2232 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2233
2234 vm86_initialize();
2235 getmemsize(first);
2236 init_param2(physmem);
2237
2238 	/* now running on new page tables, configured, and u/iom is accessible */
2239
2240 /* Map the message buffer. */
2241 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
2242 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
2243
2244 msgbufinit(msgbufp, MSGBUF_SIZE);
2245
2246 /* make a call gate to reenter kernel with */
2247 gdp = &ldt[LSYS5CALLS_SEL].gd;
2248
2249 x = (int) &IDTVEC(lcall_syscall);
2250 gdp->gd_looffset = x;
2251 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
2252 gdp->gd_stkcpy = 1;
2253 gdp->gd_type = SDT_SYS386CGT;
2254 gdp->gd_dpl = SEL_UPL;
2255 gdp->gd_p = 1;
2256 gdp->gd_hioffset = x >> 16;
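	/*
	 * A call gate stores its 32-bit target in two halves.  As a worked
	 * example, if &IDTVEC(lcall_syscall) were 0xc0123456, gd_looffset
	 * would hold 0x3456 and gd_hioffset 0xc012; the hardware glues the
	 * halves back together when the gate is used via lcall.
	 */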
2257
2258 /* XXX does this work? */
2259 /* XXX yes! */
2260 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2261 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
2262
2263 /* transfer to user mode */
2264
2265 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
2266 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
2267
2268 /* setup proc 0's pcb */
2269 thread0.td_pcb->pcb_flags = 0; /* XXXKSE */
2270 #ifdef PAE
2271 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
2272 #else
2273 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2274 #endif
2275 thread0.td_pcb->pcb_ext = 0;
2276 thread0.td_frame = &proc0_tf;
2277 }
2278
2279 void
2280 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
2281 {
2282
2283 pcpu->pc_acpi_id = 0xffffffff;
2284 }
2285
2286 void
2287 spinlock_enter(void)
2288 {
2289 struct thread *td;
2290
2291 td = curthread;
2292 if (td->td_md.md_spinlock_count == 0)
2293 td->td_md.md_saved_flags = intr_disable();
2294 td->td_md.md_spinlock_count++;
2295 critical_enter();
2296 }
2297
2298 void
2299 spinlock_exit(void)
2300 {
2301 struct thread *td;
2302
2303 td = curthread;
2304 critical_exit();
2305 td->td_md.md_spinlock_count--;
2306 if (td->td_md.md_spinlock_count == 0)
2307 intr_restore(td->td_md.md_saved_flags);
2308 }
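/*
 * The two routines above nest: only the outermost spinlock_enter()
 * disables interrupts and saves the flags, and only the matching
 * outermost spinlock_exit() restores them.  A nested sequence such as
 *
 *	spinlock_enter();	count 0 -> 1, interrupts off, flags saved
 *	 spinlock_enter();	count 1 -> 2, no flag change
 *	 spinlock_exit();	count 2 -> 1, no flag change
 *	spinlock_exit();	count 1 -> 0, flags restored
 *
 * therefore leaves the interrupt state exactly as it found it.
 */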
2309
2310 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2311 static void f00f_hack(void *unused);
2312 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL)
2313
2314 static void
2315 f00f_hack(void *unused)
2316 {
2317 struct gate_descriptor *new_idt;
2318 vm_offset_t tmp;
2319
2320 if (!has_f00f_bug)
2321 return;
2322
2323 GIANT_REQUIRED;
2324
2325 printf("Intel Pentium detected, installing workaround for F00F bug\n");
2326
2327 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
2328 if (tmp == 0)
2329 panic("kmem_alloc returned 0");
2330
2331 /* Put the problematic entry (#6) at the end of the lower page. */
2332 new_idt = (struct gate_descriptor*)
2333 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
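	/*
	 * sizeof(struct gate_descriptor) is 8 bytes, so the 7 * 8 = 56 byte
	 * offset above places entries 0 through 6 at the very end of the
	 * first page of the two-page allocation, i.e. inside the range
	 * whose protection is tightened by vm_map_protect() below.
	 */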
2334 bcopy(idt, new_idt, sizeof(idt0));
2335 r_idt.rd_base = (u_int)new_idt;
2336 lidt(&r_idt);
2337 idt = new_idt;
2338 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
2339 VM_PROT_READ, FALSE) != KERN_SUCCESS)
2340 panic("vm_map_protect failed");
2341 }
2342 #endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
2343
2344 /*
2345 * Construct a PCB from a trapframe. This is called from kdb_trap() where
2346 * we want to start a backtrace from the function that caused us to enter
2347 * the debugger. We have the context in the trapframe, but base the trace
2348 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
2349 * enough for a backtrace.
2350 */
2351 void
2352 makectx(struct trapframe *tf, struct pcb *pcb)
2353 {
2354
2355 pcb->pcb_edi = tf->tf_edi;
2356 pcb->pcb_esi = tf->tf_esi;
2357 pcb->pcb_ebp = tf->tf_ebp;
2358 pcb->pcb_ebx = tf->tf_ebx;
2359 pcb->pcb_eip = tf->tf_eip;
2360 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
2361 }
2362
2363 int
2364 ptrace_set_pc(struct thread *td, u_long addr)
2365 {
2366
2367 td->td_frame->tf_eip = addr;
2368 return (0);
2369 }
2370
2371 int
2372 ptrace_single_step(struct thread *td)
2373 {
2374 td->td_frame->tf_eflags |= PSL_T;
2375 return (0);
2376 }
2377
2378 int
2379 ptrace_clear_single_step(struct thread *td)
2380 {
2381 td->td_frame->tf_eflags &= ~PSL_T;
2382 return (0);
2383 }
2384
2385 int
2386 fill_regs(struct thread *td, struct reg *regs)
2387 {
2388 struct pcb *pcb;
2389 struct trapframe *tp;
2390
2391 tp = td->td_frame;
2392 pcb = td->td_pcb;
2393 regs->r_fs = tp->tf_fs;
2394 regs->r_es = tp->tf_es;
2395 regs->r_ds = tp->tf_ds;
2396 regs->r_edi = tp->tf_edi;
2397 regs->r_esi = tp->tf_esi;
2398 regs->r_ebp = tp->tf_ebp;
2399 regs->r_ebx = tp->tf_ebx;
2400 regs->r_edx = tp->tf_edx;
2401 regs->r_ecx = tp->tf_ecx;
2402 regs->r_eax = tp->tf_eax;
2403 regs->r_eip = tp->tf_eip;
2404 regs->r_cs = tp->tf_cs;
2405 regs->r_eflags = tp->tf_eflags;
2406 regs->r_esp = tp->tf_esp;
2407 regs->r_ss = tp->tf_ss;
2408 regs->r_gs = pcb->pcb_gs;
2409 return (0);
2410 }
2411
2412 int
2413 set_regs(struct thread *td, struct reg *regs)
2414 {
2415 struct pcb *pcb;
2416 struct trapframe *tp;
2417
2418 tp = td->td_frame;
2419 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
2420 !CS_SECURE(regs->r_cs))
2421 return (EINVAL);
2422 pcb = td->td_pcb;
2423 tp->tf_fs = regs->r_fs;
2424 tp->tf_es = regs->r_es;
2425 tp->tf_ds = regs->r_ds;
2426 tp->tf_edi = regs->r_edi;
2427 tp->tf_esi = regs->r_esi;
2428 tp->tf_ebp = regs->r_ebp;
2429 tp->tf_ebx = regs->r_ebx;
2430 tp->tf_edx = regs->r_edx;
2431 tp->tf_ecx = regs->r_ecx;
2432 tp->tf_eax = regs->r_eax;
2433 tp->tf_eip = regs->r_eip;
2434 tp->tf_cs = regs->r_cs;
2435 tp->tf_eflags = regs->r_eflags;
2436 tp->tf_esp = regs->r_esp;
2437 tp->tf_ss = regs->r_ss;
2438 pcb->pcb_gs = regs->r_gs;
2439 return (0);
2440 }
2441
2442 #ifdef CPU_ENABLE_SSE
2443 static void
2444 fill_fpregs_xmm(sv_xmm, sv_87)
2445 struct savexmm *sv_xmm;
2446 struct save87 *sv_87;
2447 {
2448 register struct env87 *penv_87 = &sv_87->sv_env;
2449 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2450 int i;
2451
2452 bzero(sv_87, sizeof(*sv_87));
2453
2454 /* FPU control/status */
2455 penv_87->en_cw = penv_xmm->en_cw;
2456 penv_87->en_sw = penv_xmm->en_sw;
2457 penv_87->en_tw = penv_xmm->en_tw;
2458 penv_87->en_fip = penv_xmm->en_fip;
2459 penv_87->en_fcs = penv_xmm->en_fcs;
2460 penv_87->en_opcode = penv_xmm->en_opcode;
2461 penv_87->en_foo = penv_xmm->en_foo;
2462 penv_87->en_fos = penv_xmm->en_fos;
2463
2464 /* FPU registers */
2465 for (i = 0; i < 8; ++i)
2466 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
2467 }
2468
2469 static void
2470 set_fpregs_xmm(sv_87, sv_xmm)
2471 struct save87 *sv_87;
2472 struct savexmm *sv_xmm;
2473 {
2474 register struct env87 *penv_87 = &sv_87->sv_env;
2475 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2476 int i;
2477
2478 /* FPU control/status */
2479 penv_xmm->en_cw = penv_87->en_cw;
2480 penv_xmm->en_sw = penv_87->en_sw;
2481 penv_xmm->en_tw = penv_87->en_tw;
2482 penv_xmm->en_fip = penv_87->en_fip;
2483 penv_xmm->en_fcs = penv_87->en_fcs;
2484 penv_xmm->en_opcode = penv_87->en_opcode;
2485 penv_xmm->en_foo = penv_87->en_foo;
2486 penv_xmm->en_fos = penv_87->en_fos;
2487
2488 /* FPU registers */
2489 for (i = 0; i < 8; ++i)
2490 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
2491 }
2492 #endif /* CPU_ENABLE_SSE */
2493
2494 int
2495 fill_fpregs(struct thread *td, struct fpreg *fpregs)
2496 {
2497 #ifdef CPU_ENABLE_SSE
2498 if (cpu_fxsr) {
2499 fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
2500 (struct save87 *)fpregs);
2501 return (0);
2502 }
2503 #endif /* CPU_ENABLE_SSE */
2504 bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
2505 return (0);
2506 }
2507
2508 int
2509 set_fpregs(struct thread *td, struct fpreg *fpregs)
2510 {
2511 #ifdef CPU_ENABLE_SSE
2512 if (cpu_fxsr) {
2513 set_fpregs_xmm((struct save87 *)fpregs,
2514 &td->td_pcb->pcb_save.sv_xmm);
2515 return (0);
2516 }
2517 #endif /* CPU_ENABLE_SSE */
2518 bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
2519 return (0);
2520 }
2521
2522 /*
2523 * Get machine context.
2524 */
2525 int
2526 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
2527 {
2528 struct trapframe *tp;
2529
2530 tp = td->td_frame;
2531
2532 PROC_LOCK(curthread->td_proc);
2533 mcp->mc_onstack = sigonstack(tp->tf_esp);
2534 PROC_UNLOCK(curthread->td_proc);
2535 mcp->mc_gs = td->td_pcb->pcb_gs;
2536 mcp->mc_fs = tp->tf_fs;
2537 mcp->mc_es = tp->tf_es;
2538 mcp->mc_ds = tp->tf_ds;
2539 mcp->mc_edi = tp->tf_edi;
2540 mcp->mc_esi = tp->tf_esi;
2541 mcp->mc_ebp = tp->tf_ebp;
2542 mcp->mc_isp = tp->tf_isp;
2543 mcp->mc_eflags = tp->tf_eflags;
2544 if (flags & GET_MC_CLEAR_RET) {
2545 mcp->mc_eax = 0;
2546 mcp->mc_edx = 0;
2547 mcp->mc_eflags &= ~PSL_C;
2548 } else {
2549 mcp->mc_eax = tp->tf_eax;
2550 mcp->mc_edx = tp->tf_edx;
2551 }
2552 mcp->mc_ebx = tp->tf_ebx;
2553 mcp->mc_ecx = tp->tf_ecx;
2554 mcp->mc_eip = tp->tf_eip;
2555 mcp->mc_cs = tp->tf_cs;
2556 mcp->mc_esp = tp->tf_esp;
2557 mcp->mc_ss = tp->tf_ss;
2558 mcp->mc_len = sizeof(*mcp);
2559 get_fpcontext(td, mcp);
2560 return (0);
2561 }
2562
2563 /*
2564 * Set machine context.
2565 *
2566 * However, we don't set any but the user modifiable flags, and we won't
2567 * touch the cs selector.
2568 */
2569 int
2570 set_mcontext(struct thread *td, const mcontext_t *mcp)
2571 {
2572 struct trapframe *tp;
2573 int eflags, ret;
2574
2575 tp = td->td_frame;
2576 if (mcp->mc_len != sizeof(*mcp))
2577 return (EINVAL);
2578 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
2579 (tp->tf_eflags & ~PSL_USERCHANGE);
2580 if ((ret = set_fpcontext(td, mcp)) == 0) {
2581 tp->tf_fs = mcp->mc_fs;
2582 tp->tf_es = mcp->mc_es;
2583 tp->tf_ds = mcp->mc_ds;
2584 tp->tf_edi = mcp->mc_edi;
2585 tp->tf_esi = mcp->mc_esi;
2586 tp->tf_ebp = mcp->mc_ebp;
2587 tp->tf_ebx = mcp->mc_ebx;
2588 tp->tf_edx = mcp->mc_edx;
2589 tp->tf_ecx = mcp->mc_ecx;
2590 tp->tf_eax = mcp->mc_eax;
2591 tp->tf_eip = mcp->mc_eip;
2592 tp->tf_eflags = eflags;
2593 tp->tf_esp = mcp->mc_esp;
2594 tp->tf_ss = mcp->mc_ss;
2595 td->td_pcb->pcb_gs = mcp->mc_gs;
2596 ret = 0;
2597 }
2598 return (ret);
2599 }
2600
2601 static void
2602 get_fpcontext(struct thread *td, mcontext_t *mcp)
2603 {
2604 #ifndef DEV_NPX
2605 mcp->mc_fpformat = _MC_FPFMT_NODEV;
2606 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
2607 #else
2608 union savefpu *addr;
2609
2610 /*
2611 * XXX mc_fpstate might be misaligned, since its declaration is not
2612 * unportabilized using __attribute__((aligned(16))) like the
2613 	 * declaration of struct savexmm, and anyway, alignment doesn't work
2614 * for auto variables since we don't use gcc's pessimal stack
2615 * alignment. Work around this by abusing the spare fields after
2616 * mcp->mc_fpstate.
2617 *
2618 * XXX unpessimize most cases by only aligning when fxsave might be
2619 * called, although this requires knowing too much about
2620 * npxgetregs()'s internals.
2621 */
2622 addr = (union savefpu *)&mcp->mc_fpstate;
2623 if (td == PCPU_GET(fpcurthread) &&
2624 #ifdef CPU_ENABLE_SSE
2625 cpu_fxsr &&
2626 #endif
2627 ((uintptr_t)(void *)addr & 0xF)) {
2628 do
2629 addr = (void *)((char *)addr + 4);
2630 while ((uintptr_t)(void *)addr & 0xF);
2631 }
2632 mcp->mc_ownedfp = npxgetregs(td, addr);
2633 if (addr != (union savefpu *)&mcp->mc_fpstate) {
2634 bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
2635 bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
2636 }
2637 mcp->mc_fpformat = npxformat();
2638 #endif
2639 }
2640
2641 static int
2642 set_fpcontext(struct thread *td, const mcontext_t *mcp)
2643 {
2644 union savefpu *addr;
2645
2646 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
2647 return (0);
2648 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
2649 mcp->mc_fpformat != _MC_FPFMT_XMM)
2650 return (EINVAL);
2651 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
2652 /* We don't care what state is left in the FPU or PCB. */
2653 fpstate_drop(td);
2654 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
2655 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
2656 /* XXX align as above. */
2657 addr = (union savefpu *)&mcp->mc_fpstate;
2658 if (td == PCPU_GET(fpcurthread) &&
2659 #ifdef CPU_ENABLE_SSE
2660 cpu_fxsr &&
2661 #endif
2662 ((uintptr_t)(void *)addr & 0xF)) {
2663 do
2664 addr = (void *)((char *)addr + 4);
2665 while ((uintptr_t)(void *)addr & 0xF);
2666 bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
2667 }
2668 #ifdef DEV_NPX
2669 /*
2670 * XXX we violate the dubious requirement that npxsetregs()
2671 * be called with interrupts disabled.
2672 */
2673 npxsetregs(td, addr);
2674 #endif
2675 /*
2676 * Don't bother putting things back where they were in the
2677 * misaligned case, since we know that the caller won't use
2678 * them again.
2679 */
2680 } else
2681 return (EINVAL);
2682 return (0);
2683 }
2684
2685 static void
2686 fpstate_drop(struct thread *td)
2687 {
2688 register_t s;
2689
2690 s = intr_disable();
2691 #ifdef DEV_NPX
2692 if (PCPU_GET(fpcurthread) == td)
2693 npxdrop();
2694 #endif
2695 /*
2696 * XXX force a full drop of the npx. The above only drops it if we
2697 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
2698 *
2699 * XXX I don't much like npxgetregs()'s semantics of doing a full
2700 * drop. Dropping only to the pcb matches fnsave's behaviour.
2701 * We only need to drop to !PCB_INITDONE in sendsig(). But
2702 * sendsig() is the only caller of npxgetregs()... perhaps we just
2703 * have too many layers.
2704 */
2705 curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
2706 intr_restore(s);
2707 }
2708
2709 int
2710 fill_dbregs(struct thread *td, struct dbreg *dbregs)
2711 {
2712 struct pcb *pcb;
2713
2714 if (td == NULL) {
2715 dbregs->dr[0] = rdr0();
2716 dbregs->dr[1] = rdr1();
2717 dbregs->dr[2] = rdr2();
2718 dbregs->dr[3] = rdr3();
2719 dbregs->dr[4] = rdr4();
2720 dbregs->dr[5] = rdr5();
2721 dbregs->dr[6] = rdr6();
2722 dbregs->dr[7] = rdr7();
2723 } else {
2724 pcb = td->td_pcb;
2725 dbregs->dr[0] = pcb->pcb_dr0;
2726 dbregs->dr[1] = pcb->pcb_dr1;
2727 dbregs->dr[2] = pcb->pcb_dr2;
2728 dbregs->dr[3] = pcb->pcb_dr3;
2729 dbregs->dr[4] = 0;
2730 dbregs->dr[5] = 0;
2731 dbregs->dr[6] = pcb->pcb_dr6;
2732 dbregs->dr[7] = pcb->pcb_dr7;
2733 }
2734 return (0);
2735 }
2736
2737 int
2738 set_dbregs(struct thread *td, struct dbreg *dbregs)
2739 {
2740 struct pcb *pcb;
2741 int i;
2742 u_int32_t mask1, mask2;
2743
2744 if (td == NULL) {
2745 load_dr0(dbregs->dr[0]);
2746 load_dr1(dbregs->dr[1]);
2747 load_dr2(dbregs->dr[2]);
2748 load_dr3(dbregs->dr[3]);
2749 load_dr4(dbregs->dr[4]);
2750 load_dr5(dbregs->dr[5]);
2751 load_dr6(dbregs->dr[6]);
2752 load_dr7(dbregs->dr[7]);
2753 } else {
2754 /*
2755 * Don't let an illegal value for dr7 get set. Specifically,
2756 * check for undefined settings. Setting these bit patterns
2757 		 * results in undefined behaviour and can lead to an unexpected
2758 * TRCTRAP.
2759 */
2760 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8;
2761 i++, mask1 <<= 2, mask2 <<= 2)
2762 if ((dbregs->dr[7] & mask1) == mask2)
2763 return (EINVAL);
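		/*
		 * The loop above walks the eight 2-bit R/Wn and LENn fields
		 * in bits 16..31 of dr7 (mask1 selects a field, mask2 is the
		 * binary pattern 10 within it) and rejects any field set to
		 * that undefined value.  For example, dr7 = 0x00020000 has
		 * R/W0 == 10 and would fail the check with EINVAL.
		 */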
2764
2765 pcb = td->td_pcb;
2766
2767 /*
2768 * Don't let a process set a breakpoint that is not within the
2769 * process's address space. If a process could do this, it
2770 * could halt the system by setting a breakpoint in the kernel
2771 * (if ddb was enabled). Thus, we need to check to make sure
2772 * that no breakpoints are being enabled for addresses outside
2773 		 * the process's address space.
2774 *
2775 * XXX - what about when the watched area of the user's
2776 * address space is written into from within the kernel
2777 * ... wouldn't that still cause a breakpoint to be generated
2778 * from within kernel mode?
2779 */
2780
2781 if (dbregs->dr[7] & 0x3) {
2782 /* dr0 is enabled */
2783 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
2784 return (EINVAL);
2785 }
2786
2787 if (dbregs->dr[7] & (0x3<<2)) {
2788 /* dr1 is enabled */
2789 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
2790 return (EINVAL);
2791 }
2792
2793 if (dbregs->dr[7] & (0x3<<4)) {
2794 /* dr2 is enabled */
2795 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
2796 return (EINVAL);
2797 }
2798
2799 if (dbregs->dr[7] & (0x3<<6)) {
2800 /* dr3 is enabled */
2801 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
2802 return (EINVAL);
2803 }
2804
2805 pcb->pcb_dr0 = dbregs->dr[0];
2806 pcb->pcb_dr1 = dbregs->dr[1];
2807 pcb->pcb_dr2 = dbregs->dr[2];
2808 pcb->pcb_dr3 = dbregs->dr[3];
2809 pcb->pcb_dr6 = dbregs->dr[6];
2810 pcb->pcb_dr7 = dbregs->dr[7];
2811
2812 pcb->pcb_flags |= PCB_DBREGS;
2813 }
2814
2815 return (0);
2816 }
2817
2818 /*
2819 * Return > 0 if a hardware breakpoint has been hit, and the
2820 * breakpoint was in user space. Return 0, otherwise.
2821 */
2822 int
2823 user_dbreg_trap(void)
2824 {
2825 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
2826 u_int32_t bp; /* breakpoint bits extracted from dr6 */
2827 int nbp; /* number of breakpoints that triggered */
2828 caddr_t addr[4]; /* breakpoint addresses */
2829 int i;
2830
2831 dr7 = rdr7();
2832 if ((dr7 & 0x000000ff) == 0) {
2833 /*
2834 		 * all global-enable and local-enable bits in the dr7
2835 		 * register are zero, thus the trap couldn't have been
2836 		 * caused by the hardware debug registers
2837 */
2838 return 0;
2839 }
2840
2841 nbp = 0;
2842 dr6 = rdr6();
2843 bp = dr6 & 0x0000000f;
2844
2845 if (!bp) {
2846 /*
2847 		 * None of the breakpoint bits are set, meaning this
2848 * trap was not caused by any of the debug registers
2849 */
2850 return 0;
2851 }
2852
2853 /*
2854 	 * at least one of the breakpoints was hit, check to see
2855 	 * which ones and whether any of them are user space addresses
2856 */
2857
2858 if (bp & 0x01) {
2859 addr[nbp++] = (caddr_t)rdr0();
2860 }
2861 if (bp & 0x02) {
2862 addr[nbp++] = (caddr_t)rdr1();
2863 }
2864 if (bp & 0x04) {
2865 addr[nbp++] = (caddr_t)rdr2();
2866 }
2867 if (bp & 0x08) {
2868 addr[nbp++] = (caddr_t)rdr3();
2869 }
2870
2871 	for (i = 0; i < nbp; i++) {
2872 		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
2874 /*
2875 * addr[i] is in user space
2876 */
2877 return nbp;
2878 }
2879 }
2880
2881 /*
2882 * None of the breakpoints are in user space.
2883 */
2884 return 0;
2885 }
2886
2887 #ifndef DEV_APIC
2888 #include <machine/apicvar.h>
2889
2890 /*
2891 * Provide stub functions so that the MADT APIC enumerator in the acpi
2892 * kernel module will link against a kernel without 'device apic'.
2893 *
2894 * XXX - This is a gross hack.
2895 */
2896 void
2897 apic_register_enumerator(struct apic_enumerator *enumerator)
2898 {
2899 }
2900
2901 void *
2902 ioapic_create(uintptr_t addr, int32_t id, int intbase)
2903 {
2904 return (NULL);
2905 }
2906
2907 int
2908 ioapic_disable_pin(void *cookie, u_int pin)
2909 {
2910 return (ENXIO);
2911 }
2912
2913 int
2914 ioapic_get_vector(void *cookie, u_int pin)
2915 {
2916 return (-1);
2917 }
2918
2919 void
2920 ioapic_register(void *cookie)
2921 {
2922 }
2923
2924 int
2925 ioapic_remap_vector(void *cookie, u_int pin, int vector)
2926 {
2927 return (ENXIO);
2928 }
2929
2930 int
2931 ioapic_set_extint(void *cookie, u_int pin)
2932 {
2933 return (ENXIO);
2934 }
2935
2936 int
2937 ioapic_set_nmi(void *cookie, u_int pin)
2938 {
2939 return (ENXIO);
2940 }
2941
2942 int
2943 ioapic_set_polarity(void *cookie, u_int pin, enum intr_polarity pol)
2944 {
2945 return (ENXIO);
2946 }
2947
2948 int
2949 ioapic_set_triggermode(void *cookie, u_int pin, enum intr_trigger trigger)
2950 {
2951 return (ENXIO);
2952 }
2953
2954 void
2955 lapic_create(u_int apic_id, int boot_cpu)
2956 {
2957 }
2958
2959 void
2960 lapic_init(uintptr_t addr)
2961 {
2962 }
2963
2964 int
2965 lapic_set_lvt_mode(u_int apic_id, u_int lvt, u_int32_t mode)
2966 {
2967 return (ENXIO);
2968 }
2969
2970 int
2971 lapic_set_lvt_polarity(u_int apic_id, u_int lvt, enum intr_polarity pol)
2972 {
2973 return (ENXIO);
2974 }
2975
2976 int
2977 lapic_set_lvt_triggermode(u_int apic_id, u_int lvt, enum intr_trigger trigger)
2978 {
2979 return (ENXIO);
2980 }
2981 #endif
2982
2983 #ifdef KDB
2984
2985 /*
2986 * Provide inb() and outb() as functions. They are normally only
2987 * available as macros calling inlined functions, thus cannot be
2988 * called from the debugger.
2989 *
2990 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
2991 */
2992
2993 #undef inb
2994 #undef outb
2995
2996 /* silence compiler warnings */
2997 u_char inb(u_int);
2998 void outb(u_int, u_char);
2999
3000 u_char
3001 inb(u_int port)
3002 {
3003 u_char data;
3004 /*
3005 * We use %%dx and not %1 here because i/o is done at %dx and not at
3006 * %edx, while gcc generates inferior code (movw instead of movl)
3007 * if we tell it to load (u_short) port.
3008 */
3009 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
3010 return (data);
3011 }
3012
3013 void
3014 outb(u_int port, u_char data)
3015 {
3016 u_char al;
3017 /*
3018 * Use an unnecessary assignment to help gcc's register allocator.
3019 	 * This makes a large difference for gcc-1.40 and a tiny difference
3020 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for
3021 * best results. gcc-2.6.0 can't handle this.
3022 */
3023 al = data;
3024 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
3025 }
3026
3027 #endif /* KDB */