/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/i386/i386/machdep.c 282066 2015-04-27 08:06:33Z kib $");

#include "opt_apic.h"
#include "opt_atalk.h"
#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
#include "opt_platform.h"
#include "opt_xbox.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP
#include <sys/smp.h>
#endif
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>
#else
#include <isa/rtc.h>
#endif

#include <net/netisr.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/vm86.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef FDT
#include <x86/fdt.h>
#endif

#ifdef DEV_APIC
#include <machine/apicvar.h>
#endif

#ifdef DEV_ISA
#include <x86/isa/icu.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>

int arch_i386_is_xbox = 0;
uint32_t arch_i386_xbox_memsize = 0;
#endif

#ifdef XEN
/* XEN includes */
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>
#include <xen/xen_intr.h>

void Xhypervisor_callback(void);
void failsafe_callback(void);

extern trap_info_t trap_table[];
struct proc_ldt default_proc_ldt;
extern int init_first;
int running_xen = 1;
extern unsigned long physfree;
#endif /* XEN */

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern register_t init386(int first);
extern void dblfault_handler(void);

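/*
 * Sanity checks used by the sigreturn paths below.  CS_SECURE()
 * accepts a %cs selector only if its privilege level is user
 * (SEL_UPL, ring 3); EFL_SECURE() accepts a new eflags value only if
 * every bit outside the user-changeable set (PSL_USERCHANGE) matches
 * the old value, so e.g. flipping an arithmetic flag such as PSL_C
 * passes while changing PSL_IOPL does not.
 */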
#define CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpusave, size_t xfpusave_len);
static int set_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpustate, size_t xfpustate_len);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

/* Intel ICH registers */
#define ICH_PMBASE	0x400
#define ICH_SMI_EN	(ICH_PMBASE + 0x30)

int	_udatasel, _ucodesel;
u_int	basemem;

#ifdef PC98
int	need_pre_dma_flush;	/* If 1, use wbinvd before DMA transfer. */
int	need_post_dma_flush;	/* If 1, use invd after DMA transfer. */

static int	ispc98 = 1;
SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");
#endif

int cold = 1;

#ifdef COMPAT_43
static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif
#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif

long Maxmem = 0;
long realmem = 0;

#ifdef PAE
FEATURE(pae, "Physical Address Extensions");
#endif

/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
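
/*
 * Layout sketch (addresses are illustrative only): phys_avail[] holds
 * (start, end) pairs of usable physical memory, terminated by a pair
 * of zeroes, e.g.:
 *
 *	phys_avail[0] = 0x00001000;  phys_avail[1] = 0x0009f000;
 *	phys_avail[2] = 0x00100000;  phys_avail[3] = 0x3fff0000;
 *	phys_avail[4] = 0;           phys_avail[5] = 0;
 *
 * cpu_startup() below walks the array in exactly this fashion when
 * printing the physical memory chunks.
 */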

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

struct mtx icu_lock;

struct mem_range_softc mem_range_softc;

static void
cpu_startup(dummy)
	void *dummy;
{
	uintmax_t memsize;
	char *sysenv;

#ifndef PC98
	/*
	 * On MacBooks, we need to prevent the legacy USB circuit from
	 * generating an SMI#, because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable) register of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook4,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}
#endif /* !PC98 */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif

	/*
	 * Display physical memory if SMBIOS reports a reasonable amount.
	 */
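	/*
	 * Note: if the SMBIOS value is missing or implausibly small
	 * (less than the current free page count), the code below
	 * falls back to Maxmem, the page count discovered at boot.
	 */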
	memsize = 0;
	sysenv = getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
		freeenv(sysenv);
	}
	if (memsize < ptoa((uintmax_t)cnt.v_free_count))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
	realmem = atop(memsize);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
#ifndef XEN
	cpu_setregs();
#endif
}

/*
 * Send an interrupt to a process.
 *
 * The stack is set up to allow the sigcode stored
 * at top to call the routine, followed by a call
 * to the sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
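
/*
 * Rough sketch of the user stack built by the sendsig() variants
 * below (the stack grows down; the exact frame depends on the
 * sigframe flavor):
 *
 *	[optional extended FPU state, 64-byte aligned]
 *	struct sigframe			<- new %esp (16-byte aligned)
 *		sf_signum		signal number (handler arg 1)
 *		sf_siginfo		siginfo pointer or code (arg 2)
 *		sf_ucontext		saved user context (arg 3)
 *
 * The signal trampoline (sigcode) later hands sf_ucontext back to
 * sigreturn(2) to restore this state.
 */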
#ifdef COMPAT_43
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
		sf.sf_addr = 0;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	if (p->p_sysent->sv_sigcode_base != 0) {
		regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
		    szosigcode;
	} else {
		/* a.out sysentvec does not use shared page */
		regs->tf_eip = p->p_sysent->sv_psstrings - szosigcode;
	}
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
	    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	bzero(sf.sf_uc.uc_mcontext.__spare__,
	    sizeof(sf.sf_uc.uc_mcontext.__spare__));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
	    szfreebsd4_sigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_FREEBSD4 */

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	struct segment_descriptor *sdp;
	char *xfpusave;
	size_t xfpusave_len;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

#ifdef CPU_ENABLE_SSE
	if (cpu_max_ext_state_size > sizeof(union savefpu) && use_xsave) {
		xfpusave_len = cpu_max_ext_state_size - sizeof(union savefpu);
		xfpusave = __builtin_alloca(xfpusave_len);
	} else {
#else
	{
#endif
		xfpusave_len = 0;
		xfpusave = NULL;
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
	fpstate_drop(td);
	/*
	 * Unconditionally fill the fsbase and gsbase into the mcontext.
	 */
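	/*
	 * An i386 segment descriptor stores its 32-bit base split across
	 * two fields: sd_lobase holds the low 24 bits and sd_hibase the
	 * high 8, hence the "hibase << 24 | lobase" reconstruction below.
	 */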
	sdp = &td->td_pcb->pcb_fsd;
	sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	bzero(sf.sf_uc.uc_mcontext.mc_spare2,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare2));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_esp - 128;
	if (xfpusave != NULL) {
		sp -= xfpusave_len;
		sp = (char *)((unsigned int)sp & ~0x3F);
		sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
	}
	sp -= sizeof(struct sigframe);

	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    (xfpusave != NULL && copyout(xfpusave,
	    (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
	    != 0)) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = p->p_sysent->sv_sigcode_base;
	if (regs->tf_eip == 0)
		regs->tf_eip = p->p_sysent->sv_psstrings - szsigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to clean up state after a signal
 * has been taken.  Reset the signal mask and
 * stack state from the context left by sendsig (above).
 * Return to the previous pc and psl as specified by
 * the context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
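
/*
 * The sigreturn flavors below are normally reached from the signal
 * trampoline (sigcode) rather than called directly: the trampoline
 * invokes the user handler and then issues the sigreturn syscall with
 * a pointer to the context sendsig() pushed, roughly:
 *
 *	handler(sf_signum, sf_siginfo, sf_ucontext);
 *	sigreturn(sf_ucontext);		(does not return on success)
 */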
#ifdef COMPAT_43
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	int eflags, error;
	ksiginfo_t ksi;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
	    SIGPROCMASK_OLD);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * MPSAFE
 */
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
{
	struct ucontext4 uc;
	struct trapframe *regs;
	struct ucontext4 *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			uprintf("pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}
#endif /* COMPAT_FREEBSD4 */

/*
 * MPSAFE
 */
int
sys_sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct proc *p;
	struct trapframe *regs;
	ucontext_t *ucp;
	char *xfpustate;
	size_t xfpustate_len;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	p = td->td_proc;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
		uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
		    td->td_name, ucp->uc_mcontext.mc_flags);
		return (EINVAL);
	}
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
			xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
			if (xfpustate_len > cpu_max_ext_state_size -
			    sizeof(union savefpu)) {
				uprintf(
				    "pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
				    p->p_pid, td->td_name, xfpustate_len);
				return (EINVAL);
			}
			xfpustate = __builtin_alloca(xfpustate_len);
			error = copyin((const void *)uc.uc_mcontext.mc_xfpustate,
			    xfpustate, xfpustate_len);
			if (error != 0) {
				uprintf(
				    "pid %d (%s): sigreturn copying xfpustate failed\n",
				    p->p_pid, td->td_name);
				return (error);
			}
		} else {
			xfpustate = NULL;
			xfpustate_len = 0;
		}
		ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate,
		    xfpustate_len);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	uint64_t tsc1, tsc2;
	uint64_t acnt, mcnt, perf;
	register_t reg;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);

	/*
	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9) based logic fails.
	 */
	if (tsc_is_invariant && !tsc_perf_stat)
		return (EOPNOTSUPP);

#ifdef SMP
	if (smp_cpus > 1) {
		/* Schedule ourselves on the indicated cpu. */
		thread_lock(curthread);
		sched_bind(curthread, cpu_id);
		thread_unlock(curthread);
	}
#endif

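	/*
	 * The arithmetic below: DELAY(1000) spins for ~1000us, so the
	 * TSC delta across it is ticks-per-millisecond; multiplying by
	 * 1000 yields Hz.  When APERF/MPERF are available, the
	 * acnt/mcnt ratio rescales the invariant TSC rate to the
	 * frequency the core actually ran at during the delay.
	 */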
	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		tsc1 = rdtsc();
		DELAY(1000);
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		tsc2 = rdtsc();
		intr_restore(reg);
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
	} else {
		tsc1 = rdtsc();
		DELAY(1000);
		tsc2 = rdtsc();
		intr_restore(reg);
		*rate = (tsc2 - tsc1) * 1000;
	}

#ifdef SMP
	if (smp_cpus > 1) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}
#endif

	return (0);
}

#ifdef XEN

static void
idle_block(void)
{

	HYPERVISOR_sched_op(SCHEDOP_block, 0);
}

void
cpu_halt(void)
{
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}

int scheduler_running;

static void
cpu_idle_hlt(sbintime_t sbt)
{

	scheduler_running = 1;
	enable_intr();
	idle_block();
}

#else
/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		halt();
}

#endif

void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */
static int	cpu_ident_amdc1e = 0;	/* AMD C1E supported. */
static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");

#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2
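
/*
 * Per-CPU idle state machine, kept in the first word of monitorbuf:
 * STATE_RUNNING while executing, STATE_MWAIT while parked in mwait
 * (a plain store to the monitored word wakes the CPU), and
 * STATE_SLEEPING while in hlt or an ACPI idle state (only an
 * interrupt or IPI can wake it).
 */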

#ifndef PC98
static void
cpu_idle_acpi(sbintime_t sbt)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	/* See comments in cpu_idle_hlt(). */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook(sbt);
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}
#endif /* !PC98 */

#ifndef XEN
static void
cpu_idle_hlt(sbintime_t sbt)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	/*
	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption.  If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found.  Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti.  This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt.  Finally, note that
	 * on x86 this works because interrupts are recognized only
	 * after the instruction following sti completes, while IF is
	 * set immediately, so the hlt instruction can still
	 * acknowledge the interrupt.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;
}
#endif

static void
cpu_idle_mwait(sbintime_t sbt)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_MWAIT;

	/* See comments in cpu_idle_hlt(). */
	disable_intr();
	if (sched_runnable()) {
		enable_intr();
		*state = STATE_RUNNING;
		return;
	}
	cpu_monitor(state, 0, 0);
	if (*state == STATE_MWAIT)
		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
	else
		enable_intr();
	*state = STATE_RUNNING;
}

static void
cpu_idle_spin(sbintime_t sbt)
{
	int *state;
	int i;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_RUNNING;

	/*
	 * The sched_runnable() check is racy, but since it is
	 * re-checked on every iteration of the loop, missing it once
	 * has little impact, if any (and it is much better than not
	 * checking at all).
	 */
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}
}

/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * Reference:
 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 *   #32559 revision 3.00+
 */
#define	MSR_AMDK8_IPM		0xc0010055
#define	AMDK8_SMIONCMPHALT	(1ULL << 27)
#define	AMDK8_C1EONCMPHALT	(1ULL << 28)
#define	AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)

static void
cpu_probe_amdc1e(void)
{

	/*
	 * Detect the presence of the C1E capability, mostly found on
	 * recent dual-core (and later) K8-family CPUs.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
	}
}

#if defined(PC98) || defined(XEN)
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_hlt;
#else
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;
#endif

void
cpu_idle(int busy)
{
#ifndef XEN
	uint64_t msr;
#endif
	sbintime_t sbt = -1;

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
#if defined(MP_WATCHDOG) && !defined(XEN)
	ap_watchdog(PCPU_GET(cpuid));
#endif
#ifndef XEN
	/* If we are busy - try to use fast methods. */
	if (busy) {
		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
			cpu_idle_mwait(busy);
			goto out;
		}
	}
#endif

	/* If we have time - switch timers into idle mode. */
	if (!busy) {
		critical_enter();
		sbt = cpu_idleclock();
	}

#ifndef XEN
	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
	}
#endif

	/* Call main idle method. */
	cpu_idle_fn(sbt);

	/* Switch timers back into active mode. */
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
#ifndef XEN
out:
#endif
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{
	struct pcpu *pcpu;
	int *state;

	pcpu = pcpu_find(cpu);
	state = (int *)pcpu->pc_monitorbuf;
	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
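	/*
	 * Returning 0 tells the caller the CPU is halted (or deeper)
	 * and still needs an IPI; returning 1 means the CPU is either
	 * already running or the store above was enough to break it
	 * out of mwait.
	 */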
	if (*state == STATE_SLEEPING)
		return (0);
	if (*state == STATE_MWAIT)
		*state = STATE_RUNNING;
	return (1);
}

/*
 * Ordered by speed/power consumption.
 */
struct {
	void	*id_fn;
	char	*id_name;
} idle_tbl[] = {
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_hlt, "hlt" },
#ifndef PC98
	{ cpu_idle_acpi, "acpi" },
#endif
	{ NULL, NULL }
};

static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
#ifndef PC98
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
#endif
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");

static int
idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	int error;
	char *p;
	int i;

	p = "unknown";
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
#ifndef PC98
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
#endif
		if (strcmp(idle_tbl[i].id_name, buf))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");
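
/*
 * Example interaction from userland (a sketch; the available list
 * varies by machine):
 *
 *	# sysctl machdep.idle_available
 *	machdep.idle_available: spin, mwait, hlt, acpi
 *	# sysctl machdep.idle=hlt
 *	machdep.idle: acpi -> hlt
 */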

/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = imgp->entry_addr;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = imgp->ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == curpcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();

	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

u_long bootdev;		/* not a struct cdev *- encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");

static char bootmethod[16] = "BIOS";
SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
    "System firmware boot method");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;

#ifdef XEN
union descriptor *gdt;
union descriptor *ldt;
#else
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#endif
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */
struct mtx dt_lock;			/* lock for GDT and LDT */

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern vm_offset_t proc0kstack;

/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUFS_SEL	2 %fs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUGS_SEL	3 %gs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUCODE_SEL	6 Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUDATA_SEL	7 Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	.ssd_base = 0x400,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
#ifndef XEN
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{
	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GLDT_SEL	10 LDT Descriptor */
{	.ssd_base = (int) ldt,
	.ssd_limit = sizeof(ldt)-1,
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	.ssd_base = (int) ldt,
	.ssd_limit = (512 * sizeof(union descriptor)-1),
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	.ssd_base = (int) &dblfault_tss,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GNDIS_SEL	18 NDIS Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
#endif /* !XEN */
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
	/* Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
};

void
setidt(int idx, inthand_t *func, int typ, int dpl, int selec)
{
1979 struct gate_descriptor *ip;
1980
1981 ip = idt + idx;
1982 ip->gd_looffset = (int)func;
1983 ip->gd_selector = selec;
1984 ip->gd_stkcpy = 0;
1985 ip->gd_xx = 0;
1986 ip->gd_type = typ;
1987 ip->gd_dpl = dpl;
1988 ip->gd_p = 1;
	ip->gd_hioffset = ((int)func) >> 16;
1990 }
1991
1992 extern inthand_t
1993 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1994 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1995 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1996 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1997 IDTVEC(xmm),
1998 #ifdef KDTRACE_HOOKS
1999 IDTVEC(dtrace_ret),
2000 #endif
2001 #ifdef XENHVM
2002 IDTVEC(xen_intr_upcall),
2003 #endif
2004 IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
2005
2006 #ifdef DDB
2007 /*
2008 * Display the index and function name of any IDT entries that don't use
2009 * the default 'rsvd' entry point.
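 * Invoked from the DDB prompt as "show idt".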
2010 */
2011 DB_SHOW_COMMAND(idt, db_show_idt)
2012 {
2013 struct gate_descriptor *ip;
2014 int idx;
2015 uintptr_t func;
2016
2017 ip = idt;
2018 for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
2019 func = (ip->gd_hioffset << 16 | ip->gd_looffset);
2020 if (func != (uintptr_t)&IDTVEC(rsvd)) {
2021 db_printf("%3d\t", idx);
2022 db_printsym(func, DB_STGY_PROC);
2023 db_printf("\n");
2024 }
2025 ip++;
2026 }
2027 }
2028
2029 /* Show privileged registers. */
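/* Invoked from the DDB prompt as "show sysregs". */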
2030 DB_SHOW_COMMAND(sysregs, db_show_sysregs)
2031 {
2032 uint64_t idtr, gdtr;
2033
2034 idtr = ridt();
2035 db_printf("idtr\t0x%08x/%04x\n",
2036 (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
2037 gdtr = rgdt();
2038 db_printf("gdtr\t0x%08x/%04x\n",
2039 (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
2040 db_printf("ldtr\t0x%04x\n", rldt());
2041 db_printf("tr\t0x%04x\n", rtr());
2042 db_printf("cr0\t0x%08x\n", rcr0());
2043 db_printf("cr2\t0x%08x\n", rcr2());
2044 db_printf("cr3\t0x%08x\n", rcr3());
2045 db_printf("cr4\t0x%08x\n", rcr4());
2046 }
2047 #endif
2048
void
sdtossd(struct segment_descriptor *sd, struct soft_segment_descriptor *ssd)
{
2054 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
2055 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
2056 ssd->ssd_type = sd->sd_type;
2057 ssd->ssd_dpl = sd->sd_dpl;
2058 ssd->ssd_p = sd->sd_p;
2059 ssd->ssd_def32 = sd->sd_def32;
2060 ssd->ssd_gran = sd->sd_gran;
2061 }
2062
2063 #if !defined(PC98) && !defined(XEN)
2064 static int
2065 add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
2066 {
2067 int i, insert_idx, physmap_idx;
2068
2069 physmap_idx = *physmap_idxp;
2070
2071 if (boothowto & RB_VERBOSE)
2072 printf("SMAP type=%02x base=%016llx len=%016llx\n",
2073 smap->type, smap->base, smap->length);
2074
2075 if (smap->type != SMAP_TYPE_MEMORY)
2076 return (1);
2077
2078 if (smap->length == 0)
2079 return (1);
2080
2081 #ifndef PAE
2082 if (smap->base > 0xffffffff) {
2083 printf("%uK of memory above 4GB ignored\n",
2084 (u_int)(smap->length / 1024));
2085 return (1);
2086 }
2087 #endif
2088
2089 /*
2090 * Find insertion point while checking for overlap. Start off by
2091 * assuming the new entry will be added to the end.
2092 */
2093 insert_idx = physmap_idx + 2;
2094 for (i = 0; i <= physmap_idx; i += 2) {
2095 if (smap->base < physmap[i + 1]) {
2096 if (smap->base + smap->length <= physmap[i]) {
2097 insert_idx = i;
2098 break;
2099 }
2100 if (boothowto & RB_VERBOSE)
2101 printf(
2102 "Overlapping memory regions, ignoring second region\n");
2103 return (1);
2104 }
2105 }
2106
2107 /* See if we can prepend to the next entry. */
2108 if (insert_idx <= physmap_idx &&
2109 smap->base + smap->length == physmap[insert_idx]) {
2110 physmap[insert_idx] = smap->base;
2111 return (1);
2112 }
2113
2114 /* See if we can append to the previous entry. */
2115 if (insert_idx > 0 && smap->base == physmap[insert_idx - 1]) {
2116 physmap[insert_idx - 1] += smap->length;
2117 return (1);
2118 }
2119
2120 physmap_idx += 2;
2121 *physmap_idxp = physmap_idx;
2122 if (physmap_idx == PHYSMAP_SIZE) {
2123 printf(
2124 "Too many segments in the physical address map, giving up\n");
2125 return (0);
2126 }
2127
2128 /*
2129 * Move the last 'N' entries down to make room for the new
2130 * entry if needed.
2131 */
2132 for (i = physmap_idx; i > insert_idx; i -= 2) {
2133 physmap[i] = physmap[i - 2];
2134 physmap[i + 1] = physmap[i - 1];
2135 }
2136
2137 /* Insert the new entry. */
2138 physmap[insert_idx] = smap->base;
2139 physmap[insert_idx + 1] = smap->base + smap->length;
2140 return (1);
2141 }
2142 #endif /* !PC98 && !XEN */
2143
2144 #ifndef XEN
2145 static void
2146 basemem_setup(void)
2147 {
2148 vm_paddr_t pa;
2149 pt_entry_t *pte;
2150 int i;
2151
2152 if (basemem > 640) {
2153 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
2154 basemem);
2155 basemem = 640;
2156 }
2157
2158 /*
2159 * XXX if biosbasemem is now < 640, there is a `hole'
2160 * between the end of base memory and the start of
2161 * ISA memory. The hole may be empty or it may
2162 * contain BIOS code or data. Map it read/write so
2163 * that the BIOS can write to it. (Memory from 0 to
2164 * the physical end of the kernel is mapped read-only
2165 * to begin with and then parts of it are remapped.
2166 * The parts that aren't remapped form holes that
2167 * remain read-only and are unused by the kernel.
2168 * The base memory area is below the physical end of
2169 * the kernel and right now forms a read-only hole.
2170 * The part of it from PAGE_SIZE to
2171 * (trunc_page(biosbasemem * 1024) - 1) will be
2172 * remapped and used by the kernel later.)
2173 *
2174 * This code is similar to the code used in
2175 * pmap_mapdev, but since no memory needs to be
2176 * allocated we simply change the mapping.
2177 */
2178 for (pa = trunc_page(basemem * 1024);
2179 pa < ISA_HOLE_START; pa += PAGE_SIZE)
2180 pmap_kenter(KERNBASE + pa, pa);
2181
2182 /*
2183 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
2184 * the vm86 page table so that vm86 can scribble on them using
2185 * the vm86 map too. XXX: why 2 ways for this and only 1 way for
2186 * page 0, at least as initialized here?
2187 */
2188 pte = (pt_entry_t *)vm86paddr;
2189 for (i = basemem / 4; i < 160; i++)
2190 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
2191 }
2192 #endif /* !XEN */
2193
2194 /*
2195 * Populate the (physmap) array with base/bound pairs describing the
2196 * available physical memory in the system, then test this memory and
2197 * build the phys_avail array describing the actually-available memory.
2198 *
2199 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call and, failing that, the RTC.
2201 *
2202 * Total memory size may be set by the kernel environment variable
2203 * hw.physmem or the compile-time define MAXMEM.
2204 *
2205 * XXX first should be vm_paddr_t.
2206 */
2207 #ifdef PC98
2208 static void
2209 getmemsize(int first)
2210 {
2211 int off, physmap_idx, pa_indx, da_indx;
2212 u_long physmem_tunable, memtest;
2213 vm_paddr_t physmap[PHYSMAP_SIZE];
2214 pt_entry_t *pte;
2215 quad_t dcons_addr, dcons_size;
2216 int i;
2217 int pg_n;
2218 u_int extmem;
2219 u_int under16;
2220 vm_paddr_t pa;
2221
2222 bzero(physmap, sizeof(physmap));
2223
2224 /* XXX - some of EPSON machines can't use PG_N */
2225 pg_n = PG_N;
2226 if (pc98_machine_type & M_EPSON_PC98) {
2227 switch (epson_machine_id) {
2228 #ifdef WB_CACHE
2229 default:
2230 #endif
2231 case EPSON_PC486_HX:
2232 case EPSON_PC486_HG:
2233 case EPSON_PC486_HA:
2234 pg_n = 0;
2235 break;
2236 }
2237 }
2238
2239 under16 = pc98_getmemsize(&basemem, &extmem);
2240 basemem_setup();
2241
2242 physmap[0] = 0;
2243 physmap[1] = basemem * 1024;
2244 physmap_idx = 2;
2245 physmap[physmap_idx] = 0x100000;
2246 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
2247
2248 /*
2249 * Now, physmap contains a map of physical memory.
2250 */
2251
2252 #ifdef SMP
2253 /* make hole for AP bootstrap code */
2254 physmap[1] = mp_bootaddress(physmap[1]);
2255 #endif
2256
2257 /*
2258 * Maxmem isn't the "maximum memory", it's one larger than the
2259 * highest page of the physical address space. It should be
2260 * called something like "Maxphyspage". We may adjust this
2261 * based on ``hw.physmem'' and the results of the memory test.
2262 */
2263 Maxmem = atop(physmap[physmap_idx + 1]);
2264
2265 #ifdef MAXMEM
2266 Maxmem = MAXMEM / 4;
2267 #endif
2268
2269 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
2270 Maxmem = atop(physmem_tunable);
2271
2272 /*
2273 * By default keep the memtest enabled. Use a general name so that
2274 * one could eventually do more with the code than just disable it.
2275 */
2276 memtest = 1;
2277 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
2278
2279 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
2280 (boothowto & RB_VERBOSE))
2281 printf("Physical memory use set to %ldK\n", Maxmem * 4);
2282
2283 /*
2284 * If Maxmem has been increased beyond what the system has detected,
2285 * extend the last memory segment to the new limit.
2286 */
2287 if (atop(physmap[physmap_idx + 1]) < Maxmem)
2288 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
2289
2290 /*
2291 * We need to divide chunk if Maxmem is larger than 16MB and
2292 * under 16MB area is not full of memory.
2293 * (1) system area (15-16MB region) is cut off
2294 * (2) extended memory is only over 16MB area (ex. Melco "HYPERMEMORY")
2295 */
2296 if ((under16 != 16 * 1024) && (extmem > 15 * 1024)) {
		/* The 15M-16M region is cut off, so we need to divide the chunk. */
2298 physmap[physmap_idx + 1] = under16 * 1024;
2299 physmap_idx += 2;
2300 physmap[physmap_idx] = 0x1000000;
2301 physmap[physmap_idx + 1] = physmap[2] + extmem * 1024;
2302 }
2303
2304 /* call pmap initialization to make new kernel address space */
2305 pmap_bootstrap(first);
2306
2307 /*
2308 * Size up each available chunk of physical memory.
2309 */
2310 physmap[0] = PAGE_SIZE; /* mask off page 0 */
2311 pa_indx = 0;
2312 da_indx = 1;
2313 phys_avail[pa_indx++] = physmap[0];
2314 phys_avail[pa_indx] = physmap[0];
2315 dump_avail[da_indx] = physmap[0];
2316 pte = CMAP3;
2317
2318 /*
2319 * Get dcons buffer address
2320 */
2321 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
2322 getenv_quad("dcons.size", &dcons_size) == 0)
2323 dcons_addr = 0;
2324
2325 /*
2326 * physmap is in bytes, so when converting to page boundaries,
2327 * round up the start address and round down the end address.
2328 */
2329 for (i = 0; i <= physmap_idx; i += 2) {
2330 vm_paddr_t end;
2331
2332 end = ptoa((vm_paddr_t)Maxmem);
2333 if (physmap[i + 1] < end)
2334 end = trunc_page(physmap[i + 1]);
2335 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
2336 int tmp, page_bad, full;
2337 int *ptr = (int *)CADDR3;
2338
2339 full = FALSE;
2340 /*
2341 * block out kernel memory as not available.
2342 */
2343 if (pa >= KERNLOAD && pa < first)
2344 goto do_dump_avail;
2345
2346 /*
2347 * block out dcons buffer
2348 */
2349 if (dcons_addr > 0
2350 && pa >= trunc_page(dcons_addr)
2351 && pa < dcons_addr + dcons_size)
2352 goto do_dump_avail;
2353
2354 page_bad = FALSE;
2355 if (memtest == 0)
2356 goto skip_memtest;
2357
2358 /*
			 * map page into kernel: valid, read/write, non-cacheable
2360 */
2361 *pte = pa | PG_V | PG_RW | pg_n;
2362 invltlb();
2363
2364 tmp = *(int *)ptr;
2365 /*
2366 * Test for alternating 1's and 0's
2367 */
2368 *(volatile int *)ptr = 0xaaaaaaaa;
2369 if (*(volatile int *)ptr != 0xaaaaaaaa)
2370 page_bad = TRUE;
2371 /*
2372 * Test for alternating 0's and 1's
2373 */
2374 *(volatile int *)ptr = 0x55555555;
2375 if (*(volatile int *)ptr != 0x55555555)
2376 page_bad = TRUE;
2377 /*
2378 * Test for all 1's
2379 */
2380 *(volatile int *)ptr = 0xffffffff;
2381 if (*(volatile int *)ptr != 0xffffffff)
2382 page_bad = TRUE;
2383 /*
2384 * Test for all 0's
2385 */
2386 *(volatile int *)ptr = 0x0;
2387 if (*(volatile int *)ptr != 0x0)
2388 page_bad = TRUE;
2389 /*
2390 * Restore original value.
2391 */
2392 *(int *)ptr = tmp;
2393
2394 skip_memtest:
2395 /*
2396 * Adjust array of valid/good pages.
2397 */
2398 if (page_bad == TRUE)
2399 continue;
2400 /*
2401 * If this good page is a continuation of the
2402 * previous set of good pages, then just increase
2403 * the end pointer. Otherwise start a new chunk.
			 * Note that "end" points one past the last valid
			 * address, making the range >= start and < end.
			 * If we're also doing a speculative memory test
			 * and we're at or past the end, bump up Maxmem
2408 * so that we keep going. The first bad page
2409 * will terminate the loop.
2410 */
2411 if (phys_avail[pa_indx] == pa) {
2412 phys_avail[pa_indx] += PAGE_SIZE;
2413 } else {
2414 pa_indx++;
2415 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
2416 printf(
2417 "Too many holes in the physical address space, giving up\n");
2418 pa_indx--;
2419 full = TRUE;
2420 goto do_dump_avail;
2421 }
2422 phys_avail[pa_indx++] = pa; /* start */
2423 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
2424 }
2425 physmem++;
2426 do_dump_avail:
2427 if (dump_avail[da_indx] == pa) {
2428 dump_avail[da_indx] += PAGE_SIZE;
2429 } else {
2430 da_indx++;
2431 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2432 da_indx--;
2433 goto do_next;
2434 }
2435 dump_avail[da_indx++] = pa; /* start */
2436 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2437 }
2438 do_next:
2439 if (full)
2440 break;
2441 }
2442 }
2443 *pte = 0;
2444 invltlb();
2445
2446 /*
2447 * XXX
2448 * The last chunk must contain at least one page plus the message
2449 * buffer to avoid complicating other code (message buffer address
2450 * calculation, etc.).
2451 */
2452 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2453 round_page(msgbufsize) >= phys_avail[pa_indx]) {
2454 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2455 phys_avail[pa_indx--] = 0;
2456 phys_avail[pa_indx--] = 0;
2457 }
2458
2459 Maxmem = atop(phys_avail[pa_indx]);
2460
2461 /* Trim off space for the message buffer. */
2462 phys_avail[pa_indx] -= round_page(msgbufsize);
2463
2464 /* Map the message buffer. */
2465 for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
2466 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
2467 off);
2468
2469 PT_UPDATES_FLUSH();
2470 }
2471 #else /* PC98 */
2472 static void
2473 getmemsize(int first)
2474 {
2475 int has_smap, off, physmap_idx, pa_indx, da_indx;
2476 u_long physmem_tunable, memtest;
2477 vm_paddr_t physmap[PHYSMAP_SIZE];
2478 pt_entry_t *pte;
2479 quad_t dcons_addr, dcons_size;
2480 #ifndef XEN
2481 int hasbrokenint12, i, res;
2482 u_int extmem;
2483 struct vm86frame vmf;
2484 struct vm86context vmc;
2485 vm_paddr_t pa;
2486 struct bios_smap *smap, *smapbase, *smapend;
2487 u_int32_t smapsize;
2488 caddr_t kmdp;
2489 #endif
2490
2491 has_smap = 0;
2492 #if defined(XEN)
2493 Maxmem = xen_start_info->nr_pages - init_first;
2494 physmem = Maxmem;
2495 basemem = 0;
2496 physmap[0] = init_first << PAGE_SHIFT;
2497 physmap[1] = ptoa(Maxmem) - round_page(msgbufsize);
2498 physmap_idx = 0;
2499 #else
2500 #ifdef XBOX
2501 if (arch_i386_is_xbox) {
2502 /*
2503 * We queried the memory size before, so chop off 4MB for
2504 * the framebuffer and inform the OS of this.
2505 */
2506 physmap[0] = 0;
2507 physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE;
2508 physmap_idx = 0;
2509 goto physmap_done;
2510 }
2511 #endif
2512 bzero(&vmf, sizeof(vmf));
2513 bzero(physmap, sizeof(physmap));
2514 basemem = 0;
2515
2516 /*
2517 * Check if the loader supplied an SMAP memory map. If so,
2518 * use that and do not make any VM86 calls.
2519 */
2520 physmap_idx = 0;
2521 smapbase = NULL;
2522 kmdp = preload_search_by_type("elf kernel");
2523 if (kmdp == NULL)
2524 kmdp = preload_search_by_type("elf32 kernel");
2525 if (kmdp != NULL)
2526 smapbase = (struct bios_smap *)preload_search_info(kmdp,
2527 MODINFO_METADATA | MODINFOMD_SMAP);
2528 if (smapbase != NULL) {
2529 /*
2530 * subr_module.c says:
2531 * "Consumer may safely assume that size value precedes data."
2532 * ie: an int32_t immediately precedes SMAP.
2533 */
2534 smapsize = *((u_int32_t *)smapbase - 1);
2535 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
2536 has_smap = 1;
2537
2538 for (smap = smapbase; smap < smapend; smap++)
2539 if (!add_smap_entry(smap, physmap, &physmap_idx))
2540 break;
2541 goto have_smap;
2542 }
2543
2544 /*
2545 * Some newer BIOSes have a broken INT 12H implementation
2546 * which causes a kernel panic immediately. In this case, we
	 * need to use the SMAP to determine the base memory size.
2548 */
2549 hasbrokenint12 = 0;
2550 TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
2551 if (hasbrokenint12 == 0) {
2552 /* Use INT12 to determine base memory size. */
2553 vm86_intcall(0x12, &vmf);
2554 basemem = vmf.vmf_ax;
2555 basemem_setup();
2556 }
2557
2558 /*
2559 * Fetch the memory map with INT 15:E820. Map page 1 R/W into
2560 * the kernel page table so we can use it as a buffer. The
2561 * kernel will unmap this page later.
2562 */
2563 pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
2564 vmc.npages = 0;
2565 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
2566 res = vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
2567 KASSERT(res != 0, ("vm86_getptr() failed: address not found"));
2568
2569 vmf.vmf_ebx = 0;
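	/*
	 * %ebx carries the E820 continuation value: zero for the first
	 * call, the BIOS's cookie on subsequent iterations, and zero again
	 * once the final entry has been returned, terminating the loop.
	 */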
2570 do {
2571 vmf.vmf_eax = 0xE820;
2572 vmf.vmf_edx = SMAP_SIG;
2573 vmf.vmf_ecx = sizeof(struct bios_smap);
2574 i = vm86_datacall(0x15, &vmf, &vmc);
2575 if (i || vmf.vmf_eax != SMAP_SIG)
2576 break;
2577 has_smap = 1;
2578 if (!add_smap_entry(smap, physmap, &physmap_idx))
2579 break;
2580 } while (vmf.vmf_ebx != 0);
2581
2582 have_smap:
2583 /*
2584 * If we didn't fetch the "base memory" size from INT12,
2585 * figure it out from the SMAP (or just guess).
2586 */
2587 if (basemem == 0) {
2588 for (i = 0; i <= physmap_idx; i += 2) {
2589 if (physmap[i] == 0x00000000) {
2590 basemem = physmap[i + 1] / 1024;
2591 break;
2592 }
2593 }
2594
2595 /* XXX: If we couldn't find basemem from SMAP, just guess. */
2596 if (basemem == 0)
2597 basemem = 640;
2598 basemem_setup();
2599 }
2600
2601 if (physmap[1] != 0)
2602 goto physmap_done;
2603
2604 /*
2605 * If we failed to find an SMAP, figure out the extended
2606 * memory size. We will then build a simple memory map with
2607 * two segments, one for "base memory" and the second for
2608 * "extended memory". Note that "extended memory" starts at a
2609 * physical address of 1MB and that both basemem and extmem
2610 * are in units of 1KB.
2611 *
2612 * First, try to fetch the extended memory size via INT 15:E801.
2613 */
2614 vmf.vmf_ax = 0xE801;
2615 if (vm86_intcall(0x15, &vmf) == 0) {
2616 extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
2617 } else {
2618 /*
2619 * If INT15:E801 fails, this is our last ditch effort
2620 * to determine the extended memory size. Currently
2621 * we prefer the RTC value over INT15:88.
2622 */
2623 #if 0
2624 vmf.vmf_ah = 0x88;
2625 vm86_intcall(0x15, &vmf);
2626 extmem = vmf.vmf_ax;
2627 #else
2628 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
2629 #endif
2630 }
2631
2632 /*
2633 * Special hack for chipsets that still remap the 384k hole when
2634 * there's 16MB of memory - this really confuses people that
2635 * are trying to use bus mastering ISA controllers with the
2636 * "16MB limit"; they only have 16MB, but the remapping puts
2637 * them beyond the limit.
2638 *
2639 * If extended memory is between 15-16MB (16-17MB phys address range),
2640 * chop it to 15MB.
2641 */
2642 if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
2643 extmem = 15 * 1024;
2644
2645 physmap[0] = 0;
2646 physmap[1] = basemem * 1024;
2647 physmap_idx = 2;
2648 physmap[physmap_idx] = 0x100000;
2649 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
2650
2651 physmap_done:
2652 #endif
2653 /*
2654 * Now, physmap contains a map of physical memory.
2655 */
2656
2657 #ifdef SMP
2658 /* make hole for AP bootstrap code */
2659 physmap[1] = mp_bootaddress(physmap[1]);
2660 #endif
2661
2662 /*
2663 * Maxmem isn't the "maximum memory", it's one larger than the
2664 * highest page of the physical address space. It should be
2665 * called something like "Maxphyspage". We may adjust this
2666 * based on ``hw.physmem'' and the results of the memory test.
2667 */
2668 Maxmem = atop(physmap[physmap_idx + 1]);
2669
2670 #ifdef MAXMEM
2671 Maxmem = MAXMEM / 4;
2672 #endif
2673
2674 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
2675 Maxmem = atop(physmem_tunable);
2676
2677 /*
2678 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
2679 * the amount of memory in the system.
2680 */
2681 if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
2682 Maxmem = atop(physmap[physmap_idx + 1]);
2683
2684 /*
2685 * By default enable the memory test on real hardware, and disable
2686 * it if we appear to be running in a VM. This avoids touching all
2687 * pages unnecessarily, which doesn't matter on real hardware but is
2688 * bad for shared VM hosts. Use a general name so that
2689 * one could eventually do more with the code than just disable it.
2690 */
2691 memtest = (vm_guest > VM_GUEST_NO) ? 0 : 1;
2692 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
2693
2694 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
2695 (boothowto & RB_VERBOSE))
2696 printf("Physical memory use set to %ldK\n", Maxmem * 4);
2697
2698 /*
2699 * If Maxmem has been increased beyond what the system has detected,
2700 * extend the last memory segment to the new limit.
2701 */
2702 if (atop(physmap[physmap_idx + 1]) < Maxmem)
2703 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
2704
2705 /* call pmap initialization to make new kernel address space */
2706 pmap_bootstrap(first);
2707
2708 /*
2709 * Size up each available chunk of physical memory.
2710 */
2711 physmap[0] = PAGE_SIZE; /* mask off page 0 */
2712 pa_indx = 0;
2713 da_indx = 1;
2714 phys_avail[pa_indx++] = physmap[0];
2715 phys_avail[pa_indx] = physmap[0];
2716 dump_avail[da_indx] = physmap[0];
2717 pte = CMAP3;
2718
2719 /*
2720 * Get dcons buffer address
2721 */
2722 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
2723 getenv_quad("dcons.size", &dcons_size) == 0)
2724 dcons_addr = 0;
2725
2726 #ifndef XEN
2727 /*
2728 * physmap is in bytes, so when converting to page boundaries,
2729 * round up the start address and round down the end address.
2730 */
2731 for (i = 0; i <= physmap_idx; i += 2) {
2732 vm_paddr_t end;
2733
2734 end = ptoa((vm_paddr_t)Maxmem);
2735 if (physmap[i + 1] < end)
2736 end = trunc_page(physmap[i + 1]);
2737 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
2738 int tmp, page_bad, full;
2739 int *ptr = (int *)CADDR3;
2740
2741 full = FALSE;
2742 /*
2743 * block out kernel memory as not available.
2744 */
2745 if (pa >= KERNLOAD && pa < first)
2746 goto do_dump_avail;
2747
2748 /*
2749 * block out dcons buffer
2750 */
2751 if (dcons_addr > 0
2752 && pa >= trunc_page(dcons_addr)
2753 && pa < dcons_addr + dcons_size)
2754 goto do_dump_avail;
2755
2756 page_bad = FALSE;
2757 if (memtest == 0)
2758 goto skip_memtest;
2759
2760 /*
			 * map page into kernel: valid, read/write, non-cacheable
2762 */
2763 *pte = pa | PG_V | PG_RW | PG_N;
2764 invltlb();
2765
2766 tmp = *(int *)ptr;
2767 /*
2768 * Test for alternating 1's and 0's
2769 */
2770 *(volatile int *)ptr = 0xaaaaaaaa;
2771 if (*(volatile int *)ptr != 0xaaaaaaaa)
2772 page_bad = TRUE;
2773 /*
2774 * Test for alternating 0's and 1's
2775 */
2776 *(volatile int *)ptr = 0x55555555;
2777 if (*(volatile int *)ptr != 0x55555555)
2778 page_bad = TRUE;
2779 /*
2780 * Test for all 1's
2781 */
2782 *(volatile int *)ptr = 0xffffffff;
2783 if (*(volatile int *)ptr != 0xffffffff)
2784 page_bad = TRUE;
2785 /*
2786 * Test for all 0's
2787 */
2788 *(volatile int *)ptr = 0x0;
2789 if (*(volatile int *)ptr != 0x0)
2790 page_bad = TRUE;
2791 /*
2792 * Restore original value.
2793 */
2794 *(int *)ptr = tmp;
2795
2796 skip_memtest:
2797 /*
2798 * Adjust array of valid/good pages.
2799 */
2800 if (page_bad == TRUE)
2801 continue;
2802 /*
2803 * If this good page is a continuation of the
2804 * previous set of good pages, then just increase
2805 * the end pointer. Otherwise start a new chunk.
			 * Note that "end" points one past the last valid
			 * address, making the range >= start and < end.
			 * If we're also doing a speculative memory test
			 * and we're at or past the end, bump up Maxmem
2810 * so that we keep going. The first bad page
2811 * will terminate the loop.
2812 */
2813 if (phys_avail[pa_indx] == pa) {
2814 phys_avail[pa_indx] += PAGE_SIZE;
2815 } else {
2816 pa_indx++;
2817 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
2818 printf(
2819 "Too many holes in the physical address space, giving up\n");
2820 pa_indx--;
2821 full = TRUE;
2822 goto do_dump_avail;
2823 }
2824 phys_avail[pa_indx++] = pa; /* start */
2825 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
2826 }
2827 physmem++;
2828 do_dump_avail:
2829 if (dump_avail[da_indx] == pa) {
2830 dump_avail[da_indx] += PAGE_SIZE;
2831 } else {
2832 da_indx++;
2833 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2834 da_indx--;
2835 goto do_next;
2836 }
2837 dump_avail[da_indx++] = pa; /* start */
2838 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2839 }
2840 do_next:
2841 if (full)
2842 break;
2843 }
2844 }
2845 *pte = 0;
2846 invltlb();
2847 #else
2848 phys_avail[0] = physfree;
2849 phys_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;
2850 dump_avail[0] = 0;
2851 dump_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;
2852
2853 #endif
2854
2855 /*
2856 * XXX
2857 * The last chunk must contain at least one page plus the message
2858 * buffer to avoid complicating other code (message buffer address
2859 * calculation, etc.).
2860 */
2861 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2862 round_page(msgbufsize) >= phys_avail[pa_indx]) {
2863 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2864 phys_avail[pa_indx--] = 0;
2865 phys_avail[pa_indx--] = 0;
2866 }
2867
2868 Maxmem = atop(phys_avail[pa_indx]);
2869
2870 /* Trim off space for the message buffer. */
2871 phys_avail[pa_indx] -= round_page(msgbufsize);
2872
2873 /* Map the message buffer. */
2874 for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
2875 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
2876 off);
2877
2878 PT_UPDATES_FLUSH();
2879 }
2880 #endif /* PC98 */
2881
2882 #ifdef XEN
2883 #define MTOPSIZE (1<<(14 + PAGE_SHIFT))
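/*
 * MTOPSIZE is 1 << (14 + PAGE_SHIFT), i.e. 2^14 pages or 64MB with 4KB
 * pages; presumably sized to span the machine-to-physical translation
 * array that Xen keeps above HYPERVISOR_VIRT_START (see the comment in
 * init386() below).
 */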
2884
register_t
init386(int first)
{
2889 unsigned long gdtmachpfn;
2890 int error, gsel_tss, metadata_missing, x, pa;
2891 struct pcpu *pc;
2892 #ifdef CPU_ENABLE_SSE
2893 struct xstate_hdr *xhdr;
2894 #endif
2895 struct callback_register event = {
2896 .type = CALLBACKTYPE_event,
2897 .address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)Xhypervisor_callback },
2898 };
2899 struct callback_register failsafe = {
2900 .type = CALLBACKTYPE_failsafe,
2901 .address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback },
2902 };
2903
2904 thread0.td_kstack = proc0kstack;
2905 thread0.td_kstack_pages = KSTACK_PAGES;
2906
2907 /*
2908 * This may be done better later if it gets more high level
2909 * components in it. If so just link td->td_proc here.
2910 */
2911 proc_linkup0(&proc0, &thread0);
2912
2913 metadata_missing = 0;
2914 if (xen_start_info->mod_start) {
2915 preload_metadata = (caddr_t)xen_start_info->mod_start;
2916 preload_bootstrap_relocate(KERNBASE);
2917 } else {
2918 metadata_missing = 1;
2919 }
2920 if (envmode == 1)
2921 kern_envp = static_env;
2922 else if ((caddr_t)xen_start_info->cmd_line)
2923 kern_envp = xen_setbootenv((caddr_t)xen_start_info->cmd_line);
2924
2925 boothowto |= xen_boothowto(kern_envp);
2926
2927 /* Init basic tunables, hz etc */
2928 init_param1();
2929
2930 /*
	 * XEN occupies a portion of the upper virtual address space.
	 * At its base it manages an array mapping machine page frames
	 * to physical page frames - hence we need to be able to
	 * access 4GB - (64MB - 4MB + 64k).
2935 */
2936 gdt_segs[GPRIV_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2937 gdt_segs[GUFS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2938 gdt_segs[GUGS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2939 gdt_segs[GCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2940 gdt_segs[GDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2941 gdt_segs[GUCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2942 gdt_segs[GUDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2943 gdt_segs[GBIOSLOWMEM_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2944
2945 pc = &__pcpu[0];
2946 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2947 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2948
2949 PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V | PG_RW);
2950 bzero(gdt, PAGE_SIZE);
2951 for (x = 0; x < NGDT; x++)
2952 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2953
2954 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
2955
2956 gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
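	/*
	 * Xen requires the frames backing a GDT to be mapped read-only
	 * before they are registered, hence the remapping without PG_RW
	 * ahead of the HYPERVISOR_set_gdt() call.
	 */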
2957 PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V);
2958 PANIC_IF(HYPERVISOR_set_gdt(&gdtmachpfn, 512) != 0);
2959 lgdt(&r_gdt);
2960 gdtset = 1;
2961
2962 if ((error = HYPERVISOR_set_trap_table(trap_table)) != 0) {
2963 panic("set_trap_table failed - error %d\n", error);
2964 }
2965
2966 error = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
2967 if (error == 0)
2968 error = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
2969 #if CONFIG_XEN_COMPAT <= 0x030002
2970 if (error == -ENOXENSYS)
2971 HYPERVISOR_set_callbacks(GSEL(GCODE_SEL, SEL_KPL),
2972 (unsigned long)Xhypervisor_callback,
2973 GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback);
2974 #endif
2975 pcpu_init(pc, 0, sizeof(struct pcpu));
2976 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
2977 pmap_kenter(pa + KERNBASE, pa);
2978 dpcpu_init((void *)(first + KERNBASE), 0);
2979 first += DPCPU_SIZE;
2980 physfree += DPCPU_SIZE;
2981 init_first += DPCPU_SIZE / PAGE_SIZE;
2982
2983 PCPU_SET(prvspace, pc);
2984 PCPU_SET(curthread, &thread0);
2985
2986 /*
2987 * Initialize mutexes.
2988 *
2989 * icu_lock: in order to allow an interrupt to occur in a critical
2990 * section, to set pcpu->ipending (etc...) properly, we
2991 * must be able to get the icu lock, so it can't be
2992 * under witness.
2993 */
2994 mutex_init();
2995 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2996
2997 /* make ldt memory segments */
2998 PT_SET_MA(ldt, xpmap_ptom(VTOP(ldt)) | PG_V | PG_RW);
2999 bzero(ldt, PAGE_SIZE);
3000 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
3001 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
3002 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
3003 ssdtosd(&ldt_segs[x], &ldt[x].sd);
3004
3005 default_proc_ldt.ldt_base = (caddr_t)ldt;
3006 default_proc_ldt.ldt_len = 6;
3007 _default_ldt = (int)&default_proc_ldt;
3008 PCPU_SET(currentldt, _default_ldt);
3009 PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW);
3010 xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));
3011
3012 #if defined(XEN_PRIVILEGED)
3013 /*
3014 * Initialize the i8254 before the console so that console
3015 * initialization can use DELAY().
3016 */
3017 i8254_init();
3018 #endif
3019
3020 /*
3021 * Initialize the console before we print anything out.
3022 */
3023 cninit();
3024
3025 if (metadata_missing)
3026 printf("WARNING: loader(8) metadata is missing!\n");
3027
3028 #ifdef DEV_ISA
3029 #ifdef DEV_ATPIC
3030 elcr_probe();
3031 atpic_startup();
3032 #else
3033 /* Reset and mask the atpics and leave them shut down. */
3034 atpic_reset();
3035
3036 /*
3037 * Point the ICU spurious interrupt vectors at the APIC spurious
3038 * interrupt handler.
3039 */
3040 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
3041 GSEL(GCODE_SEL, SEL_KPL));
3042 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
3043 GSEL(GCODE_SEL, SEL_KPL));
3044 #endif
3045 #endif
3046
3047 #ifdef DDB
3048 ksym_start = bootinfo.bi_symtab;
3049 ksym_end = bootinfo.bi_esymtab;
3050 #endif
3051
3052 kdb_init();
3053
3054 #ifdef KDB
3055 if (boothowto & RB_KDB)
3056 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
3057 #endif
3058
3059 finishidentcpu(); /* Final stage of CPU initialization */
3060 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
3061 GSEL(GCODE_SEL, SEL_KPL));
3062 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
3063 GSEL(GCODE_SEL, SEL_KPL));
3064 initializecpu(); /* Initialize CPU registers */
3065 initializecpucache();
3066
3067 /* pointer to selector slot for %fs/%gs */
3068 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
3069
3070 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
3071 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
3072 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
3073 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
3074 #if defined(PAE) || defined(PAE_TABLES)
3075 dblfault_tss.tss_cr3 = (int)IdlePDPT;
3076 #else
3077 dblfault_tss.tss_cr3 = (int)IdlePTD;
3078 #endif
3079 dblfault_tss.tss_eip = (int)dblfault_handler;
3080 dblfault_tss.tss_eflags = PSL_KERNEL;
3081 dblfault_tss.tss_ds = dblfault_tss.tss_es =
3082 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
3083 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
3084 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
3085 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
3086
3087 vm86_initialize();
3088 getmemsize(first);
3089 init_param2(physmem);
3090
	/* now running on new page tables, configured, and u/iom is accessible */
3092
3093 msgbufinit(msgbufp, msgbufsize);
3094 #ifdef DEV_NPX
3095 npxinit(true);
3096 #endif
3097 /*
3098 * Set up thread0 pcb after npxinit calculated pcb + fpu save
3099 * area size. Zero out the extended state header in fpu save
3100 * area.
3101 */
3102 thread0.td_pcb = get_pcb_td(&thread0);
3103 bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
3104 #ifdef CPU_ENABLE_SSE
3105 if (use_xsave) {
3106 xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
3107 1);
3108 xhdr->xstate_bv = xsave_mask;
3109 }
3110 #endif
3111 PCPU_SET(curpcb, thread0.td_pcb);
3112 /* make an initial tss so cpu can get interrupt stack on syscall! */
3113 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
3114 PCPU_SET(common_tss.tss_esp0, (vm_offset_t)thread0.td_pcb - 16);
3115 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
3116 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
3117 HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL),
3118 PCPU_GET(common_tss.tss_esp0));
3119
3120 /* transfer to user mode */
3121
3122 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
3123 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
3124
3125 /* setup proc 0's pcb */
3126 thread0.td_pcb->pcb_flags = 0;
3127 #if defined(PAE) || defined(PAE_TABLES)
3128 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
3129 #else
3130 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
3131 #endif
3132 thread0.td_pcb->pcb_ext = 0;
3133 thread0.td_frame = &proc0_tf;
3134 thread0.td_pcb->pcb_fsd = PCPU_GET(fsgs_gdt)[0];
3135 thread0.td_pcb->pcb_gsd = PCPU_GET(fsgs_gdt)[1];
3136
3137 cpu_probe_amdc1e();
3138
3139 /* Location of kernel stack for locore */
3140 return ((register_t)thread0.td_pcb);
3141 }
3142
3143 #else
register_t
init386(int first)
{
3148 struct gate_descriptor *gdp;
3149 int gsel_tss, metadata_missing, x, pa;
3150 struct pcpu *pc;
3151 #ifdef CPU_ENABLE_SSE
3152 struct xstate_hdr *xhdr;
3153 #endif
3154
3155 thread0.td_kstack = proc0kstack;
3156 thread0.td_kstack_pages = KSTACK_PAGES;
3157
3158 /*
3159 * This may be done better later if it gets more high level
3160 * components in it. If so just link td->td_proc here.
3161 */
3162 proc_linkup0(&proc0, &thread0);
3163
3164 #ifdef PC98
3165 /*
3166 * Initialize DMAC
3167 */
3168 pc98_init_dmac();
3169 #endif
3170
3171 metadata_missing = 0;
3172 if (bootinfo.bi_modulep) {
3173 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
3174 preload_bootstrap_relocate(KERNBASE);
3175 } else {
3176 metadata_missing = 1;
3177 }
3178 if (envmode == 1)
3179 kern_envp = static_env;
3180 else if (bootinfo.bi_envp)
3181 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
3182
3183 /* Init basic tunables, hz etc */
3184 init_param1();
3185
3186 /*
3187 * Make gdt memory segments. All segments cover the full 4GB
3188 * of address space and permissions are enforced at page level.
3189 */
3190 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
3191 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
3192 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
3193 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
3194 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
3195 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
3196
3197 pc = &__pcpu[0];
3198 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
3199 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
3200 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
3201
3202 for (x = 0; x < NGDT; x++)
3203 ssdtosd(&gdt_segs[x], &gdt[x].sd);
3204
3205 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
3206 r_gdt.rd_base = (int) gdt;
3207 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
3208 lgdt(&r_gdt);
3209
3210 pcpu_init(pc, 0, sizeof(struct pcpu));
3211 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
3212 pmap_kenter(pa + KERNBASE, pa);
3213 dpcpu_init((void *)(first + KERNBASE), 0);
3214 first += DPCPU_SIZE;
3215 PCPU_SET(prvspace, pc);
3216 PCPU_SET(curthread, &thread0);
3217
3218 /*
3219 * Initialize mutexes.
3220 *
3221 * icu_lock: in order to allow an interrupt to occur in a critical
3222 * section, to set pcpu->ipending (etc...) properly, we
3223 * must be able to get the icu lock, so it can't be
3224 * under witness.
3225 */
3226 mutex_init();
3227 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
3228
3229 /* make ldt memory segments */
3230 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
3231 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
3232 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
3233 ssdtosd(&ldt_segs[x], &ldt[x].sd);
3234
3235 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
3236 lldt(_default_ldt);
3237 PCPU_SET(currentldt, _default_ldt);
3238
3239 /* exceptions */
3240 for (x = 0; x < NIDT; x++)
3241 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
3242 GSEL(GCODE_SEL, SEL_KPL));
3243 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
3244 GSEL(GCODE_SEL, SEL_KPL));
3245 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
3246 GSEL(GCODE_SEL, SEL_KPL));
3247 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
3248 GSEL(GCODE_SEL, SEL_KPL));
3249 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
3250 GSEL(GCODE_SEL, SEL_KPL));
3251 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
3252 GSEL(GCODE_SEL, SEL_KPL));
3253 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
3254 GSEL(GCODE_SEL, SEL_KPL));
3255 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
3256 GSEL(GCODE_SEL, SEL_KPL));
3257 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL
3258 , GSEL(GCODE_SEL, SEL_KPL));
3259 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
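	/*
	 * The double fault handler is reached through a task gate so the
	 * CPU switches to dblfault_tss and a known-good stack even when
	 * the faulting context's stack is unusable.
	 */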
3260 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
3261 GSEL(GCODE_SEL, SEL_KPL));
3262 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
3263 GSEL(GCODE_SEL, SEL_KPL));
3264 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
3265 GSEL(GCODE_SEL, SEL_KPL));
3266 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
3267 GSEL(GCODE_SEL, SEL_KPL));
3268 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
3269 GSEL(GCODE_SEL, SEL_KPL));
3270 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
3271 GSEL(GCODE_SEL, SEL_KPL));
3272 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
3273 GSEL(GCODE_SEL, SEL_KPL));
3274 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
3275 GSEL(GCODE_SEL, SEL_KPL));
3276 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
3277 GSEL(GCODE_SEL, SEL_KPL));
3278 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
3279 GSEL(GCODE_SEL, SEL_KPL));
3280 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
3281 GSEL(GCODE_SEL, SEL_KPL));
3282 #ifdef KDTRACE_HOOKS
3283 setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYS386TGT, SEL_UPL,
3284 GSEL(GCODE_SEL, SEL_KPL));
3285 #endif
3286 #ifdef XENHVM
3287 setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall), SDT_SYS386IGT, SEL_UPL,
3288 GSEL(GCODE_SEL, SEL_KPL));
3289 #endif
3290
3291 r_idt.rd_limit = sizeof(idt0) - 1;
3292 r_idt.rd_base = (int) idt;
3293 lidt(&r_idt);
3294
3295 #ifdef XBOX
3296 /*
3297 * The following code queries the PCI ID of 0:0:0. For the XBOX,
	 * this should be 0x10de / 0x02a5.
3299 *
3300 * This is exactly what Linux does.
3301 */
3302 outl(0xcf8, 0x80000000);
3303 if (inl(0xcfc) == 0x02a510de) {
3304 arch_i386_is_xbox = 1;
3305 pic16l_setled(XBOX_LED_GREEN);
3306
3307 /*
3308 * We are an XBOX, but we may have either 64MB or 128MB of
3309 * memory. The PCI host bridge should be programmed for this,
3310 * so we just query it.
3311 */
3312 outl(0xcf8, 0x80000084);
3313 arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
3314 }
3315 #endif /* XBOX */
3316
3317 /*
3318 * Initialize the i8254 before the console so that console
3319 * initialization can use DELAY().
3320 */
3321 i8254_init();
3322
3323 /*
3324 * Initialize the console before we print anything out.
3325 */
3326 cninit();
3327
3328 if (metadata_missing)
3329 printf("WARNING: loader(8) metadata is missing!\n");
3330
3331 #ifdef DEV_ISA
3332 #ifdef DEV_ATPIC
3333 #ifndef PC98
3334 elcr_probe();
3335 #endif
3336 atpic_startup();
3337 #else
3338 /* Reset and mask the atpics and leave them shut down. */
3339 atpic_reset();
3340
3341 /*
3342 * Point the ICU spurious interrupt vectors at the APIC spurious
3343 * interrupt handler.
3344 */
3345 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
3346 GSEL(GCODE_SEL, SEL_KPL));
3347 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
3348 GSEL(GCODE_SEL, SEL_KPL));
3349 #endif
3350 #endif
3351
3352 #ifdef DDB
3353 ksym_start = bootinfo.bi_symtab;
3354 ksym_end = bootinfo.bi_esymtab;
3355 #endif
3356
3357 kdb_init();
3358
3359 #ifdef KDB
3360 if (boothowto & RB_KDB)
3361 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
3362 #endif
3363
3364 finishidentcpu(); /* Final stage of CPU initialization */
3365 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
3366 GSEL(GCODE_SEL, SEL_KPL));
3367 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
3368 GSEL(GCODE_SEL, SEL_KPL));
3369 initializecpu(); /* Initialize CPU registers */
3370 initializecpucache();
3371
3372 /* pointer to selector slot for %fs/%gs */
3373 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
3374
3375 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
3376 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
3377 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
3378 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
3379 #if defined(PAE) || defined(PAE_TABLES)
3380 dblfault_tss.tss_cr3 = (int)IdlePDPT;
3381 #else
3382 dblfault_tss.tss_cr3 = (int)IdlePTD;
3383 #endif
3384 dblfault_tss.tss_eip = (int)dblfault_handler;
3385 dblfault_tss.tss_eflags = PSL_KERNEL;
3386 dblfault_tss.tss_ds = dblfault_tss.tss_es =
3387 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
3388 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
3389 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
3390 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
3391
3392 vm86_initialize();
3393 getmemsize(first);
3394 init_param2(physmem);
3395
	/* now running on new page tables, configured, and u/iom is accessible */
3397
3398 msgbufinit(msgbufp, msgbufsize);
3399 #ifdef DEV_NPX
3400 npxinit(true);
3401 #endif
3402 /*
3403 * Set up thread0 pcb after npxinit calculated pcb + fpu save
3404 * area size. Zero out the extended state header in fpu save
3405 * area.
3406 */
3407 thread0.td_pcb = get_pcb_td(&thread0);
3408 bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
3409 #ifdef CPU_ENABLE_SSE
3410 if (use_xsave) {
3411 xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
3412 1);
3413 xhdr->xstate_bv = xsave_mask;
3414 }
3415 #endif
3416 PCPU_SET(curpcb, thread0.td_pcb);
3417 /* make an initial tss so cpu can get interrupt stack on syscall! */
3418 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
3419 PCPU_SET(common_tss.tss_esp0, (vm_offset_t)thread0.td_pcb - 16);
3420 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
3421 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
3422 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
3423 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
3424 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
3425 ltr(gsel_tss);
3426
3427 /* make a call gate to reenter kernel with */
3428 gdp = &ldt[LSYS5CALLS_SEL].gd;
3429
3430 x = (int) &IDTVEC(lcall_syscall);
3431 gdp->gd_looffset = x;
3432 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
3433 gdp->gd_stkcpy = 1;
3434 gdp->gd_type = SDT_SYS386CGT;
3435 gdp->gd_dpl = SEL_UPL;
3436 gdp->gd_p = 1;
3437 gdp->gd_hioffset = x >> 16;
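	/*
	 * In a 386 call gate, gd_stkcpy is the count of argument dwords the
	 * CPU copies from the user stack to the kernel stack during the
	 * privilege transition; this gate copies a single dword.
	 */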
3438
3439 /* XXX does this work? */
3440 /* XXX yes! */
3441 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
3442 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
3443
3444 /* transfer to user mode */
3445
3446 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
3447 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
3448
3449 /* setup proc 0's pcb */
3450 thread0.td_pcb->pcb_flags = 0;
3451 #if defined(PAE) || defined(PAE_TABLES)
3452 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
3453 #else
3454 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
3455 #endif
3456 thread0.td_pcb->pcb_ext = 0;
3457 thread0.td_frame = &proc0_tf;
3458
3459 cpu_probe_amdc1e();
3460
3461 #ifdef FDT
3462 x86_init_fdt();
3463 #endif
3464
3465 /* Location of kernel stack for locore */
3466 return ((register_t)thread0.td_pcb);
3467 }
3468 #endif
3469
3470 void
3471 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
3472 {
3473
3474 pcpu->pc_acpi_id = 0xffffffff;
3475 }
3476
3477 #ifndef PC98
3478 static int
3479 smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
3480 {
3481 struct bios_smap *smapbase;
3482 struct bios_smap_xattr smap;
3483 caddr_t kmdp;
3484 uint32_t *smapattr;
3485 int count, error, i;
3486
3487 /* Retrieve the system memory map from the loader. */
3488 kmdp = preload_search_by_type("elf kernel");
3489 if (kmdp == NULL)
3490 kmdp = preload_search_by_type("elf32 kernel");
3491 if (kmdp == NULL)
3492 return (0);
3493 smapbase = (struct bios_smap *)preload_search_info(kmdp,
3494 MODINFO_METADATA | MODINFOMD_SMAP);
3495 if (smapbase == NULL)
3496 return (0);
3497 smapattr = (uint32_t *)preload_search_info(kmdp,
3498 MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
3499 count = *((u_int32_t *)smapbase - 1) / sizeof(*smapbase);
3500 error = 0;
3501 for (i = 0; i < count; i++) {
3502 smap.base = smapbase[i].base;
3503 smap.length = smapbase[i].length;
3504 smap.type = smapbase[i].type;
3505 if (smapattr != NULL)
3506 smap.xattr = smapattr[i];
3507 else
3508 smap.xattr = 0;
3509 error = SYSCTL_OUT(req, &smap, sizeof(smap));
3510 }
3511 return (error);
3512 }
3513 SYSCTL_PROC(_machdep, OID_AUTO, smap, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
3514 smap_sysctl_handler, "S,bios_smap_xattr", "Raw BIOS SMAP data");
3515 #endif /* !PC98 */
3516
3517 void
3518 spinlock_enter(void)
3519 {
3520 struct thread *td;
3521 register_t flags;
3522
3523 td = curthread;
3524 if (td->td_md.md_spinlock_count == 0) {
3525 flags = intr_disable();
3526 td->td_md.md_spinlock_count = 1;
3527 td->td_md.md_saved_flags = flags;
3528 } else
3529 td->td_md.md_spinlock_count++;
3530 critical_enter();
3531 }
3532
3533 void
3534 spinlock_exit(void)
3535 {
3536 struct thread *td;
3537 register_t flags;
3538
3539 td = curthread;
3540 critical_exit();
3541 flags = td->td_md.md_saved_flags;
3542 td->td_md.md_spinlock_count--;
3543 if (td->td_md.md_spinlock_count == 0)
3544 intr_restore(flags);
3545 }
3546
3547 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
3548 static void f00f_hack(void *unused);
3549 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
3550
3551 static void
3552 f00f_hack(void *unused)
3553 {
3554 struct gate_descriptor *new_idt;
3555 vm_offset_t tmp;
3556
3557 if (!has_f00f_bug)
3558 return;
3559
3560 GIANT_REQUIRED;
3561
3562 printf("Intel Pentium detected, installing workaround for F00F bug\n");
3563
3564 tmp = kmem_malloc(kernel_arena, PAGE_SIZE * 2, M_WAITOK | M_ZERO);
3565 if (tmp == 0)
3566 panic("kmem_malloc returned 0");
3567
3568 /* Put the problematic entry (#6) at the end of the lower page. */
3569 new_idt = (struct gate_descriptor*)
3570 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
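	/*
	 * Entries 0 through 6 now occupy the tail of the first page, so
	 * descriptor #6 (#UD, the vector raised by the F00F sequence) ends
	 * exactly at the page boundary; that page is made read-only below.
	 */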
3571 bcopy(idt, new_idt, sizeof(idt0));
3572 r_idt.rd_base = (u_int)new_idt;
3573 lidt(&r_idt);
3574 idt = new_idt;
3575 pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
3576 }
3577 #endif /* defined(I586_CPU) && !NO_F00F_HACK */
3578
3579 /*
3580 * Construct a PCB from a trapframe. This is called from kdb_trap() where
3581 * we want to start a backtrace from the function that caused us to enter
3582 * the debugger. We have the context in the trapframe, but base the trace
3583 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
3584 * enough for a backtrace.
3585 */
3586 void
3587 makectx(struct trapframe *tf, struct pcb *pcb)
3588 {
3589
3590 pcb->pcb_edi = tf->tf_edi;
3591 pcb->pcb_esi = tf->tf_esi;
3592 pcb->pcb_ebp = tf->tf_ebp;
3593 pcb->pcb_ebx = tf->tf_ebx;
3594 pcb->pcb_eip = tf->tf_eip;
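	/*
	 * A trap taken in kernel mode does not push %ss:%esp, so the
	 * trapframe is 8 bytes short and the stack pointer at trap time
	 * sits just past the shortened frame.
	 */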
3595 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
3596 }
3597
3598 int
3599 ptrace_set_pc(struct thread *td, u_long addr)
3600 {
3601
3602 td->td_frame->tf_eip = addr;
3603 return (0);
3604 }
3605
3606 int
3607 ptrace_single_step(struct thread *td)
3608 {
3609 td->td_frame->tf_eflags |= PSL_T;
3610 return (0);
3611 }
3612
3613 int
3614 ptrace_clear_single_step(struct thread *td)
3615 {
3616 td->td_frame->tf_eflags &= ~PSL_T;
3617 return (0);
3618 }
3619
3620 int
3621 fill_regs(struct thread *td, struct reg *regs)
3622 {
3623 struct pcb *pcb;
3624 struct trapframe *tp;
3625
3626 tp = td->td_frame;
3627 pcb = td->td_pcb;
3628 regs->r_gs = pcb->pcb_gs;
3629 return (fill_frame_regs(tp, regs));
3630 }
3631
3632 int
3633 fill_frame_regs(struct trapframe *tp, struct reg *regs)
3634 {
3635 regs->r_fs = tp->tf_fs;
3636 regs->r_es = tp->tf_es;
3637 regs->r_ds = tp->tf_ds;
3638 regs->r_edi = tp->tf_edi;
3639 regs->r_esi = tp->tf_esi;
3640 regs->r_ebp = tp->tf_ebp;
3641 regs->r_ebx = tp->tf_ebx;
3642 regs->r_edx = tp->tf_edx;
3643 regs->r_ecx = tp->tf_ecx;
3644 regs->r_eax = tp->tf_eax;
3645 regs->r_eip = tp->tf_eip;
3646 regs->r_cs = tp->tf_cs;
3647 regs->r_eflags = tp->tf_eflags;
3648 regs->r_esp = tp->tf_esp;
3649 regs->r_ss = tp->tf_ss;
3650 return (0);
3651 }
3652
3653 int
3654 set_regs(struct thread *td, struct reg *regs)
3655 {
3656 struct pcb *pcb;
3657 struct trapframe *tp;
3658
3659 tp = td->td_frame;
3660 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
3661 !CS_SECURE(regs->r_cs))
3662 return (EINVAL);
3663 pcb = td->td_pcb;
3664 tp->tf_fs = regs->r_fs;
3665 tp->tf_es = regs->r_es;
3666 tp->tf_ds = regs->r_ds;
3667 tp->tf_edi = regs->r_edi;
3668 tp->tf_esi = regs->r_esi;
3669 tp->tf_ebp = regs->r_ebp;
3670 tp->tf_ebx = regs->r_ebx;
3671 tp->tf_edx = regs->r_edx;
3672 tp->tf_ecx = regs->r_ecx;
3673 tp->tf_eax = regs->r_eax;
3674 tp->tf_eip = regs->r_eip;
3675 tp->tf_cs = regs->r_cs;
3676 tp->tf_eflags = regs->r_eflags;
3677 tp->tf_esp = regs->r_esp;
3678 tp->tf_ss = regs->r_ss;
3679 pcb->pcb_gs = regs->r_gs;
3680 return (0);
3681 }
3682
3683 #ifdef CPU_ENABLE_SSE
static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
3691 int i;
3692
3693 bzero(sv_87, sizeof(*sv_87));
3694
3695 /* FPU control/status */
3696 penv_87->en_cw = penv_xmm->en_cw;
3697 penv_87->en_sw = penv_xmm->en_sw;
3698 penv_87->en_tw = penv_xmm->en_tw;
3699 penv_87->en_fip = penv_xmm->en_fip;
3700 penv_87->en_fcs = penv_xmm->en_fcs;
3701 penv_87->en_opcode = penv_xmm->en_opcode;
3702 penv_87->en_foo = penv_xmm->en_foo;
3703 penv_87->en_fos = penv_xmm->en_fos;
3704
3705 /* FPU registers */
3706 for (i = 0; i < 8; ++i)
3707 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
3708 }
3709
static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
3717 int i;
3718
3719 /* FPU control/status */
3720 penv_xmm->en_cw = penv_87->en_cw;
3721 penv_xmm->en_sw = penv_87->en_sw;
3722 penv_xmm->en_tw = penv_87->en_tw;
3723 penv_xmm->en_fip = penv_87->en_fip;
3724 penv_xmm->en_fcs = penv_87->en_fcs;
3725 penv_xmm->en_opcode = penv_87->en_opcode;
3726 penv_xmm->en_foo = penv_87->en_foo;
3727 penv_xmm->en_fos = penv_87->en_fos;
3728
3729 /* FPU registers */
3730 for (i = 0; i < 8; ++i)
3731 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
3732 }
3733 #endif /* CPU_ENABLE_SSE */
3734
3735 int
3736 fill_fpregs(struct thread *td, struct fpreg *fpregs)
3737 {
3738
3739 KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
3740 P_SHOULDSTOP(td->td_proc),
3741 ("not suspended thread %p", td));
3742 #ifdef DEV_NPX
3743 npxgetregs(td);
3744 #else
3745 bzero(fpregs, sizeof(*fpregs));
3746 #endif
3747 #ifdef CPU_ENABLE_SSE
3748 if (cpu_fxsr)
3749 fill_fpregs_xmm(&get_pcb_user_save_td(td)->sv_xmm,
3750 (struct save87 *)fpregs);
3751 else
3752 #endif /* CPU_ENABLE_SSE */
3753 bcopy(&get_pcb_user_save_td(td)->sv_87, fpregs,
3754 sizeof(*fpregs));
3755 return (0);
3756 }
3757
3758 int
3759 set_fpregs(struct thread *td, struct fpreg *fpregs)
3760 {
3761
3762 #ifdef CPU_ENABLE_SSE
3763 if (cpu_fxsr)
3764 set_fpregs_xmm((struct save87 *)fpregs,
3765 &get_pcb_user_save_td(td)->sv_xmm);
3766 else
3767 #endif /* CPU_ENABLE_SSE */
3768 bcopy(fpregs, &get_pcb_user_save_td(td)->sv_87,
3769 sizeof(*fpregs));
3770 #ifdef DEV_NPX
3771 npxuserinited(td);
3772 #endif
3773 return (0);
3774 }
3775
3776 /*
3777 * Get machine context.
3778 */
3779 int
3780 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
3781 {
3782 struct trapframe *tp;
3783 struct segment_descriptor *sdp;
3784
3785 tp = td->td_frame;
3786
3787 PROC_LOCK(curthread->td_proc);
3788 mcp->mc_onstack = sigonstack(tp->tf_esp);
3789 PROC_UNLOCK(curthread->td_proc);
3790 mcp->mc_gs = td->td_pcb->pcb_gs;
3791 mcp->mc_fs = tp->tf_fs;
3792 mcp->mc_es = tp->tf_es;
3793 mcp->mc_ds = tp->tf_ds;
3794 mcp->mc_edi = tp->tf_edi;
3795 mcp->mc_esi = tp->tf_esi;
3796 mcp->mc_ebp = tp->tf_ebp;
3797 mcp->mc_isp = tp->tf_isp;
3798 mcp->mc_eflags = tp->tf_eflags;
3799 if (flags & GET_MC_CLEAR_RET) {
3800 mcp->mc_eax = 0;
3801 mcp->mc_edx = 0;
3802 mcp->mc_eflags &= ~PSL_C;
3803 } else {
3804 mcp->mc_eax = tp->tf_eax;
3805 mcp->mc_edx = tp->tf_edx;
3806 }
3807 mcp->mc_ebx = tp->tf_ebx;
3808 mcp->mc_ecx = tp->tf_ecx;
3809 mcp->mc_eip = tp->tf_eip;
3810 mcp->mc_cs = tp->tf_cs;
3811 mcp->mc_esp = tp->tf_esp;
3812 mcp->mc_ss = tp->tf_ss;
3813 mcp->mc_len = sizeof(*mcp);
3814 get_fpcontext(td, mcp, NULL, 0);
3815 sdp = &td->td_pcb->pcb_fsd;
3816 mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
3817 sdp = &td->td_pcb->pcb_gsd;
3818 mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
3819 mcp->mc_flags = 0;
3820 mcp->mc_xfpustate = 0;
3821 mcp->mc_xfpustate_len = 0;
3822 bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
3823 return (0);
3824 }

/*
 * Set machine context.
 *
 * Only the user-modifiable flags are honored, and the cs selector is
 * never touched.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
        struct trapframe *tp;
        char *xfpustate;
        int eflags, ret;

        tp = td->td_frame;
        if (mcp->mc_len != sizeof(*mcp) ||
            (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
                return (EINVAL);
        eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
            (tp->tf_eflags & ~PSL_USERCHANGE);
        if (mcp->mc_flags & _MC_HASFPXSTATE) {
                if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
                    sizeof(union savefpu))
                        return (EINVAL);
                xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
                ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
                    mcp->mc_xfpustate_len);
                if (ret != 0)
                        return (ret);
        } else
                xfpustate = NULL;
        ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
        if (ret != 0)
                return (ret);
        tp->tf_fs = mcp->mc_fs;
        tp->tf_es = mcp->mc_es;
        tp->tf_ds = mcp->mc_ds;
        tp->tf_edi = mcp->mc_edi;
        tp->tf_esi = mcp->mc_esi;
        tp->tf_ebp = mcp->mc_ebp;
        tp->tf_ebx = mcp->mc_ebx;
        tp->tf_edx = mcp->mc_edx;
        tp->tf_ecx = mcp->mc_ecx;
        tp->tf_eax = mcp->mc_eax;
        tp->tf_eip = mcp->mc_eip;
        tp->tf_eflags = eflags;
        tp->tf_esp = mcp->mc_esp;
        tp->tf_ss = mcp->mc_ss;
        td->td_pcb->pcb_gs = mcp->mc_gs;
        return (0);
}
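
/*
 * Worked example of the PSL_USERCHANGE merge above (values hypothetical):
 * with mc_eflags = 0x3246 (IOPL=3, IF, ZF, PF) and tf_eflags = 0x0202
 * (IF only), only the user-changeable bits survive from the context, so
 * ZF and PF are taken from mc_eflags while IOPL and IF are kept from the
 * current trapframe.  A process thus cannot grant itself I/O privilege
 * or mask interrupts through setcontext(2).
 */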

static void
get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
    size_t xfpusave_len)
{
#ifdef CPU_ENABLE_SSE
        size_t max_len, len;
#endif

#ifndef DEV_NPX
        mcp->mc_fpformat = _MC_FPFMT_NODEV;
        mcp->mc_ownedfp = _MC_FPOWNED_NONE;
        bzero(mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
#else
        mcp->mc_ownedfp = npxgetregs(td);
        bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
            sizeof(mcp->mc_fpstate));
        mcp->mc_fpformat = npxformat();
#ifdef CPU_ENABLE_SSE
        if (!use_xsave || xfpusave_len == 0)
                return;
        max_len = cpu_max_ext_state_size - sizeof(union savefpu);
        len = xfpusave_len;
        if (len > max_len) {
                len = max_len;
                /* Zero the tail of the caller's buffer we won't fill. */
                bzero(xfpusave + max_len, xfpusave_len - max_len);
        }
        mcp->mc_flags |= _MC_HASFPXSTATE;
        mcp->mc_xfpustate_len = len;
        bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
#endif
#endif
}
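
/*
 * Worked example of the extended-state clamp above (numbers hypothetical):
 * with cpu_max_ext_state_size = 832 and sizeof(union savefpu) = 512,
 * max_len is 320 bytes.  A caller passing xfpusave_len = 1024 gets the
 * first 320 bytes of XSAVE extension area copied, and the remaining
 * 704 bytes of its buffer are zeroed so no stale kernel data leaks out.
 */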

static int
set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate,
    size_t xfpustate_len)
{
        union savefpu *fpstate;
        int error;

        if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
                return (0);
        else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
            mcp->mc_fpformat != _MC_FPFMT_XMM)
                return (EINVAL);
        else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
                /* We don't care what state is left in the FPU or PCB. */
                fpstate_drop(td);
                error = 0;
        } else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
            mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
#ifdef DEV_NPX
                fpstate = (union savefpu *)&mcp->mc_fpstate;
#ifdef CPU_ENABLE_SSE
                if (cpu_fxsr)
                        fpstate->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
#endif
                error = npxsetregs(td, fpstate, xfpustate, xfpustate_len);
#else
                error = EINVAL;
#endif
        } else
                return (EINVAL);
        return (error);
}
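
/*
 * The cpu_mxcsr_mask filter above is a safety check, not cosmetics:
 * FXRSTOR raises #GP if any reserved MXCSR bit is set, so restoring an
 * unchecked user-supplied MXCSR could panic the kernel.  Sketch of the
 * idea (mask value hypothetical; 0xffbf is the architectural default
 * when FXSAVE reports no MXCSR_MASK):
 */
#if 0
        uint32_t user_mxcsr = 0xffffffff;               /* hostile input */
        uint32_t safe_mxcsr = user_mxcsr & 0xffbf;      /* reserved bits off */
#endif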

static void
fpstate_drop(struct thread *td)
{

        KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
        critical_enter();
#ifdef DEV_NPX
        if (PCPU_GET(fpcurthread) == td)
                npxdrop();
#endif
        /*
         * XXX force a full drop of the npx.  The above only drops it if we
         * owned it.  npxgetregs() has the same bug in the !cpu_fxsr case.
         *
         * XXX I don't much like npxgetregs()'s semantics of doing a full
         * drop.  Dropping only to the pcb matches fnsave's behaviour.
         * We only need to drop to !PCB_INITDONE in sendsig().  But
         * sendsig() is the only caller of npxgetregs()... perhaps we just
         * have too many layers.
         */
        curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
            PCB_NPXUSERINITDONE);
        critical_exit();
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
        struct pcb *pcb;

        if (td == NULL) {
                dbregs->dr[0] = rdr0();
                dbregs->dr[1] = rdr1();
                dbregs->dr[2] = rdr2();
                dbregs->dr[3] = rdr3();
                dbregs->dr[4] = rdr4();
                dbregs->dr[5] = rdr5();
                dbregs->dr[6] = rdr6();
                dbregs->dr[7] = rdr7();
        } else {
                pcb = td->td_pcb;
                dbregs->dr[0] = pcb->pcb_dr0;
                dbregs->dr[1] = pcb->pcb_dr1;
                dbregs->dr[2] = pcb->pcb_dr2;
                dbregs->dr[3] = pcb->pcb_dr3;
                dbregs->dr[4] = 0;
                dbregs->dr[5] = 0;
                dbregs->dr[6] = pcb->pcb_dr6;
                dbregs->dr[7] = pcb->pcb_dr7;
        }
        return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
        struct pcb *pcb;
        int i;

        if (td == NULL) {
                load_dr0(dbregs->dr[0]);
                load_dr1(dbregs->dr[1]);
                load_dr2(dbregs->dr[2]);
                load_dr3(dbregs->dr[3]);
                load_dr4(dbregs->dr[4]);
                load_dr5(dbregs->dr[5]);
                load_dr6(dbregs->dr[6]);
                load_dr7(dbregs->dr[7]);
        } else {
                /*
                 * Don't let an illegal value for dr7 get set.  Specifically,
                 * check for undefined settings.  Setting these bit patterns
                 * results in undefined behaviour and can lead to an
                 * unexpected TRCTRAP.
                 */
                for (i = 0; i < 4; i++) {
                        if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
                                return (EINVAL);
                        if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
                                return (EINVAL);
                }

                pcb = td->td_pcb;

                /*
                 * Don't let a process set a breakpoint that is not within the
                 * process's address space.  If a process could do this, it
                 * could halt the system by setting a breakpoint in the kernel
                 * (if ddb was enabled).  Thus, we need to check to make sure
                 * that no breakpoints are being enabled for addresses outside
                 * the process's address space.
                 *
                 * XXX - what about when the watched area of the user's
                 * address space is written into from within the kernel
                 * ... wouldn't that still cause a breakpoint to be generated
                 * from within kernel mode?
                 */

                if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
                        /* dr0 is enabled */
                        if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
                                return (EINVAL);
                }

                if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
                        /* dr1 is enabled */
                        if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
                                return (EINVAL);
                }

                if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
                        /* dr2 is enabled */
                        if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
                                return (EINVAL);
                }

                if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
                        /* dr3 is enabled */
                        if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
                                return (EINVAL);
                }

                pcb->pcb_dr0 = dbregs->dr[0];
                pcb->pcb_dr1 = dbregs->dr[1];
                pcb->pcb_dr2 = dbregs->dr[2];
                pcb->pcb_dr3 = dbregs->dr[3];
                pcb->pcb_dr6 = dbregs->dr[6];
                pcb->pcb_dr7 = dbregs->dr[7];

                pcb->pcb_flags |= PCB_DBREGS;
        }

        return (0);
}
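
/*
 * Illustrative only: a debugger arms these registers through
 * ptrace(PT_SETDBREGS).  A hedged sketch of a 4-byte write watchpoint
 * in slot 0 (dr7 bit layout per the IA-32 manuals; helper name
 * hypothetical, compiled out since this is kernel source):
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <machine/reg.h>
#include <string.h>

static int
set_write_watchpoint(pid_t pid, u_int32_t addr)
{
        struct dbreg db;

        memset(&db, 0, sizeof(db));
        db.dr[0] = addr;        /* linear address to watch */
        /*
         * dr7: bit 0 is L0 (local enable, slot 0); bits 16-17 are R/W0
         * (01b: break on data writes); bits 18-19 are LEN0 (11b: 4 bytes
         * wide).  Both patterns pass the undefined-setting checks in
         * set_dbregs() above; addr must lie below VM_MAXUSER_ADDRESS.
         */
        db.dr[7] = 0x1 | (0x1 << 16) | (0x3 << 18);
        return (ptrace(PT_SETDBREGS, pid, (caddr_t)&db, 0));
}
#endif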

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0 otherwise.
 */
int
user_dbreg_trap(void)
{
        u_int32_t dr7, dr6;     /* debug registers dr6 and dr7 */
        u_int32_t bp;           /* breakpoint bits extracted from dr6 */
        int nbp;                /* number of breakpoints that triggered */
        caddr_t addr[4];        /* breakpoint addresses */
        int i;

        dr7 = rdr7();
        if ((dr7 & 0x000000ff) == 0) {
                /*
                 * All local (L0-L3) and global (G0-G3) enable bits in
                 * dr7 are zero, so the trap cannot have been caused by
                 * the hardware debug registers.
                 */
                return (0);
        }

        nbp = 0;
        dr6 = rdr6();
        bp = dr6 & 0x0000000f;

        if (!bp) {
                /*
                 * None of the breakpoint condition bits (B0-B3) are set,
                 * meaning this trap was not caused by any of the debug
                 * registers.
                 */
                return (0);
        }

        /*
         * At least one of the breakpoints was hit; check which ones
         * and whether any of them are user space addresses.
         */

        if (bp & 0x01) {
                addr[nbp++] = (caddr_t)rdr0();
        }
        if (bp & 0x02) {
                addr[nbp++] = (caddr_t)rdr1();
        }
        if (bp & 0x04) {
                addr[nbp++] = (caddr_t)rdr2();
        }
        if (bp & 0x08) {
                addr[nbp++] = (caddr_t)rdr3();
        }

        for (i = 0; i < nbp; i++) {
                if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
                        /*
                         * addr[i] is in user space.
                         */
                        return (nbp);
                }
        }

        /*
         * None of the breakpoints are in user space.
         */
        return (0);
}
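
/*
 * Worked example of the decoding above: if the watchpoint in slot 2
 * fires, dr6 might read 0xffff0ff4 (reserved high bits read as ones);
 * the low nibble 0x4 is B2, so rdr2() supplies the faulting address,
 * and if that address is below VM_MAXUSER_ADDRESS the trap is reported
 * as a user-space breakpoint.
 */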

#ifdef KDB

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as inline functions and thus cannot be called from the
 * debugger.
 */

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);

u_char
inb_(u_short port)
{
        return (inb(port));
}

void
outb_(u_short port, u_char data)
{
        outb(port, data);
}
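
/*
 * From the ddb prompt these can be invoked with the debugger's call
 * command, for example:
 *
 *      db> call inb_(0x61)
 *      db> call outb_(0x80, 0x55)
 */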

#endif /* KDB */