/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.2/sys/i386/i386/machdep.c 338607 2018-09-12 05:08:49Z gordon $");

#include "opt_apic.h"
#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_perfmon.h"
#include "opt_platform.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP
#include <sys/smp.h>
#endif
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>
#else
#include <isa/rtc.h>
#endif

#include <net/netisr.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/vm86.h>
#include <x86/init.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef FDT
#include <x86/fdt.h>
#endif

#ifdef DEV_APIC
#include <x86/apicvar.h>
#endif

#ifdef DEV_ISA
#include <x86/isa/icu.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>

int arch_i386_is_xbox = 0;
uint32_t arch_i386_xbox_memsize = 0;
#endif

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern register_t init386(int first);
extern void dblfault_handler(void);

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpusave, size_t xfpusave_len);
static int set_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpustate, size_t xfpustate_len);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Intel ICH registers */
#define ICH_PMBASE 0x400
#define ICH_SMI_EN (ICH_PMBASE + 0x30)

int _udatasel, _ucodesel;
u_int basemem;

#ifdef PC98
int need_pre_dma_flush;		/* If 1, use wbinvd before DMA transfer. */
int need_post_dma_flush;	/* If 1, use invd after DMA transfer. */

static int ispc98 = 1;
SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");
#endif

int cold = 1;

#ifdef COMPAT_43
static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif
#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif

long Maxmem = 0;
long realmem = 0;

#ifdef PAE
FEATURE(pae, "Physical Address Extensions");
#endif

/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
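
/*
 * Worked example (added note, not from the original source): with, say,
 * VM_PHYSSEG_MAX == 17, PHYSMAP_SIZE is 2 * 16 == 32 array slots, i.e.
 * 16 base/bound pairs -- one pair fewer than the number of PHYSSEG
 * entries, as required above.
 */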

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so that a 0/0 pair can signal the end of chunks */
#define PHYS_AVAIL_ARRAY_END (nitems(phys_avail) - 2)
#define DUMP_AVAIL_ARRAY_END (nitems(dump_avail) - 2)

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

struct mtx icu_lock;

struct mem_range_softc mem_range_softc;

/* Default init_ops implementation. */
struct init_ops init_ops = {
	.early_clock_source_init = i8254_init,
	.early_delay = i8254_delay,
#ifdef DEV_APIC
	.msi_init = msi_init,
#endif
};

static void
cpu_startup(dummy)
	void *dummy;
{
	uintmax_t memsize;
	char *sysenv;

#ifndef PC98
	/*
	 * On MacBooks, we need to disallow the legacy USB circuit to
	 * generate an SMI# because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable register) of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = kern_getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook4,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
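			/*
			 * Explanatory note (not in the original source):
			 * bit 3 (0x8) of SMI_EN is the LEGACY_USB_EN
			 * enable in Intel ICH datasheets; clearing it
			 * stops the legacy USB circuit from raising SMI#.
			 */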
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}
#endif /* !PC98 */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif

	/*
	 * Display physical memory if SMBIOS reports reasonable amount.
	 */
	memsize = 0;
	sysenv = kern_getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
		freeenv(sysenv);
	}
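	/*
	 * Added note: atop() converts bytes to pages and ptoa() pages to
	 * bytes (a shift by PAGE_SHIFT); e.g., with 4 KB pages, 512 MB of
	 * RAM is 131072 pages.
	 */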
	if (memsize < ptoa((uintmax_t)vm_cnt.v_free_count))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
	realmem = atop(memsize);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_cnt.v_free_count),
	    ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
	cpu_setregs();
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by call
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
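
/*
 * For orientation, an illustrative sketch (added note, not authoritative)
 * of the user stack that the sendsig variants below build, growing down:
 *
 *	interrupted user frame
 *	[XSAVE extension area, 64-byte aligned -- sendsig() only]
 *	struct (o)sigframe          <- new %esp
 *	  (signal number, siginfo/context pointers, saved machine context)
 *
 * %eip is then aimed at the signal trampoline, which calls the handler and
 * finally invokes the matching variant of sigreturn(2).
 */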
#ifdef COMPAT_43
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
		sf.sf_addr = 0;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	if (p->p_sysent->sv_sigcode_base != 0) {
		regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
		    szosigcode;
	} else {
		/* a.out sysentvec does not use shared page */
		regs->tf_eip = p->p_sysent->sv_psstrings - szosigcode;
	}
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
	    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	bzero(sf.sf_uc.uc_mcontext.__spare__,
	    sizeof(sf.sf_uc.uc_mcontext.__spare__));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
	    szfreebsd4_sigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_FREEBSD4 */

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	struct segment_descriptor *sdp;
	char *xfpusave;
	size_t xfpusave_len;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	if (cpu_max_ext_state_size > sizeof(union savefpu) && use_xsave) {
		xfpusave_len = cpu_max_ext_state_size - sizeof(union savefpu);
		xfpusave = __builtin_alloca(xfpusave_len);
	} else {
		xfpusave_len = 0;
		xfpusave = NULL;
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
	fpstate_drop(td);
	/*
	 * Unconditionally fill the fsbase and gsbase into the mcontext.
	 */
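	/*
	 * Added note: an i386 segment descriptor keeps its 32-bit base
	 * split into sd_lobase (bits 0-23) and sd_hibase (bits 24-31), so
	 * the full base is rebuilt as (sd_hibase << 24) | sd_lobase; e.g.,
	 * a base of 0xbfbfe000 is stored as sd_lobase 0xbfe000 with
	 * sd_hibase 0xbf.
	 */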
	sdp = &td->td_pcb->pcb_fsd;
	sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	bzero(sf.sf_uc.uc_mcontext.mc_spare2,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare2));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = (char *)td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_esp - 128;
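	/*
	 * (Assumed rationale, not stated in the original: the 128 bytes of
	 * slack below the interrupted %esp avoid clobbering any scratch
	 * data the process may keep just beyond its stack pointer.)
	 */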
	if (xfpusave != NULL) {
		sp -= xfpusave_len;
		sp = (char *)((unsigned int)sp & ~0x3F);
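		/*
		 * Masking with ~0x3F rounds the area down to a 64-byte
		 * boundary, the alignment the XSAVE instruction family
		 * requires for its state area.
		 */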
		sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
	}
	sp -= sizeof(struct sigframe);

	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
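	/* E.g., sp == 0xbfbfe4ac masks down to sfp == 0xbfbfe4a0. */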

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    (xfpusave != NULL && copyout(xfpusave,
	    (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
	    != 0)) {
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = p->p_sysent->sv_sigcode_base;
	if (regs->tf_eip == 0)
		regs->tf_eip = p->p_sysent->sv_psstrings - szsigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
#ifdef COMPAT_43
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	int eflags, error;
	ksiginfo_t ksi;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags; /* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
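		/*
		 * (Aside, summarizing macros defined elsewhere: EFL_SECURE()
		 * is expected to accept the new eflags only if it differs
		 * from the old value solely in user-changeable bits, and
		 * CS_SECURE() to demand a selector with user (ring 3)
		 * privilege.)
		 */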
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
	    SIGPROCMASK_OLD);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * MPSAFE
 */
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
{
	struct ucontext4 uc;
	struct trapframe *regs;
	struct ucontext4 *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags; /* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			uprintf("pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}
#endif /* COMPAT_FREEBSD4 */

/*
 * MPSAFE
 */
int
sys_sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct proc *p;
	struct trapframe *regs;
	ucontext_t *ucp;
	char *xfpustate;
	size_t xfpustate_len;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	p = td->td_proc;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
		uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
		    td->td_name, ucp->uc_mcontext.mc_flags);
		return (EINVAL);
	}
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags; /* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
			xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
			if (xfpustate_len > cpu_max_ext_state_size -
			    sizeof(union savefpu)) {
				uprintf(
				    "pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
				    p->p_pid, td->td_name, xfpustate_len);
				return (EINVAL);
			}
			xfpustate = __builtin_alloca(xfpustate_len);
			error = copyin((const void *)uc.uc_mcontext.mc_xfpustate,
			    xfpustate, xfpustate_len);
			if (error != 0) {
				uprintf(
				    "pid %d (%s): sigreturn copying xfpustate failed\n",
				    p->p_pid, td->td_name);
				return (error);
			}
		} else {
			xfpustate = NULL;
			xfpustate_len = 0;
		}
		ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate,
		    xfpustate_len);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}

/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

	/*
	 * Reset the fs and gs bases.  The values from the old address
	 * space do not make sense for the new program.  In particular,
	 * gsbase might be the TLS base for the old program but the new
	 * program has no TLS now.
	 */
	set_fsbase(td, 0);
	set_gsbase(td, 0);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = imgp->entry_addr;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = imgp->ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == curpcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure %edx is 0x0 on entry; Linux binaries depend on it.
	 */
	td->td_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();

	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

u_long bootdev;		/* not a struct cdev *- encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");

static char bootmethod[16] = "BIOS";
SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
    "System firmware boot method");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;

union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */
struct mtx dt_lock;			/* lock for GDT and LDT */

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern vm_offset_t proc0kstack;

/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GUFS_SEL	2 %fs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GUGS_SEL	3 %gs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GUCODE_SEL	6 Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GUDATA_SEL	7 Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	.ssd_base = 0x400,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GLDT_SEL	10 LDT Descriptor */
{	.ssd_base = (int) ldt,
	.ssd_limit = sizeof(ldt)-1,
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	.ssd_base = (int) ldt,
	.ssd_limit = (512 * sizeof(union descriptor)-1),
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	.ssd_base = (int) &dblfault_tss,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1 },
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1 },
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1 },
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1 },
/* GNDIS_SEL	18 NDIS Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
};

static struct soft_segment_descriptor ldt_segs[] = {
/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func)>>16 ;
}
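
/*
 * Illustrative only: a typical trap-gate registration, following the
 * pattern init386() uses later in this file, would look like
 *
 *	setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
 *	    GSEL(GCODE_SEL, SEL_KPL));
 */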

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm),
#ifdef KDTRACE_HOOKS
	IDTVEC(dtrace_ret),
#endif
#ifdef XENHVM
	IDTVEC(xen_intr_upcall),
#endif
	IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = (ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}
}

/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
{
	uint64_t idtr, gdtr;

	idtr = ridt();
	db_printf("idtr\t0x%08x/%04x\n",
	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
	gdtr = rgdt();
	db_printf("gdtr\t0x%08x/%04x\n",
	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
	db_printf("ldtr\t0x%04x\n", rldt());
	db_printf("tr\t0x%04x\n", rtr());
	db_printf("cr0\t0x%08x\n", rcr0());
	db_printf("cr2\t0x%08x\n", rcr2());
	db_printf("cr3\t0x%08x\n", rcr3());
	db_printf("cr4\t0x%08x\n", rcr4());
	if (rcr4() & CR4_XSAVE)
		db_printf("xcr0\t0x%016llx\n", rxcr(0));
	if (amd_feature & (AMDID_NX | AMDID_LM))
		db_printf("EFER\t0x%016llx\n", rdmsr(MSR_EFER));
	if (cpu_feature2 & (CPUID2_VMX | CPUID2_SMX))
		db_printf("FEATURES_CTL\t0x%016llx\n",
		    rdmsr(MSR_IA32_FEATURE_CONTROL));
	if ((cpu_vendor_id == CPU_VENDOR_INTEL ||
	    cpu_vendor_id == CPU_VENDOR_AMD) && CPUID_TO_FAMILY(cpu_id) >= 6)
		db_printf("DEBUG_CTL\t0x%016llx\n", rdmsr(MSR_DEBUGCTLMSR));
	if (cpu_feature & CPUID_PAT)
		db_printf("PAT\t0x%016llx\n", rdmsr(MSR_PAT));
}

DB_SHOW_COMMAND(dbregs, db_show_dbregs)
{

	db_printf("dr0\t0x%08x\n", rdr0());
	db_printf("dr1\t0x%08x\n", rdr1());
	db_printf("dr2\t0x%08x\n", rdr2());
	db_printf("dr3\t0x%08x\n", rdr3());
	db_printf("dr6\t0x%08x\n", rdr6());
	db_printf("dr7\t0x%08x\n", rdr7());
}
#endif

void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{
	ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type = sd->sd_type;
	ssd->ssd_dpl = sd->sd_dpl;
	ssd->ssd_p = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran = sd->sd_gran;
}

#if !defined(PC98)
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    int *physmap_idxp)
{
	int i, insert_idx, physmap_idx;

	physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

#ifndef PAE
	if (base > 0xffffffff) {
		printf("%uK of memory above 4GB ignored\n",
		    (u_int)(length / 1024));
		return (1);
	}
#endif

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = physmap_idx + 2;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	physmap_idx += 2;
	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;
	return (1);
}

static int
add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
{
	if (boothowto & RB_VERBOSE)
		printf("SMAP type=%02x base=%016llx len=%016llx\n",
		    smap->type, smap->base, smap->length);

	if (smap->type != SMAP_TYPE_MEMORY)
		return (1);

	return (add_physmap_entry(smap->base, smap->length, physmap,
	    physmap_idxp));
}

static void
add_smap_entries(struct bios_smap *smapbase, vm_paddr_t *physmap,
    int *physmap_idxp)
{
	struct bios_smap *smap, *smapend;
	u_int32_t smapsize;
	/*
	 * Memory map from INT 15:E820.
	 *
	 * subr_module.c says:
	 * "Consumer may safely assume that size value precedes data."
	 * ie: an int32_t immediately precedes SMAP.
	 */
	smapsize = *((u_int32_t *)smapbase - 1);
	smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);

	for (smap = smapbase; smap < smapend; smap++)
		if (!add_smap_entry(smap, physmap, physmap_idxp))
			break;
}
#endif /* !PC98 */

static void
basemem_setup(void)
{
	vm_paddr_t pa;
	pt_entry_t *pte;
	int i;

	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
		    basemem);
		basemem = 640;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is a `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE)
		pmap_kenter(KERNBASE + pa, pa);

	/*
	 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
	 * the vm86 page table so that vm86 can scribble on them using
	 * the vm86 map too.  XXX: why 2 ways for this and only 1 way for
	 * page 0, at least as initialized here?
	 */
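	/*
	 * Added note: basemem is in KB, so basemem / 4 is the first 4 KB
	 * page index above base memory, and 160 is the page index of
	 * ISA_HOLE_START (0xa0000 >> PAGE_SHIFT).
	 */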
	pte = (pt_entry_t *)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
}

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
#ifdef PC98
static void
getmemsize(int first)
{
	int off, physmap_idx, pa_indx, da_indx;
	u_long physmem_tunable, memtest;
	vm_paddr_t physmap[PHYSMAP_SIZE];
	pt_entry_t *pte;
	quad_t dcons_addr, dcons_size;
	int i;
	int pg_n;
	u_int extmem;
	u_int under16;
	vm_paddr_t pa;

	bzero(physmap, sizeof(physmap));

	/* XXX - some EPSON machines can't use PG_N */
	pg_n = PG_N;
	if (pc98_machine_type & M_EPSON_PC98) {
		switch (epson_machine_id) {
#ifdef WB_CACHE
		default:
#endif
		case EPSON_PC486_HX:
		case EPSON_PC486_HG:
		case EPSON_PC486_HA:
			pg_n = 0;
			break;
		}
	}

	under16 = pc98_getmemsize(&basemem, &extmem);
	basemem_setup();

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1]);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	/*
	 * By default keep the memtest enabled.  Use a general name so that
	 * one could eventually do more with the code than just disable it.
	 */
	memtest = 1;
	TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);

1866 * We need to divide chunk if Maxmem is larger than 16MB and
1867 * under 16MB area is not full of memory.
1868 * (1) system area (15-16MB region) is cut off
1869 * (2) extended memory is only over 16MB area (ex. Melco "HYPERMEMORY")
1870 */
1871 if ((under16 != 16 * 1024) && (extmem > 15 * 1024)) {
1872 /* 15M - 16M region is cut off, so need to divide chunk */
		physmap[physmap_idx + 1] = under16 * 1024;
		physmap_idx += 2;
		physmap[physmap_idx] = 0x1000000;
		physmap[physmap_idx + 1] = physmap[2] + extmem * 1024;
	}

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];
	pte = CMAP3;

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr = (int *)CADDR3;

			full = FALSE;
			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= KERNLOAD && pa < first)
				goto do_dump_avail;

			/*
			 * block out dcons buffer
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = FALSE;
			if (memtest == 0)
				goto skip_memtest;

			/*
			 * map page into kernel: valid, read/write,
			 * non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | pg_n;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = TRUE;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = TRUE;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = TRUE;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = TRUE;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

skip_memtest:
			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one page higher than the
			 * last valid page, making the range >= start and
			 * < end.
			 * If we're also doing a speculative memory test
			 * and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = TRUE;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == DUMP_AVAIL_ARRAY_END) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa;	/* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	*pte = 0;
	invltlb();

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(msgbufsize) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(msgbufsize);

	/* Map the message buffer. */
	for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
		    off);
}
#else /* PC98 */
static void
getmemsize(int first)
{
	int has_smap, off, physmap_idx, pa_indx, da_indx;
	u_long memtest;
	vm_paddr_t physmap[PHYSMAP_SIZE];
	pt_entry_t *pte;
	quad_t dcons_addr, dcons_size, physmem_tunable;
	int hasbrokenint12, i, res;
	u_int extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_paddr_t pa;
	struct bios_smap *smap, *smapbase;
	caddr_t kmdp;

	has_smap = 0;
#ifdef XBOX
	if (arch_i386_is_xbox) {
		/*
		 * We queried the memory size before, so chop off 4MB for
		 * the framebuffer and inform the OS of this.
		 */
		physmap[0] = 0;
		physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE;
		physmap_idx = 0;
		goto physmap_done;
	}
#endif
	bzero(&vmf, sizeof(vmf));
	bzero(physmap, sizeof(physmap));
	basemem = 0;

	/*
	 * Check if the loader supplied an SMAP memory map.  If so,
	 * use that and do not make any VM86 calls.
	 */
	physmap_idx = 0;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf32 kernel");
	smapbase = (struct bios_smap *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase != NULL) {
		add_smap_entries(smapbase, physmap, &physmap_idx);
		has_smap = 1;
		goto have_smap;
	}

	/*
	 * Some newer BIOSes have a broken INT 12H implementation
	 * which causes a kernel panic immediately.  In this case, we
	 * need to use the SMAP to determine the base memory size.
	 */
	hasbrokenint12 = 0;
	TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
	if (hasbrokenint12 == 0) {
		/* Use INT12 to determine base memory size. */
		vm86_intcall(0x12, &vmf);
		basemem = vmf.vmf_ax;
		basemem_setup();
	}

	/*
	 * Fetch the memory map with INT 15:E820.  Map page 1 R/W into
	 * the kernel page table so we can use it as a buffer.  The
	 * kernel will unmap this page later.
	 */
	pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	res = vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
	KASSERT(res != 0, ("vm86_getptr() failed: address not found"));

	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		has_smap = 1;
		if (!add_smap_entry(smap, physmap, &physmap_idx))
			break;
	} while (vmf.vmf_ebx != 0);
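	/*
	 * Each INT 15:E820 call returns a single map entry; the BIOS
	 * hands back a continuation value in %ebx (zero after the final
	 * entry) and echoes the SMAP signature in %eax on success, which
	 * is what the loop above keys on.
	 */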

have_smap:
	/*
	 * If we didn't fetch the "base memory" size from INT12,
	 * figure it out from the SMAP (or just guess).
	 */
	if (basemem == 0) {
		for (i = 0; i <= physmap_idx; i += 2) {
			if (physmap[i] == 0x00000000) {
				basemem = physmap[i + 1] / 1024;
				break;
			}
		}

		/* XXX: If we couldn't find basemem from SMAP, just guess. */
		if (basemem == 0)
			basemem = 640;
		basemem_setup();
	}

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed to find an SMAP, figure out the extended
	 * memory size.  We will then build a simple memory map with
	 * two segments, one for "base memory" and the second for
	 * "extended memory".  Note that "extended memory" starts at a
	 * physical address of 1MB and that both basemem and extmem
	 * are in units of 1KB.
	 *
	 * First, try to fetch the extended memory size via INT 15:E801.
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
		/*
		 * If INT15:E801 fails, this is our last ditch effort
		 * to determine the extended memory size.  Currently
		 * we prefer the RTC value over INT15:88.
		 */
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}
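	/*
	 * INT 15:E801 conventionally reports the memory between 1MB and
	 * 16MB in 1KB units (%cx) and the memory above 16MB in 64KB
	 * units (%dx), hence the cx + dx * 64 combination above.
	 */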

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1]);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 *
	 * This is especially confusing when it is much larger than the
	 * memory size and is displayed as "realmem".
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_QUAD_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	/*
	 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
	 * the amount of memory in the system.
	 */
	if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
		Maxmem = atop(physmap[physmap_idx + 1]);

	/*
	 * By default enable the memory test on real hardware, and disable
	 * it if we appear to be running in a VM.  This avoids touching all
	 * pages unnecessarily, which doesn't matter on real hardware but is
	 * bad for shared VM hosts.  Use a general name so that
	 * one could eventually do more with the code than just disable it.
	 */
	memtest = (vm_guest > VM_GUEST_NO) ? 0 : 1;
	TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];
	pte = CMAP3;
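	/*
	 * phys_avail[] and dump_avail[] are built as start/end pairs:
	 * an even index holds the first byte of a usable chunk, the
	 * following odd index holds the first byte past it, and unused
	 * trailing entries remain zero to terminate the list.
	 */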

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr = (int *)CADDR3;

			full = FALSE;
			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= KERNLOAD && pa < first)
				goto do_dump_avail;

			/*
			 * block out dcons buffer
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = FALSE;
			if (memtest == 0)
				goto skip_memtest;

			/*
			 * map page into kernel: valid, read/write,
			 * non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = TRUE;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = TRUE;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = TRUE;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = TRUE;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;
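			/*
			 * The patterns above are written through a PG_N
			 * (cache disabled) mapping, so each store should
			 * reach the actual DRAM cells instead of being
			 * absorbed by the cache, and the volatile casts
			 * keep the compiler from optimizing away the
			 * read-back comparisons.
			 */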

skip_memtest:
			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one page higher than the
			 * last valid page, making the range >= start and
			 * < end.
			 * If we're also doing a speculative memory test
			 * and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = TRUE;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == DUMP_AVAIL_ARRAY_END) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa;	/* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	*pte = 0;
	invltlb();

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(msgbufsize) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(msgbufsize);

	/* Map the message buffer. */
	for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
		    off);
}
#endif /* PC98 */

static void
i386_kdb_init(void)
{
#ifdef DDB
	db_fetch_ksymtab(bootinfo.bi_symtab, bootinfo.bi_esymtab);
#endif
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}

register_t
init386(int first)
{
	struct gate_descriptor *gdp;
	int gsel_tss, metadata_missing, x, pa;
	struct pcpu *pc;
	struct xstate_hdr *xhdr;
	caddr_t kmdp;
	int late_console;

	thread0.td_kstack = proc0kstack;
	thread0.td_kstack_pages = TD0_KSTACK_PAGES;

	/*
	 * This may be done better later if it gets more high level
	 * components in it. If so just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

#ifdef PC98
	/*
	 * Initialize DMAC
	 */
	pc98_init_dmac();
#endif

	metadata_missing = 0;
	if (bootinfo.bi_modulep) {
		preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		metadata_missing = 1;
	}

	if (bootinfo.bi_envp != 0)
		init_static_kenv((char *)bootinfo.bi_envp + KERNBASE, 0);
	else
		init_static_kenv(NULL, 0);

	identify_hypervisor();

	/* Init basic tunables, hz etc */
	init_param1();

	/*
	 * Make gdt memory segments.  All segments cover the full 4GB
	 * of address space and permissions are enforced at page level.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);

	pc = &__pcpu[0];
	gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

	for (x = 0; x < NGDT; x++)
		ssdtosd(&gdt_segs[x], &gdt[x].sd);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) gdt;
	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
	lgdt(&r_gdt);

	pcpu_init(pc, 0, sizeof(struct pcpu));
	for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
		pmap_kenter(pa + KERNBASE, pa);
	dpcpu_init((void *)(first + KERNBASE), 0);
	first += DPCPU_SIZE;
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	/* Non-late cninit() and printf() can be moved up to here. */

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS |
	    MTX_NOPROFILE);

	/* make ldt memory segments */
	ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
	for (x = 0; x < nitems(ldt_segs); x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
#ifdef KDTRACE_HOOKS
	setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
#endif
#ifdef XENHVM
	setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
#endif

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);
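	/*
	 * Note the gate type distinction above: SDT_SYS386TGT (trap gate)
	 * leaves interrupts enabled on entry, while SDT_SYS386IGT
	 * (interrupt gate) clears PSL_I, which is why vectors such as
	 * debug, NMI and page fault use interrupt gates.
	 */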

#ifdef XBOX
	/*
	 * The following code queries the PCI ID of device 0:0:0.  For the
	 * XBOX this should be 0x10de / 0x02a5.
	 *
	 * This is exactly what Linux does.
	 */
	outl(0xcf8, 0x80000000);
	if (inl(0xcfc) == 0x02a510de) {
		arch_i386_is_xbox = 1;
		pic16l_setled(XBOX_LED_GREEN);

		/*
		 * We are an XBOX, but we may have either 64MB or 128MB of
		 * memory.  The PCI host bridge should be programmed for
		 * this, so we just query it.
		 */
		outl(0xcf8, 0x80000084);
		arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
	}
#endif /* XBOX */

	/*
	 * Initialize the clock before the console so that console
	 * initialization can use DELAY().
	 */
	clock_init();

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */
	initializecpucache();

	/* pointer to selector slot for %fs/%gs */
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
#if defined(PAE) || defined(PAE_TABLES)
	dblfault_tss.tss_cr3 = (int)IdlePDPT;
#else
	dblfault_tss.tss_cr3 = (int)IdlePTD;
#endif
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	/* Initialize the tss (except for the final esp0) early for vm86. */
	PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);
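	/*
	 * From this point the CPU loads tss_esp0/tss_ss0 from this TSS on
	 * every user-to-kernel privilege transition, so the TSS has to be
	 * valid before the first vm86 call or trap.
	 */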

	/* Initialize the PIC early for vm86 calls. */
#ifdef DEV_ISA
#ifdef DEV_ATPIC
#ifndef PC98
	elcr_probe();
#endif
	atpic_startup();
#else
	/* Reset and mask the atpics and leave them shut down. */
	atpic_reset();

	/*
	 * Point the ICU spurious interrupt vectors at the APIC spurious
	 * interrupt handler.
	 */
	setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
#endif
#endif

	/*
	 * The console and kdb should be initialized even earlier than here,
	 * but some console drivers don't work until after getmemsize().
	 * Default to late console initialization to support these drivers.
	 * This loses mainly printf()s in getmemsize() and early debugging.
	 */
	late_console = 1;
	TUNABLE_INT_FETCH("debug.late_console", &late_console);
	if (!late_console) {
		cninit();
		i386_kdb_init();
	}

	kmdp = preload_search_by_type("elf kernel");
	link_elf_ireloc(kmdp);

	vm86_initialize();
	getmemsize(first);
	init_param2(physmem);

	/* now running on new page tables, configured, and u/iom is accessible */

	if (late_console)
		cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

	if (late_console)
		i386_kdb_init();

	msgbufinit(msgbufp, msgbufsize);
	npxinit(true);
	/*
	 * Set up thread0 pcb after npxinit calculated pcb + fpu save
	 * area size.  Zero out the extended state header in fpu save
	 * area.
	 */
	thread0.td_pcb = get_pcb_td(&thread0);
	thread0.td_pcb->pcb_save = get_pcb_user_save_td(&thread0);
	bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
		    1);
		xhdr->xstate_bv = xsave_mask;
	}
	PCPU_SET(curpcb, thread0.td_pcb);
	/* Move esp0 in the tss to its final place. */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	PCPU_SET(common_tss.tss_esp0, (vm_offset_t)thread0.td_pcb - 16);
	gdt[GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;	/* clear busy bit */
	ltr(gsel_tss);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;
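	/*
	 * This LDT call gate is what lets user code enter the kernel with
	 * the traditional SysV/iBCS2 "lcall" syscall mechanism; gd_stkcpy
	 * is the count of argument words the CPU copies from the user
	 * stack to the kernel stack during the gate transition.
	 */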

	/* XXX does this work? */
	/* XXX yes! */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
#if defined(PAE) || defined(PAE_TABLES)
	thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
#else
	thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
#endif
	thread0.td_pcb->pcb_ext = 0;
	thread0.td_frame = &proc0_tf;

	cpu_probe_amdc1e();

#ifdef FDT
	x86_init_fdt();
#endif

	/* Location of kernel stack for locore */
	return ((register_t)thread0.td_pcb);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}

#ifndef PC98
static int
smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	struct bios_smap *smapbase;
	struct bios_smap_xattr smap;
	caddr_t kmdp;
	uint32_t *smapattr;
	int count, error, i;

	/* Retrieve the system memory map from the loader. */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf32 kernel");
	smapbase = (struct bios_smap *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase == NULL)
		return (0);
	smapattr = (uint32_t *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
	count = *((u_int32_t *)smapbase - 1) / sizeof(*smapbase);
	error = 0;
	for (i = 0; i < count; i++) {
		smap.base = smapbase[i].base;
		smap.length = smapbase[i].length;
		smap.type = smapbase[i].type;
		if (smapattr != NULL)
			smap.xattr = smapattr[i];
		else
			smap.xattr = 0;
		error = SYSCTL_OUT(req, &smap, sizeof(smap));
	}
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, smap, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
    smap_sysctl_handler, "S,bios_smap_xattr", "Raw BIOS SMAP data");
#endif /* !PC98 */

void
spinlock_enter(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		flags = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_flags = flags;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	critical_exit();
	flags = td->td_md.md_saved_flags;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(flags);
}
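
/*
 * Note the pairing above: only the outermost spinlock_enter() saves the
 * interrupt state and only the matching outermost spinlock_exit()
 * restores it; nested sections just adjust the per-thread count.
 */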

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	GIANT_REQUIRED;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	tmp = kmem_malloc(kernel_arena, PAGE_SIZE * 2, M_WAITOK | M_ZERO);
	if (tmp == 0)
		panic("kmem_malloc returned 0");

	/* Put the problematic entry (#6) at the end of the lower page. */
	new_idt = (struct gate_descriptor *)
	    (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (u_int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
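	/*
	 * Background: the F00F erratum is a locked cmpxchg8b with a
	 * register operand (byte sequence F0 0F C7 C8) that can hang
	 * affected Pentiums while the fault it raises is being delivered.
	 * Relocating the IDT so that the #UD descriptor sits at the end
	 * of a read-only page is the conventional software workaround;
	 * see the Intel errata for the details of why this breaks the
	 * lockup.
	 */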
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_edi = tf->tf_edi;
	pcb->pcb_esi = tf->tf_esi;
	pcb->pcb_ebp = tf->tf_ebp;
	pcb->pcb_ebx = tf->tf_ebx;
	pcb->pcb_eip = tf->tf_eip;
	pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
	pcb->pcb_gs = rgs();
}

int
ptrace_set_pc(struct thread *td, u_long addr)
{

	td->td_frame->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{

	td->td_frame->tf_eflags |= PSL_T;
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

	td->td_frame->tf_eflags &= ~PSL_T;
	return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	pcb = td->td_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (fill_frame_regs(tp, regs));
}

int
fill_frame_regs(struct trapframe *tp, struct reg *regs)
{

	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	pcb = td->td_pcb;
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{

	KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
	    P_SHOULDSTOP(td->td_proc),
	    ("not suspended thread %p", td));
	npxgetregs(td);
	if (cpu_fxsr)
		npx_fill_fpregs_xmm(&get_pcb_user_save_td(td)->sv_xmm,
		    (struct save87 *)fpregs);
	else
		bcopy(&get_pcb_user_save_td(td)->sv_87, fpregs,
		    sizeof(*fpregs));
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{

	critical_enter();
	if (cpu_fxsr)
		npx_set_fpregs_xmm((struct save87 *)fpregs,
		    &get_pcb_user_save_td(td)->sv_xmm);
	else
		bcopy(fpregs, &get_pcb_user_save_td(td)->sv_87,
		    sizeof(*fpregs));
	npxuserinited(td);
	critical_exit();
	return (0);
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct trapframe *tp;
	struct segment_descriptor *sdp;

	tp = td->td_frame;

	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_esp);
	PROC_UNLOCK(curthread->td_proc);
	mcp->mc_gs = td->td_pcb->pcb_gs;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_es = tp->tf_es;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_edi = tp->tf_edi;
	mcp->mc_esi = tp->tf_esi;
	mcp->mc_ebp = tp->tf_ebp;
	mcp->mc_isp = tp->tf_isp;
	mcp->mc_eflags = tp->tf_eflags;
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_eax = 0;
		mcp->mc_edx = 0;
		mcp->mc_eflags &= ~PSL_C;
	} else {
		mcp->mc_eax = tp->tf_eax;
		mcp->mc_edx = tp->tf_edx;
	}
	mcp->mc_ebx = tp->tf_ebx;
	mcp->mc_ecx = tp->tf_ecx;
	mcp->mc_eip = tp->tf_eip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_esp = tp->tf_esp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp, NULL, 0);
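	/*
	 * Rebuild the flat fs/gs base addresses from the split base
	 * fields of the saved segment descriptors (the low 24 bits in
	 * sd_lobase, the high 8 bits in sd_hibase).
	 */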
	sdp = &td->td_pcb->pcb_fsd;
	mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
	mcp->mc_flags = 0;
	mcp->mc_xfpustate = 0;
	mcp->mc_xfpustate_len = 0;
	bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
	return (0);
}

/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tp;
	char *xfpustate;
	int eflags, ret;

	tp = td->td_frame;
	if (mcp->mc_len != sizeof(*mcp) ||
	    (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
		return (EINVAL);
	eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
	    (tp->tf_eflags & ~PSL_USERCHANGE);
	if (mcp->mc_flags & _MC_HASFPXSTATE) {
		if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
		    sizeof(union savefpu))
			return (EINVAL);
		xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
		ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
		    mcp->mc_xfpustate_len);
		if (ret != 0)
			return (ret);
	} else
		xfpustate = NULL;
	ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
	if (ret != 0)
		return (ret);
	tp->tf_fs = mcp->mc_fs;
	tp->tf_es = mcp->mc_es;
	tp->tf_ds = mcp->mc_ds;
	tp->tf_edi = mcp->mc_edi;
	tp->tf_esi = mcp->mc_esi;
	tp->tf_ebp = mcp->mc_ebp;
	tp->tf_ebx = mcp->mc_ebx;
	tp->tf_edx = mcp->mc_edx;
	tp->tf_ecx = mcp->mc_ecx;
	tp->tf_eax = mcp->mc_eax;
	tp->tf_eip = mcp->mc_eip;
	tp->tf_eflags = eflags;
	tp->tf_esp = mcp->mc_esp;
	tp->tf_ss = mcp->mc_ss;
	td->td_pcb->pcb_gs = mcp->mc_gs;
	return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
    size_t xfpusave_len)
{
	size_t max_len, len;

	mcp->mc_ownedfp = npxgetregs(td);
	bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
	    sizeof(mcp->mc_fpstate));
	mcp->mc_fpformat = npxformat();
	if (!use_xsave || xfpusave_len == 0)
		return;
	max_len = cpu_max_ext_state_size - sizeof(union savefpu);
	len = xfpusave_len;
	if (len > max_len) {
		len = max_len;
		/* Zero the tail of the buffer we will not fill. */
		bzero(xfpusave + max_len, xfpusave_len - max_len);
	}
	mcp->mc_flags |= _MC_HASFPXSTATE;
	mcp->mc_xfpustate_len = len;
	bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
}
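
/*
 * With XSAVE in use, the extended state lives immediately after the
 * legacy save area, which is why the copy above starts at
 * "get_pcb_user_save_td(td) + 1".
 */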

static int
set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate,
    size_t xfpustate_len)
{
	int error;

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
	    mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
		error = 0;
	} else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
		error = npxsetregs(td, (union savefpu *)&mcp->mc_fpstate,
		    xfpustate, xfpustate_len);
	} else
		return (EINVAL);
	return (error);
}

static void
fpstate_drop(struct thread *td)
{

	KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
	critical_enter();
	if (PCPU_GET(fpcurthread) == td)
		npxdrop();
	/*
	 * XXX force a full drop of the npx.  The above only drops it if we
	 * owned it.  npxgetregs() has the same bug in the !cpu_fxsr case.
	 *
	 * XXX I don't much like npxgetregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of npxgetregs()... perhaps we just
	 * have too many layers.
	 */
	curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
	    PCB_NPXUSERINITDONE);
	critical_exit();
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[4] = rdr4();
		dbregs->dr[5] = rdr5();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[4] = 0;
		dbregs->dr[5] = 0;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr4(dbregs->dr[4]);
		load_dr5(dbregs->dr[5]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP.
		 */
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
		}
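		/*
		 * Each of the four breakpoints has a 2-bit access type
		 * and a 2-bit length field in %dr7; the 0x02 patterns
		 * rejected above are encodings that are undefined or
		 * unsupported here.
		 */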

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within
		 * the process's address space.  If a process could do this,
		 * it could halt the system by setting a breakpoint in the
		 * kernel (if ddb was enabled).  Thus, we need to check to
		 * make sure that no breakpoints are being enabled for
		 * addresses outside the process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0 otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all GE and LE bits in the dr7 register are zero,
		 * thus the trap couldn't have been caused by the
		 * hardware debug registers
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set meaning this
		 * trap was not caused by any of the debug registers
		 */
		return 0;
	}

	/*
	 * at least one of the breakpoints was hit, check to see
	 * which ones and if any of them are user space addresses
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}

#ifdef KDB

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as inline functions, thus cannot be called from the
 * debugger.
 */

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);

u_char
inb_(u_short port)
{
	return inb(port);
}

void
outb_(u_short port, u_char data)
{
	outb(port, data);
}

#endif /* KDB */