/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/amd64/amd64/machdep.c 298308 2016-04-19 23:41:46Z pfg $");

#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_perfmon.h"
#include "opt_platform.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/efi.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP
#include <sys/smp.h>
#endif
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#include <net/netisr.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#include <machine/tss.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef FDT
#include <x86/fdt.h>
#endif

#ifdef DEV_ATPIC
#include <x86/isa/icu.h>
#else
#include <x86/apicvar.h>
#endif

#include <isa/isareg.h>
#include <isa/rtc.h>
#include <x86/init.h>

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern u_int64_t hammer_time(u_int64_t, u_int64_t);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

static void cpu_startup(void *);
static void get_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpusave, size_t xfpusave_len);
static int set_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpustate, size_t xfpustate_len);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Preload data parse function */
static caddr_t native_parse_preload_data(u_int64_t);

/* Native function to fetch and parse the e820 map */
static void native_parse_memmap(caddr_t, vm_paddr_t *, int *);

/* Default init_ops implementation. */
struct init_ops init_ops = {
	.parse_preload_data = native_parse_preload_data,
	.early_clock_source_init = i8254_init,
	.early_delay = i8254_delay,
	.parse_memmap = native_parse_memmap,
#ifdef SMP
	.mp_bootaddress = mp_bootaddress,
	.start_all_aps = native_start_all_aps,
#endif
	.msi_init = msi_init,
};

/*
 * The file "conf/ldscript.amd64" defines the symbol "kernphys". Its value is
 * the physical address at which the kernel is loaded.
 */
extern char kernphys[];

struct msgbuf *msgbufp;

/*
 * Physical address of the EFI System Table. Stashed from the metadata hints
 * passed into the kernel and used by the EFI code to call runtime services.
 */
vm_paddr_t efi_systbl;

/* Intel ICH registers */
#define	ICH_PMBASE	0x400
#define	ICH_SMI_EN	(ICH_PMBASE + 0x30)

int	_udatasel, _ucodesel, _ucode32sel, _ufssel, _ugssel;

int cold = 1;

long Maxmem = 0;
long realmem = 0;

/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so 0 0 can signal end of chunks */
#define	PHYS_AVAIL_ARRAY_END	(nitems(phys_avail) - 2)
#define	DUMP_AVAIL_ARRAY_END	(nitems(dump_avail) - 2)

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct region_descriptor r_gdt, r_idt;

struct pcpu __pcpu[MAXCPU];

struct mtx icu_lock;

struct mem_range_softc mem_range_softc;

struct mtx dt_lock;	/* lock for GDT and LDT */

void (*vmm_resume_p)(void);

static void
cpu_startup(dummy)
	void *dummy;
{
	uintmax_t memsize;
	char *sysenv;

	/*
	 * On MacBooks, we need to disallow the legacy USB circuit to
	 * generate an SMI# because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable register) of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = kern_getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook4,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif

	/*
	 * Display physical memory if SMBIOS reports reasonable amount.
	 */
	memsize = 0;
	sysenv = kern_getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
		freeenv(sysenv);
	}
	if (memsize < ptoa((uintmax_t)vm_cnt.v_free_count))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
	realmem = atop(memsize);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_cnt.v_free_count),
	    ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	cpu_setregs();
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by call
 * to sigreturn routine below. After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct pcb *pcb;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	char *xfpusave;
	size_t xfpusave_len;
	int sig;
	int oonstack;

	td = curthread;
	pcb = td->td_pcb;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_rsp);

	if (cpu_max_ext_state_size > sizeof(struct savefpu) && use_xsave) {
		xfpusave_len = cpu_max_ext_state_size - sizeof(struct savefpu);
		xfpusave = __builtin_alloca(xfpusave_len);
	} else {
		xfpusave_len = 0;
		xfpusave = NULL;
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
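	/*
	 * The mcontext register block, starting at mc_rdi, has the same
	 * layout as struct trapframe, so the frame can be copied wholesale.
	 */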
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
	fpstate_drop(td);
	sf.sf_uc.uc_mcontext.mc_fsbase = pcb->pcb_fsbase;
	sf.sf_uc.uc_mcontext.mc_gsbase = pcb->pcb_gsbase;
	bzero(sf.sf_uc.uc_mcontext.mc_spare,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = (char *)td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
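		/* Skip the 128-byte red zone the ABI reserves below %rsp. */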
		sp = (char *)regs->tf_rsp - 128;
	if (xfpusave != NULL) {
		sp -= xfpusave_len;
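		/* The extended FPU state area must be 64-byte aligned for XSAVE. */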
		sp = (char *)((unsigned long)sp & ~0x3Ful);
		sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
	}
	sp -= sizeof(struct sigframe);
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);

	/* Build the argument list for the signal handler. */
	regs->tf_rdi = sig;			/* arg 1 in %rdi */
	regs->tf_rdx = (register_t)&sfp->sf_uc;	/* arg 3 in %rdx */
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		regs->tf_rsi = (register_t)&sfp->sf_si;	/* arg 2 in %rsi */
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
		regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
	} else {
		/* Old FreeBSD-style arguments. */
		regs->tf_rsi = ksi->ksi_code;	/* arg 2 in %rsi */
		regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    (xfpusave != NULL && copyout(xfpusave,
	    (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
	    != 0)) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_rsp = (long)sfp;
	regs->tf_rip = p->p_sysent->sv_sigcode_base;
	regs->tf_rflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_ss = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _ufssel;
	regs->tf_gs = _ugssel;
	regs->tf_flags = TF_HASSEGS;
	set_pcb_flags(pcb, PCB_FULL_IRET);
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to cleanup state after a signal
 * has been taken. Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
int
sys_sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct pcb *pcb;
	struct proc *p;
	struct trapframe *regs;
	ucontext_t *ucp;
	char *xfpustate;
	size_t xfpustate_len;
	long rflags;
	int cs, error, ret;
	ksiginfo_t ksi;

	pcb = td->td_pcb;
	p = td->td_proc;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0) {
		uprintf("pid %d (%s): sigreturn copyin failed\n",
		    p->p_pid, td->td_name);
		return (error);
	}
	ucp = &uc;
	if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
		uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
		    td->td_name, ucp->uc_mcontext.mc_flags);
		return (EINVAL);
	}
	regs = td->td_frame;
	rflags = ucp->uc_mcontext.mc_rflags;
	/*
	 * Don't allow users to change privileged or reserved flags.
	 */
	if (!EFL_SECURE(rflags, regs->tf_rflags)) {
		uprintf("pid %d (%s): sigreturn rflags = 0x%lx\n", p->p_pid,
		    td->td_name, rflags);
		return (EINVAL);
	}

	/*
	 * Don't allow users to load a valid privileged %cs. Let the
	 * hardware check for invalid selectors, excess privilege in
	 * other selectors, invalid %eip's and invalid %esp's.
	 */
	cs = ucp->uc_mcontext.mc_cs;
	if (!CS_SECURE(cs)) {
		uprintf("pid %d (%s): sigreturn cs = 0x%x\n", p->p_pid,
		    td->td_name, cs);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = BUS_OBJERR;
		ksi.ksi_trapno = T_PROTFLT;
		ksi.ksi_addr = (void *)regs->tf_rip;
		trapsignal(td, &ksi);
		return (EINVAL);
	}

	if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
		xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
		if (xfpustate_len > cpu_max_ext_state_size -
		    sizeof(struct savefpu)) {
			uprintf("pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
			    p->p_pid, td->td_name, xfpustate_len);
			return (EINVAL);
		}
		xfpustate = __builtin_alloca(xfpustate_len);
		error = copyin((const void *)uc.uc_mcontext.mc_xfpustate,
		    xfpustate, xfpustate_len);
		if (error != 0) {
			uprintf(
			    "pid %d (%s): sigreturn copying xfpustate failed\n",
			    p->p_pid, td->td_name);
			return (error);
		}
	} else {
		xfpustate = NULL;
		xfpustate_len = 0;
	}
	ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate, xfpustate_len);
	if (ret != 0) {
		uprintf("pid %d (%s): sigreturn set_fpcontext err %d\n",
		    p->p_pid, td->td_name, ret);
		return (ret);
	}
	bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));
	pcb->pcb_fsbase = ucp->uc_mcontext.mc_fsbase;
	pcb->pcb_gsbase = ucp->uc_mcontext.mc_gsbase;

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	set_pcb_flags(pcb, PCB_FULL_IRET);
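	/*
	 * EJUSTRETURN tells the syscall return path not to modify the
	 * register state we just restored.
	 */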
	return (EJUSTRETURN);
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	return sys_sigreturn(td, (struct sigreturn_args *)uap);
}
#endif

/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	mtx_lock(&dt_lock);
	if (td->td_proc->p_md.md_ldt != NULL)
		user_ldt_free(td);
	else
		mtx_unlock(&dt_lock);

	pcb->pcb_fsbase = 0;
	pcb->pcb_gsbase = 0;
	clear_pcb_flags(pcb, PCB_32BIT);
	pcb->pcb_initial_fpucw = __INITIAL_FPUCW__;
	set_pcb_flags(pcb, PCB_FULL_IRET);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_rip = imgp->entry_addr;
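	/*
	 * Choose an initial %rsp for which %rsp + 8 is a multiple of 16,
	 * the alignment a function entry point expects.
	 */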
	regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
	regs->tf_rdi = stack;		/* argv */
	regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _ufssel;
	regs->tf_gs = _ugssel;
	regs->tf_flags = TF_HASSEGS;
	td->td_retval[1] = 0;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == curpcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		clear_pcb_flags(pcb, PCB_DBREGS);
	}

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);
}

void
cpu_setregs(void)
{
	register_t cr0;

	cr0 = rcr0();
	/*
	 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the
	 * BSP. See the comments there about why we set them.
	 */
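	/*
	 * CR0_WP makes read-only pages trap writes even from the kernel;
	 * CR0_AM allows alignment checking in user mode via PSL_AC.
	 */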
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
}

/*
 * Initialize amd64 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor tables */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */

static char dblfault_stack[PAGE_SIZE] __aligned(16);

static char nmi0_stack[PAGE_SIZE] __aligned(16);
CTASSERT(sizeof(struct nmi_pcpu) == 16);

struct amd64tss common_tss[MAXCPU];

/*
 * Software prototypes -- in more palatable form.
 *
 * Keep GUFS32, GUGS32, GUCODE32 and GUDATA at the same
 * slots as corresponding segments for i386 kernel.
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GNULL2_SEL	1 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GUFS32_SEL	2 32 bit %fs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GUGS32_SEL	3 32 bit %gs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_long = 1,
	.ssd_def32 = 0,
	.ssd_gran = 1 },
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_long = 1,
	.ssd_def32 = 0,
	.ssd_gran = 1 },
/* GUCODE32_SEL	6 32 bit Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GUDATA_SEL	7 32/64 bit Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1 },
/* GUCODE_SEL	8 64 bit Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_long = 1,
	.ssd_def32 = 0,
	.ssd_gran = 1 },
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct amd64tss) + IOPERM_BITMAP_SIZE - 1,
	.ssd_type = SDT_SYSTSS,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* Actually, the TSS is a system descriptor which is double size */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GUSERLDT_SEL	11 LDT Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
/* GUSERLDT_SEL	12 LDT Descriptor, double size */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_long = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0 },
};

void
setidt(int idx, inthand_t *func, int typ, int dpl, int ist)
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (uintptr_t)func;
	ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	ip->gd_ist = ist;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((uintptr_t)func)>>16 ;
}

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(dblfault),
#ifdef KDTRACE_HOOKS
	IDTVEC(dtrace_ret),
#endif
#ifdef XENHVM
	IDTVEC(xen_intr_upcall),
#endif
	IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}
}

/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
{
	struct {
		uint16_t limit;
		uint64_t base;
	} __packed idtr, gdtr;
	uint16_t ldt, tr;

	__asm __volatile("sidt %0" : "=m" (idtr));
	db_printf("idtr\t0x%016lx/%04x\n",
	    (u_long)idtr.base, (u_int)idtr.limit);
	__asm __volatile("sgdt %0" : "=m" (gdtr));
	db_printf("gdtr\t0x%016lx/%04x\n",
	    (u_long)gdtr.base, (u_int)gdtr.limit);
	__asm __volatile("sldt %0" : "=r" (ldt));
	db_printf("ldtr\t0x%04x\n", ldt);
	__asm __volatile("str %0" : "=r" (tr));
	db_printf("tr\t0x%04x\n", tr);
	db_printf("cr0\t0x%016lx\n", rcr0());
	db_printf("cr2\t0x%016lx\n", rcr2());
	db_printf("cr3\t0x%016lx\n", rcr3());
	db_printf("cr4\t0x%016lx\n", rcr4());
	if (rcr4() & CR4_XSAVE)
		db_printf("xcr0\t0x%016lx\n", rxcr(0));
	db_printf("EFER\t0x%016lx\n", rdmsr(MSR_EFER));
	if (cpu_feature2 & (CPUID2_VMX | CPUID2_SMX))
		db_printf("FEATURES_CTL\t%016lx\n",
		    rdmsr(MSR_IA32_FEATURE_CONTROL));
	db_printf("DEBUG_CTL\t0x%016lx\n", rdmsr(MSR_DEBUGCTLMSR));
	db_printf("PAT\t0x%016lx\n", rdmsr(MSR_PAT));
	db_printf("GSBASE\t0x%016lx\n", rdmsr(MSR_GSBASE));
}

DB_SHOW_COMMAND(dbregs, db_show_dbregs)
{

	db_printf("dr0\t0x%016lx\n", rdr0());
	db_printf("dr1\t0x%016lx\n", rdr1());
	db_printf("dr2\t0x%016lx\n", rdr2());
	db_printf("dr3\t0x%016lx\n", rdr3());
	db_printf("dr6\t0x%016lx\n", rdr6());
	db_printf("dr7\t0x%016lx\n", rdr7());
}
#endif

void
sdtossd(sd, ssd)
	struct user_segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{

	ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type = sd->sd_type;
	ssd->ssd_dpl = sd->sd_dpl;
	ssd->ssd_p = sd->sd_p;
	ssd->ssd_long = sd->sd_long;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran = sd->sd_gran;
}

void
ssdtosd(ssd, sd)
	struct soft_segment_descriptor *ssd;
	struct user_segment_descriptor *sd;
{

	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
	sd->sd_type = ssd->ssd_type;
	sd->sd_dpl = ssd->ssd_dpl;
	sd->sd_p = ssd->ssd_p;
	sd->sd_long = ssd->ssd_long;
	sd->sd_def32 = ssd->ssd_def32;
	sd->sd_gran = ssd->ssd_gran;
}

void
ssdtosyssd(ssd, sd)
	struct soft_segment_descriptor *ssd;
	struct system_segment_descriptor *sd;
{

	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
	sd->sd_type = ssd->ssd_type;
	sd->sd_dpl = ssd->ssd_dpl;
	sd->sd_p = ssd->ssd_p;
	sd->sd_gran = ssd->ssd_gran;
}

#if !defined(DEV_ATPIC) && defined(DEV_ISA)
#include <isa/isavar.h>
#include <isa/isareg.h>
/*
 * Return a bitmap of the current interrupt requests. This is 8259-specific
 * and is only suitable for use at probe time.
 * This is only here to pacify sio. It is NOT FATAL if this doesn't work.
 * It shouldn't be here. There should probably be an APIC centric
 * implementation in the apic driver code, if at all.
 */
intrmask_t
isa_irq_pending(void)
{
	u_char irr1;
	u_char irr2;

	irr1 = inb(IO_ICU1);
	irr2 = inb(IO_ICU2);
	return ((irr2 << 8) | irr1);
}
#endif

u_int basemem;

static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    int *physmap_idxp)
{
	int i, insert_idx, physmap_idx;

	physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

	/*
	 * Find insertion point while checking for overlap. Start off by
	 * assuming the new entry will be added to the end.
	 *
	 * NB: physmap_idx points to the next free slot.
	 */
	insert_idx = physmap_idx;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	physmap_idx += 2;
	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = (physmap_idx - 2); i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;
	return (1);
}

void
bios_add_smap_entries(struct bios_smap *smapbase, u_int32_t smapsize,
    vm_paddr_t *physmap, int *physmap_idx)
{
	struct bios_smap *smap, *smapend;

	smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);

	for (smap = smapbase; smap < smapend; smap++) {
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%016lx len=%016lx\n",
			    smap->type, smap->base, smap->length);

		if (smap->type != SMAP_TYPE_MEMORY)
			continue;

		if (!add_physmap_entry(smap->base, smap->length, physmap,
		    physmap_idx))
			break;
	}
}

#define	efi_next_descriptor(ptr, size) \
	((struct efi_md *)(((uint8_t *) ptr) + size))

static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    int *physmap_idx)
{
	struct efi_md *map, *p;
	const char *type;
	size_t efisz;
	int ndesc, i;

	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode"
	};

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	 */
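	/* Descriptors follow the header, which is padded to a 16-byte boundary. */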
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type <= EFI_MD_TYPE_PALCODE)
				type = types[p->md_type];
			else
				type = "<INVALID>";
			printf("%23s %012lx %12p %08lx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
				printf("UC ");
			if (p->md_attr & EFI_MD_ATTR_WC)
				printf("WC ");
			if (p->md_attr & EFI_MD_ATTR_WT)
				printf("WT ");
			if (p->md_attr & EFI_MD_ATTR_WB)
				printf("WB ");
			if (p->md_attr & EFI_MD_ATTR_UCE)
				printf("UCE ");
			if (p->md_attr & EFI_MD_ATTR_WP)
				printf("WP ");
			if (p->md_attr & EFI_MD_ATTR_RP)
				printf("RP ");
			if (p->md_attr & EFI_MD_ATTR_XP)
				printf("XP ");
			if (p->md_attr & EFI_MD_ATTR_RT)
				printf("RUNTIME");
			printf("\n");
		}

		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			/*
			 * We're allowed to use any entry with these types.
			 */
			break;
		default:
			continue;
		}

		if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
		    physmap, physmap_idx))
			break;
	}
}

static char bootmethod[16] = "";
SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
    "System firmware boot method");

static void
native_parse_memmap(caddr_t kmdp, vm_paddr_t *physmap, int *physmap_idx)
{
	struct bios_smap *smap;
	struct efi_map_header *efihdr;
	u_int32_t size;

	/*
	 * Memory map from INT 15:E820.
	 *
	 * subr_module.c says:
	 * "Consumer may safely assume that size value precedes data."
	 * ie: an int32_t immediately precedes smap.
	 */

	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	smap = (struct bios_smap *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (efihdr == NULL && smap == NULL)
		panic("No BIOS smap or EFI map info from loader!");

	if (efihdr != NULL) {
		add_efi_map_entries(efihdr, physmap, physmap_idx);
		strlcpy(bootmethod, "UEFI", sizeof(bootmethod));
	} else {
		size = *((u_int32_t *)smap - 1);
		bios_add_smap_entries(smap, size, physmap, physmap_idx);
		strlcpy(bootmethod, "BIOS", sizeof(bootmethod));
	}
}

#define	PAGES_PER_GB	(1024 * 1024 * 1024 / PAGE_SIZE)

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(caddr_t kmdp, u_int64_t first)
{
	int i, physmap_idx, pa_indx, da_indx;
	vm_paddr_t pa, physmap[PHYSMAP_SIZE];
	u_long physmem_start, physmem_tunable, memtest;
	pt_entry_t *pte;
	quad_t dcons_addr, dcons_size;
	int page_counter;

	bzero(physmap, sizeof(physmap));
	physmap_idx = 0;

	init_ops.parse_memmap(kmdp, physmap, &physmap_idx);
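	/*
	 * parse_memmap leaves physmap_idx at the next free slot; back it
	 * up so it indexes the last base/end pair.
	 */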
	physmap_idx -= 2;

	/*
	 * Find the 'base memory' segment for SMP
	 */
	basemem = 0;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (physmap[i] <= 0xA0000) {
			basemem = physmap[i + 1] / 1024;
			break;
		}
	}
	if (basemem == 0 || basemem > 640) {
		if (bootverbose)
			printf(
		"Memory map doesn't contain a basemem segment, faking it");
		basemem = 640;
	}

	/*
	 * Make hole for "AP -> long mode" bootstrap code. The
	 * mp_bootaddress vector is only available when the kernel
	 * is configured to support APs and APs for the system start
	 * in 32bit mode (e.g. SMP bare metal).
	 */
	if (init_ops.mp_bootaddress) {
		if (physmap[1] >= 0x100000000)
			panic(
	"Basemem segment is not suitable for AP bootstrap code!");
		physmap[1] = init_ops.mp_bootaddress(physmap[1] / 1024);
	}

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space. It should be
	 * called something like "Maxphyspage". We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
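	/* MAXMEM is specified in kilobytes; Maxmem counts 4 KB pages. */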
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	/*
	 * The boot memory test is disabled by default, as it takes a
	 * significant amount of time on large-memory systems, and is
	 * unfriendly to virtual machines as it unnecessarily touches all
	 * pages.
	 *
	 * A general name is used as the code may be extended to support
	 * additional tests beyond the current "page present" test.
	 */
	memtest = 0;
	TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);

	/*
	 * Don't allow MAXMEM or hw.physmem to extend the amount of memory
	 * in the system.
	 */
	if (Maxmem > atop(physmap[physmap_idx + 1]))
		Maxmem = atop(physmap[physmap_idx + 1]);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(&first);

	/*
	 * Size up each available chunk of physical memory.
	 *
	 * XXX Some BIOSes corrupt low 64KB between suspend and resume.
	 * By default, mask off the first 16 pages unless we appear to be
	 * running in a VM.
	 */
	physmem_start = (vm_guest > VM_GUEST_NO ? 1 : 16) << PAGE_SHIFT;
	TUNABLE_ULONG_FETCH("hw.physmem.start", &physmem_start);
	if (physmap[0] < physmem_start) {
		if (physmem_start < PAGE_SIZE)
			physmap[0] = PAGE_SIZE;
		else if (physmem_start >= physmap[1])
			physmap[0] = round_page(physmap[1] - PAGE_SIZE);
		else
			physmap[0] = round_page(physmem_start);
	}
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];
	pte = CMAP1;

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	page_counter = 0;
	if (memtest != 0)
		printf("Testing system memory");
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr = (int *)CADDR1;

			full = FALSE;
			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= (vm_paddr_t)kernphys && pa < first)
				goto do_dump_avail;

			/*
			 * block out dcons buffer
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = FALSE;
			if (memtest == 0)
				goto skip_memtest;

			/*
			 * Print a "." every GB to show we're making
			 * progress.
			 */
			page_counter++;
			if ((page_counter % PAGES_PER_GB) == 0)
				printf(".");

			/*
			 * map page into kernel: valid, read/write,non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_NC_PWT | PG_NC_PCD;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = TRUE;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = TRUE;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = TRUE;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = TRUE;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

skip_memtest:
			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer. Otherwise start a new chunk.
			 * Note that "end" points one higher than end,
			 * making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we at or past the end, bump up Maxmem
			 * so that we keep going. The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = TRUE;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == DUMP_AVAIL_ARRAY_END) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa; /* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	*pte = 0;
	invltlb();
	if (memtest != 0)
		printf("\n");

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(msgbufsize) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(msgbufsize);

	/* Map the message buffer. */
	msgbufp = (struct msgbuf *)PHYS_TO_DMAP(phys_avail[pa_indx]);
}

static caddr_t
native_parse_preload_data(u_int64_t modulep)
{
	caddr_t kmdp;
	char *envp;
#ifdef DDB
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
#endif

	preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
	preload_bootstrap_relocate(KERNBASE);
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
	if (envp != NULL)
		envp += KERNBASE;
	init_static_kenv(envp, 0);
#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
	db_fetch_ksymtab(ksym_start, ksym_end);
#endif
	efi_systbl = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);

	return (kmdp);
}

u_int64_t
hammer_time(u_int64_t modulep, u_int64_t physfree)
{
	caddr_t kmdp;
	int gsel_tss, x;
	struct pcpu *pc;
	struct nmi_pcpu *np;
	struct xstate_hdr *xhdr;
	u_int64_t msr;
	char *env;
	size_t kstack0_sz;

	/*
	 * This may be done better later if it gets more high level
	 * components in it. If so just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

	kmdp = init_ops.parse_preload_data(modulep);

	/* Init basic tunables, hz etc */
	init_param1();

	thread0.td_kstack = physfree + KERNBASE;
	thread0.td_kstack_pages = kstack_pages;
	kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
	bzero((void *)thread0.td_kstack, kstack0_sz);
	physfree += kstack0_sz;

	/*
	 * make gdt memory segments
	 */
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
		    x != GUSERLDT_SEL && x != (GUSERLDT_SEL) + 1)
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
	gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) gdt;
	lgdt(&r_gdt);
	pc = &__pcpu[0];

	wrmsr(MSR_FSBASE, 0);		/* User value */
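	/* Per-CPU data is addressed through %gs; point GSBASE at the BSP's pcpu. */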
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, 0);		/* User value while in the kernel */

	pcpu_init(pc, 0, sizeof(struct pcpu));
	dpcpu_init((void *)(physfree + KERNBASE), 0);
	physfree += DPCPU_SIZE;
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	PCPU_SET(tssp, &common_tss[0]);
	PCPU_SET(commontssp, &common_tss[0]);
	PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
	PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]);
	PCPU_SET(fs32p, &gdt[GUFS32_SEL]);
	PCPU_SET(gs32p, &gdt[GUGS32_SEL]);

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_DEF);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 2);
	setidt(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0);
	setidt(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1);
	setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0);
#ifdef KDTRACE_HOOKS
	setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYSIGT, SEL_UPL, 0);
#endif
#ifdef XENHVM
	setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall), SDT_SYSIGT, SEL_UPL, 0);
#endif

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (long) idt;
	lidt(&r_idt);

	/*
	 * Initialize the clock before the console so that console
	 * initialization can use DELAY().
	 */
	clock_init();

	/*
	 * Use vt(4) by default for UEFI boot (during the sc(4)/vt(4)
	 * transition).
	 * Once bootblocks have updated, we can test directly for
	 * efi_systbl != NULL here...
	 */
	if (preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP)
	    != NULL)
		vty_set_preferred(VTY_VT);

	identify_cpu();		/* Final stage of CPU initialization */
	initializecpu();	/* Initialize CPU registers */
	initializecpucache();

	/* doublefault stack space, runs on ist1 */
	common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];

	/*
	 * NMI stack, runs on ist2. The pcpu pointer is stored just
	 * above the start of the ist2 stack.
	 */
	np = ((struct nmi_pcpu *) &nmi0_stack[sizeof(nmi0_stack)]) - 1;
	np->np_pcpu = (register_t) pc;
	common_tss[0].tss_ist2 = (long) np;

	/* Set the IO permission bitmap (empty due to tss seg limit) */
	common_tss[0].tss_iobase = sizeof(struct amd64tss) + IOPERM_BITMAP_SIZE;

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
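	/*
	 * MSR_STAR[47:32] holds the kernel %cs/%ss selector base used by
	 * SYSCALL; MSR_STAR[63:48] holds the selector base from which
	 * SYSRET builds the user %cs/%ss.
	 */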
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
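	/* %rflags bits that the CPU clears on kernel entry via SYSCALL. */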
1665 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
1666
1667 getmemsize(kmdp, physfree);
1668 init_param2(physmem);
1669
1670 /* now running on new page tables, configured,and u/iom is accessible */
1671
1672 cninit();
1673
1674 #ifdef DEV_ISA
1675 #ifdef DEV_ATPIC
1676 elcr_probe();
1677 atpic_startup();
1678 #else
1679 /* Reset and mask the atpics and leave them shut down. */
1680 atpic_reset();
1681
1682 /*
1683 * Point the ICU spurious interrupt vectors at the APIC spurious
1684 * interrupt handler.
1685 */
1686 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
1687 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
1688 #endif
1689 #else
1690 #error "have you forgotten the isa device?";
1691 #endif
1692
1693 kdb_init();
1694
1695 #ifdef KDB
1696 if (boothowto & RB_KDB)
1697 kdb_enter(KDB_WHY_BOOTFLAGS,
1698 "Boot flags requested debugger");
1699 #endif
1700
1701 msgbufinit(msgbufp, msgbufsize);
1702 fpuinit();
1703
1704 /*
1705 * Set up thread0 pcb after fpuinit calculated pcb + fpu save
1706 * area size. Zero out the extended state header in fpu save
1707 * area.
1708 */
1709 thread0.td_pcb = get_pcb_td(&thread0);
1710 bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
1711 if (use_xsave) {
1712 xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
1713 1);
1714 xhdr->xstate_bv = xsave_mask;
1715 }
1716 /* make an initial tss so cpu can get interrupt stack on syscall! */
1717 common_tss[0].tss_rsp0 = (vm_offset_t)thread0.td_pcb;
1718 /* Ensure the stack is aligned to 16 bytes */
1719 common_tss[0].tss_rsp0 &= ~0xFul;
1720 PCPU_SET(rsp0, common_tss[0].tss_rsp0);
1721 PCPU_SET(curpcb, thread0.td_pcb);
1722
1723 /* transfer to user mode */
1724
1725 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
1726 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
1727 _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);
1728 _ufssel = GSEL(GUFS32_SEL, SEL_UPL);
1729 _ugssel = GSEL(GUGS32_SEL, SEL_UPL);
1730
1731 load_ds(_udatasel);
1732 load_es(_udatasel);
1733 load_fs(_ufssel);
1734
1735 /* setup proc 0's pcb */
1736 thread0.td_pcb->pcb_flags = 0;
1737 thread0.td_frame = &proc0_tf;
1738
1739 env = kern_getenv("kernelname");
1740 if (env != NULL)
1741 strlcpy(kernelname, env, sizeof(kernelname));
1742
1743 cpu_probe_amdc1e();
1744
1745 #ifdef FDT
1746 x86_init_fdt();
1747 #endif
1748
1749 /* Location of kernel stack for locore */
1750 return ((u_int64_t)thread0.td_pcb);
1751 }
1752
1753 void
1754 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
1755 {
1756
1757 pcpu->pc_acpi_id = 0xffffffff;
1758 }
1759
1760 static int
1761 smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
1762 {
1763 struct bios_smap *smapbase;
1764 struct bios_smap_xattr smap;
1765 caddr_t kmdp;
1766 uint32_t *smapattr;
1767 int count, error, i;
1768
1769 /* Retrieve the system memory map from the loader. */
1770 kmdp = preload_search_by_type("elf kernel");
1771 if (kmdp == NULL)
1772 kmdp = preload_search_by_type("elf64 kernel");
1773 smapbase = (struct bios_smap *)preload_search_info(kmdp,
1774 MODINFO_METADATA | MODINFOMD_SMAP);
1775 if (smapbase == NULL)
1776 return (0);
1777 smapattr = (uint32_t *)preload_search_info(kmdp,
1778 MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
1779 count = *((uint32_t *)smapbase - 1) / sizeof(*smapbase);
1780 error = 0;
1781 for (i = 0; i < count; i++) {
1782 smap.base = smapbase[i].base;
1783 smap.length = smapbase[i].length;
1784 smap.type = smapbase[i].type;
1785 if (smapattr != NULL)
1786 smap.xattr = smapattr[i];
1787 else
1788 smap.xattr = 0;
1789 error = SYSCTL_OUT(req, &smap, sizeof(smap));
1790 }
1791 return (error);
1792 }
1793 SYSCTL_PROC(_machdep, OID_AUTO, smap, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
1794 smap_sysctl_handler, "S,bios_smap_xattr", "Raw BIOS SMAP data");
1795
1796 static int
1797 efi_map_sysctl_handler(SYSCTL_HANDLER_ARGS)
1798 {
1799 struct efi_map_header *efihdr;
1800 caddr_t kmdp;
1801 uint32_t efisize;
1802
1803 kmdp = preload_search_by_type("elf kernel");
1804 if (kmdp == NULL)
1805 kmdp = preload_search_by_type("elf64 kernel");
1806 efihdr = (struct efi_map_header *)preload_search_info(kmdp,
1807 MODINFO_METADATA | MODINFOMD_EFI_MAP);
1808 if (efihdr == NULL)
1809 return (0);
1810 efisize = *((uint32_t *)efihdr - 1);
1811 return (SYSCTL_OUT(req, efihdr, efisize));
1812 }
1813 SYSCTL_PROC(_machdep, OID_AUTO, efi_map, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
1814 efi_map_sysctl_handler, "S,efi_map_header", "Raw EFI Memory Map");
1815
1816 void
1817 spinlock_enter(void)
1818 {
1819 struct thread *td;
1820 register_t flags;
1821
1822 td = curthread;
1823 if (td->td_md.md_spinlock_count == 0) {
1824 flags = intr_disable();
1825 td->td_md.md_spinlock_count = 1;
1826 td->td_md.md_saved_flags = flags;
1827 } else
1828 td->td_md.md_spinlock_count++;
1829 critical_enter();
1830 }
1831
1832 void
1833 spinlock_exit(void)
1834 {
1835 struct thread *td;
1836 register_t flags;
1837
1838 td = curthread;
1839 critical_exit();
1840 flags = td->td_md.md_saved_flags;
1841 td->td_md.md_spinlock_count--;
1842 if (td->td_md.md_spinlock_count == 0)
1843 intr_restore(flags);
1844 }
1845
1846 /*
1847 * Construct a PCB from a trapframe. This is called from kdb_trap() where
1848 * we want to start a backtrace from the function that caused us to enter
1849 * the debugger. We have the context in the trapframe, but base the trace
1850 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
1851 * enough for a backtrace.
1852 */
1853 void
1854 makectx(struct trapframe *tf, struct pcb *pcb)
1855 {
1856
1857 pcb->pcb_r12 = tf->tf_r12;
1858 pcb->pcb_r13 = tf->tf_r13;
1859 pcb->pcb_r14 = tf->tf_r14;
1860 pcb->pcb_r15 = tf->tf_r15;
1861 pcb->pcb_rbp = tf->tf_rbp;
1862 pcb->pcb_rbx = tf->tf_rbx;
1863 pcb->pcb_rip = tf->tf_rip;
1864 pcb->pcb_rsp = tf->tf_rsp;
1865 }
1866
1867 int
1868 ptrace_set_pc(struct thread *td, unsigned long addr)
1869 {
1870
1871 td->td_frame->tf_rip = addr;
1872 set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
1873 return (0);
1874 }
1875
1876 int
1877 ptrace_single_step(struct thread *td)
1878 {
1879 td->td_frame->tf_rflags |= PSL_T;
1880 return (0);
1881 }
1882
1883 int
1884 ptrace_clear_single_step(struct thread *td)
1885 {
1886 td->td_frame->tf_rflags &= ~PSL_T;
1887 return (0);
1888 }
1889
1890 int
1891 fill_regs(struct thread *td, struct reg *regs)
1892 {
1893 struct trapframe *tp;
1894
1895 tp = td->td_frame;
1896 return (fill_frame_regs(tp, regs));
1897 }
1898
1899 int
1900 fill_frame_regs(struct trapframe *tp, struct reg *regs)
1901 {
1902 regs->r_r15 = tp->tf_r15;
1903 regs->r_r14 = tp->tf_r14;
1904 regs->r_r13 = tp->tf_r13;
1905 regs->r_r12 = tp->tf_r12;
1906 regs->r_r11 = tp->tf_r11;
1907 regs->r_r10 = tp->tf_r10;
1908 regs->r_r9 = tp->tf_r9;
1909 regs->r_r8 = tp->tf_r8;
1910 regs->r_rdi = tp->tf_rdi;
1911 regs->r_rsi = tp->tf_rsi;
1912 regs->r_rbp = tp->tf_rbp;
1913 regs->r_rbx = tp->tf_rbx;
1914 regs->r_rdx = tp->tf_rdx;
1915 regs->r_rcx = tp->tf_rcx;
1916 regs->r_rax = tp->tf_rax;
1917 regs->r_rip = tp->tf_rip;
1918 regs->r_cs = tp->tf_cs;
1919 regs->r_rflags = tp->tf_rflags;
1920 regs->r_rsp = tp->tf_rsp;
1921 regs->r_ss = tp->tf_ss;
1922 if (tp->tf_flags & TF_HASSEGS) {
1923 regs->r_ds = tp->tf_ds;
1924 regs->r_es = tp->tf_es;
1925 regs->r_fs = tp->tf_fs;
1926 regs->r_gs = tp->tf_gs;
1927 } else {
1928 regs->r_ds = 0;
1929 regs->r_es = 0;
1930 regs->r_fs = 0;
1931 regs->r_gs = 0;
1932 }
1933 return (0);
1934 }
1935
1936 int
1937 set_regs(struct thread *td, struct reg *regs)
1938 {
1939 struct trapframe *tp;
1940 register_t rflags;
1941
1942 tp = td->td_frame;
1943 rflags = regs->r_rflags & 0xffffffff;
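	/*
	 * Refuse the update if it would grant privileged rflags bits
	 * (only PSL_USERCHANGE bits may differ) or install a non-user
	 * %cs selector.
	 */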
1944 if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs))
1945 return (EINVAL);
1946 tp->tf_r15 = regs->r_r15;
1947 tp->tf_r14 = regs->r_r14;
1948 tp->tf_r13 = regs->r_r13;
1949 tp->tf_r12 = regs->r_r12;
1950 tp->tf_r11 = regs->r_r11;
1951 tp->tf_r10 = regs->r_r10;
1952 tp->tf_r9 = regs->r_r9;
1953 tp->tf_r8 = regs->r_r8;
1954 tp->tf_rdi = regs->r_rdi;
1955 tp->tf_rsi = regs->r_rsi;
1956 tp->tf_rbp = regs->r_rbp;
1957 tp->tf_rbx = regs->r_rbx;
1958 tp->tf_rdx = regs->r_rdx;
1959 tp->tf_rcx = regs->r_rcx;
1960 tp->tf_rax = regs->r_rax;
1961 tp->tf_rip = regs->r_rip;
1962 tp->tf_cs = regs->r_cs;
1963 tp->tf_rflags = rflags;
1964 tp->tf_rsp = regs->r_rsp;
1965 tp->tf_ss = regs->r_ss;
1966 if (0) { /* XXXKIB */
1967 tp->tf_ds = regs->r_ds;
1968 tp->tf_es = regs->r_es;
1969 tp->tf_fs = regs->r_fs;
1970 tp->tf_gs = regs->r_gs;
1971 tp->tf_flags = TF_HASSEGS;
1972 }
1973 set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
1974 return (0);
1975 }
1976
1977 /* XXX check all this stuff! */
1978 /* externalize from sv_xmm */
1979 static void
1980 fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
1981 {
1982 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
1983 struct envxmm *penv_xmm = &sv_xmm->sv_env;
1984 int i;
1985
1986 /* pcb -> fpregs */
1987 bzero(fpregs, sizeof(*fpregs));
1988
1989 /* FPU control/status */
1990 penv_fpreg->en_cw = penv_xmm->en_cw;
1991 penv_fpreg->en_sw = penv_xmm->en_sw;
1992 penv_fpreg->en_tw = penv_xmm->en_tw;
1993 penv_fpreg->en_opcode = penv_xmm->en_opcode;
1994 penv_fpreg->en_rip = penv_xmm->en_rip;
1995 penv_fpreg->en_rdp = penv_xmm->en_rdp;
1996 penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
1997 penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;
1998
1999 /* FPU registers */
2000 for (i = 0; i < 8; ++i)
2001 bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);
2002
2003 /* SSE registers */
2004 for (i = 0; i < 16; ++i)
2005 bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);
2006 }
2007
2008 /* internalize from fpregs into sv_xmm */
2009 static void
2010 set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
2011 {
2012 struct envxmm *penv_xmm = &sv_xmm->sv_env;
2013 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
2014 int i;
2015
2016 /* fpregs -> pcb */
2017 /* FPU control/status */
2018 penv_xmm->en_cw = penv_fpreg->en_cw;
2019 penv_xmm->en_sw = penv_fpreg->en_sw;
2020 penv_xmm->en_tw = penv_fpreg->en_tw;
2021 penv_xmm->en_opcode = penv_fpreg->en_opcode;
2022 penv_xmm->en_rip = penv_fpreg->en_rip;
2023 penv_xmm->en_rdp = penv_fpreg->en_rdp;
2024 penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
2025 penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask;
2026
2027 /* FPU registers */
2028 for (i = 0; i < 8; ++i)
2029 bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);
2030
2031 /* SSE registers */
2032 for (i = 0; i < 16; ++i)
2033 bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);
2034 }
2035
2036 /* externalize from td->pcb */
2037 int
2038 fill_fpregs(struct thread *td, struct fpreg *fpregs)
2039 {
2040
2041 KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
2042 P_SHOULDSTOP(td->td_proc),
2043 ("not suspended thread %p", td));
2044 fpugetregs(td);
2045 fill_fpregs_xmm(get_pcb_user_save_td(td), fpregs);
2046 return (0);
2047 }
2048
2049 /* internalize to td->pcb */
2050 int
2051 set_fpregs(struct thread *td, struct fpreg *fpregs)
2052 {
2053
2054 set_fpregs_xmm(fpregs, get_pcb_user_save_td(td));
2055 fpuuserinited(td);
2056 return (0);
2057 }
2058
2059 /*
2060 * Get machine context.
2061 */
2062 int
2063 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
2064 {
2065 struct pcb *pcb;
2066 struct trapframe *tp;
2067
2068 pcb = td->td_pcb;
2069 tp = td->td_frame;
2070 PROC_LOCK(curthread->td_proc);
2071 mcp->mc_onstack = sigonstack(tp->tf_rsp);
2072 PROC_UNLOCK(curthread->td_proc);
2073 mcp->mc_r15 = tp->tf_r15;
2074 mcp->mc_r14 = tp->tf_r14;
2075 mcp->mc_r13 = tp->tf_r13;
2076 mcp->mc_r12 = tp->tf_r12;
2077 mcp->mc_r11 = tp->tf_r11;
2078 mcp->mc_r10 = tp->tf_r10;
2079 mcp->mc_r9 = tp->tf_r9;
2080 mcp->mc_r8 = tp->tf_r8;
2081 mcp->mc_rdi = tp->tf_rdi;
2082 mcp->mc_rsi = tp->tf_rsi;
2083 mcp->mc_rbp = tp->tf_rbp;
2084 mcp->mc_rbx = tp->tf_rbx;
2085 mcp->mc_rcx = tp->tf_rcx;
2086 mcp->mc_rflags = tp->tf_rflags;
2087 if (flags & GET_MC_CLEAR_RET) {
2088 mcp->mc_rax = 0;
2089 mcp->mc_rdx = 0;
2090 mcp->mc_rflags &= ~PSL_C;
2091 } else {
2092 mcp->mc_rax = tp->tf_rax;
2093 mcp->mc_rdx = tp->tf_rdx;
2094 }
2095 mcp->mc_rip = tp->tf_rip;
2096 mcp->mc_cs = tp->tf_cs;
2097 mcp->mc_rsp = tp->tf_rsp;
2098 mcp->mc_ss = tp->tf_ss;
2099 mcp->mc_ds = tp->tf_ds;
2100 mcp->mc_es = tp->tf_es;
2101 mcp->mc_fs = tp->tf_fs;
2102 mcp->mc_gs = tp->tf_gs;
2103 mcp->mc_flags = tp->tf_flags;
2104 mcp->mc_len = sizeof(*mcp);
2105 get_fpcontext(td, mcp, NULL, 0);
2106 mcp->mc_fsbase = pcb->pcb_fsbase;
2107 mcp->mc_gsbase = pcb->pcb_gsbase;
2108 mcp->mc_xfpustate = 0;
2109 mcp->mc_xfpustate_len = 0;
2110 bzero(mcp->mc_spare, sizeof(mcp->mc_spare));
2111 return (0);
2112 }
2113
2114 /*
2115 * Set machine context.
2116 *
2117 * However, we don't set any but the user modifiable flags, and we won't
2118 * touch the cs selector.
2119 */
2120 int
2121 set_mcontext(struct thread *td, mcontext_t *mcp)
2122 {
2123 struct pcb *pcb;
2124 struct trapframe *tp;
2125 char *xfpustate;
2126 long rflags;
2127 int ret;
2128
2129 pcb = td->td_pcb;
2130 tp = td->td_frame;
2131 if (mcp->mc_len != sizeof(*mcp) ||
2132 (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
2133 return (EINVAL);
2134 rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
2135 (tp->tf_rflags & ~PSL_USERCHANGE);
2136 if (mcp->mc_flags & _MC_HASFPXSTATE) {
2137 if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
2138 sizeof(struct savefpu))
2139 return (EINVAL);
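		/*
		 * Stage the extended FPU state on the kernel stack; its
		 * size was bounded by cpu_max_ext_state_size above.
		 */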
2140 xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
2141 ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
2142 mcp->mc_xfpustate_len);
2143 if (ret != 0)
2144 return (ret);
2145 } else
2146 xfpustate = NULL;
2147 ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
2148 if (ret != 0)
2149 return (ret);
2150 tp->tf_r15 = mcp->mc_r15;
2151 tp->tf_r14 = mcp->mc_r14;
2152 tp->tf_r13 = mcp->mc_r13;
2153 tp->tf_r12 = mcp->mc_r12;
2154 tp->tf_r11 = mcp->mc_r11;
2155 tp->tf_r10 = mcp->mc_r10;
2156 tp->tf_r9 = mcp->mc_r9;
2157 tp->tf_r8 = mcp->mc_r8;
2158 tp->tf_rdi = mcp->mc_rdi;
2159 tp->tf_rsi = mcp->mc_rsi;
2160 tp->tf_rbp = mcp->mc_rbp;
2161 tp->tf_rbx = mcp->mc_rbx;
2162 tp->tf_rdx = mcp->mc_rdx;
2163 tp->tf_rcx = mcp->mc_rcx;
2164 tp->tf_rax = mcp->mc_rax;
2165 tp->tf_rip = mcp->mc_rip;
2166 tp->tf_rflags = rflags;
2167 tp->tf_rsp = mcp->mc_rsp;
2168 tp->tf_ss = mcp->mc_ss;
2169 tp->tf_flags = mcp->mc_flags;
2170 if (tp->tf_flags & TF_HASSEGS) {
2171 tp->tf_ds = mcp->mc_ds;
2172 tp->tf_es = mcp->mc_es;
2173 tp->tf_fs = mcp->mc_fs;
2174 tp->tf_gs = mcp->mc_gs;
2175 }
2176 if (mcp->mc_flags & _MC_HASBASES) {
2177 pcb->pcb_fsbase = mcp->mc_fsbase;
2178 pcb->pcb_gsbase = mcp->mc_gsbase;
2179 }
2180 set_pcb_flags(pcb, PCB_FULL_IRET);
2181 return (0);
2182 }
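
/*
 * Illustrative userland sketch (not part of this file): get_mcontext() and
 * set_mcontext() back the mcontext_t embedded in a ucontext_t, as captured
 * and restored by getcontext(3)/setcontext(3):
 *
 *	ucontext_t uc;
 *	volatile int resumed = 0;
 *
 *	getcontext(&uc);
 *	if (!resumed) {
 *		resumed = 1;
 *		setcontext(&uc);
 *	}
 *
 * setcontext() resumes at the point saved by getcontext(), and the second
 * pass falls through because resumed is already set.
 */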
2183
2184 static void
2185 get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
2186 size_t xfpusave_len)
2187 {
2188 size_t max_len, len;
2189
2190 mcp->mc_ownedfp = fpugetregs(td);
2191 bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
2192 sizeof(mcp->mc_fpstate));
2193 mcp->mc_fpformat = fpuformat();
2194 if (!use_xsave || xfpusave_len == 0)
2195 return;
2196 max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
2197 len = xfpusave_len;
2198 if (len > max_len) {
2199 len = max_len;
2200 		bzero(xfpusave + max_len, xfpusave_len - max_len);
2201 }
2202 mcp->mc_flags |= _MC_HASFPXSTATE;
2203 mcp->mc_xfpustate_len = len;
2204 bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
2205 }
2206
2207 static int
2208 set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate,
2209 size_t xfpustate_len)
2210 {
2211 struct savefpu *fpstate;
2212 int error;
2213
2214 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
2215 return (0);
2216 else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
2217 return (EINVAL);
2218 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
2219 /* We don't care what state is left in the FPU or PCB. */
2220 fpstate_drop(td);
2221 error = 0;
2222 } else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
2223 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
2224 fpstate = (struct savefpu *)&mcp->mc_fpstate;
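		/*
		 * Clear reserved MXCSR bits; restoring a state with them
		 * set would raise #GP.
		 */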
2225 fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask;
2226 error = fpusetregs(td, fpstate, xfpustate, xfpustate_len);
2227 } else
2228 return (EINVAL);
2229 return (error);
2230 }
2231
2232 void
2233 fpstate_drop(struct thread *td)
2234 {
2235
2236 KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
2237 critical_enter();
2238 if (PCPU_GET(fpcurthread) == td)
2239 fpudrop();
2240 /*
2241 * XXX force a full drop of the fpu. The above only drops it if we
2242 * owned it.
2243 *
2244 * XXX I don't much like fpugetuserregs()'s semantics of doing a full
2245 * drop. Dropping only to the pcb matches fnsave's behaviour.
2246 * We only need to drop to !PCB_INITDONE in sendsig(). But
2247 * sendsig() is the only caller of fpugetuserregs()... perhaps we just
2248 * have too many layers.
2249 */
2250 clear_pcb_flags(curthread->td_pcb,
2251 PCB_FPUINITDONE | PCB_USERFPUINITDONE);
2252 critical_exit();
2253 }
2254
2255 int
2256 fill_dbregs(struct thread *td, struct dbreg *dbregs)
2257 {
2258 struct pcb *pcb;
2259
2260 if (td == NULL) {
2261 dbregs->dr[0] = rdr0();
2262 dbregs->dr[1] = rdr1();
2263 dbregs->dr[2] = rdr2();
2264 dbregs->dr[3] = rdr3();
2265 dbregs->dr[6] = rdr6();
2266 dbregs->dr[7] = rdr7();
2267 } else {
2268 pcb = td->td_pcb;
2269 dbregs->dr[0] = pcb->pcb_dr0;
2270 dbregs->dr[1] = pcb->pcb_dr1;
2271 dbregs->dr[2] = pcb->pcb_dr2;
2272 dbregs->dr[3] = pcb->pcb_dr3;
2273 dbregs->dr[6] = pcb->pcb_dr6;
2274 dbregs->dr[7] = pcb->pcb_dr7;
2275 }
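	/*
	 * dr4 and dr5 are reserved and dr8 through dr15 do not exist;
	 * report them as zero.
	 */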
2276 dbregs->dr[4] = 0;
2277 dbregs->dr[5] = 0;
2278 dbregs->dr[8] = 0;
2279 dbregs->dr[9] = 0;
2280 dbregs->dr[10] = 0;
2281 dbregs->dr[11] = 0;
2282 dbregs->dr[12] = 0;
2283 dbregs->dr[13] = 0;
2284 dbregs->dr[14] = 0;
2285 dbregs->dr[15] = 0;
2286 return (0);
2287 }
2288
2289 int
2290 set_dbregs(struct thread *td, struct dbreg *dbregs)
2291 {
2292 struct pcb *pcb;
2293 int i;
2294
2295 if (td == NULL) {
2296 load_dr0(dbregs->dr[0]);
2297 load_dr1(dbregs->dr[1]);
2298 load_dr2(dbregs->dr[2]);
2299 load_dr3(dbregs->dr[3]);
2300 load_dr6(dbregs->dr[6]);
2301 load_dr7(dbregs->dr[7]);
2302 } else {
2303 /*
2304 * Don't let an illegal value for dr7 get set. Specifically,
2305 * check for undefined settings. Setting these bit patterns
2306 		 * results in undefined behaviour and can lead to an unexpected
2307 		 * TRCTRAP or a general protection fault right here.
2308 		 * Upper bits of dr6 and dr7 must not be set.
2309 */
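		/*
		 * R/W type 0x02 selects I/O breakpoints, which are only
		 * defined with CR4.DE set, and 8-byte lengths are invalid
		 * for 32-bit code segments.
		 */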
2310 for (i = 0; i < 4; i++) {
2311 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
2312 return (EINVAL);
2313 if (td->td_frame->tf_cs == _ucode32sel &&
2314 DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8)
2315 return (EINVAL);
2316 }
2317 if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 ||
2318 (dbregs->dr[7] & 0xffffffff00000000ul) != 0)
2319 return (EINVAL);
2320
2321 pcb = td->td_pcb;
2322
2323 /*
2324 * Don't let a process set a breakpoint that is not within the
2325 * process's address space. If a process could do this, it
2326 * could halt the system by setting a breakpoint in the kernel
2327 * (if ddb was enabled). Thus, we need to check to make sure
2328 * that no breakpoints are being enabled for addresses outside
2329 		 * the process's address space.
2330 *
2331 * XXX - what about when the watched area of the user's
2332 * address space is written into from within the kernel
2333 * ... wouldn't that still cause a breakpoint to be generated
2334 * from within kernel mode?
2335 */
2336
2337 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
2338 /* dr0 is enabled */
2339 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
2340 return (EINVAL);
2341 }
2342 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
2343 /* dr1 is enabled */
2344 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
2345 return (EINVAL);
2346 }
2347 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
2348 /* dr2 is enabled */
2349 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
2350 return (EINVAL);
2351 }
2352 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
2353 /* dr3 is enabled */
2354 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
2355 return (EINVAL);
2356 }
2357
2358 pcb->pcb_dr0 = dbregs->dr[0];
2359 pcb->pcb_dr1 = dbregs->dr[1];
2360 pcb->pcb_dr2 = dbregs->dr[2];
2361 pcb->pcb_dr3 = dbregs->dr[3];
2362 pcb->pcb_dr6 = dbregs->dr[6];
2363 pcb->pcb_dr7 = dbregs->dr[7];
2364
2365 set_pcb_flags(pcb, PCB_DBREGS);
2366 }
2367
2368 return (0);
2369 }
2370
2371 void
2372 reset_dbregs(void)
2373 {
2374
2375 load_dr7(0); /* Turn off the control bits first */
2376 load_dr0(0);
2377 load_dr1(0);
2378 load_dr2(0);
2379 load_dr3(0);
2380 load_dr6(0);
2381 }
2382
2383 /*
2384 * Return > 0 if a hardware breakpoint has been hit, and the
2385  * breakpoint was in user space.  Return 0 otherwise.
2386 */
2387 int
2388 user_dbreg_trap(void)
2389 {
2390 u_int64_t dr7, dr6; /* debug registers dr6 and dr7 */
2391 u_int64_t bp; /* breakpoint bits extracted from dr6 */
2392 int nbp; /* number of breakpoints that triggered */
2393 caddr_t addr[4]; /* breakpoint addresses */
2394 int i;
2395
2396 dr7 = rdr7();
2397 if ((dr7 & 0x000000ff) == 0) {
2398 /*
2399 		 * all global and local enable bits in the dr7 register are
2400 		 * clear, thus the trap couldn't have been caused by the
2401 		 * hardware debug registers.
2402 		 */
2403 		return (0);
2404 }
2405
2406 nbp = 0;
2407 dr6 = rdr6();
2408 bp = dr6 & 0x0000000f;
2409
2410 if (!bp) {
2411 /*
2412 		 * None of the breakpoint bits are set, meaning this
2413 		 * trap was not caused by any of the debug registers.
2414 		 */
2415 		return (0);
2416 }
2417
2418 /*
2419 	 * At least one of the breakpoints was hit; check which ones and
2420 	 * whether any of them are user-space addresses.
2421 */
2422
2423 if (bp & 0x01) {
2424 addr[nbp++] = (caddr_t)rdr0();
2425 }
2426 if (bp & 0x02) {
2427 addr[nbp++] = (caddr_t)rdr1();
2428 }
2429 if (bp & 0x04) {
2430 addr[nbp++] = (caddr_t)rdr2();
2431 }
2432 if (bp & 0x08) {
2433 addr[nbp++] = (caddr_t)rdr3();
2434 }
2435
2436 for (i = 0; i < nbp; i++) {
2437 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
2438 /*
2439 * addr[i] is in user space
2440 */
2441 			return (nbp);
2442 }
2443 }
2444
2445 /*
2446 * None of the breakpoints are in user space.
2447 */
2448 	return (0);
2449 }
2450
2451 #ifdef KDB
2452
2453 /*
2454 * Provide inb() and outb() as functions. They are normally only available as
2455 * inline functions, thus cannot be called from the debugger.
2456 */
2457
2458 /* silence compiler warnings */
2459 u_char inb_(u_short);
2460 void outb_(u_short, u_char);
2461
2462 u_char
2463 inb_(u_short port)
2464 {
2465 return inb(port);
2466 }
2467
2468 void
2469 outb_(u_short port, u_char data)
2470 {
2471 outb(port, data);
2472 }
2473
2474 #endif /* KDB */