1 /*-
2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1992 Terrence R. Lambert.
4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * William Jolitz.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
39 */
40
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43
44 #include "opt_atalk.h"
45 #include "opt_atpic.h"
46 #include "opt_compat.h"
47 #include "opt_cpu.h"
48 #include "opt_ddb.h"
49 #include "opt_inet.h"
50 #include "opt_ipx.h"
51 #include "opt_isa.h"
52 #include "opt_kdtrace.h"
53 #include "opt_kstack_pages.h"
54 #include "opt_maxmem.h"
55 #include "opt_perfmon.h"
56 #include "opt_sched.h"
57
58 #include <sys/param.h>
59 #include <sys/proc.h>
60 #include <sys/systm.h>
61 #include <sys/bio.h>
62 #include <sys/buf.h>
63 #include <sys/bus.h>
64 #include <sys/callout.h>
65 #include <sys/cons.h>
66 #include <sys/cpu.h>
67 #include <sys/eventhandler.h>
68 #include <sys/exec.h>
69 #include <sys/imgact.h>
70 #include <sys/kdb.h>
71 #include <sys/kernel.h>
72 #include <sys/ktr.h>
73 #include <sys/linker.h>
74 #include <sys/lock.h>
75 #include <sys/malloc.h>
76 #include <sys/memrange.h>
77 #include <sys/msgbuf.h>
78 #include <sys/mutex.h>
79 #include <sys/pcpu.h>
80 #include <sys/ptrace.h>
81 #include <sys/reboot.h>
82 #include <sys/sched.h>
83 #include <sys/signalvar.h>
84 #include <sys/syscallsubr.h>
85 #include <sys/sysctl.h>
86 #include <sys/sysent.h>
87 #include <sys/sysproto.h>
88 #include <sys/ucontext.h>
89 #include <sys/vmmeter.h>
90
91 #include <vm/vm.h>
92 #include <vm/vm_extern.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_pager.h>
98 #include <vm/vm_param.h>
99
100 #ifdef DDB
101 #ifndef KDB
102 #error KDB must be enabled in order for DDB to work!
103 #endif
104 #include <ddb/ddb.h>
105 #include <ddb/db_sym.h>
106 #endif
107
108 #include <net/netisr.h>
109
110 #include <machine/clock.h>
111 #include <machine/cpu.h>
112 #include <machine/cputypes.h>
113 #include <machine/intr_machdep.h>
114 #include <machine/mca.h>
115 #include <machine/md_var.h>
116 #include <machine/metadata.h>
117 #include <machine/pc/bios.h>
118 #include <machine/pcb.h>
119 #include <machine/proc.h>
120 #include <machine/reg.h>
121 #include <machine/sigframe.h>
122 #include <machine/specialreg.h>
123 #ifdef PERFMON
124 #include <machine/perfmon.h>
125 #endif
126 #include <machine/tss.h>
127 #ifdef SMP
128 #include <machine/smp.h>
129 #endif
130
131 #ifdef DEV_ATPIC
132 #include <amd64/isa/icu.h>
133 #else
134 #include <machine/apicvar.h>
135 #endif
136
137 #include <isa/isareg.h>
138 #include <isa/rtc.h>
139
140 /* Sanity check for __curthread() */
141 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
142
143 extern u_int64_t hammer_time(u_int64_t, u_int64_t);
144
145 extern void printcpuinfo(void); /* XXX header file */
146 extern void identify_cpu(void);
147 extern void panicifcpuunsupported(void);
148
149 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
150 #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
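/*
 * CS_SECURE() accepts only user-privilege code selectors, and EFL_SECURE()
 * rejects any change to rflags bits outside PSL_USERCHANGE; e.g., an attempt
 * to raise IOPL via a doctored signal context fails this check.
 */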
151
152 static void cpu_startup(void *);
153 static void get_fpcontext(struct thread *td, mcontext_t *mcp,
154 char *xfpusave, size_t xfpusave_len);
155 static int set_fpcontext(struct thread *td, const mcontext_t *mcp,
156 char *xfpustate, size_t xfpustate_len);
157 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
158
159 #ifdef DDB
160 extern vm_offset_t ksym_start, ksym_end;
161 #endif
162
163 struct msgbuf *msgbufp;
164
165 /* Intel ICH registers */
166 #define ICH_PMBASE 0x400
167 #define ICH_SMI_EN (ICH_PMBASE + 0x30) /* SMI Control and Enable */
168
169 int _udatasel, _ucodesel, _ucode32sel, _ufssel, _ugssel;
170
171 int cold = 1;
172
173 long Maxmem = 0;
174 long realmem = 0;
175
176 /*
177 * The number of PHYSMAP entries must be one less than the number of
178 * PHYSSEG entries because the PHYSMAP entry that spans the largest
179 * physical address that is accessible by ISA DMA is split into two
180 * PHYSSEG entries.
181 */
182 #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
183
184 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
185 vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
186
187 /* Must be 2 less so that a 0/0 pair can signal the end of chunks. */
188 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
189 #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
190
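/*
 * Both arrays hold base/bound pairs terminated by a pair of zeroes.  A
 * hypothetical two-chunk layout (addresses are illustrative only):
 *
 *	phys_avail[0] = 0x1000;		first chunk start
 *	phys_avail[1] = 0x9f000;	first chunk end (exclusive)
 *	phys_avail[2] = 0x100000;	second chunk start
 *	phys_avail[3] = 0xbfe00000;	second chunk end (exclusive)
 *	phys_avail[4] = phys_avail[5] = 0;	terminator
 */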
191 struct kva_md_info kmi;
192
193 static struct trapframe proc0_tf;
194 struct region_descriptor r_gdt, r_idt;
195
196 struct pcpu __pcpu[MAXCPU];
197
198 struct mtx icu_lock;
199
200 struct mem_range_softc mem_range_softc;
201
202 struct mtx dt_lock; /* lock for GDT and LDT */
203
204 static void
205 cpu_startup(dummy)
206 void *dummy;
207 {
208 uintmax_t memsize;
209 char *sysenv;
210
211 /*
212  * On MacBooks, we need to prevent the legacy USB circuit from
213  * generating an SMI#, because this can cause several problems,
214  * namely: incorrect CPU frequency detection and failure to
215  * start the APs.
216  * We do this by clearing a bit in the SMI_EN (SMI Control and
217  * Enable) register of the Intel ICH LPC Interface Bridge.
218  */
219 sysenv = getenv("smbios.system.product");
220 if (sysenv != NULL) {
221 if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
222 strncmp(sysenv, "MacBook3,1", 10) == 0 ||
223 strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
224 strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
225 strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
226 strncmp(sysenv, "Macmini1,1", 10) == 0) {
227 if (bootverbose)
228 printf("Disabling LEGACY_USB_EN bit on "
229 "Intel ICH.\n");
230 outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
231 }
232 freeenv(sysenv);
233 }
234
235 /*
236 * Good {morning,afternoon,evening,night}.
237 */
238 startrtclock();
239 printcpuinfo();
240 panicifcpuunsupported();
241 #ifdef PERFMON
242 perfmon_init();
243 #endif
244 realmem = Maxmem;
245
246 /*
247  * Display physical memory if SMBIOS reports a reasonable amount.
248  */
249 memsize = 0;
250 sysenv = getenv("smbios.memory.enabled");
251 if (sysenv != NULL) {
252 memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
253 freeenv(sysenv);
254 }
255 if (memsize < ptoa((uintmax_t)cnt.v_free_count))
256 memsize = ptoa((uintmax_t)Maxmem);
257 printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
258
259 /*
260 * Display any holes after the first chunk of extended memory.
261 */
262 if (bootverbose) {
263 int indx;
264
265 printf("Physical memory chunk(s):\n");
266 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
267 vm_paddr_t size;
268
269 size = phys_avail[indx + 1] - phys_avail[indx];
270 printf(
271 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
272 (uintmax_t)phys_avail[indx],
273 (uintmax_t)phys_avail[indx + 1] - 1,
274 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
275 }
276 }
277
278 vm_ksubmap_init(&kmi);
279
280 printf("avail memory = %ju (%ju MB)\n",
281 ptoa((uintmax_t)cnt.v_free_count),
282 ptoa((uintmax_t)cnt.v_free_count) / 1048576);
283
284 /*
285 * Set up buffers, so they can be used to read disk labels.
286 */
287 bufinit();
288 vm_pager_bufferinit();
289
290 cpu_setregs();
291 }
292
293 /*
294  * Send an interrupt to a process.
295  *
296  * The stack is set up so that the sigcode stored at
297  * the top can call the handler, followed by a call
298  * to the sigreturn routine below.  After sigreturn
299  * resets the signal mask, the stack, and the frame
300  * pointer, it returns to the user-specified pc and
301  * psl.
302  */
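/*
 * A sketch of the resulting user stack layout (grows down; member order
 * within struct sigframe is as defined in <machine/sigframe.h>):
 *
 *	... user stack at the time of the signal ...
 *	128-byte red zone (skipped when delivering on the interrupted stack)
 *	[optional extended FPU state, 64-byte aligned]
 *	struct sigframe (16-byte aligned): ucontext, siginfo, handler pointer
 *	<- regs->tf_rsp on handler entry
 */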
303 void
304 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
305 {
306 struct sigframe sf, *sfp;
307 struct pcb *pcb;
308 struct proc *p;
309 struct thread *td;
310 struct sigacts *psp;
311 char *sp;
312 struct trapframe *regs;
313 char *xfpusave;
314 size_t xfpusave_len;
315 int sig;
316 int oonstack;
317
318 td = curthread;
319 pcb = td->td_pcb;
320 p = td->td_proc;
321 PROC_LOCK_ASSERT(p, MA_OWNED);
322 sig = ksi->ksi_signo;
323 psp = p->p_sigacts;
324 mtx_assert(&psp->ps_mtx, MA_OWNED);
325 regs = td->td_frame;
326 oonstack = sigonstack(regs->tf_rsp);
327
328 if (cpu_max_ext_state_size > sizeof(struct savefpu) && use_xsave) {
329 xfpusave_len = cpu_max_ext_state_size - sizeof(struct savefpu);
330 xfpusave = __builtin_alloca(xfpusave_len);
331 } else {
332 xfpusave_len = 0;
333 xfpusave = NULL;
334 }
335
336 /* Save user context. */
337 bzero(&sf, sizeof(sf));
338 sf.sf_uc.uc_sigmask = *mask;
339 sf.sf_uc.uc_stack = td->td_sigstk;
340 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
341 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
342 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
343 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
344 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
345 get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
346 fpstate_drop(td);
347 sf.sf_uc.uc_mcontext.mc_fsbase = pcb->pcb_fsbase;
348 sf.sf_uc.uc_mcontext.mc_gsbase = pcb->pcb_gsbase;
349 bzero(sf.sf_uc.uc_mcontext.mc_spare,
350 sizeof(sf.sf_uc.uc_mcontext.mc_spare));
351 bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));
352
353 /* Allocate space for the signal handler context. */
354 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
355 SIGISMEMBER(psp->ps_sigonstack, sig)) {
356 sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
357 #if defined(COMPAT_43)
358 td->td_sigstk.ss_flags |= SS_ONSTACK;
359 #endif
360 } else
361 sp = (char *)regs->tf_rsp - 128;
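/*
 * The 128 bytes below %rsp are the amd64 ABI red zone, which a leaf
 * function may use without adjusting %rsp; it must be preserved,
 * hence the subtraction above.
 */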
362 if (xfpusave != NULL) {
363 sp -= xfpusave_len;
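/* The XSAVE area must be 64-byte aligned, hence the ~0x3F mask below. */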
364 sp = (char *)((unsigned long)sp & ~0x3Ful);
365 sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
366 }
367 sp -= sizeof(struct sigframe);
368 /* Align to 16 bytes. */
369 sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);
370
371 /* Translate the signal if appropriate. */
372 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
373 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
374
375 /* Build the argument list for the signal handler. */
376 regs->tf_rdi = sig; /* arg 1 in %rdi */
377 regs->tf_rdx = (register_t)&sfp->sf_uc; /* arg 3 in %rdx */
378 bzero(&sf.sf_si, sizeof(sf.sf_si));
379 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
380 /* Signal handler installed with SA_SIGINFO. */
381 regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */
382 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
383
384 /* Fill in POSIX parts */
385 sf.sf_si = ksi->ksi_info;
386 sf.sf_si.si_signo = sig; /* maybe a translated signal */
387 regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
388 } else {
389 /* Old FreeBSD-style arguments. */
390 regs->tf_rsi = ksi->ksi_code; /* arg 2 in %rsi */
391 regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
392 sf.sf_ahu.sf_handler = catcher;
393 }
394 mtx_unlock(&psp->ps_mtx);
395 PROC_UNLOCK(p);
396
397 /*
398 * Copy the sigframe out to the user's stack.
399 */
400 if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
401 (xfpusave != NULL && copyout(xfpusave,
402 (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
403 != 0)) {
404 #ifdef DEBUG
405 printf("process %ld has trashed its stack\n", (long)p->p_pid);
406 #endif
407 PROC_LOCK(p);
408 sigexit(td, SIGILL);
409 }
410
411 regs->tf_rsp = (long)sfp;
412 regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
413 regs->tf_rflags &= ~(PSL_T | PSL_D);
414 regs->tf_cs = _ucodesel;
415 regs->tf_ds = _udatasel;
416 regs->tf_es = _udatasel;
417 regs->tf_fs = _ufssel;
418 regs->tf_gs = _ugssel;
419 regs->tf_flags = TF_HASSEGS;
420 set_pcb_flags(pcb, PCB_FULL_IRET);
421 PROC_LOCK(p);
422 mtx_lock(&psp->ps_mtx);
423 }
424
425 /*
426 * System call to cleanup state after a signal
427 * has been taken. Reset signal mask and
428 * stack state from context left by sendsig (above).
429 * Return to previous pc and psl as specified by
430 * context left by sendsig. Check carefully to
431 * make sure that the user has not modified the
432 * state to gain improper privileges.
433 *
434 * MPSAFE
435 */
436 int
437 sigreturn(td, uap)
438 struct thread *td;
439 struct sigreturn_args /* {
440 const struct __ucontext *sigcntxp;
441 } */ *uap;
442 {
443 ucontext_t uc;
444 struct pcb *pcb;
445 struct proc *p;
446 struct trapframe *regs;
447 ucontext_t *ucp;
448 char *xfpustate;
449 size_t xfpustate_len;
450 long rflags;
451 int cs, error, ret;
452 ksiginfo_t ksi;
453
454 pcb = td->td_pcb;
455 p = td->td_proc;
456
457 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
458 if (error != 0) {
459 uprintf("pid %d (%s): sigreturn copyin failed\n",
460 p->p_pid, td->td_name);
461 return (error);
462 }
463 ucp = &uc;
464 if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
465 uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
466 td->td_name, ucp->uc_mcontext.mc_flags);
467 return (EINVAL);
468 }
469 regs = td->td_frame;
470 rflags = ucp->uc_mcontext.mc_rflags;
471 /*
472 * Don't allow users to change privileged or reserved flags.
473 */
474 /*
475 * XXX do allow users to change the privileged flag PSL_RF.
476 * The cpu sets PSL_RF in tf_rflags for faults. Debuggers
477 * should sometimes set it there too. tf_rflags is kept in
478 * the signal context during signal handling and there is no
479 * other place to remember it, so the PSL_RF bit may be
480 * corrupted by the signal handler without us knowing.
481 * Corruption of the PSL_RF bit at worst causes one more or
482 * one less debugger trap, so allowing it is fairly harmless.
483 */
484 if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
485 uprintf("pid %d (%s): sigreturn rflags = 0x%lx\n", p->p_pid,
486 td->td_name, rflags);
487 return (EINVAL);
488 }
489
490 /*
491  * Don't allow users to load a valid privileged %cs.  Let the
492  * hardware check for invalid selectors, excess privilege in
493  * other selectors, invalid %rip's and invalid %rsp's.
494  */
495 cs = ucp->uc_mcontext.mc_cs;
496 if (!CS_SECURE(cs)) {
497 uprintf("pid %d (%s): sigreturn cs = 0x%x\n", p->p_pid,
498 td->td_name, cs);
499 ksiginfo_init_trap(&ksi);
500 ksi.ksi_signo = SIGBUS;
501 ksi.ksi_code = BUS_OBJERR;
502 ksi.ksi_trapno = T_PROTFLT;
503 ksi.ksi_addr = (void *)regs->tf_rip;
504 trapsignal(td, &ksi);
505 return (EINVAL);
506 }
507
508 if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
509 xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
510 if (xfpustate_len > cpu_max_ext_state_size -
511 sizeof(struct savefpu)) {
512 uprintf("pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
513 p->p_pid, td->td_name, xfpustate_len);
514 return (EINVAL);
515 }
516 xfpustate = __builtin_alloca(xfpustate_len);
517 error = copyin((const void *)uc.uc_mcontext.mc_xfpustate,
518 xfpustate, xfpustate_len);
519 if (error != 0) {
520 uprintf(
521 "pid %d (%s): sigreturn copying xfpustate failed\n",
522 p->p_pid, td->td_name);
523 return (error);
524 }
525 } else {
526 xfpustate = NULL;
527 xfpustate_len = 0;
528 }
529 ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate, xfpustate_len);
530 if (ret != 0) {
531 uprintf("pid %d (%s): sigreturn set_fpcontext err %d\n",
532 p->p_pid, td->td_name, ret);
533 return (ret);
534 }
535 bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));
536 pcb->pcb_fsbase = ucp->uc_mcontext.mc_fsbase;
537 pcb->pcb_gsbase = ucp->uc_mcontext.mc_gsbase;
538
539 #if defined(COMPAT_43)
540 if (ucp->uc_mcontext.mc_onstack & 1)
541 td->td_sigstk.ss_flags |= SS_ONSTACK;
542 else
543 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
544 #endif
545
546 kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
547 set_pcb_flags(pcb, PCB_FULL_IRET);
548 return (EJUSTRETURN);
549 }
550
551 #ifdef COMPAT_FREEBSD4
552 int
553 freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
554 {
555
556 return sigreturn(td, (struct sigreturn_args *)uap);
557 }
558 #endif
559
560
561 /*
562  * Machine dependent boot() routine
563  *
564  * I haven't seen anything to put here yet.
565  * Possibly some stuff might be grafted back here from boot().
566  */
567 void
568 cpu_boot(int howto)
569 {
570 }
571
572 /*
573 * Flush the D-cache for non-DMA I/O so that the I-cache can
574 * be made coherent later.
575 */
576 void
577 cpu_flush_dcache(void *ptr, size_t len)
578 {
579 /* Not applicable */
580 }
581
582 /* Get current clock frequency for the given cpu id. */
583 int
584 cpu_est_clockrate(int cpu_id, uint64_t *rate)
585 {
586 register_t reg;
587 uint64_t tsc1, tsc2;
588
589 if (pcpu_find(cpu_id) == NULL || rate == NULL)
590 return (EINVAL);
591
592 /* If we're booting, trust the rate calibrated moments ago. */
593 if (cold) {
594 *rate = tsc_freq;
595 return (0);
596 }
597
598 #ifdef SMP
599 /* Schedule ourselves on the indicated cpu. */
600 thread_lock(curthread);
601 sched_bind(curthread, cpu_id);
602 thread_unlock(curthread);
603 #endif
604
605 /* Calibrate by measuring a short delay. */
606 reg = intr_disable();
607 tsc1 = rdtsc();
608 DELAY(1000);
609 tsc2 = rdtsc();
610 intr_restore(reg);
611
612 #ifdef SMP
613 thread_lock(curthread);
614 sched_unbind(curthread);
615 thread_unlock(curthread);
616 #endif
617
618 /*
619  * Calculate the difference in readings and convert to Hz (with a 1000 us
620  * delay, ticks * 1000 is ticks per second), then subtract 0.5% of the
621  * total (tsc2 * 5): empirically, the approximate overhead of DELAY().
622  */
623 tsc2 -= tsc1;
624 *rate = tsc2 * 1000 - tsc2 * 5;
625 return (0);
626 }
627
628 /*
629 * Shutdown the CPU as much as possible
630 */
631 void
632 cpu_halt(void)
633 {
634 for (;;)
635 halt();
636 }
637
638 void (*cpu_idle_hook)(void) = NULL; /* ACPI idle hook. */
639 static int cpu_ident_amdc1e = 0; /* AMD C1E supported. */
640 static int idle_mwait = 1; /* Use MONITOR/MWAIT for short idle. */
641 TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
642 SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
643 0, "Use MONITOR/MWAIT for short idle");
644
645 #define STATE_RUNNING 0x0
646 #define STATE_MWAIT 0x1
647 #define STATE_SLEEPING 0x2
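/*
 * Each CPU's monitorbuf holds one of these states.  An idle CPU advertises
 * STATE_SLEEPING (needs an IPI to wake from hlt) or STATE_MWAIT (can be
 * woken by a plain store to the monitored word); cpu_idle_wakeup() below
 * relies on this to decide whether an IPI is required.
 */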
648
649 static void
650 cpu_idle_acpi(int busy)
651 {
652 int *state;
653
654 state = (int *)PCPU_PTR(monitorbuf);
655 *state = STATE_SLEEPING;
656
657 /* See comments in cpu_idle_hlt(). */
658 disable_intr();
659 if (sched_runnable())
660 enable_intr();
661 else if (cpu_idle_hook)
662 cpu_idle_hook();
663 else
664 __asm __volatile("sti; hlt");
665 *state = STATE_RUNNING;
666 }
667
668 static void
669 cpu_idle_hlt(int busy)
670 {
671 int *state;
672
673 state = (int *)PCPU_PTR(monitorbuf);
674 *state = STATE_SLEEPING;
675
676 /*
677  * Since we may be in a critical section from cpu_idle(), if
678  * an interrupt fires during that critical section we may have
679  * a pending preemption.  If the CPU halts, then that thread
680  * may not execute until a later interrupt awakens the CPU.
681  * To handle this race, check for a runnable thread after
682  * disabling interrupts and immediately return if one is
683  * found.  Also, we must absolutely guarantee that hlt is
684  * the next instruction after sti.  This ensures that any
685  * interrupt that fires after the call to disable_intr() will
686  * immediately awaken the CPU from hlt.  Finally, note that
687  * this works on x86 because sti enables interrupt recognition
688  * only after the following instruction has begun executing, so
689  * no interrupt can be delivered between sti and hlt; a pending
690  * interrupt instead wakes the CPU out of hlt.
691  */
692 disable_intr();
693 if (sched_runnable())
694 enable_intr();
695 else
696 __asm __volatile("sti; hlt");
697 *state = STATE_RUNNING;
698 }
699
700 /*
701 * MWAIT cpu power states. Lower 4 bits are sub-states.
702 */
703 #define MWAIT_C0 0xf0
704 #define MWAIT_C1 0x00
705 #define MWAIT_C2 0x10
706 #define MWAIT_C3 0x20
707 #define MWAIT_C4 0x30
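/*
 * The hint is passed to mwait in %eax: the upper nibble selects the
 * C-state as encoded above and the lower nibble a sub-state, so the
 * "a" (MWAIT_C1), "c" (0) operands in cpu_idle_mwait() below request
 * C1 with sub-state 0 and no extensions in %ecx.
 */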
708
709 static void
710 cpu_idle_mwait(int busy)
711 {
712 int *state;
713
714 state = (int *)PCPU_PTR(monitorbuf);
715 *state = STATE_MWAIT;
716
717 /* See comments in cpu_idle_hlt(). */
718 disable_intr();
719 if (sched_runnable()) {
720 enable_intr();
721 *state = STATE_RUNNING;
722 return;
723 }
724 cpu_monitor(state, 0, 0);
725 if (*state == STATE_MWAIT)
726 __asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
727 else
728 enable_intr();
729 *state = STATE_RUNNING;
730 }
731
732 static void
733 cpu_idle_spin(int busy)
734 {
735 int *state;
736 int i;
737
738 state = (int *)PCPU_PTR(monitorbuf);
739 *state = STATE_RUNNING;
740
741 /*
742  * The sched_runnable() call is racy, but since it is made inside
743  * a loop, missing it once has little impact, if any (and is much
744  * better than not checking at all).
745  */
746 for (i = 0; i < 1000; i++) {
747 if (sched_runnable())
748 return;
749 cpu_spinwait();
750 }
751 }
752
753 /*
754 * C1E renders the local APIC timer dead, so we disable it by
755 * reading the Interrupt Pending Message register and clearing
756 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
757 *
758 * Reference:
759 * "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
760 * #32559 revision 3.00+
761 */
762 #define MSR_AMDK8_IPM 0xc0010055
763 #define AMDK8_SMIONCMPHALT (1ULL << 27)
764 #define AMDK8_C1EONCMPHALT (1ULL << 28)
765 #define AMDK8_CMPHALT (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)
766
767 static void
768 cpu_probe_amdc1e(void)
769 {
770
771 /*
772  * Detect the presence of the C1E capability, found mostly on
773  * recent dual-core (and later) K8-family CPUs.
774  */
775 if (cpu_vendor_id == CPU_VENDOR_AMD &&
776 (cpu_id & 0x00000f00) == 0x00000f00 &&
777 (cpu_id & 0x0fff0000) >= 0x00040000) {
778 cpu_ident_amdc1e = 1;
779 }
780 }
781
782 void (*cpu_idle_fn)(int) = cpu_idle_acpi;
783
784 void
785 cpu_idle(int busy)
786 {
787 uint64_t msr;
788
789 #ifdef SMP
790 if (mp_grab_cpu_hlt())
791 return;
792 #endif
793 /* If we are busy, try to use fast methods. */
794 if (busy) {
795 if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
796 cpu_idle_mwait(busy);
797 return;
798 }
799 }
800
801 /* Apply AMD APIC timer C1E workaround. */
802 if (cpu_ident_amdc1e) {
803 msr = rdmsr(MSR_AMDK8_IPM);
804 if (msr & AMDK8_CMPHALT)
805 wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
806 }
807
808 /* Call main idle method. */
809 cpu_idle_fn(busy);
810 }
811
812 int
813 cpu_idle_wakeup(int cpu)
814 {
815 struct pcpu *pcpu;
816 int *state;
817
818 pcpu = pcpu_find(cpu);
819 state = (int *)pcpu->pc_monitorbuf;
820 /*
821 * This doesn't need to be atomic since missing the race will
822 * simply result in unnecessary IPIs.
823 */
824 if (*state == STATE_SLEEPING)
825 return (0);
826 if (*state == STATE_MWAIT)
827 *state = STATE_RUNNING;
828 return (1);
829 }
830
831 /*
832 * Ordered by speed/power consumption.
833 */
834 struct {
835 void *id_fn;
836 char *id_name;
837 } idle_tbl[] = {
838 { cpu_idle_spin, "spin" },
839 { cpu_idle_mwait, "mwait" },
840 { cpu_idle_hlt, "hlt" },
841 { cpu_idle_acpi, "acpi" },
842 { NULL, NULL }
843 };
844
845 static int
846 idle_sysctl_available(SYSCTL_HANDLER_ARGS)
847 {
848 char *avail, *p;
849 int error;
850 int i;
851
852 avail = malloc(256, M_TEMP, M_WAITOK);
853 p = avail;
854 for (i = 0; idle_tbl[i].id_name != NULL; i++) {
855 if (strstr(idle_tbl[i].id_name, "mwait") &&
856 (cpu_feature2 & CPUID2_MON) == 0)
857 continue;
858 if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
859 cpu_idle_hook == NULL)
860 continue;
861 p += sprintf(p, "%s, ", idle_tbl[i].id_name);
862 }
863 error = sysctl_handle_string(oidp, avail, 0, req);
864 free(avail, M_TEMP);
865 return (error);
866 }
867
868 SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
869 0, 0, idle_sysctl_available, "A", "list of available idle functions");
870
871 static int
872 idle_sysctl(SYSCTL_HANDLER_ARGS)
873 {
874 char buf[16];
875 int error;
876 char *p;
877 int i;
878
879 p = "unknown";
880 for (i = 0; idle_tbl[i].id_name != NULL; i++) {
881 if (idle_tbl[i].id_fn == cpu_idle_fn) {
882 p = idle_tbl[i].id_name;
883 break;
884 }
885 }
886 strlcpy(buf, p, sizeof(buf));
887 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
888 if (error != 0 || req->newptr == NULL)
889 return (error);
890 for (i = 0; idle_tbl[i].id_name != NULL; i++) {
891 if (strstr(idle_tbl[i].id_name, "mwait") &&
892 (cpu_feature2 & CPUID2_MON) == 0)
893 continue;
894 if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
895 cpu_idle_hook == NULL)
896 continue;
897 if (strcmp(idle_tbl[i].id_name, buf))
898 continue;
899 cpu_idle_fn = idle_tbl[i].id_fn;
900 return (0);
901 }
902 return (EINVAL);
903 }
904
905 SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
906 idle_sysctl, "A", "currently selected idle function");
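/*
 * Illustrative usage from userland (output is an example only):
 *
 *	# sysctl machdep.idle_available
 *	machdep.idle_available: spin, mwait, hlt, acpi,
 *	# sysctl machdep.idle=hlt
 *
 * The available list depends on CPUID2_MON and on whether an ACPI
 * idle hook is installed.
 */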
907
908 /*
909 * Reset registers to default values on exec.
910 */
911 void
912 exec_setregs(td, entry, stack, ps_strings)
913 struct thread *td;
914 u_long entry;
915 u_long stack;
916 u_long ps_strings;
917 {
918 struct trapframe *regs = td->td_frame;
919 struct pcb *pcb = td->td_pcb;
920
921 mtx_lock(&dt_lock);
922 if (td->td_proc->p_md.md_ldt != NULL)
923 user_ldt_free(td);
924 else
925 mtx_unlock(&dt_lock);
926
927 pcb->pcb_fsbase = 0;
928 pcb->pcb_gsbase = 0;
929 clear_pcb_flags(pcb, PCB_32BIT | PCB_GS32BIT);
930 pcb->pcb_initial_fpucw = __INITIAL_FPUCW__;
931 set_pcb_flags(pcb, PCB_FULL_IRET);
932
933 bzero((char *)regs, sizeof(struct trapframe));
934 regs->tf_rip = entry;
935 regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
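/*
 * ((stack - 8) & ~0xFul) + 8 leaves %rsp 8 bytes below a 16-byte
 * boundary (i.e., %rsp + 8 is 16-byte aligned), mirroring the amd64
 * ABI stack alignment at a call site after the return address has
 * been pushed.
 */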
936 regs->tf_rdi = stack; /* argv */
937 regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
938 regs->tf_ss = _udatasel;
939 regs->tf_cs = _ucodesel;
940 regs->tf_ds = _udatasel;
941 regs->tf_es = _udatasel;
942 regs->tf_fs = _ufssel;
943 regs->tf_gs = _ugssel;
944 regs->tf_flags = TF_HASSEGS;
945 td->td_retval[1] = 0;
946
947 /*
948 * Reset the hardware debug registers if they were in use.
949 * They won't have any meaning for the newly exec'd process.
950 */
951 if (pcb->pcb_flags & PCB_DBREGS) {
952 pcb->pcb_dr0 = 0;
953 pcb->pcb_dr1 = 0;
954 pcb->pcb_dr2 = 0;
955 pcb->pcb_dr3 = 0;
956 pcb->pcb_dr6 = 0;
957 pcb->pcb_dr7 = 0;
958 if (pcb == PCPU_GET(curpcb)) {
959 /*
960 * Clear the debug registers on the running
961 * CPU, otherwise they will end up affecting
962 * the next process we switch to.
963 */
964 reset_dbregs();
965 }
966 clear_pcb_flags(pcb, PCB_DBREGS);
967 }
968
969 /*
970 * Drop the FP state if we hold it, so that the process gets a
971 * clean FP state if it uses the FPU again.
972 */
973 fpstate_drop(td);
974 }
975
976 void
977 cpu_setregs(void)
978 {
979 register_t cr0;
980
981 cr0 = rcr0();
982 /*
983 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the
984 * BSP. See the comments there about why we set them.
985 */
986 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
987 load_cr0(cr0);
988 }
989
990 /*
991 * Initialize amd64 and configure to run kernel
992 */
993
994 /*
995 * Initialize segments & interrupt table
996 */
997
998 struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor tables */
999 static struct gate_descriptor idt0[NIDT];
1000 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
1001
1002 static char dblfault_stack[PAGE_SIZE] __aligned(16);
1003
1004 static char nmi0_stack[PAGE_SIZE] __aligned(16);
1005 CTASSERT(sizeof(struct nmi_pcpu) == 16);
1006
1007 struct amd64tss common_tss[MAXCPU];
1008
1009 /*
1010 * Software prototypes -- in more palatable form.
1011 *
1012 * Keep GUFS32, GUGS32, GUCODE32 and GUDATA at the same
1013 * slots as corresponding segments for i386 kernel.
1014 */
1015 struct soft_segment_descriptor gdt_segs[] = {
1016 /* GNULL_SEL 0 Null Descriptor */
1017 { .ssd_base = 0x0,
1018 .ssd_limit = 0x0,
1019 .ssd_type = 0,
1020 .ssd_dpl = 0,
1021 .ssd_p = 0,
1022 .ssd_long = 0,
1023 .ssd_def32 = 0,
1024 .ssd_gran = 0 },
1025 /* GNULL2_SEL 1 Null Descriptor */
1026 { .ssd_base = 0x0,
1027 .ssd_limit = 0x0,
1028 .ssd_type = 0,
1029 .ssd_dpl = 0,
1030 .ssd_p = 0,
1031 .ssd_long = 0,
1032 .ssd_def32 = 0,
1033 .ssd_gran = 0 },
1034 /* GUFS32_SEL 2 32 bit %fs Descriptor for user */
1035 { .ssd_base = 0x0,
1036 .ssd_limit = 0xfffff,
1037 .ssd_type = SDT_MEMRWA,
1038 .ssd_dpl = SEL_UPL,
1039 .ssd_p = 1,
1040 .ssd_long = 0,
1041 .ssd_def32 = 1,
1042 .ssd_gran = 1 },
1043 /* GUGS32_SEL 3 32 bit %gs Descriptor for user */
1044 { .ssd_base = 0x0,
1045 .ssd_limit = 0xfffff,
1046 .ssd_type = SDT_MEMRWA,
1047 .ssd_dpl = SEL_UPL,
1048 .ssd_p = 1,
1049 .ssd_long = 0,
1050 .ssd_def32 = 1,
1051 .ssd_gran = 1 },
1052 /* GCODE_SEL 4 Code Descriptor for kernel */
1053 { .ssd_base = 0x0,
1054 .ssd_limit = 0xfffff,
1055 .ssd_type = SDT_MEMERA,
1056 .ssd_dpl = SEL_KPL,
1057 .ssd_p = 1,
1058 .ssd_long = 1,
1059 .ssd_def32 = 0,
1060 .ssd_gran = 1 },
1061 /* GDATA_SEL 5 Data Descriptor for kernel */
1062 { .ssd_base = 0x0,
1063 .ssd_limit = 0xfffff,
1064 .ssd_type = SDT_MEMRWA,
1065 .ssd_dpl = SEL_KPL,
1066 .ssd_p = 1,
1067 .ssd_long = 1,
1068 .ssd_def32 = 0,
1069 .ssd_gran = 1 },
1070 /* GUCODE32_SEL 6 32 bit Code Descriptor for user */
1071 { .ssd_base = 0x0,
1072 .ssd_limit = 0xfffff,
1073 .ssd_type = SDT_MEMERA,
1074 .ssd_dpl = SEL_UPL,
1075 .ssd_p = 1,
1076 .ssd_long = 0,
1077 .ssd_def32 = 1,
1078 .ssd_gran = 1 },
1079 /* GUDATA_SEL 7 32/64 bit Data Descriptor for user */
1080 { .ssd_base = 0x0,
1081 .ssd_limit = 0xfffff,
1082 .ssd_type = SDT_MEMRWA,
1083 .ssd_dpl = SEL_UPL,
1084 .ssd_p = 1,
1085 .ssd_long = 0,
1086 .ssd_def32 = 1,
1087 .ssd_gran = 1 },
1088 /* GUCODE_SEL 8 64 bit Code Descriptor for user */
1089 { .ssd_base = 0x0,
1090 .ssd_limit = 0xfffff,
1091 .ssd_type = SDT_MEMERA,
1092 .ssd_dpl = SEL_UPL,
1093 .ssd_p = 1,
1094 .ssd_long = 1,
1095 .ssd_def32 = 0,
1096 .ssd_gran = 1 },
1097 /* GPROC0_SEL 9 Proc 0 Tss Descriptor */
1098 { .ssd_base = 0x0,
1099 .ssd_limit = sizeof(struct amd64tss) + IOPAGES * PAGE_SIZE - 1,
1100 .ssd_type = SDT_SYSTSS,
1101 .ssd_dpl = SEL_KPL,
1102 .ssd_p = 1,
1103 .ssd_long = 0,
1104 .ssd_def32 = 0,
1105 .ssd_gran = 0 },
1106 /* Actually, the TSS is a system descriptor which is double size */
1107 { .ssd_base = 0x0,
1108 .ssd_limit = 0x0,
1109 .ssd_type = 0,
1110 .ssd_dpl = 0,
1111 .ssd_p = 0,
1112 .ssd_long = 0,
1113 .ssd_def32 = 0,
1114 .ssd_gran = 0 },
1115 /* GUSERLDT_SEL 11 LDT Descriptor */
1116 { .ssd_base = 0x0,
1117 .ssd_limit = 0x0,
1118 .ssd_type = 0,
1119 .ssd_dpl = 0,
1120 .ssd_p = 0,
1121 .ssd_long = 0,
1122 .ssd_def32 = 0,
1123 .ssd_gran = 0 },
1124 /* GUSERLDT_SEL 12 LDT Descriptor, double size */
1125 { .ssd_base = 0x0,
1126 .ssd_limit = 0x0,
1127 .ssd_type = 0,
1128 .ssd_dpl = 0,
1129 .ssd_p = 0,
1130 .ssd_long = 0,
1131 .ssd_def32 = 0,
1132 .ssd_gran = 0 },
1133 };
1134
1135 void
1136 setidt(idx, func, typ, dpl, ist)
1137 int idx;
1138 inthand_t *func;
1139 int typ;
1140 int dpl;
1141 int ist;
1142 {
1143 struct gate_descriptor *ip;
1144
1145 ip = idt + idx;
1146 ip->gd_looffset = (uintptr_t)func;
1147 ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
1148 ip->gd_ist = ist;
1149 ip->gd_xx = 0;
1150 ip->gd_type = typ;
1151 ip->gd_dpl = dpl;
1152 ip->gd_p = 1;
1153 ip->gd_hioffset = ((uintptr_t)func) >> 16;
1154 }
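/*
 * For example, the hammer_time() call below,
 * setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1), installs the
 * double fault handler as a kernel-only interrupt gate running on IST
 * stack 1 (the dblfault_stack declared above).
 */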
1155
1156 extern inthand_t
1157 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1158 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1159 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1160 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1161 IDTVEC(xmm), IDTVEC(dblfault),
1162 #ifdef KDTRACE_HOOKS
1163 IDTVEC(dtrace_ret),
1164 #endif
1165 IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
1166
1167 #ifdef DDB
1168 /*
1169 * Display the index and function name of any IDT entries that don't use
1170 * the default 'rsvd' entry point.
1171 */
1172 DB_SHOW_COMMAND(idt, db_show_idt)
1173 {
1174 struct gate_descriptor *ip;
1175 int idx;
1176 uintptr_t func;
1177
1178 ip = idt;
1179 for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
1180 func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset);
1181 if (func != (uintptr_t)&IDTVEC(rsvd)) {
1182 db_printf("%3d\t", idx);
1183 db_printsym(func, DB_STGY_PROC);
1184 db_printf("\n");
1185 }
1186 ip++;
1187 }
1188 }
1189 #endif
1190
1191 void
1192 sdtossd(sd, ssd)
1193 struct user_segment_descriptor *sd;
1194 struct soft_segment_descriptor *ssd;
1195 {
1196
1197 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1198 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1199 ssd->ssd_type = sd->sd_type;
1200 ssd->ssd_dpl = sd->sd_dpl;
1201 ssd->ssd_p = sd->sd_p;
1202 ssd->ssd_long = sd->sd_long;
1203 ssd->ssd_def32 = sd->sd_def32;
1204 ssd->ssd_gran = sd->sd_gran;
1205 }
1206
1207 void
1208 ssdtosd(ssd, sd)
1209 struct soft_segment_descriptor *ssd;
1210 struct user_segment_descriptor *sd;
1211 {
1212
1213 sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
1214 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
1215 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
1216 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
1217 sd->sd_type = ssd->ssd_type;
1218 sd->sd_dpl = ssd->ssd_dpl;
1219 sd->sd_p = ssd->ssd_p;
1220 sd->sd_long = ssd->ssd_long;
1221 sd->sd_def32 = ssd->ssd_def32;
1222 sd->sd_gran = ssd->ssd_gran;
1223 }
1224
1225 void
1226 ssdtosyssd(ssd, sd)
1227 struct soft_segment_descriptor *ssd;
1228 struct system_segment_descriptor *sd;
1229 {
1230
1231 sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
1232 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
1233 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
1234 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
1235 sd->sd_type = ssd->ssd_type;
1236 sd->sd_dpl = ssd->ssd_dpl;
1237 sd->sd_p = ssd->ssd_p;
1238 sd->sd_gran = ssd->ssd_gran;
1239 }
1240
1241 #if !defined(DEV_ATPIC) && defined(DEV_ISA)
1242 #include <isa/isavar.h>
1243 #include <isa/isareg.h>
1244 /*
1245 * Return a bitmap of the current interrupt requests. This is 8259-specific
1246 * and is only suitable for use at probe time.
1247 * This is only here to pacify sio. It is NOT FATAL if this doesn't work.
1248 * It shouldn't be here. There should probably be an APIC centric
1249 * implementation in the apic driver code, if at all.
1250 */
1251 intrmask_t
1252 isa_irq_pending(void)
1253 {
1254 u_char irr1;
1255 u_char irr2;
1256
1257 irr1 = inb(IO_ICU1);
1258 irr2 = inb(IO_ICU2);
1259 return ((irr2 << 8) | irr1);
1260 }
1261 #endif
1262
1263 u_int basemem;
1264
1265 static int
1266 add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
1267 {
1268 int i, insert_idx, physmap_idx;
1269
1270 physmap_idx = *physmap_idxp;
1271
1272 if (boothowto & RB_VERBOSE)
1273 printf("SMAP type=%02x base=%016lx len=%016lx\n",
1274 smap->type, smap->base, smap->length);
1275
1276 if (smap->type != SMAP_TYPE_MEMORY)
1277 return (1);
1278
1279 if (smap->length == 0)
1280 return (0);
1281
1282 /*
1283 * Find insertion point while checking for overlap. Start off by
1284 * assuming the new entry will be added to the end.
1285 */
1286 insert_idx = physmap_idx + 2;
1287 for (i = 0; i <= physmap_idx; i += 2) {
1288 if (smap->base < physmap[i + 1]) {
1289 if (smap->base + smap->length <= physmap[i]) {
1290 insert_idx = i;
1291 break;
1292 }
1293 if (boothowto & RB_VERBOSE)
1294 printf(
1295 "Overlapping memory regions, ignoring second region\n");
1296 return (1);
1297 }
1298 }
1299
1300 /* See if we can prepend to the next entry. */
1301 if (insert_idx <= physmap_idx &&
1302 smap->base + smap->length == physmap[insert_idx]) {
1303 physmap[insert_idx] = smap->base;
1304 return (1);
1305 }
1306
1307 /* See if we can append to the previous entry. */
1308 if (insert_idx > 0 && smap->base == physmap[insert_idx - 1]) {
1309 physmap[insert_idx - 1] += smap->length;
1310 return (1);
1311 }
1312
1313 physmap_idx += 2;
1314 *physmap_idxp = physmap_idx;
1315 if (physmap_idx == PHYSMAP_SIZE) {
1316 printf(
1317 "Too many segments in the physical address map, giving up\n");
1318 return (0);
1319 }
1320
1321 /*
1322 * Move the last 'N' entries down to make room for the new
1323 * entry if needed.
1324 */
1325 for (i = physmap_idx; i > insert_idx; i -= 2) {
1326 physmap[i] = physmap[i - 2];
1327 physmap[i + 1] = physmap[i - 1];
1328 }
1329
1330 /* Insert the new entry. */
1331 physmap[insert_idx] = smap->base;
1332 physmap[insert_idx + 1] = smap->base + smap->length;
1333 return (1);
1334 }
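/*
 * For example (hypothetical entries): with physmap already holding
 * [0x100000, 0x200000), an SMAP entry [0x200000, 0x300000) is appended
 * by extending physmap[insert_idx - 1] to 0x300000, while an entry
 * [0xe0000, 0x100000) is prepended by lowering physmap[insert_idx];
 * only a non-adjacent entry consumes a new base/bound pair.
 */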
1335
1336 /*
1337 * Populate the (physmap) array with base/bound pairs describing the
1338 * available physical memory in the system, then test this memory and
1339 * build the phys_avail array describing the actually-available memory.
1340 *
1341 * Total memory size may be set by the kernel environment variable
1342 * hw.physmem or the compile-time define MAXMEM.
1343 *
1344 * XXX first should be vm_paddr_t.
1345 */
1346 static void
1347 getmemsize(caddr_t kmdp, u_int64_t first)
1348 {
1349 int i, physmap_idx, pa_indx, da_indx;
1350 vm_paddr_t pa, physmap[PHYSMAP_SIZE];
1351 u_long physmem_tunable, memtest;
1352 pt_entry_t *pte;
1353 struct bios_smap *smapbase, *smap, *smapend;
1354 u_int32_t smapsize;
1355 quad_t dcons_addr, dcons_size;
1356
1357 bzero(physmap, sizeof(physmap));
1358 basemem = 0;
1359 physmap_idx = 0;
1360
1361 /*
1362  * Get the memory map from INT 15:E820, kindly supplied by the loader.
1363  *
1364  * subr_module.c says:
1365  * "Consumer may safely assume that size value precedes data."
1366  * i.e., an int32_t immediately precedes the smap.
1367  */
1368 smapbase = (struct bios_smap *)preload_search_info(kmdp,
1369 MODINFO_METADATA | MODINFOMD_SMAP);
1370 if (smapbase == NULL)
1371 panic("No BIOS smap info from loader!");
1372
1373 smapsize = *((u_int32_t *)smapbase - 1);
1374 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
1375
1376 for (smap = smapbase; smap < smapend; smap++)
1377 if (!add_smap_entry(smap, physmap, &physmap_idx))
1378 break;
1379
1380 /*
1381 * Find the 'base memory' segment for SMP
1382 */
1383 basemem = 0;
1384 for (i = 0; i <= physmap_idx; i += 2) {
1385 if (physmap[i] == 0x00000000) {
1386 basemem = physmap[i + 1] / 1024;
1387 break;
1388 }
1389 }
1390 if (basemem == 0)
1391 panic("BIOS smap did not include a basemem segment!");
1392
1393 #ifdef SMP
1394 /* make hole for AP bootstrap code */
1395 physmap[1] = mp_bootaddress(physmap[1] / 1024);
1396 #endif
1397
1398 /*
1399 * Maxmem isn't the "maximum memory", it's one larger than the
1400 * highest page of the physical address space. It should be
1401 * called something like "Maxphyspage". We may adjust this
1402 * based on ``hw.physmem'' and the results of the memory test.
1403 */
1404 Maxmem = atop(physmap[physmap_idx + 1]);
1405
1406 #ifdef MAXMEM
1407 Maxmem = MAXMEM / 4;
1408 #endif
1409
1410 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
1411 Maxmem = atop(physmem_tunable);
1412
1413 /*
1414 * By default enable the memory test on real hardware, and disable
1415 * it if we appear to be running in a VM. This avoids touching all
1416 * pages unnecessarily, which doesn't matter on real hardware but is
1417 * bad for shared VM hosts. Use a general name so that
1418 * one could eventually do more with the code than just disable it.
1419 */
1420 memtest = (vm_guest > VM_GUEST_NO) ? 0 : 1;
1421 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
1422
1423 /*
1424 * Don't allow MAXMEM or hw.physmem to extend the amount of memory
1425 * in the system.
1426 */
1427 if (Maxmem > atop(physmap[physmap_idx + 1]))
1428 Maxmem = atop(physmap[physmap_idx + 1]);
1429
1430 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1431 (boothowto & RB_VERBOSE))
1432 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1433
1434 /* call pmap initialization to make new kernel address space */
1435 pmap_bootstrap(&first);
1436
1437 /*
1438 * Size up each available chunk of physical memory.
1439 */
1440 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1441 pa_indx = 0;
1442 da_indx = 1;
1443 phys_avail[pa_indx++] = physmap[0];
1444 phys_avail[pa_indx] = physmap[0];
1445 dump_avail[da_indx] = physmap[0];
1446 pte = CMAP1;
1447
1448 /*
1449 * Get dcons buffer address
1450 */
1451 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
1452 getenv_quad("dcons.size", &dcons_size) == 0)
1453 dcons_addr = 0;
1454
1455 /*
1456 * physmap is in bytes, so when converting to page boundaries,
1457 * round up the start address and round down the end address.
1458 */
1459 for (i = 0; i <= physmap_idx; i += 2) {
1460 vm_paddr_t end;
1461
1462 end = ptoa((vm_paddr_t)Maxmem);
1463 if (physmap[i + 1] < end)
1464 end = trunc_page(physmap[i + 1]);
1465 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1466 int tmp, page_bad, full;
1467 int *ptr = (int *)CADDR1;
1468
1469 full = FALSE;
1470 /*
1471 * block out kernel memory as not available.
1472 */
1473 if (pa >= 0x100000 && pa < first)
1474 goto do_dump_avail;
1475
1476 /*
1477 * block out dcons buffer
1478 */
1479 if (dcons_addr > 0
1480 && pa >= trunc_page(dcons_addr)
1481 && pa < dcons_addr + dcons_size)
1482 goto do_dump_avail;
1483
1484 page_bad = FALSE;
1485 if (memtest == 0)
1486 goto skip_memtest;
1487
1488 /*
1489  * Map the page into the kernel: valid, read/write, non-cacheable.
1490  */
1491 *pte = pa | PG_V | PG_RW | PG_N;
1492 invltlb();
1493
1494 tmp = *(int *)ptr;
1495 /*
1496 * Test for alternating 1's and 0's
1497 */
1498 *(volatile int *)ptr = 0xaaaaaaaa;
1499 if (*(volatile int *)ptr != 0xaaaaaaaa)
1500 page_bad = TRUE;
1501 /*
1502 * Test for alternating 0's and 1's
1503 */
1504 *(volatile int *)ptr = 0x55555555;
1505 if (*(volatile int *)ptr != 0x55555555)
1506 page_bad = TRUE;
1507 /*
1508 * Test for all 1's
1509 */
1510 *(volatile int *)ptr = 0xffffffff;
1511 if (*(volatile int *)ptr != 0xffffffff)
1512 page_bad = TRUE;
1513 /*
1514 * Test for all 0's
1515 */
1516 *(volatile int *)ptr = 0x0;
1517 if (*(volatile int *)ptr != 0x0)
1518 page_bad = TRUE;
1519 /*
1520 * Restore original value.
1521 */
1522 *(int *)ptr = tmp;
1523
1524 skip_memtest:
1525 /*
1526 * Adjust array of valid/good pages.
1527 */
1528 if (page_bad == TRUE)
1529 continue;
1530 /*
1531  * If this good page is a continuation of the
1532  * previous set of good pages, then just increase
1533  * the end pointer.  Otherwise start a new chunk.
1534  * Note that the "end" entries point one past the
1535  * last valid page, making each range >= start and < end.
1536  * If we're also doing a speculative memory
1537  * test and we're at or past the end, bump up Maxmem
1538  * so that we keep going.  The first bad page
1539  * will terminate the loop.
1540  */
1541 if (phys_avail[pa_indx] == pa) {
1542 phys_avail[pa_indx] += PAGE_SIZE;
1543 } else {
1544 pa_indx++;
1545 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
1546 printf(
1547 "Too many holes in the physical address space, giving up\n");
1548 pa_indx--;
1549 full = TRUE;
1550 goto do_dump_avail;
1551 }
1552 phys_avail[pa_indx++] = pa; /* start */
1553 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
1554 }
1555 physmem++;
1556 do_dump_avail:
1557 if (dump_avail[da_indx] == pa) {
1558 dump_avail[da_indx] += PAGE_SIZE;
1559 } else {
1560 da_indx++;
1561 if (da_indx == DUMP_AVAIL_ARRAY_END) {
1562 da_indx--;
1563 goto do_next;
1564 }
1565 dump_avail[da_indx++] = pa; /* start */
1566 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
1567 }
1568 do_next:
1569 if (full)
1570 break;
1571 }
1572 }
1573 *pte = 0;
1574 invltlb();
1575
1576 /*
1577 * XXX
1578 * The last chunk must contain at least one page plus the message
1579 * buffer to avoid complicating other code (message buffer address
1580 * calculation, etc.).
1581 */
1582 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
1583 round_page(msgbufsize) >= phys_avail[pa_indx]) {
1584 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
1585 phys_avail[pa_indx--] = 0;
1586 phys_avail[pa_indx--] = 0;
1587 }
1588
1589 Maxmem = atop(phys_avail[pa_indx]);
1590
1591 /* Trim off space for the message buffer. */
1592 phys_avail[pa_indx] -= round_page(msgbufsize);
1593
1594 /* Map the message buffer. */
1595 msgbufp = (struct msgbuf *)PHYS_TO_DMAP(phys_avail[pa_indx]);
1596 }
1597
1598 u_int64_t
1599 hammer_time(u_int64_t modulep, u_int64_t physfree)
1600 {
1601 caddr_t kmdp;
1602 int gsel_tss, x;
1603 struct pcpu *pc;
1604 struct nmi_pcpu *np;
1605 struct xstate_hdr *xhdr;
1606 u_int64_t msr;
1607 char *env;
1608 size_t kstack0_sz;
1609
1610 thread0.td_kstack = physfree + KERNBASE;
1611 thread0.td_kstack_pages = KSTACK_PAGES;
1612 kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
1613 bzero((void *)thread0.td_kstack, kstack0_sz);
1614 physfree += kstack0_sz;
1615
1616 /*
1617  * This may be done better later if it gets more high-level
1618  * components in it.  If so, just link td->td_proc here.
1619  */
1620 proc_linkup0(&proc0, &thread0);
1621
1622 preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
1623 preload_bootstrap_relocate(KERNBASE);
1624 kmdp = preload_search_by_type("elf kernel");
1625 if (kmdp == NULL)
1626 kmdp = preload_search_by_type("elf64 kernel");
1627 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
1628 kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE;
1629 #ifdef DDB
1630 ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
1631 ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
1632 #endif
1633
1634 /* Init basic tunables, hz etc */
1635 init_param1();
1636
1637 /*
1638 * make gdt memory segments
1639 */
1640 for (x = 0; x < NGDT; x++) {
1641 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
1642 x != GUSERLDT_SEL && x != (GUSERLDT_SEL + 1))
1643 ssdtosd(&gdt_segs[x], &gdt[x]);
1644 }
1645 gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0];
1646 ssdtosyssd(&gdt_segs[GPROC0_SEL],
1647 (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
1648
1649 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
1650 r_gdt.rd_base = (long) gdt;
1651 lgdt(&r_gdt);
1652 pc = &__pcpu[0];
1653
1654 wrmsr(MSR_FSBASE, 0); /* User value */
1655 wrmsr(MSR_GSBASE, (u_int64_t)pc);
1656 wrmsr(MSR_KGSBASE, 0); /* User value while in the kernel */
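/*
 * While in the kernel, MSR_GSBASE points at the per-CPU data (so
 * %gs-relative accesses reach struct pcpu) and MSR_KGSBASE holds the
 * user value; the swapgs instruction exchanges them on kernel
 * entry/exit.
 */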
1657
1658 pcpu_init(pc, 0, sizeof(struct pcpu));
1659 dpcpu_init((void *)(physfree + KERNBASE), 0);
1660 physfree += DPCPU_SIZE;
1661 PCPU_SET(prvspace, pc);
1662 PCPU_SET(curthread, &thread0);
1663 PCPU_SET(tssp, &common_tss[0]);
1664 PCPU_SET(commontssp, &common_tss[0]);
1665 PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
1666 PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]);
1667 PCPU_SET(fs32p, &gdt[GUFS32_SEL]);
1668 PCPU_SET(gs32p, &gdt[GUGS32_SEL]);
1669
1670 /*
1671 * Initialize mutexes.
1672 *
1673 * icu_lock: in order to allow an interrupt to occur in a critical
1674 * section, to set pcpu->ipending (etc...) properly, we
1675 * must be able to get the icu lock, so it can't be
1676 * under witness.
1677 */
1678 mutex_init();
1679 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
1680 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_DEF);
1681
1682 /* exceptions */
1683 for (x = 0; x < NIDT; x++)
1684 setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0);
1685 setidt(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0);
1686 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 0);
1687 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 2);
1688 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0);
1689 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0);
1690 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0);
1691 setidt(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0);
1692 setidt(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0);
1693 setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1);
1694 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0);
1695 setidt(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0);
1696 setidt(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0);
1697 setidt(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0);
1698 setidt(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0);
1699 setidt(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0);
1700 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0);
1701 setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0);
1702 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0);
1703 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0);
1704 #ifdef KDTRACE_HOOKS
1705 setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYSIGT, SEL_UPL, 0);
1706 #endif
1707
1708 r_idt.rd_limit = sizeof(idt0) - 1;
1709 r_idt.rd_base = (long) idt;
1710 lidt(&r_idt);
1711
1712 /*
1713 * Initialize the i8254 before the console so that console
1714 * initialization can use DELAY().
1715 */
1716 i8254_init();
1717
1718 /*
1719 * Initialize the console before we print anything out.
1720 */
1721 cninit();
1722
1723 #ifdef DEV_ISA
1724 #ifdef DEV_ATPIC
1725 elcr_probe();
1726 atpic_startup();
1727 #else
1728 /* Reset and mask the atpics and leave them shut down. */
1729 atpic_reset();
1730
1731 /*
1732 * Point the ICU spurious interrupt vectors at the APIC spurious
1733 * interrupt handler.
1734 */
1735 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
1736 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
1737 #endif
1738 #else
1739 #error "have you forgotten the isa device?"
1740 #endif
1741
1742 kdb_init();
1743
1744 #ifdef KDB
1745 if (boothowto & RB_KDB)
1746 kdb_enter(KDB_WHY_BOOTFLAGS,
1747 "Boot flags requested debugger");
1748 #endif
1749
1750 identify_cpu(); /* Final stage of CPU initialization */
1751 initializecpu(); /* Initialize CPU registers */
1752 initializecpucache();
1753
1754 /* doublefault stack space, runs on ist1 */
1755 common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];
1756
1757 /*
1758 * NMI stack, runs on ist2. The pcpu pointer is stored just
1759 * above the start of the ist2 stack.
1760 */
1761 np = ((struct nmi_pcpu *) &nmi0_stack[sizeof(nmi0_stack)]) - 1;
1762 np->np_pcpu = (register_t) pc;
1763 common_tss[0].tss_ist2 = (long) np;
1764
1765 /* Set the IO permission bitmap (empty due to tss seg limit) */
1766 common_tss[0].tss_iobase = sizeof(struct amd64tss) +
1767 IOPAGES * PAGE_SIZE;
1768
1769 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
1770 ltr(gsel_tss);
1771
1772 /* Set up the fast syscall stuff */
1773 msr = rdmsr(MSR_EFER) | EFER_SCE;
1774 wrmsr(MSR_EFER, msr);
1775 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
1776 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
1777 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
1778 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
1779 wrmsr(MSR_STAR, msr);
1780 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
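/*
 * Summary of the SYSCALL setup above: EFER_SCE enables the instruction,
 * LSTAR/CSTAR hold the 64-bit and 32-bit entry points, STAR supplies
 * the kernel and user segment selector bases, and SF_MASK lists the
 * rflags bits (notably PSL_I) that the CPU clears on entry.
 */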
1781
1782 getmemsize(kmdp, physfree);
1783 init_param2(physmem);
1784
1785 /* Now running on new page tables, configured, and u/iom is accessible. */
1786
1787 msgbufinit(msgbufp, msgbufsize);
1788 fpuinit();
1789
1790 /*
1791  * Set up thread0's pcb after fpuinit() has calculated the pcb +
1792  * fpu save area size.  Zero out the extended state header in
1793  * the fpu save area.
1794  */
1795 thread0.td_pcb = get_pcb_td(&thread0);
1796 bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
1797 if (use_xsave) {
1798 xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
1799 1);
1800 xhdr->xstate_bv = xsave_mask;
1801 }
1802 /* make an initial tss so cpu can get interrupt stack on syscall! */
1803 common_tss[0].tss_rsp0 = (vm_offset_t)thread0.td_pcb;
1804 /* Ensure the stack is aligned to 16 bytes */
1805 common_tss[0].tss_rsp0 &= ~0xFul;
1806 PCPU_SET(rsp0, common_tss[0].tss_rsp0);
1807 PCPU_SET(curpcb, thread0.td_pcb);
1808
1809 /* transfer to user mode */
1810
1811 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
1812 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
1813 _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);
1814 _ufssel = GSEL(GUFS32_SEL, SEL_UPL);
1815 _ugssel = GSEL(GUGS32_SEL, SEL_UPL);
1816
1817 load_ds(_udatasel);
1818 load_es(_udatasel);
1819 load_fs(_ufssel);
1820
1821 /* setup proc 0's pcb */
1822 thread0.td_pcb->pcb_flags = 0;
1823 thread0.td_pcb->pcb_cr3 = KPML4phys;
1824 thread0.td_frame = &proc0_tf;
1825
1826 env = getenv("kernelname");
1827 if (env != NULL)
1828 strlcpy(kernelname, env, sizeof(kernelname));
1829
1830 #ifdef XENHVM
1831 if (inw(0x10) == 0x49d2) {
1832 if (bootverbose)
1833 printf("Xen detected: disabling emulated block and network devices\n");
1834 outw(0x10, 3);
1835 }
1836 #endif
1837
1838 cpu_probe_amdc1e();
1839
1840 /* Location of kernel stack for locore */
1841 return ((u_int64_t)thread0.td_pcb);
1842 }
1843
1844 void
1845 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
1846 {
1847
1848 pcpu->pc_acpi_id = 0xffffffff;
1849 }
1850
1851 void
1852 spinlock_enter(void)
1853 {
1854 struct thread *td;
1855 register_t flags;
1856
1857 td = curthread;
1858 if (td->td_md.md_spinlock_count == 0) {
1859 flags = intr_disable();
1860 td->td_md.md_spinlock_count = 1;
1861 td->td_md.md_saved_flags = flags;
1862 } else
1863 td->td_md.md_spinlock_count++;
1864 critical_enter();
1865 }
1866
1867 void
1868 spinlock_exit(void)
1869 {
1870 struct thread *td;
1871 register_t flags;
1872
1873 td = curthread;
1874 critical_exit();
1875 flags = td->td_md.md_saved_flags;
1876 td->td_md.md_spinlock_count--;
1877 if (td->td_md.md_spinlock_count == 0)
1878 intr_restore(flags);
1879 }
1880
1881 /*
1882 * Construct a PCB from a trapframe. This is called from kdb_trap() where
1883 * we want to start a backtrace from the function that caused us to enter
1884 * the debugger. We have the context in the trapframe, but base the trace
1885 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
1886 * enough for a backtrace.
1887 */
1888 void
1889 makectx(struct trapframe *tf, struct pcb *pcb)
1890 {
1891
1892 pcb->pcb_r12 = tf->tf_r12;
1893 pcb->pcb_r13 = tf->tf_r13;
1894 pcb->pcb_r14 = tf->tf_r14;
1895 pcb->pcb_r15 = tf->tf_r15;
1896 pcb->pcb_rbp = tf->tf_rbp;
1897 pcb->pcb_rbx = tf->tf_rbx;
1898 pcb->pcb_rip = tf->tf_rip;
1899 pcb->pcb_rsp = tf->tf_rsp;
1900 }
1901
1902 int
1903 ptrace_set_pc(struct thread *td, unsigned long addr)
1904 {
1905 td->td_frame->tf_rip = addr;
1906 return (0);
1907 }
1908
1909 int
1910 ptrace_single_step(struct thread *td)
1911 {
	td->td_frame->tf_rflags |= PSL_T;
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	td->td_frame->tf_rflags &= ~PSL_T;
	return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tp;

	tp = td->td_frame;
	return (fill_frame_regs(tp, regs));
}

int
fill_frame_regs(struct trapframe *tp, struct reg *regs)
{
	regs->r_r15 = tp->tf_r15;
	regs->r_r14 = tp->tf_r14;
	regs->r_r13 = tp->tf_r13;
	regs->r_r12 = tp->tf_r12;
	regs->r_r11 = tp->tf_r11;
	regs->r_r10 = tp->tf_r10;
	regs->r_r9 = tp->tf_r9;
	regs->r_r8 = tp->tf_r8;
	regs->r_rdi = tp->tf_rdi;
	regs->r_rsi = tp->tf_rsi;
	regs->r_rbp = tp->tf_rbp;
	regs->r_rbx = tp->tf_rbx;
	regs->r_rdx = tp->tf_rdx;
	regs->r_rcx = tp->tf_rcx;
	regs->r_rax = tp->tf_rax;
	regs->r_rip = tp->tf_rip;
	regs->r_cs = tp->tf_cs;
	regs->r_rflags = tp->tf_rflags;
	regs->r_rsp = tp->tf_rsp;
	regs->r_ss = tp->tf_ss;
	if (tp->tf_flags & TF_HASSEGS) {
		regs->r_ds = tp->tf_ds;
		regs->r_es = tp->tf_es;
		regs->r_fs = tp->tf_fs;
		regs->r_gs = tp->tf_gs;
	} else {
		regs->r_ds = 0;
		regs->r_es = 0;
		regs->r_fs = 0;
		regs->r_gs = 0;
	}
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tp;
	register_t rflags;

	tp = td->td_frame;
	rflags = regs->r_rflags & 0xffffffff;
	if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_r15 = regs->r_r15;
	tp->tf_r14 = regs->r_r14;
	tp->tf_r13 = regs->r_r13;
	tp->tf_r12 = regs->r_r12;
	tp->tf_r11 = regs->r_r11;
	tp->tf_r10 = regs->r_r10;
	tp->tf_r9 = regs->r_r9;
	tp->tf_r8 = regs->r_r8;
	tp->tf_rdi = regs->r_rdi;
	tp->tf_rsi = regs->r_rsi;
	tp->tf_rbp = regs->r_rbp;
	tp->tf_rbx = regs->r_rbx;
	tp->tf_rdx = regs->r_rdx;
	tp->tf_rcx = regs->r_rcx;
	tp->tf_rax = regs->r_rax;
	tp->tf_rip = regs->r_rip;
	tp->tf_cs = regs->r_cs;
	tp->tf_rflags = rflags;
	tp->tf_rsp = regs->r_rsp;
	tp->tf_ss = regs->r_ss;
	if (0) {	/* XXXKIB */
		tp->tf_ds = regs->r_ds;
		tp->tf_es = regs->r_es;
		tp->tf_fs = regs->r_fs;
		tp->tf_gs = regs->r_gs;
		tp->tf_flags = TF_HASSEGS;
		set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	}
	return (0);
}

/* XXX check all this stuff! */
/* externalize from sv_xmm */
static void
fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
{
	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* pcb -> fpregs */
	bzero(fpregs, sizeof(*fpregs));

	/* FPU control/status */
	penv_fpreg->en_cw = penv_xmm->en_cw;
	penv_fpreg->en_sw = penv_xmm->en_sw;
	penv_fpreg->en_tw = penv_xmm->en_tw;
	penv_fpreg->en_opcode = penv_xmm->en_opcode;
	penv_fpreg->en_rip = penv_xmm->en_rip;
	penv_fpreg->en_rdp = penv_xmm->en_rdp;
	penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
	penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;

	/* FPU registers (8 x87 registers, 80 bits == 10 bytes each) */
	for (i = 0; i < 8; ++i)
		bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);

	/* SSE registers (16 XMM registers, 128 bits == 16 bytes each) */
	for (i = 0; i < 16; ++i)
		bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);
}

/* internalize from fpregs into sv_xmm */
static void
set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
{
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
	int i;

	/* fpregs -> pcb */
	/* FPU control/status */
	penv_xmm->en_cw = penv_fpreg->en_cw;
	penv_xmm->en_sw = penv_fpreg->en_sw;
	penv_xmm->en_tw = penv_fpreg->en_tw;
	penv_xmm->en_opcode = penv_fpreg->en_opcode;
	penv_xmm->en_rip = penv_fpreg->en_rip;
	penv_xmm->en_rdp = penv_fpreg->en_rdp;
	penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
	penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask;

	/* FPU registers (8 x87 registers, 80 bits == 10 bytes each) */
	for (i = 0; i < 8; ++i)
		bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);

	/* SSE registers (16 XMM registers, 128 bits == 16 bytes each) */
	for (i = 0; i < 16; ++i)
		bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);
}

/* externalize from td->pcb */
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{

	KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
	    P_SHOULDSTOP(td->td_proc),
	    ("not suspended thread %p", td));
	fpugetregs(td);
	fill_fpregs_xmm(get_pcb_user_save_td(td), fpregs);
	return (0);
}

/* internalize to td->pcb */
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{

	set_fpregs_xmm(fpregs, get_pcb_user_save_td(td));
	fpuuserinited(td);
	return (0);
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct pcb *pcb;
	struct trapframe *tp;

	pcb = td->td_pcb;
	tp = td->td_frame;
	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_rsp);
	PROC_UNLOCK(curthread->td_proc);
	mcp->mc_r15 = tp->tf_r15;
	mcp->mc_r14 = tp->tf_r14;
	mcp->mc_r13 = tp->tf_r13;
	mcp->mc_r12 = tp->tf_r12;
	mcp->mc_r11 = tp->tf_r11;
	mcp->mc_r10 = tp->tf_r10;
	mcp->mc_r9 = tp->tf_r9;
	mcp->mc_r8 = tp->tf_r8;
	mcp->mc_rdi = tp->tf_rdi;
	mcp->mc_rsi = tp->tf_rsi;
	mcp->mc_rbp = tp->tf_rbp;
	mcp->mc_rbx = tp->tf_rbx;
	mcp->mc_rcx = tp->tf_rcx;
	mcp->mc_rflags = tp->tf_rflags;
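	/*
	 * GET_MC_CLEAR_RET makes the saved context look as if the
	 * interrupted syscall returned success: clear the return value
	 * registers and the carry flag, which signals a syscall error.
	 */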
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_rax = 0;
		mcp->mc_rdx = 0;
		mcp->mc_rflags &= ~PSL_C;
	} else {
		mcp->mc_rax = tp->tf_rax;
		mcp->mc_rdx = tp->tf_rdx;
	}
	mcp->mc_rip = tp->tf_rip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_rsp = tp->tf_rsp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_es = tp->tf_es;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_gs = tp->tf_gs;
	mcp->mc_flags = tp->tf_flags;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp, NULL, 0);
	mcp->mc_fsbase = pcb->pcb_fsbase;
	mcp->mc_gsbase = pcb->pcb_gsbase;
	mcp->mc_xfpustate = 0;
	mcp->mc_xfpustate_len = 0;
	bzero(mcp->mc_spare, sizeof(mcp->mc_spare));
	return (0);
}

/*
 * Set machine context.
 *
 * Note that we only set the user-modifiable bits of rflags, and we never
 * touch the cs selector.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct pcb *pcb;
	struct trapframe *tp;
	char *xfpustate;
	long rflags;
	int ret;

	pcb = td->td_pcb;
	tp = td->td_frame;
	if (mcp->mc_len != sizeof(*mcp) ||
	    (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
		return (EINVAL);
	rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
	    (tp->tf_rflags & ~PSL_USERCHANGE);
	if (mcp->mc_flags & _MC_HASFPXSTATE) {
		if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
		    sizeof(struct savefpu))
			return (EINVAL);
		xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
		ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
		    mcp->mc_xfpustate_len);
		if (ret != 0)
			return (ret);
	} else
		xfpustate = NULL;
	ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
	if (ret != 0)
		return (ret);
	tp->tf_r15 = mcp->mc_r15;
	tp->tf_r14 = mcp->mc_r14;
	tp->tf_r13 = mcp->mc_r13;
	tp->tf_r12 = mcp->mc_r12;
	tp->tf_r11 = mcp->mc_r11;
	tp->tf_r10 = mcp->mc_r10;
	tp->tf_r9 = mcp->mc_r9;
	tp->tf_r8 = mcp->mc_r8;
	tp->tf_rdi = mcp->mc_rdi;
	tp->tf_rsi = mcp->mc_rsi;
	tp->tf_rbp = mcp->mc_rbp;
	tp->tf_rbx = mcp->mc_rbx;
	tp->tf_rdx = mcp->mc_rdx;
	tp->tf_rcx = mcp->mc_rcx;
	tp->tf_rax = mcp->mc_rax;
	tp->tf_rip = mcp->mc_rip;
	tp->tf_rflags = rflags;
	tp->tf_rsp = mcp->mc_rsp;
	tp->tf_ss = mcp->mc_ss;
	tp->tf_flags = mcp->mc_flags;
	if (tp->tf_flags & TF_HASSEGS) {
		tp->tf_ds = mcp->mc_ds;
		tp->tf_es = mcp->mc_es;
		tp->tf_fs = mcp->mc_fs;
		tp->tf_gs = mcp->mc_gs;
	}
	if (mcp->mc_flags & _MC_HASBASES) {
		pcb->pcb_fsbase = mcp->mc_fsbase;
		pcb->pcb_gsbase = mcp->mc_gsbase;
	}
	set_pcb_flags(pcb, PCB_FULL_IRET);
	return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
    size_t xfpusave_len)
{
	size_t max_len, len;

	mcp->mc_ownedfp = fpugetregs(td);
	bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
	    sizeof(mcp->mc_fpstate));
	mcp->mc_fpformat = fpuformat();
	if (!use_xsave || xfpusave_len == 0)
		return;
	max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
	len = xfpusave_len;
	if (len > max_len) {
		/* Zero the tail of the buffer that will not be copied. */
		bzero(xfpusave + max_len, len - max_len);
		len = max_len;
	}
	mcp->mc_flags |= _MC_HASFPXSTATE;
	mcp->mc_xfpustate_len = len;
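	/* The extended XSAVE state is stored just after the legacy area. */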
	bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
}

static int
set_fpcontext(struct thread *td, const mcontext_t *mcp, char *xfpustate,
    size_t xfpustate_len)
{
	struct savefpu *fpstate;
	int error;

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
		error = 0;
	} else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
		fpstate = (struct savefpu *)&mcp->mc_fpstate;
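		/*
		 * Mask out reserved MXCSR bits; attempting to load them
		 * would cause a general protection fault.
		 */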
		fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask;
		error = fpusetregs(td, fpstate, xfpustate, xfpustate_len);
	} else
		return (EINVAL);
	return (error);
}

void
fpstate_drop(struct thread *td)
{

	KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
	critical_enter();
	if (PCPU_GET(fpcurthread) == td)
		fpudrop();
	/*
	 * XXX force a full drop of the fpu. The above only drops it if we
	 * owned it.
	 *
	 * XXX I don't much like fpugetuserregs()'s semantics of doing a full
	 * drop. Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig(). But
	 * sendsig() is the only caller of fpugetuserregs()... perhaps we just
	 * have too many layers.
	 */
	clear_pcb_flags(curthread->td_pcb,
	    PCB_FPUINITDONE | PCB_USERFPUINITDONE);
	critical_exit();
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
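	/*
	 * %dr4 and %dr5 are legacy aliases of %dr6 and %dr7, and
	 * %dr8-%dr15 are not implemented, so report them all as zero.
	 */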
	dbregs->dr[4] = 0;
	dbregs->dr[5] = 0;
	dbregs->dr[8] = 0;
	dbregs->dr[9] = 0;
	dbregs->dr[10] = 0;
	dbregs->dr[11] = 0;
	dbregs->dr[12] = 0;
	dbregs->dr[13] = 0;
	dbregs->dr[14] = 0;
	dbregs->dr[15] = 0;
	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP or a general protection fault right
		 * here.  The upper 32 bits of dr6 and dr7 must not be set.
		 */
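		/*
		 * Layout sketch (from the x86 debug architecture): the
		 * access type for breakpoint i occupies bits 16+4*i and
		 * 17+4*i of %dr7, and its length the two bits above that.
		 * Access type 0b10 (an I/O breakpoint) is only defined
		 * when CR4.DE is set and is rejected here, as is an
		 * 8-byte length for 32-bit code.
		 */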
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (td->td_frame->tf_cs == _ucode32sel &&
			    DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8)
				return (EINVAL);
		}
		if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 ||
		    (dbregs->dr[7] & 0xffffffff00000000ul) != 0)
			return (EINVAL);

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within the
		 * process's address space.  If a process could do this, it
		 * could halt the system by setting a breakpoint in the kernel
		 * (if ddb was enabled).  Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * the process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		set_pcb_flags(pcb, PCB_DBREGS);
	}

	return (0);
}

void
reset_dbregs(void)
{

	load_dr7(0);	/* Turn off the control bits first */
	load_dr0(0);
	load_dr1(0);
	load_dr2(0);
	load_dr3(0);
	load_dr6(0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0 otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int64_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int64_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * None of the local (L0-L3) or global (G0-G3) breakpoint
		 * enable bits in the dr7 register are set, thus the trap
		 * couldn't have been caused by the hardware debug registers.
		 */
		return (0);
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers.
		 */
		return (0);
	}

	/*
	 * At least one of the breakpoints was hit; check which ones and
	 * whether any of them are user space addresses.
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space.
			 */
			return (nbp);
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return (0);
}

#ifdef KDB

/*
 * Provide inb() and outb() as functions.  They are normally only available
 * as inline functions, and thus cannot be called from the debugger.
 */
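/*
 * Illustrative ddb usage (assuming ddb's standard "call" command):
 *
 *	db> call inb_(0x64)
 *	db> call outb_(0x80, 1)
 */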

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);

u_char
inb_(u_short port)
{
	return (inb(port));
}

void
outb_(u_short port, u_char data)
{
	outb(port, data);
}

#endif /* KDB */