/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.1/sys/i386/i386/vm_machdep.c 330908 2018-03-14 04:00:00Z gordon $");

#include "opt_isa.h"
#include "opt_npx.h"
#include "opt_reset.h"
#include "opt_cpu.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#ifdef CPU_ELAN
#include <machine/elan_mmcr.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#ifdef PC98
#include <pc98/cbus/cbus.h>
#else
#include <isa/isareg.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>
#endif

#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif

_Static_assert(OFFSETOF_CURTHREAD == offsetof(struct pcpu, pc_curthread),
    "OFFSETOF_CURTHREAD does not correspond with offset of pc_curthread.");
_Static_assert(OFFSETOF_CURPCB == offsetof(struct pcpu, pc_curpcb),
    "OFFSETOF_CURPCB does not correspond with offset of pc_curpcb.");
_Static_assert(__OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
111 "__OFFSETOF_MONINORBUF does not correspond with offset of pc_monitorbuf.");

static void cpu_reset_real(void);
#ifdef SMP
static void cpu_reset_proxy(void);
static u_int cpu_reset_proxyid;
static volatile u_int cpu_reset_proxy_active;
#endif

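/*
 * Return the thread's FPU/extended-state user save area, which is
 * carved out of the top of its kernel stack and aligned for XSAVE.
 */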
union savefpu *
get_pcb_user_save_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
	KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
	return ((union savefpu *)p);
}

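/*
 * The user save area lies immediately above the pcb in memory.
 */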
union savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
	vm_offset_t p;

	p = (vm_offset_t)(pcb + 1);
	return ((union savefpu *)p);
}

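/*
 * Return the pcb, which sits just below the FPU save area at the top
 * of the thread's kernel stack.
 */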
struct pcb *
get_pcb_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
	    sizeof(struct pcb);
	return ((struct pcb *)p);
}

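/*
 * Allocate an FPU save area large enough for any supported extended
 * state, initializing the xsave header when XSAVE is in use.
 */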
void *
alloc_fpusave(int flags)
{
	void *res;
	struct savefpu_ymm *sf;

	res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
	if (use_xsave) {
		sf = (struct savefpu_ymm *)res;
		bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
		sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
	}
	return (res);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt, *pldt1;

			mtx_lock_spin(&dt_lock);
			if ((pldt1 = mdp1->md_ldt) != NULL &&
			    pldt1->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_deref(pldt1);
			} else
				mtx_unlock_spin(&dt_lock);
		}
		return;
	}

	/* Ensure that td1's pcb is up to date. */
	if (td1 == curthread)
		td1->td_pcb->pcb_gs = rgs();
	critical_enter();
	if (PCPU_GET(fpcurthread) == td1)
		npxsave(td1->td_pcb->pcb_save);
	critical_exit();

	/* Point the pcb to the top of the stack */
	pcb2 = get_pcb_td(td2);
	td2->td_pcb = pcb2;

	/* Copy td1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Properly initialize pcb_save */
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
	    cpu_max_ext_state_size);

	/* Point mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The -16 is so we can expand the trapframe if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - 16) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

	td2->td_frame->tf_eax = 0;		/* Child returns zero */
	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
	td2->td_frame->tf_edx = 1;

	/*
	 * If the parent process has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame unless the debugger had set PF_FORK
	 * on the parent.  Otherwise, the child will receive a (likely
	 * unexpected) SIGTRAP when it executes the first instruction after
	 * returning to userland.
	 */
	if ((p1->p_pfsflags & PF_FORK) == 0)
		td2->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
#if defined(PAE) || defined(PAE_TABLES)
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
	pcb2->pcb_ebx = (int)td2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl = PSL_KERNEL;		/* ints disabled */
	/*-
	 * pcb2->pcb_dr*: cloned above.
	 * pcb2->pcb_savefpu: cloned above.
	 * pcb2->pcb_flags: cloned above.
	 * pcb2->pcb_onfault: cloned above (always NULL here?).
	 * pcb2->pcb_gs: cloned above.
	 * pcb2->pcb_ext: cleared below.
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&dt_lock);
	if (mdp2->md_ldt != NULL) {
		if (flags & RFMEM) {
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&dt_lock);

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	td->td_pcb->pcb_esi = (int) func;	/* function */
	td->td_pcb->pcb_ebx = (int) arg;	/* first arg */
}

void
cpu_exit(struct thread *td)
{

	/*
	 * If this process has a custom LDT, release it.  Reset pcb->pcb_gs
	 * and %gs before we free it in case they refer to an LDT entry.
	 */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	} else
		mtx_unlock_spin(&dt_lock);
}

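/*
 * A thread is exiting: drop any live FPU state and disable hardware
 * breakpoints the thread may have had active.
 */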
void
cpu_thread_exit(struct thread *td)
{

	critical_enter();
	if (td == PCPU_GET(fpcurthread))
		npxdrop();
	critical_exit();

	/* Disable any hardware breakpoints. */
	if (td->td_pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		td->td_pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != NULL) {
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_arena, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = NULL;
	}
}

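/*
 * The i386 MD layer needs no work on thread swapin or swapout, so
 * these hooks are empty.
 */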
void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

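/*
 * Lay out the pcb, trap frame, and FPU save area on a newly allocated
 * thread's kernel stack.
 */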
void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;
	struct xstate_hdr *xhdr;

	td->td_pcb = pcb = get_pcb_td(td);
	td->td_frame = (struct trapframe *)((caddr_t)pcb - 16) - 1;
	pcb->pcb_ext = NULL;
	pcb->pcb_save = get_pcb_user_save_pcb(pcb);
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
		bzero(xhdr, sizeof(*xhdr));
		xhdr->xstate_bv = xsave_mask;
	}
}

void
cpu_thread_free(struct thread *td)
{

	cpu_thread_clean(td);
}

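/*
 * Apply the i386 syscall return convention: results go in %eax/%edx
 * with the carry flag clear on success; on failure, the ABI's errno
 * value goes in %eax with carry set.  ERESTART rewinds %eip so the
 * syscall instruction is re-executed; EJUSTRETURN leaves the frame
 * untouched.
 */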
void
cpu_set_syscall_retval(struct thread *td, int error)
{

	switch (error) {
	case 0:
		td->td_frame->tf_eax = td->td_retval[0];
		td->td_frame->tf_edx = td->td_retval[1];
		td->td_frame->tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
		 * 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		td->td_frame->tf_eip -= td->td_frame->tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
		td->td_frame->tf_eax = SV_ABI_ERRNO(td->td_proc, error);
		td->td_frame->tf_eflags |= PSL_C;
		break;
	}
}

/*
 * Initialize machine state, mostly pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go back to the fork_return(), which
 * finalizes the thread state and handles peculiarities of the first
 * return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE |
	    PCB_KERNNPX);
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
	    cpu_max_ext_state_size);

	/*
	 * Create a new fresh stack for the new thread.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * If the current thread has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame.  Otherwise, the new thread will
	 * receive a (likely unexpected) SIGTRAP when it executes the first
	 * instruction after returning to userland.
	 */
	td->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td;		/* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl &= ~(PSL_I);	/* interrupts must be disabled */
	pcb2->pcb_gs = rgs();
	/*
	 * If we didn't copy the pcb, we'd need to do the following registers:
	 * pcb2->pcb_cr3: cloned above.
	 * pcb2->pcb_dr*: cloned above.
	 * pcb2->pcb_savefpu: cloned above.
	 * pcb2->pcb_flags: cloned above.
	 * pcb2->pcb_onfault: cloned above (always NULL here?).
	 * pcb2->pcb_gs: cloned above.
	 * pcb2->pcb_ext: cleared below.
	 */
	pcb2->pcb_ext = NULL;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the entry
	 * function.
	 */
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
	td->td_frame->tf_eip = (int)entry;

	/* Pass the argument to the entry point. */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)arg);
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct segment_descriptor sd;
	uint32_t base;

	/*
	 * Construct a descriptor and store it in the pcb for
	 * the next context switch.  Also store it in the gdt
	 * so that the load of tf_fs into %fs will activate it
	 * at return to userland.
	 */
	base = (uint32_t)tls_base;
	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sd.sd_hilimit = 0xf;
	sd.sd_type = SDT_MEMRWA;
	sd.sd_dpl = SEL_UPL;
	sd.sd_p = 1;
	sd.sd_xx = 0;
	sd.sd_def32 = 1;
	sd.sd_gran = 1;
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	if (td == curthread) {
		PCPU_GET(fsgs_gdt)[1] = sd;
		load_gs(GSEL(GUGS_SEL, SEL_UPL));
	}
	critical_exit();
	return (0);
}

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

#ifdef SMP
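/*
 * Run on the BSP after it is restarted by cpu_reset(): acknowledge the
 * handshake, stop the CPU that requested the reset, and then perform
 * the reset itself.
 */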
static void
cpu_reset_proxy(void)
{
	cpuset_t tcrp;

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to see that we've started */
	CPU_SETOF(cpu_reset_proxyid, &tcrp);
	stop_cpus(tcrp);
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif

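/*
 * Reset the machine.  If we are not running on the BSP, hand the reset
 * off to cpu_reset_proxy() on CPU 0; otherwise stop the other CPUs and
 * reset directly.
 */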
void
cpu_reset(void)
{
#ifdef XBOX
	if (arch_i386_is_xbox) {
		/* Kick the PIC16L, it can reboot the box */
		pic16l_reboot();
		for (;;);
	}
#endif

#ifdef SMP
	cpuset_t map;
	u_int cnt;

	if (smp_started) {
		map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &map);
		CPU_NAND(&map, &stopped_cpus);
		if (!CPU_EMPTY(&map)) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);
		}

		if (PCPU_GET(cpuid) != 0) {
			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");

			/* Restart CPU #0. */
			/* XXX: restart_cpus(1 << 0); */
			CPU_SETOF(0, &started_cpus);
			wmb();

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			enable_intr();
			cpu_reset_proxy_active = 2;

			while (1);
			/* NOTREACHED */
		}

		DELAY(1000000);
	}
#endif
	cpu_reset_real();
	/* NOTREACHED */
}

static void
cpu_reset_real(void)
{
	struct region_descriptor null_idt;
#ifndef PC98
	int b;
#endif

	disable_intr();
#ifdef CPU_ELAN
	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;
#endif

	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);
		outl(0xcfc, 0xf);
	}

#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);	/* SHUT0 = 0. */
		outb(0x37, 0x0b);	/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);	/* Reset. */
#else
#if !defined(BROKEN_KEYBOARD_RESET)
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
#endif

	/*
	 * Attempt to force a reset via the Reset Control register at
	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
	 * transitions from 0 to 1.  Bit 1 selects the type of reset
	 * to attempt: 0 selects a "soft" reset, and 1 selects a
	 * "hard" reset.  We try a "hard" reset.  The first write sets
	 * bit 1 to select a "hard" reset and clears bit 2.  The
	 * second write forces a 0 -> 1 transition in bit 2 to trigger
	 * a reset.
	 */
	outb(0xcf9, 0x2);
	outb(0xcf9, 0x6);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */

	/*
	 * Attempt to force a reset via the Fast A20 and Init register
	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
	 * preserve bit 1 while setting bit 0.  We also must clear bit
	 * 0 before setting it if it isn't already clear.
	 */
	b = inb(0x92);
	if (b != 0xff) {
		if ((b & 0x1) != 0)
			outb(0x92, b & 0xfe);
		outb(0x92, b | 0x1);
		DELAY(500000);	/* wait 0.5 sec to see if that did it */
	}
#endif /* PC98 */

	printf("No known reset method worked, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */

	/* Wipe the IDT. */
	null_idt.rd_limit = 0;
	null_idt.rd_base = 0;
	lidt(&null_idt);

	/* "good night, sweet prince .... <THUNK!>" */
	breakpoint();

	/* NOTREACHED */
	while(1);
}

/*
 * MD side of sf_buf_alloc(): install the sf_buf's virtual-to-physical
 * mapping.
 */
void
sf_buf_map(struct sf_buf *sf, int flags)
{
	pt_entry_t opte, *ptep;

	/*
	 * Update the sf_buf's virtual-to-physical mapping, flushing the
	 * virtual address from the TLB.  Since the reference count for
	 * the sf_buf's old mapping was zero, that mapping is not
	 * currently in use.  Consequently, there is no need to exchange
	 * the old and new PTEs atomically, even under PAE.
	 */
	ptep = vtopte(sf->kva);
	opte = *ptep;
	*ptep = VM_PAGE_TO_PHYS(sf->m) | pgeflag | PG_RW | PG_V |
	    pmap_cache_bits(sf->m->md.pat_mode, 0);

	/*
	 * Avoid unnecessary TLB invalidations: If the sf_buf's old
	 * virtual-to-physical mapping was not used, then any processor
	 * that has invalidated the sf_buf's virtual address from its TLB
	 * since the last used mapping need not invalidate again.
	 */
#ifdef SMP
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		CPU_ZERO(&sf->cpumask);

	sf_buf_shootdown(sf, flags);
#else
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		pmap_invalidate_page(kernel_pmap, sf->kva);
#endif
}

#ifdef SMP
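/*
 * Invalidate the TLB entry for an sf_buf mapping on this CPU and,
 * unless SFB_CPUPRIVATE is requested, on every other CPU whose TLB may
 * still hold a stale entry; sf->cpumask tracks which CPUs are current.
 */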
void
sf_buf_shootdown(struct sf_buf *sf, int flags)
{
	cpuset_t other_cpus;
	u_int cpuid;

	sched_pin();
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sf->cpumask)) {
		CPU_SET(cpuid, &sf->cpumask);
		invlpg(sf->kva);
	}
	if ((flags & SFB_CPUPRIVATE) == 0) {
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_NAND(&other_cpus, &sf->cpumask);
		if (!CPU_EMPTY(&other_cpus)) {
			CPU_OR(&sf->cpumask, &other_cpus);
			smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap);
		}
	}
	sched_unpin();
}
#endif

/*
 * MD part of sf_buf_free().
 */
int
sf_buf_unmap(struct sf_buf *sf)
{

	return (0);
}

static void
sf_buf_invalidate(struct sf_buf *sf)
{
	vm_page_t m = sf->m;

	/*
	 * Use pmap_qenter to update the PTE for the existing mapping;
	 * in particular, the PAT settings are recalculated.
	 */
	pmap_qenter(sf->kva, &m, 1);
	pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE, FALSE);
}

/*
 * Invalidate the cache lines that may belong to the page, if a
 * (possibly old) mapping of the page by an sf buffer exists.  Returns
 * TRUE when a mapping was found and the cache was invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{

	return (sf_buf_process_page(m, sf_buf_invalidate));
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return (0);
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return (1);
}