/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/i386/i386/vm_machdep.c 296945 2016-03-16 17:01:24Z kib $");

#include "opt_isa.h"
#include "opt_npx.h"
#include "opt_reset.h"
#include "opt_cpu.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#ifdef CPU_ELAN
#include <machine/elan_mmcr.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#ifdef XEN
#include <xen/hypervisor.h>
#endif
#ifdef PC98
#include <pc98/cbus/cbus.h>
#else
#include <x86/isa/isa.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>
#endif

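/*
 * Default size of the sf_buf pool: a base of 512 buffers plus 16 per
 * configured user.  sf_buf_init() lets the kern.ipc.nsfbufs tunable
 * override this value.
 */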
#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif

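/*
 * SSE support is compiled in for I686_CPU kernels unless explicitly
 * disabled with CPU_DISABLE_SSE.
 */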
#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define	CPU_ENABLE_SSE
#endif

_Static_assert(OFFSETOF_CURTHREAD == offsetof(struct pcpu, pc_curthread),
    "OFFSETOF_CURTHREAD does not correspond with offset of pc_curthread.");
_Static_assert(OFFSETOF_CURPCB == offsetof(struct pcpu, pc_curpcb),
    "OFFSETOF_CURPCB does not correspond with offset of pc_curpcb.");

static void	cpu_reset_real(void);
#ifdef SMP
static void	cpu_reset_proxy(void);
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
#endif

static int nsfbufs;
static int nsfbufspeak;
static int nsfbufsused;

SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
    "Number of sendfile(2) sf_bufs at peak usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
    "Number of sendfile(2) sf_bufs in use");
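/*
 * Example: the counters above can be inspected from userland with
 * sysctl(8), e.g. "sysctl kern.ipc.nsfbufs kern.ipc.nsfbufsused".
 */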

static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

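/*
 * Hash a page into sf_buf_active by its index within vm_page_array;
 * hashinit() guarantees that sf_buf_hashmask is a power of two minus
 * one, so the mask below selects a bucket.
 */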
#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int	sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;

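/*
 * Per-thread kernel stack layout (addresses grow down): the
 * XSAVE-aligned user FPU save area sits at the very top of the stack,
 * the pcb immediately below it, and the trapframe below that; see
 * get_pcb_td() and cpu_thread_alloc().
 */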
union savefpu *
get_pcb_user_save_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
	KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
	return ((union savefpu *)p);
}

union savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
	vm_offset_t p;

	p = (vm_offset_t)(pcb + 1);
	return ((union savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
	    sizeof(struct pcb);
	return ((struct pcb *)p);
}

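/*
 * Allocate an FPU save area.  When XSAVE is in use, the extended state
 * header must be zeroed and its xstate_bv seeded with the enabled
 * state mask so that the first restore sees valid contents.
 */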
void *
alloc_fpusave(int flags)
{
	void *res;
#ifdef CPU_ENABLE_SSE
	struct savefpu_ymm *sf;
#endif

	res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
#ifdef CPU_ENABLE_SSE
	if (use_xsave) {
		sf = (struct savefpu_ymm *)res;
		bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
		sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
	}
#endif
	return (res);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt, *pldt1;

			mtx_lock_spin(&dt_lock);
			if ((pldt1 = mdp1->md_ldt) != NULL &&
			    pldt1->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_deref(pldt1);
			} else
				mtx_unlock_spin(&dt_lock);
		}
		return;
	}

	/* Ensure that td1's pcb is up to date. */
	if (td1 == curthread)
		td1->td_pcb->pcb_gs = rgs();
#ifdef DEV_NPX
	critical_enter();
	if (PCPU_GET(fpcurthread) == td1)
		npxsave(td1->td_pcb->pcb_save);
	critical_exit();
#endif

253
254 /* Point the pcb to the top of the stack */
255 pcb2 = get_pcb_td(td2);
256 td2->td_pcb = pcb2;
257
258 /* Copy td1's pcb */
259 bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
260
261 /* Properly initialize pcb_save */
262 pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
263 bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
264 cpu_max_ext_state_size);
265
266 /* Point mdproc and then copy over td1's contents */
267 mdp2 = &p2->p_md;
268 bcopy(&p1->p_md, mdp2, sizeof(*mdp2));
269
270 /*
271 * Create a new fresh stack for the new process.
272 * Copy the trap frame for the return to user mode as if from a
273 * syscall. This copies most of the user mode register values.
274 * The -16 is so we can expand the trapframe if we go to vm86.
275 */
276 td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - 16) - 1;
277 bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));
278
279 td2->td_frame->tf_eax = 0; /* Child returns zero */
280 td2->td_frame->tf_eflags &= ~PSL_C; /* success */
281 td2->td_frame->tf_edx = 1;
282
283 /*
284 * If the parent process has the trap bit set (i.e. a debugger had
285 * single stepped the process to the system call), we need to clear
286 * the trap flag from the new frame unless the debugger had set PF_FORK
287 * on the parent. Otherwise, the child will receive a (likely
288 * unexpected) SIGTRAP when it executes the first instruction after
289 * returning to userland.
290 */
291 if ((p1->p_pfsflags & PF_FORK) == 0)
292 td2->td_frame->tf_eflags &= ~PSL_T;
293
294 /*
295 * Set registers for trampoline to user mode. Leave space for the
296 * return address on stack. These are the kernel mode register values.
297 */
298 #if defined(PAE) || defined(PAE_TABLES)
299 pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt);
300 #else
301 pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
302 #endif
303 pcb2->pcb_edi = 0;
304 pcb2->pcb_esi = (int)fork_return; /* fork_trampoline argument */
305 pcb2->pcb_ebp = 0;
306 pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
307 pcb2->pcb_ebx = (int)td2; /* fork_trampoline argument */
308 pcb2->pcb_eip = (int)fork_trampoline;
309 pcb2->pcb_psl = PSL_KERNEL; /* ints disabled */
310 /*-
311 * pcb2->pcb_dr*: cloned above.
312 * pcb2->pcb_savefpu: cloned above.
313 * pcb2->pcb_flags: cloned above.
314 * pcb2->pcb_onfault: cloned above (always NULL here?).
315 * pcb2->pcb_gs: cloned above.
316 * pcb2->pcb_ext: cleared below.
317 */
318
319 /*
320 * XXX don't copy the i/o pages. this should probably be fixed.
321 */
322 pcb2->pcb_ext = 0;
323
324 /* Copy the LDT, if necessary. */
325 mtx_lock_spin(&dt_lock);
326 if (mdp2->md_ldt != NULL) {
327 if (flags & RFMEM) {
328 mdp2->md_ldt->ldt_refcnt++;
329 } else {
330 mdp2->md_ldt = user_ldt_alloc(mdp2,
331 mdp2->md_ldt->ldt_len);
332 if (mdp2->md_ldt == NULL)
333 panic("could not copy LDT");
334 }
335 }
336 mtx_unlock_spin(&dt_lock);
337
338 /* Setup to release spin count in fork_exit(). */
339 td2->td_md.md_spinlock_count = 1;
	/*
	 * XXX XEN: check whether PSL_USER is handled correctly here.
	 */
	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	td->td_pcb->pcb_esi = (int)func;	/* function */
	td->td_pcb->pcb_ebx = (int)arg;		/* first arg */
}

void
cpu_exit(struct thread *td)
{

	/*
	 * If this process has a custom LDT, release it.  Reset pcb->pcb_gs
	 * and %gs before we free it in case they refer to an LDT entry.
	 */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	} else
		mtx_unlock_spin(&dt_lock);
}

void
cpu_thread_exit(struct thread *td)
{

#ifdef DEV_NPX
	critical_enter();
	if (td == PCPU_GET(fpcurthread))
		npxdrop();
	critical_exit();
#endif

	/* Disable any hardware breakpoints. */
	if (td->td_pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		td->td_pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != NULL) {
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_arena, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = NULL;
	}
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;
#ifdef CPU_ENABLE_SSE
	struct xstate_hdr *xhdr;
#endif

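	/*
	 * Carve the pcb and trapframe out of the top of the kernel
	 * stack; the -16 leaves room to expand the trapframe on entry
	 * to vm86 mode, matching cpu_fork().
	 */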
	td->td_pcb = pcb = get_pcb_td(td);
	td->td_frame = (struct trapframe *)((caddr_t)pcb - 16) - 1;
	pcb->pcb_ext = NULL;
	pcb->pcb_save = get_pcb_user_save_pcb(pcb);
#ifdef CPU_ENABLE_SSE
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
		bzero(xhdr, sizeof(*xhdr));
		xhdr->xstate_bv = xsave_mask;
	}
#endif
}

void
cpu_thread_free(struct thread *td)
{

	cpu_thread_clean(td);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{

	switch (error) {
	case 0:
		td->td_frame->tf_eax = td->td_retval[0];
		td->td_frame->tf_edx = td->td_retval[1];
		td->td_frame->tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
		 * 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		td->td_frame->tf_eip -= td->td_frame->tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
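		/*
		 * Translate the error into the ABI's errno space when
		 * the sysent supplies a translation table, then return
		 * it in %eax with the carry flag set to mark failure.
		 */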
		if (td->td_proc->p_sysent->sv_errsize) {
			if (error >= td->td_proc->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = td->td_proc->p_sysent->sv_errtbl[error];
		}
		td->td_frame->tf_eax = error;
		td->td_frame->tf_eflags |= PSL_C;
		break;
	}
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back to
 * userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE |
	    PCB_KERNNPX);
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
	    cpu_max_ext_state_size);

	/*
	 * Create a new fresh stack for the new thread.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * If the current thread has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame.  Otherwise, the new thread will
	 * receive a (likely unexpected) SIGTRAP when it executes the first
	 * instruction after returning to userland.
	 */
	td->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td;		/* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl &= ~(PSL_I);	/* interrupts must be disabled */
	pcb2->pcb_gs = rgs();
	/*
	 * If we didn't copy the pcb, we would need to set up the
	 * following registers:
	 * pcb2->pcb_cr3:	cloned above.
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}

/*
 * Set the machine state for performing an upcall that has to be done in
 * thread_userret() so that those upcalls generated in thread_userret()
 * itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the uts
	 * function.
	 */
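	/*
	 * The stack pointer is aligned to a 16-byte boundary, with one
	 * word left below ss_sp + ss_size; the entry argument is then
	 * stored one word above the new tf_esp by suword() below,
	 * mimicking a normal call frame with a dummy return address.
	 */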
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
	td->td_frame->tf_eip = (int)entry;

	/*
	 * Pass the address of the mailbox for this kse to the uts
	 * function as a parameter on the stack.
	 */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)arg);
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct segment_descriptor sd;
	uint32_t base;

	/*
	 * Construct a descriptor and store it in the pcb for
	 * the next context switch.  Also store it in the gdt
	 * so that the load of %gs will activate it at return
	 * to userland.
	 */
	base = (uint32_t)tls_base;
	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sd.sd_hilimit = 0xf;
	sd.sd_type = SDT_MEMRWA;
	sd.sd_dpl = SEL_UPL;
	sd.sd_p = 1;
	sd.sd_xx = 0;
	sd.sd_def32 = 1;
	sd.sd_gran = 1;
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	if (td == curthread) {
		PCPU_GET(fsgs_gdt)[1] = sd;
		load_gs(GSEL(GUGS_SEL, SEL_UPL));
	}
	critical_exit();
	return (0);
}

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

#ifdef SMP
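/*
 * cpu_reset() may be invoked on an AP, but the actual reset must run
 * on the BSP: cpu_reset() restarts the BSP with cpustop_restartfunc
 * pointing here, and this proxy then stops the requesting AP and
 * performs the real reset.
 */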
static void
cpu_reset_proxy(void)
{
	cpuset_t tcrp;

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to see that we've started */
	CPU_SETOF(cpu_reset_proxyid, &tcrp);
	stop_cpus(tcrp);
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif

void
cpu_reset(void)
{
#ifdef XBOX
	if (arch_i386_is_xbox) {
		/* Kick the PIC16L, it can reboot the box */
		pic16l_reboot();
		for (;;)
			;
	}
#endif

#ifdef SMP
	cpuset_t map;
	u_int cnt;

	if (smp_started) {
		map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &map);
		CPU_NAND(&map, &stopped_cpus);
		if (!CPU_EMPTY(&map)) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);
		}

		if (PCPU_GET(cpuid) != 0) {
			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");

			/* Restart CPU #0. */
			/* XXX: restart_cpus(1 << 0); */
			CPU_SETOF(0, &started_cpus);
			wmb();

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			enable_intr();
			cpu_reset_proxy_active = 2;

			while (1)
				;
			/* NOTREACHED */
		}

		DELAY(1000000);
	}
#endif
	cpu_reset_real();
	/* NOTREACHED */
}

static void
cpu_reset_real(void)
{
	struct region_descriptor null_idt;
#ifndef PC98
	int b;
#endif

	disable_intr();
#ifdef XEN
	if (smp_processor_id() == 0)
		HYPERVISOR_shutdown(SHUTDOWN_reboot);
	else
		HYPERVISOR_shutdown(SHUTDOWN_poweroff);
#endif
#ifdef CPU_ELAN
	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;
#endif

	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);
		outl(0xcfc, 0xf);
	}

#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);	/* SHUT0 = 0. */
		outb(0x37, 0x0b);	/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);	/* Reset. */
#else
#if !defined(BROKEN_KEYBOARD_RESET)
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
#endif

	/*
	 * Attempt to force a reset via the Reset Control register at
	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
	 * transitions from 0 to 1.  Bit 1 selects the type of reset
	 * to attempt: 0 selects a "soft" reset, and 1 selects a
	 * "hard" reset.  We try a "hard" reset.  The first write sets
	 * bit 1 to select a "hard" reset and clears bit 2.  The
	 * second write forces a 0 -> 1 transition in bit 2 to trigger
	 * a reset.
	 */
	outb(0xcf9, 0x2);
	outb(0xcf9, 0x6);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */

	/*
	 * Attempt to force a reset via the Fast A20 and Init register
	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
	 * preserve bit 1 while setting bit 0.  We also must clear bit
	 * 0 before setting it if it isn't already clear.
	 */
	b = inb(0x92);
	if (b != 0xff) {
		if ((b & 0x1) != 0)
			outb(0x92, b & 0xfe);
		outb(0x92, b | 0x1);
		DELAY(500000);	/* wait 0.5 sec to see if that did it */
	}
#endif /* PC98 */

	printf("No known reset method worked, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */

	/* Wipe the IDT. */
	null_idt.rd_limit = 0;
	null_idt.rd_base = 0;
	lidt(&null_idt);

	/* "good night, sweet prince .... <THUNK!>" */
	breakpoint();

	/* NOTREACHED */
	while (1)
		;
}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Invalidate the cache lines that may belong to the page, if a
 * (possibly old) mapping of the page by an sf buffer exists.  Returns
 * TRUE when the mapping was found and the cache invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{
	struct sf_head *hash_list;
	struct sf_buf *sf;
	boolean_t ret;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	ret = FALSE;
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			/*
			 * Use pmap_qenter() to update the pte for the
			 * existing mapping; in particular, the PAT
			 * settings are recalculated.
			 */
			pmap_qenter(sf->kva, &m, 1);
			pmap_invalidate_cache_range(sf->kva, sf->kva +
			    PAGE_SIZE, FALSE);
			ret = TRUE;
			break;
		}
	}
	mtx_unlock(&sf_buf_lock);
	return (ret);
}

/*
 * Get an sf_buf from the freelist.  May block if none are available.
 */
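/*
 * Typical usage, as a sketch:
 *
 *	sf = sf_buf_alloc(m, 0);
 *	... access the page through sf_buf_kva(sf) ...
 *	sf_buf_free(sf);
 */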
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	pt_entry_t opte, *ptep;
	struct sf_head *hash_list;
	struct sf_buf *sf;
#ifdef SMP
	cpuset_t other_cpus;
	u_int cpuid;
#endif
	int error;

	KASSERT(curthread->td_pinned > 0 || (flags & SFB_CPUPRIVATE) == 0,
	    ("sf_buf_alloc(SFB_CPUPRIVATE): curthread not pinned"));
	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
#ifdef SMP
			goto shootdown;
#else
			goto done;
#endif
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);

	/*
	 * Update the sf_buf's virtual-to-physical mapping, flushing the
	 * virtual address from the TLB.  Since the reference count for
	 * the sf_buf's old mapping was zero, that mapping is not
	 * currently in use.  Consequently, there is no need to exchange
	 * the old and new PTEs atomically, even under PAE.
	 */
	ptep = vtopte(sf->kva);
	opte = *ptep;
#ifdef XEN
	PT_SET_MA(sf->kva, xpmap_ptom(VM_PAGE_TO_PHYS(m)) | pgeflag
	    | PG_RW | PG_V | pmap_cache_bits(m->md.pat_mode, 0));
#else
	*ptep = VM_PAGE_TO_PHYS(m) | pgeflag | PG_RW | PG_V |
	    pmap_cache_bits(m->md.pat_mode, 0);
#endif

	/*
	 * Avoid unnecessary TLB invalidations: If the sf_buf's old
	 * virtual-to-physical mapping was not used, then any processor
	 * that has invalidated the sf_buf's virtual address from its TLB
	 * since the last used mapping need not invalidate again.
	 */
#ifdef SMP
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		CPU_ZERO(&sf->cpumask);
shootdown:
	sched_pin();
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sf->cpumask)) {
		CPU_SET(cpuid, &sf->cpumask);
		invlpg(sf->kva);
	}
	if ((flags & SFB_CPUPRIVATE) == 0) {
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_NAND(&other_cpus, &sf->cpumask);
		if (!CPU_EMPTY(&other_cpus)) {
			CPU_OR(&sf->cpumask, &other_cpus);
			smp_masked_invlpg(other_cpus, sf->kva);
		}
	}
	sched_unpin();
#else
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		pmap_invalidate_page(kernel_pmap, sf->kva);
#endif
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}

/*
 * Remove a reference from the given sf_buf, adding it to the free
 * list when its reference count reaches zero.  A freed sf_buf still,
 * however, retains its virtual-to-physical mapping until it is
 * recycled or reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
#ifdef XEN
		/*
		 * Xen doesn't like having dangling R/W mappings
		 */
		pmap_qremove(sf->kva, 1);
		sf->m = NULL;
		LIST_REMOVE(sf, list_entry);
#endif
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return (0);
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return (1);
}