/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.2/sys/i386/i386/vm_machdep.c 332760 2018-04-19 07:15:40Z avg $");

#include "opt_isa.h"
#include "opt_npx.h"
#include "opt_reset.h"
#include "opt_cpu.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#ifdef XBOX
#include <machine/xbox.h>
#endif

#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif
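
/*
 * Illustrative sizing only: with, say, maxusers == 384 the formula
 * above gives 512 + 384 * 16 == 6656 sf_bufs.  This is just the
 * default; it is believed the kern.ipc.nsfbufs tunable can override
 * it at boot time.
 */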

_Static_assert(OFFSETOF_CURTHREAD == offsetof(struct pcpu, pc_curthread),
    "OFFSETOF_CURTHREAD does not correspond with offset of pc_curthread.");
_Static_assert(OFFSETOF_CURPCB == offsetof(struct pcpu, pc_curpcb),
    "OFFSETOF_CURPCB does not correspond with offset of pc_curpcb.");
_Static_assert(__OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
    "__OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");

union savefpu *
get_pcb_user_save_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
	KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
	return ((union savefpu *)p);
}

union savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
	vm_offset_t p;

	p = (vm_offset_t)(pcb + 1);
	return ((union savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
	    sizeof(struct pcb);
	return ((struct pcb *)p);
}

void *
alloc_fpusave(int flags)
{
	void *res;
	struct savefpu_ymm *sf;

	res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
	if (use_xsave) {
		sf = (struct savefpu_ymm *)res;
		bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
		sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
	}
	return (res);
}
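
/*
 * Layout sketch (illustrative, not part of the build): the accessors
 * above slice the top of each thread's kernel stack as
 *
 *	td_kstack + td_kstack_pages * PAGE_SIZE	<- stack top
 *	  [user FPU save area, XSAVE_AREA_ALIGN-aligned]
 *	  [struct pcb]				<- get_pcb_td(td)
 *	  [trapframe and kernel stack proper below]
 *
 * so the save area always begins immediately after the pcb:
 *
 *	struct pcb *pcb = get_pcb_td(td);
 *	KASSERT((union savefpu *)(pcb + 1) == get_pcb_user_save_td(td),
 *	    ("user save area must follow the pcb"));
 */
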
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt, *pldt1;

			mtx_lock_spin(&dt_lock);
			if ((pldt1 = mdp1->md_ldt) != NULL &&
			    pldt1->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_deref(pldt1);
			} else
				mtx_unlock_spin(&dt_lock);
		}
		return;
	}

	/* Ensure that td1's pcb is up to date. */
	if (td1 == curthread)
		td1->td_pcb->pcb_gs = rgs();
	critical_enter();
	if (PCPU_GET(fpcurthread) == td1)
		npxsave(td1->td_pcb->pcb_save);
	critical_exit();

	/* Point the pcb to the top of the stack */
	pcb2 = get_pcb_td(td2);
	td2->td_pcb = pcb2;

	/* Copy td1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Properly initialize pcb_save */
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
	    cpu_max_ext_state_size);

	/* Point mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The -16 is so we can expand the trapframe if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - 16) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

	td2->td_frame->tf_eax = 0;		/* Child returns zero */
	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
	td2->td_frame->tf_edx = 1;

	/*
	 * If the parent process has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame unless the debugger had set PF_FORK
	 * on the parent.  Otherwise, the child will receive a (likely
	 * unexpected) SIGTRAP when it executes the first instruction after
	 * returning to userland.
	 */
	if ((p1->p_pfsflags & PF_FORK) == 0)
		td2->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
#if defined(PAE) || defined(PAE_TABLES)
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
	pcb2->pcb_ebx = (int)td2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl = PSL_KERNEL;		/* ints disabled */
	/*-
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&dt_lock);
	if (mdp2->md_ldt != NULL) {
		if (flags & RFMEM) {
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&dt_lock);

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}
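
/*
 * Illustrative control flow, sketched from the comment above: when the
 * scheduler first selects the child, cpu_switch() loads the pcb_esp and
 * pcb_eip values set in cpu_fork() and "returns" into the trampoline,
 * roughly
 *
 *	cpu_switch() -> fork_trampoline()
 *	    -> fork_exit(fork_return, td2, td2->td_frame)
 *		-> fork_return(td2, frame)
 *		    -> return to user mode through the copied trapframe
 *
 * with pcb_esi and pcb_ebx supplying the fork_return and td2 arguments.
 */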

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	td->td_pcb->pcb_esi = (int) func;	/* function */
	td->td_pcb->pcb_ebx = (int) arg;	/* first arg */
}
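
/*
 * Usage sketch (illustrative; in the tree this is driven by the MI
 * kproc/kthread creation code): after fork1() and cpu_fork() have
 * aimed the child at fork_trampoline(), a call such as
 *
 *	cpu_fork_kthread_handler(td2, my_kthread_main, softc);
 *
 * (my_kthread_main and softc are hypothetical names) makes fork_exit()
 * call my_kthread_main(softc, frame) instead of fork_return(td2, frame),
 * so the new thread never returns to user mode.
 */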

void
cpu_exit(struct thread *td)
{

	/*
	 * If this process has a custom LDT, release it.  Reset pcb->pcb_gs
	 * and %gs before we free it in case they refer to an LDT entry.
	 */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	} else
		mtx_unlock_spin(&dt_lock);
}

void
cpu_thread_exit(struct thread *td)
{

	critical_enter();
	if (td == PCPU_GET(fpcurthread))
		npxdrop();
	critical_exit();

	/* Disable any hardware breakpoints. */
	if (td->td_pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		td->td_pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != NULL) {
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_arena, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = NULL;
	}
}
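
/*
 * Size note (illustrative arithmetic, assuming the usual i386 value
 * IOPAGES == 2): the extension consists of one page holding struct
 * pcb_ext (including the TSS) plus an I/O permission bitmap covering
 * all 65536 ports, 65536 / NBBY == 8192 bytes == 2 pages, so
 * ctob(IOPAGES + 1) releases 3 pages (12KB) above.
 */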

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;
	struct xstate_hdr *xhdr;

	td->td_pcb = pcb = get_pcb_td(td);
	td->td_frame = (struct trapframe *)((caddr_t)pcb - 16) - 1;
	pcb->pcb_ext = NULL;
	pcb->pcb_save = get_pcb_user_save_pcb(pcb);
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
		bzero(xhdr, sizeof(*xhdr));
		xhdr->xstate_bv = xsave_mask;
	}
}

void
cpu_thread_free(struct thread *td)
{

	cpu_thread_clean(td);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{

	switch (error) {
	case 0:
		td->td_frame->tf_eax = td->td_retval[0];
		td->td_frame->tf_edx = td->td_retval[1];
		td->td_frame->tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
		 * 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		td->td_frame->tf_eip -= td->td_frame->tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
		td->td_frame->tf_eax = SV_ABI_ERRNO(td->td_proc, error);
		td->td_frame->tf_eflags |= PSL_C;
		break;
	}
}
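
/*
 * Worked ERESTART example (illustrative): a syscall entered through
 * "int $0x80" is encoded as 0xcd 0x80, i.e. 2 bytes, so tf_err == 2
 * and tf_eip -= tf_err re-points the saved pc at the int instruction;
 * the syscall is then re-executed when the thread returns to userland.
 */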

/*
 * Initialize machine state, mostly pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go back to the fork_return(), which
 * finalizes the thread state and handles peculiarities of the first
 * return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE |
	    PCB_KERNNPX);
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
	    cpu_max_ext_state_size);

	/*
	 * Create a new fresh stack for the new thread.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * If the current thread has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame.  Otherwise, the new thread will
	 * receive a (likely unexpected) SIGTRAP when it executes the first
	 * instruction after returning to userland.
	 */
	td->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;		    /* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td;			    /* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl &= ~(PSL_I);	/* interrupts must be disabled */
	pcb2->pcb_gs = rgs();
	/*
	 * If we didn't copy the pcb, we'd need to do the following registers:
	 * pcb2->pcb_cr3:	cloned above.
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the entry
	 * function.
	 */
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
	td->td_frame->tf_eip = (int)entry;

	/* Return address sentinel value to stop stack unwinding. */
	suword((void *)td->td_frame->tf_esp, 0);

	/* Pass the argument to the entry point. */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)arg);
}
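
/*
 * Worked example with illustrative numbers: for ss_sp == 0x10000000
 * and ss_size == 0x10000 the computation above gives
 *
 *	(0x10000000 + 0x10000 - 4) & ~0x0f	== 0x1000fff0
 *	0x1000fff0 - 4				== 0x1000ffec
 *
 * so entry() starts with %esp congruent to 12 mod 16, exactly as if it
 * had been reached by a call instruction from 16-byte-aligned code:
 * the word at tf_esp is the zero return-address sentinel and the word
 * above it is arg.
 */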

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct segment_descriptor sd;
	uint32_t base;

	/*
	 * Construct a descriptor and store it in the pcb for
	 * the next context switch.  Also store it in the gdt
	 * so that the load of tf_fs into %fs will activate it
	 * at return to userland.
	 */
	base = (uint32_t)tls_base;
	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sd.sd_hilimit = 0xf;
	sd.sd_type = SDT_MEMRWA;
	sd.sd_dpl = SEL_UPL;
	sd.sd_p = 1;
	sd.sd_xx = 0;
	sd.sd_def32 = 1;
	sd.sd_gran = 1;
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	if (td == curthread) {
		PCPU_GET(fsgs_gdt)[1] = sd;
		load_gs(GSEL(GUGS_SEL, SEL_UPL));
	}
	critical_exit();
	return (0);
}
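
/*
 * Packing sketch (illustrative, not part of the build): the split
 * fields above assemble the standard 8-byte GDT descriptor.  For a
 * hypothetical base == 0xdeadbeef:
 *
 *	sd_lobase = 0xdeadbeef & 0xffffff	== 0xadbeef
 *	sd_hibase = (0xdeadbeef >> 24) & 0xff	== 0xde
 *
 * With sd_lolimit == 0xffff, sd_hilimit == 0xf and sd_gran == 1 the
 * limit is 0xfffff 4KB pages, i.e. the whole 4GB address space, which
 * is why %gs-relative accesses at any TLS offset simply wrap around.
 */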

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

/*
 * Get an sf_buf from the freelist.  May block if none are available.
 */
void
sf_buf_map(struct sf_buf *sf, int flags)
{
	pt_entry_t opte, *ptep;

	/*
	 * Update the sf_buf's virtual-to-physical mapping, flushing the
	 * virtual address from the TLB.  Since the reference count for
	 * the sf_buf's old mapping was zero, that mapping is not
	 * currently in use.  Consequently, there is no need to exchange
	 * the old and new PTEs atomically, even under PAE.
	 */
	ptep = vtopte(sf->kva);
	opte = *ptep;
	*ptep = VM_PAGE_TO_PHYS(sf->m) | pgeflag | PG_RW | PG_V |
	    pmap_cache_bits(sf->m->md.pat_mode, 0);

	/*
	 * Avoid unnecessary TLB invalidations: If the sf_buf's old
	 * virtual-to-physical mapping was not used, then any processor
	 * that has invalidated the sf_buf's virtual address from its TLB
	 * since the last used mapping need not invalidate again.
	 */
#ifdef SMP
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		CPU_ZERO(&sf->cpumask);

	sf_buf_shootdown(sf, flags);
#else
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		pmap_invalidate_page(kernel_pmap, sf->kva);
#endif
}

#ifdef SMP
void
sf_buf_shootdown(struct sf_buf *sf, int flags)
{
	cpuset_t other_cpus;
	u_int cpuid;

	sched_pin();
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sf->cpumask)) {
		CPU_SET(cpuid, &sf->cpumask);
		invlpg(sf->kva);
	}
	if ((flags & SFB_CPUPRIVATE) == 0) {
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_NAND(&other_cpus, &sf->cpumask);
		if (!CPU_EMPTY(&other_cpus)) {
			CPU_OR(&sf->cpumask, &other_cpus);
			smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap);
		}
	}
	sched_unpin();
}
#endif

/*
 * MD part of sf_buf_free().
 */
int
sf_buf_unmap(struct sf_buf *sf)
{

	return (0);
}

static void
sf_buf_invalidate(struct sf_buf *sf)
{
	vm_page_t m = sf->m;

	/*
	 * Use pmap_qenter to update the pte for
	 * existing mapping, in particular, the PAT
	 * settings are recalculated.
	 */
	pmap_qenter(sf->kva, &m, 1);
	pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE, FALSE);
}

/*
 * Invalidate the cache lines that may belong to the page, if a
 * (possibly old) mapping of the page by an sf buffer exists.  Returns
 * TRUE when the mapping was found and the cache was invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{

	return (sf_buf_process_page(m, sf_buf_invalidate));
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return (0);
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return (1);
}