/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.4/sys/i386/i386/vm_machdep.c 145335 2005-04-20 19:11:07Z cvs2svn $");

#include "opt_isa.h"
#include "opt_npx.h"
#ifdef PC98
#include "opt_pc98.h"
#endif
#include "opt_reset.h"
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kse.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#ifdef CPU_ELAN
#include <machine/elan_mmcr.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#ifdef PC98
#include <pc98/pc98/pc98.h>
#else
#include <i386/isa/isa.h>
#endif

#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif
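/*
 * Sizing example (arithmetic only): with maxusers set to 64 this
 * default works out to 512 + 64 * 16 = 1536 sf_bufs.  The value can
 * be overridden with the kern.ipc.nsfbufs tunable, which is fetched
 * in sf_buf_init() below.
 */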

static void	cpu_reset_real(void);
#ifdef SMP
static void	cpu_reset_proxy(void);
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
#endif
static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)
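/*
 * In the hash above, (m - vm_page_array) is the index of the page
 * within the global vm_page array; masking it with sf_buf_hashmask
 * (a power of two minus one, as returned by hashinit()) selects a
 * hash bucket.
 */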

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int	sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;

extern int	_ucodesel, _udatasel;

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(td1, p2, td2, flags)
	register struct thread *td1;
	register struct proc *p2;
	struct thread *td2;
	int flags;
{
	register struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;
#ifdef DEV_NPX
	register_t savecrit;
#endif

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt = mdp1->md_ldt;
			if (pldt && pldt->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_free(td1);
			}
		}
		return;
	}

	/* Ensure that p1's pcb is up to date. */
#ifdef DEV_NPX
	if (td1 == curthread)
		td1->td_pcb->pcb_gs = rgs();
	savecrit = intr_disable();
	if (PCPU_GET(fpcurthread) == td1)
		npxsave(&td1->td_pcb->pcb_save);
	intr_restore(savecrit);
#endif

	/* Point the pcb to the top of the stack */
	pcb2 = (struct pcb *)(td2->td_kstack +
	    td2->td_kstack_pages * PAGE_SIZE) - 1;
	td2->td_pcb = pcb2;

	/* Copy p1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Point mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The -16 is so we can expand the trapframe if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - 16) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));
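	/*
	 * The child's kernel stack now looks roughly like this (an
	 * illustrative sketch; the stack grows downward):
	 *
	 *	td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE ->
	 *		+------------------------+
	 *		|       struct pcb       | <- td2->td_pcb
	 *		+------------------------+
	 *		| 16 bytes of vm86 slop  |
	 *		+------------------------+
	 *		|    struct trapframe    | <- td2->td_frame
	 *		+------------------------+
	 *		|    rest of the stack   |
	 *
	 * The %edx value set below follows the old 4.4BSD convention of
	 * returning a second value from fork(): 1 in the child, 0 in
	 * the parent.
	 */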

	td2->td_frame->tf_eax = 0;		/* Child returns zero */
	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
	td2->td_frame->tf_edx = 1;

	/*
	 * If the parent process has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame unless the debugger had set PF_FORK
	 * on the parent.  Otherwise, the child will receive a (likely
	 * unexpected) SIGTRAP when it executes the first instruction after
	 * returning to userland.
	 */
	if ((p1->p_pfsflags & PF_FORK) == 0)
		td2->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
#ifdef PAE
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
	pcb2->pcb_ebx = (int)td2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl = PSL_KERNEL;		/* ints disabled */
	pcb2->pcb_gs = rgs();
	/*-
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&sched_lock);
	if (mdp2->md_ldt != 0) {
		if (flags & RFMEM) {
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&sched_lock);

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(td, func, arg)
	struct thread *td;
	void (*func)(void *);
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	td->td_pcb->pcb_esi = (int) func;	/* function */
	td->td_pcb->pcb_ebx = (int) arg;	/* first arg */
}
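/*
 * Hedged usage sketch (modeled on kthread_create(9) of this era; the
 * names and context are illustrative only):
 *
 *	error = kthread_create(mykthread_main, sc, &p,
 *	    RFHIGHPID, 0, "mykthread");
 *
 * kthread_create() forks a new process and then calls
 * cpu_set_fork_handler() so that fork_trampoline() enters
 * mykthread_main(sc, frame) instead of the usual fork_return() path.
 */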

void
cpu_exit(struct thread *td)
{
	struct mdproc *mdp;
	struct pcb *pcb = td->td_pcb;

	/* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
	mdp = &td->td_proc->p_md;
	if (mdp->md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	}
	if (pcb->pcb_flags & PCB_DBREGS) {
		/* disable all hardware breakpoints */
		reset_dbregs();
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

void
cpu_thread_exit(struct thread *td)
{
	struct pcb *pcb = td->td_pcb;
#ifdef DEV_NPX
	if (td == PCPU_GET(fpcurthread))
		npxdrop();
#endif
	if (pcb->pcb_flags & PCB_DBREGS) {
		/* disable all hardware breakpoints */
		reset_dbregs();
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != 0) {
		/* XXXKSE  XXXSMP  not SMP SAFE.. what locks do we have? */
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = 0;
	}
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_thread_setup(struct thread *td)
{

	td->td_pcb = (struct pcb *)(td->td_kstack +
	    td->td_kstack_pages * PAGE_SIZE) - 1;
	td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb - 16) - 1;
	td->td_pcb->pcb_ext = NULL;
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back to
 * userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 *
	 * XXXKSE It might be a good idea to simply skip this as
	 * the values of the other registers may be unimportant.
	 * This would remove any requirement for knowing the KSE
	 * at this time (see the matching comment below for
	 * more analysis) (need a good safe default).
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	pcb2->pcb_flags &= ~(PCB_NPXTRAP|PCB_NPXINITDONE);

	/*
	 * Create a new fresh stack for the new thread.
	 * The -16 is so we can expand the trapframe if we go to vm86.
	 * Don't forget to set this stack value into whatever supplies
	 * the address for the fault handlers.
	 * The contexts are filled in at the time we actually DO the
	 * upcall as only then do we know which KSE we got.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
#ifdef PAE
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(td->td_proc->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(td->td_proc->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;		    /* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td;			    /* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl &= ~(PSL_I);	/* interrupts must be disabled */
	pcb2->pcb_gs = rgs();
	/*
	 * If we didn't copy the pcb, we'd need to do the following registers:
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.  XXXKSE ???
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the uts
	 * function.
	 */
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (int)ku->ku_stack.ss_sp + ku->ku_stack.ss_size - 16;
	td->td_frame->tf_eip = (int)ku->ku_func;

	/*
	 * Pass the address of the mailbox for this kse to the uts
	 * function as a parameter on the stack.
	 */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)ku->ku_mailbox);
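	/*
	 * On entry to ku_func the user stack then looks like this
	 * (a sketch):
	 *
	 *	tf_esp + 4:	ku_mailbox	<- first and only argument
	 *	tf_esp:		(stale word)	<- where a return address
	 *					   would normally live
	 *
	 * i.e. the frame is laid out as if ku_func(ku_mailbox) had just
	 * been called, although the uts function is not expected to
	 * return through it.
	 */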
}

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

/*
 * Force reset the processor by invalidating the entire address space!
 */

#ifdef SMP
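/*
 * When cpu_reset() is called on an AP, the actual reset has to run on
 * the BSP: the AP records its CPU id in cpu_reset_proxyid, installs
 * cpu_reset_proxy() as the restart function and restarts the stopped
 * BSP.  The handshake runs through cpu_reset_proxy_active: the BSP
 * sets it to 1 and spins, the AP acknowledges by setting it to 2 and
 * spins forever, and the BSP then stops the AP and calls
 * cpu_reset_real().
 */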
static void
cpu_reset_proxy()
{

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to see that we've started */
	stop_cpus((1<<cpu_reset_proxyid));
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif

void
cpu_reset()
{
#ifdef SMP
	if (smp_active == 0) {
		cpu_reset_real();
		/* NOTREACHED */
	} else {

		u_int map;
		int cnt;
		printf("cpu_reset called on cpu#%d\n", PCPU_GET(cpuid));

		map = PCPU_GET(other_cpus) & ~ stopped_cpus;

		if (map != 0) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);		/* Stop all other CPUs */
		}

		if (PCPU_GET(cpuid) == 0) {
			DELAY(1000000);
			cpu_reset_real();
			/* NOTREACHED */
		} else {
			/* We are not BSP (CPU #0) */

			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");
			started_cpus = (1<<0);		/* Restart CPU #0 */

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			enable_intr();
			cpu_reset_proxy_active = 2;

			while (1);
			/* NOTREACHED */
		}
	}
#else
	cpu_reset_real();
#endif
}

static void
cpu_reset_real()
{

#ifdef CPU_ELAN
	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;
#endif

	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);
		outl(0xcfc, 0xf);
	}

#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	disable_intr();
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);	/* SHUT0 = 0. */
		outb(0x37, 0x0b);	/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);		/* Reset. */
#else
	/*
	 * Attempt to do a CPU reset via the keyboard controller;
	 * do not turn off the GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */

#if !defined(BROKEN_KEYBOARD_RESET)
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
#endif
#endif /* PC98 */
	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t)PTD, NBPTD);

	/* "good night, sweet prince .... <THUNK!>" */
	invltlb();
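	/*
	 * With the page directory zeroed and the TLB flushed, the next
	 * instruction fetch faults; the fault handlers are unmapped as
	 * well, so the fault escalates into a double and then a triple
	 * fault, which resets the processor.
	 */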
	/* NOTREACHED */
	while(1);
}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Get an sf_buf from the freelist.  Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	pt_entry_t opte, *ptep;
	struct sf_head *hash_list;
	struct sf_buf *sf;
#ifdef SMP
	cpumask_t cpumask, other_cpus;
#endif
	int error;

	KASSERT(curthread->td_pinned > 0 || (flags & SFB_CPUPRIVATE) == 0,
	    ("sf_buf_alloc(SFB_CPUPRIVATE): curthread not pinned"));
	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
#ifdef SMP
			cpumask = PCPU_GET(cpumask);
			if ((sf->cpumask & cpumask) == 0) {
				sf->cpumask |= cpumask;
				invlpg(sf->kva);
			}
			if ((flags & SFB_CPUPRIVATE) == 0) {
				other_cpus = PCPU_GET(other_cpus) & ~sf->cpumask;
				if (other_cpus != 0) {
					sf->cpumask |= other_cpus;
					mtx_lock_spin(&smp_ipi_mtx);
					smp_masked_invlpg(other_cpus, sf->kva);
					mtx_unlock_spin(&smp_ipi_mtx);
				}
			}
#endif
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		mbstat.sf_allocwait++;
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);

	/*
	 * Update the sf_buf's virtual-to-physical mapping, flushing the
	 * virtual address from the TLB only if the PTE implies that the old
	 * mapping has been used.  Since the reference count for the sf_buf's
	 * old mapping was zero, that mapping is not currently in use.
	 * Consequently, there is no need to exchange the old and new PTEs
	 * atomically, even under PAE.
	 */
	ptep = vtopte(sf->kva);
	opte = *ptep;
	*ptep = VM_PAGE_TO_PHYS(m) | pgeflag | PG_RW | PG_V;
#ifdef SMP
	if (flags & SFB_CPUPRIVATE) {
		if ((opte & (PG_A | PG_V)) == (PG_A | PG_V)) {
			sf->cpumask = PCPU_GET(cpumask);
			invlpg(sf->kva);
		} else
			sf->cpumask = all_cpus;
		goto done;
	} else
		sf->cpumask = all_cpus;
#endif
	if ((opte & (PG_A | PG_V)) == (PG_A | PG_V))
		pmap_invalidate_page(kernel_pmap, sf->kva);
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}
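/*
 * A hedged usage sketch of the alloc/free pair (roughly what the
 * sendfile(2) path does; error handling omitted, names illustrative):
 *
 *	struct sf_buf *sf;
 *
 *	sf = sf_buf_alloc(pg, 0);
 *	... use sf_buf_kva(sf) as the kernel address of the page ...
 *	sf_buf_free(sf);
 */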

/*
 * Remove a reference from the given sf_buf, adding it to the free
 * list when its reference count reaches zero.  A freed sf_buf,
 * however, still retains its virtual-to-physical mapping until it
 * is recycled or reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		if (sf_buf_alloc_want > 0)
			wakeup_one(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return 0;
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}