/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_isa.h"
#include "opt_npx.h"
#ifdef PC98
#include "opt_pc98.h"
#endif
#include "opt_reset.h"
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kse.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#ifdef CPU_ELAN
#include <machine/elan_mmcr.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#ifdef PC98
#include <pc98/pc98/pc98.h>
#else
#include <i386/isa/isa.h>
#endif

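/*
 * Default number of sendfile(2) buffers, scaled by maxusers; it may be
 * overridden at boot time via the kern.ipc.nsfbufs tunable fetched in
 * sf_buf_init() below.
 */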
#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif

static void	cpu_reset_real(void);
#ifdef SMP
static void	cpu_reset_proxy(void);
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
#endif
static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

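/*
 * Hash an sf_buf by the index of its backing page within vm_page_array,
 * masked to the size of the hash table.
 */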
#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int	sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;

extern int	_ucodesel, _udatasel;
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(td1, p2, td2, flags)
	register struct thread *td1;
	register struct proc *p2;
	struct thread *td2;
	int flags;
{
	register struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;
#ifdef DEV_NPX
	register_t savecrit;
#endif

	p1 = td1->td_proc;
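	/*
	 * If no new process is being created (rfork(2) without RFPROC),
	 * the only machine-dependent work is to give the process a
	 * private copy of the user LDT when memory is being unshared.
	 */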
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt = mdp1->md_ldt;
			if (pldt && pldt->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_free(td1);
			}
		}
		return;
	}

	/* Ensure that p1's pcb is up to date. */
#ifdef DEV_NPX
	if (td1 == curthread)
		td1->td_pcb->pcb_gs = rgs();
	savecrit = intr_disable();
	if (PCPU_GET(fpcurthread) == td1)
		npxsave(&td1->td_pcb->pcb_save);
	intr_restore(savecrit);
#endif

	/* Point the pcb to the top of the stack */
	pcb2 = (struct pcb *)(td2->td_kstack +
	    td2->td_kstack_pages * PAGE_SIZE) - 1;
	td2->td_pcb = pcb2;

	/* Copy p1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
	/* Point at p2's mdproc and copy over p1's MD process state */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The -16 is so we can expand the trapframe if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - 16) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

	td2->td_frame->tf_eax = 0;		/* Child returns zero */
	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
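	/* The child also gets a 1 in %edx, the traditional second fork(2)
	 * return value that distinguishes it from the parent. */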
	td2->td_frame->tf_edx = 1;

	/*
	 * If the parent process has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame unless the debugger had set PF_FORK
	 * on the parent.  Otherwise, the child will receive a (likely
	 * unexpected) SIGTRAP when it executes the first instruction after
	 * returning to userland.
	 */
	if ((p1->p_pfsflags & PF_FORK) == 0)
		td2->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
#ifdef PAE
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
	pcb2->pcb_ebx = (int)td2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl = PSL_KERNEL;		/* ints disabled */
	pcb2->pcb_gs = rgs();
	/*-
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&sched_lock);
	if (mdp2->md_ldt != 0) {
		if (flags & RFMEM) {
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&sched_lock);

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(td, func, arg)
	struct thread *td;
	void (*func)(void *);
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	td->td_pcb->pcb_esi = (int) func;	/* function */
	td->td_pcb->pcb_ebx = (int) arg;	/* first arg */
}

void
cpu_exit(struct thread *td)
{
	struct mdproc *mdp;
	struct pcb *pcb = td->td_pcb;

	/* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
	mdp = &td->td_proc->p_md;
	if (mdp->md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	}
	if (pcb->pcb_flags & PCB_DBREGS) {
		/* disable all hardware breakpoints */
		reset_dbregs();
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

void
cpu_thread_exit(struct thread *td)
{
	struct pcb *pcb = td->td_pcb;
#ifdef DEV_NPX
	if (td == PCPU_GET(fpcurthread))
		npxdrop();
#endif
	if (pcb->pcb_flags & PCB_DBREGS) {
		/* disable all hardware breakpoints */
		reset_dbregs();
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != 0) {
		/* XXXKSE  XXXSMP  not SMP SAFE.. what locks do we have? */
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = 0;
	}
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

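/*
 * Carve the pcb and the initial trap frame out of the top of the new
 * thread's kernel stack.  The 16 byte gap below the pcb leaves room to
 * extend the trap frame if the thread enters vm86 mode.
 */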
void
cpu_thread_setup(struct thread *td)
{

	td->td_pcb = (struct pcb *)(td->td_kstack +
	    td->td_kstack_pages * PAGE_SIZE) - 1;
	td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb - 16) - 1;
	td->td_pcb->pcb_ext = NULL;
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back
 * to userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 *
	 * XXXKSE It might be a good idea to simply skip this as
	 * the values of the other registers may be unimportant.
	 * This would remove any requirement for knowing the KSE
	 * at this time (see the matching comment below for
	 * more analysis) (need a good safe default).
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	pcb2->pcb_flags &= ~(PCB_NPXTRAP|PCB_NPXINITDONE);

	/*
	 * Create a new fresh stack for the new thread.
	 * The -16 is so we can expand the trapframe if we go to vm86.
	 * Don't forget to set this stack value into whatever supplies
	 * the address for the fault handlers.
	 * The contexts are filled in at the time we actually DO the
	 * upcall as only then do we know which KSE we got.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
#ifdef PAE
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(td->td_proc->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(td->td_proc->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td;		/* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl &= ~(PSL_I);	/* interrupts must be disabled */
	pcb2->pcb_gs = rgs();
	/*
	 * If we didn't copy the pcb, we'd need to do the following registers:
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.  XXXKSE ???
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the uts
	 * function.
	 */
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (int)ku->ku_stack.ss_sp + ku->ku_stack.ss_size - 16;
	td->td_frame->tf_eip = (int)ku->ku_func;

	/*
	 * Pass the address of the mailbox for this kse to the uts
	 * function as a parameter on the stack.
	 */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)ku->ku_mailbox);
}

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

/*
 * Force reset the processor by invalidating the entire address space!
 */

#ifdef SMP
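/*
 * When cpu_reset() is called on an AP, the (previously stopped) BSP is
 * restarted with cpu_reset_proxy() as its restart function.  The proxy
 * announces itself via cpu_reset_proxy_active, waits for the requesting
 * CPU to acknowledge, stops that CPU, and then performs the real reset
 * from the BSP.
 */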
static void
cpu_reset_proxy()
{

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to see that we've started */
	stop_cpus((1<<cpu_reset_proxyid));
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif

void
cpu_reset()
{
#ifdef SMP
	if (smp_active == 0) {
		cpu_reset_real();
		/* NOTREACHED */
	} else {

		u_int map;
		int cnt;
		printf("cpu_reset called on cpu#%d\n", PCPU_GET(cpuid));

		map = PCPU_GET(other_cpus) & ~stopped_cpus;

		if (map != 0) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);		/* Stop all other CPUs */
		}

		if (PCPU_GET(cpuid) == 0) {
			DELAY(1000000);
			cpu_reset_real();
			/* NOTREACHED */
		} else {
			/* We are not BSP (CPU #0) */

			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");
			started_cpus = (1<<0);	/* Restart CPU #0 */

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			enable_intr();
			cpu_reset_proxy_active = 2;

			while (1);
			/* NOTREACHED */
		}
	}
#else
	cpu_reset_real();
#endif
}

static void
cpu_reset_real()
{

#ifdef CPU_ELAN
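	/* On the AMD Elan SC520, request a system reset via the RESCFG MMCR register. */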
	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;
#endif

	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);
		outl(0xcfc, 0xf);
	}

#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	disable_intr();
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);		/* SHUT0 = 0. */
		outb(0x37, 0x0b);		/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);			/* Reset. */
#else
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */

#if !defined(BROKEN_KEYBOARD_RESET)
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
#endif
#endif /* PC98 */
	/* Force a shutdown by unmapping the entire address space! */
	bzero((caddr_t)PTD, NBPTD);

	/* "good night, sweet prince .... <THUNK!>" */
	invltlb();
	/* NOTREACHED */
	while (1);
}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
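	/*
	 * Reserve a contiguous range of kernel virtual addresses for the
	 * sf_bufs; physical pages are only mapped into this range later,
	 * by sf_buf_alloc().
	 */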
	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Get an sf_buf from the freelist.  May block if none are available,
 * unless SFB_NOWAIT is given.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	pt_entry_t opte, *ptep;
	struct sf_head *hash_list;
	struct sf_buf *sf;
#ifdef SMP
	cpumask_t cpumask, other_cpus;
#endif
	int error;

	KASSERT(curthread->td_pinned > 0 || (flags & SFB_CPUPRIVATE) == 0,
	    ("sf_buf_alloc(SFB_CPUPRIVATE): curthread not pinned"));
	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
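	/*
	 * First see whether the page already has an active sf_buf mapping;
	 * if so, take a new reference on it and reuse the mapping.
	 */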
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
#ifdef SMP
			goto shootdown;
#else
			goto done;
#endif
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		mbstat.sf_allocwait++;
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);

	/*
	 * Update the sf_buf's virtual-to-physical mapping, flushing the
	 * virtual address from the TLB.  Since the reference count for
	 * the sf_buf's old mapping was zero, that mapping is not
	 * currently in use.  Consequently, there is no need to exchange
	 * the old and new PTEs atomically, even under PAE.
	 */
	ptep = vtopte(sf->kva);
	opte = *ptep;
	*ptep = VM_PAGE_TO_PHYS(m) | pgeflag | PG_RW | PG_V;

	/*
	 * Avoid unnecessary TLB invalidations: If the sf_buf's old
	 * virtual-to-physical mapping was not used, then any processor
	 * that has invalidated the sf_buf's virtual address from its TLB
	 * since the last used mapping need not invalidate again.
	 */
#ifdef SMP
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		sf->cpumask = 0;
shootdown:
	sched_pin();
	cpumask = PCPU_GET(cpumask);
	if ((sf->cpumask & cpumask) == 0) {
		sf->cpumask |= cpumask;
		invlpg(sf->kva);
	}
	if ((flags & SFB_CPUPRIVATE) == 0) {
		other_cpus = PCPU_GET(other_cpus) & ~sf->cpumask;
		if (other_cpus != 0) {
			sf->cpumask |= other_cpus;
			mtx_lock_spin(&smp_ipi_mtx);
			smp_masked_invlpg(other_cpus, sf->kva);
			mtx_unlock_spin(&smp_ipi_mtx);
		}
	}
	sched_unpin();
#else
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		pmap_invalidate_page(kernel_pmap, sf->kva);
#endif
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}

/*
 * Remove a reference from the given sf_buf, adding it to the free
 * list when its reference count reaches zero.  A freed sf_buf still,
 * however, retains its virtual-to-physical mapping until it is
 * recycled or reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		if (sf_buf_alloc_want > 0)
			wakeup_one(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return 0;
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}