/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD: src/sys/i386/i386/vm_machdep.c,v 1.71.2.7 1999/09/05 08:11:20 peter Exp $
 */

#include "npx.h"
#include "opt_bounce.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/clock.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#ifdef PC98
#include <pc98/pc98/pc98.h>
#else
#include <i386/isa/isa.h>
#endif

#ifdef BOUNCE_BUFFERS
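/*
 * Summary (inferred from the routines below): ISA DMA can only reach the
 * first 16MB of physical memory, so buffers containing pages at or above
 * SIXTEENMEG are "bounced".  vm_bounce_alloc() substitutes low physical
 * pages for the high ones (copying the data first on writes), and
 * vm_bounce_free(), hooked into biodone, copies the data back on reads
 * and releases the substitute pages.
 */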
static vm_offset_t
		vm_bounce_kva __P((int size, int waitok));
static void	vm_bounce_kva_free __P((vm_offset_t addr, vm_offset_t size,
					int now));
static vm_offset_t
		vm_bounce_page_find __P((int count));
static void	vm_bounce_page_free __P((vm_offset_t pa, int count));

static volatile int	kvasfreecnt;

caddr_t		bouncememory;
int		bouncepages;
static int	bpwait;
static vm_offset_t	*bouncepa;
static int	bmwait, bmfreeing;

#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
static int	bounceallocarraysize;
static unsigned	*bounceallocarray;
static int	bouncefree;
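
/*
 * Allocation bitmap: one bit per bounce page.  A set bit means the page
 * is allocated (or does not exist); vm_bounce_init() clears the bit for
 * each usable page, and vm_bounce_page_find()/vm_bounce_page_free() set
 * and clear bits as pages are handed out and returned.
 */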

#if defined(PC98) && defined (EPSON_BOUNCEDMA)
#define SIXTEENMEG (3840*4096)			/* 15MB boundary */
#else
#define SIXTEENMEG (4096*4096)
#endif
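/*
 * 16MB is the ISA DMA limit: ISA bus masters and the 8237 DMA controller
 * can only address 24 bits of physical memory.  (The EPSON PC98 variant
 * above stops at the 15MB boundary instead.)
 */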
#define MAXBKVA 1024
int		maxbkva = MAXBKVA*PAGE_SIZE;

/* special list that can be used at interrupt time for eventual kva free */
static struct kvasfree {
	vm_offset_t addr;
	vm_offset_t size;
} kvaf[MAXBKVA];

/*
 * get bounce buffer pages (count physically contiguous)
 * (only count == 1 is implemented now)
 */
static vm_offset_t
vm_bounce_page_find(count)
	int count;
{
	int bit;
	int s,i;

	if (count != 1)
		panic("vm_bounce_page_find -- no support for > 1 page yet!!!");

	s = splbio();
retry:
	for (i = 0; i < bounceallocarraysize; i++) {
		if (bounceallocarray[i] != 0xffffffff) {
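			/*
			 * ffs() on the complement finds the lowest clear
			 * (i.e. free) bit, numbered from 1; 0 means none.
			 */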
			bit = ffs(~bounceallocarray[i]);
			if (bit) {
				bounceallocarray[i] |= 1 << (bit - 1) ;
				bouncefree -= count;
				splx(s);
				return bouncepa[(i * BITS_IN_UNSIGNED + (bit - 1))];
			}
		}
	}
	bpwait = 1;
	tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
	goto retry;
}

static void
vm_bounce_kva_free(addr, size, now)
	vm_offset_t addr;
	vm_offset_t size;
	int now;
{
	int s = splbio();
	kvaf[kvasfreecnt].addr = addr;
	kvaf[kvasfreecnt].size = size;
	++kvasfreecnt;
	if( now) {
		/*
		 * this will do wakeups
		 */
		vm_bounce_kva(0,0);
	} else {
		if (bmwait) {
			/*
			 * if anyone is waiting on the bounce-map, then wakeup
			 */
			wakeup((caddr_t) io_map);
			bmwait = 0;
		}
	}
	splx(s);
}

/*
 * free count bounce buffer pages
 */
static void
vm_bounce_page_free(pa, count)
	vm_offset_t pa;
	int count;
{
	int allocindex;
	int index;
	int bit;

	if (count != 1)
		panic("vm_bounce_page_free -- no support for > 1 page yet!!!");

	for(index=0;index<bouncepages;index++) {
		if( pa == bouncepa[index])
			break;
	}

	if( index == bouncepages)
		panic("vm_bounce_page_free: invalid bounce buffer");

	allocindex = index / BITS_IN_UNSIGNED;
	bit = index % BITS_IN_UNSIGNED;

	bounceallocarray[allocindex] &= ~(1 << bit);

	bouncefree += count;
	if (bpwait) {
		bpwait = 0;
		wakeup((caddr_t) &bounceallocarray);
	}
}

/*
 * allocate size bytes of bounce buffer kva
 * (size is in bytes; callers pass a multiple of PAGE_SIZE)
 */
static vm_offset_t
vm_bounce_kva(size, waitok)
	int size;
	int waitok;
{
	int i;
	vm_offset_t kva = 0;
	vm_offset_t off;
	int s = splbio();
more:
	if (!bmfreeing && kvasfreecnt) {
		bmfreeing = 1;
		for (i = 0; i < kvasfreecnt; i++) {
			for(off=0;off<kvaf[i].size;off+=PAGE_SIZE) {
				pmap_kremove( kvaf[i].addr + off);
			}
			kmem_free_wakeup(io_map, kvaf[i].addr,
				kvaf[i].size);
		}
		kvasfreecnt = 0;
		bmfreeing = 0;
		if( bmwait) {
			bmwait = 0;
			wakeup( (caddr_t) io_map);
		}
	}

	if( size == 0) {
		splx(s);
		return 0;
	}

	if ((kva = kmem_alloc_pageable(io_map, size)) == 0) {
		if( !waitok) {
			splx(s);
			return 0;
		}
		bmwait = 1;
		tsleep((caddr_t) io_map, PRIBIO, "bmwait", 0);
		goto more;
	}
	splx(s);
	return kva;
}

/*
 * like vm_bounce_kva, but actually allocates backing bounce pages
 * (and takes a page count rather than a byte size as its argument)
 */
vm_offset_t
vm_bounce_kva_alloc(count)
	int count;
{
	int i;
	vm_offset_t kva;
	vm_offset_t pa;
	if( bouncepages == 0) {
		kva = (vm_offset_t) malloc(count*PAGE_SIZE, M_TEMP, M_WAITOK);
		return kva;
	}
	kva = vm_bounce_kva(count*PAGE_SIZE, 1);
	for(i=0;i<count;i++) {
		pa = vm_bounce_page_find(1);
		pmap_kenter(kva + i * PAGE_SIZE, pa);
	}
	return kva;
}

/*
 * like vm_bounce_kva_free, but actually releases the bounce pages
 * as well as the kva
 */
void
vm_bounce_kva_alloc_free(kva, count)
	vm_offset_t kva;
	int count;
{
	int i;
	vm_offset_t pa;
	if( bouncepages == 0) {
		free((caddr_t) kva, M_TEMP);
		return;
	}
	for(i = 0; i < count; i++) {
		pa = pmap_kextract(kva + i * PAGE_SIZE);
		vm_bounce_page_free(pa, 1);
	}
	vm_bounce_kva_free(kva, count*PAGE_SIZE, 0);
}
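
/*
 * Hypothetical usage sketch for the pair above (not called this way in
 * this file; the names src and the transfer step are illustrative only):
 *
 *	vm_offset_t kva = vm_bounce_kva_alloc(2);	// two DMA-safe pages
 *	bcopy(src, (caddr_t) kva, 2 * PAGE_SIZE);	// stage the data
 *	... start the ISA DMA transfer from kva ...
 *	vm_bounce_kva_alloc_free(kva, 2);		// release pages + kva
 *
 * When bouncepages is zero both routines fall back to plain malloc/free,
 * so callers need not check the bounce configuration themselves.
 */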

/*
 * do the things necessary to the struct buf to implement
 * bounce buffers...  called before the disk sort
 */
void
vm_bounce_alloc(bp)
	struct buf *bp;
{
	int countvmpg;
	vm_offset_t vastart, vaend;
	vm_offset_t vapstart, vapend;
	vm_offset_t va, kva;
	vm_offset_t pa;
	int dobounceflag = 0;
	int i;

	if (bouncepages == 0)
		return;

	if (bp->b_flags & B_BOUNCE) {
		printf("vm_bounce_alloc: called recursively???\n");
		return;
	}

	if (bp->b_bufsize < bp->b_bcount) {
		printf(
		    "vm_bounce_alloc: b_bufsize(0x%lx) < b_bcount(0x%lx) !!\n",
		    bp->b_bufsize, bp->b_bcount);
		panic("vm_bounce_alloc");
	}

	/*
	 *  This is not really necessary
	 *	if( bp->b_bufsize != bp->b_bcount) {
	 *		printf("size: %d, count: %d\n", bp->b_bufsize, bp->b_bcount);
	 *	}
	 */

	vastart = (vm_offset_t) bp->b_data;
	vaend = (vm_offset_t) bp->b_data + bp->b_bufsize;

	vapstart = trunc_page(vastart);
	vapend = round_page(vaend);
	countvmpg = (vapend - vapstart) / PAGE_SIZE;

	/*
	 * if any page is above 16MB, then go into bounce-buffer mode
	 */
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG)
			++dobounceflag;
		if( pa == 0)
			panic("vm_bounce_alloc: Unmapped page");
		va += PAGE_SIZE;
	}
	if (dobounceflag == 0)
		return;

	if (bouncepages < dobounceflag)
		panic("Not enough bounce buffers!!!");

	/*
	 * allocate a replacement kva for b_addr
	 */
	kva = vm_bounce_kva(countvmpg*PAGE_SIZE, 1);
#if 0
	printf("%s: vapstart: %x, vapend: %x, countvmpg: %d, kva: %x ",
	    (bp->b_flags & B_READ) ? "read":"write",
	    vapstart, vapend, countvmpg, kva);
#endif
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG) {
			/*
			 * allocate a replacement page
			 */
			vm_offset_t bpa = vm_bounce_page_find(1);
			pmap_kenter(kva + (PAGE_SIZE * i), bpa);
#if 0
			printf("r(%d): (%x,%x,%x) ", i, va, pa, bpa);
#endif
			/*
			 * if we are writing, then copy the data into the
			 * bounce page
			 */
			if ((bp->b_flags & B_READ) == 0) {
				bcopy((caddr_t) va, (caddr_t) kva + (PAGE_SIZE * i), PAGE_SIZE);
			}
		} else {
			/*
			 * use original page
			 */
			pmap_kenter(kva + (PAGE_SIZE * i), pa);
		}
		va += PAGE_SIZE;
	}

	/*
	 * flag the buffer as being bounced
	 */
	bp->b_flags |= B_BOUNCE;
	/*
	 * save the original buffer kva
	 */
	bp->b_savekva = bp->b_data;
	/*
	 * put our new kva into the buffer (offset by original offset)
	 */
	bp->b_data = (caddr_t) (((vm_offset_t) kva) |
				((vm_offset_t) bp->b_savekva & PAGE_MASK));
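	/*
	 * Only the page frames were replaced, so the byte offset within
	 * the first page must be carried over from the old address for
	 * b_data to point at the same data.
	 */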
#if 0
	printf("b_savekva: %x, newva: %x\n", bp->b_savekva, bp->b_data);
#endif
	return;
}

/*
 * hook into biodone to free bounce buffer
 */
void
vm_bounce_free(bp)
	struct buf *bp;
{
	int i;
	vm_offset_t origkva, bouncekva, bouncekvaend;

	/*
	 * if this isn't a bounced buffer, then just return
	 */
	if ((bp->b_flags & B_BOUNCE) == 0)
		return;

	/*
	 *  This check is not necessary
	 *	if (bp->b_bufsize != bp->b_bcount) {
	 *		printf("vm_bounce_free: b_bufsize=%d, b_bcount=%d\n",
	 *			bp->b_bufsize, bp->b_bcount);
	 *	}
	 */

	origkva = (vm_offset_t) bp->b_savekva;
	bouncekva = (vm_offset_t) bp->b_data;
/*
	printf("free: %d ", bp->b_bufsize);
*/

	/*
	 * check every page in the kva space for b_addr
	 */
	for (i = 0; i < bp->b_bufsize; ) {
		vm_offset_t mybouncepa;
		vm_offset_t copycount;

		copycount = round_page(bouncekva + 1) - bouncekva;
		mybouncepa = pmap_kextract(trunc_page(bouncekva));

		/*
		 * if this is a bounced pa, then process as one
		 */
		if ( mybouncepa != pmap_kextract( trunc_page( origkva))) {
			vm_offset_t tocopy = copycount;
			if (i + tocopy > bp->b_bufsize)
				tocopy = bp->b_bufsize - i;
			/*
			 * if this is a read, then copy from bounce buffer into original buffer
			 */
			if (bp->b_flags & B_READ)
				bcopy((caddr_t) bouncekva, (caddr_t) origkva, tocopy);
			/*
			 * free the bounce allocation
			 */

/*
			printf("(kva: %x, pa: %x)", bouncekva, mybouncepa);
*/
			vm_bounce_page_free(mybouncepa, 1);
		}

		origkva += copycount;
		bouncekva += copycount;
		i += copycount;
	}

/*
	printf("\n");
*/
	/*
	 * add the old kva into the "to free" list
	 */

	bouncekva= trunc_page((vm_offset_t) bp->b_data);
	bouncekvaend= round_page((vm_offset_t)bp->b_data + bp->b_bufsize);

/*
	printf("freeva: %d\n", (bouncekvaend - bouncekva) / PAGE_SIZE);
*/
	vm_bounce_kva_free( bouncekva, (bouncekvaend - bouncekva), 0);
	bp->b_data = bp->b_savekva;
	bp->b_savekva = 0;
	bp->b_flags &= ~B_BOUNCE;

	return;
}

/*
 * init the bounce buffer system
 */
void
vm_bounce_init()
{
	int i;

	kvasfreecnt = 0;

	if (bouncepages == 0)
		return;

	bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
	bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);

	if (!bounceallocarray)
		panic("Cannot allocate bounce resource array");

	bouncepa = malloc(bouncepages * sizeof(vm_offset_t), M_TEMP, M_NOWAIT);
	if (!bouncepa)
		panic("Cannot allocate physical memory array");

	for(i=0;i<bounceallocarraysize;i++) {
		bounceallocarray[i] = 0xffffffff;
	}

	for(i=0;i<bouncepages;i++) {
		vm_offset_t pa;
		if( (pa = pmap_kextract((vm_offset_t) bouncememory + i * PAGE_SIZE)) >= SIXTEENMEG)
			panic("bounce memory out of range");
		if( pa == 0)
			panic("bounce memory not resident");
		bouncepa[i] = pa;
		bounceallocarray[i/(8*sizeof(int))] &= ~(1<<(i%(8*sizeof(int))));
	}
	bouncefree = bouncepages;
}
#endif /* BOUNCE_BUFFERS */

/*
 * quick version of vm_fault
 */
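/*
 * (fubyte() reads one byte from user space, faulting the page in if
 * needed; for a write, subyte() stores the byte back so that a
 * copy-on-write fault is forced and the page becomes writable.)
 */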
void
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	if (prot & VM_PROT_WRITE)
		subyte(v, fubyte(v));
	else
		fubyte(v);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
int
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	struct pcb *pcb2 = &p2->p_addr->u_pcb;
	int sp, offset;
	volatile int retval;
#ifdef USER_LDT
	struct pcb *pcb = &p2->p_addr->u_pcb;
#endif

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */

	__asm __volatile("movl %%esp,%0" : "=r" (sp));
	offset = sp - (int)kstack;

	retval = 1;		/* return 1 in child */
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);
	p2->p_md.md_regs = p1->p_md.md_regs;

	*pcb2 = p1->p_addr->u_pcb;
	pcb2->pcb_cr3 = vtophys(p2->p_vmspace->vm_pmap.pm_pdir);

#ifdef USER_LDT
	/* Copy the LDT, if necessary. */
	if (pcb->pcb_ldt != 0) {
		union descriptor *new_ldt;
		size_t len = pcb->pcb_ldt_len * sizeof(union descriptor);

		new_ldt = (union descriptor *)kmem_alloc(kernel_map, len);
		bcopy(pcb->pcb_ldt, new_ldt, len);
		pcb->pcb_ldt = (caddr_t)new_ldt;
	}
#endif

	retval = 0;		/* return 0 in parent */
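	/*
	 * savectx() snapshots the current context into the child's pcb.
	 * When the child is first switched in, it resumes after this call
	 * on the stack copy made above, where retval still holds 1; the
	 * parent continues here with retval 0.
	 */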
	savectx(pcb2);
	return (retval);
}

void
cpu_exit(p)
	register struct proc *p;
{
#ifdef USER_LDT
	struct pcb *pcb;
#endif

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
#ifdef USER_LDT
	pcb = &p->p_addr->u_pcb;
	if (pcb->pcb_ldt != 0) {
		if (pcb == curpcb)
			lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ldt,
			pcb->pcb_ldt_len * sizeof(union descriptor));
		pcb->pcb_ldt_len = (int)pcb->pcb_ldt = 0;
	}
#endif
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
	/* drop per-process resources */
	pmap_dispose_proc(p);
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
	    p));
}

#ifdef notyet
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
#endif

/*
 * Convert kernel VA to physical address
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_kextract((vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr, v, kva;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE, v += PAGE_SIZE) {
		/*
		 * Do the vm_fault if needed; do the copy-on-write thing
		 * when reading stuff off device into memory.
		 */
		vm_fault_quick(addr,
		    (bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		if (pa == 0)
			panic("vmapbuf: page not present");
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
		pmap_kenter((vm_offset_t) v, pa);
	}
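	/*
	 * Each user page is now held (so it cannot be paged out during
	 * the I/O) and double-mapped at a kernel address, so the driver
	 * can reach the buffer regardless of which user context is live.
	 */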

	kva = bp->b_saveaddr;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE) {
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		pmap_kremove((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}

	bp->b_data = bp->b_saveaddr;
}

/*
 * Force reset the processor by invalidating the entire address space!
 */
void
cpu_reset() {
#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	asm("cli");
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);		/* SHUT0 = 0. */
		outb(0x37, 0x0b);		/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);			/* Reset. */
#else
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off Gate A20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */

#if !defined(BROKEN_KEYBOARD_RESET)
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
#endif
#endif /* PC98 */
	/* force a shutdown by unmapping the entire address space! */
	bzero((caddr_t) PTD, PAGE_SIZE);

	/* "good night, sweet prince .... <THUNK!>" */
	invltlb();
	/* NOTREACHED */
	while(1);
}

/*
 * Grow the user stack to allow for 'sp'.  This version grows the stack in
 * chunks of SGROWSIZ.
 */
int
grow(p, sp)
	struct proc *p;
	u_int sp;
{
	unsigned int nss;
	caddr_t v;
	struct vmspace *vm = p->p_vmspace;

	if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
		return (1);

	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);
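	/* nss: required stack size, from USRSTACK down to sp, page-rounded */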

	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);

	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
	    SGROWSIZ) < nss) {
		int grow_amount;
		/*
		 * If necessary, grow the VM that the stack occupies
		 * to allow for the rlimit.  This allows us to not have
		 * to allocate all of the VM up-front in execve (which
		 * is expensive).
		 * Grow the VM by the amount requested rounded up to
		 * the nearest SGROWSIZ to provide for some hysteresis.
		 */
		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ);
		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
		    SGROWSIZ) - grow_amount;
		/*
		 * If there isn't enough room to extend by SGROWSIZ, then
		 * just extend to the maximum size.
		 */
		if (v < vm->vm_maxsaddr) {
			v = vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
		}
		if ((grow_amount == 0) || (vm_map_find(&vm->vm_map, NULL, 0, (vm_offset_t *)&v,
		    grow_amount, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0) != KERN_SUCCESS)) {
			return (0);
		}
		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
	}

	return (1);
}

/*
 * Prototype routine to implement the pre-zeroed page mechanism;
 * called from the idle loop.
 */
int
vm_page_zero_idle() {
	vm_page_t m;
	static int free_rover = 0;
	if ((cnt.v_free_count > cnt.v_interrupt_free_min) &&
	    (m = vm_page_list_find(PQ_FREE, free_rover))) {
		--(*vm_page_queues[m->queue].lcnt);
		TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
		m->queue = PQ_NONE;
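		/*
		 * The page is now off the free queue, so interrupts can be
		 * enabled while it is zeroed: nothing else can allocate it
		 * in the meantime.
		 */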
		enable_intr();
		pmap_zero_page(VM_PAGE_TO_PHYS(m));
		disable_intr();
		m->queue = PQ_ZERO + m->pc;
		++(*vm_page_queues[m->queue].lcnt);
		TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m, pageq);
		free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
		++vm_page_zero_count;
		return 1;
	}
	return 0;
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm()
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

#include "isa.h"

int
is_physical_memory(addr)
	vm_offset_t addr;
{

#if NISA > 0
	/* The ISA ``memory hole''. */
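	/* (0xa0000-0xfffff: video RAM, adapter ROM and BIOS, not system RAM) */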
	if (addr >= 0xa0000 && addr < 0x100000)
		return 0;
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}