/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD$
 */

#include "npx.h"
#include "opt_user_ldt.h"
#ifdef PC98
#include "opt_pc98.h"
#endif
#include "opt_reset.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/vm86.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#ifdef PC98
#include <pc98/pc98/pc98.h>
#else
#include <i386/isa/isa.h>
#endif

static void	cpu_reset_real __P((void));
#ifdef SMP
static void	cpu_reset_proxy __P((void));
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
#endif
extern int	_ucodesel, _udatasel;

/*
 * quick version of vm_fault
 */
int
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	int r;

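	/*
	 * Touch the byte at v from kernel mode: fubyte() fetches it from
	 * user space and, for a write probe, subyte() stores it back, so
	 * any page fault needed to map the page (read-only or writable)
	 * is taken and resolved here.  Both primitives return -1 if the
	 * access cannot be satisfied.
	 */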
	if (prot & VM_PROT_WRITE)
		r = subyte(v, fubyte(v));
	else
		r = fubyte(v);
	return(r);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(p1, p2, flags)
	register struct proc *p1, *p2;
	int flags;
{
	struct pcb *pcb2;

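	/*
	 * When no new process is being created (rfork() without RFPROC),
	 * the only machine dependent work is to give the current process
	 * a private copy of its LDT if it is unsharing an address space
	 * in which the LDT is still shared.
	 */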
	if ((flags & RFPROC) == 0) {
#ifdef USER_LDT
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct pcb *pcb1 = &p1->p_addr->u_pcb;
			struct pcb_ldt *pcb_ldt = pcb1->pcb_ldt;
			if (pcb_ldt && pcb_ldt->ldt_refcnt > 1) {
				pcb_ldt = user_ldt_alloc(pcb1,pcb_ldt->ldt_len);
				user_ldt_free(pcb1);
				pcb1->pcb_ldt = pcb_ldt;
				set_user_ldt(pcb1);
			}
		}
#endif
		return;
	}

#if NNPX > 0
	/* Ensure that p1's pcb is up to date. */
	if (npxproc == p1)
		npxsave(&p1->p_addr->u_pcb.pcb_save);
#endif

	/* Copy p1's pcb. */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	pcb2 = &p2->p_addr->u_pcb;

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies the user mode register values.
	 */
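	/*
	 * The trap frame sits just below the top of p2's kernel stack
	 * (UPAGES pages above p_addr); the 16 bytes left spare at the
	 * very top appear to be reserved for the vm86 segment register
	 * extension of the trap frame.
	 */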
	p2->p_md.md_regs = (struct trapframe *)
	    ((int)p2->p_addr + UPAGES * PAGE_SIZE - 16) - 1;
	bcopy(p1->p_md.md_regs, p2->p_md.md_regs, sizeof(*p2->p_md.md_regs));

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
#ifdef PAE
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)p2->p_md.md_regs - sizeof(void *);
	pcb2->pcb_ebx = (int)p2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	/*
	 * pcb2->pcb_ldt:	duplicated below, if necessary.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above (always 0 here?).
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 */

#ifdef SMP
	pcb2->pcb_mpnest = 1;
#endif
	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

#ifdef USER_LDT
	/* Copy the LDT, if necessary. */
	if (pcb2->pcb_ldt != 0) {
		if (flags & RFMEM) {
			pcb2->pcb_ldt->ldt_refcnt++;
		} else {
			pcb2->pcb_ldt = user_ldt_alloc(pcb2,
			    pcb2->pcb_ldt->ldt_len);
		}
	}
#endif

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(p, func, arg)
	struct proc *p;
	void (*func) __P((void *));
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	p->p_addr->u_pcb.pcb_esi = (int) func;	/* function */
	p->p_addr->u_pcb.pcb_ebx = (int) arg;	/* first arg */
}

void
cpu_exit(p)
	register struct proc *p;
{
	struct pcb *pcb = &p->p_addr->u_pcb;

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
	if (pcb->pcb_ext != 0) {
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = 0;
	}
#ifdef USER_LDT
	user_ldt_free(pcb);
#endif
	if (pcb->pcb_flags & PCB_DBREGS) {
		/*
		 * disable all hardware breakpoints
		 */
		reset_dbregs();
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
	/* drop per-process resources */
	pmap_dispose_proc(p);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{
	int error;
	caddr_t tempuser;

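	/*
	 * Build the dump image in a zeroed temporary buffer so that the
	 * gap between the struct user and the trap frame is written out
	 * as zeroes rather than stale kernel stack contents.
	 */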
	tempuser = malloc(ctob(UPAGES), M_TEMP, M_WAITOK);
	if (!tempuser)
		return EINVAL;

	bzero(tempuser, ctob(UPAGES));
	bcopy(p->p_addr, tempuser, sizeof(struct user));
	bcopy(p->p_md.md_regs,
	    tempuser + ((caddr_t) p->p_md.md_regs - (caddr_t) p->p_addr),
	    sizeof(struct trapframe));

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t) tempuser, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_UNIT, cred, (int *)NULL, p);

	free(tempuser, M_TEMP);

	return error;
}

#ifdef notyet
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
#endif

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

/*
 * Force reset the processor by invalidating the entire address space!
 */

#ifdef SMP
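/*
 * cpu_reset_proxy_active handshake between the BSP (running
 * cpu_reset_proxy() below) and the CPU that called cpu_reset():
 *
 *	0	proxy not yet running
 *	1	proxy running, waiting for the caller to disable interrupts
 *	2	caller has interrupts disabled; proxy takes the MP lock
 *	3	proxy holds the MP lock, waiting for the caller to re-enable
 *		interrupts (presumably so that it can take the stop IPI)
 *	4	caller has re-enabled interrupts; proxy stops it and resets
 */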
static void
cpu_reset_proxy()
{
	u_int saved_mp_lock;

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to disable interrupts */
	saved_mp_lock = mp_lock;
	mp_lock = 1;
	printf("cpu_reset_proxy: Grabbed mp lock for BSP\n");
	cpu_reset_proxy_active = 3;
	while (cpu_reset_proxy_active == 3)
		;	/* Wait for other cpu to enable interrupts */
	stop_cpus((1<<cpu_reset_proxyid));
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
#endif

void
cpu_reset()
{
#ifdef SMP
	if (smp_active == 0) {
		cpu_reset_real();
		/* NOTREACHED */
	} else {

		u_int map;
		int cnt;
		printf("cpu_reset called on cpu#%d\n",cpuid);

		map = other_cpus & ~ stopped_cpus;

		if (map != 0) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);		/* Stop all other CPUs */
		}

		if (cpuid == 0) {
			DELAY(1000000);
			cpu_reset_real();
			/* NOTREACHED */
		} else {
			/* We are not BSP (CPU #0) */

			cpu_reset_proxyid = cpuid;
			cpustop_restartfunc = cpu_reset_proxy;
			printf("cpu_reset: Restarting BSP\n");
			started_cpus = (1<<0);		/* Restart CPU #0 */

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			__asm __volatile("cli" : : : "memory");
			cpu_reset_proxy_active = 2;
			cnt = 0;
			while (cpu_reset_proxy_active == 2 && cnt < 10000000)
				cnt++;	/* Do nothing */
			if (cpu_reset_proxy_active == 2) {
				printf("cpu_reset: BSP did not grab mp lock\n");
				cpu_reset_real();	/* XXX: Bogus ? */
			}
			cpu_reset_proxy_active = 4;
			__asm __volatile("sti" : : : "memory");
			while (1);
			/* NOTREACHED */
		}
	}
#else
	cpu_reset_real();
#endif
}

static void
cpu_reset_real()
{

#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	disable_intr();
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);	/* SHUT0 = 0. */
		outb(0x37, 0x0b);	/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);	/* Reset. */
#else
	/*
	 * Attempt to do a CPU reset via the keyboard controller.
	 * Do not turn off GateA20, as any machine that fails to do
	 * the reset here would then end up in no man's land.
	 */

#if !defined(BROKEN_KEYBOARD_RESET)
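	/*
	 * Writing 0xFE to the keyboard controller command port (0x64)
	 * asks the controller to pulse the CPU reset line.
	 */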
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
#endif
#endif /* PC98 */
	/* Force a shutdown by unmapping the entire address space! */
	bzero((caddr_t) PTD, PAGE_SIZE);

	/* "good night, sweet prince .... <THUNK!>" */
	invltlb();
	/* NOTREACHED */
	while(1);
}

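/*
 * Grow the user stack to include sp.
 * Returns 1 on success, 0 on failure.
 */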
int
grow_stack(p, sp)
	struct proc *p;
	u_int sp;
{
	int rv;

	rv = vm_map_growstack (p, sp);
	if (rv != KERN_SUCCESS)
		return (0);

	return (1);
}

SYSCTL_DECL(_vm_stats_misc);

static int cnt_prezero;

SYSCTL_INT(_vm_stats_misc, OID_AUTO,
	cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */

#define ZIDLE_LO(v)	((v) * 2 / 3)
#define ZIDLE_HI(v)	((v) * 4 / 5)
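/*
 * Hysteresis band for the zeroing state: once ZIDLE_HI (4/5) of the free
 * pages are pre-zeroed we stop, and we do not start zeroing again until
 * the zeroed count drops below ZIDLE_LO (2/3) of the free page count.
 */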

int
vm_page_zero_idle()
{
	static int free_rover;
	static int zero_state;
	vm_page_t m;
	int s;

	/*
	 * Attempt to maintain approximately 1/2 of our free pages in a
	 * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
	 * generally zeroing a page when the system is near steady-state.
	 * Otherwise we might get 'flutter' during disk I/O / IPC or
	 * fast sleeps.  We also do not want to be continuously zeroing
	 * pages because doing so may flush our L1 and L2 caches too much.
	 */

	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
		return(0);
	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
		return(0);

#ifdef SMP
	if (try_mplock()) {
#endif
		s = splvm();
		__asm __volatile("sti" : : : "memory");
		zero_state = 0;
		m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
		if (m != NULL && (m->flags & PG_ZERO) == 0) {
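			/*
			 * Take the page off the free queue while we zero
			 * it, so that it cannot be allocated out from
			 * under us once splvm() is dropped below.
			 */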
			vm_page_queues[m->queue].lcnt--;
			TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
			m->queue = PQ_NONE;
			splx(s);
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
			(void)splvm();
			vm_page_flag_set(m, PG_ZERO);
			m->queue = PQ_FREE + m->pc;
			vm_page_queues[m->queue].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
			    pageq);
			++vm_page_zero_count;
			++cnt_prezero;
			if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
				zero_state = 1;
		}
		free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
		splx(s);
		__asm __volatile("cli" : : : "memory");
#ifdef SMP
		rel_mplock();
#endif
		return (1);
#ifdef SMP
	}
#endif
	/*
	 * We have to enable interrupts for a moment if the try_mplock fails
	 * in order to potentially take an IPI.  XXX this should be in
	 * swtch.s
	 */
	__asm __volatile("sti; nop; cli" : : : "memory");
	return (0);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm()
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

#include "isa.h"

int
is_physical_memory(addr)
	vm_paddr_t addr;
{

#if NISA > 0
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return 0;
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}