1 /*-
2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * William Jolitz.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
38 * $FreeBSD$
39 */
40
41 #include "apm.h"
42 #include "ether.h"
43 #include "npx.h"
44 #include "opt_atalk.h"
45 #include "opt_cpu.h"
46 #include "opt_ddb.h"
47 #include "opt_inet.h"
48 #include "opt_ipx.h"
49 #include "opt_maxmem.h"
50 #include "opt_msgbuf.h"
51 #include "opt_perfmon.h"
52 #include "opt_smp.h"
53 #include "opt_sysvipc.h"
54 #include "opt_user_ldt.h"
55 #include "opt_userconfig.h"
56 #include "opt_vm86.h"
57
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sysproto.h>
61 #include <sys/signalvar.h>
62 #include <sys/kernel.h>
63 #include <sys/linker.h>
64 #include <sys/proc.h>
65 #include <sys/buf.h>
66 #include <sys/reboot.h>
67 #include <sys/callout.h>
68 #include <sys/malloc.h>
69 #include <sys/mbuf.h>
70 #include <sys/msgbuf.h>
71 #include <sys/sysent.h>
72 #include <sys/sysctl.h>
73 #include <sys/vmmeter.h>
74
75 #ifdef SYSVSHM
76 #include <sys/shm.h>
77 #endif
78
79 #ifdef SYSVMSG
80 #include <sys/msg.h>
81 #endif
82
83 #ifdef SYSVSEM
84 #include <sys/sem.h>
85 #endif
86
87 #include <vm/vm.h>
88 #include <vm/vm_param.h>
89 #include <vm/vm_prot.h>
90 #include <sys/lock.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_object.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_map.h>
95 #include <vm/vm_pager.h>
96 #include <vm/vm_extern.h>
97
98 #include <sys/user.h>
99 #include <sys/exec.h>
100
101 #include <ddb/ddb.h>
102
103 #if defined(INET) || defined(IPX) || defined(NATM) || defined(NETATALK) \
104 || NETHER > 0 || defined(NS)
105 #define NETISR
106 #endif
107
108 #ifdef NETISR
109 #include <net/netisr.h>
110 #endif
111
112 #include <machine/cpu.h>
113 #include <machine/reg.h>
114 #include <machine/clock.h>
115 #include <machine/specialreg.h>
116 #include <machine/cons.h>
117 #include <machine/bootinfo.h>
118 #include <machine/ipl.h>
119 #include <machine/md_var.h>
120 #include <machine/pcb_ext.h> /* pcb.h included via sys/user.h */
121 #ifdef SMP
122 #include <machine/smp.h>
123 #endif
124 #ifdef PERFMON
125 #include <machine/perfmon.h>
126 #endif
127
128 #include <i386/isa/isa_device.h>
129 #include <i386/isa/intr_machdep.h>
130 #ifndef VM86
131 #include <i386/isa/rtc.h>
132 #endif
133 #include <machine/random.h>
134 #include <sys/ptrace.h>
135
136 extern void init386 __P((int first));
137 extern void dblfault_handler __P((void));
138
139 extern void printcpuinfo(void); /* XXX header file */
140 extern void earlysetcpuclass(void); /* same header file */
141 extern void finishidentcpu(void);
142 extern void panicifcpuunsupported(void);
143 extern void initializecpu(void);
144
145 static void cpu_startup __P((void *));
146 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
147
148 static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
149
150 int _udatasel, _ucodesel;
151 u_int atdevbase;
152
153 #if defined(SWTCH_OPTIM_STATS)
154 extern int swtch_optim_stats;
155 SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
156 CTLFLAG_RD, &swtch_optim_stats, 0, "");
157 SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
158 CTLFLAG_RD, &tlb_flush_count, 0, "");
159 #endif
160
161 #ifdef PC98
162 static int ispc98 = 1;
163 #else
164 static int ispc98 = 0;
165 #endif
166 SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");
167
168 int physmem = 0;
169 int cold = 1;
170
171 static int
172 sysctl_hw_physmem SYSCTL_HANDLER_ARGS
173 {
174 int error = sysctl_handle_int(oidp, 0, ctob(physmem), req);
175 return (error);
176 }
177
178 SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
179 0, 0, sysctl_hw_physmem, "I", "");
180
181 static int
182 sysctl_hw_usermem SYSCTL_HANDLER_ARGS
183 {
184 int error = sysctl_handle_int(oidp, 0,
185 ctob(physmem - cnt.v_wire_count), req);
186 return (error);
187 }
188
189 SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
190 0, 0, sysctl_hw_usermem, "I", "");
191
192 static int
193 sysctl_hw_availpages SYSCTL_HANDLER_ARGS
194 {
195 int error = sysctl_handle_int(oidp, 0,
196 i386_btop(avail_end - avail_start), req);
197 return (error);
198 }
199
200 SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD,
201 0, 0, sysctl_hw_availpages, "I", "");
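
/*
 * Userland view of the three sysctls above; a minimal sketch, assuming
 * sysctlbyname(3) is available (illustrative only, never compiled here):
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int mem;
	size_t len = sizeof(mem);

	/* hw.physmem reports ctob(physmem), i.e. bytes, as an int */
	if (sysctlbyname("hw.physmem", &mem, &len, NULL, 0) == -1)
		return (1);
	printf("physical memory: %d bytes\n", mem);
	return (0);
}
#endif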
202
203 static int
204 sysctl_machdep_msgbuf SYSCTL_HANDLER_ARGS
205 {
206 int error;
207
	/*
	 * Unwind the buffer, so that it's linear (possibly starting with
	 * some initial nulls).
	 */
	error = sysctl_handle_opaque(oidp, msgbufp->msg_ptr + msgbufp->msg_bufr,
	    msgbufp->msg_size - msgbufp->msg_bufr, req);
	if (error)
		return (error);
	if (msgbufp->msg_bufr > 0) {
		error = sysctl_handle_opaque(oidp, msgbufp->msg_ptr,
		    msgbufp->msg_bufr, req);
	}
	return (error);
219 }
220
221 SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD,
222 0, 0, sysctl_machdep_msgbuf, "A","Contents of kernel message buffer");
223
224 static int msgbuf_clear;
225
226 static int
227 sysctl_machdep_msgbuf_clear SYSCTL_HANDLER_ARGS
228 {
229 int error;
230 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
231 req);
232 if (!error && req->newptr) {
233 /* Clear the buffer and reset write pointer */
		bzero(msgbufp->msg_ptr, msgbufp->msg_size);
		msgbufp->msg_bufr = msgbufp->msg_bufx = 0;
		msgbuf_clear = 0;
237 }
238 return (error);
239 }
240
241 SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW,
242 &msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I",
243 "Clear kernel message buffer");
244
245 int bootverbose = 0, Maxmem = 0;
246 long dumplo;
247
248 vm_offset_t phys_avail[10];
249
/* must be 2 less so that a 0/0 pair can signal the end of the chunks */
251 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
252
253 #ifdef NETISR
254 static void setup_netisrs __P((struct linker_set *));
255 #endif
256
257 static vm_offset_t buffer_sva, buffer_eva;
258 vm_offset_t clean_sva, clean_eva;
259 static vm_offset_t pager_sva, pager_eva;
260 #ifdef NETISR
261 extern struct linker_set netisr_set;
262 #endif
263 #if NNPX > 0
264 extern struct isa_driver npxdriver;
265 #endif
266
267 #define offsetof(type, member) ((size_t)(&((type *)0)->member))
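/*
 * E.g. offsetof(struct sigframe, sf_sc), used by sigreturn() below to
 * recover the start of a struct sigframe from a user-supplied pointer
 * to its sf_sc member.
 */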
268
269 static void
270 cpu_startup(dummy)
271 void *dummy;
272 {
273 register unsigned i;
274 register caddr_t v;
275 vm_offset_t maxaddr;
276 vm_size_t size = 0;
277 int firstaddr;
278 vm_offset_t minaddr;
279
280 if (boothowto & RB_VERBOSE)
281 bootverbose++;
282
283 /*
284 * Good {morning,afternoon,evening,night}.
285 */
286 printf(version);
287 earlysetcpuclass();
288 startrtclock();
289 printcpuinfo();
290 panicifcpuunsupported();
291 #ifdef PERFMON
292 perfmon_init();
293 #endif
294 printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), ptoa(Maxmem) / 1024);
295 /*
296 * Display any holes after the first chunk of extended memory.
297 */
298 if (bootverbose) {
299 int indx;
300
301 printf("Physical memory chunk(s):\n");
302 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
303 int size1 = phys_avail[indx + 1] - phys_avail[indx];
304
305 printf("0x%08x - 0x%08x, %u bytes (%u pages)\n",
306 phys_avail[indx], phys_avail[indx + 1] - 1, size1,
307 size1 / PAGE_SIZE);
308 }
309 }
310
311 #ifdef NETISR
312 /*
313 * Quickly wire in netisrs.
314 */
315 setup_netisrs(&netisr_set);
316 #endif
317
318 /*
319 * Calculate callout wheel size
320 */
321 for (callwheelsize = 1, callwheelbits = 0;
322 callwheelsize < ncallout;
323 callwheelsize <<= 1, ++callwheelbits)
324 ;
325 callwheelmask = callwheelsize - 1;
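
	/*
	 * Worked example: with ncallout == 300 the loop above stops at
	 * callwheelsize == 512 (the first power of two >= 300), giving
	 * callwheelbits == 9 and callwheelmask == 0x1ff, so a tick value
	 * t hashes to wheel bucket (t & callwheelmask).
	 */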
326
327 /*
328 * Allocate space for system data structures.
329 * The first available kernel virtual address is in "v".
330 * As pages of kernel virtual memory are allocated, "v" is incremented.
331 * As pages of memory are allocated and cleared,
332 * "firstaddr" is incremented.
333 * An index into the kernel page table corresponding to the
334 * virtual memory address maintained in "v" is kept in "mapaddr".
335 */
336
337 /*
338 * Make two passes. The first pass calculates how much memory is
339 * needed and allocates it. The second pass assigns virtual
340 * addresses to the various data structures.
341 */
342 firstaddr = 0;
343 again:
344 v = (caddr_t)firstaddr;
345
346 #define valloc(name, type, num) \
347 (name) = (type *)v; v = (caddr_t)((name)+(num))
348 #define valloclim(name, type, num, lim) \
349 (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
350
351 valloc(callout, struct callout, ncallout);
352 valloc(callwheel, struct callout_tailq, callwheelsize);
353 #ifdef SYSVSHM
354 valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
355 #endif
356 #ifdef SYSVSEM
357 valloc(sema, struct semid_ds, seminfo.semmni);
358 valloc(sem, struct sem, seminfo.semmns);
359 /* This is pretty disgusting! */
360 valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
361 #endif
362 #ifdef SYSVMSG
363 valloc(msgpool, char, msginfo.msgmax);
364 valloc(msgmaps, struct msgmap, msginfo.msgseg);
365 valloc(msghdrs, struct msg, msginfo.msgtql);
366 valloc(msqids, struct msqid_ds, msginfo.msgmni);
367 #endif
368
369 if (nbuf == 0) {
370 nbuf = 30;
		if (physmem > 1024)
372 nbuf += min((physmem - 1024) / 8, 2048);
373 }
374 nswbuf = max(min(nbuf/4, 64), 16);
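
	/*
	 * Worked example: when nbuf is not preset, a 64MB machine
	 * (physmem == 16384 4K pages) comes out as
	 * nbuf = 30 + min((16384 - 1024) / 8, 2048) = 1950 and
	 * nswbuf = max(min(1950 / 4, 64), 16) = 64.
	 */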
375
376 valloc(swbuf, struct buf, nswbuf);
377 valloc(buf, struct buf, nbuf);
380 /*
381 * End of first pass, size has been calculated so allocate memory
382 */
383 if (firstaddr == 0) {
384 size = (vm_size_t)(v - firstaddr);
385 firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
386 if (firstaddr == 0)
387 panic("startup: no room for tables");
388 goto again;
389 }
390
391 /*
392 * End of second pass, addresses have been assigned
393 */
394 if ((vm_size_t)(v - firstaddr) != size)
395 panic("startup: table size inconsistency");
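
	/*
	 * The two-pass idiom above, reduced to its essentials ("foo" and
	 * "nfoo" are made-up names; illustrative only, never compiled):
	 */
#if 0
	struct foo *foo;
	int nfoo = 128;

	firstaddr = 0;			/* pass 1: v walks from 0, sizing only */
fooagain:
	v = (caddr_t)firstaddr;
	valloc(foo, struct foo, nfoo);	/* advances v by nfoo * sizeof(*foo) */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);	/* bytes needed */
		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
		goto fooagain;		/* pass 2: same walk, real addresses */
	}
#endif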
396
397 clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
398 (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
399 buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
400 (nbuf*BKVASIZE));
401 pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
402 (nswbuf*MAXPHYS) + pager_map_size);
403 pager_map->system_map = 1;
404 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
405 (16*(ARG_MAX+(PAGE_SIZE*3))));
406
	/*
	 * Finally, allocate the mbuf pool.  Since mclrefcnt is an odd-sized
	 * allocation, we use the more space-efficient malloc() in place of
	 * kmem_alloc().
	 */
411 {
412 vm_offset_t mb_map_size;
413 int xclusters;
414
415 /* Allow override of NMBCLUSTERS from the kernel environment */
416 if (getenv_int("kern.ipc.nmbclusters", &xclusters) &&
417 xclusters > nmbclusters)
418 nmbclusters = xclusters;
419
420 mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES;
421 mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE));
422 mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT);
423 bzero(mclrefcnt, mb_map_size / MCLBYTES);
424 mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
425 mb_map_size);
426 mb_map->system_map = 1;
427 }
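
	/*
	 * The override above can be supplied from the boot loader, e.g.
	 * "set kern.ipc.nmbclusters=4096" at the loader prompt
	 * (illustrative); it is only honoured when it raises nmbclusters.
	 */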
428
429 /*
430 * Initialize callouts
431 */
432 SLIST_INIT(&callfree);
433 for (i = 0; i < ncallout; i++) {
434 callout_init(&callout[i]);
435 callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
436 SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
437 }
438
439 for (i = 0; i < callwheelsize; i++) {
440 TAILQ_INIT(&callwheel[i]);
441 }
442
443 #if defined(USERCONFIG)
444 userconfig();
445 cninit(); /* the preferred console may have changed */
446 #endif
447
448 printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count),
449 ptoa(cnt.v_free_count) / 1024);
450
451 /*
452 * Set up buffers, so they can be used to read disk labels.
453 */
454 bufinit();
455 vm_pager_bufferinit();
456
457 #ifdef SMP
458 /*
	 * OK, enough kmem_alloc/malloc state should be up; let's get on with it!
460 */
461 mp_start(); /* fire up the APs and APICs */
462 mp_announce();
463 #endif /* SMP */
464 }
465
466 #ifdef NETISR
467 int
468 register_netisr(num, handler)
469 int num;
470 netisr_t *handler;
471 {
472
	if (num < 0 || num >= (sizeof(netisrs) / sizeof(*netisrs))) {
474 printf("register_netisr: bad isr number: %d\n", num);
475 return (EINVAL);
476 }
477 netisrs[num] = handler;
478 return (0);
479 }
480
481 static void
482 setup_netisrs(ls)
483 struct linker_set *ls;
484 {
485 int i;
486 const struct netisrtab *nit;
487
	for (i = 0; ls->ls_items[i]; i++) {
489 nit = (const struct netisrtab *)ls->ls_items[i];
490 register_netisr(nit->nit_num, nit->nit_isr);
491 }
492 }
493 #endif /* NETISR */
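
/*
 * A protocol hooks itself into the table above through the netisr
 * linker set; a minimal sketch, assuming the NETISR_SET() registration
 * macro from <net/netisr.h> (the "foo" names are made up):
 */
#if 0
static void foointr(void);		/* the protocol's software interrupt */
NETISR_SET(NETISR_FOO, foointr);	/* entry walked by setup_netisrs() */
#endif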
494
495 /*
496 * Send an interrupt to process.
497 *
498 * Stack is set up to allow sigcode stored
499 * at top to call routine, followed by kcall
500 * to sigreturn routine below. After sigreturn
501 * resets the signal mask, the stack, and the
502 * frame pointer, it returns to the user
503 * specified pc, psl.
504 */
505 void
506 sendsig(catcher, sig, mask, code)
507 sig_t catcher;
508 int sig, mask;
509 u_long code;
510 {
511 register struct proc *p = curproc;
512 register struct trapframe *regs;
513 register struct sigframe *fp;
514 struct sigframe sf;
515 struct sigacts *psp = p->p_sigacts;
516 int oonstack;
517
518 regs = p->p_md.md_regs;
519 oonstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
520 /*
521 * Allocate and validate space for the signal handler context.
522 */
523 if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
524 (psp->ps_sigonstack & sigmask(sig))) {
525 fp = (struct sigframe *)(psp->ps_sigstk.ss_sp +
526 psp->ps_sigstk.ss_size - sizeof(struct sigframe));
527 psp->ps_sigstk.ss_flags |= SS_ONSTACK;
528 } else {
529 fp = (struct sigframe *)regs->tf_esp - 1;
530 }
531
	/*
	 * grow()/grow_stack() return FALSE if fp will not fit inside the
	 * stack and the stack cannot be grown; useracc() returns FALSE
	 * if access is denied.
	 */
537 #ifdef VM_STACK
538 if ((grow_stack (p, (int)fp) == FALSE) ||
539 #else
540 if ((grow(p, (int)fp) == FALSE) ||
541 #endif
542 (useracc((caddr_t)fp, sizeof(struct sigframe), B_WRITE) == FALSE)) {
543 /*
544 * Process has trashed its stack; give it an illegal
545 * instruction to halt it in its tracks.
546 */
547 SIGACTION(p, SIGILL) = SIG_DFL;
548 sig = sigmask(SIGILL);
549 p->p_sigignore &= ~sig;
550 p->p_sigcatch &= ~sig;
551 p->p_sigmask &= ~sig;
552 psignal(p, SIGILL);
553 return;
554 }
555
556 /*
557 * Build the argument list for the signal handler.
558 */
559 if (p->p_sysent->sv_sigtbl) {
560 if (sig < p->p_sysent->sv_sigsize)
561 sig = p->p_sysent->sv_sigtbl[sig];
562 else
563 sig = p->p_sysent->sv_sigsize + 1;
564 }
565 sf.sf_signum = sig;
566 sf.sf_code = code;
567 sf.sf_scp = &fp->sf_sc;
568 sf.sf_addr = (char *) regs->tf_err;
569 sf.sf_handler = catcher;
570
571 /* save scratch registers */
572 sf.sf_sc.sc_eax = regs->tf_eax;
573 sf.sf_sc.sc_ebx = regs->tf_ebx;
574 sf.sf_sc.sc_ecx = regs->tf_ecx;
575 sf.sf_sc.sc_edx = regs->tf_edx;
576 sf.sf_sc.sc_esi = regs->tf_esi;
577 sf.sf_sc.sc_edi = regs->tf_edi;
578 sf.sf_sc.sc_cs = regs->tf_cs;
579 sf.sf_sc.sc_ds = regs->tf_ds;
580 sf.sf_sc.sc_ss = regs->tf_ss;
581 sf.sf_sc.sc_es = regs->tf_es;
582 sf.sf_sc.sc_fs = rfs();
583 sf.sf_sc.sc_gs = rgs();
584 sf.sf_sc.sc_isp = regs->tf_isp;
585
586 /*
587 * Build the signal context to be used by sigreturn.
588 */
589 sf.sf_sc.sc_onstack = oonstack;
590 sf.sf_sc.sc_mask = mask;
591 sf.sf_sc.sc_sp = regs->tf_esp;
592 sf.sf_sc.sc_fp = regs->tf_ebp;
593 sf.sf_sc.sc_pc = regs->tf_eip;
594 sf.sf_sc.sc_ps = regs->tf_eflags;
595 sf.sf_sc.sc_trapno = regs->tf_trapno;
596 sf.sf_sc.sc_err = regs->tf_err;
597
598 #ifdef VM86
599 /*
600 * If we're a vm86 process, we want to save the segment registers.
601 * We also change eflags to be our emulated eflags, not the actual
602 * eflags.
603 */
604 if (regs->tf_eflags & PSL_VM) {
605 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
606 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;
607
608 sf.sf_sc.sc_gs = tf->tf_vm86_gs;
609 sf.sf_sc.sc_fs = tf->tf_vm86_fs;
610 sf.sf_sc.sc_es = tf->tf_vm86_es;
611 sf.sf_sc.sc_ds = tf->tf_vm86_ds;
612
613 if (vm86->vm86_has_vme == 0)
614 sf.sf_sc.sc_ps = (tf->tf_eflags & ~(PSL_VIF | PSL_VIP))
615 | (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
616
617 /*
618 * We should never have PSL_T set when returning from vm86
619 * mode. It may be set here if we deliver a signal before
620 * getting to vm86 mode, so turn it off.
621 *
622 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
623 * syscalls made by the signal handler. This just avoids
624 * wasting time for our lazy fixup of such faults. PSL_NT
625 * does nothing in vm86 mode, but vm86 programs can set it
626 * almost legitimately in probes for old cpu types.
627 */
628 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
629 }
630 #endif /* VM86 */
631
632 /*
633 * Copy the sigframe out to the user's stack.
634 */
635 if (copyout(&sf, fp, sizeof(struct sigframe)) != 0) {
		/*
		 * Something is wrong with the stack pointer;
		 * kill the process.
		 */
640 sigexit(p, SIGILL);
641 }
642
643 regs->tf_esp = (int)fp;
644 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
645 regs->tf_cs = _ucodesel;
646 regs->tf_ds = _udatasel;
647 regs->tf_es = _udatasel;
648 load_fs(_udatasel);
649 load_gs(_udatasel);
650 regs->tf_ss = _udatasel;
651 }
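
/*
 * For reference, the user-visible half of the mechanism above: the
 * handler runs on the frame built by sendsig() and, on return, falls
 * into the sigcode trampoline, which issues the sigreturn(2) handled
 * below.  A minimal sketch (userland, illustrative only):
 */
#if 0
#include <signal.h>
#include <unistd.h>

static void
handler(sig)
	int sig;
{
	write(2, "caught\n", 7);
	/* returning re-enters the trampoline, which calls sigreturn() */
}

int
main()
{
	signal(SIGUSR1, handler);
	kill(getpid(), SIGUSR1);
	return (0);
}
#endif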
652
653 /*
654 * System call to cleanup state after a signal
655 * has been taken. Reset signal mask and
656 * stack state from context left by sendsig (above).
657 * Return to previous pc and psl as specified by
658 * context left by sendsig. Check carefully to
659 * make sure that the user has not modified the
660 * state to gain improper privileges.
661 */
662 int
663 sigreturn(p, uap)
664 struct proc *p;
665 struct sigreturn_args /* {
666 struct sigcontext *sigcntxp;
667 } */ *uap;
668 {
669 register struct sigcontext *scp;
670 register struct sigframe *fp;
671 register struct trapframe *regs = p->p_md.md_regs;
672 int eflags;
673
674 /*
675 * (XXX old comment) regs->tf_esp points to the return address.
676 * The user scp pointer is above that.
677 * The return address is faked in the signal trampoline code
678 * for consistency.
679 */
680 scp = uap->sigcntxp;
681 fp = (struct sigframe *)
682 ((caddr_t)scp - offsetof(struct sigframe, sf_sc));
683
684 if (useracc((caddr_t)fp, sizeof (*fp), B_WRITE) == 0)
685 return(EFAULT);
686
687 eflags = scp->sc_ps;
688 #ifdef VM86
689 if (eflags & PSL_VM) {
690 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
691 struct vm86_kernel *vm86;
692
693 /*
694 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
695 * set up the vm86 area, and we can't enter vm86 mode.
696 */
697 if (p->p_addr->u_pcb.pcb_ext == 0)
698 return (EINVAL);
699 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;
700 if (vm86->vm86_inited == 0)
701 return (EINVAL);
702
703 /* go back to user mode if both flags are set */
704 if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
705 trapsignal(p, SIGBUS, 0);
706
707 if (vm86->vm86_has_vme) {
708 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
709 (eflags & VME_USERCHANGE) | PSL_VM;
710 } else {
711 vm86->vm86_eflags = eflags; /* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
713 }
714 tf->tf_vm86_ds = scp->sc_ds;
715 tf->tf_vm86_es = scp->sc_es;
716 tf->tf_vm86_fs = scp->sc_fs;
717 tf->tf_vm86_gs = scp->sc_gs;
718 tf->tf_ds = _udatasel;
719 tf->tf_es = _udatasel;
720 } else {
721 #endif /* VM86 */
722 /*
723 * Don't allow users to change privileged or reserved flags.
724 */
725 #define EFLAGS_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
726 /*
727 * XXX do allow users to change the privileged flag PSL_RF.
728 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
729 * should sometimes set it there too. tf_eflags is kept in
730 * the signal context during signal handling and there is no
731 * other place to remember it, so the PSL_RF bit may be
732 * corrupted by the signal handler without us knowing.
733 * Corruption of the PSL_RF bit at worst causes one more or
734 * one less debugger trap, so allowing it is fairly harmless.
735 */
736 if (!EFLAGS_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
737 #ifdef DEBUG
738 printf("sigreturn: eflags = 0x%x\n", eflags);
739 #endif
740 return(EINVAL);
741 }
742
743 /*
744 * Don't allow users to load a valid privileged %cs. Let the
745 * hardware check for invalid selectors, excess privilege in
746 * other selectors, invalid %eip's and invalid %esp's.
747 */
748 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
749 if (!CS_SECURE(scp->sc_cs)) {
750 #ifdef DEBUG
751 printf("sigreturn: cs = 0x%x\n", scp->sc_cs);
752 #endif
753 trapsignal(p, SIGBUS, T_PROTFLT);
754 return(EINVAL);
755 }
756 regs->tf_ds = scp->sc_ds;
757 regs->tf_es = scp->sc_es;
758 #ifdef VM86
759 }
760 #endif
761
762 /* restore scratch registers */
763 regs->tf_eax = scp->sc_eax;
764 regs->tf_ebx = scp->sc_ebx;
765 regs->tf_ecx = scp->sc_ecx;
766 regs->tf_edx = scp->sc_edx;
767 regs->tf_esi = scp->sc_esi;
768 regs->tf_edi = scp->sc_edi;
769 regs->tf_cs = scp->sc_cs;
770 regs->tf_ss = scp->sc_ss;
771 regs->tf_isp = scp->sc_isp;
772
773 if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
774 return(EINVAL);
775
776 if (scp->sc_onstack & 01)
777 p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
778 else
779 p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
780 p->p_sigmask = scp->sc_mask & ~sigcantmask;
781 regs->tf_ebp = scp->sc_fp;
782 regs->tf_esp = scp->sc_sp;
783 regs->tf_eip = scp->sc_pc;
784 regs->tf_eflags = eflags;
785 return(EJUSTRETURN);
786 }
787
788 /*
789 * Machine dependent boot() routine
790 *
791 * I haven't seen anything to put here yet
792 * Possibly some stuff might be grafted back here from boot()
793 */
794 void
795 cpu_boot(int howto)
796 {
797 }
798
799 /*
800 * Shutdown the CPU as much as possible
801 */
802 void
803 cpu_halt(void)
804 {
805 for (;;)
806 __asm__ ("hlt");
807 }
808
809 /*
810 * Clear registers on exec
811 */
812 void
813 setregs(p, entry, stack, ps_strings)
814 struct proc *p;
815 u_long entry;
816 u_long stack;
817 u_long ps_strings;
818 {
819 struct trapframe *regs = p->p_md.md_regs;
820 struct pcb *pcb = &p->p_addr->u_pcb;
821
822 #ifdef USER_LDT
823 /* was i386_user_cleanup() in NetBSD */
824 if (pcb->pcb_ldt) {
825 if (pcb == curpcb) {
826 lldt(_default_ldt);
827 currentldt = _default_ldt;
828 }
829 kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ldt,
830 pcb->pcb_ldt_len * sizeof(union descriptor));
		pcb->pcb_ldt = 0;
		pcb->pcb_ldt_len = 0;
832 }
833 #endif
834
835 bzero((char *)regs, sizeof(struct trapframe));
836 regs->tf_eip = entry;
837 regs->tf_esp = stack;
838 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
839 regs->tf_ss = _udatasel;
840 regs->tf_ds = _udatasel;
841 regs->tf_es = _udatasel;
842 regs->tf_cs = _ucodesel;
843
844 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */
845 regs->tf_ebx = ps_strings;
846
847 /* reset %fs and %gs as well */
848 pcb->pcb_fs = _udatasel;
849 pcb->pcb_gs = _udatasel;
850 if (pcb == curpcb) {
851 __asm("movw %w0,%%fs" : : "r" (_udatasel));
852 __asm("movw %w0,%%gs" : : "r" (_udatasel));
853 }
854
855 /*
856 * Initialize the math emulator (if any) for the current process.
857 * Actually, just clear the bit that says that the emulator has
858 * been initialized. Initialization is delayed until the process
859 * traps to the emulator (if it is done at all) mainly because
860 * emulators don't provide an entry point for initialization.
861 */
862 p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP;
863
864 /*
865 * Arrange to trap the next npx or `fwait' instruction (see npx.c
866 * for why fwait must be trapped at least if there is an npx or an
867 * emulator). This is mainly to handle the case where npx0 is not
868 * configured, since the npx routines normally set up the trap
869 * otherwise. It should be done only at boot time, but doing it
870 * here allows modifying `npx_exists' for testing the emulator on
871 * systems with an npx.
872 */
873 load_cr0(rcr0() | CR0_MP | CR0_TS);
874
875 #if NNPX > 0
876 /* Initialize the npx (if any) for the current process. */
877 npxinit(__INITIAL_NPXCW__);
878 #endif
879
880 /*
881 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
883 * on it.
884 */
885 p->p_retval[1] = 0;
886 }
887
888 static int
889 sysctl_machdep_adjkerntz SYSCTL_HANDLER_ARGS
890 {
891 int error;
892 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
893 req);
894 if (!error && req->newptr)
895 resettodr();
896 return (error);
897 }
898
899 SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
900 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");
901
902 SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
903 CTLFLAG_RW, &disable_rtc_set, 0, "");
904
905 SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
906 CTLFLAG_RD, &bootinfo, bootinfo, "");
907
908 SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
909 CTLFLAG_RW, &wall_cmos_clock, 0, "");
910
911 /*
912 * Initialize 386 and configure to run kernel
913 */
914
915 /*
916 * Initialize segments & interrupt table
917 */
918
919 int _default_ldt;
920 #ifdef SMP
921 union descriptor gdt[NGDT + NCPU]; /* global descriptor table */
922 #else
923 union descriptor gdt[NGDT]; /* global descriptor table */
924 #endif
925 struct gate_descriptor idt[NIDT]; /* interrupt descriptor table */
926 union descriptor ldt[NLDT]; /* local descriptor table */
927 #ifdef SMP
/* table descriptors - used to load the tables into the microprocessor */
929 struct region_descriptor r_gdt, r_idt;
930 #endif
931
932 extern struct i386tss common_tss; /* One tss per cpu */
933 #ifdef VM86
934 extern struct segment_descriptor common_tssd;
935 extern int private_tss; /* flag indicating private tss */
936 extern u_int my_tr; /* which task register setting */
937 #endif /* VM86 */
938
939 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
940 struct gate_descriptor *t_idt;
941 extern int has_f00f_bug;
942 #endif
943
944 static struct i386tss dblfault_tss;
945 static char dblfault_stack[PAGE_SIZE];
946
947 extern struct user *proc0paddr;
948
949
950 /* software prototypes -- in more palatable form */
951 struct soft_segment_descriptor gdt_segs[
952 #ifdef SMP
953 NGDT + NCPU
954 #endif
955 ] = {
956 /* GNULL_SEL 0 Null Descriptor */
957 { 0x0, /* segment base address */
958 0x0, /* length */
959 0, /* segment type */
960 0, /* segment descriptor priority level */
961 0, /* segment descriptor present */
962 0, 0,
963 0, /* default 32 vs 16 bit size */
964 0 /* limit granularity (byte/page units)*/ },
965 /* GCODE_SEL 1 Code Descriptor for kernel */
966 { 0x0, /* segment base address */
967 0xfffff, /* length - all address space */
968 SDT_MEMERA, /* segment type */
969 0, /* segment descriptor priority level */
970 1, /* segment descriptor present */
971 0, 0,
972 1, /* default 32 vs 16 bit size */
973 1 /* limit granularity (byte/page units)*/ },
974 /* GDATA_SEL 2 Data Descriptor for kernel */
975 { 0x0, /* segment base address */
976 0xfffff, /* length - all address space */
977 SDT_MEMRWA, /* segment type */
978 0, /* segment descriptor priority level */
979 1, /* segment descriptor present */
980 0, 0,
981 1, /* default 32 vs 16 bit size */
982 1 /* limit granularity (byte/page units)*/ },
983 /* GLDT_SEL 3 LDT Descriptor */
984 { (int) ldt, /* segment base address */
985 sizeof(ldt)-1, /* length - all address space */
986 SDT_SYSLDT, /* segment type */
987 SEL_UPL, /* segment descriptor priority level */
988 1, /* segment descriptor present */
989 0, 0,
990 0, /* unused - default 32 vs 16 bit size */
991 0 /* limit granularity (byte/page units)*/ },
992 /* GTGATE_SEL 4 Null Descriptor - Placeholder */
993 { 0x0, /* segment base address */
994 0x0, /* length - all address space */
995 0, /* segment type */
996 0, /* segment descriptor priority level */
997 0, /* segment descriptor present */
998 0, 0,
999 0, /* default 32 vs 16 bit size */
1000 0 /* limit granularity (byte/page units)*/ },
1001 /* GPANIC_SEL 5 Panic Tss Descriptor */
1002 { (int) &dblfault_tss, /* segment base address */
1003 sizeof(struct i386tss)-1,/* length - all address space */
1004 SDT_SYS386TSS, /* segment type */
1005 0, /* segment descriptor priority level */
1006 1, /* segment descriptor present */
1007 0, 0,
1008 0, /* unused - default 32 vs 16 bit size */
1009 0 /* limit granularity (byte/page units)*/ },
1010 /* GPROC0_SEL 6 Proc 0 Tss Descriptor */
1011 {
1012 (int) &common_tss, /* segment base address */
1013 sizeof(struct i386tss)-1,/* length - all address space */
1014 SDT_SYS386TSS, /* segment type */
1015 0, /* segment descriptor priority level */
1016 1, /* segment descriptor present */
1017 0, 0,
1018 0, /* unused - default 32 vs 16 bit size */
1019 0 /* limit granularity (byte/page units)*/ },
1020 /* GUSERLDT_SEL 7 User LDT Descriptor per process */
1021 { (int) ldt, /* segment base address */
1022 (512 * sizeof(union descriptor)-1), /* length */
1023 SDT_SYSLDT, /* segment type */
1024 0, /* segment descriptor priority level */
1025 1, /* segment descriptor present */
1026 0, 0,
1027 0, /* unused - default 32 vs 16 bit size */
1028 0 /* limit granularity (byte/page units)*/ },
1029 /* GAPMCODE32_SEL 8 APM BIOS 32-bit interface (32bit Code) */
1030 { 0, /* segment base address (overwritten by APM) */
1031 0xffff, /* length (overwritten by APM) */
1032 SDT_MEMERA, /* segment type */
1033 0, /* segment descriptor priority level */
1034 1, /* segment descriptor present */
1035 0, 0,
1036 1, /* default 32 vs 16 bit size */
1037 0 /* limit granularity (byte/page units)*/ },
1038 /* GAPMCODE16_SEL 9 APM BIOS 32-bit interface (16bit Code) */
1039 { 0, /* segment base address (overwritten by APM) */
1040 0xffff, /* length (overwritten by APM) */
1041 SDT_MEMERA, /* segment type */
1042 0, /* segment descriptor priority level */
1043 1, /* segment descriptor present */
1044 0, 0,
1045 0, /* default 32 vs 16 bit size */
1046 0 /* limit granularity (byte/page units)*/ },
1047 /* GAPMDATA_SEL 10 APM BIOS 32-bit interface (Data) */
1048 { 0, /* segment base address (overwritten by APM) */
1049 0xffff, /* length (overwritten by APM) */
1050 SDT_MEMRWA, /* segment type */
1051 0, /* segment descriptor priority level */
1052 1, /* segment descriptor present */
1053 0, 0,
1054 0, /* default 32 vs 16 bit size */
1055 0 /* limit granularity (byte/page units)*/ },
1056 };
1057
1058 static struct soft_segment_descriptor ldt_segs[] = {
1059 /* Null Descriptor - overwritten by call gate */
1060 { 0x0, /* segment base address */
1061 0x0, /* length - all address space */
1062 0, /* segment type */
1063 0, /* segment descriptor priority level */
1064 0, /* segment descriptor present */
1065 0, 0,
1066 0, /* default 32 vs 16 bit size */
1067 0 /* limit granularity (byte/page units)*/ },
1068 /* Null Descriptor - overwritten by call gate */
1069 { 0x0, /* segment base address */
1070 0x0, /* length - all address space */
1071 0, /* segment type */
1072 0, /* segment descriptor priority level */
1073 0, /* segment descriptor present */
1074 0, 0,
1075 0, /* default 32 vs 16 bit size */
1076 0 /* limit granularity (byte/page units)*/ },
1077 /* Null Descriptor - overwritten by call gate */
1078 { 0x0, /* segment base address */
1079 0x0, /* length - all address space */
1080 0, /* segment type */
1081 0, /* segment descriptor priority level */
1082 0, /* segment descriptor present */
1083 0, 0,
1084 0, /* default 32 vs 16 bit size */
1085 0 /* limit granularity (byte/page units)*/ },
1086 /* Code Descriptor for user */
1087 { 0x0, /* segment base address */
1088 0xfffff, /* length - all address space */
1089 SDT_MEMERA, /* segment type */
1090 SEL_UPL, /* segment descriptor priority level */
1091 1, /* segment descriptor present */
1092 0, 0,
1093 1, /* default 32 vs 16 bit size */
1094 1 /* limit granularity (byte/page units)*/ },
1095 /* Data Descriptor for user */
1096 { 0x0, /* segment base address */
1097 0xfffff, /* length - all address space */
1098 SDT_MEMRWA, /* segment type */
1099 SEL_UPL, /* segment descriptor priority level */
1100 1, /* segment descriptor present */
1101 0, 0,
1102 1, /* default 32 vs 16 bit size */
1103 1 /* limit granularity (byte/page units)*/ },
1104 };
1105
1106 void
1107 setidt(idx, func, typ, dpl, selec)
1108 int idx;
1109 inthand_t *func;
1110 int typ;
1111 int dpl;
1112 int selec;
1113 {
1114 struct gate_descriptor *ip;
1115
1116 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
1117 ip = (t_idt != NULL ? t_idt : idt) + idx;
1118 #else
1119 ip = idt + idx;
1120 #endif
1121 ip->gd_looffset = (int)func;
1122 ip->gd_selector = selec;
1123 ip->gd_stkcpy = 0;
1124 ip->gd_xx = 0;
1125 ip->gd_type = typ;
1126 ip->gd_dpl = dpl;
1127 ip->gd_p = 1;
	ip->gd_hioffset = ((int)func) >> 16;
1129 }
1130
1131 #define IDTVEC(name) __CONCAT(X,name)
1132
1133 extern inthand_t
1134 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1135 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1136 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1137 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1138 IDTVEC(syscall), IDTVEC(int0x80_syscall);
1139
1140 void
1141 sdtossd(sd, ssd)
1142 struct segment_descriptor *sd;
1143 struct soft_segment_descriptor *ssd;
1144 {
1145 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1146 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1147 ssd->ssd_type = sd->sd_type;
1148 ssd->ssd_dpl = sd->sd_dpl;
1149 ssd->ssd_p = sd->sd_p;
1150 ssd->ssd_def32 = sd->sd_def32;
1151 ssd->ssd_gran = sd->sd_gran;
1152 }
1153
1154 void
1155 init386(first)
1156 int first;
1157 {
1158 int x;
1159 unsigned biosbasemem, biosextmem;
1160 struct gate_descriptor *gdp;
1161 int gsel_tss;
1162
1163 struct isa_device *idp;
1164 #ifndef SMP
	/* table descriptors - used to load the tables into the microprocessor */
1166 struct region_descriptor r_gdt, r_idt;
1167 #endif
1168 int pagesinbase, pagesinext;
1169 vm_offset_t target_page;
1170 int pa_indx, off;
1171 int speculative_mprobe;
1172
1173 /*
1174 * Prevent lowering of the ipl if we call tsleep() early.
1175 */
1176 safepri = cpl;
1177
1178 proc0.p_addr = proc0paddr;
1179
1180 atdevbase = ISA_HOLE_START + KERNBASE;
1181
1182 /*
1183 * Initialize the console before we print anything out.
1184 */
1185 cninit();
1186
	/*
	 * Make gdt memory segments.  The code segment goes up to the end
	 * of the page with etext in it; the data segment goes to the end
	 * of the address space.
	 */
1192 /*
1193 * XXX text protection is temporarily (?) disabled. The limit was
1194 * i386_btop(round_page(etext)) - 1.
1195 */
1196 gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1;
1197 gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1;
1198 #ifdef BDE_DEBUGGER
1199 #define NGDT1 8 /* avoid overwriting db entries with APM ones */
1200 #else
1201 #define NGDT1 (sizeof gdt_segs / sizeof gdt_segs[0])
1202 #endif
1203 for (x = 0; x < NGDT1; x++)
1204 ssdtosd(&gdt_segs[x], &gdt[x].sd);
1205 #ifdef VM86
1206 common_tssd = gdt[GPROC0_SEL].sd;
1207 #endif /* VM86 */
1208
1209 #ifdef SMP
1210 /*
1211 * Spin these up now. init_secondary() grabs them. We could use
1212 * #for(x,y,z) / #endfor cpp directives if they existed.
1213 */
1214 for (x = 0; x < NCPU; x++) {
1215 gdt_segs[NGDT + x] = gdt_segs[GPROC0_SEL];
1216 ssdtosd(&gdt_segs[NGDT + x], &gdt[NGDT + x].sd);
1217 }
1218 #endif
1219
1220 /* make ldt memory segments */
1221 /*
1222 * The data segment limit must not cover the user area because we
1223 * don't want the user area to be writable in copyout() etc. (page
1224 * level protection is lost in kernel mode on 386's). Also, we
1225 * don't want the user area to be writable directly (page level
1226 * protection of the user area is not available on 486's with
1227 * CR0_WP set, because there is no user-read/kernel-write mode).
1228 *
1229 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it
1230 * should be spelled ...MAX_USER...
1231 */
1232 #define VM_END_USER_RW_ADDRESS VM_MAXUSER_ADDRESS
1233 /*
1234 * The code segment limit has to cover the user area until we move
1235 * the signal trampoline out of the user area. This is safe because
1236 * the code segment cannot be written to directly.
1237 */
1238 #define VM_END_USER_R_ADDRESS (VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE)
1239 ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
1240 ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
1241 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
1242 ssdtosd(&ldt_segs[x], &ldt[x].sd);
1243
1244 /* exceptions */
1245 for (x = 0; x < NIDT; x++)
1246 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1247 setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1248 setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1249 setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1250 setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
1251 setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
1252 setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1253 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1254 setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1255 setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
1256 setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1257 setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1258 setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1259 setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1260 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1261 setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1262 setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1263 setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1264 setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1265 setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1266 setidt(0x80, &IDTVEC(int0x80_syscall),
1267 SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
1268
1269 #include "isa.h"
#if NISA > 0
1271 isa_defaultirq();
1272 #endif
1273 rand_initialize();
1274
1275 r_gdt.rd_limit = sizeof(gdt) - 1;
1276 r_gdt.rd_base = (int) gdt;
1277 lgdt(&r_gdt);
1278
1279 r_idt.rd_limit = sizeof(idt) - 1;
1280 r_idt.rd_base = (int) idt;
1281 lidt(&r_idt);
1282
1283 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
1284 lldt(_default_ldt);
1285 #ifdef USER_LDT
1286 currentldt = _default_ldt;
1287 #endif
1288
1289 #ifdef DDB
1290 kdb_init();
1291 if (boothowto & RB_KDB)
1292 Debugger("Boot flags requested debugger");
1293 #endif
1294
1295 finishidentcpu(); /* Final stage of CPU initialization */
1296 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1297 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1298 initializecpu(); /* Initialize CPU registers */
1299
1300 /* make an initial tss so cpu can get interrupt stack on syscall! */
1301 #ifdef VM86
1302 common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
1303 #else
1304 common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE;
1305 #endif /* VM86 */
	common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
1307 common_tss.tss_ioopt = (sizeof common_tss) << 16;
1308 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
1309 ltr(gsel_tss);
1310 #ifdef VM86
1311 private_tss = 0;
1312 my_tr = GPROC0_SEL;
1313 #endif
1314
1315 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
1316 dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
1317 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
1318 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
1319 dblfault_tss.tss_cr3 = (int)IdlePTD;
1320 dblfault_tss.tss_eip = (int) dblfault_handler;
1321 dblfault_tss.tss_eflags = PSL_KERNEL;
1322 dblfault_tss.tss_ds = dblfault_tss.tss_es = dblfault_tss.tss_fs =
1323 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
1324 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
1325 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
1326
1327 #ifdef VM86
1328 initial_bioscalls(&biosbasemem, &biosextmem);
1329 #else
1330
1331 /* Use BIOS values stored in RTC CMOS RAM, since probing
1332 * breaks certain 386 AT relics.
1333 */
	biosbasemem = rtcin(RTC_BASELO) + (rtcin(RTC_BASEHI) << 8);
	biosextmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
1336 #endif
1337
1338 /*
1339 * If BIOS tells us that it has more than 640k in the basemem,
1340 * don't believe it - set it to 640k.
1341 */
1342 if (biosbasemem > 640) {
1343 printf("Preposterous RTC basemem of %uK, truncating to 640K\n",
1344 biosbasemem);
1345 biosbasemem = 640;
1346 }
1347 if (bootinfo.bi_memsizes_valid && bootinfo.bi_basemem > 640) {
1348 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
1349 bootinfo.bi_basemem);
1350 bootinfo.bi_basemem = 640;
1351 }
1352
1353 /*
1354 * Warn if the official BIOS interface disagrees with the RTC
1355 * interface used above about the amount of base memory or the
1356 * amount of extended memory. Prefer the BIOS value for the base
1357 * memory. This is necessary for machines that `steal' base
1358 * memory for use as BIOS memory, at least if we are going to use
1359 * the BIOS for apm. Prefer the RTC value for extended memory.
1360 * Eventually the hackish interface shouldn't even be looked at.
1361 */
1362 if (bootinfo.bi_memsizes_valid) {
1363 if (bootinfo.bi_basemem != biosbasemem) {
1364 vm_offset_t pa;
1365
1366 printf(
1367 "BIOS basemem (%uK) != RTC basemem (%uK), setting to BIOS value\n",
1368 bootinfo.bi_basemem, biosbasemem);
1369 biosbasemem = bootinfo.bi_basemem;
1370
1371 /*
			 * XXX if biosbasemem is now < 640, there is a `hole'
1373 * between the end of base memory and the start of
1374 * ISA memory. The hole may be empty or it may
1375 * contain BIOS code or data. Map it read/write so
1376 * that the BIOS can write to it. (Memory from 0 to
1377 * the physical end of the kernel is mapped read-only
1378 * to begin with and then parts of it are remapped.
1379 * The parts that aren't remapped form holes that
1380 * remain read-only and are unused by the kernel.
1381 * The base memory area is below the physical end of
1382 * the kernel and right now forms a read-only hole.
1383 * The part of it from PAGE_SIZE to
1384 * (trunc_page(biosbasemem * 1024) - 1) will be
1385 * remapped and used by the kernel later.)
1386 *
1387 * This code is similar to the code used in
1388 * pmap_mapdev, but since no memory needs to be
1389 * allocated we simply change the mapping.
1390 */
1391 for (pa = trunc_page(biosbasemem * 1024);
1392 pa < ISA_HOLE_START; pa += PAGE_SIZE) {
1393 unsigned *pte;
1394
1395 pte = (unsigned *)vtopte(pa + KERNBASE);
1396 *pte = pa | PG_RW | PG_V;
1397 }
1398 }
1399 if (bootinfo.bi_extmem != biosextmem)
1400 printf("BIOS extmem (%uK) != RTC extmem (%uK)\n",
1401 bootinfo.bi_extmem, biosextmem);
1402 }
1403
1404 #ifdef SMP
1405 /* make hole for AP bootstrap code */
1406 pagesinbase = mp_bootaddress(biosbasemem) / PAGE_SIZE;
1407 #else
1408 pagesinbase = biosbasemem * 1024 / PAGE_SIZE;
1409 #endif
1410
1411 pagesinext = biosextmem * 1024 / PAGE_SIZE;
1412
1413 /*
1414 * Special hack for chipsets that still remap the 384k hole when
1415 * there's 16MB of memory - this really confuses people that
1416 * are trying to use bus mastering ISA controllers with the
1417 * "16MB limit"; they only have 16MB, but the remapping puts
1418 * them beyond the limit.
1419 */
1420 /*
1421 * If extended memory is between 15-16MB (16-17MB phys address range),
1422 * chop it to 15MB.
1423 */
1424 if ((pagesinext > 3840) && (pagesinext < 4096))
1425 pagesinext = 3840;
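
	/*
	 * Worked out: 3840 pages * 4K == 15MB and 4096 pages == 16MB,
	 * hence the bounds tested above.
	 */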
1426
1427 /*
1428 * Maxmem isn't the "maximum memory", it's one larger than the
1429 * highest page of the physical address space. It should be
1430 * called something like "Maxphyspage".
1431 */
1432 Maxmem = pagesinext + 0x100000/PAGE_SIZE;
1433 /*
1434 * Indicate that we wish to do a speculative search for memory beyond
1435 * the end of the reported size if the indicated amount is 64MB (0x4000
1436 * pages) - which is the largest amount that the BIOS/bootblocks can
1437 * currently report. If a specific amount of memory is indicated via
1438 * the MAXMEM option or the npx0 "msize", then don't do the speculative
1439 * memory probe.
1440 */
1441 if (Maxmem >= 0x4000)
1442 speculative_mprobe = TRUE;
1443 else
1444 speculative_mprobe = FALSE;
1445
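	/*
	 * MAXMEM is specified in kilobytes, so MAXMEM/4 below converts it
	 * to 4K pages; e.g. "options MAXMEM=65536" (64MB) gives
	 * Maxmem == 16384.
	 */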
1446 #ifdef MAXMEM
1447 Maxmem = MAXMEM/4;
1448 speculative_mprobe = FALSE;
1449 #endif
1450
1451 #if NNPX > 0
1452 idp = find_isadev(isa_devtab_null, &npxdriver, 0);
1453 if (idp != NULL && idp->id_msize != 0) {
1454 Maxmem = idp->id_msize / 4;
1455 speculative_mprobe = FALSE;
1456 }
1457 #endif
1458
1459 #ifdef SMP
1460 /* look for the MP hardware - needed for apic addresses */
1461 mp_probe();
1462 #endif
1463
1464 /* call pmap initialization to make new kernel address space */
1465 pmap_bootstrap (first, 0);
1466
1467 /*
1468 * Size up each available chunk of physical memory.
1469 */
1470
1471 /*
1472 * We currently don't bother testing base memory.
1473 * XXX ...but we probably should.
1474 */
1475 pa_indx = 0;
1476 if (pagesinbase > 1) {
1477 phys_avail[pa_indx++] = PAGE_SIZE; /* skip first page of memory */
1478 phys_avail[pa_indx] = ptoa(pagesinbase);/* memory up to the ISA hole */
1479 physmem = pagesinbase - 1;
1480 } else {
1481 /* point at first chunk end */
1482 pa_indx++;
1483 }
1484
1485 for (target_page = avail_start; target_page < ptoa(Maxmem); target_page += PAGE_SIZE) {
1486 int tmp, page_bad;
1487
1488 page_bad = FALSE;
1489
1490 /*
1491 * map page into kernel: valid, read/write, non-cacheable
1492 */
1493 *(int *)CMAP1 = PG_V | PG_RW | PG_N | target_page;
1494 invltlb();
1495
1496 tmp = *(int *)CADDR1;
1497 /*
1498 * Test for alternating 1's and 0's
1499 */
1500 *(volatile int *)CADDR1 = 0xaaaaaaaa;
1501 if (*(volatile int *)CADDR1 != 0xaaaaaaaa) {
1502 page_bad = TRUE;
1503 }
1504 /*
1505 * Test for alternating 0's and 1's
1506 */
1507 *(volatile int *)CADDR1 = 0x55555555;
1508 if (*(volatile int *)CADDR1 != 0x55555555) {
1509 page_bad = TRUE;
1510 }
1511 /*
1512 * Test for all 1's
1513 */
1514 *(volatile int *)CADDR1 = 0xffffffff;
1515 if (*(volatile int *)CADDR1 != 0xffffffff) {
1516 page_bad = TRUE;
1517 }
1518 /*
1519 * Test for all 0's
1520 */
1521 *(volatile int *)CADDR1 = 0x0;
1522 if (*(volatile int *)CADDR1 != 0x0) {
1523 /*
1524 * test of page failed
1525 */
1526 page_bad = TRUE;
1527 }
1528 /*
1529 * Restore original value.
1530 */
1531 *(int *)CADDR1 = tmp;
1532
1533 /*
1534 * Adjust array of valid/good pages.
1535 */
1536 if (page_bad == FALSE) {
1537 /*
1538 * If this good page is a continuation of the
1539 * previous set of good pages, then just increase
1540 * the end pointer. Otherwise start a new chunk.
			 * Note that "end" points one page past the last
			 * valid page, making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we are at or past the end, bump up Maxmem
1545 * so that we keep going. The first bad page
1546 * will terminate the loop.
1547 */
1548 if (phys_avail[pa_indx] == target_page) {
1549 phys_avail[pa_indx] += PAGE_SIZE;
1550 if (speculative_mprobe == TRUE &&
1551 phys_avail[pa_indx] >= (64*1024*1024))
1552 Maxmem++;
1553 } else {
1554 pa_indx++;
1555 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
1556 printf("Too many holes in the physical address space, giving up\n");
1557 pa_indx--;
1558 break;
1559 }
1560 phys_avail[pa_indx++] = target_page; /* start */
1561 phys_avail[pa_indx] = target_page + PAGE_SIZE; /* end */
1562 }
1563 physmem++;
1564 }
1565 }
1566
1567 *(int *)CMAP1 = 0;
1568 invltlb();
1569
1570 /*
1571 * XXX
1572 * The last chunk must contain at least one page plus the message
1573 * buffer to avoid complicating other code (message buffer address
1574 * calculation, etc.).
1575 */
1576 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
1577 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
1578 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
1579 phys_avail[pa_indx--] = 0;
1580 phys_avail[pa_indx--] = 0;
1581 }
1582
1583 Maxmem = atop(phys_avail[pa_indx]);
1584
1585 /* Trim off space for the message buffer. */
1586 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
1587
1588 avail_end = phys_avail[pa_indx];
1589
	/* now running on new page tables, configured, and u/iom is accessible */
1591
1592 /* Map the message buffer. */
1593 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
1594 pmap_enter(kernel_pmap, (vm_offset_t)msgbufp + off,
1595 avail_end + off, VM_PROT_ALL, TRUE);
1596
1597 msgbufinit(msgbufp, MSGBUF_SIZE);
1598
1599 /* make a call gate to reenter kernel with */
1600 gdp = &ldt[LSYS5CALLS_SEL].gd;
1601
1602 x = (int) &IDTVEC(syscall);
1603 gdp->gd_looffset = x++;
1604 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
1605 gdp->gd_stkcpy = 1;
1606 gdp->gd_type = SDT_SYS386CGT;
1607 gdp->gd_dpl = SEL_UPL;
1608 gdp->gd_p = 1;
	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >> 16;
1610
1611 /* XXX does this work? */
1612 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
1613
1614 /* transfer to user mode */
1615
1616 _ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
1617 _udatasel = LSEL(LUDATA_SEL, SEL_UPL);
1618
1619 /* setup proc 0's pcb */
1620 proc0.p_addr->u_pcb.pcb_flags = 0;
1621 proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
1622 #ifdef SMP
1623 proc0.p_addr->u_pcb.pcb_mpnest = 1;
1624 #endif
1625 #ifdef VM86
1626 proc0.p_addr->u_pcb.pcb_ext = 0;
1627 #endif
1628
1629 /* Sigh, relocate physical addresses left from bootstrap */
1630 if (bootinfo.bi_modulep) {
1631 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
1632 preload_bootstrap_relocate(KERNBASE);
1633 }
1634 if (bootinfo.bi_envp)
1635 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
1636 }
1637
1638 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
1639 static void f00f_hack(void *unused);
1640 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
1641
1642 static void
1643 f00f_hack(void *unused) {
1644 #ifndef SMP
1645 struct region_descriptor r_idt;
1646 #endif
1647 vm_offset_t tmp;
1648
1649 if (!has_f00f_bug)
1650 return;
1651
1652 printf("Intel Pentium detected, installing workaround for F00F bug\n");
1653
1654 r_idt.rd_limit = sizeof(idt) - 1;
1655
1656 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
1657 if (tmp == 0)
1658 panic("kmem_alloc returned 0");
1659 if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0)
1660 panic("kmem_alloc returned non-page-aligned memory");
1661 /* Put the first seven entries in the lower page */
1662 t_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8));
1663 bcopy(idt, t_idt, sizeof(idt));
1664 r_idt.rd_base = (int)t_idt;
1665 lidt(&r_idt);
1666 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
1667 VM_PROT_READ, FALSE) != KERN_SUCCESS)
1668 panic("vm_map_protect failed");
1669 return;
1670 }
1671 #endif /* defined(I586_CPU) && !NO_F00F_HACK */
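
/*
 * Layout note for the workaround above: seven gate descriptors are
 * 7 * 8 == 56 bytes, so t_idt == tmp + PAGE_SIZE - 56 leaves exactly
 * the first seven vectors in the first page and the rest in the
 * second.  Write-protecting the first page turns the processor's
 * errant locked IDT access during the "f00f" lockup sequence into a
 * recoverable page fault rather than a wedged bus.
 */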
1672
1673 int
1674 ptrace_set_pc(p, addr)
1675 struct proc *p;
1676 unsigned long addr;
1677 {
1678 p->p_md.md_regs->tf_eip = addr;
1679 return (0);
1680 }
1681
1682 int
1683 ptrace_single_step(p)
1684 struct proc *p;
1685 {
1686 p->p_md.md_regs->tf_eflags |= PSL_T;
1687 return (0);
1688 }
1689
int
ptrace_read_u_check(p, addr, len)
	struct proc *p;
	vm_offset_t addr;
	size_t len;
1694 {
1695 vm_offset_t gap;
1696
1697 if ((vm_offset_t) (addr + len) < addr)
1698 return EPERM;
1699 if ((vm_offset_t) (addr + len) <= sizeof(struct user))
1700 return 0;
1701
1702 gap = (char *) p->p_md.md_regs - (char *) p->p_addr;
1703
1704 if ((vm_offset_t) addr < gap)
1705 return EPERM;
1706 if ((vm_offset_t) (addr + len) <=
1707 (vm_offset_t) (gap + sizeof(struct trapframe)))
1708 return 0;
1709 return EPERM;
1710 }
1711
int
ptrace_write_u(p, off, data)
	struct proc *p;
	vm_offset_t off;
	long data;
1716 {
1717 struct trapframe frame_copy;
1718 vm_offset_t min;
1719 struct trapframe *tp;
1720
1721 /*
1722 * Privileged kernel state is scattered all over the user area.
1723 * Only allow write access to parts of regs and to fpregs.
1724 */
1725 min = (char *)p->p_md.md_regs - (char *)p->p_addr;
1726 if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) {
1727 tp = p->p_md.md_regs;
1728 frame_copy = *tp;
1729 *(int *)((char *)&frame_copy + (off - min)) = data;
1730 if (!EFLAGS_SECURE(frame_copy.tf_eflags, tp->tf_eflags) ||
1731 !CS_SECURE(frame_copy.tf_cs))
1732 return (EINVAL);
1733 *(int*)((char *)p->p_addr + off) = data;
1734 return (0);
1735 }
1736 min = offsetof(struct user, u_pcb) + offsetof(struct pcb, pcb_savefpu);
1737 if (off >= min && off <= min + sizeof(struct save87) - sizeof(int)) {
1738 *(int*)((char *)p->p_addr + off) = data;
1739 return (0);
1740 }
1741 return (EFAULT);
1742 }
1743
1744 int
1745 fill_regs(p, regs)
1746 struct proc *p;
1747 struct reg *regs;
1748 {
1749 struct pcb *pcb;
1750 struct trapframe *tp;
1751
1752 tp = p->p_md.md_regs;
1753 regs->r_es = tp->tf_es;
1754 regs->r_ds = tp->tf_ds;
1755 regs->r_edi = tp->tf_edi;
1756 regs->r_esi = tp->tf_esi;
1757 regs->r_ebp = tp->tf_ebp;
1758 regs->r_ebx = tp->tf_ebx;
1759 regs->r_edx = tp->tf_edx;
1760 regs->r_ecx = tp->tf_ecx;
1761 regs->r_eax = tp->tf_eax;
1762 regs->r_eip = tp->tf_eip;
1763 regs->r_cs = tp->tf_cs;
1764 regs->r_eflags = tp->tf_eflags;
1765 regs->r_esp = tp->tf_esp;
1766 regs->r_ss = tp->tf_ss;
1767 pcb = &p->p_addr->u_pcb;
1768 regs->r_fs = pcb->pcb_fs;
1769 regs->r_gs = pcb->pcb_gs;
1770 return (0);
1771 }
1772
1773 int
1774 set_regs(p, regs)
1775 struct proc *p;
1776 struct reg *regs;
1777 {
1778 struct pcb *pcb;
1779 struct trapframe *tp;
1780
1781 tp = p->p_md.md_regs;
1782 if (!EFLAGS_SECURE(regs->r_eflags, tp->tf_eflags) ||
1783 !CS_SECURE(regs->r_cs))
1784 return (EINVAL);
1785 tp->tf_es = regs->r_es;
1786 tp->tf_ds = regs->r_ds;
1787 tp->tf_edi = regs->r_edi;
1788 tp->tf_esi = regs->r_esi;
1789 tp->tf_ebp = regs->r_ebp;
1790 tp->tf_ebx = regs->r_ebx;
1791 tp->tf_edx = regs->r_edx;
1792 tp->tf_ecx = regs->r_ecx;
1793 tp->tf_eax = regs->r_eax;
1794 tp->tf_eip = regs->r_eip;
1795 tp->tf_cs = regs->r_cs;
1796 tp->tf_eflags = regs->r_eflags;
1797 tp->tf_esp = regs->r_esp;
1798 tp->tf_ss = regs->r_ss;
1799 pcb = &p->p_addr->u_pcb;
1800 pcb->pcb_fs = regs->r_fs;
1801 pcb->pcb_gs = regs->r_gs;
1802 return (0);
1803 }
1804
1805 int
1806 fill_fpregs(p, fpregs)
1807 struct proc *p;
1808 struct fpreg *fpregs;
1809 {
1810 bcopy(&p->p_addr->u_pcb.pcb_savefpu, fpregs, sizeof *fpregs);
1811 return (0);
1812 }
1813
1814 int
1815 set_fpregs(p, fpregs)
1816 struct proc *p;
1817 struct fpreg *fpregs;
1818 {
1819 bcopy(fpregs, &p->p_addr->u_pcb.pcb_savefpu, sizeof *fpregs);
1820 return (0);
1821 }
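
/*
 * fill_regs()/set_regs() above back the PT_GETREGS/PT_SETREGS ptrace(2)
 * requests; a minimal userland sketch ("pid" and "new_pc" come from the
 * hypothetical caller; illustrative only, never compiled here):
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <machine/reg.h>

static int
set_child_pc(pid, new_pc)
	pid_t pid;
	unsigned long new_pc;
{
	struct reg r;

	if (ptrace(PT_GETREGS, pid, (caddr_t)&r, 0) == -1)
		return (-1);
	r.r_eip = new_pc;		/* cf. ptrace_set_pc() above */
	return (ptrace(PT_SETREGS, pid, (caddr_t)&r, 0));
}
#endif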
1822
1823 #ifndef DDB
1824 void
1825 Debugger(const char *msg)
1826 {
1827 printf("Debugger(\"%s\") called.\n", msg);
1828 }
1829 #endif /* no DDB */
1830
1831 #include <sys/disklabel.h>
1832
1833 /*
1834 * Determine the size of the transfer, and make sure it is
1835 * within the boundaries of the partition. Adjust transfer
1836 * if needed, and signal errors or early completion.
1837 */
1838 int
1839 bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
1840 {
1841 struct partition *p = lp->d_partitions + dkpart(bp->b_dev);
1842 int labelsect = lp->d_partitions[0].p_offset;
1843 int maxsz = p->p_size,
1844 sz = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;
1845
1846 /* overwriting disk label ? */
1847 /* XXX should also protect bootstrap in first 8K */
1848 if (bp->b_blkno + p->p_offset <= LABELSECTOR + labelsect &&
1849 #if LABELSECTOR != 0
1850 bp->b_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
1851 #endif
1852 (bp->b_flags & B_READ) == 0 && wlabel == 0) {
1853 bp->b_error = EROFS;
1854 goto bad;
1855 }
1856
1857 #if defined(DOSBBSECTOR) && defined(notyet)
1858 /* overwriting master boot record? */
1859 if (bp->b_blkno + p->p_offset <= DOSBBSECTOR &&
1860 (bp->b_flags & B_READ) == 0 && wlabel == 0) {
1861 bp->b_error = EROFS;
1862 goto bad;
1863 }
1864 #endif
1865
1866 /* beyond partition? */
1867 if (bp->b_blkno < 0 || bp->b_blkno + sz > maxsz) {
1868 /* if exactly at end of disk, return an EOF */
1869 if (bp->b_blkno == maxsz) {
1870 bp->b_resid = bp->b_bcount;
1871 return(0);
1872 }
1873 /* or truncate if part of it fits */
1874 sz = maxsz - bp->b_blkno;
1875 if (sz <= 0) {
1876 bp->b_error = EINVAL;
1877 goto bad;
1878 }
1879 bp->b_bcount = sz << DEV_BSHIFT;
1880 }
1881
1882 bp->b_pblkno = bp->b_blkno + p->p_offset;
1883 return(1);
1884
1885 bad:
1886 bp->b_flags |= B_ERROR;
1887 return(-1);
1888 }
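
/*
 * Typical use from a disk driver's strategy routine, sketched below
 * (the "sc" softc fields are made up; illustrative only):
 */
#if 0
	switch (bounds_check_with_label(bp, &sc->sc_label, sc->sc_wlabel)) {
	case -1:		/* error: b_error and B_ERROR already set */
		biodone(bp);
		return;
	case 0:			/* EOF: b_resid has been set, nothing to do */
		biodone(bp);
		return;
	default:		/* 1: clipped, and b_pblkno is now valid */
		/* queue the buffer using bp->b_pblkno */
		break;
	}
#endif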
1889
1890 #ifdef DDB
1891
1892 /*
1893 * Provide inb() and outb() as functions. They are normally only
1894 * available as macros calling inlined functions, thus cannot be
1895 * called inside DDB.
1896 *
1897 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
1898 */
1899
1900 #undef inb
1901 #undef outb
1902
1903 /* silence compiler warnings */
1904 u_char inb(u_int);
1905 void outb(u_int, u_char);
1906
1907 u_char
1908 inb(u_int port)
1909 {
1910 u_char data;
1911 /*
1912 * We use %%dx and not %1 here because i/o is done at %dx and not at
1913 * %edx, while gcc generates inferior code (movw instead of movl)
1914 * if we tell it to load (u_short) port.
1915 */
1916 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
1917 return (data);
1918 }
1919
1920 void
1921 outb(u_int port, u_char data)
1922 {
1923 u_char al;
1924 /*
1925 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
1927 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for
1928 * best results. gcc-2.6.0 can't handle this.
1929 */
1930 al = data;
1931 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
1932 }
1933
1934 #endif /* DDB */