/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
35
36 #include "npx.h"
37 #include "opt_vm86.h"
38
39 #include <machine/asmacros.h>
40 #include <machine/ipl.h>
41 #include <machine/lock.h>
42 #include <machine/psl.h>
43 #include <machine/trap.h>
44 #ifdef SMP
45 #include <machine/smptests.h> /** CPL_AND_CML, REAL_ */
46 #endif
47
48 #include "assym.s"
49
50 #ifndef SMP
51 #define ECPL_LOCK /* make these nops */
52 #define ECPL_UNLOCK
53 #define ICPL_LOCK
54 #define ICPL_UNLOCK
55 #define FAST_ICPL_UNLOCK
56 #define AICPL_LOCK
57 #define AICPL_UNLOCK
58 #define AVCPL_LOCK
59 #define AVCPL_UNLOCK
60 #endif /* SMP */
61
62 #define KCSEL 0x08 /* kernel code selector */
63 #define KDSEL 0x10 /* kernel data selector */
64 #define SEL_RPL_MASK 0x0003
65 #define TRAPF_CS_OFF (13 * 4)
66
67 .text
68
69 /*****************************************************************************/
70 /* Trap handling */
71 /*****************************************************************************/
72 /*
73 * Trap and fault vector routines
74 */
75 #define IDTVEC(name) ALIGN_TEXT; .globl __CONCAT(_X,name); \
76 .type __CONCAT(_X,name),@function; __CONCAT(_X,name):
77 #define TRAP(a) pushl $(a) ; jmp _alltraps
78
79 /*
80 * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
81 * control. The sti's give the standard losing behaviour for ddb and kgdb.
82 */
83 #ifdef BDE_DEBUGGER
84 #define BDBTRAP(name) \
85 ss ; \
86 cmpb $0,_bdb_exists ; \
87 je 1f ; \
88 testb $SEL_RPL_MASK,4(%esp) ; \
89 jne 1f ; \
90 ss ; \
91 .globl __CONCAT(__CONCAT(bdb_,name),_ljmp); \
92 __CONCAT(__CONCAT(bdb_,name),_ljmp): \
93 ljmp $0,$0 ; \
94 1:
95 #else
96 #define BDBTRAP(name)
97 #endif
98
99 #define BPTTRAP(a) testl $PSL_I,4+8(%esp) ; je 1f ; sti ; 1: ; TRAP(a)
100
101 MCOUNT_LABEL(user)
102 MCOUNT_LABEL(btrap)
103
104 IDTVEC(div)
105 pushl $0; TRAP(T_DIVIDE)
106 IDTVEC(dbg)
107 BDBTRAP(dbg)
108 pushl $0; BPTTRAP(T_TRCTRAP)
109 IDTVEC(nmi)
110 pushl $0; TRAP(T_NMI)
111 IDTVEC(bpt)
112 BDBTRAP(bpt)
113 pushl $0; BPTTRAP(T_BPTFLT)
114 IDTVEC(ofl)
115 pushl $0; TRAP(T_OFLOW)
116 IDTVEC(bnd)
117 pushl $0; TRAP(T_BOUND)
118 IDTVEC(ill)
119 pushl $0; TRAP(T_PRIVINFLT)
120 IDTVEC(dna)
121 pushl $0; TRAP(T_DNA)
122 IDTVEC(fpusegm)
123 pushl $0; TRAP(T_FPOPFLT)
124 IDTVEC(tss)
125 TRAP(T_TSSFLT)
126 IDTVEC(missing)
127 TRAP(T_SEGNPFLT)
128 IDTVEC(stk)
129 TRAP(T_STKFLT)
130 IDTVEC(prot)
131 TRAP(T_PROTFLT)
132 IDTVEC(page)
133 TRAP(T_PAGEFLT)
134 IDTVEC(mchk)
135 pushl $0; TRAP(T_MCHK)
136 IDTVEC(rsvd)
137 pushl $0; TRAP(T_RESERVED)
138
139 IDTVEC(fpu)
140 #if NNPX > 0
141 /*
142 * Handle like an interrupt (except for accounting) so that we can
143 * call npxintr to clear the error. It would be better to handle
144 * npx interrupts as traps. This used to be difficult for nested
145 * interrupts, but now it is fairly easy - mask nested ones the
146 * same as SWI_AST's.
147 */
148 pushl $0 /* dummy error code */
149 pushl $0 /* dummy trap type */
150 pushal
151 pushl %ds
152 pushl %es /* now stack frame is a trap frame */
153 movl $KDSEL,%eax
154 movl %ax,%ds
155 movl %ax,%es
156 FAKE_MCOUNT(12*4(%esp))
157
158 #ifdef SMP
159 MPLOCKED incl _cnt+V_TRAP
160 FPU_LOCK
161 ECPL_LOCK
162 #ifdef CPL_AND_CML
163 movl _cml,%eax
164 pushl %eax /* save original cml */
165 orl $SWI_AST_MASK,%eax
166 movl %eax,_cml
167 #else
168 movl _cpl,%eax
169 pushl %eax /* save original cpl */
170 orl $SWI_AST_MASK,%eax
171 movl %eax,_cpl
172 #endif /* CPL_AND_CML */
173 ECPL_UNLOCK
174 pushl $0 /* dummy unit to finish intr frame */
175 #else /* SMP */
176 movl _cpl,%eax
177 pushl %eax
178 pushl $0 /* dummy unit to finish intr frame */
179 incl _cnt+V_TRAP
180 orl $SWI_AST_MASK,%eax
181 movl %eax,_cpl
182 #endif /* SMP */
183
184 call _npxintr
185
186 incb _intr_nesting_level
187 MEXITCOUNT
188 jmp _doreti
189 #else /* NNPX > 0 */
190 pushl $0; TRAP(T_ARITHTRAP)
191 #endif /* NNPX > 0 */
192
193 IDTVEC(align)
194 TRAP(T_ALIGNFLT)
195
196 SUPERALIGN_TEXT
197 .globl _alltraps
198 .type _alltraps,@function
199 _alltraps:
200 pushal
201 pushl %ds
202 pushl %es
203 alltraps_with_regs_pushed:
204 movl $KDSEL,%eax
205 movl %ax,%ds
206 movl %ax,%es
207 FAKE_MCOUNT(12*4(%esp))
208 calltrap:
209 FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
210 MPLOCKED incl _cnt+V_TRAP
211 ALIGN_LOCK
212 ECPL_LOCK
213 #ifdef CPL_AND_CML
214 movl _cml,%eax
215 movl %eax,%ebx /* keep orig. cml here during trap() */
216 orl $SWI_AST_MASK,%eax
217 movl %eax,_cml
218 #else
219 movl _cpl,%eax
220 movl %eax,%ebx /* keep orig. cpl here during trap() */
221 orl $SWI_AST_MASK,%eax
222 movl %eax,_cpl
223 #endif
224 ECPL_UNLOCK
225 call _trap
226
227 /*
228 * Return via _doreti to handle ASTs. Have to change trap frame
229 * to interrupt frame.
230 */
231 pushl %ebx /* cpl to restore */
232 subl $4,%esp /* dummy unit to finish intr frame */
233 MPLOCKED incb _intr_nesting_level
234 MEXITCOUNT
235 jmp _doreti
236
237 /*
238 * Call gate entry for syscall.
239 * The intersegment call has been set up to specify one dummy parameter.
240 * This leaves a place to put eflags so that the call frame can be
241 * converted to a trap frame. Note that the eflags is (semi-)bogusly
242 * pushed into (what will be) tf_err and then copied later into the
243 * final spot. It has to be done this way because esp can't be just
244 * temporarily altered for the pushfl - an interrupt might come in
245 * and clobber the saved cs/eip.
246 */
247 SUPERALIGN_TEXT
248 IDTVEC(syscall)
249 pushfl /* save eflags in tf_err for now */
250 subl $4,%esp /* skip over tf_trapno */
251 pushal
252 pushl %ds
253 pushl %es
254 movl $KDSEL,%eax /* switch to kernel segments */
255 movl %ax,%ds
256 movl %ax,%es
257 movl TF_ERR(%esp),%eax /* copy saved eflags to final spot */
258 movl %eax,TF_EFLAGS(%esp)
259 movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
260 FAKE_MCOUNT(12*4(%esp))
261 MPLOCKED incl _cnt+V_SYSCALL
262 SYSCALL_LOCK
263 ECPL_LOCK
264 #ifdef CPL_AND_CML
265 movl $SWI_AST_MASK,_cml
266 #else
267 movl $SWI_AST_MASK,_cpl
268 #endif
269 ECPL_UNLOCK
270 call _syscall
271
272 /*
273 * Return via _doreti to handle ASTs.
274 */
275 pushl $0 /* cpl to restore */
276 subl $4,%esp /* dummy unit to finish intr frame */
277 movb $1,_intr_nesting_level
278 MEXITCOUNT
279 jmp _doreti
280
281 /*
282 * Call gate entry for Linux/NetBSD syscall (int 0x80)
283 */
284 SUPERALIGN_TEXT
285 IDTVEC(int0x80_syscall)
286 subl $8,%esp /* skip over tf_trapno and tf_err */
287 pushal
288 pushl %ds
289 pushl %es
290 movl $KDSEL,%eax /* switch to kernel segments */
291 movl %ax,%ds
292 movl %ax,%es
293 movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
294 FAKE_MCOUNT(12*4(%esp))
295 MPLOCKED incl _cnt+V_SYSCALL
296 ALTSYSCALL_LOCK
297 ECPL_LOCK
298 #ifdef CPL_AND_CML
299 movl $SWI_AST_MASK,_cml
300 #else
301 movl $SWI_AST_MASK,_cpl
302 #endif
303 ECPL_UNLOCK
304 call _syscall
305
306 /*
307 * Return via _doreti to handle ASTs.
308 */
309 pushl $0 /* cpl to restore */
310 subl $4,%esp /* dummy unit to finish intr frame */
311 movb $1,_intr_nesting_level
312 MEXITCOUNT
313 jmp _doreti
314
315 ENTRY(fork_trampoline)
316 call _spl0
317 movl _curproc,%eax
318 addl $P_SWITCHTIME,%eax
319 movl _switchtime,%ecx
320 testl %ecx,%ecx
321 jne 1f
322 /* XXX unreachable except in the SMP case? */
323 pushl %eax
324 call _microuptime
325 popl %eax
326 movl _ticks,%eax
327 movl %eax,_switchticks
328 jmp 2f
329 1:
330 movl %ecx,(%eax)
331 movl _switchtime+4,%edx
332 movl %edx,4(%eax)
333 2:
334
335 /*
336 * cpu_set_fork_handler intercepts this function call to
337 * have this call a non-return function to stay in kernel mode.
338 * initproc has its own fork handler, but it does return.
339 */
340 pushl %ebx /* arg1 */
341 call %esi /* function */
342 addl $4,%esp
343 /* cut from syscall */
344
345 /*
346 * Return via _doreti to handle ASTs.
347 */
348 pushl $0 /* cpl to restore */
349 subl $4,%esp /* dummy unit to finish intr frame */
350 movb $1,_intr_nesting_level
351 MEXITCOUNT
352 jmp _doreti
353
354
355 #ifdef VM86
356 /*
357 * Include vm86 call routines, which want to call _doreti.
358 */
359 #include "i386/i386/vm86bios.s"
360 #endif /* VM86 */
361
362 /*
363 * Include what was once config+isa-dependent code.
364 * XXX it should be in a stand-alone file. It's still icu-dependent and
365 * belongs in i386/isa.
366 */
367 #include "i386/isa/vector.s"
368
369 /*
370 * Include what was once icu-dependent code.
371 * XXX it should be merged into this file (also move the definition of
372 * imen to vector.s or isa.c).
373 * Before including it, set up a normal asm environment so that vector.s
374 * doesn't have to know that stuff is included after it.
375 */
376 .data
377 ALIGN_DATA
378 .text
379 SUPERALIGN_TEXT
380 #include "i386/isa/ipl.s"
/* Cache object: 7727d6a3b6b22d173c5958fb46c95d0a */