FreeBSD/Linux Kernel Cross Reference
sys/i386/isa/ipl.s
/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ipl.s
 *
 * $FreeBSD$
 */


/*
 * AT/386
 * Vector interrupt control section
 */

	.data
	ALIGN_DATA

/* current priority (all off) */
	.globl	_cpl
_cpl:	.long	HWI_MASK | SWI_MASK

	.globl	_tty_imask
_tty_imask:	.long	0
	.globl	_bio_imask
_bio_imask:	.long	SWI_CAMBIO_MASK
	.globl	_cam_imask
_cam_imask:	.long	SWI_CAMBIO_MASK | SWI_CAMNET_MASK
	.globl	_net_imask
_net_imask:	.long	SWI_CAMNET_MASK
	.globl	_soft_imask
_soft_imask:	.long	SWI_MASK
	.globl	_softnet_imask
_softnet_imask:	.long	SWI_NET_MASK
	.globl	_softtty_imask
_softtty_imask:	.long	SWI_TTY_MASK


/* pending interrupts blocked by splxxx() */
	.globl	_ipending
_ipending:	.long	0
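
/*
 * A rough C sketch of how cpl, the imasks above and ipending are assumed
 * to work together (illustrative only; splraise()/splx()/splz() here are
 * stand-ins for the real spl primitives, which live elsewhere):
 *
 *	extern volatile unsigned cpl, ipending;
 *	extern void splz(void);			// hypothetical unpend routine
 *
 *	static unsigned
 *	splraise(unsigned mask)
 *	{
 *		unsigned old = cpl;
 *		cpl = old | mask;	// also block everything in 'mask'
 *		return (old);		// caller hands this back to splx()
 *	}
 *
 *	static void
 *	splx(unsigned new)
 *	{
 *		cpl = new;
 *		if (ipending & ~new)	// anything just became unmasked?
 *			splz();		// service it now
 *	}
 */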

/* set with bits for which queue to service */
	.globl	_netisr
_netisr:	.long	0

	.globl	_netisrs
_netisrs:
	.long	dummynetisr, dummynetisr, dummynetisr, dummynetisr
	.long	dummynetisr, dummynetisr, dummynetisr, dummynetisr
	.long	dummynetisr, dummynetisr, dummynetisr, dummynetisr
	.long	dummynetisr, dummynetisr, dummynetisr, dummynetisr
	.long	dummynetisr, dummynetisr, dummynetisr, dummynetisr
	.long	dummynetisr, dummynetisr, dummynetisr, dummynetisr
	.long	dummynetisr, dummynetisr, dummynetisr, dummynetisr
	.long	dummynetisr, dummynetisr, dummynetisr, dummynetisr
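
/*
 * A C sketch of how protocols are assumed to schedule a soft network
 * interrupt (modeled on the historic schednetisr() macro; the names here
 * are illustrative):
 *
 *	extern volatile unsigned netisr;	// one bit per protocol queue
 *	extern void setsoftnet(void);		// posts SWI_NET in ipending
 *
 *	static void
 *	schednetisr_sketch(int num)
 *	{
 *		netisr |= 1U << num;		// mark queue 'num' for service
 *		setsoftnet();			// swi_net below scans the bits
 *	}
 *
 * Unregistered slots in the _netisrs table above point at dummynetisr,
 * so a stray bit is harmless.
 */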

	.text

#ifdef SMP
#ifdef notnow
#define	TEST_CIL \
	cmpl	$0x0100, _cil ; \
	jne	1f ; \
	cmpl	$0, _inside_intr ; \
	jne	1f ; \
	int	$3 ; \
1:
#else
#define	TEST_CIL
#endif
#endif

/*
 * Handle return from interrupts, traps and syscalls.
 */
	SUPERALIGN_TEXT
	.type	_doreti,@function
_doreti:
#ifdef SMP
	TEST_CIL
#endif
	FAKE_MCOUNT(_bintr)		/* init "from" _bintr -> _doreti */
	addl	$4,%esp			/* discard unit number */
	popl	%eax			/* cpl or cml to restore */
doreti_next:
	/*
	 * Check for pending HWIs and SWIs atomically with restoring cpl
	 * and exiting.  The check has to be atomic with exiting to stop
	 * (ipending & ~cpl) changing from zero to nonzero while we're
	 * looking at it (this wouldn't be fatal but it would increase
	 * interrupt latency).  Restoring cpl has to be atomic with exiting
	 * so that the stack cannot pile up (the nesting level of interrupt
	 * handlers is limited by the number of bits in cpl).
	 */
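	/*
	 * The code from here to doreti_exit behaves roughly like this C
	 * (illustrative; run_lowest_pending() stands in for doreti_unpend):
	 *
	 *	for (;;) {
	 *		disable_interrupts();		// cli
	 *		unsigned unmasked = ipending & ~cpl;
	 *		if (unmasked == 0) {
	 *			cpl = saved_cpl;	// restore priority and
	 *			return_with_iret();	// exit, atomically
	 *		}
	 *		run_lowest_pending(unmasked);	// may re-enable ints
	 *	}
	 */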
#ifdef SMP
	TEST_CIL
	cli				/* early to prevent INT deadlock */
	pushl	%eax			/* preserve cpl while getting lock */
	ICPL_LOCK
	popl	%eax
doreti_next2:
#endif
	movl	%eax,%ecx
#ifdef CPL_AND_CML
	orl	_cpl, %ecx		/* add cpl to cml */
#endif
	notl	%ecx			/* set bit = unmasked level */
#ifndef SMP
	cli
#endif
	andl	_ipending,%ecx		/* set bit = unmasked pending INT */
	jne	doreti_unpend
doreti_exit:
#ifdef SMP
	TEST_CIL
#endif
#ifdef CPL_AND_CML
	movl	%eax, _cml
#else
	movl	%eax,_cpl
#endif
	FAST_ICPL_UNLOCK		/* preserves %eax */
	MPLOCKED decb _intr_nesting_level
	MEXITCOUNT
#ifdef VM86
#ifdef CPL_AND_CML
	/* XXX CPL_AND_CML needs work */
#error not ready for vm86
#endif
	cmpl	$1,_in_vm86call
	je	1f			/* want cpl == SWI_AST_PENDING */
	/*
	 * XXX
	 * Sometimes when attempting to return to vm86 mode, cpl is not
	 * being reset to 0, so here we force it to 0 before returning to
	 * vm86 mode.  doreti_stop is a convenient place to set a breakpoint.
	 * When the cpl problem is solved, this code can disappear.
	 */
	ICPL_LOCK
	cmpl	$0,_cpl			/* cpl == 0, skip it */
	je	1f
	testl	$PSL_VM,TF_EFLAGS(%esp)	/* going to VM86 mode? */
	jne	doreti_stop
	testb	$SEL_RPL_MASK,TRAPF_CS_OFF(%esp) /* to user mode? */
	je	1f
doreti_stop:
	movl	$0,_cpl
	nop
1:
	FAST_ICPL_UNLOCK		/* preserves %eax */
#endif /* VM86 */

#ifdef SMP
#ifdef INTR_SIMPLELOCK
#error code needed here to decide which lock to release, INTR or giant
#endif
	/* release the kernel lock */
	pushl	$_mp_lock		/* GIANT_LOCK */
	call	_MPrellock
	add	$4, %esp
#endif /* SMP */

	.globl	doreti_popl_es
doreti_popl_es:
	popl	%es
	.globl	doreti_popl_ds
doreti_popl_ds:
	popl	%ds
	popal
	addl	$8,%esp
	.globl	doreti_iret
doreti_iret:
	iret

	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	subl	$8,%esp
	pushal
	pushl	%ds
	.globl	doreti_popl_ds_fault
doreti_popl_ds_fault:
	pushl	%es
	.globl	doreti_popl_es_fault
doreti_popl_es_fault:
	movl	$0,4+4+32+4(%esp)	/* XXX should be the error code */
	movl	$T_PROTFLT,4+4+32+0(%esp)
	jmp	alltraps_with_regs_pushed

	ALIGN_TEXT
doreti_unpend:
	/*
	 * Enabling interrupts is safe because we haven't restored cpl yet.
	 * The locking from the "btrl" test is probably no longer necessary.
	 * We won't miss any new pending interrupts because we will check
	 * for them again.
	 */
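	/*
	 * Roughly, in C (illustrative; test_and_clear_bit() stands in for
	 * the locked btrl):
	 *
	 *	int bit = ffs(unmasked) - 1;		// bsfl: lowest set bit
	 *	if (!test_and_clear_bit(&ipending, bit))
	 *		goto rescan;			// taken by someone else
	 *	// bit < NHWI: jump to ihandlers[bit], which returns via
	 *	// _doreti; otherwise call ihandlers[bit] as an SWI
	 *	// (doreti_swi below).
	 */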
#ifdef SMP
	TEST_CIL
	/* we enter with cpl locked */
	bsfl	%ecx, %ecx		/* slow, but not worth optimizing */
	btrl	%ecx, _ipending
	jnc	doreti_next2		/* some intr cleared memory copy */
	cmpl	$NHWI, %ecx
	jae	1f
	btsl	%ecx, _cil
1:
	FAST_ICPL_UNLOCK		/* preserves %eax */
	sti				/* late to prevent INT deadlock */
#else
	sti
	bsfl	%ecx,%ecx		/* slow, but not worth optimizing */
	btrl	%ecx,_ipending
	jnc	doreti_next		/* some intr cleared memory copy */
#endif /* SMP */

	/*
	 * Set up JUMP to _ihandlers[%ecx] for HWIs.
	 * Set up CALL of _ihandlers[%ecx] for SWIs.
	 * This is a bit early for the SMP case - we have to push %ecx and
	 * %edx, but could push only %ecx and load %edx later.
	 */
	movl	_ihandlers(,%ecx,4),%edx
	cmpl	$NHWI,%ecx
	jae	doreti_swi
	cli
#ifdef SMP
	pushl	%edx			/* preserve %edx */
#ifdef APIC_INTR_DIAGNOSTIC
	pushl	%ecx
#endif
	pushl	%eax			/* preserve %eax */
	ICPL_LOCK
#ifdef CPL_AND_CML
	popl	_cml
#else
	popl	_cpl
#endif
	FAST_ICPL_UNLOCK
#ifdef APIC_INTR_DIAGNOSTIC
	popl	%ecx
#endif
	popl	%edx
#else
	movl	%eax,_cpl
#endif
	MEXITCOUNT
#ifdef APIC_INTR_DIAGNOSTIC
	lock
	incl	CNAME(apic_itrace_doreti)(,%ecx,4)
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ,%ecx
	jne	9f
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$APIC_ITRACE_DORETI
	call	log_intr_event
	addl	$4,%esp
	popl	%edx
	popl	%ecx
	popl	%eax
9:
#endif
#endif
	jmp	*%edx

	ALIGN_TEXT
doreti_swi:
#ifdef SMP
	TEST_CIL
#endif
	pushl	%eax
	/*
	 * The SWI_AST handler has to run at cpl = SWI_AST_MASK and the
	 * SWI_CLOCK handler at cpl = SWI_CLOCK_MASK, so we have to restore
	 * all the h/w bits in cpl now and have to worry about stack growth.
	 * The worst case is currently (30 Jan 1994) 2 SWI handlers nested
	 * in dying interrupt frames and about 12 HWIs nested in active
	 * interrupt frames.  There are only 4 different SWIs and the HWI
	 * and SWI masks limit the nesting further.
	 */
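	/*
	 * In rough C (illustrative): the SWI handler runs with its own mask
	 * merged into the priority being restored, then the scan resumes:
	 *
	 *	unsigned saved = restored_cpl;		// %eax on entry
	 *	cpl = saved | imasks[bit];		// e.g. SWI_CLOCK_MASK
	 *	(*ihandlers[bit])();			// "call *%edx" below
	 *	goto doreti_next;			// 'saved' back in %eax
	 */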
#ifdef SMP
	orl	imasks(,%ecx,4), %eax
	pushl	%ecx			/* preserve for use by _swi_generic */
	pushl	%edx			/* save handler entry point */
	cli				/* prevent INT deadlock */
	pushl	%eax			/* save cpl|cml */
	ICPL_LOCK
#ifdef CPL_AND_CML
	popl	_cml			/* restore cml */
#else
	popl	_cpl			/* restore cpl */
#endif
	FAST_ICPL_UNLOCK
	sti
	popl	%edx			/* restore handler entry point */
	popl	%ecx
#else
	orl	imasks(,%ecx,4),%eax
	movl	%eax,_cpl
#endif
	call	*%edx
	popl	%eax
	jmp	doreti_next

	ALIGN_TEXT
swi_ast:
	addl	$8,%esp			/* discard raddr & cpl to get trap frame */
#ifdef VM86
	cmpl	$1,_in_vm86call
	je	1f			/* stay in kernel mode */
#endif
	testb	$SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
	je	swi_ast_phantom
swi_ast_user:
	movl	$T_ASTFLT,(2+8+0)*4(%esp)
	movb	$0,_intr_nesting_level	/* finish becoming a trap handler */
	call	_trap
	subl	%eax,%eax		/* recover cpl|cml */
#ifdef CPL_AND_CML
	movl	%eax, _cpl
#endif
	movb	$1,_intr_nesting_level	/* for doreti_next to decrement */
	jmp	doreti_next

	ALIGN_TEXT
swi_ast_phantom:
#ifdef VM86
	/*
	 * Check for an AST from vm86 mode.  Placed down here so the jumps
	 * do not get taken for mainline code.
	 */
	testl	$PSL_VM,TF_EFLAGS(%esp)
	jne	swi_ast_user
1:
#endif /* VM86 */
	/*
	 * These happen when there is an interrupt in a trap handler before
	 * ASTs can be masked or in an lcall handler before they can be
	 * masked or after they are unmasked.  They could be avoided for
	 * trap entries by using interrupt gates, and for lcall exits by
	 * using cli, but they are unavoidable for lcall entries.
	 */
	cli
	ICPL_LOCK
	orl	$SWI_AST_PENDING, _ipending
	/* cpl is unlocked in doreti_exit */
	subl	%eax,%eax
#ifdef CPL_AND_CML
	movl	%eax, _cpl
#endif
	jmp	doreti_exit		/* SWI_AST is highest so we must be done */


	ALIGN_TEXT
swi_net:
	MCOUNT
	bsfl	_netisr,%eax
	je	swi_net_done
swi_net_more:
	btrl	%eax,_netisr
	jnc	swi_net_next
	call	*_netisrs(,%eax,4)
swi_net_next:
	bsfl	_netisr,%eax
	jne	swi_net_more
swi_net_done:
	ret
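
/*
 * The scan above in rough C (illustrative; test_and_clear_bit() stands in
 * for the locked btrl):
 *
 *	int n;
 *	while ((n = ffs(netisr)) != 0) {	// bsfl
 *		if (test_and_clear_bit(&netisr, n - 1))
 *			(*netisrs[n - 1])();	// service that queue
 *	}
 */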

	ALIGN_TEXT
dummynetisr:
	MCOUNT
	ret

/*
 * The arg is in a nonstandard place, so swi_dispatcher() can't be called
 * directly and swi_generic() can't use ENTRY() or MCOUNT.
 */
	ALIGN_TEXT
	.globl	_swi_generic
	.type	_swi_generic,@function
_swi_generic:
	pushl	%ecx
	FAKE_MCOUNT(4(%esp))
	call	_swi_dispatcher
	popl	%ecx
	ret

ENTRY(swi_null)
	ret

#ifdef APIC_IO
#include "i386/isa/apic_ipl.s"
#else
#include "i386/isa/icu_ipl.s"
#endif /* APIC_IO */