1 /*
2 * from: vector.s, 386BSD 0.1 unknown origin
3 * $FreeBSD$
4 */
5
6 /*
7 * modified for PC98 by Kakefuda
8 */
9
/*
 * 8259A ICU register layout and EOI helpers.
 *
 * ICU_IMR_OFFSET is the i/o offset of the interrupt mask register
 * from each ICU's base port (PC98 wires the ICUs at different
 * offsets than PC/AT).
 */
10 #ifdef PC98
11 #define ICU_IMR_OFFSET 2 /* IO_ICU{1,2} + 2 */
12 #else
13 #define ICU_IMR_OFFSET 1 /* IO_ICU{1,2} + 1 */
14 #endif
15
16 #define ICU_EOI 0x20 /* XXX - define elsewhere */
17
/*
 * Locate an irq's bit within the byte-wide mask arrays used below
 * (_imen, _ipending): byte index irq/8, bit index irq%8.
 */
18 #define IRQ_BIT(irq_num) (1 << ((irq_num) % 8))
19 #define IRQ_BYTE(irq_num) ((irq_num) >> 3)
20
/*
 * ENABLE_ICU1 sends a non-specific EOI to the master ICU; it expands
 * to nothing when the master is configured for auto-EOI.  OUTB_ICU1
 * is split out so that ENABLE_ICU1_AND_2 below can reuse it.
 */
21 #ifdef AUTO_EOI_1
22 #define ENABLE_ICU1 /* use auto-EOI to reduce i/o */
23 #define OUTB_ICU1
24 #else
25 #define ENABLE_ICU1 \
26 movb $ICU_EOI,%al ; /* as soon as possible send EOI ... */ \
27 OUTB_ICU1 /* ... to clear in service bit */
28 #define OUTB_ICU1 \
29 outb %al,$IO_ICU1
30 #endif
31
/*
 * ENABLE_ICU1_AND_2: EOI both ICUs, slave (IO_ICU2) first.
 */
32 #ifdef AUTO_EOI_2
33 /*
34 * The data sheet says no auto-EOI on slave, but it sometimes works.
35 */
36 #define ENABLE_ICU1_AND_2 ENABLE_ICU1
37 #else
38 #define ENABLE_ICU1_AND_2 \
39 movb $ICU_EOI,%al ; /* as above */ \
40 outb %al,$IO_ICU2 ; /* but do second icu first ... */ \
41 OUTB_ICU1 /* ... then first icu (if !AUTO_EOI_1) */
42 #endif
43
44 /*
45 * Macros for interrupt entry, call to handler, and exit.
46 */
47
/*
 * FAST_INTR(irq_num, vec_name, enable_icus):
 *
 * Entry code for a "fast" interrupt.  Only the call-used registers
 * plus %ds (and %es, when MAYBE_PUSHL_ES pushes it) are saved, the
 * handler is called as soon as possible, and enable_icus then EOIs
 * the ICU(s).  On the way out: if unmasking would deliver no pending
 * interrupts (_ipending & ~_cpl == 0) the thin frame is simply popped
 * and we iret; otherwise, stack depth permitting, the thin frame is
 * converted in place into a fat (trap-style) frame and _doreti
 * finishes the job.
 */
48 #define FAST_INTR(irq_num, vec_name, enable_icus) \
49 .text ; \
50 SUPERALIGN_TEXT ; \
51 IDTVEC(vec_name) ; \
52 pushl %eax ; /* save only call-used registers */ \
53 pushl %ecx ; \
54 pushl %edx ; \
55 pushl %ds ; \
56 MAYBE_PUSHL_ES ; \
57 mov $KDSEL,%ax ; /* load kernel data selector */ \
58 mov %ax,%ds ; \
59 MAYBE_MOVW_AX_ES ; \
60 FAKE_MCOUNT((4+ACTUALLY_PUSHED)*4(%esp)) ; \
61 pushl _intr_unit + (irq_num) * 4 ; /* push handler's unit argument */ \
62 call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
63 enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
64 addl $4,%esp ; /* discard unit argument */ \
65 incl _cnt+V_INTR ; /* book-keeping can wait */ \
66 movl _intr_countp + (irq_num) * 4,%eax ; \
67 incl (%eax) ; \
68 movl _cpl,%eax ; /* are we unmasking pending HWIs or SWIs? */ \
69 notl %eax ; \
70 andl _ipending,%eax ; \
71 jne 2f ; /* yes, maybe handle them */ \
72 1: ; \
73 MEXITCOUNT ; \
74 MAYBE_POPL_ES ; \
75 popl %ds ; \
76 popl %edx ; \
77 popl %ecx ; \
78 popl %eax ; \
79 iret ; \
80 ; \
81 ALIGN_TEXT ; \
82 2: ; \
83 cmpb $3,_intr_nesting_level ; /* is there enough stack? */ \
84 jae 1b ; /* no, return */ \
85 movl _cpl,%eax ; \
86 /* XXX next line is probably unnecessary now. */ \
87 movl $HWI_MASK|SWI_MASK,_cpl ; /* limit nesting ... */ \
88 incb _intr_nesting_level ; /* ... really limit it ... */ \
89 sti ; /* ... to do this as early as possible */ \
90 MAYBE_POPL_ES ; /* discard most of thin frame ... */ \
91 popl %ecx ; /* ... original %ds ... */ \
92 popl %edx ; \
93 xchgl %eax,4(%esp) ; /* orig %eax; save cpl */ \
94 pushal ; /* build fat frame (grrr) ... */ \
95 pushl %ecx ; /* ... actually %ds ... */ \
96 pushl %es ; \
97 pushl %fs ; \
98 mov $KDSEL,%ax ; \
99 mov %ax,%es ; \
100 mov %ax,%fs ; \
101 movl (3+8+0)*4(%esp),%ecx ; /* ... %ecx from thin frame ... */ \
102 movl %ecx,(3+6)*4(%esp) ; /* ... to fat frame ... */ \
103 movl (3+8+1)*4(%esp),%eax ; /* ... cpl from thin frame */ \
104 pushl %eax ; \
105 subl $4,%esp ; /* junk for unit number */ \
106 MEXITCOUNT ; \
107 jmp _doreti
108
/*
 * INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending):
 *
 * Entry code for a normal (maskable) interrupt.  A full trap frame is
 * built so that servicing can be deferred and later resumed at the
 * Xresume label.  The irq is masked in the soft mask copy (_imen) and
 * in the ICU's IMR before the EOI, so it cannot recur while masked.
 * If the current priority level blocks this irq (its bit is set in
 * the byte of _cpl held in %reg) the irq is only recorded in
 * _ipending and the frame is popped; otherwise the handler runs with
 * interrupts enabled at cpl | _intr_mask[irq], the irq is unmasked
 * again, and _doreti finishes up.
 */
109 #define INTR(irq_num, vec_name, icu, enable_icus, reg, maybe_extra_ipending) \
110 .text ; \
111 SUPERALIGN_TEXT ; \
112 IDTVEC(vec_name) ; \
113 pushl $0 ; /* dummy error code */ \
114 pushl $0 ; /* dummy trap type */ \
115 pushal ; \
116 pushl %ds ; /* save our data and extra segments ... */ \
117 pushl %es ; \
118 pushl %fs ; \
119 mov $KDSEL,%ax ; /* ... and reload with kernel's own ... */ \
120 mov %ax,%ds ; /* ... early for obsolete reasons */ \
121 mov %ax,%es ; \
122 mov %ax,%fs ; \
123 maybe_extra_ipending ; \
124 movb _imen + IRQ_BYTE(irq_num),%al ; /* mask this irq ... */ \
125 orb $IRQ_BIT(irq_num),%al ; \
126 movb %al,_imen + IRQ_BYTE(irq_num) ; /* ... in the soft copy ... */ \
127 outb %al,$icu+ICU_IMR_OFFSET ; /* ... and in the ICU's IMR */ \
128 enable_icus ; \
129 movl _cpl,%eax ; \
130 testb $IRQ_BIT(irq_num),%reg ; /* blocked at current priority? */ \
131 jne 2f ; \
132 incb _intr_nesting_level ; \
133 __CONCAT(Xresume,irq_num): ; /* resume point (see _ihandlers) */ \
134 FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
135 incl _cnt+V_INTR ; /* tally interrupts */ \
136 movl _intr_countp + (irq_num) * 4,%eax ; \
137 incl (%eax) ; \
138 movl _cpl,%eax ; \
139 pushl %eax ; \
140 pushl _intr_unit + (irq_num) * 4 ; \
141 orl _intr_mask + (irq_num) * 4,%eax ; \
142 movl %eax,_cpl ; \
143 sti ; \
144 call *_intr_handler + (irq_num) * 4 ; \
145 cli ; /* must unmask _imen and icu atomically */ \
146 movb _imen + IRQ_BYTE(irq_num),%al ; \
147 andb $~IRQ_BIT(irq_num),%al ; \
148 movb %al,_imen + IRQ_BYTE(irq_num) ; \
149 outb %al,$icu+ICU_IMR_OFFSET ; \
150 sti ; /* XXX _doreti repeats the cli/sti */ \
151 MEXITCOUNT ; \
152 /* We could usually avoid the following jmp by inlining some of */ \
153 /* _doreti, but it's probably better to use less cache. */ \
154 jmp _doreti ; \
155 ; \
156 ALIGN_TEXT ; \
157 2: ; \
158 /* XXX skip mcounting here to avoid double count */ \
159 orb $IRQ_BIT(irq_num),_ipending + IRQ_BYTE(irq_num) ; \
160 popl %fs ; \
161 popl %es ; \
162 popl %ds ; \
163 popal ; \
164 addl $4+4,%esp ; /* dummy trap type and error code */ \
165 iret
166
167 MCOUNT_LABEL(bintr)
/*
 * Fast-interrupt entry points.  Irqs 0-7 live on the master ICU, so
 * only it is EOI'd; irqs 8-15 live on the slave, so both ICUs are.
 */
168 FAST_INTR(0,fastintr0, ENABLE_ICU1)
169 FAST_INTR(1,fastintr1, ENABLE_ICU1)
170 FAST_INTR(2,fastintr2, ENABLE_ICU1)
171 FAST_INTR(3,fastintr3, ENABLE_ICU1)
172 FAST_INTR(4,fastintr4, ENABLE_ICU1)
173 FAST_INTR(5,fastintr5, ENABLE_ICU1)
174 FAST_INTR(6,fastintr6, ENABLE_ICU1)
175 FAST_INTR(7,fastintr7, ENABLE_ICU1)
176 FAST_INTR(8,fastintr8, ENABLE_ICU1_AND_2)
177 FAST_INTR(9,fastintr9, ENABLE_ICU1_AND_2)
178 FAST_INTR(10,fastintr10, ENABLE_ICU1_AND_2)
179 FAST_INTR(11,fastintr11, ENABLE_ICU1_AND_2)
180 FAST_INTR(12,fastintr12, ENABLE_ICU1_AND_2)
181 FAST_INTR(13,fastintr13, ENABLE_ICU1_AND_2)
182 FAST_INTR(14,fastintr14, ENABLE_ICU1_AND_2)
183 FAST_INTR(15,fastintr15, ENABLE_ICU1_AND_2)
/*
 * Normal entry points.  The cpl byte holding each irq's bit is %al
 * for irqs 0-7 and %ah for irqs 8-15.  Irq 0 additionally sets
 * clkintr_pending on entry (presumably the clock — confirm against
 * the clock driver).
 */
184 #define CLKINTR_PENDING movl $1,CNAME(clkintr_pending)
185 INTR(0,intr0, IO_ICU1, ENABLE_ICU1, al, CLKINTR_PENDING)
186 INTR(1,intr1, IO_ICU1, ENABLE_ICU1, al,)
187 INTR(2,intr2, IO_ICU1, ENABLE_ICU1, al,)
188 INTR(3,intr3, IO_ICU1, ENABLE_ICU1, al,)
189 INTR(4,intr4, IO_ICU1, ENABLE_ICU1, al,)
190 INTR(5,intr5, IO_ICU1, ENABLE_ICU1, al,)
191 INTR(6,intr6, IO_ICU1, ENABLE_ICU1, al,)
192 INTR(7,intr7, IO_ICU1, ENABLE_ICU1, al,)
193 INTR(8,intr8, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
194 INTR(9,intr9, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
195 INTR(10,intr10, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
196 INTR(11,intr11, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
197 INTR(12,intr12, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
198 INTR(13,intr13, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
199 INTR(14,intr14, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
200 INTR(15,intr15, IO_ICU2, ENABLE_ICU1_AND_2, ah,)
201 MCOUNT_LABEL(eintr)
202
203 .data
204 .globl _ihandlers
/*
 * One entry per interrupt: the first 16 slots are the Xresume*
 * resumption addresses generated by INTR above; the remaining slots
 * hold the software-interrupt handlers.
 */
205 _ihandlers: /* addresses of interrupt handlers */
206 /* actually resumption addresses for HWI's */
207 .long Xresume0, Xresume1, Xresume2, Xresume3
208 .long Xresume4, Xresume5, Xresume6, Xresume7
209 .long Xresume8, Xresume9, Xresume10, Xresume11
210 .long Xresume12, Xresume13, Xresume14, Xresume15
211 .long _swi_null, swi_net, _swi_null, _swi_null
212 .long _swi_vm, _swi_null, _softclock
213
/*
 * Parallel table of masks, one per _ihandlers slot.  The NHWI
 * hardware slots are only padding since the HWI masks live elsewhere;
 * the SWI masks follow in the same order as the SWI handlers above.
 */
214 imasks: /* masks for interrupt handlers */
215 .space NHWI*4 /* padding; HWI masks are elsewhere */
216
217 .long SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
218 .long SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK
219
220 .text
Cache object: 5110d2c47c4d880b425da94b2f108066
|