/*
 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD$
 */


#include <machine/apic.h>
#include <machine/smp.h>

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))
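
/*
 * For reference, a hedged C sketch of the two macros above (the variable
 * names here are illustrative only):
 *
 *	unsigned irq_bit = 1u << irq_num;	  // bit for this IRQ in masks
 *						  // such as apic_imen/_ipending
 *	unsigned redtbl_lo = 0x10 + irq_num * 2;  // IO APIC redirection table
 *						  // entry, low 32 bits (the high
 *						  // half lives at the next index)
 */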


/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define FAST_INTR(irq_num, vec_name) \
	.text ; \
	SUPERALIGN_TEXT ; \
IDTVEC(vec_name) ; \
	pushl %eax ;		/* save only call-used registers */ \
	pushl %ecx ; \
	pushl %edx ; \
	pushl %ds ; \
	MAYBE_PUSHL_ES ; \
	pushl %fs ; \
	movl $KDSEL,%eax ; \
	mov %ax,%ds ; \
	MAYBE_MOVW_AX_ES ; \
	movl $KPSEL,%eax ; \
	mov %ax,%fs ; \
	FAKE_MCOUNT((5+ACTUALLY_PUSHED)*4(%esp)) ; \
	pushl _intr_unit + (irq_num) * 4 ; \
	call *_intr_handler + (irq_num) * 4 ;	/* do the work ASAP */ \
	addl $4, %esp ; \
	movl $0, lapic_eoi ; \
	lock ; \
	incl _cnt+V_INTR ;	/* book-keeping can wait */ \
	movl _intr_countp + (irq_num) * 4, %eax ; \
	lock ; \
	incl (%eax) ; \
	MEXITCOUNT ; \
	popl %fs ; \
	MAYBE_POPL_ES ; \
	popl %ds ; \
	popl %edx ; \
	popl %ecx ; \
	popl %eax ; \
	iret
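
/*
 * A hedged C-level sketch of what a FAST_INTR() handler does, as a reading
 * aid only (array/function names follow the symbols used above):
 *
 *	save %eax/%ecx/%edx and the data segment registers;
 *	load KDSEL/KPSEL into %ds (and %es/%fs);
 *	intr_handler[irq_num](intr_unit[irq_num]);	// run handler ASAP
 *	lapic_eoi = 0;					// ack the local APIC
 *	cnt.v_intr++; (*intr_countp[irq_num])++;	// statistics afterwards
 *	restore the saved registers and iret;
 */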

/*
 * Trap frame push/pop used by the INTR() entry points and the IPI
 * handlers below.
 */
#define PUSH_FRAME \
	pushl $0 ;	/* dummy error code */ \
	pushl $0 ;	/* dummy trap type */ \
	pushal ; \
	pushl %ds ;	/* save data and extra segments ... */ \
	pushl %es ; \
	pushl %fs

#define POP_FRAME \
	popl %fs ; \
	popl %es ; \
	popl %ds ; \
	popal ; \
	addl $4+4,%esp
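
/*
 * A hedged sketch of the stack layout PUSH_FRAME leaves behind, reading
 * from the new stack top upward (member names are descriptive only):
 *
 *	struct frame {
 *		int fs, es, ds;			// segments pushed last
 *		int edi, esi, ebp, esp, ebx;	// pushal (esp is pre-pushal)
 *		int edx, ecx, eax;
 *		int trapno, err;		// the two dummy zeroes
 *		int eip, cs, eflags;		// pushed by the CPU itself
 *	};
 *
 * POP_FRAME pops the same registers back and discards the two dummies
 * with the final addl.
 */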

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num) \
	IMASK_LOCK ;				/* into critical reg */ \
	testl $IRQ_BIT(irq_num), _apic_imen ; \
	jne 7f ;				/* masked, don't mask */ \
	orl $IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */ \
	movl IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */ \
	movl REDIRIDX(irq_num), %eax ;		/* get the index */ \
	movl %eax, (%ecx) ;			/* write the index */ \
	movl IOAPIC_WINDOW(%ecx), %eax ;	/* current value */ \
	orl $IOART_INTMASK, %eax ;		/* set the mask */ \
	movl %eax, IOAPIC_WINDOW(%ecx) ;	/* new value */ \
7: ;						/* already masked */ \
	IMASK_UNLOCK
/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num) \
	testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
	jz 9f ;					/* edge, don't mask */ \
	MASK_IRQ(irq_num) ; \
9:


#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num) \
	movl _apic_isrbit_location + 8 * (irq_num), %eax ; \
	movl (%eax), %eax ; \
	testl _apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
	jz 9f ;					/* not active */ \
	movl $0, lapic_eoi ; \
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
9:

#else
#define EOI_IRQ(irq_num) \
	testl $IRQ_BIT(irq_num), lapic_isr1 ; \
	jz 9f ;					/* not active */ \
	movl $0, lapic_eoi ; \
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \
9:
#endif


/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num) \
	IMASK_LOCK ;				/* into critical reg */ \
	testl $IRQ_BIT(irq_num), _apic_imen ; \
	je 7f ;					/* bit clear, not masked */ \
	andl $~IRQ_BIT(irq_num), _apic_imen ;	/* clear mask bit */ \
	movl IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */ \
	movl REDIRIDX(irq_num), %eax ;		/* get the index */ \
	movl %eax, (%ecx) ;			/* write the index */ \
	movl IOAPIC_WINDOW(%ecx), %eax ;	/* current value */ \
	andl $~IOART_INTMASK, %eax ;		/* clear the mask */ \
	movl %eax, IOAPIC_WINDOW(%ecx) ;	/* new value */ \
7: ; \
	IMASK_UNLOCK
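
/*
 * Both MASK_IRQ() and UNMASK_IRQ() go through the IO APIC's indirect
 * register interface: store the redirection-table register index in the
 * select register at the APIC base, then read-modify-write the value
 * through the window register.  A hedged C sketch (the int_to_apicintpin
 * member names here are made up; only the +8/+12 offsets above are real):
 *
 *	volatile u_int *ioapic = int_to_apicintpin[irq].io_addr;   //  +8
 *	u_int idx = int_to_apicintpin[irq].io_idx;                 // +12
 *	ioapic[0] = idx;			// select the redirection entry
 *	u_int val = ioapic[IOAPIC_WINDOW / 4];	// read its low 32 bits
 *	val |= IOART_INTMASK;			// or &= ~IOART_INTMASK to unmask
 *	ioapic[IOAPIC_WINDOW / 4] = val;	// write it back
 */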

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushf
	cli
	pushl $CNAME(apic_itrace_debuglock)
	call CNAME(s_lock_np)			/* lock the trace buffer */
	addl $4, %esp
	movl CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl $32767, %ecx			/* 32K-entry circular buffer */
	movl _cpuid, %eax
	shll $8, %eax
	orl 8(%esp), %eax			/* entry = (cpuid << 8) | event id */
	movw %ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl %ecx
	andl $32767, %ecx
	movl %ecx, CNAME(apic_itrace_debugbuffer_idx)
	pushl $CNAME(apic_itrace_debuglock)
	call CNAME(s_unlock_np)
	addl $4, %esp
	popf					/* also restores the interrupt flag */
	ret


#define APIC_ITRACE(name, irq_num, id) \
	lock ;					/* MP-safe */ \
	incl CNAME(name) + (irq_num) * 4 ; \
	pushl %eax ; \
	pushl %ecx ; \
	pushl %edx ; \
	movl $(irq_num), %eax ; \
	cmpl $APIC_INTR_DIAGNOSTIC_IRQ, %eax ; \
	jne 7f ; \
	pushl $id ; \
	call log_intr_event ; \
	addl $4, %esp ; \
7: ; \
	popl %edx ; \
	popl %ecx ; \
	popl %eax
#else
#define APIC_ITRACE(name, irq_num, id) \
	lock ;					/* MP-safe */ \
	incl CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER	1
#define APIC_ITRACE_EOI		2
#define APIC_ITRACE_TRYISRLOCK	3
#define APIC_ITRACE_GOTISRLOCK	4
#define APIC_ITRACE_ENTER2	5
#define APIC_ITRACE_LEAVE	6
#define APIC_ITRACE_UNMASK	7
#define APIC_ITRACE_ACTIVE	8
#define APIC_ITRACE_MASKED	9
#define APIC_ITRACE_NOISRLOCK	10
#define APIC_ITRACE_MASKED2	11
#define APIC_ITRACE_SPLZ	12
#define APIC_ITRACE_DORETI	13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif

#define INTR(irq_num, vec_name, maybe_extra_ipending) \
	.text ; \
	SUPERALIGN_TEXT ; \
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \
IDTVEC(vec_name) ; \
	PUSH_FRAME ; \
	movl $KDSEL, %eax ;	/* reload with kernel's data segment */ \
	mov %ax, %ds ; \
	mov %ax, %es ; \
	movl $KPSEL, %eax ; \
	mov %ax, %fs ; \
	; \
	maybe_extra_ipending ; \
	; \
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \
	lock ;					/* MP-safe */ \
	btsl $(irq_num), iactive ;		/* lazy masking */ \
	jc 1f ;					/* already active */ \
	; \
	MASK_LEVEL_IRQ(irq_num) ; \
	EOI_IRQ(irq_num) ; \
0: ; \
	APIC_ITRACE(apic_itrace_tryisrlock, irq_num, APIC_ITRACE_TRYISRLOCK) ;\
	MP_TRYLOCK ;		/* XXX this is going away... */ \
	testl %eax, %eax ;			/* did we get it? */ \
	jz 3f ;					/* no */ \
	; \
	APIC_ITRACE(apic_itrace_gotisrlock, irq_num, APIC_ITRACE_GOTISRLOCK) ;\
	testl $IRQ_BIT(irq_num), _cpl ; \
	jne 2f ;				/* this INT masked */ \
	; \
	incb _intr_nesting_level ; \
	; \
/* entry point used by doreti_unpend for HWIs. */ \
__CONCAT(Xresume,irq_num): ; \
	FAKE_MCOUNT(13*4(%esp)) ;		/* XXX avoid dbl cnt */ \
	lock ; incl _cnt+V_INTR ;		/* tally interrupts */ \
	movl _intr_countp + (irq_num) * 4, %eax ; \
	lock ; incl (%eax) ; \
	; \
	movl _cpl, %eax ; \
	pushl %eax ; \
	orl _intr_mask + (irq_num) * 4, %eax ; \
	movl %eax, _cpl ; \
	lock ; \
	andl $~IRQ_BIT(irq_num), _ipending ; \
	; \
	pushl _intr_unit + (irq_num) * 4 ; \
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \
	sti ; \
	call *_intr_handler + (irq_num) * 4 ; \
	cli ; \
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \
	; \
	lock ; andl $~IRQ_BIT(irq_num), iactive ; \
	UNMASK_IRQ(irq_num) ; \
	APIC_ITRACE(apic_itrace_unmask, irq_num, APIC_ITRACE_UNMASK) ; \
	sti ;					/* doreti repeats cli/sti */ \
	MEXITCOUNT ; \
	jmp _doreti ; \
	; \
	ALIGN_TEXT ; \
1: ;						/* active */ \
	APIC_ITRACE(apic_itrace_active, irq_num, APIC_ITRACE_ACTIVE) ; \
	MASK_IRQ(irq_num) ; \
	EOI_IRQ(irq_num) ; \
	lock ; \
	orl $IRQ_BIT(irq_num), _ipending ; \
	lock ; \
	btsl $(irq_num), iactive ;		/* still active */ \
	jnc 0b ;				/* retry */ \
	POP_FRAME ; \
	iret ;			/* XXX: iactive bit might be 0 now */ \
	ALIGN_TEXT ; \
2: ;				/* masked by cpl, leave iactive set */ \
	APIC_ITRACE(apic_itrace_masked, irq_num, APIC_ITRACE_MASKED) ; \
	lock ; \
	orl $IRQ_BIT(irq_num), _ipending ; \
	MP_RELLOCK ; \
	POP_FRAME ; \
	iret ; \
	ALIGN_TEXT ; \
3: ;				/* other cpu has isr lock */ \
	APIC_ITRACE(apic_itrace_noisrlock, irq_num, APIC_ITRACE_NOISRLOCK) ;\
	lock ; \
	orl $IRQ_BIT(irq_num), _ipending ; \
	testl $IRQ_BIT(irq_num), _cpl ; \
	jne 4f ;				/* this INT masked */ \
	call forward_irq ;			/* forward irq to lock holder */ \
	POP_FRAME ;				/* and return */ \
	iret ; \
	ALIGN_TEXT ; \
4: ;						/* blocked */ \
	APIC_ITRACE(apic_itrace_masked2, irq_num, APIC_ITRACE_MASKED2) ;\
	POP_FRAME ;				/* and return */ \
	iret
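
/*
 * A hedged pseudocode outline of the INTR() protocol above, as a reading
 * aid only (details such as the iactive retry loop are omitted; names
 * follow the symbols used in the macro):
 *
 *	if (atomic_test_and_set_bit(&iactive, irq)) {	// already active
 *		MASK_IRQ(irq); EOI_IRQ(irq);
 *		ipending |= IRQ_BIT(irq);		// remember it, return
 *		return;
 *	}
 *	MASK_LEVEL_IRQ(irq); EOI_IRQ(irq);	// level INTs stay masked
 *	if (!MP_TRYLOCK()) {			// ISR lock held on another cpu
 *		ipending |= IRQ_BIT(irq);
 *		if (!(cpl & IRQ_BIT(irq)))
 *			forward_irq();		// let the lock holder run it
 *		return;
 *	}
 *	if (cpl & IRQ_BIT(irq)) {		// masked at the current spl
 *		ipending |= IRQ_BIT(irq); MP_RELLOCK(); return;
 *	}
 *	// XresumeNN: raise cpl by intr_mask[irq], clear the ipending bit,
 *	// call the handler with interrupts enabled, clear iactive,
 *	// UNMASK_IRQ(irq) and finish through doreti.
 */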

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different than the "spurious INTerrupt" generated by an
 *  8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret


/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xinvltlb
_Xinvltlb:
	pushl %eax

#ifdef COUNT_XINVLTLB_HITS
	pushl %fs
	movl $KPSEL, %eax
	mov %ax, %fs
	movl _cpuid, %eax
	popl %fs
	ss
	incl _xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl %cr3, %eax			/* invalidate the TLB */
	movl %eax, %cr3

	ss				/* stack segment, avoid %ds load */
	movl $0, lapic_eoi		/* End Of Interrupt to APIC */

	popl %eax
	iret


#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU:
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0 -> fs, 4 -> ds, 8 -> ebx, 12 -> eax, 16 -> eip, 20 -> cs, 24 -> eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl %eax
	pushl %ebx
	pushl %ds			/* save current data segment */
	pushl %fs

	movl $KDSEL, %eax
	mov %ax, %ds			/* use KERNEL data segment */
	movl $KPSEL, %eax
	mov %ax, %fs

	movl $0, lapic_eoi		/* End Of Interrupt to APIC */

	movl $0, %ebx
	movl 20(%esp), %eax		/* interrupted %cs */
	andl $3, %eax
	cmpl $3, %eax			/* ring 3 -> user */
	je 1f
	testl $PSL_VM, 24(%esp)		/* vm86 -> user */
	jne 1f
	incl %ebx			/* system or interrupt */
1:
	movl _cpuid, %eax
	movl %ebx, _checkstate_cpustate(,%eax,4)
	movl _curproc, %ebx
	movl %ebx, _checkstate_curproc(,%eax,4)
	movl 16(%esp), %ebx
	movl %ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<<id) */
	btsl %eax, _checkstate_probed_cpus

	popl %fs
	popl %ds			/* restore previous data segment */
	popl %ebx
	popl %eax
	iret

#endif /* BETTER_CLOCK */

/*
 * Executed by a CPU when it receives an Xcpuast IPI from another CPU:
 *
 *  - Signals its receipt by clearing bit cpuid in checkstate_need_ast.
 *
 *  - We need a better method of triggering asts on other cpus.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpuast
_Xcpuast:
	PUSH_FRAME
	movl $KDSEL, %eax
	mov %ax, %ds			/* use KERNEL data segment */
	mov %ax, %es
	movl $KPSEL, %eax
	mov %ax, %fs

	movl _cpuid, %eax
	lock				/* checkstate_need_ast &= ~(1<<id) */
	btrl %eax, _checkstate_need_ast
	movl $0, lapic_eoi		/* End Of Interrupt to APIC */

	lock
	btsl %eax, _checkstate_pending_ast
	jc 1f

	FAKE_MCOUNT(13*4(%esp))

	/*
	 * Giant locks do not come cheap.
	 * A lot of cycles are going to be wasted here.
	 */
	call _get_mplock

	movl _cpl, %eax
	pushl %eax
	orl $AST_PENDING, _astpending	/* XXX */
	incb _intr_nesting_level
	sti

	pushl $0

	movl _cpuid, %eax
	lock
	btrl %eax, _checkstate_pending_ast
	lock
	btrl %eax, CNAME(resched_cpus)
	jnc 2f
	orl $AST_PENDING+AST_RESCHED, _astpending
	lock
	incl CNAME(want_resched_cnt)
2:
	lock
	incl CNAME(cpuast_cnt)
	MEXITCOUNT
	jmp _doreti
1:
	/* We are already in the process of delivering an ast for this CPU */
	POP_FRAME
	iret


/*
 * Executed by a CPU when it receives an XFORWARD_IRQ IPI.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xforward_irq
_Xforward_irq:
	PUSH_FRAME
	movl $KDSEL, %eax
	mov %ax, %ds			/* use KERNEL data segment */
	mov %ax, %es
	movl $KPSEL, %eax
	mov %ax, %fs

	movl $0, lapic_eoi		/* End Of Interrupt to APIC */

	FAKE_MCOUNT(13*4(%esp))

	MP_TRYLOCK
	testl %eax, %eax		/* Did we get the lock? */
	jz 1f				/* No */

	lock
	incl CNAME(forward_irq_hitcnt)
	cmpb $4, _intr_nesting_level
	jae 2f

	movl _cpl, %eax
	pushl %eax
	incb _intr_nesting_level
	sti

	pushl $0

	MEXITCOUNT
	jmp _doreti			/* Handle forwarded interrupt */
1:
	lock
	incl CNAME(forward_irq_misscnt)
	call forward_irq		/* Oops, we've lost the isr lock */
	MEXITCOUNT
	POP_FRAME
	iret
2:
	lock
	incl CNAME(forward_irq_toodeepcnt)
3:
	MP_RELLOCK
	MEXITCOUNT
	POP_FRAME
	iret

/*
 * forward_irq: IPI the CPU that currently holds the MP (ISR) lock so
 * that it services the interrupt we could not handle here.
 */
forward_irq:
	MCOUNT
	cmpl $0, _invltlb_ok
	jz 4f

	cmpl $0, CNAME(forward_irq_enabled)
	jz 4f

	movl _mp_lock, %eax
	cmpl $FREE_LOCK, %eax
	jne 1f
	movl $0, %eax			/* Pick CPU #0 if no one has lock */
1:
	shrl $24, %eax			/* lock holder's cpu number is the top byte */
	movl _cpu_num_to_apic_id(,%eax,4), %ecx
	shll $24, %ecx			/* destination APIC id field */
	movl lapic_icr_hi, %eax
	andl $~APIC_ID_MASK, %eax
	orl %ecx, %eax
	movl %eax, lapic_icr_hi		/* set IPI destination */

2:
	movl lapic_icr_lo, %eax		/* wait for any previous IPI to be sent */
	andl $APIC_DELSTAT_MASK, %eax
	jnz 2b
	movl lapic_icr_lo, %eax		/* send the XFORWARD_IRQ vector */
	andl $APIC_RESV2_MASK, %eax
	orl $(APIC_DEST_DESTFLD|APIC_DELMODE_FIXED|XFORWARD_IRQ_OFFSET), %eax
	movl %eax, lapic_icr_lo
3:
	movl lapic_icr_lo, %eax		/* wait for delivery to complete */
	andl $APIC_DELSTAT_MASK, %eax
	jnz 3b
4:
	ret
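
/*
 * The ICR writes above follow the usual local APIC IPI sequence.  A hedged
 * C sketch, in the same order as the code ("target" stands for the APIC id
 * looked up via cpu_num_to_apic_id[]):
 *
 *	lapic_icr_hi = (lapic_icr_hi & ~APIC_ID_MASK) | (target << 24);
 *	while (lapic_icr_lo & APIC_DELSTAT_MASK)
 *		;				// prior IPI still being sent
 *	lapic_icr_lo = (lapic_icr_lo & APIC_RESV2_MASK) |
 *	    APIC_DEST_DESTFLD | APIC_DELMODE_FIXED | XFORWARD_IRQ_OFFSET;
 *	while (lapic_icr_lo & APIC_DELSTAT_MASK)
 *		;				// wait for this one to go out
 */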

/*
 * Executed by a CPU when it receives an Xcpustop IPI from another CPU:
 *
 *  - Signals its receipt.
 *  - Waits for permission to restart.
 *  - Signals its restart.
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpustop
_Xcpustop:
	pushl %ebp
	movl %esp, %ebp
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %ds			/* save current data segment */
	pushl %fs

	movl $KDSEL, %eax
	mov %ax, %ds			/* use KERNEL data segment */
	movl $KPSEL, %eax
	mov %ax, %fs

	movl $0, lapic_eoi		/* End Of Interrupt to APIC */

	movl _cpuid, %eax
	imull $PCB_SIZE, %eax
	leal CNAME(stoppcbs)(%eax), %eax
	pushl %eax
	call CNAME(savectx)		/* Save process context */
	addl $4, %esp


	movl _cpuid, %eax

	lock
	btsl %eax, _stopped_cpus	/* stopped_cpus |= (1<<id) */
1:
	btl %eax, _started_cpus		/* while (!(started_cpus & (1<<id))) */
	jnc 1b

	lock
	btrl %eax, _started_cpus	/* started_cpus &= ~(1<<id) */
	lock
	btrl %eax, _stopped_cpus	/* stopped_cpus &= ~(1<<id) */

	test %eax, %eax
	jnz 2f				/* only CPU #0 runs the restart hook */

	movl CNAME(cpustop_restartfunc), %eax
	test %eax, %eax
	jz 2f
	movl $0, CNAME(cpustop_restartfunc)	/* One-shot */

	call *%eax
2:
	popl %fs
	popl %ds			/* restore previous data segment */
	popl %edx
	popl %ecx
	popl %eax
	movl %ebp, %esp
	popl %ebp
	iret


MCOUNT_LABEL(bintr)
	FAST_INTR(0,fastintr0)
	FAST_INTR(1,fastintr1)
	FAST_INTR(2,fastintr2)
	FAST_INTR(3,fastintr3)
	FAST_INTR(4,fastintr4)
	FAST_INTR(5,fastintr5)
	FAST_INTR(6,fastintr6)
	FAST_INTR(7,fastintr7)
	FAST_INTR(8,fastintr8)
	FAST_INTR(9,fastintr9)
	FAST_INTR(10,fastintr10)
	FAST_INTR(11,fastintr11)
	FAST_INTR(12,fastintr12)
	FAST_INTR(13,fastintr13)
	FAST_INTR(14,fastintr14)
	FAST_INTR(15,fastintr15)
	FAST_INTR(16,fastintr16)
	FAST_INTR(17,fastintr17)
	FAST_INTR(18,fastintr18)
	FAST_INTR(19,fastintr19)
	FAST_INTR(20,fastintr20)
	FAST_INTR(21,fastintr21)
	FAST_INTR(22,fastintr22)
	FAST_INTR(23,fastintr23)

#define CLKINTR_PENDING \
	pushl $clock_lock ; \
	call s_lock ; \
	movl $1,CNAME(clkintr_pending) ; \
	call s_unlock ; \
	addl $4, %esp

	INTR(0,intr0, CLKINTR_PENDING)
	INTR(1,intr1,)
	INTR(2,intr2,)
	INTR(3,intr3,)
	INTR(4,intr4,)
	INTR(5,intr5,)
	INTR(6,intr6,)
	INTR(7,intr7,)
	INTR(8,intr8,)
	INTR(9,intr9,)
	INTR(10,intr10,)
	INTR(11,intr11,)
	INTR(12,intr12,)
	INTR(13,intr13,)
	INTR(14,intr14,)
	INTR(15,intr15,)
	INTR(16,intr16,)
	INTR(17,intr17,)
	INTR(18,intr18,)
	INTR(19,intr19,)
	INTR(20,intr20,)
	INTR(21,intr21,)
	INTR(22,intr22,)
	INTR(23,intr23,)
MCOUNT_LABEL(eintr)

/*
 * Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.
 *
 * - Calls the generic rendezvous action function.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xrendezvous
_Xrendezvous:
	PUSH_FRAME
	movl $KDSEL, %eax
	mov %ax, %ds			/* use KERNEL data segment */
	mov %ax, %es
	movl $KPSEL, %eax
	mov %ax, %fs

	call _smp_rendezvous_action

	movl $0, lapic_eoi		/* End Of Interrupt to APIC */
	POP_FRAME
	iret


	.data
/*
 * Addresses of interrupt handlers.
 *  XresumeNN: Resumption addresses for HWIs.
 */
	.globl _ihandlers
_ihandlers:
/*
 * used by:
 *  ipl.s:	doreti_unpend
 */
	.long	Xresume0, Xresume1, Xresume2, Xresume3
	.long	Xresume4, Xresume5, Xresume6, Xresume7
	.long	Xresume8, Xresume9, Xresume10, Xresume11
	.long	Xresume12, Xresume13, Xresume14, Xresume15
	.long	Xresume16, Xresume17, Xresume18, Xresume19
	.long	Xresume20, Xresume21, Xresume22, Xresume23
/*
 * used by:
 *  ipl.s:	doreti_unpend
 *  apic_ipl.s:	splz_unpend
 */
	.long	_swi_null, swi_net, _swi_null, _swi_null
	.long	_swi_vm, _swi_null, _softclock

imasks:				/* masks for interrupt handlers */
	.space	NHWI*4		/* padding; HWI masks are elsewhere */

	.long	SWI_TTY_MASK, SWI_NET_MASK, SWI_CAMNET_MASK, SWI_CAMBIO_MASK
	.long	SWI_VM_MASK, SWI_TQ_MASK, SWI_CLOCK_MASK

/* active flag for lazy masking */
iactive:
	.long	0

#ifdef COUNT_XINVLTLB_HITS
	.globl _xhits
_xhits:
	.space (NCPU * 4), 0
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
	.globl _stopped_cpus, _started_cpus
_stopped_cpus:
	.long	0
_started_cpus:
	.long	0

#ifdef BETTER_CLOCK
	.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
	.long	0
#endif /* BETTER_CLOCK */
	.globl _checkstate_need_ast
_checkstate_need_ast:
	.long	0
_checkstate_pending_ast:
	.long	0
	.globl CNAME(forward_irq_misscnt)
	.globl CNAME(forward_irq_toodeepcnt)
	.globl CNAME(forward_irq_hitcnt)
	.globl CNAME(resched_cpus)
	.globl CNAME(want_resched_cnt)
	.globl CNAME(cpuast_cnt)
	.globl CNAME(cpustop_restartfunc)
CNAME(forward_irq_misscnt):
	.long	0
CNAME(forward_irq_hitcnt):
	.long	0
CNAME(forward_irq_toodeepcnt):
	.long	0
CNAME(resched_cpus):
	.long	0
CNAME(want_resched_cnt):
	.long	0
CNAME(cpuast_cnt):
	.long	0
CNAME(cpustop_restartfunc):
	.long	0



	.globl _apic_pin_trigger
_apic_pin_trigger:
	.long	0

	.text