1 /*-
2 * Copyright (c) 2001 Matthew Dillon. This code is distributed under
3 * the BSD copyright, /usr/src/COPYRIGHT.
4 *
5 * $FreeBSD: releng/5.1/sys/i386/i386/critical.c 111975 2003-03-08 03:58:50Z davidxu $
6 */
7
8 #include <sys/param.h>
9 #include <sys/systm.h>
10 #include <sys/signalvar.h>
11 #include <sys/kernel.h>
12 #include <sys/lock.h>
13 #include <sys/mutex.h>
14 #include <sys/pcpu.h>
15 #include <sys/proc.h>
16 #include <sys/sysctl.h>
17 #include <sys/ucontext.h>
18 #include <machine/clock.h>
19 #include <machine/critical.h>
20
21 #ifdef SMP
22 #include <machine/privatespace.h>
23 #include <machine/smp.h>
24 #else
25 /*
26 * XXX this mess to get sched_ithd() and call_fast_unpend()
27 */
28 #include <sys/bus.h>
29 #include <machine/apic.h>
30 #include <machine/frame.h>
31 #include <i386/isa/icu.h>
32 #include <i386/isa/intr_machdep.h>
33 #endif
34
35 void i386_unpend(void); /* NOTE: not static, called from assembly */
36
37 /*
38 * cpu_unpend() - called from critical_exit() inline after quick
39 * interrupt-pending check.
40 */
41 void
42 cpu_unpend(void)
43 {
44 register_t eflags;
45 struct thread *td;
46
47 td = curthread;
48 eflags = intr_disable();
49 if (PCPU_GET(int_pending)) {
50 ++td->td_intr_nesting_level;
51 i386_unpend();
52 --td->td_intr_nesting_level;
53 }
54 intr_restore(eflags);
55 }
56
/*
 * cpu_critical_fork_exit() - cleanup after fork
 *
 * For i386 we do not have to do anything here; td_critnest is
 * handled by the fork trampoline code, so this is a no-op.
 */
void
cpu_critical_fork_exit(void)
{
}
67
/*
 * cpu_thread_link() - thread linkup, initialize machine-dependent fields
 *
 * There are currently no machine-dependent per-thread fields that
 * require initialization on i386, so this is a no-op.
 */
void
cpu_thread_link(struct thread *td)
{
}
78
/*
 * Called from cpu_unpend or called from the assembly vector code
 * to process any interrupts which may have occurred while we were in
 * a critical section.
 *
 *	- interrupts must be disabled
 *	- td_critnest must be 0
 *	- td_intr_nesting_level must be incremented by the caller
 *
 * NOT STATIC (called from assembly)
 */
void
i386_unpend(void)
{
	struct clockframe frame;

	/*
	 * Build a synthetic clock frame ("interrupted in the kernel, at
	 * this function") for the hardclock/statclock calls further down,
	 * which expect a frame describing the interrupted context.
	 */
	frame.cf_cs = SEL_KPL;
	frame.cf_eip = (register_t)i386_unpend;
	frame.cf_eflags = PSL_KERNEL;
	KASSERT(curthread->td_critnest == 0, ("unpend critnest != 0"));
	KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled1"));
	/*
	 * Enter a critical section for the duration of the dispatch loop
	 * so the handlers we call cannot recurse back into unpend.
	 */
	curthread->td_critnest = 1;
	for (;;) {
		u_int32_t mask;
		int irq;

		/*
		 * Fast interrupts have priority
		 */
		if ((mask = PCPU_GET(fpending)) != 0) {
			/* Lowest pending IRQ number first. */
			irq = bsfl(mask);
			PCPU_SET(fpending, mask & ~(1 << irq));
			call_fast_unpend(irq);
			KASSERT((read_eflags() & PSL_I) == 0,
			    ("unpend interrupts enabled2 %d", irq));
			continue;
		}

		/*
		 * Threaded interrupts come next
		 */
		if ((mask = PCPU_GET(ipending)) != 0) {
			irq = bsfl(mask);
			PCPU_SET(ipending, mask & ~(1 << irq));
			sched_ithd((void *)irq);
			KASSERT((read_eflags() & PSL_I) == 0,
			    ("unpend interrupts enabled3 %d", irq));
			continue;
		}

		/*
		 * Software interrupts and delayed IPIs are last
		 *
		 * XXX give the bits #defined names. see also
		 * isa/xxx_vector.s
		 *
		 * NOTE(review): bits other than 0 and 1 are cleared from
		 * spending but silently ignored by this switch.
		 */
		if ((mask = PCPU_GET(spending)) != 0) {
			irq = bsfl(mask);
			PCPU_SET(spending, mask & ~(1 << irq));
			switch(irq) {
			case 0:		/* bit 0 - hardclock */
				hardclock_process(&frame);
				break;
			case 1:		/* bit 1 - statclock */
				if (profprocs != 0)
					profclock(&frame);
				if (pscnt == psdiv)
					statclock(&frame);
				break;
			}
			KASSERT((read_eflags() & PSL_I) == 0,
			    ("unpend interrupts enabled4 %d", irq));
			continue;
		}
		/* All three pending masks drained. */
		break;
	}
	/*
	 * Interrupts are still disabled, we can safely clear int_pending
	 * and td_critnest.
	 */
	KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled5"));
	PCPU_SET(int_pending, 0);
	curthread->td_critnest = 0;
}
/* Cache object: 45ef71b824b5ee231bf17a64f4e08b23 */