1 /*-
2 * Copyright (c) 2001 Matthew Dillon. This code is distributed under
3 * the BSD copyright, /usr/src/COPYRIGHT.
4 *
5 * $FreeBSD: releng/5.0/sys/i386/i386/critical.c 99742 2002-07-10 20:15:58Z dillon $
6 */
7
8 #include <sys/param.h>
9 #include <sys/systm.h>
10 #include <sys/signalvar.h>
11 #include <sys/kernel.h>
12 #include <sys/lock.h>
13 #include <sys/mutex.h>
14 #include <sys/pcpu.h>
15 #include <sys/proc.h>
16 #include <sys/sysctl.h>
17 #include <sys/ucontext.h>
18 #include <machine/critical.h>
19
20 #ifdef SMP
21 #include <machine/privatespace.h>
22 #include <machine/smp.h>
23 #else
24 /*
25 * XXX this mess to get sched_ithd() and call_fast_unpend()
26 */
27 #include <sys/bus.h>
28 #include <machine/apic.h>
29 #include <machine/frame.h>
30 #include <i386/isa/icu.h>
31 #include <i386/isa/intr_machdep.h>
32 #endif
33
34 void i386_unpend(void); /* NOTE: not static, called from assembly */
35
36 /*
37 * cpu_unpend() - called from critical_exit() inline after quick
38 * interrupt-pending check.
39 */
40 void
41 cpu_unpend(void)
42 {
43 register_t eflags;
44 struct thread *td;
45
46 td = curthread;
47 eflags = intr_disable();
48 if (PCPU_GET(int_pending)) {
49 ++td->td_intr_nesting_level;
50 i386_unpend();
51 --td->td_intr_nesting_level;
52 }
53 intr_restore(eflags);
54 }
55
56 /*
57 * cpu_critical_fork_exit() - cleanup after fork
58 *
59 * For i386 we do not have to do anything, td_critnest is
60 * handled by the fork trampoline code.
61 */
/*
 * cpu_critical_fork_exit() - MD cleanup hook run when a forked child
 * returns to user mode for the first time.
 *
 * Intentionally empty on i386: the fork trampoline assembly already
 * establishes the child's td_critnest, so no C-level work remains.
 */
void
cpu_critical_fork_exit(void)
{
}
66
67 /*
68 * cpu_thread_link() - thread linkup, initialize machine-dependant fields
69 *
70 * There are currently no machine-dependant fields that require
71 * initialization.
72 */
/*
 * cpu_thread_link() - MD hook called when a thread is linked into a
 * process; initializes machine-dependent thread fields.
 *
 * Intentionally empty on i386: there are currently no machine-dependent
 * fields that require initialization.  'td' is accepted (and ignored)
 * to satisfy the MI interface.
 */
void
cpu_thread_link(struct thread *td)
{
}
77
78 /*
79 * Called from cpu_unpend or called from the assembly vector code
80 * to process any interrupts which may have occured while we were in
81 * a critical section.
82 *
83 * - interrupts must be disabled
84 * - td_critnest must be 0
85 * - td_intr_nesting_level must be incremented by the caller
86 *
87 * NOT STATIC (called from assembly)
88 */
89 void
90 i386_unpend(void)
91 {
92 KASSERT(curthread->td_critnest == 0, ("unpend critnest != 0"));
93 KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled1"));
94 curthread->td_critnest = 1;
95 for (;;) {
96 u_int32_t mask;
97 int irq;
98
99 /*
100 * Fast interrupts have priority
101 */
102 if ((mask = PCPU_GET(fpending)) != 0) {
103 irq = bsfl(mask);
104 PCPU_SET(fpending, mask & ~(1 << irq));
105 call_fast_unpend(irq);
106 KASSERT((read_eflags() & PSL_I) == 0,
107 ("unpend interrupts enabled2 %d", irq));
108 continue;
109 }
110
111 /*
112 * Threaded interrupts come next
113 */
114 if ((mask = PCPU_GET(ipending)) != 0) {
115 irq = bsfl(mask);
116 PCPU_SET(ipending, mask & ~(1 << irq));
117 sched_ithd((void *)irq);
118 KASSERT((read_eflags() & PSL_I) == 0,
119 ("unpend interrupts enabled3 %d", irq));
120 continue;
121 }
122
123 /*
124 * Software interrupts and delayed IPIs are last
125 *
126 * XXX give the bits #defined names. see also
127 * isa/xxx_vector.s
128 */
129 if ((mask = PCPU_GET(spending)) != 0) {
130 irq = bsfl(mask);
131 PCPU_SET(spending, mask & ~(1 << irq));
132 switch(irq) {
133 case 0: /* bit 0 - hardclock */
134 mtx_lock_spin(&sched_lock);
135 hardclock_process(curthread, 0);
136 mtx_unlock_spin(&sched_lock);
137 break;
138 case 1: /* bit 1 - statclock */
139 mtx_lock_spin(&sched_lock);
140 statclock_process(curthread->td_kse,
141 (register_t)i386_unpend, 0);
142 mtx_unlock_spin(&sched_lock);
143 break;
144 }
145 KASSERT((read_eflags() & PSL_I) == 0,
146 ("unpend interrupts enabled4 %d", irq));
147 continue;
148 }
149 break;
150 }
151 /*
152 * Interrupts are still disabled, we can safely clear int_pending
153 * and td_critnest.
154 */
155 KASSERT((read_eflags() & PSL_I) == 0, ("unpend interrupts enabled5"));
156 PCPU_SET(int_pending, 0);
157 curthread->td_critnest = 0;
158 }
Cache object: 126e4530d354966e9354bbc2ad98aeba
|