1 /*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD$
26 */
27
28 #ifndef _MACHINE_SMPTESTS_H_
29 #define _MACHINE_SMPTESTS_H_
30
31
32 /*
33 * Various 'tests in progress' and configuration parameters.
34 */
35
36
37 /*
38 * Tor's clock improvements.
39 *
40 * When the giant kernel lock disappears, a different strategy should
41 * probably be used, thus this patch can only be considered a temporary
42 * measure.
43 *
44 * This patch causes (NCPU-1)*(128+100) extra IPIs per second.
45 * During profiling, the number is (NCPU-1)*(1024+100) extra IPIs/s
46 * in addition to extra IPIs due to forwarding ASTs to other CPUs.
47 *
48 * Having a shared AST flag in an SMP configuration is wrong, and I've
49 * just kludged around it, based upon the kernel lock blocking other
50 * processors from entering the kernel while handling an AST for one
 * processor. When the giant kernel lock disappears, this kludge breaks.
52 *
53 * -- Tor
54 */
55 #define BETTER_CLOCK
56
57
58 /*
59 * Control the "giant lock" pushdown by logical steps.
60 */
61 #define PUSHDOWN_LEVEL_1
62 #define PUSHDOWN_LEVEL_2
63 #define PUSHDOWN_LEVEL_3_NOT
64 #define PUSHDOWN_LEVEL_4_NOT
65
66 /*
67 * Debug version of simple_lock. This will store the CPU id of the
68 * holding CPU along with the lock. When a CPU fails to get the lock
69 * it compares its own id to the holder id. If they are the same it
70 * panic()s, as simple locks are binary, and this would cause a deadlock.
71 *
72 */
73 #define SL_DEBUG
74
75
76 /*
77 * Put FAST_INTR() ISRs at an APIC priority above the regular INTs.
78 * Allow the mp_lock() routines to handle FAST interrupts while spinning.
79 */
80 #ifdef PUSHDOWN_LEVEL_1
81 #define FAST_HI
82 #endif
83
84
85 /*
86 * These defines enable critical region locking of areas that were
87 * protected via cli/sti in the UP kernel.
88 *
89 * MPINTRLOCK protects all the generic areas.
90 * COMLOCK protects the sio/cy drivers.
91 * CLOCKLOCK protects clock hardware and data
92 * known to be incomplete:
93 * joystick lkm
94 * ?
95 */
96 #ifdef PUSHDOWN_LEVEL_1
97 #define USE_MPINTRLOCK
98 #define USE_COMLOCK
99 #define USE_CLOCKLOCK
100 #endif
101
102
103 /*
104 * INTR_SIMPLELOCK has been removed, as the interrupt mechanism will likely
105 * not use this sort of optimization if we move to interrupt threads.
106 */
107 #ifdef PUSHDOWN_LEVEL_4
108 #endif
109
110
111 /*
112 * CPL_AND_CML has been removed. Interrupt threads will eventually not
113 * use either mechanism so there is no point trying to optimize it.
114 */
115 #ifdef PUSHDOWN_LEVEL_3
116 #endif
117
118
119 /*
120 * SPL_DEBUG_POSTCODE/INTR_SPL/SPL_DEBUG - removed
121 *
122 * These functions were too expensive for the standard case but, more
123 * importantly, we should be able to come up with a much cleaner way
124 * to handle the cpl. Having to do any locking at all is a mistake
125 * for something that is modified as often as cpl is.
126 */
127
128 /*
129 * FAST_WITHOUTCPL - now made the default (define removed). Text below
130 * contains the current discussion. I am confident we can find a solution
131 * that does not require us to process softints from a hard int, which can
132 * kill serial performance due to the lack of true hardware ipl's.
133 *
134 ****
135 *
136 * Ignore the ipending bits when exiting FAST_INTR() routines.
137 *
138 * according to Bruce:
139 *
140 * setsoft*() may set ipending. setsofttty() is actually used in the
141 * FAST_INTR handler in some serial drivers. This is necessary to get
142 * output completions and other urgent events handled as soon as possible.
143 * The flag(s) could be set in a variable other than ipending, but they
 * need to be checked against cpl to decide whether the software interrupt
145 * handler can/should run.
146 *
147 * (FAST_INTR used to just return
148 * in all cases until rev.1.7 of vector.s. This worked OK provided there
149 * were no user-mode CPU hogs. CPU hogs caused an average latency of 1/2
150 * clock tick for output completions...)
151 ***
152 *
153 * So I need to restore cpl handling someday, but AFTER
154 * I finish making spl/cpl MP-safe.
155 */
156 #ifdef PUSHDOWN_LEVEL_1
157 #endif
158
159
160 /*
161 * FAST_SIMPLELOCK no longer exists, because it doesn't help us. The cpu
162 * is likely to already hold the MP lock and recursive MP locks are now
163 * very cheap, so we do not need this optimization. Eventually *ALL*
164 * interrupts will run in their own thread, so there is no sense complicating
165 * matters now.
166 */
167 #ifdef PUSHDOWN_LEVEL_1
168 #endif
169
170
171 /*
172 * Portions of the old TEST_LOPRIO code, back from the grave!
173 */
174 #define GRAB_LOPRIO
175
176
177 /*
178 * Send CPUSTOP IPI for stop/restart of other CPUs on DDB break.
179 *
180 #define VERBOSE_CPUSTOP_ON_DDBBREAK
181 */
182 #define CPUSTOP_ON_DDBBREAK
183
184
185 /*
186 * Bracket code/comments relevant to the current 'giant lock' model.
187 * Everything is now the 'giant lock' model, but we will use this as
188 * we start to "push down" the lock.
189 */
190 #define GIANT_LOCK
191
192 #ifdef APIC_IO
193 /*
194 * Enable extra counters for some selected locations in the interrupt handlers.
195 * Look in apic_vector.s, apic_ipl.s and ipl.s for APIC_ITRACE or
196 * APIC_INTR_DIAGNOSTIC.
197 */
198 #undef APIC_INTR_DIAGNOSTIC
199
200 /*
201 * Add extra tracking of a specific interrupt. Look in apic_vector.s,
202 * apic_ipl.s and ipl.s for APIC_ITRACE and log_intr_event.
203 * APIC_INTR_DIAGNOSTIC must be defined for this to work.
204 */
205 #ifdef APIC_INTR_DIAGNOSTIC
206 #define APIC_INTR_DIAGNOSTIC_IRQ 17
207 #endif
208
209 /*
210 * Don't assume that slow interrupt handler X is called from vector
211 * X + ICU_OFFSET.
212 */
213 #define APIC_INTR_REORDER
214
215 /*
216 * Redirect clock interrupts to a higher priority (fast intr) vector,
217 * while still using the slow interrupt handler. Only effective when
218 * APIC_INTR_REORDER is defined.
219 */
220 #define APIC_INTR_HIGHPRI_CLOCK
221
222 #endif /* APIC_IO */
223
224 /*
225 * Misc. counters.
226 *
227 #define COUNT_XINVLTLB_HITS
228 */
229
230
231 /**
232 * Hack to "fake-out" kernel into thinking it is running on a 'default config'.
233 *
234 * value == default type
235 #define TEST_DEFAULT_CONFIG 6
236 */
237
238
239 /*
240 * Simple test code for IPI interaction, save for future...
241 *
242 #define TEST_TEST1
243 #define IPI_TARGET_TEST1 1
244 */
245
246
247 /*
248 * Address of POST hardware port.
249 * Defining this enables POSTCODE macros.
250 *
251 #define POST_ADDR 0x80
252 */
253
254
/*
 * POST hardware macros.
 *
 * Each macro writes a byte to the POST diagnostic port (POST_ADDR,
 * conventionally 0x80) and mirrors the value in the kernel variable
 * _current_postcode, so early boot/SMP progress can be read off a POST
 * card.  These are assembly fragments for use in *.s files; each one
 * preserves %eax via a pushl/popl pair.  When POST_ADDR is not defined
 * they all expand to nothing.
 */
#ifdef POST_ADDR
/*
 * Increment current_postcode (masked to 8 bits, so it wraps at 0xff)
 * and emit the new value.
 */
#define ASMPOSTCODE_INC \
pushl %eax ; \
movl _current_postcode, %eax ; \
incl %eax ; \
andl $0xff, %eax ; \
movl %eax, _current_postcode ; \
outb %al, $POST_ADDR ; \
popl %eax

/*
 * Overwrite the current_postcode value with the constant X and emit it.
 */
#define ASMPOSTCODE(X) \
pushl %eax ; \
movl $X, %eax ; \
movl %eax, _current_postcode ; \
outb %al, $POST_ADDR ; \
popl %eax

/*
 * Overwrite the current_postcode low nibble with X (high nibble kept)
 * and emit the combined value.
 */
#define ASMPOSTCODE_LO(X) \
pushl %eax ; \
movl _current_postcode, %eax ; \
andl $0xf0, %eax ; \
orl $X, %eax ; \
movl %eax, _current_postcode ; \
outb %al, $POST_ADDR ; \
popl %eax

/*
 * Overwrite the current_postcode high nibble (low nibble kept) and emit
 * the combined value.  Callers pass an unshifted nibble; the macro
 * shifts X left by 4 itself.
 */
#define ASMPOSTCODE_HI(X) \
pushl %eax ; \
movl _current_postcode, %eax ; \
andl $0x0f, %eax ; \
orl $(X<<4), %eax ; \
movl %eax, _current_postcode ; \
outb %al, $POST_ADDR ; \
popl %eax
#else
/* POST_ADDR undefined: all POSTCODE macros compile away to nothing. */
#define ASMPOSTCODE_INC
#define ASMPOSTCODE(X)
#define ASMPOSTCODE_LO(X)
#define ASMPOSTCODE_HI(X)
#endif /* POST_ADDR */
307
308
309 /*
310 * These are all temps for debugging...
311 *
312 #define GUARD_INTS
313 */
314
/*
 * This macro traps unexpected INTs to a specific CPU, eg. GUARD_CPU.
 *
 * If the executing CPU is GUARD_CPU and the _ok_test1 flag equals 1,
 * the macro pushes the four local APIC ISR words, the four IRR words,
 * the IRQ number, the CPU id and a format string, calls _printf, then
 * pops the 11 arguments (11 * 4 = 44 bytes) back off the stack.
 * NOTE(review): despite the name, this only prints a diagnostic via
 * _printf; it does not actually call panic().
 * When GUARD_INTS is undefined, MAYBE_PANIC() expands to nothing.
 */
#ifdef GUARD_INTS
#define GUARD_CPU 1
#define MAYBE_PANIC(irq_num) \
cmpl $GUARD_CPU, _cpuid ; \
jne 9f ; \
cmpl $1, _ok_test1 ; \
jne 9f ; \
pushl lapic_isr3 ; \
pushl lapic_isr2 ; \
pushl lapic_isr1 ; \
pushl lapic_isr0 ; \
pushl lapic_irr3 ; \
pushl lapic_irr2 ; \
pushl lapic_irr1 ; \
pushl lapic_irr0 ; \
pushl $irq_num ; \
pushl _cpuid ; \
pushl $panic_msg ; \
call _printf ; \
addl $44, %esp ; \
9:
#else
#define MAYBE_PANIC(irq_num)
#endif /* GUARD_INTS */
342
343 #endif /* _MACHINE_SMPTESTS_H_ */
/* (web-cache extraction residue, not part of the original source:
 *  Cache object: 823423917e781e818b81fd84b7ba2003) */