1 /*
2 * Copyright (c) 1996, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD$
26 */
27
28 #ifndef _MACHINE_SMPTESTS_H_
29 #define _MACHINE_SMPTESTS_H_
30
31
32 /*
33 * Various 'tests in progress' and configuration parameters.
34 */
35
36
37 /*
38 * Tor's clock improvements.
39 *
40 * When the giant kernel lock disappears, a different strategy should
41 * probably be used, thus this patch can only be considered a temporary
42 * measure.
43 *
44 * This patch causes (NCPU-1)*(128+100) extra IPIs per second.
45 * During profiling, the number is (NCPU-1)*(1024+100) extra IPIs/s
46 * in addition to extra IPIs due to forwarding ASTs to other CPUs.
47 *
48 * Having a shared AST flag in an SMP configuration is wrong, and I've
49 * just kludged around it, based upon the kernel lock blocking other
50 * processors from entering the kernel while handling an AST for one
 * processor. When the giant kernel lock disappears, this kludge breaks.
52 *
53 * -- Tor
54 */
#define BETTER_CLOCK		/* enable Tor's clock IPI forwarding (see above) */


/*
 * Control the "giant lock" pushdown by logical steps.
 * Convention: appending "_NOT" to a level's name leaves the real symbol
 * (e.g. PUSHDOWN_LEVEL_3) undefined while keeping it visible for grepping.
 */
#define PUSHDOWN_LEVEL_1	/* level 1 active */
#define PUSHDOWN_LEVEL_2	/* level 2 active */
#define PUSHDOWN_LEVEL_3_NOT	/* level 3 disabled */
#define PUSHDOWN_LEVEL_4_NOT	/* level 4 disabled */
65
66
67 /*
68 * XXX some temp debug control of cpl locks
69 */
#ifdef PUSHDOWN_LEVEL_2
/*
 * Each REAL_x flag makes the matching xCPL_LOCK/xCPL_UNLOCK macros below
 * expand to real spinlock operations; undefined flags make them vanish.
 */
#define REAL_ECPL /* exception.s: SCPL_LOCK/SCPL_UNLOCK */
#define REAL_ICPL /* ipl.s: CPL_LOCK/CPL_UNLOCK/FAST */
#define REAL_AICPL /* apic_ipl.s: SCPL_LOCK/SCPL_UNLOCK */
#define REAL_AVCPL /* apic_vector.s: CPL_LOCK/CPL_UNLOCK */
#define REAL_IFCPL /* ipl_funcs.c: SCPL_LOCK/SCPL_UNLOCK */
#endif /* PUSHDOWN_LEVEL_2 */

/*
 * The xCPL_LOCK/xCPL_UNLOCK defines control the spinlocks
 * that protect cpl/cml/cil and the spl functions.
 */
#ifdef REAL_ECPL
#define ECPL_LOCK SCPL_LOCK		/* used from exception.s */
#define ECPL_UNLOCK SCPL_UNLOCK
#else
#define ECPL_LOCK			/* expands to nothing: no locking */
#define ECPL_UNLOCK
#endif /* REAL_ECPL */

#ifdef REAL_ICPL
#define ICPL_LOCK CPL_LOCK		/* used from ipl.s */
#define ICPL_UNLOCK CPL_UNLOCK
/* fast-path unlock: clear the lock word directly (assembler text for .s use) */
#define FAST_ICPL_UNLOCK movl $0, _cpl_lock
#else
#define ICPL_LOCK
#define ICPL_UNLOCK
#define FAST_ICPL_UNLOCK
#endif /* REAL_ICPL */

#ifdef REAL_AICPL
#define AICPL_LOCK SCPL_LOCK		/* used from apic_ipl.s */
#define AICPL_UNLOCK SCPL_UNLOCK
#else
#define AICPL_LOCK
#define AICPL_UNLOCK
#endif /* REAL_AICPL */

#ifdef REAL_AVCPL
#define AVCPL_LOCK CPL_LOCK		/* used from apic_vector.s */
#define AVCPL_UNLOCK CPL_UNLOCK
#else
#define AVCPL_LOCK
#define AVCPL_UNLOCK
#endif /* REAL_AVCPL */

#ifdef REAL_IFCPL
#define IFCPL_LOCK() SCPL_LOCK()	/* used from ipl_funcs.c */
#define IFCPL_UNLOCK() SCPL_UNLOCK()
#else
#define IFCPL_LOCK()
#define IFCPL_UNLOCK()
#endif /* REAL_IFCPL */
123
124
125 /*
126 * Debug version of simple_lock. This will store the CPU id of the
127 * holding CPU along with the lock. When a CPU fails to get the lock
128 * it compares its own id to the holder id. If they are the same it
129 * panic()s, as simple locks are binary, and this would cause a deadlock.
130 *
131 */
132 #define SL_DEBUG
133
134
135 /*
136 * Put FAST_INTR() ISRs at an APIC priority above the regular INTs.
137 * Allow the mp_lock() routines to handle FAST interrupts while spinning.
138 */
#ifdef PUSHDOWN_LEVEL_1
#define FAST_HI		/* FAST_INTR() ISRs get APIC priority above regular INTs */
#endif
142
143
144 /*
145 * These defines enable critical region locking of areas that were
146 * protected via cli/sti in the UP kernel.
147 *
148 * MPINTRLOCK protects all the generic areas.
149 * COMLOCK protects the sio/cy drivers.
150 * CLOCKLOCK protects clock hardware and data
151 * known to be incomplete:
152 * joystick lkm
153 * ?
154 */
#ifdef PUSHDOWN_LEVEL_1
#define USE_MPINTRLOCK		/* protects all the generic cli/sti areas */
#define USE_COMLOCK		/* protects the sio/cy drivers */
#define USE_CLOCKLOCK		/* protects clock hardware and data */
#endif
160
161
162 /*
163 * Regular INTerrupts without the giant lock, NOT READY YET!!!
164 */
#ifdef PUSHDOWN_LEVEL_4		/* not defined in this file (level 4 is "_NOT" above) */
#define INTR_SIMPLELOCK
#endif
168
169
170 /*
171 * Separate the INTR() portion of cpl into another variable: cml.
172 */
#ifdef PUSHDOWN_LEVEL_3		/* not defined in this file (level 3 is "_NOT" above) */
#define CPL_AND_CML
#endif
176
177
178 /*
179 * Forces spl functions to spin while waiting for safe time to change cpl.
180 *
#define SPL_DEBUG_POSTCODE (slows the system down noticeably)
182 */
#ifdef PUSHDOWN_LEVEL_3		/* not defined in this file (level 3 is "_NOT" above) */
#define INTR_SPL		/* spl functions spin until cpl change is safe */
#define SPL_DEBUG
#endif
187
188
189 /*
190 * Ignore the ipending bits when exiting FAST_INTR() routines.
191 *
192 ***
193 * according to Bruce:
194 *
195 * setsoft*() may set ipending. setsofttty() is actually used in the
196 * FAST_INTR handler in some serial drivers. This is necessary to get
197 * output completions and other urgent events handled as soon as possible.
198 * The flag(s) could be set in a variable other than ipending, but they
199 * needs to be checked against cpl to decide whether the software interrupt
200 * handler can/should run.
201 *
202 * (FAST_INTR used to just return
203 * in all cases until rev.1.7 of vector.s. This worked OK provided there
204 * were no user-mode CPU hogs. CPU hogs caused an average latency of 1/2
205 * clock tick for output completions...)
206 ***
207 *
208 * So I need to restore cpl handling someday, but AFTER
209 * I finish making spl/cpl MP-safe.
210 */
#ifdef PUSHDOWN_LEVEL_1
#define FAST_WITHOUTCPL		/* ignore ipending on FAST_INTR() exit; see caveat above */
#endif
214
215
216 /*
217 * Use a simplelock to serialize FAST_INTR()s.
218 * sio.c, and probably other FAST_INTR() drivers, never expected several CPUs
219 * to be inside them at once. Things such as global vars prevent more
220 * than 1 thread of execution from existing at once, so we serialize
221 * the access of FAST_INTR()s via a simple lock.
222 * One optimization on this would be a simple lock per DRIVER, but I'm
223 * not sure how to organize that yet...
224 */
#ifdef PUSHDOWN_LEVEL_1
#define FAST_SIMPLELOCK		/* one global simple lock serializes all FAST_INTR()s */
#endif
228
229
230 /*
231 * Portions of the old TEST_LOPRIO code, back from the grave!
232 */
233 #define GRAB_LOPRIO
234
235
236 /*
237 * Send CPUSTOP IPI for stop/restart of other CPUs on DDB break.
238 *
239 #define VERBOSE_CPUSTOP_ON_DDBBREAK
240 */
241 #define CPUSTOP_ON_DDBBREAK
242
243
244 /*
245 * Bracket code/comments relevant to the current 'giant lock' model.
246 * Everything is now the 'giant lock' model, but we will use this as
247 * we start to "push down" the lock.
248 */
249 #define GIANT_LOCK
250
#ifdef APIC_IO
/*
 * Enable extra counters for some selected locations in the interrupt handlers.
 * Look in apic_vector.s, apic_ipl.s and ipl.s for APIC_ITRACE or
 * APIC_INTR_DIAGNOSTIC.
 */
#undef APIC_INTR_DIAGNOSTIC	/* explicitly forced off */

/*
 * Add extra tracking of a specific interrupt. Look in apic_vector.s,
 * apic_ipl.s and ipl.s for APIC_ITRACE and log_intr_event.
 * APIC_INTR_DIAGNOSTIC must be defined for this to work.
 */
#ifdef APIC_INTR_DIAGNOSTIC
#define APIC_INTR_DIAGNOSTIC_IRQ 17	/* the IRQ being tracked */
#endif

/*
 * Don't assume that slow interrupt handler X is called from vector
 * X + ICU_OFFSET.
 */
#define APIC_INTR_REORDER

/*
 * Redirect clock interrupts to a higher priority (fast intr) vector,
 * while still using the slow interrupt handler. Only effective when
 * APIC_INTR_REORDER is defined.
 */
#define APIC_INTR_HIGHPRI_CLOCK

#endif /* APIC_IO */
282
283 /*
284 * Misc. counters.
285 *
286 #define COUNT_XINVLTLB_HITS
287 */
288
289
290 /**
291 * Hack to "fake-out" kernel into thinking it is running on a 'default config'.
292 *
293 * value == default type
294 #define TEST_DEFAULT_CONFIG 6
295 */
296
297
298 /*
299 * Simple test code for IPI interaction, save for future...
300 *
301 #define TEST_TEST1
302 #define IPI_TARGET_TEST1 1
303 */
304
305
306 /*
307 * Address of POST hardware port.
308 * Defining this enables POSTCODE macros.
309 *
310 #define POST_ADDR 0x80
311 */
312
313
314 /*
315 * POST hardware macros.
316 */
#ifdef POST_ADDR
/*
 * Assembler macros for cpp-preprocessed .s files: each saves/restores
 * %eax, updates _current_postcode, and writes the low byte to the POST
 * hardware port at POST_ADDR.
 *
 * ASMPOSTCODE_INC: increment current_postcode, wrapping at 0xff.
 */
#define ASMPOSTCODE_INC \
	pushl %eax ; \
	movl _current_postcode, %eax ; \
	incl %eax ; \
	andl $0xff, %eax ; \
	movl %eax, _current_postcode ; \
	outb %al, $POST_ADDR ; \
	popl %eax

/*
 * Overwrite the current_postcode value.
 */
#define ASMPOSTCODE(X) \
	pushl %eax ; \
	movl $X, %eax ; \
	movl %eax, _current_postcode ; \
	outb %al, $POST_ADDR ; \
	popl %eax

/*
 * Overwrite the current_postcode low nibble.
 */
#define ASMPOSTCODE_LO(X) \
	pushl %eax ; \
	movl _current_postcode, %eax ; \
	andl $0xf0, %eax ; \
	orl $X, %eax ; \
	movl %eax, _current_postcode ; \
	outb %al, $POST_ADDR ; \
	popl %eax

/*
 * Overwrite the current_postcode high nibble.
 */
#define ASMPOSTCODE_HI(X) \
	pushl %eax ; \
	movl _current_postcode, %eax ; \
	andl $0x0f, %eax ; \
	orl $(X<<4), %eax ; \
	movl %eax, _current_postcode ; \
	outb %al, $POST_ADDR ; \
	popl %eax
#else
/* POST_ADDR not defined: all POSTCODE macros expand to nothing. */
#define ASMPOSTCODE_INC
#define ASMPOSTCODE(X)
#define ASMPOSTCODE_LO(X)
#define ASMPOSTCODE_HI(X)
#endif /* POST_ADDR */
366
367
368 /*
369 * These are all temps for debugging...
370 *
371 #define GUARD_INTS
372 */
373
374 /*
375 * This macro traps unexpected INTs to a specific CPU, eg. GUARD_CPU.
376 */
#ifdef GUARD_INTS
#define GUARD_CPU 1		/* the CPU id being watched for unexpected INTs */
/*
 * If executing on GUARD_CPU while _ok_test1 == 1, log the unexpected
 * interrupt: push the four local APIC ISR and IRR words, the IRQ number,
 * the CPU id and a message string, then call printf (11 dwords = 44
 * stack bytes, released by the addl). Falls through to local label 9
 * in all cases. NB: despite the name, this printf()s rather than panics.
 */
#define MAYBE_PANIC(irq_num) \
	cmpl $GUARD_CPU, _cpuid ; \
	jne 9f ; \
	cmpl $1, _ok_test1 ; \
	jne 9f ; \
	pushl lapic_isr3 ; \
	pushl lapic_isr2 ; \
	pushl lapic_isr1 ; \
	pushl lapic_isr0 ; \
	pushl lapic_irr3 ; \
	pushl lapic_irr2 ; \
	pushl lapic_irr1 ; \
	pushl lapic_irr0 ; \
	pushl $irq_num ; \
	pushl _cpuid ; \
	pushl $panic_msg ; \
	call _printf ; \
	addl $44, %esp ; \
9:
#else
#define MAYBE_PANIC(irq_num)
#endif /* GUARD_INTS */
401
402 #endif /* _MACHINE_SMPTESTS_H_ */