1 /*-
2 * Copyright (c) 1997 Bruce Evans.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD$
27 */
28
29 #include <sys/types.h>
30 #include <sys/systm.h>
31 #include <machine/ipl.h>
32
33 #ifndef SMP
34 /*
35 * The volatile bitmap variables must be set atomically. This normally
36 * involves using a machine-dependent bit-set or `or' instruction.
37 */
38
/*
 * Generate a void function `name' that sets `bits' in the volatile
 * interrupt bitmap pointed to by `var'.  setbits() does the update
 * atomically (machine-dependent bit-set/`or'), so on UP no further
 * locking is required.
 */
#define DO_SETBITS(name, var, bits) \
void name(void) \
{ \
	setbits(var, bits); \
}
44
/*
 * setsoft*(): post a software interrupt by setting its bit in ipending.
 * setdelayed(): atomically drain idelayed and merge those bits into
 * ipending, making previously scheduled SWIs deliverable.
 */
DO_SETBITS(setdelayed, &ipending, loadandclear((unsigned *)&idelayed))
DO_SETBITS(setsoftast, &ipending, SWI_AST_PENDING)
DO_SETBITS(setsoftcamnet,&ipending, SWI_CAMNET_PENDING)
DO_SETBITS(setsoftcambio,&ipending, SWI_CAMBIO_PENDING)
DO_SETBITS(setsoftclock, &ipending, SWI_CLOCK_PENDING)
DO_SETBITS(setsoftnet, &ipending, SWI_NET_PENDING)
DO_SETBITS(setsofttty, &ipending, SWI_TTY_PENDING)
DO_SETBITS(setsoftvm, &ipending, SWI_VM_PENDING)

/*
 * schedsoft*(): record a software interrupt in idelayed; it is moved
 * into ipending later by setdelayed().
 */
DO_SETBITS(schedsoftcamnet, &idelayed, SWI_CAMNET_PENDING)
DO_SETBITS(schedsoftcambio, &idelayed, SWI_CAMBIO_PENDING)
DO_SETBITS(schedsoftnet, &idelayed, SWI_NET_PENDING)
DO_SETBITS(schedsofttty, &idelayed, SWI_TTY_PENDING)
DO_SETBITS(schedsoftvm, &idelayed, SWI_VM_PENDING)
59
60 unsigned
61 softclockpending(void)
62 {
63 return (ipending & SWI_CLOCK_PENDING);
64 }
65
/*
 * Generate a UP spl*() routine: save the current priority level in x,
 * apply `set_cpl' (either OR-ing a mask into cpl or assigning one),
 * and return the saved level so the caller can restore it with splx().
 */
#define GENSPL(name, set_cpl) \
unsigned name(void) \
{ \
	unsigned x; \
\
	x = cpl; \
	set_cpl; \
	return (x); \
}
75
/*
 * The standard spl*() set.  splclock/splhigh block everything;
 * splsoftclock *lowers* to the clock-SWI mask (assignment, not OR);
 * the rest OR in the mask(s) of the interrupt class they protect.
 */
GENSPL(splbio, cpl |= bio_imask)
GENSPL(splcam, cpl |= cam_imask)
GENSPL(splclock, cpl = HWI_MASK | SWI_MASK)
GENSPL(splhigh, cpl = HWI_MASK | SWI_MASK)
GENSPL(splimp, cpl |= net_imask)
GENSPL(splnet, cpl |= SWI_NET_MASK)
GENSPL(splsoftcam, cpl |= SWI_CAMBIO_MASK | SWI_CAMNET_MASK)
GENSPL(splsoftcambio, cpl |= SWI_CAMBIO_MASK)
GENSPL(splsoftcamnet, cpl |= SWI_CAMNET_MASK)
GENSPL(splsoftclock, cpl = SWI_CLOCK_MASK)
GENSPL(splsofttty, cpl |= SWI_TTY_MASK)
GENSPL(splsoftvm, cpl |= SWI_VM_MASK)
GENSPL(splstatclock, cpl |= stat_imask)
GENSPL(spltty, cpl |= tty_imask)
GENSPL(splvm, cpl |= net_imask | bio_imask | cam_imask)
91
/*
 * Drop to the lowest priority level, keeping only the AST software
 * interrupt masked, then dispatch any other interrupts that became
 * deliverable.  cpl is written before ipending is examined.
 */
void
spl0(void)
{
	cpl = SWI_AST_MASK;
	if (ipending & ~SWI_AST_MASK)
		splz();
}
99
/*
 * Restore the priority level `ipl' previously returned by an spl*()
 * call, then dispatch any pending interrupts no longer masked by the
 * new level.
 */
void
splx(unsigned ipl)
{
	cpl = ipl;
	if (ipending & ~ipl)
		splz();
}
107
108 #else /* !SMP */
109
110 #include <machine/smp.h>
111 #include <machine/smptests.h>
112
113 #ifndef SPL_DEBUG_POSTCODE
114 #undef POSTCODE
115 #undef POSTCODE_LO
116 #undef POSTCODE_HI
117 #define POSTCODE(X)
118 #define POSTCODE_LO(X)
119 #define POSTCODE_HI(X)
120 #endif /* SPL_DEBUG_POSTCODE */
121
122
123 /*
124 * The volatile bitmap variables must be set atomically. This normally
125 * involves using a machine-dependent bit-set or `or' instruction.
126 */
127
/*
 * SMP variant of DO_SETBITS: the atomic setbits() update is bracketed
 * by the cpl interlock so it is serialized against the other CPUs'
 * spl manipulations.
 */
#define DO_SETBITS(name, var, bits) \
void name(void) \
{ \
	IFCPL_LOCK(); \
	setbits(var, bits); \
	IFCPL_UNLOCK(); \
}
135
/*
 * setsoft*(): post a software interrupt in ipending.
 * setdelayed(): drain idelayed into ipending.
 * schedsoft*(): defer a software interrupt via idelayed.
 * (Same set as the UP case, but generated with locking.)
 */
DO_SETBITS(setdelayed, &ipending, loadandclear((unsigned *)&idelayed))
DO_SETBITS(setsoftast, &ipending, SWI_AST_PENDING)
DO_SETBITS(setsoftcamnet,&ipending, SWI_CAMNET_PENDING)
DO_SETBITS(setsoftcambio,&ipending, SWI_CAMBIO_PENDING)
DO_SETBITS(setsoftclock, &ipending, SWI_CLOCK_PENDING)
DO_SETBITS(setsoftnet, &ipending, SWI_NET_PENDING)
DO_SETBITS(setsofttty, &ipending, SWI_TTY_PENDING)
DO_SETBITS(setsoftvm, &ipending, SWI_VM_PENDING)

DO_SETBITS(schedsoftcamnet, &idelayed, SWI_CAMNET_PENDING)
DO_SETBITS(schedsoftcambio, &idelayed, SWI_CAMBIO_PENDING)
DO_SETBITS(schedsoftnet, &idelayed, SWI_NET_PENDING)
DO_SETBITS(schedsofttty, &idelayed, SWI_TTY_PENDING)
DO_SETBITS(schedsoftvm, &idelayed, SWI_VM_PENDING)
150
151 unsigned
152 softclockpending(void)
153 {
154 unsigned x;
155
156 IFCPL_LOCK();
157 x = ipending & SWI_CLOCK_PENDING;
158 IFCPL_UNLOCK();
159
160 return (x);
161 }
162
163
164 /*
165 * This version has to check for bsp_apic_ready,
166 * as calling simple_lock() (ie ss_lock) before then deadlocks the system.
167 * A sample count of GENSPL calls before bsp_apic_ready was set: 2193
168 */
169
170 #ifdef INTR_SPL
171
#ifdef SPL_DEBUG
#define MAXZ		100000000
#define SPIN_VAR	unsigned z;
#define SPIN_RESET	z = 0;
#if 0
#define SPIN_SPL						\
		if (++z >= MAXZ) {				\
			/* XXX allow lock-free panic */		\
			bsp_apic_ready = 0;			\
			panic("\ncil: 0x%08x", cil);		\
		}
#else
#define SPIN_SPL						\
		if (++z >= MAXZ) {				\
			/* XXX allow lock-free panic */		\
			bsp_apic_ready = 0;			\
			printf("\ncil: 0x%08x", cil);		\
			breakpoint();				\
		}
#endif /* 0/1 */
#else /* SPL_DEBUG */
#define SPIN_VAR
#define SPIN_RESET
/*
 * SPIN_SPL must expand to a *statement* (a lone `;'), not to nothing.
 * Call sites are written as
 *	while (cil & y)
 *		SPIN_SPL
 *	continue;
 * and with an empty expansion the `continue' silently became the body
 * of the `while', so after the spin control fell through to the
 * following `break' with the cpl interlock released -- the caller then
 * modified cpl unlocked and called IFCPL_UNLOCK() on a lock it did not
 * hold.  The semicolon keeps `continue' bound to the enclosing for
 * loop, matching the SPL_DEBUG variants (whose if-blocks are the loop
 * body).
 */
#define SPIN_SPL ;
#endif /* SPL_DEBUG */
197
198 #endif
199
200 #ifdef INTR_SPL
201
/*
 * Generate an INTR_SPL spl*() body.
 *
 * NAME     - function to generate
 * OP       - how the mask is applied to cpl (`|=' or `=')
 * MODIFIER - interrupt mask bits to raise
 * PC       - POSTCODE value for SPL_DEBUG_POSTCODE tracing
 *
 * Returns the previous cpl for a later splx().  Before bsp_apic_ready
 * is set the interlock must not be taken (see comment above), so cpl
 * is updated without locking.  Otherwise the routine spins until no
 * interrupt named in the desired mask is in service (cil) before
 * raising cpl under the lock.
 *
 * The spin body is braced so that an empty SPIN_SPL expansion cannot
 * swallow the `continue' that retries the outer loop.
 */
#define GENSPL(NAME, OP, MODIFIER, PC)				\
unsigned NAME(void)						\
{								\
	unsigned x, y;						\
	SPIN_VAR;						\
								\
	if (!bsp_apic_ready) {					\
		x = cpl;					\
		cpl OP MODIFIER;				\
		return (x);					\
	}							\
								\
	for (;;) {						\
		IFCPL_LOCK();	/* MP-safe */			\
		x = y = cpl;	/* current value */		\
		POSTCODE(0x20 | PC);				\
		if (inside_intr)				\
			break;	/* XXX only 1 INT allowed */	\
		y OP MODIFIER;	/* desired value */		\
		if (cil & y) {	/* not now */			\
			IFCPL_UNLOCK();	/* allow cil to change */ \
			SPIN_RESET;				\
			while (cil & y) {			\
				SPIN_SPL			\
			}					\
			continue;	/* try again */		\
		}						\
		break;						\
	}							\
	cpl OP MODIFIER;	/* make the change */		\
	IFCPL_UNLOCK();						\
								\
	return (x);						\
}
235
/*
 * The standard spl*() set for the INTR_SPL case.  splclock/splhigh
 * assign a full mask; the others OR in their class mask.  The PC
 * column is the unique POSTCODE tag for each routine.
 */
/* NAME: OP: MODIFIER: PC: */
GENSPL(splbio, |=, bio_imask, 2)
GENSPL(splcam, |=, cam_imask, 7)
GENSPL(splclock, =, HWI_MASK | SWI_MASK, 3)
GENSPL(splhigh, =, HWI_MASK | SWI_MASK, 4)
GENSPL(splimp, |=, net_imask, 5)
GENSPL(splnet, |=, SWI_NET_MASK, 6)
GENSPL(splsoftcam, |=, SWI_CAMBIO_MASK | SWI_CAMNET_MASK, 8)
GENSPL(splsoftcambio, |=, SWI_CAMBIO_MASK, 9)
GENSPL(splsoftcamnet, |=, SWI_CAMNET_MASK, 10)
GENSPL(splsoftclock, =, SWI_CLOCK_MASK, 11)
GENSPL(splsofttty, |=, SWI_TTY_MASK, 12)
GENSPL(splsoftvm, |=, SWI_VM_MASK, 16)
GENSPL(splstatclock, |=, stat_imask, 13)
GENSPL(spltty, |=, tty_imask, 14)
GENSPL(splvm, |=, net_imask | bio_imask | cam_imask, 15)
252
253 #else /* INTR_SPL */
254
/*
 * Generate an SMP (non-INTR_SPL) spl*() body: save the old cpl, apply
 * `set_cpl', and return the old level.  Once bsp_apic_ready is set the
 * update is serialized with the cpl interlock; before that the lock
 * must not be taken, so cpl is modified directly.
 */
#define GENSPL(NAME, set_cpl) \
unsigned NAME(void) \
{ \
	unsigned ocpl; \
\
	if (bsp_apic_ready) { \
		IFCPL_LOCK(); \
		ocpl = cpl; \
		set_cpl; \
		IFCPL_UNLOCK(); \
	} else { \
		ocpl = cpl; \
		set_cpl; \
	} \
\
	return (ocpl); \
}
273
/*
 * The standard spl*() set (non-INTR_SPL SMP case); same masks as the
 * UP variants, generated with conditional locking.
 */
GENSPL(splbio, cpl |= bio_imask)
GENSPL(splclock, cpl = HWI_MASK | SWI_MASK)
GENSPL(splhigh, cpl = HWI_MASK | SWI_MASK)
GENSPL(splimp, cpl |= net_imask)
GENSPL(splnet, cpl |= SWI_NET_MASK)
GENSPL(splcam, cpl |= cam_imask)
GENSPL(splsoftcam, cpl |= SWI_CAMBIO_MASK | SWI_CAMNET_MASK)
GENSPL(splsoftcambio, cpl |= SWI_CAMBIO_MASK)
GENSPL(splsoftcamnet, cpl |= SWI_CAMNET_MASK)
GENSPL(splsoftclock, cpl = SWI_CLOCK_MASK)
GENSPL(splsofttty, cpl |= SWI_TTY_MASK)
GENSPL(splsoftvm, cpl |= SWI_VM_MASK)
GENSPL(splstatclock, cpl |= stat_imask)
GENSPL(spltty, cpl |= tty_imask)
GENSPL(splvm, cpl |= net_imask | bio_imask | cam_imask)
289
290 #endif /* INTR_SPL */
291
292
293 void
294 spl0(void)
295 {
296 int unpend;
297 #ifdef INTR_SPL
298 SPIN_VAR;
299
300 for (;;) {
301 IFCPL_LOCK();
302 POSTCODE_HI(0xc);
303 if (cil & SWI_AST_MASK) { /* not now */
304 IFCPL_UNLOCK(); /* allow cil to change */
305 SPIN_RESET;
306 while (cil & SWI_AST_MASK)
307 SPIN_SPL
308 continue; /* try again */
309 }
310 break;
311 }
312 #else /* INTR_SPL */
313 IFCPL_LOCK();
314 #endif /* INTR_SPL */
315
316 cpl = SWI_AST_MASK;
317 unpend = ipending & ~SWI_AST_MASK;
318 IFCPL_UNLOCK();
319
320 if (unpend && !inside_intr)
321 splz();
322 }
323
324 void
325 splx(unsigned ipl)
326 {
327 int unpend;
328 #ifdef INTR_SPL
329 SPIN_VAR;
330 #endif
331
332 if (!bsp_apic_ready) {
333 cpl = ipl;
334 if (ipending & ~ipl)
335 splz();
336 return;
337 }
338
339 #ifdef INTR_SPL
340 for (;;) {
341 IFCPL_LOCK();
342 POSTCODE_HI(0xe);
343 if (inside_intr)
344 break; /* XXX only 1 INT allowed */
345 POSTCODE_HI(0xf);
346 if (cil & ipl) { /* not now */
347 IFCPL_UNLOCK(); /* allow cil to change */
348 SPIN_RESET;
349 while (cil & ipl)
350 SPIN_SPL
351 continue; /* try again */
352 }
353 break;
354 }
355 #else /* INTR_SPL */
356 IFCPL_LOCK();
357 #endif /* INTR_SPL */
358
359 cpl = ipl;
360 unpend = ipending & ~ipl;
361 IFCPL_UNLOCK();
362
363 if (unpend && !inside_intr)
364 splz();
365 }
366
367
368 /*
369 * Replaces UP specific inline found in (?) pci/pci_support.c.
370 *
371 * Stefan said:
372 * You know, that splq() is used in the shared interrupt multiplexer, and that
373 * the SMP version should not have too much overhead. If it is significantly
374 * slower, then moving the splq() out of the loop in intr_mux() and passing in
375 * the logical OR of all mask values might be a better solution than the
376 * current code. (This logical OR could of course be pre-calculated whenever
377 * another shared interrupt is registered ...)
378 */
/*
 * Raise the priority level by OR-ing `mask' into cpl, returning the
 * previous cpl.  With INTR_SPL the update spins (with an explicit
 * empty-statement body, so the `continue' correctly retries the outer
 * loop) until no interrupt named in the desired mask is in service.
 */
intrmask_t
splq(intrmask_t mask)
{
	intrmask_t tmp;
#ifdef INTR_SPL
	intrmask_t tmp2;

	for (;;) {
		IFCPL_LOCK();
		tmp = tmp2 = cpl;
		tmp2 |= mask;
		if (cil & tmp2) {	/* not now */
			IFCPL_UNLOCK();	/* allow cil to change */
			while (cil & tmp2)
				/* spin */ ;
			continue;	/* try again */
		}
		break;
	}
	cpl = tmp2;
#else /* INTR_SPL */
	IFCPL_LOCK();
	tmp = cpl;
	cpl |= mask;
#endif /* INTR_SPL */

	IFCPL_UNLOCK();
	return (tmp);
}
408
409 #endif /* !SMP */
Cache object: b97f7cc7602bac9b8bf69fb0ac560b19
|