FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/mplock.s
/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.org> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 * Functions for locking between CPUs in an SMP system.
 *
 * This is an "exclusive counting semaphore".  This means that it can be
 * free (0xffffffff) or be owned by a CPU (0xXXYYYYYY, where XX is the
 * CPU id and YYYYYY is the count).
 *
 * Contrary to most implementations around, this one is entirely atomic:
 * the attempt to seize/release the semaphore and the increment/decrement
 * are done in one atomic operation.  This way we are safe from all kinds
 * of weird reentrancy situations.
 *
 */
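
/*
 * Illustrative sketch (not part of the build): the lock-word encoding
 * described above, written out as C helpers.  The EX_* masks below are
 * stand-ins chosen to match the 0xXXYYYYYY description; the real
 * FREE_LOCK/COUNT_FIELD constants come from the kernel headers.
 *
 *	#define EX_FREE_LOCK	0xffffffffU	// lock is not owned
 *	#define EX_CPU_FIELD	0xff000000U	// XX: owning CPU id (pre-shifted)
 *	#define EX_COUNT_FIELD	0x00ffffffU	// YYYYYY: recursion count
 *
 *	static int lock_is_free(unsigned int w) { return w == EX_FREE_LOCK; }
 *	static int lock_owner(unsigned int w)   { return (w & EX_CPU_FIELD) >> 24; }
 *	static int lock_count(unsigned int w)   { return w & EX_COUNT_FIELD; }
 */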

#include <machine/asmacros.h>
#include <machine/smptests.h>		/** GRAB_LOPRIO */
#include <machine/apic.h>

#define GLPROFILE_NOT

#ifdef CHEAP_TPR

/* we assume that the 'reserved bits' can be written with zeros */

#else /* CHEAP_TPR */

#error HEADS UP: this code needs work
/*
 * The APIC doc says that reserved bits must be written with whatever
 * value they currently contain, i.e. you should read, modify and write
 * rather than just writing new values to the TPR register.  Current
 * silicon seems happy with just writing.  If the behaviour of the
 * silicon changes, all code that accesses lapic_tpr must be modified.
 * The last version to contain such code was:
 *	Id: mplock.s,v 1.17 1997/08/10 20:59:07 fsmp Exp
 */

#endif /* CHEAP_TPR */
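
/*
 * For reference only: the read-modify-write sequence the APIC documentation
 * asks for would look roughly like this in C.  This is a hypothetical
 * sketch; APIC_TPR_PRIO is assumed here to be the mask of the priority
 * bits, and only those bits are replaced so the reserved bits keep
 * whatever value they currently hold.
 *
 *	unsigned int tpr;
 *
 *	tpr = lapic_tpr;				// read
 *	tpr = (tpr & ~APIC_TPR_PRIO) | new_prio;	// modify priority only
 *	lapic_tpr = tpr;				// write back
 */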

#ifdef GRAB_LOPRIO
/*
 * Claim LOWest PRIOrity, i.e. attempt to grab ALL INTerrupts.
 */

/* location of saved TPR on stack */
#define TPR_TARGET	12(%esp)

/* after the 1st acquire of the lock we attempt to grab all hardware INTs */
#define GRAB_HWI	movl	$ALLHWI_LEVEL, TPR_TARGET
#define GRAB_HWI_2	movl	$ALLHWI_LEVEL, lapic_tpr /* CHEAP_TPR */

/* after the last release of the lock, give up LOW PRIO (i.e. arbitrate INTerrupts) */
#define ARB_HWI		movl	$LOPRIO_LEVEL, lapic_tpr /* CHEAP_TPR */

#else /* GRAB_LOPRIO */

#define GRAB_HWI	/* nop */
#define GRAB_HWI_2	/* nop */
#define ARB_HWI		/* nop */

#endif /* GRAB_LOPRIO */


	.text
/***********************************************************************
 *  void MPgetlock(unsigned int *lock)
 *  ----------------------------------
 *  Destroys	%eax, %ecx, %edx and 12(%esp).
 */

NON_GPROF_ENTRY(MPgetlock)
	movl	4(%esp), %edx		/* Get the address of the lock */
1:
	movl	$FREE_LOCK, %eax	/* Assume it's free */
	movl	_cpu_lockid, %ecx	/* - get pre-shifted logical cpu id */
	incl	%ecx			/* - new count is one */
	lock
	cmpxchg	%ecx, (%edx)		/* - try it atomically */
	jne	2f			/* ...do not collect $200 */
#ifdef GLPROFILE
	incl	_gethits2
#endif /* GLPROFILE */
	GRAB_HWI			/* 1st acquire, grab hw INTs */
	ret
2:
	movl	(%edx), %eax		/* Try to see if we have it already */
	andl	$COUNT_FIELD, %eax	/* - get count */
	movl	_cpu_lockid, %ecx	/* - get pre-shifted logical cpu id */
	orl	%ecx, %eax		/* - combine them */
	movl	%eax, %ecx
	incl	%ecx			/* - new count is one more */
	lock
	cmpxchg	%ecx, (%edx)		/* - try it atomically */
#ifdef GLPROFILE
	jne	4f			/* - miss */
	incl	_gethits
#else
	jne	3f			/* - miss */
#endif /* GLPROFILE */
	ret
#ifdef GLPROFILE
4:
	incl	_gethits3
#endif /* GLPROFILE */
3:
	cmpl	$FREE_LOCK, (%edx)	/* Wait for it to become free */
	jne	3b
	jmp	1b
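
/*
 * Rough C equivalent of the acquire path above (illustrative only; the
 * TPR handling done by GRAB_HWI is omitted, and atomic_cmpset_int() is
 * used here as a stand-in for the lock/cmpxchg pair):
 *
 *	void MPgetlock(unsigned int *lock)
 *	{
 *		unsigned int old;
 *
 *		for (;;) {
 *			// fast path: lock is free, take it with a count of one
 *			if (atomic_cmpset_int(lock, FREE_LOCK, cpu_lockid + 1))
 *				return;
 *			// recursion path: we already own it, bump the count
 *			old = cpu_lockid | (*lock & COUNT_FIELD);
 *			if (atomic_cmpset_int(lock, old, old + 1))
 *				return;
 *			// otherwise spin until the owner releases it, then retry
 *			while (*lock != FREE_LOCK)
 *				;
 *		}
 *	}
 */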


/***********************************************************************
 *  int MPtrylock(unsigned int *lock)
 *  ---------------------------------
 *  Destroys	%eax, %ecx and %edx.
 *  Returns	1 if the lock was acquired successfully.
 */

NON_GPROF_ENTRY(MPtrylock)
	movl	4(%esp), %edx		/* Get the address of the lock */

	movl	$FREE_LOCK, %eax	/* Assume it's free */
	movl	_cpu_lockid, %ecx	/* - get pre-shifted logical cpu id */
	incl	%ecx			/* - new count is one */
	lock
	cmpxchg	%ecx, (%edx)		/* - try it atomically */
	jne	1f			/* ...do not collect $200 */
#ifdef GLPROFILE
	incl	_tryhits2
#endif /* GLPROFILE */
	GRAB_HWI_2			/* 1st acquire, grab hw INTs */
	movl	$1, %eax
	ret
1:
	movl	(%edx), %eax		/* Try to see if we have it already */
	andl	$COUNT_FIELD, %eax	/* - get count */
	movl	_cpu_lockid, %ecx	/* - get pre-shifted logical cpu id */
	orl	%ecx, %eax		/* - combine them */
	movl	%eax, %ecx
	incl	%ecx			/* - new count is one more */
	lock
	cmpxchg	%ecx, (%edx)		/* - try it atomically */
	jne	2f			/* - miss */
#ifdef GLPROFILE
	incl	_tryhits
#endif /* GLPROFILE */
	movl	$1, %eax
	ret
2:
#ifdef GLPROFILE
	incl	_tryhits3
#endif /* GLPROFILE */
	movl	$0, %eax
	ret
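
/*
 * Rough C equivalent of MPtrylock (illustrative only): the same two
 * attempts as MPgetlock, but a contended lock makes it return 0
 * instead of spinning.
 *
 *	int MPtrylock(unsigned int *lock)
 *	{
 *		unsigned int old;
 *
 *		if (atomic_cmpset_int(lock, FREE_LOCK, cpu_lockid + 1))
 *			return (1);		// was free, now ours with count 1
 *		old = cpu_lockid | (*lock & COUNT_FIELD);
 *		if (atomic_cmpset_int(lock, old, old + 1))
 *			return (1);		// already ours, count bumped
 *		return (0);			// owned by another CPU
 *	}
 */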


/***********************************************************************
 *  void MPrellock(unsigned int *lock)
 *  ----------------------------------
 *  Destroys	%eax, %ecx and %edx.
 */

NON_GPROF_ENTRY(MPrellock)
	movl	4(%esp), %edx		/* Get the address of the lock */
1:
	movl	(%edx), %eax		/* - get the value */
	movl	%eax, %ecx
	decl	%ecx			/* - new count is one less */
	testl	$COUNT_FIELD, %ecx	/* - Unless it's zero... */
	jnz	2f
	ARB_HWI				/* last release, arbitrate hw INTs */
	movl	$FREE_LOCK, %ecx	/* - In which case we release it */
2:
	lock
	cmpxchg	%ecx, (%edx)		/* - try it atomically */
	jne	1b			/* ...do not collect $200 */
	ret
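
/*
 * Rough C equivalent of the release path (illustrative only; the TPR
 * handling done by ARB_HWI is omitted): drop one recursion level and
 * hand the lock back as FREE_LOCK when the count reaches zero.
 *
 *	void MPrellock(unsigned int *lock)
 *	{
 *		unsigned int old, new;
 *
 *		do {
 *			old = *lock;
 *			new = old - 1;
 *			if ((new & COUNT_FIELD) == 0)	// last reference
 *				new = FREE_LOCK;
 *		} while (!atomic_cmpset_int(lock, old, new));
 *	}
 */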


/***********************************************************************
 *  void get_mplock()
 *  -----------------
 *  All registers preserved.
 *
 *  Stack (after the call to _MPgetlock):
 *
 *	&mp_lock	 4(%esp)
 *	EFLAGS		 8(%esp)
 *	local APIC TPR	12(%esp)
 *	edx		16(%esp)
 *	ecx		20(%esp)
 *	eax		24(%esp)
 */

NON_GPROF_ENTRY(get_mplock)
	pushl	%eax
	pushl	%ecx
	pushl	%edx

	/* block all HW INTs via Task Priority Register */
	pushl	lapic_tpr		/* save current TPR */
	pushfl				/* save current EFLAGS */
	testl	$(1<<9), (%esp)		/* test IF bit (interrupts enabled?) */
	jnz	1f			/* INTs currently enabled, skip sti */
	sti				/* allow IPI and FAST INTs */
1:
	pushl	$_mp_lock
	call	_MPgetlock
	add	$4, %esp

	popfl				/* restore original EFLAGS */
	popl	lapic_tpr		/* restore TPR */
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
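
/*
 * Typical use from C code (illustrative): the lock counts, so calls may
 * nest; the TPR/EFLAGS juggling above is hidden inside the wrappers.
 *
 *	get_mplock();			// acquire (or re-enter) mp_lock
 *	...touch MP-protected kernel state...
 *	rel_mplock();			// drop one level, frees it at zero
 */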

/*
 * Special version of get_mplock that is used during bootstrap, when we
 * cannot enable interrupts of any sort because the APIC isn't online yet.
 *
 * XXX FIXME.. - the APIC should be online from the start to simplify IPIs.
 */
NON_GPROF_ENTRY(boot_get_mplock)
	pushl	%eax
	pushl	%ecx
	pushl	%edx

#ifdef GRAB_LOPRIO
	pushl	$0			/* dummy local APIC TPR slot */
	pushfl
#endif

	pushl	$_mp_lock
	call	_MPgetlock
	add	$4, %esp

#ifdef GRAB_LOPRIO
	popfl
	addl	$4, %esp		/* discard the dummy TPR slot */
#endif

	popl	%edx
	popl	%ecx
	popl	%eax
	ret

/***********************************************************************
 *  void try_mplock()
 *  -----------------
 *  Returns 1 in %eax on success.
 */

NON_GPROF_ENTRY(try_mplock)
	pushl	%ecx
	pushl	%edx
	pushl	$_mp_lock
	call	_MPtrylock
	add	$4, %esp
	popl	%edx
	popl	%ecx
	ret

/***********************************************************************
 *  void rel_mplock()
 *  -----------------
 *  All registers preserved.
 */

NON_GPROF_ENTRY(rel_mplock)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$_mp_lock
	call	_MPrellock
	add	$4, %esp
	popl	%edx
	popl	%ecx
	popl	%eax
	ret

/***********************************************************************
 *  void get_isrlock()
 *  -----------------
 *  No registers preserved; it is assumed the calling ISR does that.
 *
 *  Stack (after the call to _MPgetlock):
 *
 *	&mp_lock	 4(%esp)
 *	EFLAGS		 8(%esp)
 *	local APIC TPR	12(%esp)
 */

NON_GPROF_ENTRY(get_isrlock)

	/* block all HW INTs via Task Priority Register */
	pushl	lapic_tpr		/* save current TPR */
	pushfl				/* save current EFLAGS */
	sti				/* allow IPI and FAST INTs */

	pushl	$_mp_lock
	call	_MPgetlock
	add	$4, %esp

	popfl				/* restore original EFLAGS */
	popl	lapic_tpr		/* restore TPR */
	ret


/***********************************************************************
 *  void try_isrlock()
 *  -----------------
 *  No registers preserved; it is assumed the calling ISR does that.
 *  Returns 1 in %eax on success.
 */

NON_GPROF_ENTRY(try_isrlock)
	pushl	$_mp_lock
	call	_MPtrylock
	add	$4, %esp
	ret


/***********************************************************************
 *  void rel_isrlock()
 *  -----------------
 *  No registers preserved; it is assumed the calling ISR does that.
 */

NON_GPROF_ENTRY(rel_isrlock)
	pushl	$_mp_lock
	call	_MPrellock
	add	$4, %esp
	ret


/***********************************************************************
 *  FPU locks
 */

NON_GPROF_ENTRY(get_fpu_lock)
	pushl	lapic_tpr
	pushfl
	sti
	pushl	$_mp_lock
	call	_MPgetlock
	add	$4, %esp
	popfl
	popl	lapic_tpr
	ret

#ifdef notneeded
NON_GPROF_ENTRY(try_fpu_lock)
	pushl	$_mp_lock
	call	_MPtrylock
	add	$4, %esp
	ret

NON_GPROF_ENTRY(rel_fpu_lock)
	pushl	$_mp_lock
	call	_MPrellock
	add	$4, %esp
	ret
#endif /* notneeded */


/***********************************************************************
 *  align locks
 */

NON_GPROF_ENTRY(get_align_lock)
	pushl	lapic_tpr
	pushfl
	sti
	pushl	$_mp_lock
	call	_MPgetlock
	add	$4, %esp
	popfl
	popl	lapic_tpr
	ret

#ifdef notneeded
NON_GPROF_ENTRY(try_align_lock)
	pushl	$_mp_lock
	call	_MPtrylock
	add	$4, %esp
	ret

NON_GPROF_ENTRY(rel_align_lock)
	pushl	$_mp_lock
	call	_MPrellock
	add	$4, %esp
	ret
#endif /* notneeded */


/***********************************************************************
 *  syscall locks
 */

NON_GPROF_ENTRY(get_syscall_lock)
	pushl	lapic_tpr
	pushfl
	sti
	pushl	$_mp_lock
	call	_MPgetlock
	add	$4, %esp
	popfl
	popl	lapic_tpr
	ret

#ifdef notneeded
NON_GPROF_ENTRY(try_syscall_lock)
	pushl	$_mp_lock
	call	_MPtrylock
	add	$4, %esp
	ret

NON_GPROF_ENTRY(rel_syscall_lock)
	pushl	$_mp_lock
	call	_MPrellock
	add	$4, %esp
	ret
#endif /* notneeded */


/***********************************************************************
 *  altsyscall locks
 */

NON_GPROF_ENTRY(get_altsyscall_lock)
	pushl	lapic_tpr
	pushfl
	sti
	pushl	$_mp_lock
	call	_MPgetlock
	add	$4, %esp
	popfl
	popl	lapic_tpr
	ret

#ifdef notneeded
NON_GPROF_ENTRY(try_altsyscall_lock)
	pushl	$_mp_lock
	call	_MPtrylock
	add	$4, %esp
	ret

NON_GPROF_ENTRY(rel_altsyscall_lock)
	pushl	$_mp_lock
	call	_MPrellock
	add	$4, %esp
	ret
#endif /* notneeded */


#ifdef RECURSIVE_MPINTRLOCK
/***********************************************************************
 *  void get_mpintrlock()
 *  -----------------
 *  All registers preserved
 */

NON_GPROF_ENTRY(get_mpintrlock)
	pushl	%eax
	pushl	%ecx
	pushl	%edx

#ifdef GRAB_LOPRIO
	pushl	lapic_tpr
	pushfl
#endif

	pushl	$_mpintr_lock
	call	_MPgetlock
	add	$4, %esp

#ifdef GRAB_LOPRIO
	popfl
	popl	lapic_tpr
#endif

	popl	%edx
	popl	%ecx
	popl	%eax
	ret

/***********************************************************************
 *  void rel_mpintrlock()
 *  -----------------
 *  All registers preserved
 */

NON_GPROF_ENTRY(rel_mpintrlock)
	pushl	%eax
	pushl	%ecx
	pushl	%edx

	pushl	$_mpintr_lock
	call	_MPrellock
	add	$4, %esp

	popl	%edx
	popl	%ecx
	popl	%eax
	ret
#endif /* RECURSIVE_MPINTRLOCK */


/***********************************************************************
 *  lock data
 */
	.data
	.p2align 2			/* xx_lock aligned on int boundary */

	.globl	_mp_lock
_mp_lock:	.long	0

	.globl	_isr_lock
_isr_lock:	.long	0

#ifdef RECURSIVE_MPINTRLOCK
	.globl	_mpintr_lock
_mpintr_lock:	.long	0xffffffff
#endif /* RECURSIVE_MPINTRLOCK */


#ifdef GLPROFILE
	.globl	_gethits
_gethits:
	.long	0
_gethits2:
	.long	0
_gethits3:
	.long	0

	.globl	_tryhits
_tryhits:
	.long	0
_tryhits2:
	.long	0
_tryhits3:
	.long	0

msg:
	.asciz	"lock hits: 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x\n"
#endif /* GLPROFILE */