1 /*-
2 * Copyright (c) 2002 Matthew Dillon. This code is distributed under
3 * the BSD copyright, /usr/src/COPYRIGHT.
4 *
5 * This file contains prototypes and high-level inlines related to
6 * machine-level critical function support:
7 *
8 * cpu_critical_enter() - inlined
9 * cpu_critical_exit() - inlined
10 * cpu_critical_fork_exit() - prototyped
11 * cpu_thread_link() - prototyped
12 * related support functions residing
13 * in <arch>/<arch>/critical.c - prototyped
14 *
15 * $FreeBSD: releng/5.1/sys/i386/include/critical.h 99742 2002-07-10 20:15:58Z dillon $
16 */
17
18 #ifndef _MACHINE_CRITICAL_H_
19 #define _MACHINE_CRITICAL_H_
20
21 __BEGIN_DECLS
22
23 /*
24 * Prototypes - see <arch>/<arch>/critical.c
25 */
26 void cpu_unpend(void);
27 void cpu_critical_fork_exit(void);
28 void cpu_thread_link(struct thread *td);
29
30 #ifdef __GNUC__
31
32 /*
33 * cpu_critical_enter:
34 *
35 * This routine is called from critical_enter() on the 0->1 transition
36 * of td_critnest, prior to it being incremented to 1.
37 *
38 * If new-style critical section handling we do not have to do anything.
39 * However, as a side effect any interrupts occuring while td_critnest
40 * is non-zero will be deferred.
41 */
42 #define cpu_critical_enter()
43
44 /*
45 * cpu_critical_exit:
46 *
47 * This routine is called from critical_exit() on a 1->0 transition
48 * of td_critnest, after it has been decremented to 0. We are
49 * exiting the last critical section.
50 *
51 * Note that the td->critnest (1->0) transition interrupt race against
52 * our int_pending/unpend() check below is handled by the interrupt
53 * code for us, so we do not have to do anything fancy.
54 */
55 static __inline void
56 cpu_critical_exit(void)
57 {
58 /*
59 * We may have to schedule pending interrupts. Create
60 * conditions similar to an interrupt context and call
61 * unpend().
62 *
63 * note: we do this even if we are in an interrupt
64 * nesting level. Deep nesting is protected by
65 * critical_*() and if we conditionalized it then we
66 * would have to check int_pending again whenever
67 * we decrement td_intr_nesting_level to 0.
68 */
69 if (PCPU_GET(int_pending))
70 cpu_unpend();
71 }
72
73 #else /* !__GNUC__ */
74
/*
 * Non-GNU compilers cannot use the macro/inline forms above; they get
 * out-of-line implementations instead (see <arch>/<arch>/critical.c).
 *
 * BUG FIX: both prototypes were missing their terminating semicolons,
 * which is a hard compile error whenever this !__GNUC__ branch is taken.
 */
void cpu_critical_enter(void);
void cpu_critical_exit(void);
77
78 #endif /* __GNUC__ */
79
80 __END_DECLS
81
82 #endif /* !_MACHINE_CRITICAL_H_ */
83
/* Cache object: 19224fcb753b86958efd6eab785d67e8 */