FreeBSD/Linux Kernel Cross Reference
sys/kern/ast.h
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993,1992,1991,1990,1989 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: ast.h,v $
29 * Revision 2.12 93/11/17 17:06:28 dbg
30 * Import machine/machspl.h for splsched.
31 * [93/06/11 dbg]
32 *
33 * Added AST_TIMER for timeouts. Defined AST_KERNEL to be
34 * the set of ASTs for periodic kernel operations. Added
35 * AST_KERNEL_CHECK to check for kernel ASTs. Added ANSI
36 * function prototypes.
37 * [93/01/13 dbg]
38 *
39 * Revision 2.11 92/05/22 18:37:52 jfriedl
40 * Put calls to aston/astoff in {} for safety.
41 *
42 * Revision 2.9 92/05/04 11:24:24 danner
43 * Changed AST_PER_THREAD definition (from dbg).
44 * [92/05/03 danner]
45 *
46 * Revision 2.8 92/04/06 01:16:11 rpd
47 * Fixed ast_context bug, with AST_PER_THREAD. From dbg.
48 * [92/04/05 rpd]
49 *
50 * Revision 2.7 91/08/28 11:14:20 jsb
51 * Renamed AST_CLPORT to AST_NETIPC.
52 * [91/08/14 21:38:09 jsb]
53 *
54 * Revision 2.6 91/06/06 17:06:48 jsb
55 * Added AST_CLPORT.
56 * [91/05/13 17:35:08 jsb]
57 *
58 * Revision 2.5 91/05/14 16:39:58 mrt
59 * Correcting copyright
60 *
61 * Revision 2.4 91/03/16 14:49:32 rpd
62 * Fixed dummy aston, astoff definitions.
63 * [91/02/12 rpd]
64 * Revised the AST interface, adding AST_NETWORK.
65 * Added volatile attribute to need_ast.
66 * [91/01/18 rpd]
67 *
68 * Revision 2.3 91/02/05 17:25:38 mrt
69 * Changed to new Mach copyright
70 * [91/02/01 16:11:14 mrt]
71 *
72 * Revision 2.2 90/06/02 14:53:34 rpd
73 * Merged with mainline.
74 * [90/03/26 22:02:55 rpd]
75 *
76 * Revision 2.1 89/08/03 15:45:04 rwd
77 * Created.
78 *
79 * 6-Sep-88 David Golub (dbg) at Carnegie-Mellon University
80 * Adapted to MACH_KERNEL and VAX.
81 *
82 * 11-Aug-88 David Black (dlb) at Carnegie-Mellon University
83 * Created. dbg gets equal credit for the design.
84 *
85 */
86
87 /*
88 * kern/ast.h: Definitions for Asynchronous System Traps.
89 */
90
91 #ifndef _KERN_AST_H_
92 #define _KERN_AST_H_
93
94 /*
95 * A CPU takes an AST when it is about to return to user code.
96 * Instead of going back to user code, it calls ast_taken.
97 * Machine-dependent code is responsible for maintaining
98 * a set of reasons for an AST, and passing this set to ast_taken.
99 */
100
101 #include <cpus.h>
102
103 #include <mach/boolean.h>
104 #include <kern/cpu_number.h>
105 #include <kern/macro_help.h>
106 #include <kern/kern_types.h>
107 #include <machine/ast.h>
108 #include <machine/machspl.h>
109
/*
 *	Bits for reasons: each AST_* value is one bit in an ast_t
 *	mask.  "thread" reasons belong to AST_PER_THREAD (reset at
 *	context switch); "kernel" reasons belong to AST_KERNEL
 *	(preserved across context switches); AST_BLOCK is a
 *	scheduling reason in neither set.
 */

#define AST_ZILCH	0x0			/* no reasons pending */
#define AST_HALT	0x1		/* thread: see AST_PER_THREAD */
#define AST_TERMINATE	0x2		/* thread: see AST_PER_THREAD */
#define AST_BLOCK	0x4		/* scheduling: context switch wanted */
#define AST_NETWORK	0x8		/* kernel: see AST_KERNEL */
#define AST_NETIPC	0x10		/* kernel: see AST_KERNEL */
#define AST_TIMER	0x20		/* kernel: see AST_KERNEL */
121
/*
 *	Per-thread ASTs are reset at context-switch time (see
 *	ast_context below).  machine/ast.h can define
 *	MACHINE_AST_PER_THREAD to add machine-dependent reasons
 *	to the per-thread set.
 */

#ifndef MACHINE_AST_PER_THREAD
#define MACHINE_AST_PER_THREAD	0	/* no machine-dependent per-thread reasons */
#endif

/* Reasons that follow the current thread rather than the CPU. */
#define AST_PER_THREAD	(AST_HALT | AST_TERMINATE | MACHINE_AST_PER_THREAD)
132
/*
 *	Kernel ASTs are preserved across context switches.  They
 *	may be taken at clean points in long-running kernel routines
 *	(see AST_KERNEL_CHECK / AST_KERNEL_CHECK_HIGH below).
 */
#define AST_KERNEL	(AST_NETWORK | AST_NETIPC | AST_TIMER)
138
/*
 *	ASTs are set and checked only on the same CPU, so the
 *	pending set needs no cross-CPU locking.  need_ast is
 *	volatile: entries may change asynchronously with respect
 *	to the reader (NOTE(review): presumably from interrupt
 *	level — confirm against machine-dependent callers).
 */
typedef unsigned int	ast_t;		/* mask of AST_* reason bits */

extern volatile ast_t	need_ast[NCPUS];	/* per-CPU pending reasons */
145
#ifdef	MACHINE_AST
/*
 *	machine/ast.h is responsible for defining aston and astoff,
 *	which arm/disarm the machine-dependent AST-delivery
 *	mechanism for a CPU.
 */
#else	/* MACHINE_AST */

/* No machine AST support: aston/astoff are no-ops. */
#define	aston(mycpu)
#define	astoff(mycpu)

#endif	/* MACHINE_AST */
156
/*
 *	Initialize AST package.
 */
extern void	ast_init(void);

/*
 *	Call at splsched to take ASTs for kernel and user.
 *	Returns at spl0.
 */
extern void	ast_taken(void);

/*
 *	Call at splsched to take ASTs for periodic kernel
 *	activities (the AST_KERNEL reasons) only.
 *	Returns at spl0.
 */
extern void	ast_kernel_taken(void);

/*
 *	Call at interrupt time to check for conditions
 *	that may require a context-switch (AST_BLOCK),
 *	or for new per-thread ASTs for the current thread.
 *
 *	"Thread" is always the current thread.
 *	"End_quantum" is to make this routine usable
 *	as a 'clock_sched' routine (see kern/sched_policy.h).
 */
extern void	ast_check(thread_t thread, boolean_t end_quantum);
184
/*
 *	Macro to check for kernel activity ASTs and call them.
 *	Comes in two versions: one to be used at spl0, and
 *	one to be used at splsched.
 */

/*
 *	spl0 version: raise to splsched before taking the ASTs.
 *	ast_kernel_taken returns at spl0, so the caller's level
 *	is restored on the way out.
 */
#define AST_KERNEL_CHECK(mycpu)					\
	MACRO_BEGIN						\
	    if (need_ast[mycpu] & AST_KERNEL) {			\
		(void) splsched();				\
		ast_kernel_taken();				\
	    }							\
	MACRO_END
197
/*
 *	splsched version: ast_kernel_taken returns at spl0, so
 *	re-raise to splsched afterward to restore the caller's
 *	interrupt level.
 */
#define AST_KERNEL_CHECK_HIGH(mycpu)				\
	MACRO_BEGIN						\
	    if (need_ast[mycpu] & AST_KERNEL) {			\
		ast_kernel_taken();				\
		(void) splsched();				\
	    }							\
	MACRO_END
205
/*
 *	ast_needed, ast_on, ast_off, ast_context, and ast_propagate
 *	assume splsched.  mycpu is always cpu_number().  It is an
 *	argument in case cpu_number() is expensive.
 */

/* Nonzero iff any AST reason is pending for this CPU. */
#define ast_needed(mycpu)	need_ast[mycpu]
213
/*
 *	Add "reasons" to a CPU's pending set; if the set is now
 *	non-empty, arm the machine AST mechanism.  Call at splsched.
 */
#define ast_on(cpu, reasons)					\
MACRO_BEGIN							\
	if ((need_ast[cpu] |= (reasons)) != AST_ZILCH) {	\
		aston(cpu);					\
	}							\
MACRO_END
219
/*
 *	Remove "reasons" from a CPU's pending set; if the set is
 *	now empty, disarm the machine AST mechanism.  Call at
 *	splsched.
 */
#define ast_off(cpu, reasons)					\
MACRO_BEGIN							\
	if ((need_ast[cpu] &= ~(reasons)) == AST_ZILCH) {	\
		astoff(cpu);					\
	}							\
MACRO_END
225
/*
 *	Fold the current thread's pending per-thread reasons into
 *	this CPU's pending set.  Call (at splsched) after setting a
 *	reason on the current thread with thread_ast_set.
 */
#define ast_propagate(thread, mycpu)	ast_on((mycpu), (thread)->ast)
227
/*
 *	Install "thread" as the CPU's current thread at context-
 *	switch time: replace the per-thread reasons in the CPU's
 *	pending set with the incoming thread's reasons, keeping all
 *	non-per-thread (kernel and scheduling) reasons, then arm or
 *	disarm the machine AST mechanism to match the resulting set.
 */
#define ast_context(thread, mycpu)				\
	MACRO_BEGIN						\
	    if ((need_ast[mycpu] =				\
		 (need_ast[mycpu] &~ AST_PER_THREAD) | (thread)->ast) \
		!= AST_ZILCH)					\
	    { aston(mycpu); }					\
	    else						\
	    { astoff(mycpu); }					\
	MACRO_END
237
238
/*
 *	Manipulate a thread's private set of pending AST reasons.
 *	Expansions are fully parenthesized so each macro behaves as
 *	a single expression in any context; previously, use inside
 *	a larger expression would bind a neighboring operator into
 *	the |= / &= right-hand side (CERT PRE02-C).
 */
#define thread_ast_set(thread, reason)		((thread)->ast |= (reason))
#define thread_ast_clear(thread, reason)	((thread)->ast &= ~(reason))
#define thread_ast_clear_all(thread)		((thread)->ast = AST_ZILCH)
242
243 /*
244 * NOTE: if thread is the current thread, thread_ast_set should
245 * be followed by ast_propagate().
246 */
247
248 #endif /* _KERN_AST_H_ */
Cache object: df44805b9bb74e8ee58574d51830951d
|