FreeBSD/Linux Kernel Cross Reference
sys/kern/ipc_sched.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993, 1992,1991,1990 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: ipc_sched.c,v $
29 * Revision 2.16 93/05/15 18:55:54 mrt
30 * machparam.h -> machspl.h
31 *
32 * Revision 2.15 93/01/14 17:34:41 danner
33 * Added ANSI function prototypes.
34 * [92/12/29 dbg]
35 * Proper spl typing. 64bit cleanup.
36 * [92/12/01 af]
37 *
38 * Revision 2.14 92/08/03 17:37:37 jfriedl
39 * removed silly prototypes
40 * [92/08/02 jfriedl]
41 *
42 * Revision 2.13 92/05/21 17:14:02 jfriedl
43 * tried prototypes.
44 * [92/05/20 jfriedl]
45 *
46 * Revision 2.12 92/04/04 15:19:13 rpd
47 * Fixed thread_will_wait_with_timeout with convert_ipc_timeout_to_ticks,
48 * so that rounding happens properly.
49 * [92/04/04 rpd]
50 *
51 * Revision 2.11 92/04/01 19:33:13 rpd
52 * Fixed thread_handoff to check for stack-privilege violations.
53 * The old assertion isn't true with out-of-kernel default-pager.
54 * [92/03/24 rpd]
55 *
56 * Revision 2.10 91/07/31 17:45:30 dbg
57 * Check for new thread bound to wrong processor in thread_handoff.
58 * [91/07/25 dbg]
59 *
60 * Fix timeout race.
61 * [91/05/23 dbg]
62 *
63 * Revise scheduling state machine.
64 * [91/05/22 dbg]
65 *
66 * Revision 2.9 91/06/25 10:28:37 rpd
67 * Added some wait_result assertions.
68 * [91/05/30 rpd]
69 *
70 * Revision 2.8 91/05/18 14:31:50 rpd
71 * Updated thread_handoff to check stack_privilege.
72 * [91/03/30 rpd]
73 *
74 * Revision 2.7 91/05/14 16:42:33 mrt
75 * Correcting copyright
76 *
77 * Revision 2.6 91/03/16 14:50:09 rpd
78 * Rewrote ipc_thread_switch as thread_handoff,
79 * with new stack_handoff replacing stack_switch.
80 * Renamed ipc_thread_{go,will_wait,will_wait_with_timeout}
81 * to thread_{go,will_wait,will_wait_with_timeout}.
82 * [91/02/17 rpd]
83 * Removed ipc_thread_switch_hits.
84 * [91/01/28 rpd]
85 * Allow swapped threads on the run queues.
86 * [91/01/17 rpd]
87 *
88 * Revision 2.5 91/02/05 17:26:53 mrt
89 * Changed to new Mach copyright
90 * [91/02/01 16:13:23 mrt]
91 *
92 * Revision 2.4 91/01/08 15:15:52 rpd
93 * Added KEEP_STACKS support.
94 * [91/01/06 rpd]
95 * Added ipc_thread_switch_hits, ipc_thread_switch_misses counters.
96 * [91/01/03 22:07:15 rpd]
97 *
98 * Modified ipc_thread_switch to deal with pending timeouts.
99 * [90/12/20 rpd]
100 * Removed ipc_thread_go_and_block.
101 * Added ipc_thread_switch.
102 * [90/12/08 rpd]
103 *
104 * Revision 2.3 90/08/27 22:02:40 dbg
105 * Pass correct number of arguments to thread_swapin.
106 * [90/07/17 dbg]
107 *
108 * Revision 2.2 90/06/02 14:54:22 rpd
109 * Created for new IPC.
110 * [90/03/26 23:45:59 rpd]
111 *
112 */
113
114 #include <cpus.h>
115 #include <mach_host.h>
116
117 #include <mach/message.h>
118 #include <kern/counters.h>
119 #include <kern/cpu_number.h>
120 #include <kern/lock.h>
121 #include <kern/thread.h>
122 #include <kern/sched_prim.h>
123 #include <kern/processor.h>
124 #include <kern/time_out.h>
125 #include <kern/thread_swap.h>
126 #include <kern/ipc_sched.h>
127 #include <machine/machspl.h> /* for splsched/splx */
128 #include <machine/pmap.h>
129
130
131
132 /*
133 * These functions really belong in kern/sched_prim.c.
134 */
135
136 /*
137 * Routine: thread_go
138 * Purpose:
139 * Start a thread running.
140 * Conditions:
141 * IPC locks may be held.
142 */
143
144 void
145 thread_go(
146 thread_t thread)
147 {
148 int state;
149 spl_t s;
150
151 s = splsched();
152 thread_lock(thread);
153
154 reset_timeout_check(&thread->timer);
155
156 state = thread->state;
157 switch (state & TH_SCHED_STATE) {
158
159 case TH_WAIT | TH_SUSP | TH_UNINT:
160 case TH_WAIT | TH_UNINT:
161 case TH_WAIT:
162 /*
163 * Sleeping and not suspendable - put
164 * on run queue.
165 */
166 thread->state = (state &~ TH_WAIT) | TH_RUN;
167 thread->wait_result = THREAD_AWAKENED;
168 thread_setrun(thread, TRUE);
169 break;
170
171 case TH_WAIT | TH_SUSP:
172 case TH_RUN | TH_WAIT:
173 case TH_RUN | TH_WAIT | TH_SUSP:
174 case TH_RUN | TH_WAIT | TH_UNINT:
175 case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
176 /*
177 * Either already running, or suspended.
178 */
179 thread->state = state & ~TH_WAIT;
180 thread->wait_result = THREAD_AWAKENED;
181 break;
182
183 default:
184 /*
185 * Not waiting.
186 */
187 break;
188 }
189
190 thread_unlock(thread);
191 splx(s);
192 }
193
/*
 *	Routine:	thread_will_wait
 *	Purpose:
 *		Assert that the thread intends to block:
 *		marks the thread TH_WAIT under the thread lock
 *		at splsched.
 */

void
thread_will_wait(
	thread_t	thread)
{
	spl_t	s;

	s = splsched();
	thread_lock(thread);

	/*
	 * NOTE(review): '=' below is assignment, not '=='.  When
	 * asserts are compiled in, this plants -1 in wait_result as
	 * a sentinel checked by later assertions; when asserts are
	 * compiled out, the store disappears along with those
	 * assertions.  The "for later assertions" comment suggests
	 * this is intentional - confirm before "fixing" to '=='.
	 */
	assert(thread->wait_result = -1);	/* for later assertions */
	thread->state |= TH_WAIT;

	thread_unlock(thread);
	splx(s);
}
215
/*
 *	Routine:	thread_will_wait_with_timeout
 *	Purpose:
 *		Assert that the thread intends to block,
 *		with a timeout: marks the thread TH_WAIT and arms
 *		its timer for the given number of milliseconds.
 */

void
thread_will_wait_with_timeout(
	thread_t thread,
	mach_msg_timeout_t msecs)
{
	/*
	 * Convert before taking the lock; per the revision history,
	 * convert_ipc_timeout_to_ticks handles the msec->tick
	 * rounding properly.
	 */
	natural_t ticks = convert_ipc_timeout_to_ticks(msecs);
	spl_t s;

	s = splsched();
	thread_lock(thread);

	/*
	 * NOTE(review): '=' is assignment, not comparison - under
	 * assert-enabled builds it stores the -1 sentinel that later
	 * assertions check; it vanishes when asserts are compiled
	 * out.  Apparently intentional.
	 */
	assert(thread->wait_result = -1);	/* for later assertions */
	thread->state |= TH_WAIT;

	/* Arm the timeout; thread_go cancels it on wakeup. */
	set_timeout(&thread->timer, ticks);

	thread_unlock(thread);
	splx(s);
}
242
#if	MACH_HOST
/*
 * A handoff target must belong to the same processor set as the
 * current processor.  Reading current_processor()->processor_set
 * is done without locking (see comment in thread_handoff).
 */
#define check_processor_set(thread)	\
	(current_processor()->processor_set == (thread)->processor_set)
#else	/* MACH_HOST */
/* Single processor set: the check is vacuously true. */
#define check_processor_set(thread)	TRUE
#endif	/* MACH_HOST */

#if	NCPUS > 1
/*
 * A handoff target bound to a specific processor may only be
 * handed off to on that processor.
 */
#define check_bound_processor(thread) \
	((thread)->bound_processor == PROCESSOR_NULL || \
	 (thread)->bound_processor == current_processor())
#else	/* NCPUS > 1 */
/* Uniprocessor: binding cannot be violated. */
#define check_bound_processor(thread)	TRUE
#endif	/* NCPUS > 1 */
257
/*
 *	Routine:	thread_handoff
 *	Purpose:
 *		Switch to a new thread (new), leaving the current
 *		thread (old) blocked.  If successful, moves the
 *		kernel stack from old to new and returns as the
 *		new thread.  An explicit continuation for the old thread
 *		must be supplied.
 *
 *		NOTE: Although we wakeup new, we don't set new->wait_result.
 *	Returns:
 *		TRUE if the handoff happened.
 */

boolean_t
thread_handoff(
	register thread_t old,
	register continuation_t continuation,
	register thread_t new)
{
	spl_t s;

	assert(current_thread() == old);

	/*
	 * XXX Dubious things here:
	 * I don't check the idle_count on the processor set.
	 * No scheduling priority or policy checks.
	 * I assume the new thread is interruptible.
	 */

	s = splsched();
	thread_lock(new);

	/*
	 * The first thing we must do is check the state
	 * of the threads, to ensure we can handoff.
	 * This check uses current_processor()->processor_set,
	 * which we can read without locking.
	 */

	/*
	 * Refuse the handoff if: old has stack privilege on the
	 * current stack (per the history, the old assertion broke
	 * with the out-of-kernel default pager), new is not cleanly
	 * waiting and swapped out, or new may not run here.
	 */
	if ((old->stack_privilege == current_stack()) ||
	    (new->state != (TH_WAIT|TH_SWAPPED)) ||
	    !check_processor_set(new) ||
	    !check_bound_processor(new)) {
		thread_unlock(new);
		(void) splx(s);

		counter_always(c_thread_handoff_misses++);
		return FALSE;
	}

	/* Cancel any timeout new armed before blocking. */
	reset_timeout_check(&new->timer);

	new->state = TH_RUN;
	thread_unlock(new);

#if	NCPUS > 1
	/*
	 * NOTE(review): this store happens after new's lock was
	 * dropped above - presumably safe because new cannot run
	 * anywhere until stack_handoff below gives it a stack;
	 * confirm.
	 */
	new->last_processor = current_processor();
#endif	/* NCPUS > 1 */

	ast_context(new, cpu_number());
	timer_switch(&new->system_timer);

	/*
	 * stack_handoff is machine-dependent.  It does the
	 * machine-dependent components of a context-switch, like
	 * changing address spaces.  It updates active_threads.
	 */

	stack_handoff(old, new);

	/*
	 * Now we must dispose of the old thread.
	 * This is like thread_continue, except
	 * that the old thread isn't waiting yet.
	 */

	thread_lock(old);
	old->swap_func = continuation;
	/*
	 * NOTE(review): '=' is assignment, not comparison - plants
	 * the -1 sentinel in wait_result under assert-enabled
	 * builds; compiles away otherwise.  Apparently intentional.
	 */
	assert(old->wait_result = -1);	/* for later assertions */

	if (old->state == TH_RUN) {
		/*
		 * This is our fast path.
		 */

		old->state = TH_WAIT|TH_SWAPPED;
	}
	else if (old->state == (TH_RUN|TH_SUSP)) {
		/*
		 * Somebody is trying to suspend the thread.
		 */

		old->state = TH_WAIT|TH_SUSP|TH_SWAPPED;
		if (old->wake_active) {
			/*
			 * Someone wants to know when the thread
			 * really stops.
			 */
			old->wake_active = FALSE;
			thread_unlock(old);
			thread_wakeup((event_t)&old->wake_active);
			/* old is already unlocked; skip the unlock below. */
			goto after_old_thread;
		}
	} else
		panic("thread_handoff");

	thread_unlock(old);
    after_old_thread:
	(void) splx(s);

	counter_always(c_thread_handoff_hits++);
	return TRUE;
}
Cache object: 066fe76be209eb8720685adfd8fd07ac
|