1 /*
2 * Mach Operating System
3 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: syscall_subr.c,v $
29 * Revision 2.17 93/05/15 18:54:53 mrt
30 * machparam.h -> machspl.h
31 *
32 * Revision 2.16 93/03/09 10:55:43 danner
33 * Removed duplicated decl for thread_syscall_return.
34 * [93/03/06 af]
35 *
36 * Revision 2.15 93/01/14 17:36:35 danner
37 * Proper spl typing.
38 * [92/12/01 af]
39 *
40 * Revision 2.14 92/08/03 17:39:28 jfriedl
41 * removed silly prototypes
42 * [92/08/02 jfriedl]
43 *
44 * Revision 2.13 92/05/21 17:16:06 jfriedl
45 * Removed unused var 'result' in swtch_pri().
46 * [92/05/16 jfriedl]
47 *
48 * Revision 2.12 92/04/05 13:09:19 rpd
49 * Fixed thread_switch argument types.
50 * Fixed thread_depress_priority with convert_ipc_timeout_to_ticks,
51 * so that rounding happens properly.
52 * [92/04/04 rpd]
53 *
54 * Revision 2.11 92/02/19 16:06:53 elf
55 * Change calls to compute_priority.
56 * [92/01/19 rwd]
57 * Changed thread_depress_priority to not schedule a timeout when
58 * time is 0.
59 * [92/01/10 rwd]
60 *
61 * Revision 2.10 91/07/31 17:48:19 dbg
62 * Fix timeout race.
63 * [91/07/30 17:05:37 dbg]
64 *
65 * Revision 2.9 91/05/18 14:33:47 rpd
66 * Changed to use thread->depress_timer.
67 * [91/03/31 rpd]
68 *
69 * Revision 2.8 91/05/14 16:47:24 mrt
70 * Correcting copyright
71 *
72 * Revision 2.7 91/05/08 12:48:54 dbg
73 * Add volatile declarations.
74 * Removed history for non-existent routines.
75 * [91/04/26 14:43:58 dbg]
76 *
77 * Revision 2.6 91/03/16 14:51:54 rpd
78 * Renamed ipc_thread_will_wait_with_timeout
79 * to thread_will_wait_with_timeout.
80 * [91/02/17 rpd]
81 * Added swtch_continue, swtch_pri_continue, thread_switch_continue.
82 * [91/01/17 rpd]
83 *
84 * Revision 2.5 91/02/05 17:29:34 mrt
85 * Changed to new Mach copyright
86 * [91/02/01 16:18:14 mrt]
87 *
88 * Revision 2.4 91/01/08 15:17:15 rpd
89 * Added continuation argument to thread_run.
90 * [90/12/11 rpd]
91 * Added continuation argument to thread_block.
92 * [90/12/08 rpd]
93 *
94 * Revision 2.3 90/11/05 14:31:36 rpd
95 * Restored missing multiprocessor untimeout failure code.
96 * [90/10/29 rpd]
97 *
98 * Revision 2.2 90/06/02 14:56:17 rpd
99 * Updated to new scheduling technology.
100 * [90/03/26 22:19:48 rpd]
101 *
102 * Revision 2.1 89/08/03 15:52:39 rwd
103 * Created.
104 *
105 * 3-Aug-88 David Golub (dbg) at Carnegie-Mellon University
106 * Removed all non-MACH code.
107 *
108 * 6-Dec-87 Michael Young (mwyoung) at Carnegie-Mellon University
109 * Removed old history.
110 *
111 * 19-Jun-87 David Black (dlb) at Carnegie-Mellon University
112 * MACH_TT: boolean for swtch and swtch_pri is now whether there is
113 * other work that the kernel could run instead of this thread.
114 *
115 * 7-May-87 David Black (dlb) at Carnegie-Mellon University
116 * New versions of swtch and swtch_pri for MACH_TT. Both return a
117 * boolean indicating whether a context switch was done. Documented.
118 *
119 * 31-Jul-86 Rick Rashid (rfr) at Carnegie-Mellon University
120 * Changed TPswtch_pri to set p_pri to 127 to make sure looping
121 * processes which want to simply reschedule do not monopolize the
122 * cpu.
123 *
124 * 3-Jul-86 Fil Alleva (faa) at Carnegie-Mellon University
125 * Added TPswtch_pri(). [Added to Mach, 20-jul-86, mwyoung.]
126 *
127 */
128
129 #include <mach_fixpri.h>
130 #include <cpus.h>
131
132 #include <mach/boolean.h>
133 #include <mach/thread_switch.h>
134 #include <ipc/ipc_port.h>
135 #include <ipc/ipc_space.h>
136 #include <kern/counters.h>
137 #include <kern/ipc_kobject.h>
138 #include <kern/processor.h>
139 #include <kern/sched.h>
140 #include <kern/sched_prim.h>
141 #include <kern/ipc_sched.h>
142 #include <kern/task.h>
143 #include <kern/thread.h>
144 #include <kern/time_out.h>
145 #include <machine/machspl.h> /* for splsched */
146
147 #if MACH_FIXPRI
148 #include <mach/policy.h>
149 #endif MACH_FIXPRI
150
151
152
153 /*
154 * swtch and swtch_pri both attempt to context switch (logic in
155 * thread_block no-ops the context switch if nothing would happen).
156 * A boolean is returned that indicates whether there is anything
157 * else runnable.
158 *
159 * This boolean can be used by a thread waiting on a
160 * lock or condition: If FALSE is returned, the thread is justified
161 * in becoming a resource hog by continuing to spin because there's
162 * nothing else useful that the processor could do. If TRUE is
163 * returned, the thread should make one more check on the
164 * lock and then be a good citizen and really suspend.
165 */
166
167 extern void thread_depress_priority();
168 extern kern_return_t thread_depress_abort();
169
170 void swtch_continue()
171 {
172 register processor_t myprocessor;
173
174 myprocessor = current_processor();
175 thread_syscall_return(myprocessor->runq.count > 0 ||
176 myprocessor->processor_set->runq.count > 0);
177 /*NOTREACHED*/
178 }
179
180 boolean_t swtch()
181 {
182 register processor_t myprocessor;
183
184 #if NCPUS > 1
185 myprocessor = current_processor();
186 if (myprocessor->runq.count == 0 &&
187 myprocessor->processor_set->runq.count == 0)
188 return(FALSE);
189 #endif NCPUS > 1
190
191 counter(c_swtch_block++);
192 thread_block(swtch_continue);
193 myprocessor = current_processor();
194 return(myprocessor->runq.count > 0 ||
195 myprocessor->processor_set->runq.count > 0);
196 }
197
198 void swtch_pri_continue()
199 {
200 register thread_t thread = current_thread();
201 register processor_t myprocessor;
202
203 if (thread->depress_priority >= 0)
204 (void) thread_depress_abort(thread);
205 myprocessor = current_processor();
206 thread_syscall_return(myprocessor->runq.count > 0 ||
207 myprocessor->processor_set->runq.count > 0);
208 /*NOTREACHED*/
209 }
210
211 boolean_t swtch_pri(pri)
212 int pri;
213 {
214 register thread_t thread = current_thread();
215 register processor_t myprocessor;
216
217 #ifdef lint
218 pri++;
219 #endif lint
220
221 #if NCPUS > 1
222 myprocessor = current_processor();
223 if (myprocessor->runq.count == 0 &&
224 myprocessor->processor_set->runq.count == 0)
225 return(FALSE);
226 #endif NCPUS > 1
227
228 /*
229 * XXX need to think about depression duration.
230 * XXX currently using min quantum.
231 */
232 thread_depress_priority(thread, min_quantum);
233
234 counter(c_swtch_pri_block++);
235 thread_block(swtch_pri_continue);
236
237 if (thread->depress_priority >= 0)
238 (void) thread_depress_abort(thread);
239 myprocessor = current_processor();
240 return(myprocessor->runq.count > 0 ||
241 myprocessor->processor_set->runq.count > 0);
242 }
243
244 extern int hz;
245
246 void thread_switch_continue()
247 {
248 register thread_t cur_thread = current_thread();
249
250 /*
251 * Restore depressed priority
252 */
253 if (cur_thread->depress_priority >= 0)
254 (void) thread_depress_abort(cur_thread);
255 thread_syscall_return(KERN_SUCCESS);
256 /*NOTREACHED*/
257 }
258
259 /*
260 * thread_switch:
261 *
262 * Context switch. User may supply thread hint.
263 *
264 * Fixed priority threads that call this get what they asked for
265 * even if that violates priority order.
266 */
267 kern_return_t thread_switch(thread_name, option, option_time)
268 mach_port_t thread_name;
269 int option;
270 mach_msg_timeout_t option_time;
271 {
272 register thread_t cur_thread = current_thread();
273 register processor_t myprocessor;
274 ipc_port_t port;
275
276 /*
277 * Process option.
278 */
279 switch (option) {
280 case SWITCH_OPTION_NONE:
281 /*
282 * Nothing to do.
283 */
284 break;
285
286 case SWITCH_OPTION_DEPRESS:
287 /*
288 * Depress priority for given time.
289 */
290 thread_depress_priority(cur_thread, option_time);
291 break;
292
293 case SWITCH_OPTION_WAIT:
294 thread_will_wait_with_timeout(cur_thread, option_time);
295 break;
296
297 default:
298 return(KERN_INVALID_ARGUMENT);
299 }
300
301 /*
302 * Check and act on thread hint if appropriate.
303 */
304 if ((thread_name != 0) &&
305 (ipc_port_translate_send(cur_thread->task->itk_space,
306 thread_name, &port) == KERN_SUCCESS)) {
307 /* port is locked, but it might not be active */
308
309 /*
310 * Get corresponding thread.
311 */
312 if (ip_active(port) && (ip_kotype(port) == IKOT_THREAD)) {
313 register thread_t thread;
314 register spl_t s;
315
316 thread = (thread_t) port->ip_kobject;
317 /*
318 * Check if the thread is in the right pset. Then
319 * pull it off its run queue. If it
320 * doesn't come, then it's not eligible.
321 */
322 s = splsched();
323 thread_lock(thread);
324 if ((thread->processor_set == cur_thread->processor_set)
325 && (rem_runq(thread) != RUN_QUEUE_NULL)) {
326 /*
327 * Hah, got it!!
328 */
329 thread_unlock(thread);
330 (void) splx(s);
331 ip_unlock(port);
332 /* XXX thread might disappear on us now? */
333 #if MACH_FIXPRI
334 if (thread->policy == POLICY_FIXEDPRI) {
335 myprocessor = current_processor();
336 myprocessor->quantum = thread->sched_data;
337 myprocessor->first_quantum = TRUE;
338 }
339 #endif MACH_FIXPRI
340 counter(c_thread_switch_handoff++);
341 thread_run(thread_switch_continue, thread);
342 /*
343 * Restore depressed priority
344 */
345 if (cur_thread->depress_priority >= 0)
346 (void) thread_depress_abort(cur_thread);
347
348 return(KERN_SUCCESS);
349 }
350 thread_unlock(thread);
351 (void) splx(s);
352 }
353 ip_unlock(port);
354 }
355
356 /*
357 * No handoff hint supplied, or hint was wrong. Call thread_block() in
358 * hopes of running something else. If nothing else is runnable,
359 * thread_block will detect this. WARNING: thread_switch with no
360 * option will not do anything useful if the thread calling it is the
361 * highest priority thread (can easily happen with a collection
362 * of timesharing threads).
363 */
364 #if NCPUS > 1
365 myprocessor = current_processor();
366 if (myprocessor->processor_set->runq.count > 0 ||
367 myprocessor->runq.count > 0)
368 #endif NCPUS > 1
369 {
370 counter(c_thread_switch_block++);
371 thread_block(thread_switch_continue);
372 }
373
374 /*
375 * Restore depressed priority
376 */
377 if (cur_thread->depress_priority >= 0)
378 (void) thread_depress_abort(cur_thread);
379 return(KERN_SUCCESS);
380 }
381
382 /*
383 * thread_depress_priority
384 *
385 * Depress thread's priority to lowest possible for specified period.
386 * Intended for use when thread wants a lock but doesn't know which
387 * other thread is holding it. As with thread_switch, fixed
388 * priority threads get exactly what they asked for. Users access
389 * this by the SWITCH_OPTION_DEPRESS option to thread_switch. A Time
390 * of zero will result in no timeout being scheduled.
391 */
392 void
393 thread_depress_priority(thread, depress_time)
394 register thread_t thread;
395 mach_msg_timeout_t depress_time;
396 {
397 unsigned int ticks;
398 spl_t s;
399
400 /* convert from milliseconds to ticks */
401 ticks = convert_ipc_timeout_to_ticks(depress_time);
402
403 s = splsched();
404 thread_lock(thread);
405
406 /*
407 * If thread is already depressed, override previous depression.
408 */
409 reset_timeout_check(&thread->depress_timer);
410
411 /*
412 * Save current priority, then set priority and
413 * sched_pri to their lowest possible values.
414 */
415 thread->depress_priority = thread->priority;
416 thread->priority = 31;
417 thread->sched_pri = 31;
418 if (ticks != 0)
419 set_timeout(&thread->depress_timer, ticks);
420
421 thread_unlock(thread);
422 (void) splx(s);
423 }
424
425 /*
426 * thread_depress_timeout:
427 *
428 * Timeout routine for priority depression.
429 */
430 void
431 thread_depress_timeout(thread)
432 register thread_t thread;
433 {
434 spl_t s;
435
436 s = splsched();
437 thread_lock(thread);
438
439 /*
440 * If we lose a race with thread_depress_abort,
441 * then depress_priority might be -1.
442 */
443
444 if (thread->depress_priority >= 0) {
445 thread->priority = thread->depress_priority;
446 thread->depress_priority = -1;
447 compute_priority(thread, FALSE);
448 }
449
450 thread_unlock(thread);
451 (void) splx(s);
452 }
453
454 /*
455 * thread_depress_abort:
456 *
457 * Prematurely abort priority depression if there is one.
458 */
459 kern_return_t
460 thread_depress_abort(thread)
461 register thread_t thread;
462 {
463 spl_t s;
464
465 if (thread == THREAD_NULL)
466 return(KERN_INVALID_ARGUMENT);
467
468 s = splsched();
469 thread_lock(thread);
470
471 /*
472 * Only restore priority if thread is depressed.
473 */
474 if (thread->depress_priority >= 0) {
475 reset_timeout_check(&thread->depress_timer);
476 thread->priority = thread->depress_priority;
477 thread->depress_priority = -1;
478 compute_priority(thread, FALSE);
479 }
480
481 thread_unlock(thread);
482 (void) splx(s);
483 return(KERN_SUCCESS);
484 }
/* Cache object: ab202ddc866497de6455474b0c86f985 */