1 /*
2 * Mach Operating System
 * Copyright (c) 1987-1993 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: syscall_subr.c,v $
29 * Revision 2.18 93/11/17 17:26:26 dbg
30 * Reverse return code from SWITCH_OPTION_IDLE. Returning at idle
31 * priority returns KERN_SUCCESS (and cancels depress timer (?) ).
32 * Timeout or thread_depress_abort returns KERN_ABORTED.
33 * [93/08/16 dbg]
34 *
35 * Added SWITCH_OPTION_IDLE to leave thread at depressed priority
36 * after thread_switch call.
37 * [93/07/16 dbg]
38 *
39 * Break up thread lock.
40 * [93/05/26 dbg]
41 *
42 * Implement thread_depression by switching thread to
43 * background policy temporarily. Keep non-depressed
44 * policy as thread->sched_policy. Use thread->timer
45 * instead of thread->depress_timer. Measure timeouts
46 * in seconds and nanoseconds internally.
47 *
48 * Removed include of kern/sched.h. Added ANSI function
49 * prototypes. Declared continuations as not returning.
50 * [93/05/21 dbg]
51 *
52 * Revision 2.17 93/05/15 18:54:53 mrt
53 * machparam.h -> machspl.h
54 *
55 * Revision 2.16 93/03/09 10:55:43 danner
56 * Removed duplicated decl for thread_syscall_return.
57 * [93/03/06 af]
58 *
59 * Revision 2.15 93/01/14 17:36:35 danner
60 * Proper spl typing.
61 * [92/12/01 af]
62 *
63 * Revision 2.14 92/08/03 17:39:28 jfriedl
64 * removed silly prototypes
65 * [92/08/02 jfriedl]
66 *
67 * Revision 2.13 92/05/21 17:16:06 jfriedl
68 * Removed unused var 'result' in swtch_pri().
69 * [92/05/16 jfriedl]
70 *
71 * Revision 2.12 92/04/05 13:09:19 rpd
72 * Fixed thread_switch argument types.
73 * Fixed thread_depress_priority with convert_ipc_timeout_to_ticks,
74 * so that rounding happens properly.
75 * [92/04/04 rpd]
76 *
77 * Revision 2.11 92/02/19 16:06:53 elf
78 * Change calls to compute_priority.
79 * [92/01/19 rwd]
80 * Changed thread_depress_priority to not schedule a timeout when
81 * time is 0.
82 * [92/01/10 rwd]
83 *
84 * Revision 2.10 91/07/31 17:48:19 dbg
85 * Fix timeout race.
86 * [91/07/30 17:05:37 dbg]
87 *
88 * Revision 2.9 91/05/18 14:33:47 rpd
89 * Changed to use thread->depress_timer.
90 * [91/03/31 rpd]
91 *
92 * Revision 2.8 91/05/14 16:47:24 mrt
93 * Correcting copyright
94 *
95 * Revision 2.7 91/05/08 12:48:54 dbg
96 * Add volatile declarations.
97 * Removed history for non-existent routines.
98 * [91/04/26 14:43:58 dbg]
99 *
100 * Revision 2.6 91/03/16 14:51:54 rpd
101 * Renamed ipc_thread_will_wait_with_timeout
102 * to thread_will_wait_with_timeout.
103 * [91/02/17 rpd]
104 * Added swtch_continue, swtch_pri_continue, thread_switch_continue.
105 * [91/01/17 rpd]
106 *
107 * Revision 2.5 91/02/05 17:29:34 mrt
108 * Changed to new Mach copyright
109 * [91/02/01 16:18:14 mrt]
110 *
111 * Revision 2.4 91/01/08 15:17:15 rpd
112 * Added continuation argument to thread_run.
113 * [90/12/11 rpd]
114 * Added continuation argument to thread_block.
115 * [90/12/08 rpd]
116 *
117 * Revision 2.3 90/11/05 14:31:36 rpd
118 * Restored missing multiprocessor untimeout failure code.
119 * [90/10/29 rpd]
120 *
121 * Revision 2.2 90/06/02 14:56:17 rpd
122 * Updated to new scheduling technology.
123 * [90/03/26 22:19:48 rpd]
124 *
125 * Revision 2.1 89/08/03 15:52:39 rwd
126 * Created.
127 *
128 * 3-Aug-88 David Golub (dbg) at Carnegie-Mellon University
129 * Removed all non-MACH code.
130 *
131 * 6-Dec-87 Michael Young (mwyoung) at Carnegie-Mellon University
132 * Removed old history.
133 *
134 * 19-Jun-87 David Black (dlb) at Carnegie-Mellon University
135 * MACH_TT: boolean for swtch and swtch_pri is now whether there is
136 * other work that the kernel could run instead of this thread.
137 *
138 * 7-May-87 David Black (dlb) at Carnegie-Mellon University
139 * New versions of swtch and swtch_pri for MACH_TT. Both return a
140 * boolean indicating whether a context switch was done. Documented.
141 *
142 * 31-Jul-86 Rick Rashid (rfr) at Carnegie-Mellon University
143 * Changed TPswtch_pri to set p_pri to 127 to make sure looping
144 * processes which want to simply reschedule do not monopolize the
145 * cpu.
146 *
147 * 3-Jul-86 Fil Alleva (faa) at Carnegie-Mellon University
148 * Added TPswtch_pri(). [Added to Mach, 20-jul-86, mwyoung.]
149 *
150 */
151
152 #include <cpus.h>
153 #include <mach_io_binding.h>
154
155 #include <mach/boolean.h>
156 #include <mach/policy.h>
157 #include <mach/thread_switch.h>
158
159 #include <ipc/ipc_port.h>
160 #include <ipc/ipc_space.h>
161
162 #include <kern/counters.h>
163 #include <kern/ipc_kobject.h>
164 #include <kern/kern_types.h>
165 #include <kern/mach_timer.h>
166 #include <kern/processor.h>
167 #include <kern/quantum.h>
168 #include <kern/sched_prim.h>
169 #include <kern/syscall_subr.h>
170 #include <kern/task.h>
171 #include <kern/thread.h>
172
173 #include <machine/machspl.h> /* for splsched */
174
175
176
177
178 /*
179 * swtch and swtch_pri both attempt to context switch (logic in
180 * thread_block no-ops the context switch if nothing would happen).
181 * A boolean is returned that indicates whether there is anything
182 * else runnable.
183 *
184 * This boolean can be used by a thread waiting on a
185 * lock or condition: If FALSE is returned, the thread is justified
186 * in becoming a resource hog by continuing to spin because there's
187 * nothing else useful that the processor could do. If TRUE is
188 * returned, the thread should make one more check on the
189 * lock and then be a good citizen and really suspend.
190 */
191
/*
 * Forward declarations (defined later in this file).
 */

/* Temporarily depress thread's scheduling policy; see definition below. */
void
thread_depress_priority(
	register thread_t	thread,
	mach_msg_timeout_t	depress_time);

/* Prematurely cancel a priority depression; see definition below. */
kern_return_t
thread_depress_abort(
	thread_t	thread);
203
/*
 * swtch_continue:
 *
 * Continuation for swtch().  Runs after the voluntary block and
 * finishes the system call: returns TRUE to the user if anything
 * else is runnable on this processor's run queue(s), FALSE
 * otherwise.  Does not return to its caller.
 */
no_return swtch_continue(void)
{
	register processor_t	myprocessor;

	myprocessor = current_processor();
	thread_syscall_return(
#if	MACH_IO_BINDING
		/* with IO binding, also check the per-processor run queue */
		myprocessor->runq.count > 0 ||
#endif
		myprocessor->processor_set->runq.count > 0);
	/* NOTREACHED */
}
216
/*
 * swtch:
 *
 * Voluntarily give up the processor.  Attempts a context switch by
 * blocking with swtch_continue as the continuation; the continuation
 * computes the boolean result (TRUE iff other work is runnable).
 *
 * On multiprocessors there is a fast path: if nothing else is
 * runnable, return FALSE immediately without blocking.  On a
 * uniprocessor the fast path is compiled out and we always block.
 */
boolean_t swtch(void)
{
#if	NCPUS > 1
	register processor_t	myprocessor;

	myprocessor = current_processor();
	if (
#if	MACH_IO_BINDING
	    myprocessor->runq.count == 0 &&
#endif
	    myprocessor->processor_set->runq.count == 0)
		return FALSE;		/* nothing else to run */
#endif	/* NCPUS > 1 */

	counter(c_swtch_block++);
	thread_block_noreturn(swtch_continue);
	/*NOTREACHED*/
}
235
/*
 * swtch_pri_continue:
 *
 * Continuation for swtch_pri().  First cancels the priority
 * depression set up by swtch_pri (detected by cur_policy differing
 * from sched_policy), then finishes the system call with the same
 * "anything else runnable?" boolean as swtch_continue.
 * Does not return to its caller.
 */
no_return swtch_pri_continue(void)
{
	register thread_t	thread = current_thread();
	register processor_t	myprocessor;

	/* still depressed?  restore the real policy before returning */
	if (thread->cur_policy != thread->sched_policy)
	    (void) thread_depress_abort(thread);
	myprocessor = current_processor();
	thread_syscall_return(
#if	MACH_IO_BINDING
		myprocessor->runq.count > 0 ||
#endif
		myprocessor->processor_set->runq.count > 0);
	/*NOTREACHED*/
}
251
/*
 * swtch_pri:
 *
 * Give up the processor and depress this thread's priority for a
 * short time (one min_quantum), so that looping threads that merely
 * want to reschedule cannot monopolize the cpu.
 *
 * The 'pri' argument is ignored (kept only for interface
 * compatibility; the #ifdef lint increment just silences an
 * unused-argument warning).
 *
 * Like swtch(), returns FALSE without blocking on a multiprocessor
 * when nothing else is runnable; otherwise blocks with
 * swtch_pri_continue, which undoes the depression and returns the
 * runnable boolean.
 */
boolean_t swtch_pri(
	int		pri)
{
	register thread_t	thread = current_thread();
#if	NCPUS > 1
	register processor_t	myprocessor;
#endif

#ifdef	lint
	pri++;
#endif	/* lint */

#if	NCPUS > 1
	myprocessor = current_processor();
	if (
#if	MACH_IO_BINDING
	    myprocessor->runq.count == 0 &&
#endif
	    myprocessor->processor_set->runq.count == 0)
		return FALSE;		/* nothing else to run */
#endif	/* NCPUS > 1 */

	/*
	 * XXX need to think about depression duration.
	 * XXX currently using min quantum.
	 */
	thread_depress_priority(thread, min_quantum);

	counter(c_swtch_pri_block++);
	thread_block_noreturn(swtch_pri_continue);
	/*NOTREACHED*/
}
284
/*
 * Data saved for thread_switch_continue: the (referenced) thread
 * that was handed off to, or THREAD_NULL if there was no handoff.
 *
 * The structure is overlaid on the thread's 'saved' scratch area via
 * the SAVE macro; assumes it fits within thread->saved -- TODO
 * confirm against the declaration of 'saved' in kern/thread.h.
 */
struct thread_switch_save {
	thread_t	thread;
};

/* Access this thread's saved thread_switch state. */
#define	SAVE(thread)	((struct thread_switch_save *)&(thread)->saved)
293
/*
 * thread_switch_continue:
 *
 * Continuation for thread_switch() (options other than
 * SWITCH_OPTION_IDLE).  Cancels any remaining priority depression,
 * drops the reference taken on the handoff thread, and completes
 * the system call with KERN_SUCCESS.  Does not return to its caller.
 *
 * NOTE(review): SAVE(cur_thread)->thread may be THREAD_NULL when no
 * handoff occurred; presumably thread_deallocate tolerates
 * THREAD_NULL -- verify against its definition.
 */
no_return thread_switch_continue(void)
{
	register thread_t	cur_thread = current_thread();

	/*
	 * Restore depressed priority
	 */
	if (cur_thread->cur_policy != cur_thread->sched_policy) {
	    (void) thread_depress_abort(cur_thread);
	}
	thread_deallocate(SAVE(cur_thread)->thread);
	thread_syscall_return(KERN_SUCCESS);
	/*NOTREACHED*/
}
308
/*
 * thread_switch_idle_continue:
 *
 * Continuation for thread_switch() with SWITCH_OPTION_IDLE.
 * Unlike thread_switch_continue, this does NOT cancel the
 * depression: the caller asked to come back at depressed (idle)
 * priority.  The return code only reports whether that happened:
 *
 *	KERN_SUCCESS	thread is still depressed (returning idle)
 *	KERN_ABORTED	depression timed out or was aborted
 *
 * NOTE(review): the history log says returning at idle priority
 * "cancels depress timer (?)", but no cancellation is visible here;
 * the depress timer presumably remains armed -- confirm intended.
 * Does not return to its caller.
 */
no_return thread_switch_idle_continue(void)
{
	register thread_t	cur_thread = current_thread();
	kern_return_t		kr;

	/*
	 * Report whether we are still at depressed priority.
	 */
	if (cur_thread->cur_policy != cur_thread->sched_policy) {
	    kr = KERN_SUCCESS;		/* still depressed: idle return */
	}
	else {
	    kr = KERN_ABORTED;		/* depression timed out/aborted */
	}
	thread_deallocate(SAVE(cur_thread)->thread);
	thread_syscall_return(kr);
	/*NOTREACHED*/
}
327
/*
 * thread_switch:
 *
 * Context switch.  User may supply thread hint.
 *
 * Fixed priority threads that call this get what they asked for
 * even if that violates priority order.
 *
 *	thread_name	send right naming a hinted thread to hand
 *			off to, or 0 for no hint.
 *	option		SWITCH_OPTION_{NONE,DEPRESS,IDLE,WAIT}.
 *	option_time	depression/wait time in milliseconds
 *			(used by DEPRESS, IDLE, WAIT).
 *
 * Returns KERN_INVALID_ARGUMENT for an unknown option; otherwise
 * completes either directly (multiprocessor, nothing else runnable)
 * or via thread_switch_continue / thread_switch_idle_continue.
 */
kern_return_t thread_switch(
	mach_port_t	thread_name,
	int		option,
	mach_msg_timeout_t option_time)
{
	register thread_t	cur_thread = current_thread();
	register processor_t	myprocessor;
	ipc_port_t		port;
	continuation_t		continuation = thread_switch_continue;

	/*
	 * Process option.
	 */
	switch (option) {
	    case SWITCH_OPTION_NONE:
		/*
		 * Nothing to do.
		 */
		break;

	    case SWITCH_OPTION_DEPRESS:
		/*
		 * Depress priority for given time.
		 */
		thread_depress_priority(cur_thread, option_time);
		break;

	    case SWITCH_OPTION_IDLE:
		/*
		 * Depress priority for given time.
		 * Return at idle priority unless depression
		 * aborted or timed out.
		 */
		thread_depress_priority(cur_thread, option_time);
		continuation = thread_switch_idle_continue;
		break;

	    case SWITCH_OPTION_WAIT:
		/*
		 * Sleep for given time.
		 */
		thread_will_wait_with_timeout(cur_thread, option_time);
		break;

	    default:
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Check and act on thread hint if appropriate.
	 */
	if ((thread_name != 0) &&
	    (ipc_port_translate_send(cur_thread->task->itk_space,
				     thread_name, &port) == KERN_SUCCESS)) {
	    /* port is locked, but it might not be active */

	    /*
	     * Get corresponding thread.
	     */
	    if (ip_active(port) && (ip_kotype(port) == IKOT_THREAD)) {
		register thread_t thread;
		spl_t s;

		thread = (thread_t) port->ip_kobject;
		/*
		 * Check if the thread is in the right pset. Then
		 * pull it off its run queue. If it
		 * doesn't come, then it's not eligible.
		 */
		s = splsched();
		thread_sched_lock(thread);
		if ((thread->processor_set == cur_thread->processor_set)
		    && (rem_runq(thread) != RUN_QUEUE_HEAD_NULL)) {
			/*
			 * Hah, got it!!
			 */
			thread_sched_unlock(thread);
			splx(s);
			thread_reference(thread);	/* keep it! */
			ip_unlock(port);

			{
			    extern sched_policy_data_t	fp_sched_policy;

			    /*
			     * Fixed-priority handoff target: grant it a
			     * full fresh quantum on this processor.
			     */
			    if (thread->sched_policy == &fp_sched_policy) {
				myprocessor = current_processor();
				myprocessor->quantum =
				    myprocessor->processor_set->set_quantum;
				myprocessor->first_quantum = TRUE;
			    }
			}

			/* remember handoff target so the continuation
			   can release the reference taken above */
			SAVE(cur_thread)->thread = thread;
			counter(c_thread_switch_handoff++);
			thread_run_noreturn(continuation, thread);
			/*NOTREACHED*/
		}
		thread_sched_unlock(thread);
		splx(s);
	    }
	    ip_unlock(port);
	}

	/*
	 * No handoff hint supplied, or hint was wrong.  Call thread_block() in
	 * hopes of running something else.  If nothing else is runnable,
	 * thread_block will detect this.  WARNING: thread_switch with no
	 * option will not do anything useful if the thread calling it is the
	 * highest priority thread (can easily happen with a collection
	 * of timesharing threads).
	 *
	 * On a uniprocessor the guard below is compiled out, so we
	 * always block and the direct-return code after the braces is
	 * unreachable.
	 */
#if	NCPUS > 1
	myprocessor = current_processor();
	if (
#if	MACH_IO_BINDING
	    myprocessor->runq.count > 0 ||
#endif
	    myprocessor->processor_set->runq.count > 0)
#endif	/* NCPUS > 1 */
	{
	    /* no handoff target: continuation has nothing to deallocate */
	    SAVE(cur_thread)->thread = THREAD_NULL;

	    counter(c_thread_switch_block++);
	    thread_block(continuation);
	    /*NOTREACHED*/
	}

	/*
	 * Multiprocessor only: nothing else runnable, return directly.
	 */
	if (option == SWITCH_OPTION_IDLE) {
	    /*
	     * Return whether idle or not
	     */
	    return (cur_thread->cur_policy != cur_thread->sched_policy)
		    ? KERN_SUCCESS
		    : KERN_ABORTED;
	}
	else {
	    /*
	     * Restore depressed priority
	     */
	    if (cur_thread->cur_policy != cur_thread->sched_policy) {
		(void) thread_depress_abort(cur_thread);
	    }
	    return KERN_SUCCESS;
	}
}
481
/* Timer callback for priority depression; defined below. */
void
thread_depress_timeout(
	void	*param);	/* forward */
485
/*
 * thread_depress_priority
 *
 * Depress thread's priority to lowest possible for specified period.
 * Intended for use when thread wants a lock but doesn't know which
 * other thread is holding it.  As with thread_switch, fixed
 * priority threads get exactly what they asked for.  Users access
 * this by the SWITCH_OPTION_DEPRESS option to thread_switch.  A time
 * of zero will result in no timeout being scheduled (the depression
 * then lasts until thread_depress_abort).
 *
 * Depression is implemented by switching the thread to the
 * background policy; the real policy remains in thread->sched_policy
 * and the depressed state is detectable as
 * cur_policy != sched_policy.
 */
void
thread_depress_priority(
	register thread_t	thread,
	mach_msg_timeout_t	depress_time)	/* milliseconds */
{
	time_spec_t	interval;
	spl_t	s;

	extern struct sched_policy	bg_sched_policy;

	/* convert caller's milliseconds to internal sec/nsec form */
	milliseconds_to_time_spec(depress_time, interval);

	s = splsched();
	thread_sched_lock(thread);

	/*
	 * If thread is already depressed, override previous depression.
	 */
	timer_elt_remove(&thread->timer);

	/*
	 * Set current policy to the background policy.  Real
	 * scheduling policy remains as sched_policy.
	 */
	thread->cur_policy = &bg_sched_policy;
	thread->policy_index = bg_sched_policy.rank;

	/* arm the restore timer unless the caller asked for zero time */
	if (time_spec_nonzero(interval)) {
	    thread->timer.te_fcn = thread_depress_timeout;
	    timer_elt_enqueue(&thread->timer, interval, FALSE);
	}

	thread_sched_unlock(thread);
	splx(s);
}
531
/*
 * thread_depress_timeout:
 *
 * Timeout routine for priority depression.  Runs when the depress
 * timer fires; restores the thread's real scheduling policy.
 *
 *	param	the depressed thread (passed as void * by the
 *		timer machinery).
 */
void
thread_depress_timeout(
	void	*param)
{
	register thread_t	thread = (thread_t) param;
	spl_t	s;

	s = splsched();
	thread_sched_lock(thread);

	/*
	 * If we lose a race with thread_depress_abort,
	 * then cur_policy might be the same as sched_policy.
	 */

	if (thread->cur_policy != thread->sched_policy) {
	    /*
	     * Remove thread from run queue,
	     * restore old policy,
	     * put thread back on policy`s run queue.
	     */
	    run_queue_head_t	runq;

	    runq = rem_runq(thread);	/* from old policy */

	    thread->cur_policy = thread->sched_policy;
	    thread->policy_index = thread->sched_policy->rank;

	    if (runq != RUN_QUEUE_HEAD_NULL)
		thread_setrun(thread, FALSE);	/* to new policy */
	    else
		UPDATE_PRIORITY(thread);	/* not runnable: just fix priority */
	}

	thread_sched_unlock(thread);
	splx(s);
}
574
/*
 * thread_depress_abort:
 *
 * Prematurely abort priority depression if there is one.
 *
 * Unlike thread_depress_timeout, this also removes the pending
 * depress timer element.
 *
 * Returns KERN_INVALID_ARGUMENT for THREAD_NULL, otherwise
 * KERN_SUCCESS (whether or not the thread was depressed).
 */
kern_return_t
thread_depress_abort(
	thread_t	thread)
{
	spl_t	s;

	if (thread == THREAD_NULL)
	    return KERN_INVALID_ARGUMENT;

	s = splsched();
	thread_sched_lock(thread);

	/*
	 * Only restore priority if thread is depressed.
	 */
	if (thread->cur_policy != thread->sched_policy) {
	    run_queue_head_t	runq;

	    runq = rem_runq(thread);	/* from old policy */

	    /* cancel the pending depression timeout, if any */
	    timer_elt_remove(&thread->timer);

	    thread->cur_policy = thread->sched_policy;
	    thread->policy_index = thread->sched_policy->rank;

	    if (runq != RUN_QUEUE_HEAD_NULL)
		thread_setrun(thread, FALSE);	/* to new policy */
	    else
		UPDATE_PRIORITY(thread);	/* not runnable: just fix priority */
	}

	thread_sched_unlock(thread);
	splx(s);
	return KERN_SUCCESS;
}
/* Cache object: 7de5af0a1421f50cb493bd1d290f0cd7 */