FreeBSD/Linux Kernel Cross Reference
sys/sched_policy/fp.c
1 /*
2 * Mach Operating System
 * Copyright (c) 1987-1993 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: fp.c,v $
29 * Revision 2.2 93/11/17 18:37:18 dbg
30 * Add per-policy scheduling parameters to thread and run queue.
31 * [93/05/10 dbg]
32 *
33 * Moved common operations to kern/run_queues.c.
34 * [93/04/10 dbg]
35 *
36 * Converted to per-policy run queue structure. Merged round-robin
37 * and fifo scheduling: difference is selected per-thread.
38 * [93/04/09 dbg]
39 *
40 * Merged into microkernel mainline.
41 *
42 * Added policy_init()
43 * [92/06/01 savage]
44 * Created from kern/sched_prim.c and tatsuo's sched_misc.c
45 * [92/02/01 savage]
46 *
47 */
48
49 /*
50 * Fixed priority scheduling.
51 */
52 #include <cpus.h>
53 #include <mach_host.h>
54 #include <mach_kdb.h>
55 #include <mach_io_binding.h>
56
57 #include <mach/boolean.h>
58 #include <mach/kern_return.h>
59
60 #include <kern/ast.h>
61 #include <kern/kalloc.h>
62 #include <kern/kern_io.h>
63 #include <kern/run_queues.h>
64 #include <kern/sched_policy.h>
65 #include <kern/processor.h>
66 #include <kern/thread.h>
67
68 #include <sched_policy/standard.h>
69
70 /*
71 * Fixed-priority policy.
72 * A thread may select itself to be preemptable (round-robin)
73 * or not (first-come-first-served).
74 */
75
76 /*
77 * Uses 32 run queues, with a low marker.
78 */
/*
 * Lower number == stronger priority: 0 is the strongest level,
 * MAX_PRIORITY the weakest.  One FIFO queue header per level.
 */
#define	MAX_PRIORITY	31

#define	NRQS	(MAX_PRIORITY + 1)
struct fp_run_queue {
	struct run_queue rq;		/* common structure; must be first
					   (fp_runq casts rq* -> fp_run_queue*) */
	queue_head_t	fp_queue[NRQS];	/* one queue per priority level */
	int		fp_low;		/* index of strongest non-empty queue;
					   only meaningful while rq_count > 0 */
	int		fp_max_priority; /* per-pset limit on thread priority */
};
typedef struct fp_run_queue *	fp_run_queue_t;
89
90 #define fp_runq(rq) ((struct fp_run_queue *)(rq))
91 #define fp_count rq.rq_count
92
/*
 * Per-thread scheduling fields, overlaid on thread->sched_data
 * via the fp_sched() macro below:
 */
struct fp_sched_data {
	int		max_priority;	/* strongest priority thread may use
					   (numerically smallest allowed) */
	int		base_priority;	/* priority used for run queueing */
	boolean_t	no_preempt;	/* TRUE => FIFO: equal-priority
					   threads may not preempt this one */
};

/* Policy-private view of a thread's scheduling state. */
#define	fp_sched(th)	((struct fp_sched_data *)&(th)->sched_data)
103
104 extern struct sched_policy fp_sched_policy; /* forward */
105
106 /*
107 * Choose thread.
108 */
/*
 * fp_thread_dequeue:
 *
 * Remove and return the strongest-priority thread (head of the
 * lowest-numbered non-empty queue).  Must not be called on an
 * empty run queue.  Also grants the current processor a full
 * quantum from its processor set.
 */
thread_t
fp_thread_dequeue(
	run_queue_t	runq)
{
	fp_run_queue_t	rq = fp_runq(runq);
	int		low;
	queue_t		q;
	queue_entry_t	elt;
	processor_t	processor;

	assert(rq->fp_count > 0);

	/*
	 * Low marker is kept accurate while threads are queued,
	 * so fp_queue[fp_low] must be non-empty here.
	 */
	low = rq->fp_low;
	q = &rq->fp_queue[low];
	assert(!queue_empty(q));

	dequeue_head_macro(q, elt);

	if (--rq->fp_count > 0) {
		/*
		 * Update low marker: scan upward (weaker priorities)
		 * to the next non-empty queue.  The scan terminates
		 * because at least one thread remains queued.
		 */
		while (queue_empty(q)) {
			low++;
			q++;
		}
		rq->fp_low = low;
	}

	/* Fresh quantum for whatever runs next on this processor. */
	processor = current_processor();
	processor->quantum = processor->processor_set->set_quantum;
	processor->first_quantum = TRUE;

	/* NOTE(review): assumes the queue links sit at the start of
	   struct thread, so the entry pointer is the thread — confirm. */
	return (thread_t) elt;
}
147
148 /*
149 * Put a thread onto a run queue in priority order.
150 * Return whether it can preempt the current thread.
151 */
152 boolean_t fp_thread_enqueue(
153 run_queue_t runq,
154 thread_t thread,
155 boolean_t may_preempt)
156 {
157 register fp_run_queue_t rq;
158 register unsigned int whichq;
159 register queue_t q;
160
161 whichq = fp_sched(thread)->base_priority;
162 if (whichq >= NRQS) {
163 printf("fp_thread_setrun: pri too high (%d)\n", whichq);
164 whichq = NRQS - 1;
165 }
166
167 rq = fp_runq(runq);
168
169 q = &rq->fp_queue[whichq];
170 enqueue_tail_macro(q, (queue_entry_t) thread);
171
172 if (whichq < rq->fp_low || rq->fp_count == 0)
173 rq->fp_low = whichq; /* minimize */
174 rq->fp_count++;
175
176 if (!may_preempt)
177 return FALSE;
178
179 {
180 thread_t cth = current_thread();
181
182 return fp_sched(cth)->base_priority > fp_sched(thread)->base_priority
183 || (fp_sched(cth)->base_priority == fp_sched(thread)->base_priority
184 && !fp_sched(cth)->no_preempt);
185 }
186 }
187
188 /*
189 * Remove a thread from the run queue.
190 */
191 void fp_thread_remqueue(
192 run_queue_t runq,
193 thread_t thread)
194 {
195 fp_run_queue_t rq = fp_runq(runq);
196
197 remqueue(&rq->fp_queue[0], (queue_entry_t) thread);
198 rq->fp_count--;
199 }
200
201 boolean_t fp_csw_needed(
202 run_queue_t runq,
203 thread_t thread)
204 {
205 fp_run_queue_t rq = fp_runq(runq);
206
207 if (rq->fp_low < fp_sched(thread)->base_priority)
208 return TRUE;
209
210 if (rq->fp_low > fp_sched(thread)->base_priority)
211 return FALSE;
212
213 /* can we be preempted by an equal-priority thread? */
214
215 if (fp_sched(thread)->no_preempt)
216 return FALSE;
217
218 if (current_processor()->first_quantum)
219 return FALSE;
220
221 return TRUE;
222 }
223
224 /*
225 * Set scheduling limit value for processor set.
226 */
227 kern_return_t
228 fp_runq_set_limit(
229 run_queue_t runq,
230 policy_param_t limit,
231 natural_t count)
232 {
233 int max_priority;
234
235 if (count == 0) {
236 /*
237 * Default value for max priority.
238 * Use 0 for fixed priority.
239 */
240 max_priority = 0;
241 }
242 else {
243 if (count < POLICY_PARAM_FIXEDPRI_COUNT)
244 return KERN_INVALID_ARGUMENT;
245
246 max_priority = ((struct policy_param_fixedpri *)limit)->priority;
247 if (max_priority < 0 || max_priority > MAX_PRIORITY)
248 return KERN_INVALID_ARGUMENT;
249 }
250
251 fp_runq(runq)->fp_max_priority = max_priority;
252 return KERN_SUCCESS;
253 }
254
255 /*
256 * Get scheduling limit value for processor set.
257 */
258 kern_return_t
259 fp_runq_get_limit(
260 run_queue_t runq,
261 policy_param_t limit,
262 natural_t *count)
263 {
264 if (*count < POLICY_PARAM_FIXEDPRI_COUNT)
265 return KERN_INVALID_ARGUMENT;
266
267 ((struct policy_param_fixedpri *)limit)->priority
268 = fp_runq(runq)->fp_max_priority;
269 ((struct policy_param_fixedpri *)limit)->no_preempt = TRUE;
270 *count = POLICY_PARAM_FIXEDPRI_COUNT;
271 return KERN_SUCCESS;
272 }
273
274 kern_return_t
275 fp_thread_set_limit(
276 thread_t thread,
277 policy_param_t limit,
278 natural_t count)
279 {
280 int max_priority;
281
282 if (count < POLICY_PARAM_FIXEDPRI_COUNT)
283 return KERN_INVALID_ARGUMENT;
284
285 max_priority = ((struct policy_param_fixedpri *)limit)->priority;
286 if (max_priority < 0 || max_priority > MAX_PRIORITY)
287 return KERN_INVALID_ARGUMENT;
288
289 fp_sched(thread)->max_priority = max_priority;
290 return KERN_SUCCESS;
291 }
292
/*
 * fp_thread_set_param:
 *
 * Set the scheduling parameters for a thread.  If they are not
 * supplied (count == 0), per-policy defaults are used when
 * 'new_policy' is TRUE; otherwise the thread`s current parameters
 * are kept.  Likewise the limit: when 'new_policy' is TRUE it is
 * taken from the thread`s processor set; otherwise the thread`s
 * current limit value is used.  If 'check_limits' is TRUE, returns
 * an error if parameter values violate the limits; otherwise, the
 * limits are silently enforced.
 *
 * Remember: lower number == stronger priority, so "pri < limit"
 * means the priority is stronger than the limit allows.
 */
kern_return_t
fp_thread_set_param(
	thread_t	thread,
	policy_param_t	param,
	natural_t	count,
	boolean_t	new_policy,
	boolean_t	check_limits)
{
	int		base_priority;
	int		max_priority;
	int		pset_max_priority;
	boolean_t	no_preempt;

	/* limit imposed by the thread`s processor set */
	pset_max_priority =
	    fp_runq(thread->processor_set->runq.runqs[fp_sched_policy.rank])
		->fp_max_priority;

	if (new_policy) {
		/*
		 * Thread is not already running this policy.
		 * Use default values for parameters and limit.
		 */
		base_priority = FP_BASEPRI_USER;
		max_priority = pset_max_priority;
		no_preempt = FALSE;
	}
	else {
		/*
		 * Thread is already running policy.  Use
		 * thread`s current values.
		 */
		base_priority = fp_sched(thread)->base_priority;
		max_priority = fp_sched(thread)->max_priority;
		no_preempt = fp_sched(thread)->no_preempt;
	}

	if (count != 0) {
		/*
		 * Data supplied: use it for scheduling parameters,
		 * overriding the defaults chosen above.
		 */
		struct policy_param_fixedpri *pd;

		if (count < POLICY_PARAM_FIXEDPRI_COUNT)
			return KERN_INVALID_ARGUMENT;

		pd = (struct policy_param_fixedpri *)param;

		base_priority = pd->priority;
		if (base_priority < 0 || base_priority > MAX_PRIORITY)
			return KERN_INVALID_ARGUMENT;
		no_preempt = (pd->no_preempt != 0);
	}

	if (check_limits) {
		/*
		 * Error if parameters violate limits
		 * (base stronger than the max allowed).
		 */
		if (base_priority < max_priority)
			return KERN_FAILURE;
	}
	else {
		/*
		 * Silently clamp: weaken the thread limit to the
		 * processor set`s, then weaken the base priority
		 * to the (possibly adjusted) limit.
		 */
		if (max_priority < pset_max_priority)
			max_priority = pset_max_priority;
		if (base_priority < max_priority)
			base_priority = max_priority;
	}

	fp_sched(thread)->base_priority = base_priority;
	fp_sched(thread)->max_priority = max_priority;
	fp_sched(thread)->no_preempt = no_preempt;

	return KERN_SUCCESS;
}
381
382 kern_return_t
383 fp_thread_get_param(
384 thread_t thread,
385 policy_param_t param, /* params and limits */
386 natural_t *count)
387 {
388 struct policy_info_fixedpri *pi;
389
390 if (*count < POLICY_INFO_FIXEDPRI_COUNT)
391 return KERN_INVALID_ARGUMENT;
392
393 pi = (struct policy_info_fixedpri *)param;
394
395 pi->priority = fp_sched(thread)->base_priority;
396 pi->no_preempt = fp_sched(thread)->no_preempt;
397 pi->max_priority= fp_sched(thread)->max_priority;
398 *count = POLICY_INFO_FIXEDPRI_COUNT;
399
400 return KERN_SUCCESS;
401 }
402
403 /*
404 * Set the default scheduling parameters for a task,
405 * to be used for newly created threads.
406 */
407 kern_return_t
408 fp_task_set_param(
409 task_t task,
410 policy_param_t param,
411 natural_t count)
412 {
413 int priority;
414 boolean_t no_preempt;
415
416 if (count == 0) {
417 /*
418 * Use default value for priority.
419 */
420 priority = FP_BASEPRI_USER;
421 no_preempt = FALSE;
422 }
423 else {
424 struct policy_param_fixedpri *pd;
425
426 if (count < POLICY_PARAM_FIXEDPRI_COUNT)
427 return KERN_INVALID_ARGUMENT;
428
429 pd = (struct policy_param_fixedpri *)param;
430
431 priority = pd->priority;
432 if (priority < 0 || priority > MAX_PRIORITY)
433 return KERN_INVALID_ARGUMENT;
434 no_preempt = (pd->no_preempt != 0);
435 }
436
437 ((struct policy_param_fixedpri *)&task->sched_data)
438 ->priority = priority;
439 ((struct policy_param_fixedpri *)&task->sched_data)
440 ->no_preempt = no_preempt;
441 task->sched_data_count = POLICY_PARAM_FIXEDPRI_COUNT;
442
443 return KERN_SUCCESS;
444 }
445
446 /*
447 * Get the default scheduling parameters for a task.
448 */
449 kern_return_t
450 fp_task_get_param(
451 task_t task,
452 policy_param_t param,
453 natural_t *count)
454 {
455 struct policy_param_fixedpri *pi;
456
457 if (*count < POLICY_PARAM_FIXEDPRI_COUNT)
458 return KERN_INVALID_ARGUMENT;
459
460 pi = (struct policy_param_fixedpri *)param;
461
462 pi->priority =
463 ((struct policy_param_fixedpri *)&task->sched_data)->priority;
464 pi->no_preempt =
465 ((struct policy_param_fixedpri *)&task->sched_data)->no_preempt;
466 *count = POLICY_PARAM_FIXEDPRI_COUNT;
467
468 return KERN_SUCCESS;
469 }
470
471
472 run_queue_t
473 fp_run_queue_alloc(void)
474 {
475 fp_run_queue_t rq;
476 int i;
477
478 rq = (fp_run_queue_t) kalloc(sizeof(struct fp_run_queue));
479
480 run_queue_init(&rq->rq, &fp_sched_policy);
481
482 for (i = 0; i < NRQS; i++)
483 queue_init(&rq->fp_queue[i]);
484 rq->fp_low = NRQS;
485
486 return &rq->rq;
487 }
488
/*
 * fp_run_queue_free:
 *
 * Release a run queue allocated by fp_run_queue_alloc.
 * Caller is responsible for ensuring the queue is empty.
 */
void
fp_run_queue_free(
	run_queue_t	runq)
{
	kfree((vm_offset_t) runq, sizeof(struct fp_run_queue));
}
495
496 #if MACH_KDB
497 #include <ddb/db_output.h>
498 void
499 fp_thread_db_print(
500 thread_t thread)
501 {
502 db_printf("%4s %2d",
503 (fp_sched(thread)->no_preempt) ? "FIFO" : "RR",
504 fp_sched(thread)->base_priority);
505 }
506 #endif
507
/*
 * Statically allocated policy structure for the fixed-priority
 * policy.  The operations vector is positional: entries must stay
 * in the order declared by struct sched_policy`s sched_ops
 * (NOTE(review): layout taken on faith from kern/sched_policy.h).
 */
struct sched_policy fp_sched_policy = {
    {
	/* sched_ops */
	fp_thread_dequeue,		/* take strongest queued thread */
	fp_thread_enqueue,		/* queue thread, report preemption */
	fp_thread_remqueue,		/* remove a specific thread */

	fp_csw_needed,			/* should current thread yield? */
	ast_check,
	0,				/* no update_priority */

	fp_run_queue_alloc,
	fp_run_queue_free,

	fp_runq_set_limit,
	fp_runq_get_limit,
	fp_thread_set_limit,
	fp_thread_set_param,
	fp_thread_get_param,
	fp_task_set_param,
	fp_task_get_param,

	fp_thread_db_print		/* defined only under MACH_KDB */
    },
    POLICY_FIXEDPRI,			/* policy identifier */
    "fixed priority"			/* human-readable name */
};
538
Cache object: 83cb5d5bca84e2f8572812db4276ea4a
|