sys/kernel/proc.c
1 /* This file contains essentially all of the process and message handling.
2 * Together with "mpx.s" it forms the lowest layer of the MINIX kernel.
3 * There is one entry point from the outside:
4 *
5 * sys_call: a system call, i.e., the kernel is trapped with an INT
6 *
7 * As well as several entry points used from the interrupt and task level:
8 *
9 * lock_notify: notify a process of a system event
10 * lock_send: send a message to a process
11 * lock_enqueue: put a process on one of the scheduling queues
12 * lock_dequeue: remove a process from the scheduling queues
13 *
14 * Changes:
15 * Aug 19, 2005 rewrote scheduling code (Jorrit N. Herder)
16 * Jul 25, 2005 rewrote system call handling (Jorrit N. Herder)
17 * May 26, 2005 rewrote message passing functions (Jorrit N. Herder)
18 * May 24, 2005 new notification system call (Jorrit N. Herder)
19 * Oct 28, 2004 nonblocking send and receive calls (Jorrit N. Herder)
20 *
21 * The code here is critical to make everything work and is important for the
22 * overall performance of the system. A large fraction of the code deals with
23  * list manipulation. To make this both easy to understand and fast to execute,
24  * pointer pointers are used throughout the code. Pointer pointers avoid the
25  * special cases otherwise needed for the head or tail of a linked list.
26 *
27 * node_t *queue, *new_node; // assume these as global variables
28 * node_t **xpp = &queue; // get pointer pointer to head of queue
29 * while (*xpp != NULL) // find last pointer of the linked list
30 * xpp = &(*xpp)->next; // get pointer to next pointer
31 * *xpp = new_node; // now replace the end (the NULL pointer)
32 * new_node->next = NULL; // and mark the new end of the list
33 *
34 * For example, when adding a new node to the end of the list, one normally
35 * makes an exception for an empty list and looks up the end of the list for
36 * nonempty lists. As shown above, this is not required with pointer pointers.
37 */
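/* For contrast, a minimal sketch (illustration only, not kernel code) of the
 * same append written without pointer pointers; the empty list then needs its
 * own branch and the walk must stop one node early:
 *
 *	if (queue == NULL) {			// special case: empty list
 *		queue = new_node;
 *	} else {
 *		node_t *np = queue;		// walk to the last node
 *		while (np->next != NULL)
 *			np = np->next;
 *		np->next = new_node;		// hook new node after the tail
 *	}
 *	new_node->next = NULL;			// mark the new end of the list
 */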
38
39 #include <minix/com.h>
40 #include <minix/callnr.h>
41 #include "kernel.h"
42 #include "proc.h"
43
44 /* Scheduling and message passing functions. The functions are available to
45 * other parts of the kernel through lock_...(). The lock temporarily disables
46 * interrupts to prevent race conditions.
47 */
48 FORWARD _PROTOTYPE( int mini_send, (struct proc *caller_ptr, int dst,
49 message *m_ptr, unsigned flags));
50 FORWARD _PROTOTYPE( int mini_receive, (struct proc *caller_ptr, int src,
51 message *m_ptr, unsigned flags));
52 FORWARD _PROTOTYPE( int mini_notify, (struct proc *caller_ptr, int dst));
53 FORWARD _PROTOTYPE( int deadlock, (int function,
54 register struct proc *caller, int src_dst));
55 FORWARD _PROTOTYPE( void enqueue, (struct proc *rp));
56 FORWARD _PROTOTYPE( void dequeue, (struct proc *rp));
57 FORWARD _PROTOTYPE( void sched, (struct proc *rp, int *queue, int *front));
58 FORWARD _PROTOTYPE( void pick_proc, (void));
59
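/* BuildMess() below assembles a notification message in place: it fills in the
 * source, the notification type derived from the source, and a timestamp; for
 * the pseudo-sources HARDWARE and SYSTEM it also copies the pending interrupt
 * or signal bits into the argument field and clears them as a side effect.
 * Note that the macro expands to several statements without a do/while(0)
 * wrapper, so it is only used as a full statement, as in mini_receive() and
 * mini_notify() below.
 */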
60 #define BuildMess(m_ptr, src, dst_ptr) \
61 (m_ptr)->m_source = (src); \
62 (m_ptr)->m_type = NOTIFY_FROM(src); \
63 (m_ptr)->NOTIFY_TIMESTAMP = get_uptime(); \
64 switch (src) { \
65 case HARDWARE: \
66 (m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_int_pending; \
67 priv(dst_ptr)->s_int_pending = 0; \
68 break; \
69 case SYSTEM: \
70 (m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_sig_pending; \
71 priv(dst_ptr)->s_sig_pending = 0; \
72 break; \
73 }
74
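/* CopyMess(s, sp, sm, dp, dm) copies message 'sm' in the address space of the
 * process pointed to by 'sp' to message 'dm' in the address space of 'dp',
 * with 's' supplied as the source process number. On INTEL this is done by the
 * assembly routine cp_mess(), which is given the physical base of each data
 * segment and the virtual message addresses.
 */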
75 #if (CHIP == INTEL)
76 #define CopyMess(s,sp,sm,dp,dm) \
77 cp_mess(s, (sp)->p_memmap[D].mem_phys, \
78 (vir_bytes)sm, (dp)->p_memmap[D].mem_phys, (vir_bytes)dm)
79 #endif /* (CHIP == INTEL) */
80
81 #if (CHIP == M68000)
82 /* M68000 does not have cp_mess() in assembly like INTEL. Declare prototype
83 * for cp_mess() here and define the function below. Also define CopyMess.
84 */
85 #endif /* (CHIP == M68000) */
86
87 /*===========================================================================*
88 * sys_call *
89 *===========================================================================*/
90 PUBLIC int sys_call(call_nr, src_dst, m_ptr)
91 int call_nr; /* system call number and flags */
92 int src_dst; /* src to receive from or dst to send to */
93 message *m_ptr; /* pointer to message in the caller's space */
94 {
95 /* System calls are done by trapping to the kernel with an INT instruction.
96 * The trap is caught and sys_call() is called to send or receive a message
97 * (or both). The caller is always given by 'proc_ptr'.
98 */
99 register struct proc *caller_ptr = proc_ptr; /* get pointer to caller */
100 int function = call_nr & SYSCALL_FUNC; /* get system call function */
101 unsigned flags = call_nr & SYSCALL_FLAGS; /* get flags */
102 int mask_entry; /* bit to check in send mask */
103 int group_size; /* used for deadlock check */
104 int result; /* the system call's result */
105 vir_clicks vlo, vhi; /* virtual clicks containing message to send */
106
107  /* Check if the process has privileges for the requested call. The only
108   * traps allowed to the kernel are SENDREC and RECEIVE, because tasks always
109   * reply and may not block if the caller doesn't do receive().
110 */
111 if (! (priv(caller_ptr)->s_trap_mask & (1 << function)) ||
112 (iskerneln(src_dst) && function != SENDREC
113 && function != RECEIVE)) {
114 #if DEBUG_ENABLE_IPC_WARNINGS
115 kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
116 function, proc_nr(caller_ptr), src_dst);
117 #endif
118 return(ETRAPDENIED); /* trap denied by mask or kernel */
119 }
120
121   /* Require a valid source and/or destination process, unless echoing. */
122 if (src_dst != ANY && function != ECHO) {
123 if (! isokprocn(src_dst)) {
124 #if DEBUG_ENABLE_IPC_WARNINGS
125 kprintf("sys_call: invalid src_dst, src_dst %d, caller %d\n",
126 src_dst, proc_nr(caller_ptr));
127 #endif
128 return(EBADSRCDST); /* invalid process number */
129 }
130 if (isemptyn(src_dst)) {
131 #if DEBUG_ENABLE_IPC_WARNINGS
132 kprintf("sys_call: dead src_dst; trap %d, from %d, to %d\n",
133 function, proc_nr(caller_ptr), src_dst);
134 #endif
135 return(EDEADSRCDST);
136 }
137 }
138
139 /* If the call involves a message buffer, i.e., for SEND, RECEIVE, SENDREC,
140 * or ECHO, check the message pointer. This check allows a message to be
141 * anywhere in data or stack or gap. It will have to be made more elaborate
142 * for machines which don't have the gap mapped.
143 */
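  /* Worked example (values assumed purely for illustration: CLICK_SHIFT of 10,
   * i.e. 1K clicks, and MESS_SIZE of 36): a message at virtual address 0x13F8
   * gives vlo = 0x13F8 >> 10 = 4 and vhi = (0x13F8 + 35) >> 10 = 5, and both
   * clicks must lie between the start of the data segment and the end of the
   * stack segment for the pointer to be accepted.
   */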
144 if (function & CHECK_PTR) {
145 vlo = (vir_bytes) m_ptr >> CLICK_SHIFT;
146 vhi = ((vir_bytes) m_ptr + MESS_SIZE - 1) >> CLICK_SHIFT;
147 if (vlo < caller_ptr->p_memmap[D].mem_vir || vlo > vhi ||
148 vhi >= caller_ptr->p_memmap[S].mem_vir +
149 caller_ptr->p_memmap[S].mem_len) {
150 #if DEBUG_ENABLE_IPC_WARNINGS
151 kprintf("sys_call: invalid message pointer, trap %d, caller %d\n",
152 function, proc_nr(caller_ptr));
153 #endif
154 return(EFAULT); /* invalid message pointer */
155 }
156 }
157
158 /* If the call is to send to a process, i.e., for SEND, SENDREC or NOTIFY,
159 * verify that the caller is allowed to send to the given destination.
160 */
161 if (function & CHECK_DST) {
162 if (! get_sys_bit(priv(caller_ptr)->s_ipc_to, nr_to_id(src_dst))) {
163 #if DEBUG_ENABLE_IPC_WARNINGS
164 kprintf("sys_call: ipc mask denied trap %d from %d to %d\n",
165 function, proc_nr(caller_ptr), src_dst);
166 #endif
167 return(ECALLDENIED); /* call denied by ipc mask */
168 }
169 }
170
171 /* Check for a possible deadlock for blocking SEND(REC) and RECEIVE. */
172 if (function & CHECK_DEADLOCK) {
173 if (group_size = deadlock(function, caller_ptr, src_dst)) {
174 #if DEBUG_ENABLE_IPC_WARNINGS
175 kprintf("sys_call: trap %d from %d to %d deadlocked, group size %d\n",
176 function, proc_nr(caller_ptr), src_dst, group_size);
177 #endif
178 return(ELOCKED);
179 }
180 }
181
182 /* Now check if the call is known and try to perform the request. The only
183 * system calls that exist in MINIX are sending and receiving messages.
184 * - SENDREC: combines SEND and RECEIVE in a single system call
185 * - SEND: sender blocks until its message has been delivered
186 * - RECEIVE: receiver blocks until an acceptable message has arrived
187 * - NOTIFY: nonblocking call; deliver notification or mark pending
188 * - ECHO: nonblocking call; directly echo back the message
189 */
190 switch(function) {
191 case SENDREC:
192 /* A flag is set so that notifications cannot interrupt SENDREC. */
193 priv(caller_ptr)->s_flags |= SENDREC_BUSY;
194 /* fall through */
195 case SEND:
196 result = mini_send(caller_ptr, src_dst, m_ptr, flags);
197 if (function == SEND || result != OK) {
198 break; /* done, or SEND failed */
199 } /* fall through for SENDREC */
200 case RECEIVE:
201 if (function == RECEIVE)
202 priv(caller_ptr)->s_flags &= ~SENDREC_BUSY;
203 result = mini_receive(caller_ptr, src_dst, m_ptr, flags);
204 break;
205 case NOTIFY:
206 result = mini_notify(caller_ptr, src_dst);
207 break;
208 case ECHO:
209 CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, caller_ptr, m_ptr);
210 result = OK;
211 break;
212 default:
213 result = EBADCALL; /* illegal system call */
214 }
215
216 /* Now, return the result of the system call to the caller. */
217 return(result);
218 }
219
220 /*===========================================================================*
221 * deadlock *
222 *===========================================================================*/
223 PRIVATE int deadlock(function, cp, src_dst)
224 int function; /* trap number */
225 register struct proc *cp; /* pointer to caller */
226 register int src_dst; /* src or dst process */
227 {
228 /* Check for deadlock. This can happen if 'caller_ptr' and 'src_dst' have
229 * a cyclic dependency of blocking send and receive calls. The only cyclic
230  * dependency that is not fatal is if the caller and target directly SEND(REC)
231 * and RECEIVE to each other. If a deadlock is found, the group size is
232 * returned. Otherwise zero is returned.
233 */
234 register struct proc *xp; /* process pointer */
235 int group_size = 1; /* start with only caller */
236 int trap_flags;
237
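  /* Example of a fatal cycle this loop detects: if A is blocked SENDing to B,
   * B is blocked SENDing to C, and C (the caller) now tries to SEND to A, the
   * walk A -> B leads back to the caller and a group size of 3 is returned.
   */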
238   while (src_dst != ANY) { /* loop while src_dst is a process nr */
239 xp = proc_addr(src_dst); /* follow chain of processes */
240 group_size ++; /* extra process in group */
241
242      /* Check whether the last process in the chain has a dependency. If it
243       * does not, the cycle cannot be closed and we are done.
244 */
245 if (xp->p_rts_flags & RECEIVING) { /* xp has dependency */
246 src_dst = xp->p_getfrom; /* get xp's source */
247 } else if (xp->p_rts_flags & SENDING) { /* xp has dependency */
248 src_dst = xp->p_sendto; /* get xp's destination */
249 } else {
250 return(0); /* not a deadlock */
251 }
252
253 /* Now check if there is a cyclic dependency. For group sizes of two,
254 * a combination of SEND(REC) and RECEIVE is not fatal. Larger groups
255 * or other combinations indicate a deadlock.
256 */
257 if (src_dst == proc_nr(cp)) { /* possible deadlock */
258 if (group_size == 2) { /* caller and src_dst */
259 /* The function code, shifted two bits, lines up with the SENDING/RECEIVING flags. */
260 if ((xp->p_rts_flags ^ (function << 2)) & SENDING) {
261 return(0); /* not a deadlock */
262 }
263 }
264 return(group_size); /* deadlock found */
265 }
266 }
267 return(0); /* not a deadlock */
268 }
269
270 /*===========================================================================*
271 * mini_send *
272 *===========================================================================*/
273 PRIVATE int mini_send(caller_ptr, dst, m_ptr, flags)
274 register struct proc *caller_ptr; /* who is trying to send a message? */
275 int dst; /* to whom is message being sent? */
276 message *m_ptr; /* pointer to message buffer */
277 unsigned flags; /* system call flags */
278 {
279 /* Send a message from 'caller_ptr' to 'dst'. If 'dst' is blocked waiting
280 * for this message, copy the message to it and unblock 'dst'. If 'dst' is
281 * not waiting at all, or is waiting for another source, queue 'caller_ptr'.
282 */
283 register struct proc *dst_ptr = proc_addr(dst);
284 register struct proc **xpp;
285
286 /* Check if 'dst' is blocked waiting for this message. The destination's
287 * SENDING flag may be set when its SENDREC call blocked while sending.
288 */
289 if ( (dst_ptr->p_rts_flags & (RECEIVING | SENDING)) == RECEIVING &&
290 (dst_ptr->p_getfrom == ANY || dst_ptr->p_getfrom == caller_ptr->p_nr)) {
291 /* Destination is indeed waiting for this message. */
292 CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, dst_ptr,
293 dst_ptr->p_messbuf);
294 if ((dst_ptr->p_rts_flags &= ~RECEIVING) == 0) enqueue(dst_ptr);
295 } else if ( ! (flags & NON_BLOCKING)) {
296 /* Destination is not waiting. Block and dequeue caller. */
297 caller_ptr->p_messbuf = m_ptr;
298 if (caller_ptr->p_rts_flags == 0) dequeue(caller_ptr);
299 caller_ptr->p_rts_flags |= SENDING;
300 caller_ptr->p_sendto = dst;
301
302 /* Process is now blocked. Put it on the destination's queue. */
303 xpp = &dst_ptr->p_caller_q; /* find end of list */
304 while (*xpp != NIL_PROC) xpp = &(*xpp)->p_q_link;
305 *xpp = caller_ptr; /* add caller to end */
306 caller_ptr->p_q_link = NIL_PROC; /* mark new end of list */
307 } else {
308 return(ENOTREADY);
309 }
310 return(OK);
311 }
312
313 /*===========================================================================*
314 * mini_receive *
315 *===========================================================================*/
316 PRIVATE int mini_receive(caller_ptr, src, m_ptr, flags)
317 register struct proc *caller_ptr; /* process trying to get message */
318 int src; /* which message source is wanted */
319 message *m_ptr; /* pointer to message buffer */
320 unsigned flags; /* system call flags */
321 {
322 /* A process or task wants to get a message. If a message is already queued,
323 * acquire it and deblock the sender. If no message from the desired source
324 * is available block the caller, unless the flags don't allow blocking.
325 */
326 register struct proc **xpp;
327 register struct notification **ntf_q_pp;
328 message m;
329 int bit_nr;
330 sys_map_t *map;
331 bitchunk_t *chunk;
332 int i, src_id, src_proc_nr;
333
334 /* Check to see if a message from desired source is already available.
335 * The caller's SENDING flag may be set if SENDREC couldn't send. If it is
336 * set, the process should be blocked.
337 */
338 if (!(caller_ptr->p_rts_flags & SENDING)) {
339
340 /* Check if there are pending notifications, except for SENDREC. */
341 if (! (priv(caller_ptr)->s_flags & SENDREC_BUSY)) {
342
343 map = &priv(caller_ptr)->s_notify_pending;
344 for (chunk=&map->chunk[0]; chunk<&map->chunk[NR_SYS_CHUNKS]; chunk++) {
345
346 /* Find a pending notification from the requested source. */
347 if (! *chunk) continue; /* no bits in chunk */
348 for (i=0; ! (*chunk & (1<<i)); ++i) {} /* look up the bit */
349 src_id = (chunk - &map->chunk[0]) * BITCHUNK_BITS + i;
350 if (src_id >= NR_SYS_PROCS) break; /* out of range */
351 src_proc_nr = id_to_nr(src_id); /* get source proc */
352 #if DEBUG_ENABLE_IPC_WARNINGS
353 if(src_proc_nr == NONE) {
354 kprintf("mini_receive: sending notify from NONE\n");
355 }
356 #endif
357 if (src!=ANY && src!=src_proc_nr) continue; /* source not ok */
358 *chunk &= ~(1 << i); /* no longer pending */
359
360 /* Found a suitable source, deliver the notification message. */
361 BuildMess(&m, src_proc_nr, caller_ptr); /* assemble message */
362 CopyMess(src_proc_nr, proc_addr(HARDWARE), &m, caller_ptr, m_ptr);
363 return(OK); /* report success */
364 }
365 }
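    /* (For the scan above: src_id is the absolute bit number in the pending
     * map; with BITCHUNK_BITS of, say, 16, a value assumed here for
     * illustration, bit 3 of chunk[1] yields src_id 1*16+3 = 19, which
     * id_to_nr() maps back to a process number.)
     */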
366
367 /* Check caller queue. Use pointer pointers to keep code simple. */
368 xpp = &caller_ptr->p_caller_q;
369 while (*xpp != NIL_PROC) {
370 if (src == ANY || src == proc_nr(*xpp)) {
371 /* Found acceptable message. Copy it and update status. */
372 CopyMess((*xpp)->p_nr, *xpp, (*xpp)->p_messbuf, caller_ptr, m_ptr);
373 if (((*xpp)->p_rts_flags &= ~SENDING) == 0) enqueue(*xpp);
374 *xpp = (*xpp)->p_q_link; /* remove from queue */
375 return(OK); /* report success */
376 }
377 xpp = &(*xpp)->p_q_link; /* proceed to next */
378 }
379 }
380
381 /* No suitable message is available or the caller couldn't send in SENDREC.
382 * Block the process trying to receive, unless the flags tell otherwise.
383 */
384 if ( ! (flags & NON_BLOCKING)) {
385 caller_ptr->p_getfrom = src;
386 caller_ptr->p_messbuf = m_ptr;
387 if (caller_ptr->p_rts_flags == 0) dequeue(caller_ptr);
388 caller_ptr->p_rts_flags |= RECEIVING;
389 return(OK);
390 } else {
391 return(ENOTREADY);
392 }
393 }
394
395 /*===========================================================================*
396 * mini_notify *
397 *===========================================================================*/
398 PRIVATE int mini_notify(caller_ptr, dst)
399 register struct proc *caller_ptr; /* sender of the notification */
400 int dst; /* which process to notify */
401 {
402 register struct proc *dst_ptr = proc_addr(dst);
403 int src_id; /* source id for late delivery */
404 message m; /* the notification message */
405
406 /* Check to see if target is blocked waiting for this message. A process
407 * can be both sending and receiving during a SENDREC system call.
408 */
409 if ((dst_ptr->p_rts_flags & (RECEIVING|SENDING)) == RECEIVING &&
410 ! (priv(dst_ptr)->s_flags & SENDREC_BUSY) &&
411 (dst_ptr->p_getfrom == ANY || dst_ptr->p_getfrom == caller_ptr->p_nr)) {
412
413 /* Destination is indeed waiting for a message. Assemble a notification
414 * message and deliver it. Copy from pseudo-source HARDWARE, since the
415 * message is in the kernel's address space.
416 */
417 BuildMess(&m, proc_nr(caller_ptr), dst_ptr);
418 CopyMess(proc_nr(caller_ptr), proc_addr(HARDWARE), &m,
419 dst_ptr, dst_ptr->p_messbuf);
420 dst_ptr->p_rts_flags &= ~RECEIVING; /* deblock destination */
421 if (dst_ptr->p_rts_flags == 0) enqueue(dst_ptr);
422 return(OK);
423 }
424
425 /* Destination is not ready to receive the notification. Add it to the
426 * bit map with pending notifications. Note the indirectness: the system id
427 * instead of the process number is used in the pending bit map.
428 */
429 src_id = priv(caller_ptr)->s_id;
430 set_sys_bit(priv(dst_ptr)->s_notify_pending, src_id);
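  /* The pending bit is picked up and turned into a message by the notification
   * scan in mini_receive() when the destination next receives (outside SENDREC).
   */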
431 return(OK);
432 }
433
434 /*===========================================================================*
435 * lock_notify *
436 *===========================================================================*/
437 PUBLIC int lock_notify(src, dst)
438 int src; /* sender of the notification */
439 int dst; /* who is to be notified */
440 {
441 /* Safe gateway to mini_notify() for tasks and interrupt handlers. The sender
442  * is explicitly given to prevent confusion about where the call comes from.
443  * The MINIX kernel is not reentrant, which means that interrupts are disabled
444  * after the first kernel entry (hardware interrupt, trap, or exception). Locking
445 * is done by temporarily disabling interrupts.
446 */
447 int result;
448
449 /* Exception or interrupt occurred, thus already locked. */
450 if (k_reenter >= 0) {
451 result = mini_notify(proc_addr(src), dst);
452 }
453
454 /* Call from task level, locking is required. */
455 else {
456 lock(0, "notify");
457 result = mini_notify(proc_addr(src), dst);
458 unlock(0);
459 }
460 return(result);
461 }
462
463 /*===========================================================================*
464 * enqueue *
465 *===========================================================================*/
466 PRIVATE void enqueue(rp)
467 register struct proc *rp; /* this process is now runnable */
468 {
469 /* Add 'rp' to one of the queues of runnable processes. This function is
470 * responsible for inserting a process into one of the scheduling queues.
471 * The mechanism is implemented here. The actual scheduling policy is
472 * defined in sched() and pick_proc().
473 */
474 int q; /* scheduling queue to use */
475 int front; /* add to front or back */
476
477 #if DEBUG_SCHED_CHECK
478 check_runqueues("enqueue");
479 if (rp->p_ready) kprintf("enqueue() already ready process\n");
480 #endif
481
482   /* Determine where to insert the process. */
483 sched(rp, &q, &front);
484
485 /* Now add the process to the queue. */
486 if (rdy_head[q] == NIL_PROC) { /* add to empty queue */
487 rdy_head[q] = rdy_tail[q] = rp; /* create a new queue */
488 rp->p_nextready = NIL_PROC; /* mark new end */
489 }
490 else if (front) { /* add to head of queue */
491 rp->p_nextready = rdy_head[q]; /* chain head of queue */
492 rdy_head[q] = rp; /* set new queue head */
493 }
494 else { /* add to tail of queue */
495 rdy_tail[q]->p_nextready = rp; /* chain tail of queue */
496 rdy_tail[q] = rp; /* set new queue tail */
497 rp->p_nextready = NIL_PROC; /* mark new end */
498 }
499
500 /* Now select the next process to run. */
501 pick_proc();
502
503 #if DEBUG_SCHED_CHECK
504 rp->p_ready = 1;
505 check_runqueues("enqueue");
506 #endif
507 }
508
509 /*===========================================================================*
510 * dequeue *
511 *===========================================================================*/
512 PRIVATE void dequeue(rp)
513 register struct proc *rp; /* this process is no longer runnable */
514 {
515 /* A process must be removed from the scheduling queues, for example, because
516 * it has blocked. If the currently active process is removed, a new process
517 * is picked to run by calling pick_proc().
518 */
519 register int q = rp->p_priority; /* queue to use */
520 register struct proc **xpp; /* iterate over queue */
521 register struct proc *prev_xp;
522
523   /* Side-effect for kernel tasks: check that the stack guard word is still intact. */
524 if (iskernelp(rp)) {
525 if (*priv(rp)->s_stack_guard != STACK_GUARD)
526 panic("stack overrun by task", proc_nr(rp));
527 }
528
529 #if DEBUG_SCHED_CHECK
530 check_runqueues("dequeue");
531 if (! rp->p_ready) kprintf("dequeue() already unready process\n");
532 #endif
533
534 /* Now make sure that the process is not in its ready queue. Remove the
535 * process if it is found. A process can be made unready even if it is not
536 * running by being sent a signal that kills it.
537 */
538 prev_xp = NIL_PROC;
539 for (xpp = &rdy_head[q]; *xpp != NIL_PROC; xpp = &(*xpp)->p_nextready) {
540
541 if (*xpp == rp) { /* found process to remove */
542 *xpp = (*xpp)->p_nextready; /* replace with next chain */
543 if (rp == rdy_tail[q]) /* queue tail removed */
544 rdy_tail[q] = prev_xp; /* set new tail */
545 if (rp == proc_ptr || rp == next_ptr) /* active process removed */
546 pick_proc(); /* pick new process to run */
547 break;
548 }
549 prev_xp = *xpp; /* save previous in chain */
550 }
551
552 #if DEBUG_SCHED_CHECK
553 rp->p_ready = 0;
554 check_runqueues("dequeue");
555 #endif
556 }
557
558 /*===========================================================================*
559 * sched *
560 *===========================================================================*/
561 PRIVATE void sched(rp, queue, front)
562 register struct proc *rp; /* process to be scheduled */
563 int *queue; /* return: queue to use */
564 int *front; /* return: front or back */
565 {
566 /* This function determines the scheduling policy. It is called whenever a
567 * process must be added to one of the scheduling queues to decide where to
568 * insert it. As a side-effect the process' priority may be updated.
569 */
570 static struct proc *prev_ptr = NIL_PROC; /* previous without time */
571   int time_left = (rp->p_ticks_left > 0); /* nonzero if quantum is left */
572 int penalty = 0; /* change in priority */
573
574 /* Check whether the process has time left. Otherwise give a new quantum
575  * and possibly raise the priority. Processes that use up several quanta
576  * in a row get a lower priority to catch infinite loops in high-priority
577 * processes (system servers and drivers).
578 */
579 if ( ! time_left) { /* quantum consumed ? */
580 rp->p_ticks_left = rp->p_quantum_size; /* give new quantum */
581 if (prev_ptr == rp) penalty ++; /* catch infinite loops */
582      else penalty --; /* let it slowly climb back up */
583 prev_ptr = rp; /* store ptr for next */
584 }
585
586 /* Determine the new priority of this process. The bounds are determined
587 * by IDLE's queue and the maximum priority of this process. Kernel task
588 * and the idle process are never changed in priority.
589 */
590 if (penalty != 0 && ! iskernelp(rp)) {
591 rp->p_priority += penalty; /* update with penalty */
592 if (rp->p_priority < rp->p_max_priority) /* check upper bound */
593 rp->p_priority=rp->p_max_priority;
594 else if (rp->p_priority > IDLE_Q-1) /* check lower bound */
595 rp->p_priority = IDLE_Q-1;
596 }
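  /* Example: a process that exhausts one quantum after another keeps hitting
   * the (prev_ptr == rp) case above, so its priority value creeps up by one
   * per quantum (a larger value means a lower priority) until it reaches
   * IDLE_Q-1; if other processes use up quanta in between, the value drifts
   * back toward p_max_priority instead.
   */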
597
598 /* If there is time left, the process is added to the front of its queue,
599  * so that it can run immediately. The queue to use is simply the
600 * process' current priority.
601 */
602 *queue = rp->p_priority;
603 *front = time_left;
604 }
605
606 /*===========================================================================*
607 * pick_proc *
608 *===========================================================================*/
609 PRIVATE void pick_proc()
610 {
611 /* Decide who to run now. A new process is selected by setting 'next_ptr'.
612 * When a billable process is selected, record it in 'bill_ptr', so that the
613 * clock task can tell who to bill for system time.
614 */
615 register struct proc *rp; /* process to run */
616 int q; /* iterate over queues */
617
618 /* Check each of the scheduling queues for ready processes. The number of
619 * queues is defined in proc.h, and priorities are set in the task table.
620 * The lowest queue contains IDLE, which is always ready.
621 */
622 for (q=0; q < NR_SCHED_QUEUES; q++) {
623 if ( (rp = rdy_head[q]) != NIL_PROC) {
624 next_ptr = rp; /* run process 'rp' next */
625 if (priv(rp)->s_flags & BILLABLE)
626 bill_ptr = rp; /* bill for system time */
627 return;
628 }
629 }
630 }
631
632 /*===========================================================================*
633 * lock_send *
634 *===========================================================================*/
635 PUBLIC int lock_send(dst, m_ptr)
636 int dst; /* to whom is message being sent? */
637 message *m_ptr; /* pointer to message buffer */
638 {
639 /* Safe gateway to mini_send() for tasks. */
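  /* Note that NON_BLOCKING is passed below: a task-level send that cannot be
   * delivered at once fails with ENOTREADY rather than blocking the task.
   */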
640 int result;
641 lock(2, "send");
642 result = mini_send(proc_ptr, dst, m_ptr, NON_BLOCKING);
643 unlock(2);
644 return(result);
645 }
646
647 /*===========================================================================*
648 * lock_enqueue *
649 *===========================================================================*/
650 PUBLIC void lock_enqueue(rp)
651 struct proc *rp; /* this process is now runnable */
652 {
653 /* Safe gateway to enqueue() for tasks. */
654 lock(3, "enqueue");
655 enqueue(rp);
656 unlock(3);
657 }
658
659 /*===========================================================================*
660 * lock_dequeue *
661 *===========================================================================*/
662 PUBLIC void lock_dequeue(rp)
663 struct proc *rp; /* this process is no longer runnable */
664 {
665 /* Safe gateway to dequeue() for tasks. */
666 lock(4, "dequeue");
667 dequeue(rp);
668 unlock(4);
669 }
670