1 /*
2 * Mach Operating System
3 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: thread_swap.c,v $
29 * Revision 2.12 93/05/15 18:56:04 mrt
30 * machparam.h -> machspl.h
31 *
32 * Revision 2.11 93/01/14 17:37:17 danner
33 * Corrected casts for thread_wakeup and assert_wait.
34 * [93/01/12 danner]
35 * 64bit cleanup. Proper spl typing.
36 * [92/12/01 af]
37 *
38 * Revision 2.10 92/08/03 17:40:14 jfriedl
39 * removed silly prototypes
40 * [92/08/02 jfriedl]
41 *
42 * Revision 2.9 92/05/21 17:16:48 jfriedl
43 * tried prototypes.
44 * [92/05/20 jfriedl]
45 *
46 * Revision 2.8 91/07/31 17:50:04 dbg
47 * Revise scheduling state machine.
48 * [91/07/30 17:07:03 dbg]
49 *
50 * Revision 2.7 91/05/14 16:49:08 mrt
51 * Correcting copyright
52 *
53 * Revision 2.6 91/03/16 14:53:09 rpd
54 * Removed thread_swapout.
55 * [91/02/24 rpd]
56 * Added swapin_thread_continue.
57 * Simplified the state machine. Now it uses only
58 * TH_SW_IN, TH_SW_OUT, TH_SW_COMING_IN.
59 * [91/01/20 rpd]
60 *
61 * Simplified thread_swapin.
62 * [91/01/17 rpd]
63 *
64 * Revision 2.5 91/02/05 17:30:40 mrt
65 * Changed to new Mach copyright
66 * [91/02/01 16:20:14 mrt]
67 *
68 * Revision 2.4 91/01/08 15:18:20 rpd
69 * Added continuation argument to thread_block.
70 * [90/12/08 rpd]
71 *
72 * Removed swapout_thread, swapout_threads,
73 * swapout_scan, thread_swapout.
74 * [90/11/11 rpd]
75 *
76 * Revision 2.3 90/06/02 14:57:18 rpd
77 * In thread_swapout, free the thread's cached message buffer.
78 * [90/04/23 rpd]
79 * Converted to new processor set technology.
80 * [90/03/26 22:26:32 rpd]
81 *
82 * Revision 2.2 89/12/08 19:52:35 rwd
83 * Added call to zone_gc()
84 * [89/11/21 rwd]
85 *
86 * Revision 2.1 89/08/03 15:48:24 rwd
87 * Created.
88 *
89 * Revision 2.4 88/10/27 10:50:40 rpd
90 * Changed includes to the new style. Removed extraneous semis
91 * from the swapper_lock/swapper_unlock macros.
92 * [88/10/26 14:49:09 rpd]
93 *
94 * 15-Jun-88 Michael Young (mwyoung) at Carnegie-Mellon University
95 * Fix improper handling of swapper_lock() in swapin_thread().
96 * Problem discovery and elegant recoding due to Richard Draves.
97 *
98 * 4-May-88 David Golub (dbg) at Carnegie-Mellon University
99 * Remove vax-specific code.
100 *
101 * 1-Mar-88 David Black (dlb) at Carnegie-Mellon University
102 * Logic change due to replacement of wait_time field in thread
103 * with sched_stamp. Extra argument to thread_setrun().
104 *
105 * 25-Jan-88 Richard Sanzi (sanzi) at Carnegie-Mellon University
106 * Notify pcb module that pcb is about to be unwired by calling
107 * pcb_synch(thread).
108 *
109 * 21-Jan-88 David Golub (dbg) at Carnegie-Mellon University
110 * Fix lots more race conditions (thread_swapin called during
111 * swapout, thread_swapin called twice) by adding a swapper state
112 * machine to each thread. Moved thread_swappable here from
113 * kern/thread.c.
114 *
115 * 12-Nov-87 David Golub (dbg) at Carnegie-Mellon University
116 * Fix race condition in thread_swapout: mark thread as swapped
117 * before swapping out its stack, so that an intervening wakeup
118 * will put it on the swapin list.
119 *
120 * 5-Oct-87 David Golub (dbg) at Carnegie-Mellon University
121 * Changed to new scheduling state machine.
122 *
123 * 15-Sep-87 Michael Young (mwyoung) at Carnegie-Mellon University
124 * De-linted.
125 *
126 * 5-Sep-87 Michael Young (mwyoung) at Carnegie-Mellon University
127 * Added check for THREAD_SWAPPABLE in swapout_scan().
128 *
129 * 14-Jul-87 David Golub (dbg) at Carnegie-Mellon University
130 * Truncate the starting address and round up the size given to
131 * vm_map_pageable, when wiring/unwiring kernel stacks.
132 * KERNEL_STACK_SIZE is not necessarily a multiple of page_size; if
133 * it isn't, forgetting to round the address and size to page
134 * boundaries results in panic. Kmem_alloc and kmem_free, used in
135 * thread.c to allocate and free kernel stacks, already round to
136 * page boundaries.
137 *
138 * 26-Jun-87 Michael Young (mwyoung) at Carnegie-Mellon University
139 * Add thread_swapout_allowed flag to make it easy to turn
140 * off swapping when debugging.
141 *
142 * 4-Jun-87 David Golub (dbg) at Carnegie-Mellon University
143 * Pass correct number of parameters to lock_init - initialize
144 * swap_lock as sleepable instead of calling lock_sleepable
145 * separately.
146 *
147 * 1-Apr-87 Avadis Tevanian (avie) at Carnegie-Mellon University
148 * Include vm_param.h to pick up KERNEL_STACK_SIZE definition.
149 *
150 * 20-Mar-87 Avadis Tevanian (avie) at Carnegie-Mellon University
151 * Lower ipl before calling thread_swapout().
152 *
153 * 19-Mar-87 David Golub (dbg) at Carnegie-Mellon University
154 * Fix one race condition in this (not so buggy) version - since
155 * thread_swapin can be called from interrupts, must raise IPL when
156 * locking swapper_lock.
157 *
158 * 09-Mar-87 Avadis Tevanian (avie) at Carnegie-Mellon University
159 * Created, based somewhat loosely on the earlier (which was a highly
160 * buggy, race condition filled version).
161 *
162 */
163 /*
164 *
165 * File: kern/thread_swap.c
166 * Author: Avadis Tevanian, Jr.
167 * Date: 1987
168 *
169 * Mach thread swapper:
170 * Find idle threads to swap, freeing up kernel stack resources
171 * at the expense of allowing them to execute.
172 *
173 * Swap in threads that need to be run. This is done here
174 * by the swapper thread since it cannot be done (in general)
175 * when the kernel tries to place a thread on a run queue.
176 *
177 * Note: The act of swapping a thread in Mach does not mean that
178 * its memory gets forcibly swapped to secondary storage. The memory
179 * for the task corresponding to a swapped thread is paged out
180 * through the normal paging mechanism.
181 *
182 */
183
184 #include <ipc/ipc_kmsg.h>
185 #include <kern/counters.h>
186 #include <kern/thread.h>
187 #include <kern/lock.h>
188 #include <vm/vm_map.h>
189 #include <vm/vm_kern.h>
190 #include <mach/vm_param.h>
191 #include <kern/sched_prim.h>
192 #include <kern/processor.h>
193 #include <kern/thread_swap.h>
194 #include <machine/machspl.h> /* for splsched */
195
196
197
198 queue_head_t swapin_queue;
199 decl_simple_lock_data(, swapper_lock_data)
200
201 #define swapper_lock() simple_lock(&swapper_lock_data)
202 #define swapper_unlock() simple_unlock(&swapper_lock_data)
203
204 /*
205 * swapper_init: [exported]
206 *
207 * Initialize the swapper module.
208 */
209 void swapper_init()
210 {
211 queue_init(&swapin_queue);
212 simple_lock_init(&swapper_lock_data);
213 }
214
215 /*
216 * thread_swapin: [exported]
217 *
218 * Place the specified thread in the list of threads to swapin. It
219 * is assumed that the thread is locked, therefore we are at splsched.
220 *
221 * We don't bother with stack_alloc_try to optimize swapin;
222 * our callers have already tried that route.
223 */
224
void thread_swapin(thread)
	thread_t thread;
{
	/*
	 *	Dispatch on the thread's current swap state.  Caller
	 *	holds the thread lock (hence we are at splsched), so
	 *	the state cannot change underneath us.
	 */
	switch (thread->state & TH_SWAP_STATE) {
	    case TH_SWAPPED:
		/*
		 *	Swapped out - queue for swapin thread.
		 *
		 *	The state is changed to TH_SW_COMING_IN *before*
		 *	the thread is enqueued, so a concurrent caller
		 *	sees the transition and takes the no-op branch
		 *	below instead of double-queueing.  The wakeup is
		 *	issued after swapper_unlock() to keep lock hold
		 *	time minimal.
		 */
		thread->state = (thread->state & ~TH_SWAP_STATE)
				| TH_SW_COMING_IN;
		swapper_lock();
		enqueue_tail(&swapin_queue, (queue_entry_t) thread);
		swapper_unlock();
		thread_wakeup((event_t) &swapin_queue);
		break;

	    case TH_SW_COMING_IN:
		/*
		 *	Already queued for swapin thread, or being
		 *	swapped in.  Nothing to do.
		 */
		break;

	    default:
		/*
		 *	Already swapped in (TH_SW_IN): calling
		 *	thread_swapin on a resident thread indicates a
		 *	broken state machine, so panic rather than
		 *	continue.
		 */
		panic("thread_swapin");
	}
}
255
256 /*
257 * thread_doswapin:
258 *
259 * Swapin the specified thread, if it should be runnable, then put
260 * it on a run queue. No locks should be held on entry, as it is
261 * likely that this routine will sleep (waiting for stack allocation).
262 */
263 void thread_doswapin(thread)
264 register thread_t thread;
265 {
266 spl_t s;
267
268 /*
269 * Allocate the kernel stack.
270 */
271
272 stack_alloc(thread, thread_continue);
273
274 /*
275 * Place on run queue.
276 */
277
278 s = splsched();
279 thread_lock(thread);
280 thread->state &= ~(TH_SWAPPED | TH_SW_COMING_IN);
281 if (thread->state & TH_RUN)
282 thread_setrun(thread, TRUE);
283 thread_unlock(thread);
284 (void) splx(s);
285 }
286
287 /*
288 * swapin_thread: [exported]
289 *
290 * This procedure executes as a kernel thread. Threads that need to
291 * be swapped in are swapped in by this thread.
292 */
void swapin_thread_continue()
{
	/*
	 *	Service loop for the swapin kernel thread: drain the
	 *	swapin queue, then sleep until thread_swapin posts more
	 *	work.  Never returns.
	 */
	for (;;) {
		register thread_t thread;
		spl_t s;

		s = splsched();
		swapper_lock();

		/*
		 *	Drain the queue.  The lock (and spl) must be
		 *	dropped around thread_doswapin, which may block
		 *	waiting for a kernel stack, and reacquired
		 *	before the next dequeue.
		 */
		while ((thread = (thread_t) dequeue_head(&swapin_queue))
						!= THREAD_NULL) {
			swapper_unlock();
			(void) splx(s);

			thread_doswapin(thread);		/* may block */

			s = splsched();
			swapper_lock();
		}

		/*
		 *	Queue is empty: assert the wait while still
		 *	holding swapper_lock, so a thread_swapin that
		 *	races in between the unlock and the block still
		 *	finds us registered for the wakeup (see the
		 *	15-Jun-88 history entry above).
		 */
		assert_wait((event_t) &swapin_queue, FALSE);
		swapper_unlock();
		(void) splx(s);
		counter(c_swapin_thread_block++);
		thread_block(swapin_thread_continue);
	}
}
320
/*
 *	swapin_thread: [exported]
 *
 *	Entry point for the swapin kernel thread.  Marks its own
 *	stack as privileged -- the swapper must be able to run even
 *	when kernel stacks are scarce, since it is the one that
 *	frees them up -- and then enters the service loop, which
 *	never returns.
 */
void swapin_thread()
{
	stack_privilege(current_thread());
	swapin_thread_continue();
	/*NOTREACHED*/
}