FreeBSD/Linux Kernel Cross Reference
sys/norma/ipc_wait.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1991 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: ipc_wait.c,v $
29 * Revision 2.8 92/03/10 16:28:37 jsb
30 * Merged in norma branch changes as of NORMA_MK7.
31 * [92/03/09 12:50:44 jsb]
32 *
33 * Revision 2.7.2.3 92/02/18 19:17:09 jeffreyh
34 * [intel] optionally execute optimized norma_ipc_kmsg_accept().
35 * [92/02/13 13:06:38 jeffreyh]
36 *
37 * Revision 2.7.2.2 92/01/21 21:53:09 jsb
38 * De-linted.
39 * [92/01/17 12:21:06 jsb]
40 *
41 * Revision 2.7.2.1 92/01/09 18:46:05 jsb
42 * Use netipc_thread_{lock,unlock} instead of spls.
43 * [92/01/08 10:23:36 jsb]
44 *
45 * Revision 2.7 91/12/14 14:35:32 jsb
46 * Removed private assert definition.
47 *
48 * Revision 2.6 91/09/04 11:28:45 jsb
49 * Use splhigh/splx instead of sploff/splon for now.
50 * [91/09/04 09:46:45 jsb]
51 *
52 * Revision 2.5 91/08/28 11:16:12 jsb
53 * Renamed ast_clport things to ast_netipc things.
54 * [91/08/15 09:12:09 jsb]
55 *
56 * Fixed, and added counters.
57 * [91/08/14 19:22:35 jsb]
58 *
59 * Revision 2.4 91/08/03 18:19:32 jsb
60 * Fixed include.
61 * [91/07/17 14:06:39 jsb]
62 *
63 * Revision 2.3 91/06/17 15:48:02 jsb
64 * Changed norma include.
65 * [91/06/17 11:01:02 jsb]
66 *
67 * Revision 2.2 91/06/06 17:56:02 jsb
68 * First checkin.
69 * [91/06/06 17:51:41 jsb]
70 *
71 */
72 /*
73 * File: norma/ipc_wait.c
74 * Author: Joseph S. Barrera III
75 * Date: 1991
76 */
77
78 #include <mach_host.h>
79
80 #include <mach/port.h>
81 #include <mach/message.h>
82 #include <kern/assert.h>
83 #include <kern/counters.h>
84 #include <kern/sched_prim.h>
85 #include <kern/ipc_sched.h>
86 #include <kern/ipc_kobject.h>
87 #include <ipc/ipc_mqueue.h>
88 #include <ipc/ipc_thread.h>
89 #include <ipc/ipc_kmsg.h>
90 #include <ipc/ipc_port.h>
91 #include <ipc/ipc_pset.h>
92 #include <ipc/ipc_space.h>
93 #include <ipc/ipc_marequest.h>
94
/*
 * Mutual exclusion for the norma_ipc_handoff_* globals below.
 * All writers of that state in this file take this lock; the spin
 * loop in norma_ipc_kmsg_accept reads norma_ipc_handoff_msg without
 * it and re-checks under the lock before acting.
 */
95 extern void netipc_thread_lock();
96 extern void netipc_thread_unlock();
97
98 /*
99 * XXX Needs locking to be multiprocessor safe.
100 * XXX We probably might also want per-processor spinning,
101 * XXX although this will complicate the sending code.
102 *
103 * We signal that we are waiting by setting handoff_mqueue nonzero.
104 * Our sender specifies that something has changed by setting msg nonzero.
105 * We then signal that we are releasing this module by setting msg zero.
106 *
107  * YYY We've added locking so some of the comments above are out of date.
108 *
109 * XXX Should try having this loop handle asts?
110 */
/*
 * Handoff rendezvous state, modified only under netipc_thread_lock().
 * A waiting receiver publishes its mqueue and size limit here; a sender
 * delivers by storing into norma_ipc_handoff_msg (and, for a message
 * exceeding norma_ipc_handoff_max_size, presumably reports just its size
 * via norma_ipc_handoff_msg_size -- see the consumer below; the sender
 * side is not in this file, so confirm against it).
 */
111 ipc_mqueue_t norma_ipc_handoff_mqueue;
112 ipc_kmsg_t norma_ipc_handoff_msg;
113 mach_msg_size_t norma_ipc_handoff_max_size;
114 mach_msg_size_t norma_ipc_handoff_msg_size;
115 
/*
 * Debug counters recording why the accept spin loop was exited.
 * Setting c_break_reset nonzero (e.g. from a kernel debugger) makes
 * norma_ipc_kmsg_accept clear all of them on its next entry.
 */
116 int c_break_reset = 0;
117 int c_break_handoff = 0;
118 int c_break_thread = 0;
119 int c_break_gcount = 0;
120 int c_break_lcount = 0;
121 int c_break_ast = 0;
122 int c_break_ast_terminate = 0;
123 int c_break_ast_halt = 0;
124 int c_break_ast_block = 0;
125 int c_break_ast_network = 0;
126 int c_break_ast_netipc = 0;
127 
128 #if iPSC386 || iPSC860
/*
 * When nonzero, norma_ipc_kmsg_accept returns IKM_NULL immediately,
 * i.e. the optimized handoff path is disabled on iPSC nodes.
 */
129 int norma_ipc_kmsg_accept_disabled = 1;
130 #endif iPSC386 || iPSC860
131
132 /*
133  * Spin until something else is runnable or until a kmsg shows up.
134  */
/*
 * norma_ipc_kmsg_accept:
 *
 *	Busy-wait for a network IPC message to be handed to us directly
 *	by a sender, instead of blocking in the scheduler.  The wait is
 *	abandoned as soon as there is other work for this processor.
 *
 *	mqueue:   the message queue we are willing to receive on.
 *	max_size: largest kmsg the caller can accept; published for the
 *	          sender in norma_ipc_handoff_max_size.
 *	msg_size: out parameter.  Written only when a message arrived
 *	          but was too large (norma_ipc_handoff_msg_size nonzero);
 *	          in that case IKM_NULL is returned and *msg_size holds
 *	          the size of the rejected message.
 *
 *	Returns the handed-off kmsg, or IKM_NULL if the wait was given
 *	up (handoff slot already in use, pending ast, another thread or
 *	runq work appeared, oversized message, or handoff disabled).
 */
135 ipc_kmsg_t
136 norma_ipc_kmsg_accept(mqueue, max_size, msg_size)
137 register volatile ipc_mqueue_t mqueue;
138 mach_msg_size_t max_size;
139 mach_msg_size_t *msg_size;
140 {
141 register processor_t myprocessor;
142 register volatile thread_t *threadp;
143 register volatile int *gcount;
144 register volatile int *lcount;
145 int mycpu;
146 
147 #if iPSC386 || iPSC860
/* Optimized handoff path can be switched off entirely on iPSC nodes. */
148 if (norma_ipc_kmsg_accept_disabled) {
149 return IKM_NULL;
150 }
151 #endif iPSC386 || iPSC860
152 #if 1
/* Debug hook: a nonzero c_break_reset clears every break counter. */
153 if (c_break_reset) {
154 c_break_reset = 0;
155 c_break_handoff = 0;
156 c_break_thread = 0;
157 c_break_gcount = 0;
158 c_break_lcount = 0;
159 c_break_ast = 0;
160 c_break_ast_halt = 0;
161 c_break_ast_terminate = 0;
162 c_break_ast_block = 0;
163 c_break_ast_network = 0;
164 c_break_ast_netipc = 0;
165 }
166 #endif
167 
/*
 * Cache volatile pointers to the scheduler state the spin loop polls:
 * the thread (if any) dispatched to this processor, and the local runq
 * length.  (The global runq pointer is set up below.)
 */
168 mycpu = cpu_number();
169 myprocessor = current_processor();
170 threadp = (volatile thread_t *) &myprocessor->next_thread;
171 lcount = (volatile int *) &myprocessor->runq.count;
172 
173 /*
174 * Don't mark cpu idle; we still like our pmap.
175 * XXX Will myprocessor->next_thread ever get set?
176 */
177 
178 #if MACH_HOST
179 gcount = (volatile int *) &myprocessor->processor_set->runq.count;
180 #else MACH_HOST
181 gcount = (volatile int *) &default_pset.runq.count;
182 #endif MACH_HOST
183 
184 /*
185 * Indicate that we are spinning on this queue.
186 *
187 * Nonzero norma_ipc_handoff_mqueue keeps other receivers away.
188 * Nonzero norma_ipc_handoff_msg keeps other senders away.
189 */
190 netipc_thread_lock();
/* Someone else already owns the (single) handoff slot; just give up. */
191 if (norma_ipc_handoff_mqueue != IMQ_NULL) {
192 printf("This is okay: handoff_mqueue conflict detected.\n");
193 netipc_thread_unlock();
194 return IKM_NULL;
195 }
196 assert(norma_ipc_handoff_msg == 0);
197 norma_ipc_handoff_max_size = max_size;
198 norma_ipc_handoff_msg_size = 0;
199 norma_ipc_handoff_mqueue = mqueue;
200 netipc_thread_unlock();
201 
202 /*
203 * Spin until reschedule or kmsg handoff. Do asts in the meantime.
204 */
/*
 * All reads in this loop are unlocked; the handoff-msg result is
 * re-checked under the lock after the loop before being trusted.
 * Each exit path bumps a c_break_* counter recording why we left.
 */
205 for (;;) {
/* A sender delivered into the handoff slot. */
206 if (norma_ipc_handoff_msg != IKM_NULL) {
207 c_break_handoff++;
208 break;
209 }
/* A pending ast: tally which kinds were seen, then stop spinning. */
210 if (need_ast[mycpu]) {
211 if (need_ast[mycpu] & AST_HALT) {
212 c_break_ast_halt++;
213 }
214 if (need_ast[mycpu] & AST_TERMINATE) {
215 c_break_ast_terminate++;
216 }
217 if (need_ast[mycpu] & AST_BLOCK) {
218 c_break_ast_block++;
219 }
220 if (need_ast[mycpu] & AST_NETWORK) {
221 c_break_ast_network++;
222 }
223 if (need_ast[mycpu] & AST_NETIPC) {
224 c_break_ast_netipc++;
225 }
226 c_break_ast++;
227 break;
228 }
/* A thread has been dispatched to this processor. */
229 if (*threadp != (volatile thread_t) THREAD_NULL) {
230 c_break_thread++;
231 break;
232 }
/* Runnable work appeared on the global (pset) run queue ... */
233 if (*gcount != 0) {
234 c_break_gcount++;
235 break;
236 }
/* ... or on this processor's local run queue. */
237 if (*lcount != 0) {
238 c_break_lcount++;
239 break;
240 }
241 }
242 
243 /*
244 * Before we release mqueue, we must check for a delivered message.
245 */
246 netipc_thread_lock();
247 if (norma_ipc_handoff_msg != IKM_NULL) {
248 /*
249 * Someone left us a message,
250 * or an indication of a message that was too large.
251 */
252 if (norma_ipc_handoff_msg_size) {
/* Too large: report its size to the caller and return no kmsg. */
253 *msg_size = norma_ipc_handoff_msg_size;
254 norma_ipc_handoff_mqueue = IMQ_NULL;
255 norma_ipc_handoff_msg = IKM_NULL;
256 netipc_thread_unlock();
257 return IKM_NULL;
258 } else {
/* A real message: take it and clear the handoff slot. */
259 register ipc_kmsg_t kmsg;
260 kmsg = norma_ipc_handoff_msg;
261 norma_ipc_handoff_mqueue = IMQ_NULL;
262 norma_ipc_handoff_msg = IKM_NULL;
263 netipc_thread_unlock();
264 return kmsg;
265 }
266 }
/* No handoff happened: release the slot; nothing queued meanwhile. */
267 norma_ipc_handoff_mqueue = IMQ_NULL;
268 norma_ipc_handoff_msg = IKM_NULL;
269 assert(ipc_kmsg_queue_first(&mqueue->imq_messages) == IKM_NULL);
270 netipc_thread_unlock();
271 return IKM_NULL;
272 }
Cache object: c1edf11fde8846f6953c70b782b34ff8
|