/* FreeBSD/Linux Kernel Cross Reference: sys/ipc/mach_msg.c */
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993,1992,1991,1990,1989 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: mach_msg.c,v $
29 * Revision 2.22 93/11/17 17:04:23 dbg
30 * Declared continuation functions as not returning.
31 * [93/05/04 dbg]
32 *
33 * Added more special-purpose code for kernel message path.
34 * [93/03/30 dbg]
35 *
36 * Fixed kmsg return parameter from ipc_mqueue_receive by using a
37 * union type. Added ANSI function prototypes.
38 * [93/03/29 dbg]
39 *
40 * Include kern/sched_prim.h instead of kern/ipc_sched.h.
41 * [93/03/25 dbg]
42 *
43 * Revision 2.21 93/03/09 10:54:49 danner
44 * Added typecast to quiet GCC.
45 * [93/03/06 af]
46 *
47 * Revision 2.20 93/01/14 17:33:14 danner
48 * 64bit cleanup.
49 * [92/11/30 af]
50 *
51 * Revision 2.19 92/08/03 17:36:07 jfriedl
52 * removed silly prototypes
53 * [92/08/02 jfriedl]
54 *
55 * Revision 2.18 92/05/21 17:12:14 jfriedl
56 * Added some things to quiet gcc warnings.
57 * Also made correct for when assert is off.
58 * Renamed vars 'timeout' to 'time_out' so as not to conflict with global.
59 * [92/05/16 jfriedl]
60 *
61 * Revision 2.17 92/01/03 20:13:19 dbg
62 * Add quick dispatch to Mach Kernel messages.
63 * [91/12/18 dbg]
64 *
65 * Revision 2.16 91/12/14 14:28:41 jsb
66 * Removed ipc_fields.h hack.
67 *
68 * Revision 2.15 91/11/14 16:58:17 rpd
69 * Picked up mysterious norma changes.
70 * [91/11/14 rpd]
71 *
72 * Revision 2.14 91/10/09 16:11:23 af
73 * Added <ipc/ipc_notify.h>. Fixed type-mismatch in msg_rpc_trap.
74 * [91/09/02 rpd]
75 *
76 * Revision 2.13 91/08/28 11:13:53 jsb
77 * Changed MACH_RCV_TOO_LARGE and MACH_RCV_INVALID_NOTIFY to work
78 * like MACH_RCV_HEADER_ERROR, using ipc_kmsg_copyout_dest.
79 * [91/08/12 rpd]
80 *
81 * Added seqno argument to ipc_mqueue_receive.
82 * Updated mach_msg_trap fast path for seqno processing.
83 * [91/08/10 rpd]
84 * Fixed mach_msg_interrupt to check for MACH_RCV_IN_PROGRESS.
85 * [91/08/03 rpd]
86 * Renamed clport things to norma_ipc things.
87 * [91/08/15 08:24:12 jsb]
88 *
89 * Revision 2.12 91/07/31 17:43:41 dbg
90 * Add mach_msg_interrupt to force a thread waiting in mach_msg_continue
91 * or mach_msg_receive_continue into a stable state.
92 * [91/07/30 17:02:11 dbg]
93 *
94 * Revision 2.11 91/06/25 10:27:47 rpd
95 * Fixed ikm_cache critical sections to avoid blocking operations.
96 * [91/05/23 rpd]
97 *
98 * Revision 2.10 91/06/17 15:46:33 jsb
99 * Renamed NORMA conditionals.
100 * [91/06/17 10:46:35 jsb]
101 *
102 * Revision 2.9 91/06/06 17:06:12 jsb
103 * A little more NORMA_IPC support.
104 * [91/05/13 17:22:08 jsb]
105 *
106 * Revision 2.8 91/05/14 16:38:44 mrt
107 * Correcting copyright
108 *
109 * Revision 2.7 91/03/16 14:49:09 rpd
110 * Replaced ipc_thread_switch with thread_handoff.
111 * Replaced ith_saved with ikm_cache.
112 * [91/02/16 rpd]
113 * Made null mach_msg_trap measurement easier.
114 * [91/01/29 rpd]
115 *
116 * Revision 2.6 91/02/05 17:24:37 mrt
117 * Changed to new Mach copyright
118 * [91/02/01 15:53:02 mrt]
119 *
120 * Revision 2.5 91/01/08 15:15:03 rpd
121 * Added KEEP_STACKS support.
122 * [91/01/07 rpd]
123 * Changed to use thread_syscall_return.
124 * Added msg_receive_continue.
125 * [90/12/18 rpd]
126 * Added mach_msg_continue, mach_msg_receive_continue.
127 * Changes to support kernel stack discarding/hand-off.
128 * [90/12/09 17:29:04 rpd]
129 *
130 * Removed MACH_IPC_GENNOS.
131 * [90/11/08 rpd]
132 *
133 * Revision 2.4 90/12/14 11:01:36 jsb
134 * Added NORMA_IPC support: always ipc_mqueue_send() to a remote port.
135 * [90/12/13 21:25:47 jsb]
136 *
137 * Revision 2.3 90/11/05 14:30:29 rpd
138 * Removed ipc_object_release_macro.
139 * Changed ip_reference to ipc_port_reference.
140 * Changed ip_release to ipc_port_release.
141 * Changed io_release to ipc_object_release.
142 * Use new io_reference and io_release.
143 * Use new ip_reference and ip_release.
144 * [90/10/29 rpd]
145 *
146 * Revision 2.2 90/06/02 14:52:22 rpd
147 * Created for new IPC.
148 * [90/03/26 21:05:49 rpd]
149 *
150 */
151 /*
152 * File: ipc/mach_msg.c
153 * Author: Rich Draves
154 * Date: 1989
155 *
156 * Exported message traps. See mach/message.h.
157 */
158
159 #include <mach_ipc_compat.h>
160 #include <norma_ipc.h>
161
162 #include <mach/kern_return.h>
163 #include <mach/port.h>
164 #include <mach/message.h>
165 #include <kern/assert.h>
166 #include <kern/counters.h>
167 #include <kern/exception.h>
168 #include <kern/kern_kmsg.h>
169 #include <kern/lock.h>
170 #include <kern/memory.h>
171 #include <kern/sched_prim.h>
172 #include <vm/vm_map.h>
173 #include <ipc/ipc_kmsg.h>
174 #include <ipc/ipc_marequest.h>
175 #include <ipc/ipc_mqueue.h>
176 #include <ipc/ipc_object.h>
177 #include <ipc/ipc_notify.h>
178 #include <ipc/ipc_port.h>
179 #include <ipc/ipc_pset.h>
180 #include <ipc/ipc_space.h>
181 #include <ipc/ipc_thread.h>
182 #include <ipc/ipc_entry.h>
183 #include <ipc/mach_msg.h>
184
185
186 /*
187 * Continuation routines used here
188 */
189 no_return mach_msg_receive_continue(void); /* forward */
190 no_return mach_msg_continue(void); /* forward */
191
192 /*
193 * Routine: mach_msg_send
194 * Purpose:
195 * Send a message.
196 * Conditions:
197 * Nothing locked.
198 * Returns:
199 * MACH_MSG_SUCCESS Sent the message.
200 * MACH_SEND_MSG_TOO_SMALL Message smaller than a header.
201 * MACH_SEND_NO_BUFFER Couldn't allocate buffer.
202 * MACH_SEND_INVALID_DATA Couldn't copy message data.
203 * MACH_SEND_INVALID_HEADER
204 * Illegal value in the message header bits.
205 * MACH_SEND_INVALID_DEST The space is dead.
206 * MACH_SEND_INVALID_NOTIFY Bad notify port.
207 * MACH_SEND_INVALID_DEST Can't copyin destination port.
208 * MACH_SEND_INVALID_REPLY Can't copyin reply port.
209 * MACH_SEND_TIMED_OUT Timeout expired without delivery.
210 * MACH_SEND_INTERRUPTED Delivery interrupted.
211 * MACH_SEND_NO_NOTIFY Can't allocate a msg-accepted request.
212 * MACH_SEND_WILL_NOTIFY Msg-accepted notif. requested.
213 * MACH_SEND_NOTIFY_IN_PROGRESS
214 * This space has already forced a message to this port.
215 */
216
217 mach_msg_return_t
218 mach_msg_send(
219 mach_msg_header_t *msg,
220 mach_msg_option_t option,
221 mach_msg_size_t send_size,
222 mach_msg_timeout_t time_out,
223 mach_port_t notify)
224 {
225 ipc_space_t space = current_space();
226 vm_map_t map = current_map();
227 ipc_kmsg_t kmsg;
228 mach_msg_return_t mr;
229
230 mr = ipc_kmsg_get(msg, send_size, &kmsg);
231 if (mr != MACH_MSG_SUCCESS)
232 return mr;
233
234 if (option & MACH_SEND_CANCEL) {
235 if (notify == MACH_PORT_NULL)
236 mr = MACH_SEND_INVALID_NOTIFY;
237 else
238 mr = ipc_kmsg_copyin(kmsg, space, map, notify);
239 } else
240 mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
241 if (mr != MACH_MSG_SUCCESS) {
242 ikm_free(kmsg);
243 return mr;
244 }
245
246 if (option & MACH_SEND_NOTIFY) {
247 mr = ipc_mqueue_send(kmsg, MACH_SEND_TIMEOUT,
248 ((option & MACH_SEND_TIMEOUT) ?
249 time_out : MACH_MSG_TIMEOUT_NONE));
250 if (mr == MACH_SEND_TIMED_OUT) {
251 ipc_port_t dest = (ipc_port_t)
252 kmsg->ikm_header.msgh_remote_port;
253
254 if (notify == MACH_PORT_NULL)
255 mr = MACH_SEND_INVALID_NOTIFY;
256 else
257 mr = ipc_marequest_create(space, dest,
258 notify, &kmsg->ikm_marequest);
259 if (mr == MACH_MSG_SUCCESS) {
260 ipc_mqueue_send_always(kmsg);
261 return MACH_SEND_WILL_NOTIFY;
262 }
263 }
264 } else
265 mr = ipc_mqueue_send(kmsg, option & MACH_SEND_TIMEOUT,
266 time_out);
267
268 if (mr != MACH_MSG_SUCCESS) {
269 mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map);
270
271 assert(kmsg->ikm_marequest == IMAR_NULL);
272 (void) ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
273 }
274
275 return mr;
276 }
277
278 /*
279 * Routine: mach_msg_receive
280 * Purpose:
281 * Receive a message.
282 * Conditions:
283 * Nothing locked.
284 * Returns:
285 * MACH_MSG_SUCCESS Received a message.
286 * MACH_RCV_INVALID_NAME The name doesn't denote a right,
287 * or the denoted right is not receive or port set.
288 * MACH_RCV_IN_SET Receive right is a member of a set.
289 * MACH_RCV_TOO_LARGE Message wouldn't fit into buffer.
290 * MACH_RCV_TIMED_OUT Timeout expired without a message.
291 * MACH_RCV_INTERRUPTED Reception interrupted.
292 * MACH_RCV_PORT_DIED Port/set died while receiving.
293 * MACH_RCV_PORT_CHANGED Port moved into set while receiving.
294 * MACH_RCV_INVALID_DATA Couldn't copy to user buffer.
295 * MACH_RCV_INVALID_NOTIFY Bad notify port.
296 * MACH_RCV_HEADER_ERROR
297 */
298
299 mach_msg_return_t
300 mach_msg_receive(
301 mach_msg_header_t *msg,
302 mach_msg_option_t option,
303 mach_msg_size_t rcv_size,
304 mach_port_t rcv_name,
305 mach_msg_timeout_t time_out,
306 mach_port_t notify)
307 {
308 ipc_thread_t self = current_thread();
309 ipc_space_t space = current_space();
310 vm_map_t map = current_map();
311 ipc_object_t object;
312 ipc_mqueue_t mqueue;
313 ipc_kmsg_t kmsg;
314 mach_port_seqno_t seqno;
315 mach_msg_return_t mr;
316 union ipc_kmsg_return kmsg_ret;
317
318 mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
319 if (mr != MACH_MSG_SUCCESS)
320 return mr;
321 /* hold ref for object; mqueue is locked */
322
323 /*
324 * ipc_mqueue_receive may not return, because if we block
325 * then our kernel stack may be discarded. So we save
326 * state here for mach_msg_receive_continue to pick up.
327 */
328
329 self->ith_msg = msg;
330 self->ith_option = option;
331 self->ith_rcv_size = rcv_size;
332 self->ith_timeout = time_out;
333 self->ith_notify = notify;
334 self->ith_object = object;
335 self->ith_mqueue = mqueue;
336
337 if (option & MACH_RCV_LARGE) {
338 mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
339 rcv_size, time_out,
340 FALSE, mach_msg_receive_continue,
341 &kmsg_ret, &seqno);
342 /* mqueue is unlocked */
343 ipc_object_release(object);
344 if (mr != MACH_MSG_SUCCESS) {
345 if (mr == MACH_RCV_TOO_LARGE) {
346 mach_msg_size_t real_size = kmsg_ret.msize;
347
348 assert(real_size > rcv_size);
349
350 (void) copyout(&real_size,
351 &msg->msgh_size,
352 sizeof(mach_msg_size_t));
353 }
354
355 return mr;
356 }
357
358 kmsg = kmsg_ret.kmsg;
359 kmsg->ikm_header.msgh_seqno = seqno;
360 assert(kmsg->ikm_header.msgh_size <= rcv_size);
361 } else {
362 mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
363 MACH_MSG_SIZE_MAX, time_out,
364 FALSE, mach_msg_receive_continue,
365 &kmsg_ret, &seqno);
366 /* mqueue is unlocked */
367 ipc_object_release(object);
368 if (mr != MACH_MSG_SUCCESS)
369 return mr;
370
371 kmsg = kmsg_ret.kmsg;
372 kmsg->ikm_header.msgh_seqno = seqno;
373 if (kmsg->ikm_header.msgh_size > rcv_size) {
374 ipc_kmsg_copyout_dest(kmsg, space);
375 (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
376 return MACH_RCV_TOO_LARGE;
377 }
378 }
379
380 if (option & MACH_RCV_NOTIFY) {
381 if (notify == MACH_PORT_NULL)
382 mr = MACH_RCV_INVALID_NOTIFY;
383 else
384 mr = ipc_kmsg_copyout(kmsg, space, map, notify);
385 } else
386 mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
387 if (mr != MACH_MSG_SUCCESS) {
388 if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
389 (void) ipc_kmsg_put(msg, kmsg,
390 kmsg->ikm_header.msgh_size);
391 } else {
392 ipc_kmsg_copyout_dest(kmsg, space);
393 (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
394 }
395
396 return mr;
397 }
398
399 return ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
400 }
401
402 /*
403 * Routine: mach_msg_receive_continue
404 * Purpose:
405 * Continue after blocking for a message.
406 * Conditions:
407 * Nothing locked. We are running on a new kernel stack,
408 * with the receive state saved in the thread. From here
409 * control goes back to user space.
410 */
411
412 no_return
413 mach_msg_receive_continue(void)
414 {
415 ipc_thread_t self = current_thread();
416 ipc_space_t space = current_space();
417 vm_map_t map = current_map();
418 mach_msg_header_t *msg = self->ith_msg;
419 mach_msg_option_t option = self->ith_option;
420 mach_msg_size_t rcv_size = self->ith_rcv_size;
421 mach_msg_timeout_t time_out = self->ith_timeout;
422 mach_port_t notify = self->ith_notify;
423 ipc_object_t object = self->ith_object;
424 ipc_mqueue_t mqueue = self->ith_mqueue;
425 ipc_kmsg_t kmsg;
426 mach_port_seqno_t seqno;
427 mach_msg_return_t mr;
428 union ipc_kmsg_return kmsg_ret;
429
430 if (option & MACH_RCV_LARGE) {
431 mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
432 rcv_size, time_out,
433 TRUE, mach_msg_receive_continue,
434 &kmsg_ret, &seqno);
435 /* mqueue is unlocked */
436 ipc_object_release(object);
437 if (mr != MACH_MSG_SUCCESS) {
438 if (mr == MACH_RCV_TOO_LARGE) {
439 mach_msg_size_t real_size = kmsg_ret.msize;
440
441 assert(real_size > rcv_size);
442
443 (void) copyout(&real_size,
444 &msg->msgh_size,
445 sizeof(mach_msg_size_t));
446 }
447
448 thread_syscall_return(mr);
449 /*NOTREACHED*/
450 }
451
452 kmsg = kmsg_ret.kmsg;
453 kmsg->ikm_header.msgh_seqno = seqno;
454 assert(kmsg->ikm_header.msgh_size <= rcv_size);
455 } else {
456 mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
457 MACH_MSG_SIZE_MAX, time_out,
458 TRUE, mach_msg_receive_continue,
459 &kmsg_ret, &seqno);
460 /* mqueue is unlocked */
461 ipc_object_release(object);
462 if (mr != MACH_MSG_SUCCESS) {
463 thread_syscall_return(mr);
464 /*NOTREACHED*/
465 }
466
467 kmsg = kmsg_ret.kmsg;
468 kmsg->ikm_header.msgh_seqno = seqno;
469 if (kmsg->ikm_header.msgh_size > rcv_size) {
470 ipc_kmsg_copyout_dest(kmsg, space);
471 (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
472 thread_syscall_return(MACH_RCV_TOO_LARGE);
473 /*NOTREACHED*/
474 }
475 }
476
477 if (option & MACH_RCV_NOTIFY) {
478 if (notify == MACH_PORT_NULL)
479 mr = MACH_RCV_INVALID_NOTIFY;
480 else
481 mr = ipc_kmsg_copyout(kmsg, space, map, notify);
482 } else
483 mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
484 if (mr != MACH_MSG_SUCCESS) {
485 if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
486 (void) ipc_kmsg_put(msg, kmsg,
487 kmsg->ikm_header.msgh_size);
488 } else {
489 ipc_kmsg_copyout_dest(kmsg, space);
490 (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
491 }
492
493 thread_syscall_return(mr);
494 /*NOTREACHED*/
495 }
496
497 mr = ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
498 thread_syscall_return(mr);
499 /*NOTREACHED*/
500 }
501
502 /*
503 * Routine: mach_msg_trap [mach trap]
504 * Purpose:
505 * Possibly send a message; possibly receive a message.
506 * Conditions:
507 * Nothing locked.
508 * Returns:
509 * All of mach_msg_send and mach_msg_receive error codes.
510 */
511
512 mach_msg_return_t
513 mach_msg_trap(
514 mach_msg_header_t *msg,
515 mach_msg_option_t option,
516 mach_msg_size_t send_size,
517 mach_msg_size_t rcv_size,
518 mach_port_t rcv_name,
519 mach_msg_timeout_t time_out,
520 mach_port_t notify)
521 {
522 mach_msg_return_t mr;
523
524 /* first check for common cases */
525
526 if (option == (MACH_SEND_MSG|MACH_RCV_MSG)) {
527 register ipc_thread_t self = current_thread();
528 ipc_space_t space = self->task->itk_space;
529 register ipc_kmsg_t kmsg;
530 register ipc_port_t dest_port;
531 ipc_object_t rcv_object;
532 register ipc_mqueue_t rcv_mqueue;
533 mach_msg_size_t reply_size;
534
535 /*
536 * This case is divided into ten sections, each
537 * with a label. There are five optimized
538 * sections and six unoptimized sections, which
539 * do the same thing but handle all possible
540 * cases and are slower.
541 *
542 * The five sections for an RPC are
543 * 1) Get request message into a buffer.
544 * (fast_get or slow_get)
545 * 2) Copyin request message and rcv_name.
546 * (fast_copyin or slow_copyin)
547 * 3) Enqueue request and dequeue reply.
548 * (fast_send_receive or
549 * slow_send and slow_receive)
550 * 4) Copyout reply message.
551 * (fast_copyout or slow_copyout)
552 * 5) Put reply message to user`s buffer.
553 * (fast_put or slow_put)
554 *
555 * Keep the locking hierarchy firmly in mind.
556 * (First spaces, then ports, then port sets,
557 * then message queues.) Only a non-blocking
558 * attempt can be made to acquire locks out of
559 * order, or acquire two locks on the same level.
560 * Acquiring two locks on the same level will
561 * fail if the objects are really the same,
562 * unless simple locking is disabled. This is OK,
563 * because then the extra unlock does nothing.
564 *
565 * There are two major reasons these RPCs can`t use
566 * ipc_thread_switch, and use slow_send/slow_receive:
567 * 1) Kernel RPCs.
568 * 2) Servers fall behind clients, so
569 * client doesn`t find a blocked server thread and
570 * server finds waiting messages and can`t block.
571 */
572
573 /*
574 fast_get:
575 */
576 /*
577 * optimized ipc_kmsg_get
578 *
579 * No locks, references, or messages held.
580 * We must clear ikm_cache before copyinmsg.
581 */
582
583 if ((send_size > IKM_SAVED_MSG_SIZE) ||
584 (send_size < sizeof(mach_msg_header_t)) ||
585 (send_size & 3) ||
586 ((kmsg = ikm_cache()) == IKM_NULL))
587 goto slow_get;
588
589 ikm_cache() = IKM_NULL;
590 ikm_check_initialized(kmsg, IKM_SAVED_KMSG_SIZE);
591
592 if (copyinmsg(msg,
593 &kmsg->ikm_header,
594 send_size)) {
595 ikm_free(kmsg);
596 goto slow_get;
597 }
598
599 kmsg->ikm_header.msgh_size = send_size;
600
601 fast_copyin:
602 /*
603 * optimized ipc_kmsg_copyin/ipc_mqueue_copyin
604 *
605 * We have the request message data in kmsg.
606 * Must still do copyin, send, receive, etc.
607 *
608 * If the message isn`t simple, we can`t combine
609 * ipc_kmsg_copyin_header and ipc_mqueue_copyin,
610 * because copyin of the message body might
611 * affect rcv_name.
612 */
613
614 switch (kmsg->ikm_header.msgh_bits) {
615 case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
616 MACH_MSG_TYPE_MAKE_SEND_ONCE): {
617 register ipc_entry_t table;
618 register ipc_entry_num_t size;
619 register ipc_port_t reply_port;
620
621 /* sending a request message */
622
623 {
624 register mach_port_index_t index;
625 register mach_port_gen_t gen;
626
627 {
628 register mach_port_t reply_name =
629 kmsg->ikm_header.msgh_local_port;
630
631 if (reply_name != rcv_name)
632 goto slow_copyin;
633
634 /* optimized ipc_entry_lookup of reply_name */
635
636 index = MACH_PORT_INDEX(reply_name);
637 gen = MACH_PORT_GEN(reply_name);
638 }
639
640 is_read_lock(space);
641 assert(space->is_active);
642
643 size = space->is_table_size;
644 table = space->is_table;
645
646 if (index >= size)
647 goto abort_request_copyin;
648
649 {
650 register ipc_entry_t entry;
651 register ipc_entry_bits_t bits;
652
653 entry = &table[index];
654 bits = entry->ie_bits;
655
656 /* check generation number and type bit */
657
658 if ((bits & (IE_BITS_GEN_MASK|
659 MACH_PORT_TYPE_RECEIVE)) !=
660 (gen | MACH_PORT_TYPE_RECEIVE))
661 goto abort_request_copyin;
662
663 reply_port = (ipc_port_t) entry->ie_object;
664 assert(reply_port != IP_NULL);
665 }
666 }
667
668 /* optimized ipc_entry_lookup of dest_name */
669
670 {
671 register mach_port_index_t index;
672 register mach_port_gen_t gen;
673
674 {
675 register mach_port_t dest_name =
676 kmsg->ikm_header.msgh_remote_port;
677
678 index = MACH_PORT_INDEX(dest_name);
679 gen = MACH_PORT_GEN(dest_name);
680 }
681
682 if (index >= size)
683 goto abort_request_copyin;
684
685 {
686 register ipc_entry_t entry;
687 register ipc_entry_bits_t bits;
688
689 entry = &table[index];
690 bits = entry->ie_bits;
691
692 /* check generation number and type bit */
693
694 if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
695 (gen | MACH_PORT_TYPE_SEND))
696 goto abort_request_copyin;
697
698 assert(IE_BITS_UREFS(bits) > 0);
699
700 dest_port = (ipc_port_t) entry->ie_object;
701 assert(dest_port != IP_NULL);
702 }
703 }
704
705 /*
706 * To do an atomic copyin, need simultaneous
707 * locks on both ports and the space. If
708 * dest_port == reply_port, and simple locking is
709 * enabled, then we will abort. Otherwise it`s
710 * OK to unlock twice.
711 */
712
713 ip_lock(dest_port);
714 if (!ip_active(dest_port) ||
715 !ip_lock_try(reply_port)) {
716 ip_unlock(dest_port);
717 goto abort_request_copyin;
718 }
719 is_read_unlock(space);
720
721 assert(dest_port->ip_srights > 0);
722 dest_port->ip_srights++;
723 ip_reference(dest_port);
724
725 assert(ip_active(reply_port));
726 assert(reply_port->ip_receiver_name ==
727 kmsg->ikm_header.msgh_local_port);
728 assert(reply_port->ip_receiver == space);
729
730 reply_port->ip_sorights++;
731 ip_reference(reply_port);
732
733 kmsg->ikm_header.msgh_bits =
734 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
735 MACH_MSG_TYPE_PORT_SEND_ONCE);
736 kmsg->ikm_header.msgh_remote_port =
737 (mach_port_t) dest_port;
738 kmsg->ikm_header.msgh_local_port =
739 (mach_port_t) reply_port;
740
741 /* make sure we can queue to the destination */
742
743 if (dest_port->ip_receiver == ipc_space_kernel) {
744 /*
745 * The kernel server has a reference to
746 * the reply port, which it hands back
747 * to us in the reply message. We do
748 * not need to keep another reference to
749 * it.
750 */
751 ip_unlock(reply_port);
752
753 assert(ip_active(dest_port));
754 ip_unlock(dest_port);
755 goto kernel_send;
756 }
757
758 #if NORMA_IPC
759 if (IP_NORMA_IS_PROXY(dest_port)) {
760 ip_unlock(dest_port);
761 ip_unlock(reply_port);
762 goto norma_send;
763 }
764 #endif /* NORMA_IPC */
765
766 if (dest_port->ip_msgcount >= dest_port->ip_qlimit)
767 goto abort_request_send_receive;
768
769 /* optimized ipc_mqueue_copyin */
770
771 if (reply_port->ip_pset != IPS_NULL)
772 goto abort_request_send_receive;
773
774 rcv_object = (ipc_object_t) reply_port;
775 io_reference(rcv_object);
776 rcv_mqueue = &reply_port->ip_messages;
777 imq_lock(rcv_mqueue);
778 io_unlock(rcv_object);
779 goto fast_send_receive;
780
781 abort_request_copyin:
782 is_read_unlock(space);
783 goto slow_copyin;
784
785 abort_request_send_receive:
786 ip_unlock(dest_port);
787 ip_unlock(reply_port);
788 goto slow_send;
789 }
790
791 case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
792 register ipc_entry_num_t size;
793 register ipc_entry_t table;
794
795 /* sending a reply message */
796
797 {
798 register mach_port_t reply_name =
799 kmsg->ikm_header.msgh_local_port;
800
801 if (reply_name != MACH_PORT_NULL)
802 goto slow_copyin;
803 }
804
805 is_write_lock(space);
806 assert(space->is_active);
807
808 /* optimized ipc_entry_lookup */
809
810 size = space->is_table_size;
811 table = space->is_table;
812
813 {
814 register ipc_entry_t entry;
815 register mach_port_gen_t gen;
816 register mach_port_index_t index;
817
818 {
819 register mach_port_t dest_name =
820 kmsg->ikm_header.msgh_remote_port;
821
822 index = MACH_PORT_INDEX(dest_name);
823 gen = MACH_PORT_GEN(dest_name);
824 }
825
826 if (index >= size)
827 goto abort_reply_dest_copyin;
828
829 entry = &table[index];
830
831 /* check generation, collision bit, and type bit */
832
833 if ((entry->ie_bits & (IE_BITS_GEN_MASK|
834 IE_BITS_COLLISION|
835 MACH_PORT_TYPE_SEND_ONCE)) !=
836 (gen | MACH_PORT_TYPE_SEND_ONCE))
837 goto abort_reply_dest_copyin;
838
839 /* optimized ipc_right_copyin */
840
841 assert(IE_BITS_TYPE(entry->ie_bits) ==
842 MACH_PORT_TYPE_SEND_ONCE);
843 assert(IE_BITS_UREFS(entry->ie_bits) == 1);
844 assert((entry->ie_bits & IE_BITS_MAREQUEST) == 0);
845
846 if (entry->ie_request != 0)
847 goto abort_reply_dest_copyin;
848
849 dest_port = (ipc_port_t) entry->ie_object;
850 assert(dest_port != IP_NULL);
851
852 ip_lock(dest_port);
853 if (!ip_active(dest_port)) {
854 ip_unlock(dest_port);
855 goto abort_reply_dest_copyin;
856 }
857
858 assert(dest_port->ip_sorights > 0);
859
860 /* optimized ipc_entry_dealloc */
861
862 entry->ie_next = table->ie_next;
863 table->ie_next = index;
864 entry->ie_bits = gen;
865 entry->ie_object = IO_NULL;
866 }
867
868 kmsg->ikm_header.msgh_bits =
869 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
870 0);
871 kmsg->ikm_header.msgh_remote_port =
872 (mach_port_t) dest_port;
873
874 /* make sure we can queue to the destination */
875
876 assert(dest_port->ip_receiver != ipc_space_kernel);
877 #if NORMA_IPC
878 if (IP_NORMA_IS_PROXY(dest_port)) {
879 is_write_unlock(space);
880 ip_unlock(dest_port);
881 goto norma_send;
882 }
883 #endif /* NORMA_IPC */
884
885 /* optimized ipc_entry_lookup/ipc_mqueue_copyin */
886
887 {
888 register ipc_entry_t entry;
889 register ipc_entry_bits_t bits;
890
891 {
892 register mach_port_index_t index;
893 register mach_port_gen_t gen;
894
895 index = MACH_PORT_INDEX(rcv_name);
896 gen = MACH_PORT_GEN(rcv_name);
897
898 if (index >= size)
899 goto abort_reply_rcv_copyin;
900
901 entry = &table[index];
902 bits = entry->ie_bits;
903
904 /* check generation number */
905
906 if ((bits & IE_BITS_GEN_MASK) != gen)
907 goto abort_reply_rcv_copyin;
908 }
909
910 /* check type bits; looking for receive or set */
911
912 if (bits & MACH_PORT_TYPE_PORT_SET) {
913 register ipc_pset_t rcv_pset;
914
915 rcv_pset = (ipc_pset_t) entry->ie_object;
916 assert(rcv_pset != IPS_NULL);
917
918 ips_lock(rcv_pset);
919 assert(ips_active(rcv_pset));
920
921 rcv_object = (ipc_object_t) rcv_pset;
922 rcv_mqueue = &rcv_pset->ips_messages;
923 } else if (bits & MACH_PORT_TYPE_RECEIVE) {
924 register ipc_port_t rcv_port;
925
926 rcv_port = (ipc_port_t) entry->ie_object;
927 assert(rcv_port != IP_NULL);
928
929 if (!ip_lock_try(rcv_port))
930 goto abort_reply_rcv_copyin;
931 assert(ip_active(rcv_port));
932
933 if (rcv_port->ip_pset != IPS_NULL) {
934 ip_unlock(rcv_port);
935 goto abort_reply_rcv_copyin;
936 }
937
938 rcv_object = (ipc_object_t) rcv_port;
939 rcv_mqueue = &rcv_port->ip_messages;
940 } else
941 goto abort_reply_rcv_copyin;
942 }
943
944 is_write_unlock(space);
945 io_reference(rcv_object);
946 imq_lock(rcv_mqueue);
947 io_unlock(rcv_object);
948 goto fast_send_receive;
949
950 abort_reply_dest_copyin:
951 is_write_unlock(space);
952 goto slow_copyin;
953
954 abort_reply_rcv_copyin:
955 ip_unlock(dest_port);
956 is_write_unlock(space);
957 goto slow_send;
958 }
959
960 default:
961 goto slow_copyin;
962 }
963 /*NOTREACHED*/
964
965 fast_send_receive:
966 /*
967 * optimized ipc_mqueue_send/ipc_mqueue_receive
968 *
969 * Finished get/copyin of kmsg and copyin of rcv_name.
970 * space is unlocked, dest_port is locked,
971 * we can queue kmsg to dest_port,
972 * rcv_mqueue is locked, rcv_object holds a ref,
973 * if rcv_object is a port it isn`t in a port set
974 *
975 * Note that if simple locking is turned off,
976 * then we could have dest_mqueue == rcv_mqueue
977 * and not abort when we try to lock dest_mqueue.
978 */
979
980 assert(ip_active(dest_port));
981 assert(dest_port->ip_receiver != ipc_space_kernel);
982 #if NORMA_IPC
983 assert(! IP_NORMA_IS_PROXY(dest_port));
984 #endif /* NORMA_IPC */
985 assert((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
986 (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
987 MACH_MSG_TYPE_PORT_SEND_ONCE));
988 assert((kmsg->ikm_header.msgh_bits &
989 MACH_MSGH_BITS_CIRCULAR) == 0);
990
991 {
992 register ipc_mqueue_t dest_mqueue;
993 register ipc_thread_t receiver;
994
995 {
996 register ipc_pset_t dest_pset;
997
998 dest_pset = dest_port->ip_pset;
999 if (dest_pset == IPS_NULL)
1000 dest_mqueue = &dest_port->ip_messages;
1001 else
1002 dest_mqueue = &dest_pset->ips_messages;
1003 }
1004
1005 if (!imq_lock_try(dest_mqueue)) {
1006 abort_send_receive:
1007 ip_unlock(dest_port);
1008 imq_unlock(rcv_mqueue);
1009 ipc_object_release(rcv_object);
1010 goto slow_send;
1011 }
1012
1013 receiver = ipc_thread_queue_first(&dest_mqueue->imq_threads);
1014 if ((receiver == ITH_NULL) ||
1015 (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
1016 != IKM_NULL)) {
1017 imq_unlock(dest_mqueue);
1018 goto abort_send_receive;
1019 }
1020
1021 /*
1022 * There is a receiver thread waiting, and
1023 * there is no reply message for us to pick up.
1024 * We have hope of hand-off, so save state.
1025 */
1026
1027 self->ith_msg = msg;
1028 self->ith_rcv_size = rcv_size;
1029 self->ith_object = rcv_object;
1030 self->ith_mqueue = rcv_mqueue;
1031
1032 if ((receiver->swap_func == mach_msg_continue) &&
1033 thread_handoff(self, mach_msg_continue, receiver)) {
1034 assert(current_thread() == receiver);
1035
1036 /*
1037 * We can use the optimized receive code,
1038 * because the receiver is using no options.
1039 */
1040 } else if ((receiver->swap_func == exception_raise_continue) &&
1041 thread_handoff(self, mach_msg_continue, receiver)) {
1042 counter(c_mach_msg_trap_block_exc++);
1043 assert(current_thread() == receiver);
1044
1045 /*
1046 * We are a reply message coming back through
1047 * the optimized exception-handling path.
1048 * Finish with rcv_mqueue and dest_mqueue,
1049 * and then jump to exception code with
1050 * dest_port still locked. We don`t bother
1051 * with a sequence number in this case.
1052 */
1053
1054 ipc_thread_enqueue_macro(
1055 &rcv_mqueue->imq_threads, self);
1056 self->ith_state = MACH_RCV_IN_PROGRESS;
1057 self->ith_msize = MACH_MSG_SIZE_MAX;
1058 imq_unlock(rcv_mqueue);
1059
1060 ipc_thread_rmqueue_first_macro(
1061 &dest_mqueue->imq_threads, receiver);
1062 imq_unlock(dest_mqueue);
1063
1064 exception_raise_continue_fast(dest_port, kmsg);
1065 /*NOTREACHED*/
1066 return MACH_MSG_SUCCESS;
1067 } else if ((send_size <= receiver->ith_msize) &&
1068 thread_handoff(self, mach_msg_continue, receiver)) {
1069 assert(current_thread() == receiver);
1070
1071 if ((receiver->swap_func ==
1072 mach_msg_receive_continue) &&
1073 ((receiver->ith_option & MACH_RCV_NOTIFY) == 0)) {
1074 /*
1075 * We can still use the optimized code.
1076 */
1077 } else {
1078 counter(c_mach_msg_trap_block_slow++);
1079 /*
1080 * We are running as the receiver,
1081 * but we can`t use the optimized code.
1082 * Finish send/receive processing.
1083 */
1084
1085 dest_port->ip_msgcount++;
1086 ip_unlock(dest_port);
1087
1088 ipc_thread_enqueue_macro(
1089 &rcv_mqueue->imq_threads, self);
1090 self->ith_state = MACH_RCV_IN_PROGRESS;
1091 self->ith_msize = MACH_MSG_SIZE_MAX;
1092 imq_unlock(rcv_mqueue);
1093
1094 ipc_thread_rmqueue_first_macro(
1095 &dest_mqueue->imq_threads, receiver);
1096 receiver->ith_state = MACH_MSG_SUCCESS;
1097 receiver->ith_kmsg = kmsg;
1098 receiver->ith_seqno = dest_port->ip_seqno++;
1099 imq_unlock(dest_mqueue);
1100
1101 /*
1102 * Call the receiver`s continuation.
1103 */
1104
1105 receiver->wait_result = THREAD_AWAKENED;
1106 (*receiver->swap_func)();
1107 /*NOTREACHED*/
1108 return MACH_MSG_SUCCESS;
1109 }
1110 } else {
1111 /*
1112 * The receiver can`t accept the message,
1113 * or we can`t switch to the receiver.
1114 */
1115
1116 imq_unlock(dest_mqueue);
1117 goto abort_send_receive;
1118 }
1119 counter(c_mach_msg_trap_block_fast++);
1120
1121 /*
1122 * Safe to unlock dest_port now that we are
1123 * committed to this path, because we hold
1124 * dest_mqueue locked. We never bother changing
1125 * dest_port->ip_msgcount.
1126 */
1127
1128 ip_unlock(dest_port);
1129
1130 /*
1131 * We need to finish preparing self for its
1132 * time asleep in rcv_mqueue.
1133 */
1134
1135 ipc_thread_enqueue_macro(&rcv_mqueue->imq_threads, self);
1136 self->ith_state = MACH_RCV_IN_PROGRESS;
1137 self->ith_msize = MACH_MSG_SIZE_MAX;
1138 imq_unlock(rcv_mqueue);
1139
1140 /*
1141 * Finish extracting receiver from dest_mqueue.
1142 */
1143
1144 ipc_thread_rmqueue_first_macro(
1145 &dest_mqueue->imq_threads, receiver);
1146 kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
1147 imq_unlock(dest_mqueue);
1148
1149 /*
1150 * We don't have to do any post-dequeue processing of
1151 * the message. We never incremented ip_msgcount, we
1152 * know it has no msg-accepted request, and blocked
1153 * senders aren't a worry because we found the port
1154 * with a receiver waiting.
1155 */
1156
1157 self = receiver;
1158 space = self->task->itk_space;
1159
1160 msg = self->ith_msg;
1161 rcv_size = self->ith_rcv_size;
1162 rcv_object = self->ith_object;
1163
1164 /* inline ipc_object_release */
1165 io_lock(rcv_object);
1166 io_release(rcv_object);
1167 io_check_unlock(rcv_object);
1168 }
1169
1170 fast_copyout:
1171 /*
1172 * Nothing locked and no references held, except
1173 * we have kmsg with msgh_seqno filled in. Must
1174 * still check against rcv_size and do
1175 * ipc_kmsg_copyout/ipc_kmsg_put.
1176 */
1177
1178 assert((ipc_port_t) kmsg->ikm_header.msgh_remote_port
1179 == dest_port);
1180
1181 reply_size = kmsg->ikm_header.msgh_size;
1182 if (rcv_size < reply_size)
1183 goto slow_copyout;
1184
1185 /* optimized ipc_kmsg_copyout/ipc_kmsg_copyout_header */
1186
1187 switch (kmsg->ikm_header.msgh_bits) {
1188 case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
1189 MACH_MSG_TYPE_PORT_SEND_ONCE): {
1190 ipc_port_t reply_port =
1191 (ipc_port_t) kmsg->ikm_header.msgh_local_port;
1192 mach_port_t dest_name, reply_name;
1193
1194 /* receiving a request message */
1195
1196 if (!IP_VALID(reply_port))
1197 goto slow_copyout;
1198
1199 is_write_lock(space);
1200 assert(space->is_active);
1201
1202 /*
1203 * To do an atomic copyout, need simultaneous
1204 * locks on both ports and the space. If
1205 * dest_port == reply_port, and simple locking is
1206 * enabled, then we will abort. Otherwise it's
1207 * OK to unlock twice.
1208 */
1209
1210 ip_lock(dest_port);
1211 if (!ip_active(dest_port) ||
1212 !ip_lock_try(reply_port))
1213 goto abort_request_copyout;
1214
1215 if (!ip_active(reply_port)) {
1216 ip_unlock(reply_port);
1217 goto abort_request_copyout;
1218 }
1219
1220 assert(reply_port->ip_sorights > 0);
1221 ip_unlock(reply_port);
1222
1223 {
1224 register ipc_entry_t table;
1225 register ipc_entry_t entry;
1226 register mach_port_index_t index;
1227
1228 /* optimized ipc_entry_get */
1229
1230 table = space->is_table;
1231 index = table->ie_next;
1232
1233 if (index == 0)
1234 goto abort_request_copyout;
1235
1236 entry = &table[index];
1237 table->ie_next = entry->ie_next;
1238 entry->ie_request = 0;
1239
1240 {
1241 register mach_port_gen_t gen;
1242
1243 assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
1244 gen = entry->ie_bits + IE_BITS_GEN_ONE;
1245
1246 reply_name = MACH_PORT_MAKE(index, gen);
1247
1248 /* optimized ipc_right_copyout */
1249
1250 entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
1251 }
1252
1253 assert(MACH_PORT_VALID(reply_name));
1254 entry->ie_object = (ipc_object_t) reply_port;
1255 is_write_unlock(space);
1256 }
1257
1258 /* optimized ipc_object_copyout_dest */
1259
1260 assert(dest_port->ip_srights > 0);
1261 ip_release(dest_port);
1262
1263 if (dest_port->ip_receiver == space)
1264 dest_name = dest_port->ip_receiver_name;
1265 else
1266 dest_name = MACH_PORT_NULL;
1267
1268 if ((--dest_port->ip_srights == 0) &&
1269 (dest_port->ip_nsrequest != IP_NULL)) {
1270 ipc_port_t nsrequest;
1271 mach_port_mscount_t mscount;
1272
1273 /* a rather rare case */
1274
1275 nsrequest = dest_port->ip_nsrequest;
1276 mscount = dest_port->ip_mscount;
1277 dest_port->ip_nsrequest = IP_NULL;
1278 ip_unlock(dest_port);
1279
1280 ipc_notify_no_senders(nsrequest, mscount);
1281 } else
1282 ip_unlock(dest_port);
1283
1284 kmsg->ikm_header.msgh_bits =
1285 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
1286 MACH_MSG_TYPE_PORT_SEND);
1287 kmsg->ikm_header.msgh_remote_port = reply_name;
1288 kmsg->ikm_header.msgh_local_port = dest_name;
1289 goto fast_put;
1290
1291 abort_request_copyout:
1292 ip_unlock(dest_port);
1293 is_write_unlock(space);
1294 goto slow_copyout;
1295 }
1296
1297 case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
1298 register mach_port_t dest_name;
1299
1300 /* receiving a reply message */
1301
1302 ip_lock(dest_port);
1303 if (!ip_active(dest_port))
1304 goto slow_copyout;
1305
1306 /* optimized ipc_object_copyout_dest */
1307
1308 assert(dest_port->ip_sorights > 0);
1309
1310 if (dest_port->ip_receiver == space) {
1311 ip_release(dest_port);
1312 dest_port->ip_sorights--;
1313 dest_name = dest_port->ip_receiver_name;
1314 ip_unlock(dest_port);
1315 } else {
1316 ip_unlock(dest_port);
1317
1318 ipc_notify_send_once(dest_port);
1319 dest_name = MACH_PORT_NULL;
1320 }
1321
1322 kmsg->ikm_header.msgh_bits =
1323 MACH_MSGH_BITS(0,
1324 MACH_MSG_TYPE_PORT_SEND_ONCE);
1325 kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
1326 kmsg->ikm_header.msgh_local_port = dest_name;
1327 goto fast_put;
1328 }
1329
1330 case MACH_MSGH_BITS_COMPLEX|
1331 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
1332 register mach_port_t dest_name;
1333
1334 /* receiving a complex reply message */
1335
1336 ip_lock(dest_port);
1337 if (!ip_active(dest_port))
1338 goto slow_copyout;
1339
1340 /* optimized ipc_object_copyout_dest */
1341
1342 assert(dest_port->ip_sorights > 0);
1343
1344 if (dest_port->ip_receiver == space) {
1345 ip_release(dest_port);
1346 dest_port->ip_sorights--;
1347 dest_name = dest_port->ip_receiver_name;
1348 ip_unlock(dest_port);
1349 } else {
1350 ip_unlock(dest_port);
1351
1352 ipc_notify_send_once(dest_port);
1353 dest_name = MACH_PORT_NULL;
1354 }
1355
1356 kmsg->ikm_header.msgh_bits =
1357 MACH_MSGH_BITS_COMPLEX |
1358 MACH_MSGH_BITS(0,
1359 MACH_MSG_TYPE_PORT_SEND_ONCE);
1360 kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
1361 kmsg->ikm_header.msgh_local_port = dest_name;
1362
1363 mr = ipc_kmsg_copyout_body(
1364 (vm_offset_t) (&kmsg->ikm_header + 1),
1365 (vm_offset_t) &kmsg->ikm_header
1366 + kmsg->ikm_header.msgh_size,
1367 space,
1368 current_map());
1369
1370 if (mr != MACH_MSG_SUCCESS) {
1371 (void) ipc_kmsg_put(msg, kmsg,
1372 kmsg->ikm_header.msgh_size);
1373 return mr | MACH_RCV_BODY_ERROR;
1374 }
1375 goto fast_put;
1376 }
1377
1378 default:
1379 goto slow_copyout;
1380 }
1381 /*NOTREACHED*/
1382
1383 fast_put:
1384 /*
1385 * We have the reply message data in kmsg,
1386 * and the reply message size in reply_size.
1387 * Just need to copy it out to the user and free kmsg.
1388 * We must check ikm_cache after copyoutmsg.
1389 */
1390
1391 ikm_check_initialized(kmsg, kmsg->ikm_size);
1392
1393 if ((kmsg->ikm_size != IKM_SAVED_KMSG_SIZE) ||
1394 copyoutmsg(&kmsg->ikm_header,
1395 msg,
1396 reply_size) ||
1397 (ikm_cache() != IKM_NULL))
1398 goto slow_put;
1399
1400 ikm_cache() = kmsg;
1401 thread_syscall_return(MACH_MSG_SUCCESS);
1402 /*NOTREACHED*/
1403 return MACH_MSG_SUCCESS; /* help for the compiler */
1404
1405 /*
1406 * The slow path has a few non-register temporary
1407 * variables used only for call-by-reference.
1408 */
1409
1410 slow_get:
1411 /*
1412 * No locks, references, or messages held.
1413 * Still have to get the request, send it,
1414 * receive reply, etc.
1415 */
1416 {
1417 ipc_kmsg_t temp_kmsg;
1418
1419 mr = ipc_kmsg_get(msg, send_size, &temp_kmsg);
1420 if (mr != MACH_MSG_SUCCESS) {
1421 thread_syscall_return(mr);
1422 /*NOTREACHED*/
1423 }
1424 kmsg = temp_kmsg;
1425
1426 /* try to get back on optimized path */
1427 goto fast_copyin;
1428 }
1429
1430 slow_copyin:
1431 /*
1432 * We have the message data in kmsg, but
1433 * we still need to copyin, send it,
1434 * receive a reply, and do copyout.
1435 */
1436
1437 mr = ipc_kmsg_copyin(kmsg, space, current_map(),
1438 MACH_PORT_NULL);
1439 if (mr != MACH_MSG_SUCCESS) {
1440 ikm_free(kmsg);
1441 thread_syscall_return(mr);
1442 /*NOTREACHED*/
1443 }
1444
1445 /* try to get back on optimized path */
1446
1447 if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR)
1448 goto slow_send;
1449
1450 dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
1451 assert(IP_VALID(dest_port));
1452
1453 ip_lock(dest_port);
1454 if (dest_port->ip_receiver == ipc_space_kernel) {
1455 assert(ip_active(dest_port));
1456 ip_unlock(dest_port);
1457 goto kernel_send;
1458 }
1459
1460 if (ip_active(dest_port) &&
1461 #if NORMA_IPC
1462 (! IP_NORMA_IS_PROXY(dest_port)) &&
1463 #endif /* NORMA_IPC */
1464 ((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
1465 (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
1466 MACH_MSG_TYPE_PORT_SEND_ONCE)))
1467 {
1468 /*
1469 * Try an optimized ipc_mqueue_copyin.
1470 * It will work if this is a request message.
1471 */
1472
1473 register ipc_port_t reply_port;
1474
1475 reply_port = (ipc_port_t)
1476 kmsg->ikm_header.msgh_local_port;
1477 if (IP_VALID(reply_port)) {
1478 if (ip_lock_try(reply_port)) {
1479 if (ip_active(reply_port) &&
1480 reply_port->ip_receiver == space &&
1481 reply_port->ip_receiver_name == rcv_name &&
1482 reply_port->ip_pset == IPS_NULL)
1483 {
1484 /* Grab a reference to the reply port. */
1485 rcv_object = (ipc_object_t) reply_port;
1486 io_reference(rcv_object);
1487 rcv_mqueue = &reply_port->ip_messages;
1488 imq_lock(rcv_mqueue);
1489 io_unlock(rcv_object);
1490 goto fast_send_receive;
1491 }
1492 ip_unlock(reply_port);
1493 }
1494 }
1495 }
1496
1497 ip_unlock(dest_port);
1498 goto slow_send;
1499
1500 #if NORMA_IPC
1501 norma_send:
1502 /*
1503 * Nothing is locked. We have acquired kmsg, but
1504 * we still need to send it and receive a reply.
1505 */
1506
1507 mr = norma_ipc_send(kmsg);
1508 if (mr != MACH_MSG_SUCCESS) {
1509 mr |= ipc_kmsg_copyout_pseudo(kmsg, space,
1510 current_map());
1511
1512 assert(kmsg->ikm_marequest == IMAR_NULL);
1513 (void) ipc_kmsg_put(msg, kmsg,
1514 kmsg->ikm_header.msgh_size);
1515 thread_syscall_return(mr);
1516 /*NOTREACHED*/
1517 }
1518
1519 goto slow_get_rcv_port;
1520 #endif /* NORMA_IPC */
1521
1522 kernel_send:
1523 /*
1524 * Special case: send message to kernel services.
1525 * The request message has been copied into the
1526 * kmsg. Nothing is locked.
1527 */
1528
1529 /*
1530 * Perform the kernel function.
1531 */
1532
1533 kmsg = ipc_kobject_server(kmsg);
1534 if (kmsg == IKM_NULL) {
1535 /*
1536 * No reply. Take the
1537 * slow receive path.
1538 */
1539 goto slow_get_rcv_port;
1540 }
1541
1542 /*
1543 * Check that:
1544 * the reply port is alive
1545 * we hold the receive right
1546 * the name has not changed.
1547 * the port is not in a set
1548 * If any of these are not true,
1549 * we cannot directly receive the reply
1550 * message.
1551 */
1552 dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
1553 ip_lock(dest_port);
1554
1555 if ((!ip_active(dest_port)) ||
1556 (dest_port->ip_receiver != space) ||
1557 (dest_port->ip_receiver_name != rcv_name) ||
1558 (dest_port->ip_pset != IPS_NULL))
1559 {
1560 ip_unlock(dest_port);
1561 ipc_mqueue_send_always(kmsg);
1562 goto slow_get_rcv_port;
1563 }
1564
1565 rcv_mqueue = &dest_port->ip_messages;
1566 imq_lock(rcv_mqueue);
1567 /* keep port locked, and don't change ref count yet */
1568
1569 /*
1570 * If there are messages on the port
1571 * or other threads waiting for a message,
1572 * we cannot directly receive the reply.
1573 */
1574 if ((ipc_thread_queue_first(&rcv_mqueue->imq_threads)
1575 != ITH_NULL) ||
1576 (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
1577 != IKM_NULL))
1578 {
1579 imq_unlock(rcv_mqueue);
1580 ip_unlock(dest_port);
1581 ipc_mqueue_send_always(kmsg);
1582 goto slow_get_rcv_port;
1583 }
1584
1585 /*
1586 * We can directly receive this reply.
1587 * Since the kernel reply never blocks,
1588 * it holds no message_accepted request.
1589 * Since there were no messages queued
1590 * on the reply port, there should be
1591 * no threads blocked waiting to send.
1592 */
1593
1594 assert(kmsg->ikm_marequest == IMAR_NULL);
1595 assert(ipc_thread_queue_first(&reply_port->ip_blocked)
1596 == ITH_NULL);
1597
1598 kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
1599 imq_unlock(rcv_mqueue);
1600
1601 /* check the reply message size */
1602
1603 reply_size = kmsg->ikm_header.msgh_size;
1604 if (rcv_size < reply_size) {
1605 ip_check_unlock(dest_port);
1606 goto slow_copyout;
1607 }
1608
1609 /*
1610 * optimized ipc_kmsg_copyout/ipc_kmsg_copyout_header
1611 *
1612 * Dest_port is still locked, so the checks above
1613 * are still valid:
1614 * port is alive
1615 * receiver is this task
1616 * port's name in this task is rcv_name
1617 *
1618 * The message is a kernel reply message: it has
1619 * a send-once right to the dest port. It may
1620 * be complex.
1621 */
1622
1623 switch (kmsg->ikm_header.msgh_bits) {
1624
1625 case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
1626 /* receiving a simple kernel reply message */
1627
1628 /* optimized ipc_object_copyout_dest */
1629
1630 assert(dest_port->ip_sorights > 0);
1631
1632 dest_port->ip_sorights--;
1633 ip_release(dest_port);
1634 ip_check_unlock(dest_port);
1635
1636 kmsg->ikm_header.msgh_bits =
1637 MACH_MSGH_BITS(0,
1638 MACH_MSG_TYPE_PORT_SEND_ONCE);
1639 kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
1640 kmsg->ikm_header.msgh_local_port = rcv_name;
1641 break;
1642 }
1643
1644 case MACH_MSGH_BITS_COMPLEX|
1645 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
1646 /* receiving a complex kernel reply message */
1647
1648 /* optimized ipc_object_copyout_dest */
1649
1650 assert(dest_port->ip_sorights > 0);
1651
1652 dest_port->ip_sorights--;
1653 ip_release(dest_port);
1654 ip_check_unlock(dest_port);
1655
1656 kmsg->ikm_header.msgh_bits =
1657 MACH_MSGH_BITS_COMPLEX |
1658 MACH_MSGH_BITS(0,
1659 MACH_MSG_TYPE_PORT_SEND_ONCE);
1660 kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
1661 kmsg->ikm_header.msgh_local_port = rcv_name;
1662
1663 mr = ipc_kmsg_copyout_body(
1664 (vm_offset_t) (&kmsg->ikm_header + 1),
1665 (vm_offset_t) &kmsg->ikm_header
1666 + kmsg->ikm_header.msgh_size,
1667 space,
1668 current_map());
1669
1670 if (mr != MACH_MSG_SUCCESS) {
1671 (void) ipc_kmsg_put(msg, kmsg,
1672 kmsg->ikm_header.msgh_size);
1673 return mr | MACH_RCV_BODY_ERROR;
1674 }
1675 break;
1676 }
1677
1678 default:
1679 ip_check_unlock(dest_port);
1680 goto slow_copyout;
1681 }
1682
1683 /*
1684 * Copy the reply message body out to user space.
1685 * We know that it can be released to the kernel
1686 * reply message pool.
1687 */
1688
1689 if (copyoutmsg(&kmsg->ikm_header,
1690 msg,
1691 reply_size))
1692 mr = MACH_RCV_INVALID_DATA;
1693 else
1694 mr = MACH_MSG_SUCCESS;
1695
1696 kernel_reply_kmsg_put(kmsg);
1697
1698 thread_syscall_return(mr);
1699 /*NOTREACHED*/
1700
1701 slow_send:
1702 /*
1703 * Nothing is locked. We have acquired kmsg, but
1704 * we still need to send it and receive a reply.
1705 */
1706
1707 mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
1708 MACH_MSG_TIMEOUT_NONE);
1709 if (mr != MACH_MSG_SUCCESS) {
1710 mr |= ipc_kmsg_copyout_pseudo(kmsg, space,
1711 current_map());
1712
1713 assert(kmsg->ikm_marequest == IMAR_NULL);
1714 (void) ipc_kmsg_put(msg, kmsg,
1715 kmsg->ikm_header.msgh_size);
1716 thread_syscall_return(mr);
1717 /*NOTREACHED*/
1718 }
1719
1720 slow_get_rcv_port:
1721 /*
1722 * We have sent the message. Copy in the receive port.
1723 */
1724 {
1725 ipc_object_t temp_rcv_object;
1726 ipc_mqueue_t temp_rcv_mqueue;
1727
1728 mr = ipc_mqueue_copyin(space, rcv_name,
1729 &temp_rcv_mqueue, &temp_rcv_object);
1730 if (mr != MACH_MSG_SUCCESS) {
1731 thread_syscall_return(mr);
1732 /*NOTREACHED*/
1733 }
1734 rcv_mqueue = temp_rcv_mqueue;
1735 rcv_object = temp_rcv_object;
1736 /* hold ref for rcv_object; rcv_mqueue is locked */
1737 }
1738
1739 /*
1740 slow_receive:
1741 */
1742 /*
1743 * Now we have sent the request and copied in rcv_name,
1744 * so rcv_mqueue is locked and hold ref for rcv_object.
1745 * Just receive a reply and try to get back to fast path.
1746 *
1747 * ipc_mqueue_receive may not return, because if we block
1748 * then our kernel stack may be discarded. So we save
1749 * state here for mach_msg_continue to pick up.
1750 */
1751 {
1752 union ipc_kmsg_return kmsg_ret;
1753 mach_port_seqno_t temp_seqno;
1754
1755 self->ith_msg = msg;
1756 self->ith_rcv_size = rcv_size;
1757 self->ith_object = rcv_object;
1758 self->ith_mqueue = rcv_mqueue;
1759
1760 mr = ipc_mqueue_receive(rcv_mqueue,
1761 MACH_MSG_OPTION_NONE,
1762 MACH_MSG_SIZE_MAX,
1763 MACH_MSG_TIMEOUT_NONE,
1764 FALSE, mach_msg_continue,
1765 &kmsg_ret, &temp_seqno);
1766 /* rcv_mqueue is unlocked */
1767 ipc_object_release(rcv_object);
1768 if (mr != MACH_MSG_SUCCESS) {
1769 thread_syscall_return(mr);
1770 /*NOTREACHED*/
1771 }
1772
1773 (kmsg = kmsg_ret.kmsg)->ikm_header.msgh_seqno = temp_seqno;
1774 dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
1775 goto fast_copyout;
1776 }
1777
1778 slow_copyout:
1779 /*
1780 * Nothing locked and no references held, except
1781 * we have kmsg with msgh_seqno filled in. Must
1782 * still check against rcv_size and do
1783 * ipc_kmsg_copyout/ipc_kmsg_put.
1784 */
1785
1786 reply_size = kmsg->ikm_header.msgh_size;
1787 if (rcv_size < reply_size) {
1788 ipc_kmsg_copyout_dest(kmsg, space);
1789 (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
1790 thread_syscall_return(MACH_RCV_TOO_LARGE);
1791 /*NOTREACHED*/
1792 }
1793
1794 mr = ipc_kmsg_copyout(kmsg, space, current_map(),
1795 MACH_PORT_NULL);
1796 if (mr != MACH_MSG_SUCCESS) {
1797 if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
1798 (void) ipc_kmsg_put(msg, kmsg,
1799 kmsg->ikm_header.msgh_size);
1800 } else {
1801 ipc_kmsg_copyout_dest(kmsg, space);
1802 (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
1803 }
1804
1805 thread_syscall_return(mr);
1806 /*NOTREACHED*/
1807 }
1808
1809 /* try to get back on optimized path */
1810
1811 goto fast_put;
1812
1813 slow_put:
1814 mr = ipc_kmsg_put(msg, kmsg, reply_size);
1815 thread_syscall_return(mr);
1816 /*NOTREACHED*/
1817 } else if (option == MACH_SEND_MSG) {
1818 ipc_space_t space = current_space();
1819 vm_map_t map = current_map();
1820 ipc_kmsg_t kmsg;
1821
1822 mr = ipc_kmsg_get(msg, send_size, &kmsg);
1823 if (mr != MACH_MSG_SUCCESS)
1824 return mr;
1825
1826 mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
1827 if (mr != MACH_MSG_SUCCESS) {
1828 ikm_free(kmsg);
1829 return mr;
1830 }
1831
1832 mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
1833 MACH_MSG_TIMEOUT_NONE);
1834 if (mr != MACH_MSG_SUCCESS) {
1835 mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map);
1836
1837 assert(kmsg->ikm_marequest == IMAR_NULL);
1838 (void) ipc_kmsg_put(msg, kmsg,
1839 kmsg->ikm_header.msgh_size);
1840 }
1841
1842 return mr;
1843 } else if (option == MACH_RCV_MSG) {
1844 ipc_thread_t self = current_thread();
1845 ipc_space_t space = current_space();
1846 vm_map_t map = current_map();
1847 ipc_object_t object;
1848 ipc_mqueue_t mqueue;
1849 ipc_kmsg_t kmsg;
1850 mach_port_seqno_t seqno;
1851 union ipc_kmsg_return kmsg_ret;
1852
1853 mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
1854 if (mr != MACH_MSG_SUCCESS)
1855 return mr;
1856 /* hold ref for object; mqueue is locked */
1857
1858 /*
1859 * ipc_mqueue_receive may not return, because if we block
1860 * then our kernel stack may be discarded. So we save
1861 * state here for mach_msg_continue to pick up.
1862 */
1863
1864 self->ith_msg = msg;
1865 self->ith_rcv_size = rcv_size;
1866 self->ith_object = object;
1867 self->ith_mqueue = mqueue;
1868
1869 mr = ipc_mqueue_receive(mqueue,
1870 MACH_MSG_OPTION_NONE,
1871 MACH_MSG_SIZE_MAX,
1872 MACH_MSG_TIMEOUT_NONE,
1873 FALSE, mach_msg_continue,
1874 &kmsg_ret, &seqno);
1875 /* mqueue is unlocked */
1876 ipc_object_release(object);
1877 if (mr != MACH_MSG_SUCCESS)
1878 return mr;
1879
1880 kmsg = kmsg_ret.kmsg;
1881 kmsg->ikm_header.msgh_seqno = seqno;
1882 if (rcv_size < kmsg->ikm_header.msgh_size) {
1883 ipc_kmsg_copyout_dest(kmsg, space);
1884 (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
1885 return MACH_RCV_TOO_LARGE;
1886 }
1887
1888 mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
1889 if (mr != MACH_MSG_SUCCESS) {
1890 if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
1891 (void) ipc_kmsg_put(msg, kmsg,
1892 kmsg->ikm_header.msgh_size);
1893 } else {
1894 ipc_kmsg_copyout_dest(kmsg, space);
1895 (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
1896 }
1897
1898 return mr;
1899 }
1900
1901 return ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
1902 } else if (option == MACH_MSG_OPTION_NONE) {
1903 /*
1904 * We can measure the "null mach_msg_trap"
1905 * (syscall entry and thread_syscall_return exit)
1906 * with this path.
1907 */
1908
1909 thread_syscall_return(MACH_MSG_SUCCESS);
1910 /*NOTREACHED*/
1911 }
1912
1913 if (option & MACH_SEND_MSG) {
1914 mr = mach_msg_send(msg, option, send_size,
1915 time_out, notify);
1916 if (mr != MACH_MSG_SUCCESS)
1917 return mr;
1918 }
1919
1920 if (option & MACH_RCV_MSG) {
1921 mr = mach_msg_receive(msg, option, rcv_size, rcv_name,
1922 time_out, notify);
1923 if (mr != MACH_MSG_SUCCESS)
1924 return mr;
1925 }
1926
1927 return MACH_MSG_SUCCESS;
1928 }
1929
1930 /*
1931 * Routine: mach_msg_continue
1932 * Purpose:
1933 * Continue after blocking for a message.
1934 * Conditions:
1935 * Nothing locked. We are running on a new kernel stack,
1936 * with the receive state saved in the thread. From here
1937 * control goes back to user space.
1938 */
1939
1940 no_return
1941 mach_msg_continue(void)
1942 {
1943 ipc_thread_t thread = current_thread();
1944 task_t task = thread->task;
1945 ipc_space_t space = task->itk_space;
1946 vm_map_t map = task->map;
1947 mach_msg_header_t *msg = thread->ith_msg;
1948 mach_msg_size_t rcv_size = thread->ith_rcv_size;
1949 ipc_object_t object = thread->ith_object;
1950 ipc_mqueue_t mqueue = thread->ith_mqueue;
1951 ipc_kmsg_t kmsg;
1952 mach_port_seqno_t seqno;
1953 mach_msg_return_t mr;
1954 union ipc_kmsg_return kmsg_ret;
1955
1956 mr = ipc_mqueue_receive(mqueue, MACH_MSG_OPTION_NONE,
1957 MACH_MSG_SIZE_MAX, MACH_MSG_TIMEOUT_NONE,
1958 TRUE, mach_msg_continue,
1959 &kmsg_ret, &seqno);
1960
1961 /* mqueue is unlocked */
1962 ipc_object_release(object);
1963 if (mr != MACH_MSG_SUCCESS) {
1964 thread_syscall_return(mr);
1965 /*NOTREACHED*/
1966 }
1967
1968 kmsg = kmsg_ret.kmsg;
1969 kmsg->ikm_header.msgh_seqno = seqno;
1970 if (kmsg->ikm_header.msgh_size > rcv_size) {
1971 ipc_kmsg_copyout_dest(kmsg, space);
1972 (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
1973 thread_syscall_return(MACH_RCV_TOO_LARGE);
1974 /*NOTREACHED*/
1975 }
1976
1977 mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
1978 if (mr != MACH_MSG_SUCCESS) {
1979 if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
1980 (void) ipc_kmsg_put(msg, kmsg,
1981 kmsg->ikm_header.msgh_size);
1982 } else {
1983 ipc_kmsg_copyout_dest(kmsg, space);
1984 (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
1985 }
1986
1987 thread_syscall_return(mr);
1988 /*NOTREACHED*/
1989 }
1990
1991 mr = ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
1992 thread_syscall_return(mr);
1993 /*NOTREACHED*/
1994 }
1995
/*
 *	Routine:	mach_msg_interrupt
 *	Purpose:
 *		Attempts to force a thread waiting at mach_msg_continue or
 *		mach_msg_receive_continue into a clean point.  Returns TRUE
 *		if this was possible: the thread has been removed from its
 *		message queue and set up to return MACH_RCV_INTERRUPTED to
 *		user space when it next runs.
 *	Conditions:
 *		Nothing locked.  The thread must NOT be runnable.
 */

boolean_t
mach_msg_interrupt(
	thread_t	thread)
{
	ipc_mqueue_t mqueue;

	/*
	 *	Only threads parked by one of the message continuations
	 *	carry valid ith_mqueue/ith_object receive state.
	 */
	assert((thread->swap_func == mach_msg_continue) ||
	       (thread->swap_func == mach_msg_receive_continue));

	mqueue = thread->ith_mqueue;
	imq_lock(mqueue);
	if (thread->ith_state != MACH_RCV_IN_PROGRESS) {
		/*
		 *	The thread is no longer waiting for a message.
		 *	It may have a message sitting in ith_kmsg.
		 *	We can't clean this up.
		 */

		imq_unlock(mqueue);
		return FALSE;
	}

	/* take the thread off the queue of waiting receivers */
	ipc_thread_rmqueue(&mqueue->imq_threads, thread);
	imq_unlock(mqueue);

	/* drop the receive-object reference taken before blocking */
	ipc_object_release(thread->ith_object);

	/*
	 *	Instead of resuming the receive, the thread will now
	 *	report an interrupted receive straight to user space.
	 */
	thread_set_syscall_return(thread, MACH_RCV_INTERRUPTED);
	thread->swap_func = thread_exception_return;
	return TRUE;
}
2036
2037 #if MACH_IPC_COMPAT
2038
2039 #include <kern/kern_io.h>
2040
2041 /*
2042 * Routine: msg_return_translate
2043 * Purpose:
2044 * Translate from new error code to old error code.
2045 */
2046
2047 msg_return_t
2048 msg_return_translate(
2049 mach_msg_return_t mr)
2050 {
2051 switch (mr &~ MACH_MSG_MASK) {
2052 case MACH_MSG_SUCCESS:
2053 return 0; /* SEND_SUCCESS/RCV_SUCCESS/RPC_SUCCESS */
2054
2055 case MACH_SEND_NO_BUFFER:
2056 case MACH_SEND_NO_NOTIFY:
2057 printf("msg_return_translate: %x -> interrupted\n", mr);
2058 return SEND_INTERRUPTED;
2059
2060 case MACH_SEND_MSG_TOO_SMALL:
2061 return SEND_MSG_TOO_SMALL;
2062 case MACH_SEND_INVALID_DATA:
2063 case MACH_SEND_INVALID_MEMORY:
2064 return SEND_INVALID_MEMORY;
2065 case MACH_SEND_TIMED_OUT:
2066 return SEND_TIMED_OUT;
2067 case MACH_SEND_INTERRUPTED:
2068 return SEND_INTERRUPTED;
2069 case MACH_SEND_INVALID_DEST:
2070 case MACH_SEND_INVALID_REPLY:
2071 case MACH_SEND_INVALID_RIGHT:
2072 case MACH_SEND_INVALID_TYPE:
2073 return SEND_INVALID_PORT;
2074 case MACH_SEND_WILL_NOTIFY:
2075 return SEND_WILL_NOTIFY;
2076 case MACH_SEND_NOTIFY_IN_PROGRESS:
2077 return SEND_NOTIFY_IN_PROGRESS;
2078
2079 case MACH_RCV_INVALID_NAME:
2080 case MACH_RCV_IN_SET:
2081 case MACH_RCV_PORT_DIED:
2082 return RCV_INVALID_PORT;
2083 case MACH_RCV_TOO_LARGE:
2084 return RCV_TOO_LARGE;
2085 case MACH_RCV_TIMED_OUT:
2086 return RCV_TIMED_OUT;
2087 case MACH_RCV_INTERRUPTED:
2088 return RCV_INTERRUPTED;
2089 case MACH_RCV_PORT_CHANGED:
2090 return RCV_PORT_CHANGE;
2091 case MACH_RCV_INVALID_DATA:
2092 return RCV_INVALID_MEMORY;
2093
2094 case MACH_SEND_IN_PROGRESS:
2095 case MACH_SEND_INVALID_NOTIFY:
2096 case MACH_SEND_INVALID_HEADER:
2097 case MACH_RCV_IN_PROGRESS:
2098 case MACH_RCV_INVALID_NOTIFY:
2099 case MACH_RCV_HEADER_ERROR:
2100 case MACH_RCV_BODY_ERROR:
2101 default:
2102 #if MACH_ASSERT
2103 assert(!"msg_return_translate");
2104 #else
2105 panic("msg_return_translate");
2106 #endif
2107 return 0; /* lint */
2108 }
2109 }
2110
2111 /*
2112 * Routine: msg_send_trap [mach trap]
2113 * Purpose:
2114 * Send a message.
2115 * Conditions:
2116 * Nothing locked.
2117 * Returns:
2118 */
2119
2120 msg_return_t
2121 msg_send_trap(
2122 msg_header_t *msg,
2123 msg_option_t option,
2124 msg_size_t send_size,
2125 msg_timeout_t time_out)
2126 {
2127 ipc_space_t space = current_space();
2128 vm_map_t map = current_map();
2129 ipc_kmsg_t kmsg;
2130 mach_msg_return_t mr;
2131
2132 send_size = (send_size + 3) & ~3; /* round up */
2133
2134 if (send_size > MSG_SIZE_MAX)
2135 return SEND_MSG_TOO_LARGE;
2136
2137 mr = ipc_kmsg_get((mach_msg_header_t *) msg,
2138 (mach_msg_size_t) send_size,
2139 &kmsg);
2140 if (mr != MACH_MSG_SUCCESS)
2141 return msg_return_translate(mr);
2142
2143 mr = ipc_kmsg_copyin_compat(kmsg, space, map);
2144 if (mr != MACH_MSG_SUCCESS) {
2145 ikm_free(kmsg);
2146 return msg_return_translate(mr);
2147 }
2148
2149 if (option & SEND_NOTIFY) {
2150 mr = ipc_mqueue_send(kmsg, MACH_SEND_TIMEOUT,
2151 ((option & SEND_TIMEOUT) ?
2152 (mach_msg_timeout_t) time_out :
2153 MACH_MSG_TIMEOUT_NONE));
2154 if (mr == MACH_SEND_TIMED_OUT) {
2155 ipc_port_t dest = (ipc_port_t)
2156 kmsg->ikm_header.msgh_remote_port;
2157
2158 mr = ipc_marequest_create(space, dest, MACH_PORT_NULL,
2159 &kmsg->ikm_marequest);
2160 if (mr == MACH_MSG_SUCCESS) {
2161 ipc_mqueue_send_always(kmsg);
2162 return SEND_WILL_NOTIFY;
2163 }
2164 }
2165 } else
2166 mr = ipc_mqueue_send(kmsg,
2167 ((option & SEND_TIMEOUT) ?
2168 MACH_SEND_TIMEOUT :
2169 MACH_MSG_OPTION_NONE),
2170 (mach_msg_timeout_t) time_out);
2171
2172 if (mr != MACH_MSG_SUCCESS)
2173 ipc_kmsg_destroy(kmsg);
2174
2175 return msg_return_translate(mr);
2176 }
2177
/*
 *	Routine:	msg_receive_trap [mach trap]
 *	Purpose:
 *		Receive a message.  (Mach 2.5 compatibility interface.)
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		An old-style msg_return_t code translated from the
 *		internal mach_msg_return_t.  On RCV_TOO_LARGE the real
 *		message size is copied back into the user's msg_size
 *		header field.
 */

msg_return_t
msg_receive_trap(
	msg_header_t	*msg,
	msg_option_t	option,
	msg_size_t	rcv_size,
	port_name_t	rcv_name,
	msg_timeout_t	time_out)
{
	ipc_thread_t self;
	ipc_space_t space = current_space();
	vm_map_t map = current_map();
	ipc_object_t object;
	ipc_mqueue_t mqueue;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;
	union ipc_kmsg_return kmsg_ret;

	/* translate the old-style port name into a message queue */
	mr = ipc_mqueue_copyin(space, (mach_port_t) rcv_name,
			       &mqueue, &object);
	if (mr != MACH_MSG_SUCCESS)
		return msg_return_translate(mr);
	/* hold ref for object; mqueue is locked */

	/*
	 *	ipc_mqueue_receive may not return, because if we block
	 *	then our kernel stack may be discarded.  So we save
	 *	state here for msg_receive_continue to pick up.
	 */

	self = current_thread();
	self->ith_msg = (mach_msg_header_t *) msg;
	self->ith_option = (mach_msg_option_t) option;
	self->ith_rcv_size = (mach_msg_size_t) rcv_size;
	self->ith_timeout = (mach_msg_timeout_t) time_out;
	self->ith_object = object;
	self->ith_mqueue = mqueue;

	mr = ipc_mqueue_receive(mqueue,
				(option & RCV_TIMEOUT) ?
				MACH_RCV_TIMEOUT : MACH_MSG_OPTION_NONE,
				(mach_msg_size_t) rcv_size,
				(mach_msg_timeout_t) time_out,
				FALSE, msg_receive_continue,
				&kmsg_ret, &seqno);
	/* mqueue is unlocked */
	ipc_object_release(object);
	if (mr != MACH_MSG_SUCCESS) {
		if (mr == MACH_RCV_TOO_LARGE) {
			/*
			 *	The message didn't fit in the caller's
			 *	buffer; report the real size back in the
			 *	user's msg_size field.
			 */
			msg_size_t real_size = kmsg_ret.msize;

			assert(real_size > rcv_size);

			(void) copyout(&real_size,
				       &msg->msg_size,
				       sizeof(msg_size_t));
		}

		return msg_return_translate(mr);
	}

	kmsg = kmsg_ret.kmsg;
	assert(kmsg->ikm_header.msgh_size <= (mach_msg_size_t) rcv_size);

	/* compat copyout is assumed to succeed once the size fits
	   (asserted below) */
	mr = ipc_kmsg_copyout_compat(kmsg, space, map);
	assert(mr == MACH_MSG_SUCCESS);

	/* copy the message out to the user buffer and free the kmsg */
	mr = ipc_kmsg_put((mach_msg_header_t *) msg, kmsg,
			  kmsg->ikm_header.msgh_size);
	return msg_return_translate(mr);
}
2258
/*
 *	Routine:	msg_rpc_trap [mach trap]
 *	Purpose:
 *		Send and receive a message.
 *
 *		This is the Mach 2.5 compatibility RPC path: the message
 *		is sent, and then a receive is performed on the reply
 *		port named by the sent message's msgh_local_port.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		SEND_MSG_TOO_LARGE	send_size (rounded up) exceeds
 *					MSG_SIZE_MAX.
 *		SEND_WILL_NOTIFY	SEND_NOTIFY was requested, the send
 *					timed out, and a message-accepted
 *					request was successfully armed.
 *		RCV_INVALID_PORT	The reply port is invalid, not
 *					receivable by this task, or is a
 *					member of a live port set.
 *		Other codes		Mach 3 send/receive results mapped
 *					through msg_return_translate.
 */

msg_return_t
msg_rpc_trap(
	msg_header_t	*msg,
	msg_option_t	option,
	msg_size_t	send_size,
	msg_size_t	rcv_size,
	msg_timeout_t	send_timeout,
	msg_timeout_t	rcv_timeout)
{
	ipc_thread_t self;
	ipc_space_t space = current_space();
	vm_map_t map = current_map();
	ipc_port_t reply;
	ipc_pset_t pset;
	ipc_mqueue_t mqueue;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;
	union ipc_kmsg_return kmsg_ret;

	/*
	 * Instead of using msg_send_trap and msg_receive_trap,
	 * we implement msg_rpc_trap directly.  The difference
	 * is how the reply port is handled.  Instead of using
	 * ipc_mqueue_copyin, we save a reference for the reply
	 * port carried in the sent message.  For example,
	 * consider a rename kernel call which changes the name
	 * of the call's own reply port.  This is the behaviour
	 * of the Mach 2.5 msg_rpc_trap.
	 */

	send_size = (send_size + 3) & ~3; /* round up to word multiple */

	if (send_size > MSG_SIZE_MAX)
		return SEND_MSG_TOO_LARGE;

	/* Copy the message from user space into a kernel kmsg. */
	mr = ipc_kmsg_get((mach_msg_header_t *) msg,
			  (mach_msg_size_t) send_size,
			  &kmsg);
	if (mr != MACH_MSG_SUCCESS)
		return msg_return_translate(mr);

	/* Translate the port names in the message into kernel references. */
	mr = ipc_kmsg_copyin_compat(kmsg, space, map);
	if (mr != MACH_MSG_SUCCESS) {
		/* ikm_free, not ipc_kmsg_destroy: the copyin failed. */
		ikm_free(kmsg);
		return msg_return_translate(mr);
	}

	/*
	 * Take our own reference on the reply port now, because the
	 * send below consumes the kmsg (and the port references it
	 * carries) before we get to the receive half.
	 */
	reply = (ipc_port_t) kmsg->ikm_header.msgh_local_port;
	if (IP_VALID(reply))
		ipc_port_reference(reply);

	if (option & SEND_NOTIFY) {
		/*
		 * SEND_NOTIFY:  attempt the send with a timeout.  If it
		 * times out (queue full), arm a message-accepted request
		 * on the destination, force the message onto the queue
		 * anyway, and tell the caller he will be notified.
		 */
		mr = ipc_mqueue_send(kmsg, MACH_SEND_TIMEOUT,
				     ((option & SEND_TIMEOUT) ?
				      (mach_msg_timeout_t) send_timeout :
				      MACH_MSG_TIMEOUT_NONE));
		if (mr == MACH_SEND_TIMED_OUT) {
			ipc_port_t dest = (ipc_port_t)
				kmsg->ikm_header.msgh_remote_port;

			mr = ipc_marequest_create(space, dest, MACH_PORT_NULL,
						  &kmsg->ikm_marequest);
			if (mr == MACH_MSG_SUCCESS) {
				ipc_mqueue_send_always(kmsg);
				if (IP_VALID(reply))
					ipc_port_release(reply);
				return SEND_WILL_NOTIFY;
			}
		}
	} else
		mr = ipc_mqueue_send(kmsg,
				     ((option & SEND_TIMEOUT) ?
				      MACH_SEND_TIMEOUT :
				      MACH_MSG_OPTION_NONE),
				     (mach_msg_timeout_t) send_timeout);

	if (mr != MACH_MSG_SUCCESS) {
		/* Send failed after a successful copyin: destroy the
		   kmsg (releasing the rights it carries) and drop our
		   reply-port reference. */
		ipc_kmsg_destroy(kmsg);
		if (IP_VALID(reply))
			ipc_port_release(reply);
		return msg_return_translate(mr);
	}

	if (!IP_VALID(reply))
		return RCV_INVALID_PORT;

	/*
	 * Receive half:  the reply port must be receivable by the
	 * current task.
	 */
	ip_lock(reply);
	if (reply->ip_receiver != space) {
		ip_release(reply);
		ip_check_unlock(reply);	/* our ref may have been the last */
		return RCV_INVALID_PORT;
	}

	assert(ip_active(reply));	/* it has a receiver, so it is alive */
	pset = reply->ip_pset;

	if (pset != IPS_NULL) {
		ips_lock(pset);
		if (ips_active(pset)) {
			/*
			 * Can't receive directly from a port that is a
			 * member of a live port set.
			 */
			ips_unlock(pset);
			ip_release(reply);
			ip_unlock(reply);
			return RCV_INVALID_PORT;
		}

		/* The port set is dead; divorce the port from it. */
		ipc_pset_remove(pset, reply);
		ips_check_unlock(pset);
		assert(reply->ip_pset == IPS_NULL);
	}

	mqueue = &reply->ip_messages;
	imq_lock(mqueue);
	ip_unlock(reply);

	/*
	 * ipc_mqueue_receive may not return, because if we block
	 * then our kernel stack may be discarded.  So we save
	 * state here for msg_receive_continue to pick up.
	 */

	self = current_thread();
	self->ith_msg = (mach_msg_header_t *) msg;
	self->ith_option = (mach_msg_option_t) option;
	self->ith_rcv_size = (mach_msg_size_t) rcv_size;
	self->ith_timeout = (mach_msg_timeout_t) rcv_timeout;
	self->ith_object = (ipc_object_t) reply;	/* holds our port ref */
	self->ith_mqueue = mqueue;

	mr = ipc_mqueue_receive(mqueue,
				(option & RCV_TIMEOUT) ?
				MACH_RCV_TIMEOUT : MACH_MSG_OPTION_NONE,
				(mach_msg_size_t) rcv_size,
				(mach_msg_timeout_t) rcv_timeout,
				FALSE, msg_receive_continue,
				&kmsg_ret, &seqno);
	/* mqueue is unlocked */
	ipc_port_release(reply);	/* drop the ref taken before the send */
	if (mr != MACH_MSG_SUCCESS) {
		if (mr == MACH_RCV_TOO_LARGE) {
			/*
			 * Report the message's real size back to the
			 * caller so he can retry with a bigger buffer.
			 */
			msg_size_t real_size = kmsg_ret.msize;

			assert(real_size > rcv_size);

			(void) copyout(&real_size,
				       &msg->msg_size,
				       sizeof(msg_size_t));
		}

		return msg_return_translate(mr);
	}

	kmsg = kmsg_ret.kmsg;
	assert(kmsg->ikm_header.msgh_size <= (mach_msg_size_t) rcv_size);

	/* Translate kernel port references back into user names. */
	mr = ipc_kmsg_copyout_compat(kmsg, space, map);
	assert(mr == MACH_MSG_SUCCESS);

	/* Copy the received message out to the user's buffer. */
	mr = ipc_kmsg_put((mach_msg_header_t *) msg,
			  kmsg, kmsg->ikm_header.msgh_size);
	return msg_return_translate(mr);
}
2430
/*
 *	Routine:	msg_receive_continue
 *	Purpose:
 *		Continue after blocking for a message.
 *	Conditions:
 *		Nothing locked.  We are running on a new kernel stack,
 *		with the receive state saved in the thread.  From here
 *		control goes back to user space.
 */

no_return
msg_receive_continue(void)
{
	/* Recover the receive parameters saved in the thread (ith_*)
	   by the trap code before it blocked. */
	ipc_thread_t self = current_thread();
	msg_header_t *msg = (msg_header_t *) self->ith_msg;
	msg_option_t option = (msg_option_t) self->ith_option;
	msg_size_t rcv_size = (msg_size_t) self->ith_rcv_size;
	msg_timeout_t time_out = (msg_timeout_t) self->ith_timeout;
	ipc_object_t object = self->ith_object;
	ipc_mqueue_t mqueue = self->ith_mqueue;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;
	union ipc_kmsg_return kmsg_ret;

	mr = ipc_mqueue_receive(mqueue,
				(option & RCV_TIMEOUT) ?
				MACH_RCV_TIMEOUT : MACH_MSG_OPTION_NONE,
				(mach_msg_size_t) rcv_size,
				(mach_msg_timeout_t) time_out,
				/* TRUE here vs FALSE on the first attempt —
				   presumably marks a resumed receive; confirm
				   against ipc_mqueue_receive. */
				TRUE, msg_receive_continue,
				&kmsg_ret, &seqno);
	/* mqueue is unlocked */
	ipc_object_release(object);	/* drop the ref saved in ith_object */
	if (mr != MACH_MSG_SUCCESS) {
		if (mr == MACH_RCV_TOO_LARGE) {
			/* Report the real size so the caller can retry
			   with a bigger buffer. */
			msg_size_t real_size = kmsg_ret.msize;

			assert(real_size > rcv_size);

			(void) copyout(&real_size,
				       &msg->msg_size,
				       sizeof(msg_size_t));
		}

		/*
		 * No stack frame to return through (old stack was
		 * discarded); hand the result straight back to user
		 * space.
		 */
		thread_syscall_return(msg_return_translate(mr));
		/*NOTREACHED*/
	}

	kmsg = kmsg_ret.kmsg;
	assert(kmsg->ikm_header.msgh_size <= (mach_msg_size_t) rcv_size);

	/* Translate kernel port references back into user names. */
	mr = ipc_kmsg_copyout_compat(kmsg, current_space(), current_map());
	assert(mr == MACH_MSG_SUCCESS);

	/* Copy the received message out to the user's buffer. */
	mr = ipc_kmsg_put((mach_msg_header_t *) msg, kmsg,
			  kmsg->ikm_header.msgh_size);
	thread_syscall_return(msg_return_translate(mr));
	/*NOTREACHED*/
}
2491
2492 #endif /* MACH_IPC_COMPAT */
Cache object: 01bc972bca0fa8d8c61ba8a9524b8eaa
|