1 /*
2 * Mach Operating System
3 * Copyright (c) 1990,1991,1992 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: ipc_output.c,v $
29 * Revision 2.6 92/03/10 16:28:06 jsb
30 * Merged in norma branch changes as of NORMA_MK7.
31 * [92/03/09 12:50:12 jsb]
32 *
33 * Revision 2.5.2.4 92/02/21 14:31:59 jsb
34 * Removed code incorrectly duplicated by bmerge.
35 *
36 * Revision 2.5.2.3 92/02/21 11:24:50 jsb
37 * Moved ipc_kmsg_copyout_to_network here from ipc/ipc_kmsg.c.
38 * Renamed norma_ipc_destroy_proxy to norma_ipc_dead_destination.
39 * [92/02/21 10:36:14 jsb]
40 *
41 * In norma_ipc_send, convert kmsg to network format.
42 * [92/02/21 09:07:00 jsb]
43 *
44 * Changed for norma_ipc_send_port now returning uid.
45 * [92/02/20 17:15:23 jsb]
46 *
47 * Added netipc_thread_wakeup to netipc_safe_vm_map_copy_invoke_cont.
48 * Added logic to convert node number to netipc packet address in
49 * netipc_packet_print.
50 * [92/02/18 17:37:14 jsb]
51 *
52 * Perform norma_ipc_send_*_dest only after successful acknowledgement.
53 * This allows simplification of reference counting for destination
54 * ports. The old scheme kept a single reference for the port while it
55 * was queued. The new scheme keeps a reference for the port for every
56 * kmsg it has queued, which is released in norma_ipc_send_*_dest.
57 * The only explicit reference counting management required by the
58 * new scheme is the acquisition of a port reference for a proxy before
59 * calling norma_ipc_destroy_proxy, which expects the caller to supply
60 * a reference. Eliminated netipc_port_release and netipc_free_port_list,
61 * since norma_ipc_send_*_dest now handle releasing references after
62 * message delivery. Changed safe kmsg freeing code to call
63 * norma_ipc_send_*_dest (which must not be called at interrupt level).
64 * Also changed usage of ip_norma_queue_next field to allow elimination
65 * of ip_norma_queued field.
66 * [92/02/18 09:14:14 jsb]
67 *
68 * Revision 2.5.2.2 92/02/18 19:15:50 jeffreyh
69 * iPSC changes from Intel.
70 * [92/02/18 jeffreyh]
71 * [intel] added debugging callhere stuff, routine for
72 * netipc_vm_map_copy_cont_check(), all for iPSC.
73 * [92/02/13 13:10:00 jeffreyh]
74 *
75 * Revision 2.5.2.1 92/01/21 21:52:17 jsb
76 * More de-linting.
77 * [92/01/17 11:40:20 jsb]
78 *
79 * Added definition of, and call to, netipc_safe_ikm_free.
80 * [92/01/16 22:13:17 jsb]
81 *
82 * Minor de-linting.
83 * [92/01/14 22:01:43 jsb]
84 *
85 * Reworked interface with underlying protocol module; see comment
86 * that begins 'This is now how we cooperate...' below.
87 * [92/01/14 09:29:51 jsb]
88 *
89 * De-linted. Added netipc_packet_print.
90 * [92/01/13 10:16:50 jsb]
91 *
92 * Moved netipc_packet definitions, processing, allocation, to here.
93 * Moved netipc_ack status demultiplexing here.
94 * [92/01/11 17:35:35 jsb]
95 *
96 * Moved old contents to norma/ipc_wire.c.
97 * Now contains functions split from norma/ipc_net.c.
98 * [92/01/10 20:40:51 jsb]
99 *
100 */
101 /*
102 * File: norma/ipc_output.c
103 * Author: Joseph S. Barrera III
104 * Date: 1990
105 *
106 * Functions to support ipc between nodes in a single Mach cluster.
107 */
108
109 #include <norma/ipc_net.h>
110
111 /*
112 * This is now how we cooperate with the reliable transmission module
113 * underneath us:
114 * 1. When we have something to send, we call netipc_start.
115 * 2. When he can send it, he upcalls netipc_send_new.
116 * 3. We then call netipc_send_{kmsg,page,...} as appropriate.
117 * From these routines, we call netipc_send_with_timeout,
118 * specifying a packetid and a seqid.
119 * 4. If he decides a packet needs to be retransmitted, he calls
120 * netipc_send_old with the same packetid and seqid.
121 * 5. He receives acknowledgements, eliminates redundant ones,
122 * and calls netipc_recv_ack_with_status with same packetid, seqid.
123 * 6. From ack_with_status we multiplex on status and call one
124 * of netipc_recv_{success,retarget,dead,not_found}.
125 * 7. Netipc_recv_success calls netipc_start if there is more to send.
126 *
127 * The rationale here is
128 * 1. Keep retransmission knowledge in protocol module
129 * 2. Keep kmsg, copy object, etc. knowledge here.
130 * 3. Do windowing based on limitations imposed by both
131 * layers. E.g. continuations limit how much of a copy
132 * object we can send at a time.
133 * 4. Allow for smarter protocol modules in the future while
134 * retaining the no-copy aspect provided by kmsg-walking
135 * code in this module.
136 * 5. Allow for simpler protocol modules for reliable interconnects.
137 *
138 * Things left to do:
139 * 1. Actually add windowing
140 * 2. Have this module do its own seqid allocation, separate from
141 * protocol seqids. This eliminates need for Xnetipc_next_seqid.
142 * 3. Eliminate array-based lookup of netipc_packets
143 */
144
/*
 * When nonzero, netipc_send_page word-aligns out-of-line data on the
 * wire instead of preserving its original page offset.
 */
#define NORMA_REALIGN_OOL_DATA 0

/*
 * Values for dp_type: what kind of data a netipc_packet is currently
 * transmitting (see the netipc_send_* routines below).
 */
#define DP_TYPE_KMSG 0L		/* first page of the kmsg itself */
#define DP_TYPE_PAGE 1L		/* a page of an ool copy object */
#define DP_TYPE_KMSG_MORE 2L	/* kmsg overflow beyond first page */
#define DP_TYPE_OOL_PORTS 3L	/* kalloc'd ool port array */

typedef struct netipc_packet *netipc_packet_t;
#define NETIPC_PACKET_NULL ((netipc_packet_t) 0)
154
/*
 * Per-destination-node send state: describes the message currently
 * being transmitted to a node, plus the queue of ports with more
 * messages to send there. Looked up by node number in netipc_packet[].
 */
struct netipc_packet {
	unsigned long dp_type;		/* DP_TYPE_* of data now being sent */
	unsigned long dp_remote;	/* destination node number */
	unsigned long dp_seqid;		/* seqid of packet now being sent */
	unsigned long dp_first_seqid;	/* seqid of the kmsg packet */
	unsigned long dp_last_seqid;	/* final seqid; 0 if not yet known */
	unsigned long dp_last_unacked;	/* earliest unacknowledged seqid */
	ipc_kmsg_t dp_kmsg;		/* kmsg being transmitted */
	unsigned long dp_offset;	/* byte offset into kmsg of current
					   ool descriptor */
	netipc_packet_t dp_next;	/* presumably free-list/chain link;
					   managed by allocator (not visible
					   here) -- TODO confirm */
	ipc_port_t dp_remote_port;	/* head of queue of ports waiting to
					   send to this node */
	vm_map_copy_t dp_copy;		/* current ool copy object, if any */
	unsigned long dp_copy_index;	/* pages of dp_copy sent so far */
	unsigned long dp_copy_npages;	/* total pages in dp_copy */
	unsigned long dp_copy_last;	/* TRUE if dp_copy is kmsg's last */
	unsigned long dp_page_list_base;/* dp_copy_index at the start of the
					   current page list (continuations) */
	boolean_t dp_has_continuation;	/* copy has an interesting cont. */
	boolean_t dp_being_continued;	/* cont. invocation is in progress */
};

#define MAX_NUM_NODES 256 /* XXX */
netipc_packet_t netipc_packet[MAX_NUM_NODES];
177
extern void netipc_start();
extern void norma_ipc_send_dest();
extern void norma_ipc_send_migrating_dest();
extern unsigned long norma_ipc_send_port();

netipc_packet_t netipc_packet_allocate();
void netipc_packet_deallocate();

void norma_ipc_queue_port();
/*
 * A port that is not on any node's send queue points its
 * ip_norma_queue_next field at itself; that self-pointer is the
 * "not queued" sentinel which norma_ipc_unqueue restores.
 * (This convention is what allowed elimination of the old
 * ip_norma_queued flag; see the revision history above.)
 */
#define norma_ipc_queued(port) ((port)->ip_norma_queue_next != (port))
#define norma_ipc_unqueue(port) ((port)->ip_norma_queue_next = (port))

/*
 * This should be conditionalized on machine and type of interconnect.
 * For now, we assume that everyone will be happy with 32 bit alignment.
 */
#define WORD_SIZE 4
#define WORD_MASK (WORD_SIZE - 1)

#define ROUND_WORD(x) ((((unsigned long)(x)) + WORD_MASK) & ~WORD_MASK)
#define TRUNC_WORD(x) (((unsigned long)(x)) & ~WORD_MASK)
#define WORD_ALIGNED(x) ((((unsigned long)(x)) & WORD_MASK) == 0)
200
/* zone from which struct netipc_packets are allocated */
zone_t netipc_packet_zone;

/*
 * One-entry cache of the KVTODEV/page-split computation for the last
 * kmsg sent; see netipc_send_kmsg.
 */
ipc_kmsg_t netipc_kmsg_cache;
vm_size_t netipc_kmsg_first_half;
int netipc_kmsg_cache_hits; /* debugging */
int netipc_kmsg_cache_misses; /* debugging */
int netipc_kmsg_splits; /* debugging */

/*
 * Preformatted wire headers, one per packet flavor:
 * _k = kmsg, _p = page, _m = kmsg-more, _o = ool-ports.
 * Constant fields are filled in once by netipc_output_init.
 */
struct netipc_hdr send_hdr_p;
struct netipc_hdr send_hdr_k;
struct netipc_hdr send_hdr_m;
struct netipc_hdr send_hdr_o;

/*
 * Matching scatter/gather vectors: element [0] always points at the
 * header; [1] and (when data crosses a page boundary) [2] point at
 * the data, and are set per-send by the netipc_send_* routines.
 */
struct netvec netvec_p[3];
struct netvec netvec_k[3];
struct netvec netvec_m[3];
struct netvec netvec_o[3];

struct vm_map_copy netipc_kmsg_more_copy;
struct vm_map_copy netipc_ool_ports_copy;

#if iPSC386 || iPSC860
/* XXX debugging */
int verbose_already_queued = 0;
int norma_ipc_queue_port_on_list;
int netipc_packet_list_empty;
netipc_packet_t netipc_last_interesting_dp;
vm_map_copy_t netipc_last_interesting_copy;
kern_return_t (*netipc_last_dp_copy_continuation)();
#endif iPSC386 || iPSC860
231
232 /*
233 * vm_map_copy_discard_cont is not an interesting continuation, that is,
234 * it does not affect the way a copy object is sent, because it will
235 * not result in any new page lists.
236 */
237 extern kern_return_t vm_map_copy_discard_cont();
238 #define vm_map_copy_has_interesting_cont(copy) \
239 (vm_map_copy_has_cont(copy) && (copy)->cpy_cont != vm_map_copy_discard_cont)
240
/*
 * One-time initialization for this module: preset the constant fields
 * (type, sending node) of the four packet headers and point element
 * [0] of each scatter/gather vector at its header, then create the
 * zone from which netipc_packets are allocated.
 * Note that the kmsg-more and ool-ports headers are sent as
 * NETIPC_TYPE_PAGE packets (see netipc_send_kmsg_more and
 * netipc_send_ool_ports below).
 */
netipc_output_init()
{
	/*
	 * Initialize send_hdr_k and netvec_k
	 */
	send_hdr_k.type = NETIPC_TYPE_KMSG;
	send_hdr_k.remote = node_self();
	netvec_k[0].addr = KVTODEV(&send_hdr_k);
	netvec_k[0].size = sizeof(struct netipc_hdr);

	/*
	 * Initialize send_hdr_p and netvec_p
	 */
	send_hdr_p.type = NETIPC_TYPE_PAGE;
	send_hdr_p.remote = node_self();
	netvec_p[0].addr = KVTODEV(&send_hdr_p);
	netvec_p[0].size = sizeof(struct netipc_hdr);

	/*
	 * Initialize send_hdr_m and netvec_m
	 */
	send_hdr_m.type = NETIPC_TYPE_PAGE;
	send_hdr_m.remote = node_self();
	netvec_m[0].addr = KVTODEV(&send_hdr_m);
	netvec_m[0].size = sizeof(struct netipc_hdr);

	/*
	 * Initialize send_hdr_o and netvec_o
	 */
	send_hdr_o.type = NETIPC_TYPE_PAGE;
	send_hdr_o.remote = node_self();
	netvec_o[0].addr = KVTODEV(&send_hdr_o);
	netvec_o[0].size = sizeof(struct netipc_hdr);

	netipc_packet_zone = zinit(sizeof(struct netipc_packet), 512*1024,
				   PAGE_SIZE, FALSE, "netipc packet");
}
278
279 /*
280 * Called from netipc_recv_retarget and netipc_recv_dead.
281 */
/*
 * Abort transmission of the current message to dp's node: rewind dp
 * to the start of the message, take the destination port off the
 * node's send queue (putting the kmsg back at the head of the port's
 * message queue), and either start sending for the next queued port
 * or release dp entirely.
 *
 * Returns the dequeued port, still referenced (caller consumes the
 * reference, e.g. norma_ipc_destroy_proxy).
 * Called with the netipc lock held.
 */
ipc_port_t
netipc_dequeue_port(dp)
	register netipc_packet_t dp;
{
	ipc_port_t remote_port;

	assert(netipc_locked());
	assert(dp);
	printf1("netipc_dequeue_port(%d)\n", dp->dp_remote);
	/* rewind to the first (kmsg) packet of the current message */
	netipc_set_seqid(dp, dp->dp_first_seqid);
	assert(dp->dp_type == DP_TYPE_KMSG); /* is at start of dp (kmsg) */

	/*
	 * Remove this port from this node's queue.
	 * Leave the port referenced.
	 */
	remote_port = dp->dp_remote_port;
	assert(remote_port->ip_norma_dest_node == dp->dp_remote);
	dp->dp_remote_port = dp->dp_remote_port->ip_norma_queue_next;
	norma_ipc_unqueue(remote_port);

	/*
	 * Move kmsg from dp back onto port.
	 */
	assert(dp->dp_kmsg != IKM_NULL);
	ipc_kmsg_unrmqueue_first(&remote_port->ip_messages.imq_messages,
				 dp->dp_kmsg);
	remote_port->ip_msgcount++;

	/*
	 * If there is another port, start it sending;
	 * otherwise, release dp.
	 */
	if (dp->dp_remote_port != IP_NULL) {
		printf1("== dequeue_port: advancing to port 0x%x\n",
			dp->dp_remote_port);
		(void) dp_advance(dp);
		netipc_start(dp->dp_remote);
	} else {
		/* save node number: dp is unusable after deallocation */
		unsigned long remote = dp->dp_remote;
		printf1("== dequeue_port: no more ports\n");
		netipc_packet_deallocate(dp);
		netipc_packet[remote] = (netipc_packet_t) 0;
	}
	return remote_port;
}
328
/*
 * Total transmitted size of a kmsg: message body size plus the
 * kernel's fixed per-kmsg overhead.
 */
#define kmsg_size(kmsg) (ikm_plus_overhead(kmsg->ikm_header.msgh_size))
330
331 boolean_t
332 is_one_page_kmsg(kmsg)
333 ipc_kmsg_t kmsg;
334 {
335 if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_COMPLEX_DATA) {
336 return FALSE;
337 }
338 if (kmsg_size(kmsg) > PAGE_SIZE) {
339 return FALSE;
340 }
341 return TRUE;
342 }
343
344 /*
345 * Routine: ipc_kmsg_copyout_to_network
346 * Purpose:
347 * Prepare a copied-in message for norma_ipc_send.
348 * This means translating ports to uids, translating
349 * entry-list copy objects into page list copy objects,
350 * and setting MACH_MSG_BITS_COMPLEX_XXX bits.
351 * Derived from ipc_kmsg_copyin_from_kernel.
352 * Conditions:
353 * Nothing locked.
354 */
355
void
ipc_kmsg_copyout_to_network(kmsg)
	ipc_kmsg_t kmsg;
{
	vm_offset_t saddr, eaddr;
	kern_return_t kr;

	/* Simple messages need no translation. */
	if ((kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0) {
		return;
	}

	/* Walk the type descriptors between the header and message end. */
	saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
	eaddr = (vm_offset_t) &kmsg->ikm_header + kmsg->ikm_header.msgh_size;

	while (saddr < eaddr) {
		register mach_msg_type_long_t *type;
		mach_msg_type_name_t name;
		mach_msg_type_size_t size;
		mach_msg_type_number_t number;
		vm_size_t length;

		type = (mach_msg_type_long_t *) saddr;
		if (type->msgtl_header.msgt_longform) {
			name = type->msgtl_name;
			size = type->msgtl_size;
			number = type->msgtl_number;
			saddr += sizeof(mach_msg_type_long_t);
		} else {
			name = type->msgtl_header.msgt_name;
			size = type->msgtl_header.msgt_size;
			number = type->msgtl_header.msgt_number;
			saddr += sizeof(mach_msg_type_t);
		}

		/* calculate length of data in bytes, rounding up */
		/* (msgt_size is in bits, hence the >> 3) */
		length = ((number * size) + 7) >> 3;

		if (length == 0) {
			continue;
		}

		if (MACH_MSG_TYPE_PORT_ANY(name)) {
			register ipc_port_t *ports;
			mach_msg_type_number_t i;

			if (type->msgtl_header.msgt_inline) {
				ports = (ipc_port_t *) saddr;
				/* in-line data is padded to 4 bytes */
				saddr += (length + 3) &~ 3;
			} else {
				ports = (ipc_port_t *) *(vm_offset_t *) saddr;
				saddr += sizeof(vm_offset_t);
				kmsg->ikm_header.msgh_bits |=
					MACH_MSGH_BITS_COMPLEX_DATA;
			}
			kmsg->ikm_header.msgh_bits |=
				MACH_MSGH_BITS_COMPLEX_PORTS;
			/* Translate each port to its network uid. */
			for (i = 0; i < number; i++) {
				if (! IO_VALID((ipc_object_t) ports[i])) {
					/* XXX clean??? */
					ports[i] = IP_NULL;
					continue;
				}
				ports[i] = (ipc_port_t)
					norma_ipc_send_port(ports[i], name);
			}
			continue;
		}

		/* Non-port inline data needs no translation; skip it. */
		if (type->msgtl_header.msgt_inline) {
			saddr += (length + 3) &~ 3;
			continue;
		}

		/*
		 * Out-of-line data: convert the entry-list copy object
		 * to a page-list copy object so its pages can be sent.
		 */
		kmsg->ikm_header.msgh_bits |= MACH_MSGH_BITS_COMPLEX_DATA;
		kr = vm_map_convert_to_page_list((vm_map_copy_t *) saddr);
		if (kr != KERN_SUCCESS) {
			/*
			 * XXX
			 * vm_map_convert_to_page_list has failed.
			 * Now what? If the error had been detected
			 * before the MiG stub was invoked
			 * the stub would do the right thing,
			 * but it's a bit late now.
			 * Probably the best we can do is return a null
			 * copy object. Need to adjust number accordingly.
			 *
			 * XXX
			 * Discard original copy object?
			 */
			printf("XXX convert_to_network: page_list: %d", kr);
			if (type->msgtl_header.msgt_longform) {
				type->msgtl_number = 0;
			} else {
				type->msgtl_header.msgt_number = 0;
			}
			* (vm_map_copy_t *) saddr = VM_MAP_COPY_NULL;
		}
		saddr += sizeof(vm_offset_t);
	}
}
456
457 /*
458 * Main entry point from regular ipc code.
459 */
/*
 * Queue kmsg for transmission to the remote node owning its
 * destination port: translate the message to network format,
 * replace the destination and reply ports by their network uids,
 * enqueue the kmsg on the (proxy) destination port, and put that
 * port on its node's send queue.
 *
 * Always returns KERN_SUCCESS (== MACH_MSG_SUCCESS == 0).
 * Called with nothing locked.
 */
mach_msg_return_t
norma_ipc_send(kmsg)
	ipc_kmsg_t kmsg;
{
	register mach_msg_header_t *msgh;
	ipc_port_t local_port, remote_port;
	mach_msg_bits_t bits;

	/* Translate ports to uids and ool regions to page lists. */
	ipc_kmsg_copyout_to_network(kmsg);

	msgh = (mach_msg_header_t *) &kmsg->ikm_header;
	remote_port = (ipc_port_t) msgh->msgh_remote_port;
	local_port = (ipc_port_t) msgh->msgh_local_port;
	bits = msgh->msgh_bits;

#if 1
	if (kmsg_size(kmsg) > PAGE_SIZE) {
		/*
		 * Now that we are actually using this bit in this case,
		 * we should simplify some of the tests below.
		 */
		msgh->msgh_bits = (bits |= MACH_MSGH_BITS_COMPLEX_DATA);
	}
#endif

	/*
	 * Get the receiver's uid.
	 */
	assert(remote_port->ip_norma_uid != 0);
	msgh->msgh_remote_port = (mach_port_t) remote_port->ip_norma_uid;

	/*
	 * If there is a local port, get its uid (creating one if necessary).
	 *
	 * Why do we only do this now?
	 * XXX
	 * Why copyin_type? Should we change the type in the message?
	 */
	if (local_port) {
		msgh->msgh_local_port = (mach_port_t)
			norma_ipc_send_port(local_port, ipc_object_copyin_type(
				MACH_MSGH_BITS_LOCAL(bits)));
	}

	/*
	 * Block interrupts while queueing message and port.
	 */
	netipc_thread_lock();

	/*
	 * XXX
	 * Should check to see whether this proxy died!?
	 */

	/*
	 * Enqueue the kmsg on the port.
	 */
	assert(remote_port->ip_pset == /*IPS_NULL*/0); /* XXX ??? */
	remote_port->ip_msgcount++;
	ipc_kmsg_enqueue_macro(&remote_port->ip_messages.imq_messages, kmsg);

	/*
	 * Enqueue the port on the queue of ports with something to send.
	 */
	norma_ipc_queue_port(remote_port);

	/*
	 * Release spl, and return.
	 */
	netipc_thread_unlock();
	return KERN_SUCCESS;
}
532
533 /*
534 * Put port on list of ports trying to send to port->ip_norma_det_node.
535 * If there are no other ports, then start a send.
536 */
/*
 * Place remote_port on its destination node's send queue.
 * Three cases:
 *   1. already queued: nothing to do;
 *   2. node is busy (a dp exists): append port to the dp's port queue;
 *   3. node is idle: allocate a dp, move the port's first kmsg into
 *      it, and kick off transmission with netipc_start.
 * Called with the netipc lock held.
 */
void
norma_ipc_queue_port(remote_port)
	ipc_port_t remote_port;
{
	ipc_port_t port;
	ipc_kmsg_t kmsg;
	ipc_kmsg_queue_t kmsgs;
	unsigned long remote;
	netipc_packet_t dp;

	assert(netipc_locked());

	/*
	 * If it's on the list, return.
	 */
	if (norma_ipc_queued(remote_port)) {
		assert(netipc_packet[remote_port->ip_norma_dest_node] !=
		       NETIPC_PACKET_NULL);
#if iPSC386 || iPSC860
		netipc_called_here(__FILE__, __LINE__, "norma_ipc_queue_port");
		if (verbose_already_queued) {
			printf("norma_ipc_queue_port(remote_port=%x) -- I saw one\n",
			       remote_port);
		}
		norma_ipc_queue_port_on_list++;
#endif iPSC386 || iPSC860
		return;
	}

	/*
	 * If there are other ports already on the list,
	 * then queue the port and return.
	 */
	remote = remote_port->ip_norma_dest_node;
	dp = netipc_packet[remote];
	if (dp != NETIPC_PACKET_NULL) {
		/* walk to the tail of the dp's port queue */
		for (port = dp->dp_remote_port;
		     port->ip_norma_queue_next;
		     port = port->ip_norma_queue_next) {
			continue;
		}
		remote_port->ip_norma_queue_next = IP_NULL;
		port->ip_norma_queue_next = remote_port;
		return;
	}

	/*
	 * Pull first kmsg from port.
	 */
	kmsgs = &remote_port->ip_messages.imq_messages;
	kmsg = ipc_kmsg_queue_first(kmsgs);
	assert(kmsg != IKM_NULL);
	ipc_kmsg_rmqueue_first_macro(kmsgs, kmsg);
	remote_port->ip_msgcount--;

	/*
	 * Allocate and initialize a dp.
	 */
	dp = netipc_packet_allocate();
	if (dp == NETIPC_PACKET_NULL) {
		panic("netipc_packet_allocate. bogus panic.\n");
	}
	netipc_packet[remote] = dp;
	dp->dp_first_seqid = Xnetipc_next_seqid(remote);
	dp->dp_last_unacked = dp->dp_first_seqid;
	dp->dp_type = DP_TYPE_KMSG;
	dp->dp_remote = remote;
	dp->dp_remote_port = remote_port;
	dp->dp_remote_port->ip_norma_queue_next = IP_NULL;
	dp->dp_kmsg = kmsg;
	/* a one-page simple kmsg is also the message's last packet */
	if (is_one_page_kmsg(kmsg)) {
		dp->dp_last_seqid = dp->dp_first_seqid;
	} else {
		dp->dp_last_seqid = 0;
	}

	/*
	 * Send it if we can.
	 */
	netipc_start(remote);
}
618
619 /*
620 * Return to port message queue a kmsg removed via ipc_kmsg_rmqueue_first.
621 */
622 ipc_kmsg_unrmqueue_first(queue, kmsg)
623 ipc_kmsg_queue_t queue;
624 ipc_kmsg_t kmsg;
625 {
626 assert(netipc_locked());
627 printf1("*** ipc_kmsg_unrmqueue_first(0x%x, 0x%x)\n", queue, kmsg);
628 if (queue->ikmq_base == IKM_NULL) {
629 kmsg->ikm_next = kmsg;
630 kmsg->ikm_prev = kmsg;
631 } else {
632 register ipc_kmsg_t first = queue->ikmq_base;
633 register ipc_kmsg_t last = first->ikm_prev;
634
635 kmsg->ikm_next = first;
636 kmsg->ikm_prev = last;
637 first->ikm_prev = kmsg;
638 last->ikm_next = kmsg;
639 }
640 queue->ikmq_base = kmsg;
641 }
642
/* list of dead ports; managed elsewhere -- TODO confirm usage */
ipc_port_t netipc_dead_port_list = IP_NULL;
644
645 /*
646 * Called from netipc_send_dp.
647 */
/*
 * Transmit the first page (at most) of dp's kmsg to node 'remote'
 * as a NETIPC_TYPE_KMSG packet.
 * Called from netipc_send_dp with the netipc lock held.
 */
netipc_send_kmsg(remote, dp)
	unsigned long remote;
	register netipc_packet_t dp;
{
	unsigned int netvec_count;
	vm_offset_t length;

#if iPSC386 || iPSC860
	netipc_called_here(__FILE__, __LINE__, "netipc_send_kmsg");
#endif iPSC386 || iPSC860
	assert(netipc_locked());

	/*
	 * Kmsgs are word aligned.
	 */
	assert(WORD_ALIGNED(dp->dp_kmsg));

	/*
	 * Fill in send_hdr_k.
	 */
	send_hdr_k.seqid = dp->dp_seqid;

	/*
	 * Fill in netvec_k.
	 * Cache KVTODEV and page-splitting computations.
	 * (Kmsgs occasionally cross page boundaries, unfortunately.)
	 *
	 * This routine attempts to cache the results of KVTODEV
	 * since it is relatively expensive.
	 * This caching may be less effective now since we've added
	 * flow control, since we don't immediately stuff the kmsg back
	 * into the ikm_cache, which means it might not be the kmsg
	 * we see next. Perhaps we can use the kmsg->ikm_page field
	 * for caching physaddr?
	 */
	if (dp->dp_kmsg != netipc_kmsg_cache) {
		/* cache miss: recompute split point and device addresses */
		vm_offset_t data = (vm_offset_t) dp->dp_kmsg;
		netipc_kmsg_cache = dp->dp_kmsg;

		/* bytes from kmsg start to the next page boundary */
		netipc_kmsg_first_half = round_page(data) - data;

		netvec_k[1].addr = KVTODEV(data);
		netvec_k[2].addr = KVTODEV(data + netipc_kmsg_first_half);
		netipc_kmsg_cache_misses++;
	} else {
		netipc_kmsg_cache_hits++;
	}

	/*
	 * Calculate how much of kmsg to send.
	 * (The remainder, if any, goes out later as DP_TYPE_KMSG_MORE.)
	 */
	length = kmsg_size(dp->dp_kmsg);
	length = ROUND_WORD(length);
	if (length > PAGE_SIZE) {
		length = PAGE_SIZE;
	}

	/*
	 * Set vector, with either one or two pieces for kmsg.
	 */
	if (length > netipc_kmsg_first_half) {
		/* kmsg straddles a page boundary: two data pieces */
		netvec_k[1].size = netipc_kmsg_first_half;
		netvec_k[2].size = length - netipc_kmsg_first_half;
		netvec_count = 3;
		netipc_kmsg_splits++;
	} else {
		netvec_k[1].size = length;
		netvec_count = 2;
	}

	/*
	 * Start the send, and the associated timer.
	 */
	netipc_send_with_timeout(remote, netvec_k, netvec_count,
				 (unsigned long) dp, dp->dp_seqid);
}
724
725 /*
726 * Called from netipc_send_dp.
727 *
728 * Derived from netipc_send_kmsg and netipc_send_page.
729 * Sends from a kmsg but uses page packets.
730 */
/*
 * Transmit one page's worth of a kmsg beyond its first page
 * (dp_copy_index counts the overflow pages already sent).
 * The data travels in a NETIPC_TYPE_PAGE packet (send_hdr_m).
 * Called from netipc_send_dp with the netipc lock held.
 */
netipc_send_kmsg_more(remote, dp)
	unsigned long remote;
	register netipc_packet_t dp;
{
	vm_size_t first_half, length;
	vm_offset_t data, offset;

#if iPSC386 || iPSC860
	netipc_called_here(__FILE__, __LINE__, "netipc_send_kmsg_more");
#endif iPSC386 || iPSC860
	assert(dp->dp_type == DP_TYPE_KMSG_MORE);

	assert(netipc_locked());

	/*
	 * Kmsgs are word aligned.
	 */
	assert(WORD_ALIGNED(dp->dp_kmsg));

	/*
	 * Calculate where in the kmsg to start,
	 * and how much to send.
	 * (+ PAGE_SIZE skips the first page, sent by netipc_send_kmsg.)
	 */
	offset = PAGE_SIZE * dp->dp_copy_index + PAGE_SIZE;
	data = (vm_offset_t) dp->dp_kmsg + offset;
	length = kmsg_size(dp->dp_kmsg) - offset;
	length = ROUND_WORD(length);
	if (length > PAGE_SIZE) {
		length = PAGE_SIZE;
	}

	/*
	 * Fill in send_hdr_m.
	 */
	send_hdr_m.pg.pg_copy_offset = 0;
	send_hdr_m.pg.pg_msgh_offset = 0;
	send_hdr_m.pg.pg_page_first = (dp->dp_copy_index == 0);
	send_hdr_m.pg.pg_page_last = (dp->dp_copy_index ==
				      dp->dp_copy_npages - 1);
	send_hdr_m.pg.pg_copy_size = dp->dp_copy->size;
	send_hdr_m.pg.pg_copy_last = dp->dp_copy_last;
	send_hdr_m.seqid = dp->dp_seqid;

	/*
	 * If data crosses a page boundary, we need to point netvec_m
	 * to both physical pages involved.
	 */
	first_half = round_page(data) - data;
	if (length > first_half) {
		netvec_m[1].addr = KVTODEV(data);
		netvec_m[1].size = first_half;

		netvec_m[2].addr = KVTODEV(data + first_half);
		netvec_m[2].size = length - first_half;

		netipc_send_with_timeout(remote, netvec_m, 3,
					 (unsigned long) dp, dp->dp_seqid);
	} else {
		netvec_m[1].addr = KVTODEV(data);
		netvec_m[1].size = length;

		netipc_send_with_timeout(remote, netvec_m, 2,
					 (unsigned long) dp, dp->dp_seqid);
	}
}
796
797 /*
798 * Called from netipc_send_dp.
799 *
800 * Derived from netipc_send_kmsg_more.
801 * Sends from a kalloc'd region containing out-of-line ports,
802 * but uses page packets.
803 */
/*
 * Transmit one page's worth of an out-of-line port array.
 * The kalloc'd array's address is found in the kmsg at dp_offset;
 * dp_copy_index counts the pages of it already sent. The data
 * travels in a NETIPC_TYPE_PAGE packet (send_hdr_o).
 * Called from netipc_send_dp with the netipc lock held.
 */
netipc_send_ool_ports(remote, dp)
	unsigned long remote;
	register netipc_packet_t dp;
{
	vm_size_t first_half, length;
	vm_offset_t data, offset;

#if iPSC386 || iPSC860
	netipc_called_here(__FILE__, __LINE__, "netipc_send_ool_ports");
#endif iPSC386 || iPSC860
	assert(netipc_locked());
	assert(dp->dp_type == DP_TYPE_OOL_PORTS);

	/* fetch the ool region's address out of the message body */
	data = * (vm_offset_t *)
		((vm_offset_t) &dp->dp_kmsg->ikm_header + dp->dp_offset);

	/*
	 * Kalloc'd regions for out-of-line ports are word aligned.
	 */
	assert(WORD_ALIGNED(data));

	/*
	 * Calculate where in the kmsg to start,
	 * and how much to send.
	 */
	offset = PAGE_SIZE * dp->dp_copy_index;
	data += offset;
	length = dp->dp_copy->size - offset;
	length = ROUND_WORD(length);
	if (length > PAGE_SIZE) {
		length = PAGE_SIZE;
	}

	/*
	 * Fill in send_hdr_o.
	 */
	send_hdr_o.pg.pg_copy_offset = 0;
	send_hdr_o.pg.pg_msgh_offset = dp->dp_offset;
	send_hdr_o.pg.pg_page_first = (dp->dp_copy_index == 0);
	send_hdr_o.pg.pg_page_last = (dp->dp_copy_index ==
				      dp->dp_copy_npages - 1);
	send_hdr_o.pg.pg_copy_size = dp->dp_copy->size;
	send_hdr_o.pg.pg_copy_last = dp->dp_copy_last;
	send_hdr_o.seqid = dp->dp_seqid;

	/*
	 * If data crosses a page boundary, we need to point netvec_o
	 * to both physical pages involved.
	 */
	first_half = round_page(data) - data;
	if (length > first_half) {
		netvec_o[1].addr = KVTODEV(data);
		netvec_o[1].size = first_half;

		netvec_o[2].addr = KVTODEV(data + first_half);
		netvec_o[2].size = length - first_half;

		netipc_send_with_timeout(remote, netvec_o, 3,
					 (unsigned long) dp, dp->dp_seqid);
	} else {
		netvec_o[1].addr = KVTODEV(data);
		netvec_o[1].size = length;

		netipc_send_with_timeout(remote, netvec_o, 2,
					 (unsigned long) dp, dp->dp_seqid);
	}
}
871
872 /*
873 * Called from netipc_send_dp.
874 */
/*
 * Transmit page dp_copy_index of the current page-list copy object
 * as a NETIPC_TYPE_PAGE packet. Copy objects with interesting
 * continuations are handed off to netipc_send_page_with_continuation.
 * Called from netipc_send_dp with the netipc lock held.
 */
netipc_send_page(remote, dp)
	unsigned long remote;
	register netipc_packet_t dp;
{
	vm_page_t *page_list;
	unsigned long align;
	unsigned long length;
	unsigned long offset;

#if iPSC386 || iPSC860
	netipc_called_here(__FILE__, __LINE__, "netipc_send_page");
#endif iPSC386 || iPSC860
	/* first page of a copy object: (re)initialize continuation state */
	if (dp->dp_copy_index == 0) {
		dp->dp_page_list_base = 0;
		dp->dp_has_continuation =
			vm_map_copy_has_interesting_cont(dp->dp_copy);
		dp->dp_being_continued = FALSE;
#if iPSC386 || iPSC860
		netipc_called_here(__FILE__, __LINE__, "netipc_send_page (dp_copy_index == 0)");
#endif iPSC386 || iPSC860
	}
	if (dp->dp_has_continuation) {
#if iPSC386 || iPSC860
		netipc_last_interesting_dp = dp;
		netipc_last_interesting_copy = dp->dp_copy;
		netipc_last_dp_copy_continuation = (dp->dp_copy)->cpy_cont;
		netipc_called_here(__FILE__, __LINE__, "netipc_send_page (has continuation)");
#endif iPSC386 || iPSC860
		netipc_send_page_with_continuation(remote, dp);
		return;
	}

	assert(netipc_locked());
	assert(dp->dp_copy_index < dp->dp_copy_npages);

	/*
	 * Calculate length and offset.
	 * Round both to word boundaries.
	 */
#if NORMA_REALIGN_OOL_DATA
	/* preserve the data's page offset on the wire, word-aligned */
	offset = (dp->dp_copy->offset & page_mask);
	align = (offset & WORD_MASK);
	if (dp->dp_copy_index < dp->dp_copy_npages - 1) {
		/*
		 * This is not the last page and therefore length will
		 * be a whole page. We just need to make offset word aligned.
		 */
		offset -= align;
		length = PAGE_SIZE;
	} else if (offset == 0) {
		/*
		 * Offset is page aligned and therefore word aligned.
		 * We just need to set length.
		 */
		length = (dp->dp_copy->size & page_mask);
		if (length == 0) {
			length = PAGE_SIZE;
		} else {
			length = ROUND_WORD(length);
		}
	} else {
		/*
		 * This is the last page, and this page list did not
		 * start on a page boundary.
		 *
		 * This code should correspond to the code in
		 * netipc_next_copy_object to calculate dp_copy_npages.
		 */
		vm_offset_t end = offset + dp->dp_copy->size;
		offset -= align;
		end = ROUND_WORD(end);
		length = ((end - offset) & page_mask);
		if (length == 0) {
			length = PAGE_SIZE;
		}
	}
#else NORMA_REALIGN_OOL_DATA
	/* send each page from its start; receiver realigns via align */
	offset = 0;
	align = dp->dp_copy->offset & page_mask;
	if (dp->dp_copy_index < dp->dp_copy_npages - 1) {
		length = PAGE_SIZE;
	} else {
		/* last page: send only the remainder, word-rounded */
		length = ((dp->dp_copy->size + align) & page_mask);
		if (length == 0) {
			length = PAGE_SIZE;
		} else {
			length = ROUND_WORD(length);
		}
	}
#endif NORMA_REALIGN_OOL_DATA

	assert(WORD_ALIGNED(offset));
	assert(WORD_ALIGNED(length));
	assert(length > 0);
	assert(length <= PAGE_SIZE);

	send_hdr_p.pg.pg_copy_offset = align;
	send_hdr_p.pg.pg_msgh_offset = dp->dp_offset;
	send_hdr_p.pg.pg_copy_size = dp->dp_copy->size;
	send_hdr_p.pg.pg_page_first = (dp->dp_copy_index == 0);
	send_hdr_p.pg.pg_page_last = (dp->dp_copy_index ==
				      dp->dp_copy_npages - 1);
	send_hdr_p.pg.pg_copy_last = dp->dp_copy_last;
	send_hdr_p.seqid = dp->dp_seqid;

	/*
	 * If data crosses a page boundary, we need to point netvec_p
	 * to both physical pages involved.
	 */
	page_list = &dp->dp_copy->cpy_page_list[dp->dp_copy_index];
	if (offset + length > PAGE_SIZE) {
		vm_offset_t first_half = PAGE_SIZE - offset;

		netvec_p[1].addr = VPTODEV(page_list[0]) + offset;
		netvec_p[1].size = first_half;

		netvec_p[2].addr = VPTODEV(page_list[1]);
		netvec_p[2].size = length - first_half;

		netipc_send_with_timeout(remote, netvec_p, 3,
					 (unsigned long) dp, dp->dp_seqid);
	} else {
		netvec_p[1].addr = VPTODEV(page_list[0]) + offset;
		netvec_p[1].size = length;

		netipc_send_with_timeout(remote, netvec_p, 2,
					 (unsigned long) dp, dp->dp_seqid);
	}
}
1004
1005 /*
1006 * Like netipc_send_page, but can deal with copy objects with continuations.
1007 * Does not try to be tricky about changing allignment, which is okay, because
1008 * beginning/end page fragementation is less significant for the large copy
1009 * objects that typically have continuations.
1010 *
1011 * XXX
1012 * This turns out not to be that different from netipc_send_page,
1013 * so should probably remerge the two.
1014 */
netipc_send_page_with_continuation(remote, dp)
	unsigned long remote;
	register netipc_packet_t dp;
{
	unsigned long align;
	unsigned long length;
	int index;

#if iPSC386 || iPSC860
	netipc_called_here(__FILE__, __LINE__, "netipc_send_page_with_continuation");
#endif iPSC386 || iPSC860
	assert(netipc_locked());
	assert(dp->dp_has_continuation);

	/*
	 * If we are currently being continued, return right away.
	 * (The netipc thread will restart us when the continuation
	 * completes.)
	 */
	if (dp->dp_being_continued) {
		printf3("being continued\n");
#if iPSC386 || iPSC860
		netipc_called_here(__FILE__, __LINE__, "{continued...}");
#endif iPSC386 || iPSC860
		return;
	}

	/*
	 * Calculate index into current page list from
	 * dp_copy_index, the current number of pages sent.
	 * If dp_copy is a continuation, these numbers won't be the same.
	 */
	index = dp->dp_copy_index - dp->dp_page_list_base;
	printf3("send_page_with_cont dp=0x%x c_idx=%d c_npages=%d idx=%d\n",
		dp, dp->dp_copy_index, dp->dp_copy_npages, index);
	assert(index >= 0 && index <= dp->dp_copy->cpy_npages);

	/*
	 * We may be at the end of the current page list,
	 * in which case we need to call the copy continuation.
	 * We cannot do this ourselves, since the operation might
	 * block. We therefore let the netipc thread do it.
	 * It will call this routine again with dp_page_list_base reset.
	 */
	if (index == dp->dp_copy->cpy_npages) {
		netipc_safe_vm_map_copy_invoke_cont(dp);
#if iPSC386 || iPSC860
		netipc_called_here(__FILE__, __LINE__, "{netipc_safe_vm_map_copy_invoke_cont");
#endif iPSC386 || iPSC860
		return;
	}

	/*
	 * Calculate length. Round to word boundary.
	 *
	 * NOTE(review): this test uses the per-list 'index' while
	 * netipc_send_page and the pg_page_last computation below use
	 * the overall dp_copy_index against dp_copy_npages; if
	 * dp_page_list_base > 0 the two differ -- verify this is the
	 * intended comparison.
	 */
	align = dp->dp_copy->offset & page_mask;
	if (index < dp->dp_copy_npages - 1) {
		length = PAGE_SIZE;
	} else {
		length = ((dp->dp_copy->size + align) & page_mask);
		if (length == 0) {
			length = PAGE_SIZE;
		} else {
			length = ROUND_WORD(length);
		}
	}

	assert(WORD_ALIGNED(length));
	assert(length > 0);
	assert(length <= PAGE_SIZE);

	send_hdr_p.pg.pg_copy_offset = align;
	send_hdr_p.pg.pg_msgh_offset = dp->dp_offset;
	send_hdr_p.pg.pg_copy_size = dp->dp_copy->size;
	send_hdr_p.pg.pg_page_first = (dp->dp_copy_index == 0);
	send_hdr_p.pg.pg_page_last = (dp->dp_copy_index ==
				      dp->dp_copy_npages - 1);
	send_hdr_p.pg.pg_copy_last = dp->dp_copy_last;
	send_hdr_p.seqid = dp->dp_seqid;

	netvec_p[1].addr = VPTODEV(dp->dp_copy->cpy_page_list[index]);
	netvec_p[1].size = length;

	netipc_send_with_timeout(remote, netvec_p, 2,
				 (unsigned long) dp, dp->dp_seqid);
}
1099
1100 /*
1101 * Advance dp->dp_copy to the next copy object in the list.
1102 */
1103 netipc_next_copy_object(dp)
1104 register netipc_packet_t dp;
1105 {
1106 vm_offset_t saddr, eaddr;
1107 ipc_kmsg_t kmsg = dp->dp_kmsg;
1108
1109 assert(netipc_locked());
1110 assert(dp->dp_offset >= sizeof(kmsg->ikm_header));
1111 assert(dp->dp_type == DP_TYPE_PAGE ||
1112 dp->dp_type == DP_TYPE_KMSG_MORE ||
1113 dp->dp_type == DP_TYPE_OOL_PORTS);
1114
1115 saddr = (vm_offset_t) &kmsg->ikm_header + dp->dp_offset;
1116 eaddr = (vm_offset_t) &kmsg->ikm_header + kmsg->ikm_header.msgh_size;
1117
1118 dp->dp_copy = VM_MAP_COPY_NULL;
1119 dp->dp_copy_index = 0;
1120 dp->dp_copy_last = TRUE;
1121
1122 if (dp->dp_type == DP_TYPE_KMSG_MORE) {
1123 dp->dp_copy = &netipc_kmsg_more_copy;
1124 dp->dp_copy->size = kmsg_size(kmsg) - PAGE_SIZE;
1125 dp->dp_copy_npages = atop(round_page(dp->dp_copy->size));
1126 assert(dp->dp_copy->size > 0);
1127 assert(dp->dp_copy_npages > 0);
1128 } else if (dp->dp_offset > sizeof(kmsg->ikm_header)) {
1129 printf4("nextipc_next_copy_object: multiple copy objects\n");
1130 /* skip copy object to get to next type record */
1131 saddr += sizeof(vm_offset_t);
1132 printf4("nextipc_next_copy_object: saddr=0x%x, eaddr=0x%x\n",
1133 saddr, eaddr);
1134 }
1135
1136 while (saddr < eaddr) {
1137 mach_msg_type_long_t *type;
1138 mach_msg_type_name_t name;
1139 mach_msg_type_size_t size; /* XXX */
1140 mach_msg_type_number_t number; /* XXX */
1141 vm_size_t length;
1142
1143 type = (mach_msg_type_long_t *) saddr;
1144 if (type->msgtl_header.msgt_longform) {
1145 name = type->msgtl_name;
1146 size = type->msgtl_size;
1147 number = type->msgtl_number;
1148 saddr += sizeof(mach_msg_type_long_t);
1149 } else {
1150 name = type->msgtl_header.msgt_name;
1151 size = type->msgtl_header.msgt_size;
1152 number = type->msgtl_header.msgt_number;
1153 saddr += sizeof(mach_msg_type_t);
1154 }
1155
1156 /* calculate length of data in bytes, rounding up */
1157
1158 length = ((number * size) + 7) >> 3;
1159
1160 if (type->msgtl_header.msgt_inline) {
1161 /* inline data sizes round up to int boundaries */
1162 saddr += (length + 3) &~ 3;
1163 continue;
1164 }
1165
1166 /*
1167 * XXX This is required because net_deliver does
1168 * XXX funny rounding to msgh_size.
1169 * XXX Why doesn't anything in ipc/ipc_kmsg.c need this?
1170 */
1171 if (saddr >= eaddr) {
1172 printf4("nextipc_next_co: saddr=0x%x, eaddr=0x%x\n",
1173 saddr, eaddr);
1174 break;
1175 }
1176
1177 if (* (vm_map_copy_t *) saddr == VM_MAP_COPY_NULL) {
1178 saddr += sizeof(vm_offset_t);
1179 continue;
1180 }
1181
1182 if (dp->dp_copy) {
1183 printf4("setting dp_copy_last false!\n");
1184 dp->dp_copy_last = FALSE;
1185 break;
1186 }
1187
1188 if (MACH_MSG_TYPE_PORT_ANY(name)) {
1189 dp->dp_type = DP_TYPE_OOL_PORTS;
1190 dp->dp_copy = &netipc_ool_ports_copy;
1191 dp->dp_copy->size = length;
1192 dp->dp_copy_npages = atop(round_page(length));
1193 dp->dp_offset = saddr - (vm_offset_t)&kmsg->ikm_header;
1194 assert(dp->dp_copy->size > 0);
1195 saddr += sizeof(vm_offset_t);
1196 continue;
1197 }
1198
1199 dp->dp_copy = * (vm_map_copy_t *) saddr;
1200 dp->dp_offset = saddr - (vm_offset_t) &kmsg->ikm_header;
1201
1202 assert(dp->dp_copy != VM_MAP_COPY_NULL);
1203 assert(dp->dp_copy->type == VM_MAP_COPY_PAGE_LIST);
1204 assert(dp->dp_copy->size == length);
1205
1206 if (vm_map_copy_has_interesting_cont(dp->dp_copy)) {
1207 /*
1208 * This copy object has a continuation,
1209 * which means that we won't change alignment,
1210 * thus dp_copy_npages, the number of pages that
1211 * the copy object will have on the destination,
1212 * is the same as the number of pages that it
1213 * has here. We cannot use dp->dp_copy->cpy_npages
1214 * since that is just the number of pages in the
1215 * first page list in the copy object.
1216 */
1217 /* panic("0x%x has continuation\n", dp->dp_copy);*/
1218 dp->dp_copy_npages =
1219 atop(round_page(dp->dp_copy->offset +
1220 dp->dp_copy->size) -
1221 trunc_page(dp->dp_copy->offset));
1222 assert(dp->dp_copy_npages >= dp->dp_copy->cpy_npages);
1223 } else {
1224 /*
1225 * This copy object does not have a continuation,
1226 * and therefore things are simple enough that we
1227 * will bother to change alignment if we can send
1228 * the copy object in one fewer pages than it
1229 * currently occupies, which is possible when the
1230 * total amount used by the first and last pages
1231 * is no larger than a page, after taking word
1232 * alignment into account.
1233 */
1234 vm_offset_t offset = dp->dp_copy->offset & page_mask;
1235 vm_offset_t end = offset + dp->dp_copy->size;
1236 #if NORMA_REALIGN_OOL_DATA
1237 offset = TRUNC_WORD(offset);
1238 end = ROUND_WORD(end);
1239 dp->dp_copy_npages = atop(round_page(end - offset));
1240 #else NORMA_REALIGN_OOL_DATA
1241 dp->dp_copy_npages = atop(round_page(end) -
1242 trunc_page(offset));
1243 #endif NORMA_REALIGN_OOL_DATA
1244 assert(dp->dp_copy_npages ==
1245 dp->dp_copy->cpy_npages ||
1246 dp->dp_copy_npages ==
1247 dp->dp_copy->cpy_npages - 1);
1248 }
1249 saddr += sizeof(vm_offset_t);
1250 }
1251 assert(dp->dp_copy);
1252 }
1253
/*
 * Position dp so that its current state corresponds to packet seqid,
 * walking forward through the kmsg and its copy objects as needed.
 *
 * This routine is overly general because it was written before it was clear
 * that copy object page lists cannot be backed up once their continuation
 * has been called.
 *
 * Note: dp_last_seqid is set when we first visit it.
 */
netipc_set_seqid(dp, seqid)
	register netipc_packet_t dp;
	register unsigned long seqid;
{
	assert(netipc_locked());

	/*
	 * Are we there already?
	 */
	if (dp->dp_seqid == seqid) {
		return;
	}

	/*
	 * We must be in the correct dp.
	 * (dp_last_seqid == 0 means the last seqid is not yet known.)
	 */
	assert(dp->dp_first_seqid <= seqid);
	assert(dp->dp_last_seqid == 0 || seqid <= dp->dp_last_seqid);

	/*
	 * If we want to be at the kmsg, go there.
	 */
	if (dp->dp_first_seqid == seqid) {
		dp->dp_type = DP_TYPE_KMSG;
		dp->dp_seqid = seqid;
		return;
	}

	/*
	 * If we are in the right copy object, just change the index.
	 */
	if (dp->dp_type != DP_TYPE_KMSG) {
		int index = dp->dp_copy_index + (seqid - dp->dp_seqid);
		if (index >= 0 && index < dp->dp_copy_npages) {
			dp->dp_copy_index = index;
			dp->dp_seqid = seqid;
			return;
		}
	}

	/*
	 * We might be too far forward and thus need to back up.
	 * The easiest way of backing up is to start at the kmsg
	 * and walk forward. This isn't necessarily the most efficient way!
	 *
	 * Note that this cannot happen if this is a simple message.
	 *
	 * XXX
	 * Page-list continuations limit how far we can back up.
	 */
	if (dp->dp_seqid > seqid) {
		dp->dp_seqid = dp->dp_first_seqid;
		assert(dp->dp_first_seqid < seqid);
	}

	/*
	 * If we are currently at the kmsg, advance to the first copy object.
	 * Otherwise, advance seqid to next copy object.
	 *
	 * XXX where do we make a fast check for simple messages?
	 */
	if (dp->dp_seqid == dp->dp_first_seqid) {
		dp->dp_seqid++;
		/* oversized kmsgs send their overflow before OOL data */
		if (kmsg_size(dp->dp_kmsg) > PAGE_SIZE) {
			dp->dp_type = DP_TYPE_KMSG_MORE;
		} else {
			dp->dp_type = DP_TYPE_PAGE;
		}
		dp->dp_offset = sizeof(dp->dp_kmsg->ikm_header);
	} else {
		printf4(">> %d ", dp->dp_seqid);
		dp->dp_seqid -= dp->dp_copy_index;	/* beginning of this obj */
		printf4(">> %d ", dp->dp_seqid);
		dp->dp_seqid += dp->dp_copy_npages;	/* begin of next */
		printf4("-> %d\n", dp->dp_seqid);
	}

	/*
	 * Examine each copy object to see whether it contains seqid.
	 * If it does, set index appropriately and return.
	 *
	 * XXX
	 * This should no longer be a for loop. We should only
	 * need to walk to the next copy object.
	 *
	 * XXX
	 * Should discard current copy object?!
	 */
	for (;;) {
		netipc_next_copy_object(dp);
		/* first time we see the final copy object, record last seqid */
		if (dp->dp_copy_last && dp->dp_last_seqid == 0) {
			dp->dp_last_seqid =
			    dp->dp_seqid + dp->dp_copy_npages - 1;
		}
		assert(seqid >= dp->dp_seqid);
		if (seqid < dp->dp_seqid + dp->dp_copy_npages) {
			dp->dp_copy_index = seqid - dp->dp_seqid;
			dp->dp_seqid = seqid;
			return;
		}
		assert(! dp->dp_copy_last);
		dp->dp_seqid += dp->dp_copy_npages;
	}
}
1365
/*
 * Called when dp->dp_kmsg has been completely sent, and it's time
 * to move to the next kmsg destined for dp->dp_remote, either from
 * the current port or from the next one.
 *
 * Returns true if there is still something to send.
 * If nothing remains, deallocates dp and clears its netipc_packet[]
 * slot, so the caller must not touch dp after a FALSE return.
 */
boolean_t
dp_advance(dp)
	register netipc_packet_t dp;
{
	ipc_kmsg_t kmsg;
	struct ipc_kmsg_queue *kmsgs;

	assert(netipc_locked());

	/*
	 * Find next kmsg on the current port.
	 */
	kmsgs = &dp->dp_remote_port->ip_messages.imq_messages;
	kmsg = ipc_kmsg_queue_first(kmsgs);

	/*
	 * If there are no more kmsgs on this port,
	 * move to the next port and check there.
	 */
	if (kmsg == IKM_NULL) {
		ipc_port_t port;

		/*
		 * If there are no more ports waiting to send
		 * to this node, reset and return.
		 */
		port = dp->dp_remote_port->ip_norma_queue_next;
		norma_ipc_unqueue(dp->dp_remote_port);
		if (port == IP_NULL) {
			/* nothing left for this node; retire the dp */
			unsigned long remote = dp->dp_remote;
			netipc_packet_deallocate(dp);
			netipc_packet[remote] = (netipc_packet_t) 0;
			return FALSE;
		}
		dp->dp_remote_port = port;

		/*
		 * Find first kmsg on the new port.
		 */
		kmsgs = &port->ip_messages.imq_messages;
		kmsg = ipc_kmsg_queue_first(kmsgs);
	}

	/*
	 * Remove the kmsg from the port.
	 */
	assert(kmsg != IKM_NULL);
	ipc_kmsg_rmqueue_first_macro(kmsgs, kmsg);
	dp->dp_remote_port->ip_msgcount--;

	/*
	 * Reset dp and return success.
	 * A one-page kmsg's first seqid is also its last; otherwise the
	 * last seqid is unknown (0) until netipc_set_seqid discovers it.
	 */
	dp->dp_kmsg = kmsg;
	dp->dp_type = DP_TYPE_KMSG;
	dp->dp_first_seqid = Xnetipc_next_seqid(dp->dp_remote);
	dp->dp_last_unacked = dp->dp_first_seqid;
	if (is_one_page_kmsg(kmsg)) {
		dp->dp_last_seqid = dp->dp_first_seqid;
	} else {
		dp->dp_last_seqid = 0;
	}
	return TRUE;
}
1437
/*
 * Free kmsg and last copy object associated with dp->dp_kmsg.
 * Both the kmsg and the copy object are handed to the netipc thread
 * for deferred disposal (we cannot block here).
 */
dp_finish(dp)
	netipc_packet_t dp;
{
	register ipc_kmsg_t kmsg = dp->dp_kmsg;
	mach_msg_bits_t bits;

	/*
	 * Queue kmsg to be freed, after getting bits and storing remote port.
	 * The remote port is stashed in msgh_remote_port so that
	 * netipc_output_replenish can do the deferred send-right copyout.
	 */
	printf2("-free %d..%d\n", dp->dp_first_seqid, dp->dp_last_seqid);
	bits = kmsg->ikm_header.msgh_bits;
	kmsg->ikm_header.msgh_remote_port = (mach_port_t) dp->dp_remote_port;
	netipc_safe_ikm_free(kmsg);

	/*
	 * Discard last copy object.
	 * If dp_copy is unset, walk forward to the last seqid to find it.
	 * The static kmsg_more/ool_ports copies are never discarded.
	 *
	 * XXX Should have discarded all previous copy objects.
	 * XXX Netipc_next_copy_object obvious place to do so.
	 */
	if (bits & MACH_MSGH_BITS_COMPLEX_DATA) {
		if (! dp->dp_copy) {
			netipc_set_seqid(dp, dp->dp_last_seqid);
			assert(dp->dp_copy);
		}
		if (dp->dp_copy != &netipc_kmsg_more_copy &&
		    dp->dp_copy != &netipc_ool_ports_copy) {
			netipc_safe_vm_map_copy_discard(dp->dp_copy);
		}
	}

}
1473
1474 /*
1475 * Received successful ack of seqid.
1476 */
1477 void
1478 netipc_recv_success(dp, seqid)
1479 netipc_packet_t dp;
1480 unsigned long seqid;
1481 {
1482 assert(netipc_locked());
1483 assert(seqid == dp->dp_last_unacked);
1484 dp->dp_last_unacked++;
1485 if (dp->dp_last_seqid && dp->dp_last_seqid == seqid) {
1486 dp_finish(dp);
1487 if (dp_advance(dp)) {
1488 netipc_start(dp->dp_remote);
1489 }
1490 } else {
1491 netipc_start(dp->dp_remote);
1492 }
1493 }
1494
1495 void
1496 netipc_recv_retarget(dp, seqid, new_remote)
1497 netipc_packet_t dp;
1498 unsigned long seqid;
1499 unsigned long new_remote;
1500 {
1501 ipc_port_t remote_port;
1502
1503 assert(netipc_locked());
1504 assert(seqid == dp->dp_first_seqid);
1505
1506 /*
1507 * Handle acknowledgement stuff, and find port.
1508 */
1509 remote_port = netipc_dequeue_port(dp);
1510 if (remote_port == IP_NULL) {
1511 return;
1512 }
1513
1514 /*
1515 * Reset destination node field of destination port.
1516 * If new destination is still remote, then start a send;
1517 * otherwise, queued messages will be absorbed automatically by
1518 * norma_ipc_receive_rright. XXX (that part does not work yet) XXX
1519 */
1520 remote_port->ip_norma_dest_node = new_remote;
1521 if (new_remote != node_self()) {
1522 norma_ipc_queue_port(remote_port);
1523 } else {
1524 printf("*** TELL JOE: retarget to node self.\n");
1525 }
1526 }
1527
1528 void
1529 netipc_recv_dead(dp, seqid)
1530 netipc_packet_t dp;
1531 unsigned long seqid;
1532 {
1533 ipc_port_t remote_port;
1534
1535 assert(netipc_locked());
1536 assert(seqid == dp->dp_first_seqid);
1537
1538 /*
1539 * Handle acknowledgement stuff, and find port.
1540 */
1541 remote_port = netipc_dequeue_port(dp);
1542 if (remote_port == IP_NULL) {
1543 return;
1544 }
1545 printf1("*** netipc_recv_dead! 0x%x:%x\n",
1546 remote_port, remote_port->ip_norma_uid);
1547
1548 /*
1549 * Put the port on the dead port list, so that the netipc thread
1550 * can find it and call norma_ipc_destroy_proxy.
1551 *
1552 * Using ip_norma_queue_next makes the port look
1553 * like it's already queued. This will prevent norma_ipc_queue_port
1554 * from sticking it on the queue again and starting another send.
1555 */
1556 assert(! norma_ipc_queued(remote_port));
1557 remote_port->ip_norma_queue_next = netipc_dead_port_list;
1558 netipc_dead_port_list = remote_port;
1559 }
1560
1561 void
1562 netipc_recv_not_found(dp, seqid)
1563 netipc_packet_t dp;
1564 unsigned long seqid;
1565 {
1566 assert(netipc_locked());
1567 /*
1568 * XXX For now, we handle this as if the port had died.
1569 */
1570 printf1("netipc_recv_not_found!\n");
1571 netipc_recv_dead(dp, seqid);
1572 }
1573
1574 void
1575 netipc_recv_ack_with_status(packetid, seqid, status, data)
1576 unsigned long packetid;
1577 unsigned long seqid;
1578 kern_return_t status;
1579 unsigned long data;
1580 {
1581 netipc_packet_t dp;
1582
1583 dp = (netipc_packet_t) packetid;
1584 assert(dp);
1585 if (status == KERN_SUCCESS) {
1586 netipc_recv_success(dp, seqid);
1587 } else if (status == KERN_NOT_RECEIVER) {
1588 netipc_recv_retarget(dp, seqid, data);
1589 } else if (status == KERN_INVALID_RIGHT) {
1590 netipc_recv_dead(dp, seqid);
1591 } else if (status == KERN_INVALID_NAME) {
1592 netipc_recv_not_found(dp, seqid);
1593 } else {
1594 panic("status %d from receive_dest\n", status);
1595 }
1596 }
1597
1598 netipc_send_dp(remote, dp)
1599 unsigned long remote;
1600 netipc_packet_t dp;
1601 {
1602 #if iPSC386 || iPSC860
1603 netipc_called_here(__FILE__, __LINE__, "netipc_send_dp (enter)");
1604 #endif iPSC386 || iPSC860
1605 if (dp->dp_type == DP_TYPE_KMSG) {
1606 netipc_send_kmsg(remote, dp);
1607 } else if (dp->dp_type == DP_TYPE_PAGE) {
1608 netipc_send_page(remote, dp);
1609 } else if (dp->dp_type == DP_TYPE_KMSG_MORE) {
1610 netipc_send_kmsg_more(remote, dp);
1611 } else {
1612 assert(dp->dp_type == DP_TYPE_OOL_PORTS);
1613 netipc_send_ool_ports(remote, dp);
1614 }
1615 #if iPSC386 || iPSC860
1616 netipc_called_here(__FILE__, __LINE__, "netipc_send_dp (leave)");
1617 #endif iPSC386 || iPSC860
1618 }
1619
/*
 * Called from lower level when we have previously stated that we
 * have more to send and when the send interface is not busy.
 *
 * Seqid is the new seqid that should be used if there is something to send.
 * Returns TRUE iff a packet was actually sent.
 */
boolean_t
netipc_send_new(remote, seqid)
	unsigned long remote;
	unsigned long seqid;
{
	register netipc_packet_t dp;

#if	iPSC386 || iPSC860
	netipc_called_here(__FILE__, __LINE__, "netipc_send_new (enter)");
#endif	iPSC386 || iPSC860
	assert(netipc_locked());

	/* nothing queued for this node */
	dp = netipc_packet[remote];
	if (dp == NETIPC_PACKET_NULL) {
#if	iPSC386 || iPSC860
		netipc_called_here(__FILE__, __LINE__, "{dp == 0, leave}");
#endif	iPSC386 || iPSC860
		return FALSE;
	}
	assert(dp->dp_remote == remote);

	/* past the end of the current kmsg (last seqid known and exceeded) */
	if (dp->dp_last_seqid && seqid > dp->dp_last_seqid) {
#if	iPSC386 || iPSC860
		netipc_called_here(__FILE__, __LINE__, "{seqid > last, leave}");
#endif	iPSC386 || iPSC860
		return FALSE;
	}

	/* don't run ahead of the acknowledgement window */
	if (seqid > dp->dp_last_unacked) {
#if	iPSC386 || iPSC860
		netipc_called_here(__FILE__, __LINE__, "{seqid > last_unacked, leave}");
#endif	iPSC386 || iPSC860
		return FALSE;	/* stop-and-wait */
	}

	/* position dp at seqid and transmit */
	netipc_set_seqid(dp, seqid);
	netipc_send_dp(remote, dp);
#if	iPSC386 || iPSC860
	netipc_called_here(__FILE__, __LINE__, "netipc_send_new (leave)");
#endif	iPSC386 || iPSC860
	return TRUE;
}
1664
1665 /*
1666 * Called from lower level when we have to retransmit something that
1667 * we have already sent.
1668 */
1669 netipc_send_old(packetid, seqid)
1670 unsigned long packetid;
1671 unsigned long seqid;
1672 {
1673 netipc_packet_t dp;
1674
1675 dp = (netipc_packet_t) packetid;
1676 assert(dp);
1677 netipc_set_seqid(dp, seqid);
1678 netipc_send_dp(dp->dp_remote, dp);
1679 }
1680
netipc_packet_t netipc_packet_list = NETIPC_PACKET_NULL; /* free list of dp structs */
int netipc_packet_count = 0;	/* number of dps on netipc_packet_list */

/* dps whose copy-object continuation the netipc thread must invoke */
netipc_packet_t netipc_continuing_packet_list = NETIPC_PACKET_NULL;
1685
/*
 * Pop a dp from the preallocated free list.
 * On non-iPSC configurations this may return NETIPC_PACKET_NULL if
 * netipc_output_replenish has not yet refilled the list; on iPSC it
 * falls back to zalloc directly.
 */
netipc_packet_t
netipc_packet_allocate()
{
	netipc_packet_t dp;

	assert(netipc_locked());
	dp = netipc_packet_list;
	if (dp != NETIPC_PACKET_NULL) {
		netipc_packet_list = dp->dp_next;
		netipc_packet_count--;
	}
#if	iPSC386 || iPSC860
	else {
		/*
		 * must be netipc_output_replenish() hasn't tried
		 * to grab it's 300 packets yet...
		 */
		netipc_packet_list_empty++;
		dp = (netipc_packet_t) zalloc(netipc_packet_zone);
	}
#endif	iPSC386 || iPSC860
	return dp;
}
1709
1710 void
1711 netipc_packet_deallocate(dp)
1712 netipc_packet_t dp;
1713 {
1714 assert(netipc_locked());
1715
1716 dp->dp_next = netipc_packet_list;
1717 netipc_packet_list = dp;
1718 netipc_packet_count++;
1719 }
1720
/*
 * Hand dp to the netipc thread so it can invoke the copy object's
 * continuation (which may block, so we cannot do it here).
 *
 * Currently requires a thread wakeup every VM_MAP_COPY_PAGE_LIST_MAX pages.
 * Does this matter? Can we do better?
 */
netipc_safe_vm_map_copy_invoke_cont(dp)
	netipc_packet_t dp;
{
	assert(netipc_locked());
	assert(! dp->dp_being_continued);
	/* mark first so sends on this dp are suppressed until the
	 * continuation completes (see netipc_send_page_with_continuation) */
	dp->dp_being_continued = TRUE;
	printf3("netipc_safe_vm_map_copy_invoke_cont(dp=0x%x)\n", dp);
	dp->dp_next = netipc_continuing_packet_list;
	netipc_continuing_packet_list = dp;
	netipc_thread_wakeup();
}
1736
1737
#if	iPSC386 || iPSC860
/*
 * Debugging aid (iPSC only): verify that a copy object's continuation
 * is one of the continuations we know how to handle.
 */
netipc_vm_map_copy_cont_check(copy)
	vm_map_copy_t copy;
{
	extern kern_return_t vm_map_copy_discard_cont();
	extern kern_return_t vm_map_copyin_page_list_cont();
	extern kern_return_t norma_deliver_page_continuation();

	if ((copy->cpy_cont != vm_map_copy_discard_cont) &&
	    (copy->cpy_cont != vm_map_copyin_page_list_cont) &&
	    (copy->cpy_cont != norma_deliver_page_continuation)) {
		printf("Unknown continuation: copy=%x, cpy_cont=%x\n",
		       copy, copy->cpy_cont);
		assert(0);
	}
}
#endif	iPSC386 || iPSC860
1755
1756
1757 netipc_vm_map_copy_invoke(dp)
1758 netipc_packet_t dp;
1759 {
1760 kern_return_t kr;
1761 vm_map_copy_t old_copy, new_copy;
1762
1763 /*
1764 * Get the old copy object and save its npages value.
1765 */
1766 assert(netipc_locked());
1767 printf3("netipc_vm_map_copy_invoke_cont(dp=0x%x)\n", dp);
1768 old_copy = dp->dp_copy;
1769
1770 /*
1771 * Unlock, and invoke the continuation.
1772 * If the continuation succeeds, discard the old copy object, and lock.
1773 * If it fails... not sure what to do.
1774 */
1775 netipc_thread_unlock();
1776 #if iPSC386 || iPSC860
1777 netipc_vm_map_copy_cont_check(old_copy);
1778 #endif iPSC386 || iPSC860
1779 vm_map_copy_invoke_cont(old_copy, &new_copy, &kr);
1780 if (kr != KERN_SUCCESS) {
1781 /*
1782 * XXX
1783 * What do we do here?
1784 * What should appear in the receiver's address space?
1785 *
1786 * Should we abort the send at this point?
1787 * We cannot, really, since we let the sender
1788 * continue... didn't we?
1789 * I guess we shouldn't.
1790 */
1791 netipc_thread_lock();
1792 panic("netipc_vm_map_copy_invoke: kr=%d%x\n", kr, kr);
1793 return;
1794 }
1795 vm_map_copy_discard(old_copy);
1796 netipc_thread_lock();
1797
1798 /*
1799 * The continuation invocation succeeded.
1800 * Adjust page_list_base and reset being_continued flag.
1801 */
1802 dp->dp_page_list_base = dp->dp_copy_index;
1803 dp->dp_copy = new_copy;
1804 dp->dp_being_continued = FALSE;
1805 }
1806
1807 /*
1808 * XXX
1809 * Use the int type field to implement a linked list.
1810 *
1811 * XXX
1812 * It's really quite unfortunate to have to do a wakeup each time
1813 * we want to discard a copy. It would be much better for the sending
1814 * thread -- if he's still waiting -- to do the discard.
1815 * We could also check to see whether the pages were stolen, in
1816 * which case it's not as important to release the pages quickly.
1817 */
vm_map_copy_t netipc_safe_vm_map_copy_discard_list = VM_MAP_COPY_NULL; /* copies awaiting discard by netipc thread */
1819
/*
 * Queue a page-list copy object for discard by the netipc thread
 * (vm_map_copy_discard may block, so we cannot call it here).
 * The copy's int type field is abused as the list link; it is
 * restored to VM_MAP_COPY_PAGE_LIST before the actual discard
 * (see netipc_output_replenish).
 */
netipc_safe_vm_map_copy_discard(copy)
	vm_map_copy_t copy;
{
	assert(netipc_locked());
	assert(copy->type == VM_MAP_COPY_PAGE_LIST);
	copy->type = (int) netipc_safe_vm_map_copy_discard_list;
	netipc_safe_vm_map_copy_discard_list = copy;
	netipc_thread_wakeup();
}
1829
ipc_kmsg_t netipc_safe_ikm_free_list = IKM_NULL; /* kmsgs awaiting free by netipc thread */
1831
/*
 * Queue a kmsg for deferred freeing by the netipc thread, linked
 * through ikm_next. Called with the netipc lock held (from dp_finish)
 * — NOTE(review): no lock assert here; confirm all callers hold it.
 * No wakeup is issued; presumably the caller's path already wakes
 * the netipc thread.
 */
netipc_safe_ikm_free(kmsg)
	ipc_kmsg_t kmsg;
{
	kmsg->ikm_next = netipc_safe_ikm_free_list;
	netipc_safe_ikm_free_list = kmsg;
}
1838
/*
 * Netipc thread work loop for the output side: drain the four deferred
 * work lists (copy continuations, dead ports, copy discards, kmsg
 * frees) and refill the dp free list. All the potentially blocking
 * operations deferred by the interrupt-level code happen here.
 * Called with the netipc lock NOT held; each loop takes and drops it
 * around list manipulation. (The unlocked loop-condition reads are
 * presumably benign rechecks — NOTE(review): confirm.)
 */
netipc_output_replenish()
{
	assert(netipc_unlocked());

	/* invoke pending copy-object continuations and restart sends */
	while (netipc_continuing_packet_list != NETIPC_PACKET_NULL) {
		netipc_packet_t dp;

		netipc_thread_lock();
		dp = netipc_continuing_packet_list;
		netipc_continuing_packet_list = dp->dp_next;
		netipc_vm_map_copy_invoke(dp);
		printf3("netipc_replenish: send_page_with_c 0x%x\n", dp);
		netipc_start(dp->dp_remote);
		netipc_thread_unlock();
	}

	/* destroy proxies for ports whose destinations died */
	while (netipc_dead_port_list != IP_NULL) {
		ipc_port_t port;

		netipc_thread_lock();
		port = netipc_dead_port_list;
		netipc_dead_port_list = port->ip_norma_queue_next;
		netipc_thread_unlock();
		ip_reference(port);
		norma_ipc_dead_destination(port);
	}

	/* discard queued copy objects (link was stashed in type field) */
	while (netipc_safe_vm_map_copy_discard_list != VM_MAP_COPY_NULL) {
		vm_map_copy_t copy;

		netipc_thread_lock();
		copy = netipc_safe_vm_map_copy_discard_list;
		netipc_safe_vm_map_copy_discard_list
		    = (vm_map_copy_t) copy->type;
		netipc_thread_unlock();
		copy->type = VM_MAP_COPY_PAGE_LIST;
		vm_map_copy_discard(copy);
	}

	/* free queued kmsgs and finish the deferred dest-port copyout */
	while (netipc_safe_ikm_free_list != IKM_NULL) {
		ipc_kmsg_t kmsg;
		ipc_port_t dest;
		mach_msg_bits_t bits;

		/*
		 * Lock, grab kmsg, and grab dest and bits from kmsg
		 * before it is freed.
		 * (dest was stashed in msgh_remote_port by dp_finish.)
		 */
		netipc_thread_lock();
		kmsg = netipc_safe_ikm_free_list;
		netipc_safe_ikm_free_list = kmsg->ikm_next;
		dest = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
		bits = kmsg->ikm_header.msgh_bits;

		/*
		 * Free kmsg under lock or not, as appropriate.
		 */
		if (kmsg->ikm_size == IKM_SIZE_NORMA) {
			netipc_page_put(kmsg->ikm_page);
			netipc_thread_unlock();
		} else {
			netipc_thread_unlock();
			if (ikm_cache() == IKM_NULL &&
			    kmsg->ikm_size == IKM_SAVED_KMSG_SIZE) {
				ikm_cache() = kmsg;
			} else {
				ikm_free(kmsg);
			}
		}
		/*
		 * Perform deferred copyout (including release) of dest.
		 */
		assert(dest->ip_references > 0);
		if (bits & MACH_MSGH_BITS_MIGRATED) {
			norma_ipc_send_migrating_dest(dest);
		} else {
			norma_ipc_send_dest(dest, MACH_MSGH_BITS_REMOTE(bits));
		}
	}

	/* keep a pool of dps preallocated for interrupt-level use */
	while (netipc_packet_count < 300) {	/* XXX ??? ever alloced at int? */
		netipc_packet_t dp;

		dp = (netipc_packet_t) zalloc(netipc_packet_zone);
		netipc_thread_lock();
		dp->dp_next = netipc_packet_list;
		netipc_packet_list = dp;
		netipc_packet_count++;
		netipc_thread_unlock();
	}
}
1925
1926 #include <mach_kdb.h>
1927 #if MACH_KDB
1928
1929 #define printf kdbprintf
1930
1931 /*
1932 * Routine: netipc_packet_print
1933 * Purpose:
1934 * Pretty-print a netipc packet for ddb.
1935 */
1936
1937 netipc_packet_print(dp)
1938 netipc_packet_t dp;
1939 {
1940 extern int indent;
1941
1942 if ((unsigned int) dp < MAX_NUM_NODES) {
1943 dp = netipc_packet[(unsigned int) dp];
1944 if (dp == NETIPC_PACKET_NULL) {
1945 printf("null netipc packet\n");
1946 return;
1947 }
1948 }
1949
1950 printf("netipc packet 0x%x\n", dp);
1951
1952 indent += 2;
1953
1954 iprintf("type=%d", dp->dp_type);
1955 switch ((int) dp->dp_type) {
1956 case DP_TYPE_KMSG:
1957 printf("[kmsg]");
1958 break;
1959
1960 case DP_TYPE_PAGE:
1961 printf("[page]");
1962 break;
1963
1964 case DP_TYPE_KMSG_MORE:
1965 printf("[kmsg_more]");
1966 break;
1967
1968 case DP_TYPE_OOL_PORTS:
1969 printf("[ool_ports]");
1970 break;
1971
1972 default:
1973 printf("[bad type]");
1974 break;
1975 }
1976 printf(", remote=%d", dp->dp_remote);
1977 printf(", seqid=%d", dp->dp_seqid);
1978 printf(", first_seqid=%d", dp->dp_first_seqid);
1979 printf(", last_seqid=%d\n", dp->dp_last_seqid);
1980
1981 iprintf("kmsg=0x%x", dp->dp_kmsg);
1982 printf(", offset=%d", dp->dp_offset);
1983 printf("[0x%x]", dp->dp_offset + (char *) dp->dp_kmsg);
1984 printf(", next=0x%x\n", dp->dp_next);
1985
1986 iprintf("copy=0x%x", dp->dp_copy);
1987 printf(", index=%d", dp->dp_copy_index);
1988 printf(", npages=%d", dp->dp_copy_npages);
1989 printf(", base=%d", dp->dp_page_list_base);
1990 printf(", has_cont=%d", dp->dp_page_list_base);
1991 printf(", being_cont=%d\n", dp->dp_being_continued);
1992
1993 indent -=2;
1994 }
1995 #endif MACH_KDB
Cache object: 04c5f15b9396fd6b18bacf8b1b21f7e2
|