FreeBSD/Linux Kernel Cross Reference
sys/chips/nw_mk.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 19  * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: nw_mk.c,v $
29 * Revision 2.3 93/12/23 17:00:37 dbg
30 * Converted to new timers.
31 * [93/12/23 dbg]
32 *
33 * Revision 2.2 93/08/10 15:18:43 mrt
34 * Initial check-in.
35 * [93/06/09 16:00:26 jcb]
36 *
37 *
38 */
39
40 /*** MACH KERNEL WRAPPER ***/
41
#ifndef STUB
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/eventcount.h>
#include <kern/mach_timer.h>
#include <kern/clock.h>
#include <machine/machspl.h>		/* spl definitions */
#include <vm/vm_kern.h>
#include <chips/nc.h>
#include <chips/nw_mk.h>

/*
 * All wrapper entry points are serialized by one simple lock, taken
 * at splimp() so network interrupts cannot run while it is held.
 * previous_spl saves the spl for nw_unlock(); keeping it in a single
 * global is safe only because lock and spl always travel together.
 * NOTE(review): nw_lock()/nw_unlock() expand to two statements and
 * are not do{}while(0)-wrapped -- callers must not use them as the
 * body of an unbraced if/else.
 */
decl_simple_lock_data(, nw_simple_lock);
spl_t previous_spl;

#define nw_lock() \
  previous_spl = splimp(); \
  simple_lock(&nw_simple_lock)

#define nw_unlock() \
  simple_unlock(&nw_simple_lock); \
  splx(previous_spl)

/*
 * Protection/view record: grants a task use of an endpoint and
 * records where the endpoint's projected buffer is mapped in that
 * task's address space.  Several tasks may share one endpoint,
 * chained through next.
 */
typedef struct nw_pvs {
  task_t owner;
  char *buf_start;		/* user-space start of projected buffer */
  char *buf_end;		/* user-space end (exclusive) */
  struct nw_pvs *next;
} nw_pv_s, *nw_pv_t;

/* One thread blocked on an endpoint (element of an rx or tx queue). */
typedef struct nw_waiters {
  thread_t waiter;
  struct nw_waiters *next;
} nw_waiter_s, *nw_waiter_t;

/*
 * Host endpoint control block, parallel to the device-level ect[]
 * (chips/nc.h): the endpoint's task views plus queues of threads
 * waiting for receive, transmit, or a signal (open/accept/close).
 */
typedef struct {
  nw_pv_t pv;
  thread_t sig_waiter;
  nw_waiter_t rx_first;
  nw_waiter_t rx_last;
  nw_waiter_t tx_first;
  nw_waiter_t tx_last;
} nw_hecb, *nw_hecb_t;

#else
#include "nc.h"
#include "nw_mk.h"
#endif
90
/*** Types and data structures ***/

int h_initialized = FALSE;		/* set once by h_initialize() */
nw_pv_s nw_pv[2*MAX_EP];		/* static pool of view records */
nw_pv_t nw_free_pv;			/* free list threaded through nw_pv[] */
nw_waiter_s nw_waiter[2*MAX_EP];	/* static pool of waiter records */
nw_waiter_t nw_free_waiter;		/* free list threaded through nw_waiter[] */
nw_ep_owned_s nw_waited[3*MAX_EP];	/* pool of endpoint-ownership/wait records */
nw_ep_owned_t nw_free_waited;		/* free list threaded through nw_waited[] */
nw_hecb hect[MAX_EP];			/* host endpoint control blocks, indexed by ep */
timer_elt_data_t nw_fast_timer, nw_slow_timer;	/* drive mk_fast_sweep/mk_slow_sweep */
102
103 /*** Initialization ***/
104
105 void h_initialize() {
106 int ep, last_ep;
107
108 if (!h_initialized) {
109 last_ep = sizeof(nw_pv)/sizeof(nw_pv_s) - 1;
110 for (ep = 0; ep < last_ep; ep++) {
111 nw_pv[ep].next = &nw_pv[ep+1];
112 }
113 nw_pv[last_ep].next = NULL;
114 nw_free_pv = &nw_pv[0];
115 last_ep = sizeof(nw_waiter)/sizeof(nw_waiter_s) - 1;
116 for (ep = 0; ep < last_ep; ep++) {
117 nw_waiter[ep].next = &nw_waiter[ep+1];
118 }
119 nw_waiter[last_ep].next = NULL;
120 nw_free_waiter = &nw_waiter[0];
121 last_ep = sizeof(nw_waited)/sizeof(nw_ep_owned_s) - 1;
122 for (ep = 0; ep < last_ep; ep++) {
123 nw_waited[ep].next = &nw_waited[ep+1];
124 }
125 nw_waited[last_ep].next = NULL;
126 nw_free_waited = &nw_waited[0];
127 last_ep = sizeof(hect)/sizeof(nw_hecb);
128 for (ep = 0; ep < last_ep; ep++) {
129 hect[ep].pv = NULL;
130 hect[ep].sig_waiter = NULL;
131 hect[ep].rx_first = NULL;
132 hect[ep].rx_last = NULL;
133 hect[ep].tx_first = NULL;
134 hect[ep].tx_last = NULL;
135 }
136 nw_fast_timer.te_fcn = mk_fast_sweep;
137 nw_fast_timer.te_param = NULL;
138 nw_fast_timer.te_flags = TELT_UNSET;
139 nw_fast_timer.te_clock = sys_clock;
140 nw_slow_timer.te_fcn = mk_slow_sweep;
141 nw_slow_timer.te_param = NULL;
142 nw_slow_timer.te_flags = TELT_UNSET;
143 nw_slow_timer.te_clock = sys_clock;
144 #if PRODUCTION
145 {
146 time_spec_t interval;
147
148 interval.seconds = 2;
149 interval.nanoseconds = 0;
150 (void) timer_elt_enqueue(&nw_slow_timer, interval, FALSE);
151 }
152 #endif
153 h_initialized = TRUE;
154 }
155 }
156
157 /*** User-trappable functions ***/
158
/*
 * mk_update: privileged network control call.
 * master_port acts as a crude privilege token -- zero is rejected
 * outright (no real port validation yet, see XXX).
 * NW_HOST_ADDRESS_(UN)REGISTER validate up_info as a user-space
 * nw_address_s and pass it to nc_update(); NW_INITIALIZE runs
 * nc_initialize().  Returns NW_SUCCESS, NW_FAILURE, or
 * NW_INVALID_ARGUMENT.
 */
nw_result mk_update(mach_port_t master_port, nw_update_type up_type,
		    int *up_info) {
  nw_result rc;

  if (master_port == 0) { /* XXX crude privilege check only */
    rc = NW_FAILURE;
  } else {
    nw_lock();
    switch (up_type) {
    case NW_HOST_ADDRESS_REGISTER:
    case NW_HOST_ADDRESS_UNREGISTER:
      /* up_info must be a readable+writable nw_address_s. */
      if (invalid_user_access(current_task()->map, (vm_offset_t) up_info,
			      (vm_offset_t) up_info + sizeof(nw_address_s) - 1,
			      VM_PROT_READ | VM_PROT_WRITE)) {
	rc = NW_INVALID_ARGUMENT;
      } else {
	rc = nc_update(up_type, up_info);
      }
      break;
    case NW_INITIALIZE:
      nc_initialize();
      rc = NW_SUCCESS;
      break;
    default:
      rc = NW_INVALID_ARGUMENT;
    }
    nw_unlock();
  }
  return rc;
}
189
190
191
/*
 * mk_lookup: query call.
 * NW_HOST_ADDRESS_LOOKUP: look_info is a user-space nw_address_s,
 * validated then passed to nc_lookup().
 * NW_STATUS: look_info[0] names a device; if the device table entry
 * records a failure status that status is copied back, otherwise the
 * device's status entry point is invoked.
 */
nw_result mk_lookup(nw_lookup_type lt, int *look_info) {
  nw_result rc;
  int max_size, dev;

  nw_lock();
  switch (lt) {
  case NW_HOST_ADDRESS_LOOKUP:
    if (invalid_user_access(current_task()->map, (vm_offset_t) look_info,
			    (vm_offset_t) look_info + sizeof(nw_address_s) - 1,
			    VM_PROT_READ | VM_PROT_WRITE)) {
      rc = NW_INVALID_ARGUMENT;
    } else {
      rc = nc_lookup(lt, look_info);
    }
    break;
  case NW_STATUS:
    /* look_info must hold both the device index in and the status out. */
    max_size = sizeof(nw_device);
    if (max_size < sizeof(nw_result))
      max_size = sizeof(nw_result);
    if (invalid_user_access(current_task()->map, (vm_offset_t) look_info,
			    (vm_offset_t) look_info + max_size - 1,
			    VM_PROT_READ | VM_PROT_WRITE) ||
	(dev = look_info[0]) >= MAX_DEV || dev < 0) {
      rc = NW_INVALID_ARGUMENT;
    } else {
      if (devct[dev].status != NW_SUCCESS) {
	/* Device itself is unhealthy: report its recorded status. */
	look_info[0] = (int) devct[dev].status;
	rc = NW_SUCCESS;
      } else {
	rc = (*(devct[dev].entry->status)) (dev);
      }
    }
    break;
  default:
    rc = NW_INVALID_ARGUMENT;
  }
  nw_unlock();
  return rc;
}
231
232
/*
 * mk_endpoint_allocate_internal: common endpoint allocation path.
 * Rounds buffer_size up to whole 4K pages (default one page),
 * allocates a projected buffer shared between kernel and caller, and
 * asks the device-independent layer for the endpoint.  If system is
 * TRUE the endpoint gets no pv record (kernel use).
 *
 * The NW_NO_EP retry: when allocation fails but the nc layer left a
 * candidate endpoint in *epp, that endpoint is presumed a stale line
 * -- it is closed (waiting if the device closes asynchronously),
 * deallocated, its line mapping cleared, and allocation is retried.
 *
 * NOTE(review): the ownership record is charged to current_task()
 * even when system is TRUE -- confirm that is intended.
 */
nw_result mk_endpoint_allocate_internal(nw_ep_t epp, nw_protocol protocol,
					nw_acceptance accept,
					u_int buffer_size, boolean_t system) {
  nw_result rc;
  u_int ep;
  vm_offset_t kernel_addr, user_addr;
  nw_pv_t pv;
  nw_ep_owned_t owned;

  ep = *epp;
  if (buffer_size == 0)
    buffer_size = 0x1000;			/* default: one page */
  else
    buffer_size = (buffer_size + 0xfff) & ~0xfff;	/* round up to pages */
  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) != NULL) {
    rc = NW_BAD_EP;		/* out of range, or already has an owner */
  } else if (nw_free_pv == NULL || nw_free_waited == NULL) {
    rc = NW_NO_EP;		/* bookkeeping pools exhausted */
  } else if (projected_buffer_allocate(current_task()->map, buffer_size, 0,
				       &kernel_addr, &user_addr,
				       VM_PROT_READ | VM_PROT_WRITE,
				       VM_INHERIT_NONE) != KERN_SUCCESS) {
    rc = NW_NO_RESOURCES;
  } else {
    rc = nc_endpoint_allocate(epp, protocol, accept,
			      (char *) kernel_addr, buffer_size);
    if (rc == NW_NO_EP && (ep = *epp) != 0) {
      /* Recycle the endpoint the nc layer pointed us at. */
      rc = (*(devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->
	      close)) (ep);
      if (rc == NW_SYNCH) {
	/* Device closes asynchronously: wait for the signal. */
	hect[ep].sig_waiter = current_thread();
	assert_wait(0, TRUE);
	simple_unlock(&nw_simple_lock);
	thread_block(CONTINUE_NULL);
      }
      rc = nc_endpoint_deallocate(ep);
      if (rc == NW_SUCCESS) {
	nc_line_update(&ect[ep].conn->peer, 0);
	rc = nc_endpoint_allocate(epp, protocol, accept,
				  (char *) kernel_addr, buffer_size);
      }
    }
    if (rc == NW_SUCCESS) {
      ep = *epp;
      if (system) {
	hect[ep].pv = NULL;		/* kernel-owned: no user view */
      } else {
	/* Attach a pv record mapping the caller's buffer window. */
	hect[ep].pv = nw_free_pv;
	nw_free_pv = nw_free_pv->next;
	hect[ep].pv->owner = current_task();
	hect[ep].pv->buf_start = (char *) user_addr;
	hect[ep].pv->buf_end = (char *) user_addr + buffer_size;
	hect[ep].pv->next = NULL;
      }
      hect[ep].sig_waiter = NULL;
      hect[ep].rx_first = NULL;
      hect[ep].rx_last = NULL;
      hect[ep].tx_first = NULL;
      hect[ep].tx_last = NULL;
      /* Record ownership for exit-time cleanup (mk_endpoint_collect). */
      owned = nw_free_waited;
      nw_free_waited = nw_free_waited->next;
      owned->ep = ep;
      owned->next = current_task()->nw_ep_owned;
      current_task()->nw_ep_owned = owned;
    } else {
      projected_buffer_deallocate(current_task()->map, user_addr,
				  user_addr + buffer_size);
    }
  }
  nw_unlock();
  return rc;
}
306
307
308 nw_result mk_endpoint_allocate(nw_ep_t epp, nw_protocol protocol,
309 nw_acceptance accept, u_int buffer_size) {
310 nw_result rc;
311
312 if (invalid_user_access(current_task()->map, (vm_offset_t) epp,
313 (vm_offset_t) epp + sizeof(nw_ep) - 1,
314 VM_PROT_READ | VM_PROT_WRITE) ||
315 (protocol != NW_RAW && protocol != NW_DATAGRAM &&
316 protocol != NW_SEQ_PACKET) || (accept != NW_NO_ACCEPT &&
317 accept != NW_APPL_ACCEPT && accept != NW_AUTO_ACCEPT)) {
318 rc = NW_INVALID_ARGUMENT;
319 } else {
320 rc = mk_endpoint_allocate_internal(epp, protocol, accept,
321 buffer_size, FALSE);
322 }
323 return rc;
324 }
325
326 nw_result mk_endpoint_deallocate_internal(nw_ep ep, task_t task,
327 boolean_t shutdown) {
328 nw_result rc;
329 nw_pv_t pv, pv_previous;
330 nw_ep_owned_t owned, owned_previous;
331 nw_waiter_t w, w_previous, w_next;
332
333 nw_lock();
334 if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
335 rc = NW_BAD_EP;
336 } else {
337 pv_previous = NULL;
338 while (pv != NULL && pv->owner != task) {
339 pv_previous = pv;
340 pv = pv->next;
341 }
342 if (pv == NULL) {
343 rc = NW_PROT_VIOLATION;
344 } else {
345 if (projected_buffer_deallocate(task->map, pv->buf_start,
346 pv->buf_end) != KERN_SUCCESS) {
347 rc = NW_INCONSISTENCY;
348 printf("Endpoint deallocate: inconsistency p. buffer\n");
349 } else {
350 if (pv_previous == NULL)
351 hect[ep].pv = pv->next;
352 else
353 pv_previous->next = pv->next;
354 pv->next = nw_free_pv;
355 nw_free_pv = pv;
356 owned = task->nw_ep_owned;
357 owned_previous = NULL;
358 while (owned != NULL && owned->ep != ep) {
359 owned_previous = owned;
360 owned = owned->next;
361 }
362 if (owned == NULL) {
363 rc = NW_INCONSISTENCY;
364 printf("Endpoint deallocate: inconsistency owned\n");
365 } else {
366 if (owned_previous == NULL)
367 task->nw_ep_owned = owned->next;
368 else
369 owned_previous->next = owned->next;
370 owned->next = nw_free_waited;
371 nw_free_waited = owned;
372 if (hect[ep].sig_waiter != NULL &&
373 hect[ep].sig_waiter->task == task) {
374 /* if (!shutdown)*/
375 mk_deliver_result(hect[ep].sig_waiter, NW_ABORTED);
376 hect[ep].sig_waiter = NULL;
377 }
378 w = hect[ep].rx_first;
379 w_previous = NULL;
380 while (w != NULL) {
381 if (w->waiter->task == task) {
382 /* if (!shutdown)*/
383 mk_deliver_result(w->waiter, NULL);
384 w_next = w->next;
385 if (w_previous == NULL)
386 hect[ep].rx_first = w_next;
387 else
388 w_previous->next = w_next;
389 w->next = nw_free_waiter;
390 nw_free_waiter = w;
391 w = w_next;
392 } else {
393 w_previous = w;
394 w = w->next;
395 }
396 }
397 if (hect[ep].rx_first == NULL)
398 hect[ep].rx_last = NULL;
399 w = hect[ep].tx_first;
400 w_previous = NULL;
401 while (w != NULL) {
402 if (w->waiter->task == task) {
403 /* if (!shutdown)*/
404 mk_deliver_result(w->waiter, NW_ABORTED);
405 w_next = w->next;
406 if (w_previous == NULL)
407 hect[ep].tx_first = w_next;
408 else
409 w_previous->next = w_next;
410 w->next = nw_free_waiter;
411 nw_free_waiter = w;
412 w = w_next;
413 } else {
414 w_previous = w;
415 w = w->next;
416 }
417 }
418 if (hect[ep].tx_first == NULL)
419 hect[ep].tx_last = NULL;
420 if (hect[ep].pv == NULL) {
421 if (ect[ep].state != NW_UNCONNECTED) {
422 rc = (*(devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->
423 close)) (ep);
424 if (rc == NW_SYNCH) {
425 hect[ep].sig_waiter = current_thread();
426 assert_wait(0, TRUE);
427 simple_unlock(&nw_simple_lock);
428 thread_block(CONTINUE_NULL);
429 }
430 }
431 rc = nc_endpoint_deallocate(ep);
432 }
433 }
434 }
435 }
436 }
437 nw_unlock();
438 return rc;
439 }
440
441 nw_result mk_endpoint_deallocate(nw_ep ep) {
442
443 mk_endpoint_deallocate_internal(ep, current_task(), FALSE);
444 }
445
446
/*
 * mk_buffer_allocate: allocate a message buffer of the given size
 * inside endpoint ep's projected buffer for the calling task.  The
 * kernel address from nc_buffer_allocate is translated into the
 * caller's mapping before return.  Returns NW_BUFFER_ERROR on a bad
 * endpoint or protection violation, NULL if nc allocation fails.
 */
nw_buffer_t mk_buffer_allocate(nw_ep ep, u_int size) {
  nw_buffer_t buf;
  nw_pv_t pv;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    buf = NW_BUFFER_ERROR;
  } else {
    /* The calling task must hold a view on this endpoint. */
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      buf = NW_BUFFER_ERROR;
    } else {
      buf = nc_buffer_allocate(ep, size);
      if (buf != NULL) {
	/* Translate kernel buffer address into the caller's window. */
	buf = (nw_buffer_t) ((char *) buf - ect[ep].buf_start + pv->buf_start);
      }
    }
  }
  nw_unlock();
  return buf;
}
469
470
471
/*
 * mk_buffer_deallocate: release a buffer previously handed to the
 * caller.  The user pointer must lie entirely within the caller's
 * buffer window and reference a buffer still marked in use; it is
 * translated back to the kernel mapping before nc_buffer_deallocate.
 */
nw_result mk_buffer_deallocate(nw_ep ep, nw_buffer_t buffer) {
  nw_result rc;
  nw_pv_t pv;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BAD_EP;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_PROT_VIOLATION;
    } else {
      /* Header and full length must fit in the window, and the
	 buffer must currently be allocated. */
      if ((char *) buffer < pv->buf_start ||
	  (char *) buffer + sizeof(nw_buffer_s) > pv->buf_end ||
	  !buffer->buf_used ||
	  (char *) buffer + buffer->buf_length > pv->buf_end) {
	rc = NW_BAD_BUFFER;
      } else {
	buffer = (nw_buffer_t) ((char *) buffer - pv->buf_start +
				ect[ep].buf_start);
	rc = nc_buffer_deallocate(ep, buffer);
      }
    }
  }
  nw_unlock();
  return rc;
}
500
501
/*
 * mk_connection_open_internal: open a connection from local_ep to
 * the given remote address/endpoint through the owning device's open
 * entry.  Caller must hold the nw lock.  If the device reports
 * NW_SYNCH the current thread is recorded as sig_waiter and blocks
 * until signalled; the NW_SYNCH status is still returned.
 * NOTE(review): the simple lock is dropped before thread_block and
 * not reacquired here, although callers go on to call nw_unlock() --
 * this matches the sibling code paths in this file; confirm intent.
 */
nw_result mk_connection_open_internal(nw_ep local_ep, nw_address_1 rem_addr_1,
				      nw_address_2 rem_addr_2, nw_ep remote_ep) {
  nw_result rc;

  rc = (*devct[NW_DEVICE(rem_addr_1)].entry->open) (local_ep,
						    rem_addr_1, rem_addr_2,
						    remote_ep);
  if (rc == NW_SYNCH) {
    hect[local_ep].sig_waiter = current_thread();
    assert_wait(0, TRUE);
    simple_unlock(&nw_simple_lock);
    thread_block(CONTINUE_NULL);
  }
  return rc;
}
517
/*
 * mk_connection_open: user-callable connection open.  Verifies the
 * caller holds a view on local_ep, then invokes the device's open
 * entry.  On NW_SYNCH the thread blocks with the mk_return
 * continuation, so the final status reaches user space through the
 * waker's mk_deliver_result rather than this function's return.
 */
nw_result mk_connection_open(nw_ep local_ep, nw_address_1 rem_addr_1,
			     nw_address_2 rem_addr_2, nw_ep remote_ep) {
  nw_result rc;
  nw_pv_t pv;

  nw_lock();
  if (local_ep >= MAX_EP || (pv = hect[local_ep].pv) == NULL) {
    rc = NW_BAD_EP;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_PROT_VIOLATION;
    } else {
      rc = (*(devct[NW_DEVICE(rem_addr_1)].entry->open))
	(local_ep, rem_addr_1, rem_addr_2, remote_ep);
      if (rc == NW_SYNCH) {
	hect[local_ep].sig_waiter = current_thread();
	assert_wait(0, TRUE);
	current_thread()->nw_ep_waited = NULL;
	simple_unlock(&nw_simple_lock);
	thread_block(mk_return);
      }
    }
  }
  nw_unlock();
  return rc;
}
546
547
/*
 * mk_connection_accept: accept an incoming connection on ep.  msg is
 * the connection-request buffer and must be a live buffer inside the
 * caller's window; new_epp, if non-NULL, must be user-accessible and
 * contain either 0 or ep itself.  Blocks with the mk_return
 * continuation if the device completes the accept asynchronously.
 */
nw_result mk_connection_accept(nw_ep ep, nw_buffer_t msg,
			       nw_ep_t new_epp) {
  nw_result rc;
  nw_pv_t pv;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BAD_EP;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_PROT_VIOLATION;
    } else if ((char *) msg < pv->buf_start ||
	       (char *) msg + sizeof(nw_buffer_s) > pv->buf_end ||
	       !msg->buf_used ||
	       (char *) msg + msg->buf_length > pv->buf_end) {
      rc = NW_BAD_BUFFER;	/* msg not a live buffer in the window */
    } else if (new_epp != NULL &&
	       (invalid_user_access(current_task()->map, (vm_offset_t) new_epp,
				    (vm_offset_t) new_epp + sizeof(nw_ep) - 1,
				    VM_PROT_READ | VM_PROT_WRITE) ||
		(*new_epp != 0 && *new_epp != ep))) {
      rc = NW_INVALID_ARGUMENT;
    } else {
      rc = (*(devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->accept))
	(ep, msg, new_epp);
      if (rc == NW_SYNCH) {
	hect[ep].sig_waiter = current_thread();
	assert_wait(0, TRUE);
	current_thread()->nw_ep_waited = NULL;
	simple_unlock(&nw_simple_lock);
	thread_block(mk_return);
      }
    }
  }
  nw_unlock();
  return rc;
}
587
/*
 * mk_connection_close: close ep's connection through the owning
 * device.  The caller must hold a view on ep.  Blocks with the
 * mk_return continuation if the device closes asynchronously.
 */
nw_result mk_connection_close(nw_ep ep) {
  nw_result rc;
  nw_pv_t pv;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BAD_EP;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_PROT_VIOLATION;
    } else {
      rc = (*devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->close)
	(ep);
      if (rc == NW_SYNCH) {
	hect[ep].sig_waiter = current_thread();
	assert_wait(0, TRUE);
	current_thread()->nw_ep_waited = NULL;
	simple_unlock(&nw_simple_lock);
	thread_block(mk_return);
      }
    }
  }
  nw_unlock();
  return rc;
}
615
616
/*
 * mk_multicast_add: add (rem_addr_1, rem_addr_2, remote_ep) to the
 * multicast group rooted at local_ep via the device's add entry.
 * The caller must hold a view on local_ep.  Blocks with the
 * mk_return continuation if the device completes asynchronously.
 */
nw_result mk_multicast_add(nw_ep local_ep, nw_address_1 rem_addr_1,
			   nw_address_2 rem_addr_2, nw_ep remote_ep) {
  nw_result rc;
  nw_pv_t pv;

  nw_lock();
  if (local_ep >= MAX_EP || (pv = hect[local_ep].pv) == NULL) {
    rc = NW_BAD_EP;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_PROT_VIOLATION;
    } else {
      rc = (*(devct[NW_DEVICE(rem_addr_1)].entry->add))
	(local_ep, rem_addr_1, rem_addr_2, remote_ep);
      if (rc == NW_SYNCH) {
	hect[local_ep].sig_waiter = current_thread();
	assert_wait(0, TRUE);
	current_thread()->nw_ep_waited = NULL;
	simple_unlock(&nw_simple_lock);
	thread_block(mk_return);
      }
    }
  }
  nw_unlock();
  return rc;
}
645
646
647 nw_result mk_multicast_drop(nw_ep local_ep, nw_address_1 rem_addr_1,
648 nw_address_2 rem_addr_2, nw_ep remote_ep) {
649 nw_result rc;
650 nw_pv_t pv;
651
652 nw_lock();
653 if (local_ep >= MAX_EP || (pv = hect[local_ep].pv) == NULL) {
654 rc = NW_BAD_EP;
655 } else {
656 while (pv != NULL && pv->owner != current_task())
657 pv = pv->next;
658 if (pv == NULL) {
659 rc = NW_PROT_VIOLATION;
660 } else {
661 rc = (*(devct[NW_DEVICE(rem_addr_1)].entry->drop))
662 (local_ep, rem_addr_1, rem_addr_2, remote_ep);
663 if (rc == NW_SYNCH) {
664 hect[local_ep].sig_waiter = current_thread();
665 assert_wait(0, TRUE);
666 current_thread()->nw_ep_waited = NULL;
667 simple_unlock(&nw_simple_lock);
668 thread_block(mk_return);
669 }
670 }
671 }
672 nw_unlock();
673 return rc;
674 }
675
676
/*
 * mk_endpoint_status: copy ep's connection state and peer address
 * out to user-supplied *state and *peer (both validated writable).
 * The caller must hold a view on ep.
 */
nw_result mk_endpoint_status(nw_ep ep, nw_state_t state,
			     nw_peer_t peer) {
  nw_result rc;
  nw_pv_t pv;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BAD_EP;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_PROT_VIOLATION;
    } else {
      /* Both output areas must be writable by the caller. */
      if (invalid_user_access(current_task()->map, (vm_offset_t) state,
			      (vm_offset_t) state + sizeof(nw_state) - 1,
			      VM_PROT_WRITE) ||
	  invalid_user_access(current_task()->map, (vm_offset_t) peer,
			      (vm_offset_t) peer + sizeof(nw_peer_s) - 1,
			      VM_PROT_WRITE)) {
	rc = NW_INVALID_ARGUMENT;
      } else {
	rc = nc_endpoint_status(ep, state, peer);
      }
    }
  }
  nw_unlock();
  return rc;
}
706
707
/*
 * mk_send: transmit the buffer chain starting at msg on endpoint ep.
 * Each buffer in the chain (linked by buf_next) is validated against
 * the caller's window (alignment, bounds, in-use) and translated
 * into a tx header chain addressed in the kernel mapping.  For
 * datagram protocols on connection-oriented devices, a "line"
 * endpoint to the peer is looked up or transparently created and
 * opened.  If the device queues the send (NW_SYNCH/NW_QUEUED) the
 * thread joins the sender's tx waiter queue and blocks with the
 * mk_return continuation.
 */
nw_result mk_send(nw_ep ep, nw_buffer_t msg, nw_options options) {
  nw_result rc;
  nw_pv_t pv;
  nw_ep sender;
  int dev;
  nw_ecb_t ecb;
  nw_tx_header_t header, first_header, previous_header;
  nw_hecb_t hecb;
  nw_waiter_t w;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BAD_EP;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_PROT_VIOLATION;
    } else {
      ecb = &ect[ep];
      if (ecb->state == NW_INEXISTENT ||
	  (ecb->protocol == NW_SEQ_PACKET && ecb->conn == NULL)) {
	rc = NW_BAD_EP;
      } else {
	/* Build a tx header chain, one header per user buffer. */
	first_header = header = nc_tx_header_allocate();
	previous_header = NULL;
	rc = NW_SUCCESS;
	while (header != NULL) {
	  /* Validate bounds, 4-byte alignment, and in-use flag. */
	  if ((char *) msg < pv->buf_start ||
	      (char *) msg + sizeof(nw_buffer_s) > pv->buf_end ||
	      ((int) msg & 0x3) || (msg->block_offset & 0x3) ||
	      (msg->block_length & 0x3) || !msg->buf_used ||
	      (char *) msg + msg->buf_length > pv->buf_end ||
	      msg->block_offset + msg->block_length > msg->buf_length) {
	    rc = NW_BAD_BUFFER;
	    break;
	  } else {
	    if (previous_header == NULL) {
	      /* First buffer fixes the destination peer. */
	      if (ecb->protocol == NW_SEQ_PACKET)
		header->peer = ecb->conn->peer;
	      else
		header->peer = msg->peer;
	    } else {
	      previous_header->next = header;
	    }
	    /* Translate user address into the kernel mapping. */
	    header->buffer = (nw_buffer_t) ((char *) msg - pv->buf_start +
					    ecb->buf_start);
	    header->block = (char *) header->buffer + msg->block_offset;
	    if (!msg->block_deallocate)
	      header->buffer = NULL;	/* NULL buffer => do not free after tx */
	    header->msg_length = 0;
	    header->block_length = msg->block_length;
	    first_header->msg_length += header->block_length;	/* total length */
	    header->next = NULL;
	    if (msg->buf_next == NULL)
	      break;
	    msg = msg->buf_next;
	    previous_header = header;
	    header = nc_tx_header_allocate();
	  }
	}
	if (header == NULL) {
	  /* Ran out of tx headers mid-chain. */
	  nc_tx_header_deallocate(first_header);
	  rc = NW_NO_RESOURCES;
	} else if (rc == NW_SUCCESS) {
	  dev = NW_DEVICE(first_header->peer.rem_addr_1);
	  if (ecb->protocol != NW_DATAGRAM ||
	      devct[dev].type != NW_CONNECTION_ORIENTED) {
	    sender = first_header->peer.local_ep;
	    rc = NW_SUCCESS;
	  } else {
	    /* Datagram over a connection-oriented device: find or
	       create the line endpoint to the peer. */
	    sender = nc_line_lookup(&first_header->peer);
	    if (sender == -1) {
	      rc = NW_BAD_ADDRESS;
	    } else if (sender > 0) {
	      rc = NW_SUCCESS;
	    } else {
	      rc = mk_endpoint_allocate_internal(&sender, NW_LINE,
						 NW_AUTO_ACCEPT, 0, TRUE);
	      if (rc == NW_SUCCESS) {
		rc = mk_connection_open_internal(sender,
						 first_header->peer.rem_addr_1,
						 first_header->peer.rem_addr_2,
						 MASTER_LINE_EP);
		if (rc == NW_SUCCESS)
		  nc_line_update(&first_header->peer, sender);
	      }
	    }
	  }
	  if (rc == NW_SUCCESS) {
	    first_header->sender = sender;
	    first_header->options = options;
	    rc = (*(devct[dev].entry->send)) (sender, first_header, options);
	    if ((rc == NW_SYNCH || rc == NW_QUEUED) &&
		nw_free_waiter != NULL) {
	      /* Queue ourselves on the sender's tx waiter list and
		 block; completion arrives via mk_deliver_result. */
	      w = nw_free_waiter;
	      nw_free_waiter = w->next;
	      w->waiter = current_thread();
	      w->next = NULL;
	      hecb = &hect[sender];
	      if (hecb->tx_last == NULL) {
		hecb->tx_first = hecb->tx_last = w;
	      } else {
		hecb->tx_last = hecb->tx_last->next = w;
	      }
	      assert_wait(0, TRUE);
	      current_thread()->nw_ep_waited = NULL;
	      simple_unlock(&nw_simple_lock);
	      thread_block(mk_return);
	    }
	  }
	}
      }
    }
  }
  nw_unlock();
  return rc;
}
826
827
/*
 * mk_receive: dequeue the next received message on ep, or block.
 * time_out: 0 = poll (NULL when nothing is queued), -1 = wait
 * forever, otherwise wait with a timeout.  A queued buffer is
 * translated from the kernel mapping into the caller's window.  On
 * the blocking path the thread resumes in the mk_return
 * continuation, which returns the waker's wait_result directly to
 * user space -- NOTE(review): rc is uninitialized if control ever
 * fell through here after thread_block; presumably it cannot.
 */
nw_buffer_t mk_receive(nw_ep ep, int time_out) {
  nw_buffer_t rc;
  nw_pv_t pv;
  nw_ecb_t ecb;
  nw_rx_header_t header;
  nw_hecb_t hecb;
  nw_waiter_t w;
  nw_ep_owned_t waited;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BUFFER_ERROR;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_BUFFER_ERROR;
    } else {
      ecb = &ect[ep];
      header = ecb->rx_first;
      if (header != NULL) {
	/* Message queued: translate to caller's window and dequeue. */
	rc = (nw_buffer_t) ((char *) header->buffer - ecb->buf_start +
			    pv->buf_start);
	ecb->rx_first = header->next;
	if (ecb->rx_first == NULL)
	  ecb->rx_last = NULL;
	nc_rx_header_deallocate(header);
      } else if (time_out != 0 && nw_free_waiter != NULL &&
		 (time_out == -1 || nw_free_waited != NULL)) {
	/* Nothing queued: join the endpoint's rx waiter queue. */
	w = nw_free_waiter;
	nw_free_waiter = w->next;
	w->waiter = current_thread();
	w->next = NULL;
	hecb = &hect[ep];
	if (hecb->rx_last == NULL)
	  hecb->rx_first = hecb->rx_last = w;
	else
	  hecb->rx_last = hecb->rx_last->next = w;
	assert_wait(0, TRUE);
	if (time_out != -1) {
	  /* Finite timeout: record the wait so mk_waited_collect can
	     clean up, and arm the thread timer if not already set. */
	  waited = nw_free_waited;
	  nw_free_waited = waited->next;
	  waited->ep = ep;
	  waited->next = NULL;
	  current_thread()->nw_ep_waited = waited;
	  current_thread()->wait_result = NULL;
	  if (!(current_thread()->timer.te_flags & TELT_SET))
	    thread_set_timeout(time_out);
	} else {
	  current_thread()->nw_ep_waited = NULL;
	}
	simple_unlock(&nw_simple_lock);
	thread_block(mk_return);
      } else {
	rc = NULL;	/* poll miss, or waiter pools exhausted */
      }
    }
  }
  nw_unlock();
  return rc;
}
889
890
/*
 * mk_rpc: combined send-and-wait-for-reply.  The transmit half
 * mirrors mk_send (validate and translate the user buffer chain,
 * resolve a line endpoint for datagrams on connection-oriented
 * devices), but invokes the device's rpc entry.  If the device
 * returns a reply buffer immediately it is translated into the
 * caller's window and returned; otherwise (NULL and a nonzero
 * time_out) the thread joins ep's rx waiter queue and blocks with
 * the mk_return continuation, exactly as in mk_receive.
 */
nw_buffer_t mk_rpc(nw_ep ep, nw_buffer_t msg, nw_options options,
		   int time_out) {
  nw_buffer_t rc;
  nw_result nrc;
  nw_ep sender;
  int dev;
  nw_pv_t pv;
  nw_ecb_t ecb;
  nw_tx_header_t header, first_header, previous_header;
  nw_hecb_t hecb;
  nw_waiter_t w;
  nw_ep_owned_t waited;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BUFFER_ERROR;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_BUFFER_ERROR;
    } else {
      ecb = &ect[ep];
      if (ecb->state == NW_INEXISTENT ||
	  (ecb->protocol == NW_SEQ_PACKET && ecb->conn == NULL)) {
	rc = NW_BUFFER_ERROR;
      } else {
	/* Build the tx header chain (one header per user buffer). */
	first_header = header = nc_tx_header_allocate();
	previous_header = NULL;
	rc = NULL;
	while (header != NULL) {
	  /* Validate bounds, 4-byte alignment, and in-use flag. */
	  if ((char *) msg < pv->buf_start ||
	      (char *) msg + sizeof(nw_buffer_s) > pv->buf_end ||
	      ((int) msg & 0x3) || (msg->block_offset & 0x3) ||
	      (msg->block_length & 0x3) || !msg->buf_used ||
	      (char *) msg + msg->buf_length > pv->buf_end ||
	      msg->block_offset + msg->block_length > msg->buf_length) {
	    rc = NW_BUFFER_ERROR;
	    break;
	  } else {
	    if (previous_header == NULL) {
	      /* First buffer fixes the destination peer. */
	      if (ecb->protocol == NW_SEQ_PACKET)
		header->peer = ecb->conn->peer;
	      else
		header->peer = msg->peer;
	    } else {
	      previous_header->next = header;
	    }
	    /* Translate user address into the kernel mapping. */
	    header->buffer = (nw_buffer_t) ((char *) msg - pv->buf_start +
					    ecb->buf_start);
	    header->block = (char *) header->buffer + msg->block_offset;
	    if (!msg->block_deallocate)
	      header->buffer = NULL;	/* NULL buffer => do not free after tx */
	    header->msg_length = 0;
	    header->block_length = msg->block_length;
	    first_header->msg_length += header->block_length;
	    header->next = NULL;
	    if (msg->buf_next == NULL)
	      break;
	    msg = msg->buf_next;
	    previous_header = header;
	    header = nc_tx_header_allocate();
	  }
	}
	if (header == NULL) {
	  /* Ran out of tx headers mid-chain. */
	  nc_tx_header_deallocate(first_header);
	  rc = NW_BUFFER_ERROR;
	} else if (rc != NW_BUFFER_ERROR) {
	  dev = NW_DEVICE(first_header->peer.rem_addr_1);
	  if (ecb->protocol != NW_DATAGRAM ||
	      devct[dev].type != NW_CONNECTION_ORIENTED) {
	    sender = first_header->peer.local_ep;
	    nrc = NW_SUCCESS;
	  } else {
	    /* Datagram over a connection-oriented device: find or
	       create the line endpoint to the peer. */
	    sender = nc_line_lookup(&first_header->peer);
	    if (sender == -1) {
	      nrc = NW_BAD_ADDRESS;
	    } else if (sender > 0) {
	      nrc = NW_SUCCESS;
	    } else {
	      nrc = mk_endpoint_allocate_internal(&sender, NW_LINE,
						  NW_AUTO_ACCEPT, 0, TRUE);
	      if (nrc == NW_SUCCESS) {
		nrc = mk_connection_open_internal(sender,
						  first_header->peer.rem_addr_1,
						  first_header->peer.rem_addr_2,
						  MASTER_LINE_EP);
		if (nrc == NW_SUCCESS)
		  nc_line_update(&first_header->peer, sender);
	      }
	    }
	  }
	  if (nrc == NW_SUCCESS) {
	    first_header->sender = sender;
	    first_header->options = options;
	    rc = (*(devct[dev].entry->rpc)) (sender, first_header, options);
	    if (rc != NULL && rc != NW_BUFFER_ERROR) {
	      /* Immediate reply: translate into the caller's window. */
	      rc = (nw_buffer_t) ((char *) rc - ecb->buf_start +
				  pv->buf_start);
	    } else if (rc == NULL && time_out != 0 && nw_free_waiter != NULL &&
		       (time_out == -1 || nw_free_waited != NULL)) {
	      /* Reply pending: wait on ep's rx queue (see mk_receive). */
	      w = nw_free_waiter;
	      nw_free_waiter = w->next;
	      w->waiter = current_thread();
	      w->next = NULL;
	      hecb = &hect[ep];
	      if (hecb->rx_last == NULL)
		hecb->rx_first = hecb->rx_last = w;
	      else
		hecb->rx_last = hecb->rx_last->next = w;
	      assert_wait(0, TRUE);
	      if (time_out != -1) {
		/* Finite timeout: record the wait and arm the timer. */
		waited = nw_free_waited;
		nw_free_waited = waited->next;
		waited->ep = ep;
		waited->next = NULL;
		current_thread()->nw_ep_waited = waited;
		current_thread()->wait_result = NULL;
		if (!(current_thread()->timer.te_flags & TELT_SET))
		  thread_set_timeout(time_out);
	      } else {
		current_thread()->nw_ep_waited = NULL;
	      }
	      simple_unlock(&nw_simple_lock);
	      thread_block(mk_return);
	    }
	  }
	}
      }
    }
  }
  nw_unlock();
  return rc;
}
1025
/*
 * mk_select: wait for input on any of nep endpoints (user array
 * epp).  First pass returns the first queued buffer, translated into
 * the caller's window.  If nothing is ready: time_out == 0 returns
 * NULL; otherwise one waiter and one waited record are reserved per
 * endpoint (NW_BUFFER_ERROR if the pools run dry), the thread is
 * queued on every endpoint's rx list, and it blocks with the
 * mk_return continuation.  NOTE(review): rc is uninitialized if
 * control ever fell through after thread_block; presumably mk_return
 * completes the system call instead.
 */
nw_buffer_t mk_select(u_int nep, nw_ep_t epp, int time_out) {
  nw_buffer_t rc;
  nw_pv_t pv;
  int i;
  nw_ep ep;
  nw_ecb_t ecb;
  nw_rx_header_t header;
  nw_hecb_t hecb;
  nw_waiter_t w, w_next;
  nw_ep_owned_t waited;

  if (invalid_user_access(current_task()->map, (vm_offset_t) epp,
			  (vm_offset_t) epp + nep*sizeof(nw_ep) - 1,
			  VM_PROT_READ)) {
    rc = NW_BUFFER_ERROR;
  } else {
    nw_lock();
    /* First pass: return immediately if any endpoint has input. */
    for (i = 0; i < nep; i++) {
      ep = epp[i];
      if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
	rc = NW_BUFFER_ERROR;
	break;
      } else {
	while (pv != NULL && pv->owner != current_task())
	  pv = pv->next;
	if (pv == NULL) {
	  rc = NW_BUFFER_ERROR;
	  break;
	} else {
	  ecb = &ect[ep];
	  header = ecb->rx_first;
	  if (header != NULL) {
	    /* Input ready: translate, dequeue, and stop scanning. */
	    rc = (nw_buffer_t) ((char *) header->buffer - ecb->buf_start +
				pv->buf_start);
	    ecb->rx_first = header->next;
	    if (ecb->rx_first == NULL)
	      ecb->rx_last = NULL;
	    nc_rx_header_deallocate(header);
	    break;
	  }
	}
      }
    }
    if (i == nep) {		/* scanned them all; nothing ready */
      if (time_out == 0) {
	rc = NULL;
      } else {
	/* Reserve nep waiter and waited records atomically: count
	   them off the free lists, restoring both on shortfall. */
	w = nw_free_waiter;
	waited = nw_free_waited;
	i = 0;
	while (i < nep &&
	       nw_free_waiter != NULL && nw_free_waited != NULL) {
	  nw_free_waiter = nw_free_waiter->next;
	  nw_free_waited = nw_free_waited->next;
	  i++;
	}
	if (i < nep) {
	  nw_free_waiter = w;
	  nw_free_waited = waited;
	  rc = NW_BUFFER_ERROR;
	} else {
	  /* Queue this thread on every endpoint's rx waiter list. */
	  current_thread()->nw_ep_waited = waited;
	  for (i = 0; i < nep; i++) {
	    ep = epp[i];
	    waited->ep = ep;
	    if (i < nep-1)
	      waited = waited->next;
	    else
	      waited->next = NULL;	/* terminate the reserved run */
	    w->waiter = current_thread();
	    w_next = w->next;
	    w->next = NULL;
	    hecb = &hect[ep];
	    if (hecb->rx_last == NULL)
	      hecb->rx_first = hecb->rx_last = w;
	    else
	      hecb->rx_last = hecb->rx_last->next = w;
	    w = w_next;
	  }
	  assert_wait(0, TRUE);
	  if (time_out != -1) {
	    current_thread()->wait_result = NULL;
	    if (!(current_thread()->timer.te_flags & TELT_SET))
	      thread_set_timeout(time_out);
	  }
	  simple_unlock(&nw_simple_lock);
	  thread_block(mk_return);
	}
      }
    }
    nw_unlock();
  }
  return rc;
}
1120
1121
1122 /*** System-dependent support ***/
1123
1124 void mk_endpoint_collect(task_t task) {
1125
1126 while (task->nw_ep_owned != NULL) {
1127 mk_endpoint_deallocate_internal(task->nw_ep_owned->ep, task, TRUE);
1128 }
1129 }
1130
/*
 * mk_waited_collect: thread-termination cleanup.  For every endpoint
 * the thread recorded in its nw_ep_waited list, unlink the thread
 * from that endpoint's rx waiter queue and return the waiter record
 * to the pool; finally splice the whole waited list back onto
 * nw_free_waited.  Only rx queues are scanned -- the tx blocking
 * path in mk_send leaves nw_ep_waited NULL.
 */
void mk_waited_collect(thread_t thread) {
  nw_hecb_t hecb;
  nw_waiter_t w, w_previous;
  nw_ep_owned_t waited, waited_previous;

  waited = thread->nw_ep_waited;
  if (waited != NULL) {
    while (waited != NULL) {
      hecb = &hect[waited->ep];
      /* Find this thread on the endpoint's rx queue, if present. */
      w = hecb->rx_first;
      w_previous = NULL;
      while (w != NULL && w->waiter != thread) {
	w_previous = w;
	w = w->next;
      }
      if (w != NULL) {
	if (w_previous == NULL)
	  hecb->rx_first = w->next;
	else
	  w_previous->next = w->next;
	if (w->next == NULL)
	  hecb->rx_last = w_previous;
	w->next = nw_free_waiter;
	nw_free_waiter = w;
      }
      waited_previous = waited;
      waited = waited->next;
    }
    /* Return the entire waited chain to the free pool in one splice. */
    waited_previous->next = nw_free_waited;
    nw_free_waited = thread->nw_ep_waited;
    thread->nw_ep_waited = NULL;
  }
}
1164
/*
 * mk_return: continuation installed by the blocking paths above.
 * When the thread is resumed it completes the interrupted system
 * call, handing the wait_result deposited by mk_deliver_result (or
 * the timeout path) back to user space.  Never returns.
 */
no_return mk_return(void) {

  thread_syscall_return(current_thread()->wait_result);
}
1169
1170
/*
 * mk_deliver_result: hand result to a thread blocked in one of the
 * wrapper calls and make it runnable if possible.  The value lands
 * in thread->wait_result, which mk_return later passes to user
 * space.  Any pending thread timer is cancelled first.  Runs at
 * splsched with the thread locked.  Returns TRUE iff the thread was
 * placed on a run queue here.
 */
boolean_t mk_deliver_result(thread_t thread, int result) {
  boolean_t rc;
  int state, s;

  s = splsched();
  thread_lock(thread);
  state = thread->state;

  timer_elt_remove(&thread->timer);	/* cancel any pending timeout */

  switch (state & TH_SCHED_STATE) {
  case TH_WAIT | TH_SUSP | TH_UNINT:
  case TH_WAIT | TH_UNINT:
  case TH_WAIT:
    /*
     * Sleeping and not suspendable - put on run queue.
     */
    thread->state = (state &~ TH_WAIT) | TH_RUN;
    thread->wait_result = (kern_return_t) result;
    thread_setrun(thread, TRUE);
    rc = TRUE;
    break;

  case TH_WAIT | TH_SUSP:
  case TH_RUN | TH_WAIT:
  case TH_RUN | TH_WAIT | TH_SUSP:
  case TH_RUN | TH_WAIT | TH_UNINT:
  case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
    /*
     * Either already running, or suspended.
     */
    thread->state = state &~ TH_WAIT;
    thread->wait_result = (kern_return_t) result;
    rc = FALSE;
    break;

  default:
    /*
     * Not waiting.
     */
    rc = FALSE;
    break;
  }
  thread_unlock(thread);
  splx(s);
  return rc;
}
1218
1219
1220 boolean_t nc_deliver_result(nw_ep ep, nw_delivery type, int result) {
1221 boolean_t rc;
1222 nw_hecb_t hecb;
1223 nw_ecb_t ecb;
1224 nw_waiter_t w;
1225 thread_t thread;
1226 task_t task;
1227 nw_pv_t pv;
1228 nw_buffer_t buf;
1229 nw_rx_header_t rx_header;
1230 nw_tx_header_t tx_header;
1231 nw_ep lep;
1232
1233 hecb = &hect[ep];
1234 ecb = &ect[ep];
1235
1236 thread = NULL;
1237 if (type == NW_RECEIVE || type == NW_RECEIVE_URGENT) {
1238 w = hecb->rx_first;
1239 if (w != NULL) {
1240 thread = w->waiter;
1241 hecb->rx_first = w->next;
1242 if (hecb->rx_first == NULL)
1243 hecb->rx_last = NULL;
1244 w->next = nw_free_waiter;
1245 nw_free_waiter = w;
1246 task = thread->task;
1247 pv = hecb->pv;
1248 while (pv != NULL && pv->owner != task)
1249 pv = pv->next;
1250 if (pv == NULL) {
1251 rc = FALSE;
1252 } else {
1253 buf = (nw_buffer_t) ((char *) result - ecb->buf_start + pv->buf_start);
1254 rc = mk_deliver_result(thread, (int) buf);
1255 }
1256 } else {
1257 rx_header = nc_rx_header_allocate();
1258 if (rx_header == NULL) {
1259 rc = FALSE;
1260 } else {
1261 rx_header->buffer = (nw_buffer_t) result;
1262 if (type == NW_RECEIVE) {
1263 rx_header->next = NULL;
1264 if (ecb->rx_last == NULL)
1265 ecb->rx_first = rx_header;
1266 else
1267 ecb->rx_last->next = rx_header;
1268 ecb->rx_last = rx_header;
1269 } else {
1270 rx_header->next = ecb->rx_first;
1271 if (ecb->rx_first == NULL)
1272 ecb->rx_last = rx_header;
1273 ecb->rx_first = rx_header;
1274 }
1275 rc = TRUE;
1276 }
1277 }
1278 } else if (type == NW_SEND) {
1279 w = hecb->tx_first;
1280 if (w == NULL) {
1281 rc = FALSE;
1282 } else {
1283 thread = w->waiter;
1284 hecb->tx_first = w->next;
1285 if (hecb->tx_first == NULL)
1286 hecb->tx_last = NULL;
1287 w->next = nw_free_waiter;
1288 nw_free_waiter = w;
1289 rc = mk_deliver_result(thread, result);
1290 }
1291 tx_header = ect[ep].tx_initial;
1292 if (result == NW_SUCCESS) {
1293 lep = tx_header->peer.local_ep;
1294 while (tx_header != NULL) {
1295 if (tx_header->buffer != NULL)
1296 nc_buffer_deallocate(lep, tx_header->buffer);
1297 tx_header = tx_header->next;
1298 }
1299 }
1300 nc_tx_header_deallocate(ect[ep].tx_initial);
1301 ect[ep].tx_initial = ect[ep].tx_current = NULL;
1302 } else if (type == NW_SIGNAL) {
1303 thread = hecb->sig_waiter;
1304 hecb->sig_waiter = NULL;
1305 if (thread == NULL) {
1306 rc = FALSE;
1307 } else {
1308 rc = mk_deliver_result(thread, result);
1309 }
1310 }
1311 return rc;
1312 }
1313
/*
 * Timer callback: run one fast sweep of the network controller under the
 * global network lock (splimp + nw_simple_lock).
 */
void mk_fast_sweep() {

  nw_lock();
  nc_fast_sweep();
  nw_unlock();
}
1320
/*
 * Arm the 10 ms fast-sweep timer, unless it is already pending.
 * Compiled out of non-PRODUCTION builds.
 */
void h_fast_timer_set() {

#ifdef PRODUCTION
  /* FIX: the condition was missing a closing parenthesis, and tested a
     `flags' field where the timer element's flag word is `te_flags'
     (cf. the TELT_SET test on current_thread()->timer.te_flags above). */
  if (!(nw_fast_timer.te_flags & TELT_SET)) {
    time_spec_t interval;
    interval.seconds = 0;
    interval.nanoseconds = 10 * 1000000;  /* 10 milliseconds */
    (void) timer_elt_enqueue(&nw_fast_timer, interval, FALSE);
  }
#endif
}
1332
/*
 * Cancel the pending fast-sweep timeout.
 */
void h_fast_timer_reset() {

  timer_elt_remove(&nw_fast_timer);
}
1337
/*
 * Timer callback: run the periodic slow sweep of the network controller
 * under the network lock, then rearm the slow timer for two seconds out.
 * Compiled out of non-PRODUCTION builds.
 */
void mk_slow_sweep() {

#ifdef PRODUCTION
  time_spec_t next_period;

  nw_lock();
  nc_slow_sweep();
  nw_unlock();

  next_period.seconds = 2;
  next_period.nanoseconds = 0;
  (void) timer_elt_enqueue(&nw_slow_timer, next_period, FALSE);
#endif
}
1353
Cache object: 2578d48200af6d94912e473ae0f60fe6
|