FreeBSD/Linux Kernel Cross Reference
sys/chips/nw_mk.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: nw_mk.c,v $
29 * Revision 2.2 93/08/10 15:18:43 mrt
30 * Initial check-in.
31 * [93/06/09 16:00:26 jcb]
32 *
33 *
34 */
35
/*** MACH KERNEL WRAPPER ***/

#ifndef STUB
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/eventcount.h>
#include <kern/time_out.h>
#include <machine/machspl.h>		/* spl definitions */
#include <vm/vm_kern.h>
#include <chips/nc.h>
#include <chips/nw_mk.h>

/* Single lock serializing all wrapper state; taken at splimp so network
   interrupts cannot preempt a holder. */
decl_simple_lock_data(, nw_simple_lock);
u_int previous_spl;		/* spl level saved by nw_lock() for nw_unlock() */

#define nw_lock() \
	previous_spl = splimp(); \
	simple_lock(&nw_simple_lock)

#define nw_unlock() \
	simple_unlock(&nw_simple_lock); \
	splx(previous_spl)

/* Per-task protection view of an endpoint: which task owns it and where
   the projected buffer is mapped in that task's address space. */
typedef struct nw_pvs {
  task_t owner;
  char *buf_start;
  char *buf_end;
  struct nw_pvs *next;
} nw_pv_s, *nw_pv_t;

/* One thread blocked on an endpoint (element of an rx or tx wait queue). */
typedef struct nw_waiters {
  thread_t waiter;
  struct nw_waiters *next;
} nw_waiter_s, *nw_waiter_t;

/* Host endpoint control block: protection views plus wait queues. */
typedef struct {
  nw_pv_t pv;			/* list of tasks mapping this endpoint */
  thread_t sig_waiter;		/* thread blocked in open/accept/close */
  nw_waiter_t rx_first;		/* receive wait queue head */
  nw_waiter_t rx_last;		/* receive wait queue tail */
  nw_waiter_t tx_first;		/* transmit wait queue head */
  nw_waiter_t tx_last;		/* transmit wait queue tail */
} nw_hecb, *nw_hecb_t;

#else
#include "nc.h"
#include "nw_mk.h"
#endif

/*** Types and data structures ***/

int h_initialized = FALSE;		/* one-shot guard for h_initialize() */
nw_pv_s nw_pv[2*MAX_EP];		/* protection-view pool */
nw_pv_t nw_free_pv;			/* free list threaded through nw_pv[] */
nw_waiter_s nw_waiter[2*MAX_EP];	/* waiter pool */
nw_waiter_t nw_free_waiter;		/* free list threaded through nw_waiter[] */
nw_ep_owned_s nw_waited[3*MAX_EP];	/* endpoint-ownership record pool */
nw_ep_owned_t nw_free_waited;		/* free list threaded through nw_waited[] */
nw_hecb hect[MAX_EP];			/* host endpoint control table */
timer_elt_data_t nw_fast_timer, nw_slow_timer;
97
98 /*** Initialization ***/
99
100 void h_initialize() {
101 int ep, last_ep;
102
103 if (!h_initialized) {
104 last_ep = sizeof(nw_pv)/sizeof(nw_pv_s) - 1;
105 for (ep = 0; ep < last_ep; ep++) {
106 nw_pv[ep].next = &nw_pv[ep+1];
107 }
108 nw_pv[last_ep].next = NULL;
109 nw_free_pv = &nw_pv[0];
110 last_ep = sizeof(nw_waiter)/sizeof(nw_waiter_s) - 1;
111 for (ep = 0; ep < last_ep; ep++) {
112 nw_waiter[ep].next = &nw_waiter[ep+1];
113 }
114 nw_waiter[last_ep].next = NULL;
115 nw_free_waiter = &nw_waiter[0];
116 last_ep = sizeof(nw_waited)/sizeof(nw_ep_owned_s) - 1;
117 for (ep = 0; ep < last_ep; ep++) {
118 nw_waited[ep].next = &nw_waited[ep+1];
119 }
120 nw_waited[last_ep].next = NULL;
121 nw_free_waited = &nw_waited[0];
122 last_ep = sizeof(hect)/sizeof(nw_hecb);
123 for (ep = 0; ep < last_ep; ep++) {
124 hect[ep].pv = NULL;
125 hect[ep].sig_waiter = NULL;
126 hect[ep].rx_first = NULL;
127 hect[ep].rx_last = NULL;
128 hect[ep].tx_first = NULL;
129 hect[ep].tx_last = NULL;
130 }
131 nw_fast_timer.fcn = mk_fast_sweep;
132 nw_fast_timer.param = NULL;
133 nw_fast_timer.set = TELT_UNSET;
134 nw_slow_timer.fcn = mk_slow_sweep;
135 nw_slow_timer.param = NULL;
136 #if PRODUCTION
137 set_timeout(&nw_slow_timer, 2*hz);
138 #endif
139 h_initialized = TRUE;
140 }
141 }
142
143 /*** User-trappable functions ***/
144
145 nw_result mk_update(mach_port_t master_port, nw_update_type up_type,
146 int *up_info) {
147 nw_result rc;
148
149 if (master_port == 0) { /* XXX */
150 rc = NW_FAILURE;
151 } else {
152 nw_lock();
153 switch (up_type) {
154 case NW_HOST_ADDRESS_REGISTER:
155 case NW_HOST_ADDRESS_UNREGISTER:
156 if (invalid_user_access(current_task()->map, (vm_offset_t) up_info,
157 (vm_offset_t) up_info + sizeof(nw_address_s) - 1,
158 VM_PROT_READ | VM_PROT_WRITE)) {
159 rc = NW_INVALID_ARGUMENT;
160 } else {
161 rc = nc_update(up_type, up_info);
162 }
163 break;
164 case NW_INITIALIZE:
165 nc_initialize();
166 rc = NW_SUCCESS;
167 break;
168 default:
169 rc = NW_INVALID_ARGUMENT;
170 }
171 nw_unlock();
172 }
173 return rc;
174 }
175
176
177
178 nw_result mk_lookup(nw_lookup_type lt, int *look_info) {
179 nw_result rc;
180 int max_size, dev;
181
182 nw_lock();
183 switch (lt) {
184 case NW_HOST_ADDRESS_LOOKUP:
185 if (invalid_user_access(current_task()->map, (vm_offset_t) look_info,
186 (vm_offset_t) look_info + sizeof(nw_address_s) - 1,
187 VM_PROT_READ | VM_PROT_WRITE)) {
188 rc = NW_INVALID_ARGUMENT;
189 } else {
190 rc = nc_lookup(lt, look_info);
191 }
192 break;
193 case NW_STATUS:
194 max_size = sizeof(nw_device);
195 if (max_size < sizeof(nw_result))
196 max_size = sizeof(nw_result);
197 if (invalid_user_access(current_task()->map, (vm_offset_t) look_info,
198 (vm_offset_t) look_info + max_size - 1,
199 VM_PROT_READ | VM_PROT_WRITE) ||
200 (dev = look_info[0]) >= MAX_DEV || dev < 0) {
201 rc = NW_INVALID_ARGUMENT;
202 } else {
203 if (devct[dev].status != NW_SUCCESS) {
204 look_info[0] = (int) devct[dev].status;
205 rc = NW_SUCCESS;
206 } else {
207 rc = (*(devct[dev].entry->status)) (dev);
208 }
209 }
210 break;
211 default:
212 rc = NW_INVALID_ARGUMENT;
213 }
214 nw_unlock();
215 return rc;
216 }
217
218
/*
 * mk_endpoint_allocate_internal: allocate endpoint *epp (0 = "any") with
 * a projected buffer of buffer_size bytes (rounded up to whole pages,
 * default one page) double-mapped kernel/user.  For system endpoints no
 * protection view is recorded; for user endpoints the calling task is
 * registered as owner and the endpoint is appended to its owned list.
 * Returns NW_SUCCESS, NW_BAD_EP, NW_NO_EP, NW_NO_RESOURCES, or a lower
 * layer failure.  Takes and releases the nw lock.
 */
nw_result mk_endpoint_allocate_internal(nw_ep_t epp, nw_protocol protocol,
					nw_acceptance accept,
					u_int buffer_size, boolean_t system) {
  nw_result rc;
  u_int ep;
  vm_offset_t kernel_addr, user_addr;
  nw_pv_t pv;
  nw_ep_owned_t owned;

  ep = *epp;
  /* Round the buffer up to a page multiple (0x1000 bytes). */
  if (buffer_size == 0)
    buffer_size = 0x1000;
  else
    buffer_size = (buffer_size + 0xfff) & ~0xfff;
  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) != NULL) {
    /* Out of range, or a specific endpoint that is already mapped. */
    rc = NW_BAD_EP;
  } else if (nw_free_pv == NULL || nw_free_waited == NULL) {
    rc = NW_NO_EP;
  } else if (projected_buffer_allocate(current_task()->map, buffer_size, 0,
				       &kernel_addr, &user_addr,
				       VM_PROT_READ | VM_PROT_WRITE,
				       VM_INHERIT_NONE) != KERN_SUCCESS) {
    rc = NW_NO_RESOURCES;
  } else {
    rc = nc_endpoint_allocate(epp, protocol, accept,
			      (char *) kernel_addr, buffer_size);
    if (rc == NW_NO_EP && (ep = *epp) != 0) {
      /* A specific endpoint was requested but is busy in the common
	 layer: close its connection (blocking if the close is
	 asynchronous), reclaim it, then retry the allocation. */
      rc = (*(devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->
	      close)) (ep);
      if (rc == NW_SYNCH) {
	hect[ep].sig_waiter = current_thread();
	assert_wait(0, TRUE);
	simple_unlock(&nw_simple_lock);
	thread_block((void (*)()) 0);
      }
      rc = nc_endpoint_deallocate(ep);
      if (rc == NW_SUCCESS) {
	nc_line_update(&ect[ep].conn->peer, 0);
	rc = nc_endpoint_allocate(epp, protocol, accept,
				  (char *) kernel_addr, buffer_size);
      }
    }
    if (rc == NW_SUCCESS) {
      ep = *epp;
      if (system) {
	/* Kernel-internal endpoint: no user protection view. */
	hect[ep].pv = NULL;
      } else {
	/* Record the calling task's view of the projected buffer. */
	hect[ep].pv = nw_free_pv;
	nw_free_pv = nw_free_pv->next;
	hect[ep].pv->owner = current_task();
	hect[ep].pv->buf_start = (char *) user_addr;
	hect[ep].pv->buf_end = (char *) user_addr + buffer_size;
	hect[ep].pv->next = NULL;
      }
      hect[ep].sig_waiter = NULL;
      hect[ep].rx_first = NULL;
      hect[ep].rx_last = NULL;
      hect[ep].tx_first = NULL;
      hect[ep].tx_last = NULL;
      /* Link the endpoint into the task's owned list so teardown can
	 reclaim it (mk_endpoint_collect). */
      owned = nw_free_waited;
      nw_free_waited = nw_free_waited->next;
      owned->ep = ep;
      owned->next = current_task()->nw_ep_owned;
      current_task()->nw_ep_owned = owned;
    } else {
      /* Allocation failed: release the projected buffer. */
      projected_buffer_deallocate(current_task()->map, user_addr,
				  user_addr + buffer_size);
    }
  }
  nw_unlock();
  return rc;
}
292
293
294 nw_result mk_endpoint_allocate(nw_ep_t epp, nw_protocol protocol,
295 nw_acceptance accept, u_int buffer_size) {
296 nw_result rc;
297
298 if (invalid_user_access(current_task()->map, (vm_offset_t) epp,
299 (vm_offset_t) epp + sizeof(nw_ep) - 1,
300 VM_PROT_READ | VM_PROT_WRITE) ||
301 (protocol != NW_RAW && protocol != NW_DATAGRAM &&
302 protocol != NW_SEQ_PACKET) || (accept != NW_NO_ACCEPT &&
303 accept != NW_APPL_ACCEPT && accept != NW_AUTO_ACCEPT)) {
304 rc = NW_INVALID_ARGUMENT;
305 } else {
306 rc = mk_endpoint_allocate_internal(epp, protocol, accept,
307 buffer_size, FALSE);
308 }
309 return rc;
310 }
311
312 nw_result mk_endpoint_deallocate_internal(nw_ep ep, task_t task,
313 boolean_t shutdown) {
314 nw_result rc;
315 nw_pv_t pv, pv_previous;
316 nw_ep_owned_t owned, owned_previous;
317 nw_waiter_t w, w_previous, w_next;
318
319 nw_lock();
320 if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
321 rc = NW_BAD_EP;
322 } else {
323 pv_previous = NULL;
324 while (pv != NULL && pv->owner != task) {
325 pv_previous = pv;
326 pv = pv->next;
327 }
328 if (pv == NULL) {
329 rc = NW_PROT_VIOLATION;
330 } else {
331 if (projected_buffer_deallocate(task->map, pv->buf_start,
332 pv->buf_end) != KERN_SUCCESS) {
333 rc = NW_INCONSISTENCY;
334 printf("Endpoint deallocate: inconsistency p. buffer\n");
335 } else {
336 if (pv_previous == NULL)
337 hect[ep].pv = pv->next;
338 else
339 pv_previous->next = pv->next;
340 pv->next = nw_free_pv;
341 nw_free_pv = pv;
342 owned = task->nw_ep_owned;
343 owned_previous = NULL;
344 while (owned != NULL && owned->ep != ep) {
345 owned_previous = owned;
346 owned = owned->next;
347 }
348 if (owned == NULL) {
349 rc = NW_INCONSISTENCY;
350 printf("Endpoint deallocate: inconsistency owned\n");
351 } else {
352 if (owned_previous == NULL)
353 task->nw_ep_owned = owned->next;
354 else
355 owned_previous->next = owned->next;
356 owned->next = nw_free_waited;
357 nw_free_waited = owned;
358 if (hect[ep].sig_waiter != NULL &&
359 hect[ep].sig_waiter->task == task) {
360 /* if (!shutdown)*/
361 mk_deliver_result(hect[ep].sig_waiter, NW_ABORTED);
362 hect[ep].sig_waiter = NULL;
363 }
364 w = hect[ep].rx_first;
365 w_previous = NULL;
366 while (w != NULL) {
367 if (w->waiter->task == task) {
368 /* if (!shutdown)*/
369 mk_deliver_result(w->waiter, NULL);
370 w_next = w->next;
371 if (w_previous == NULL)
372 hect[ep].rx_first = w_next;
373 else
374 w_previous->next = w_next;
375 w->next = nw_free_waiter;
376 nw_free_waiter = w;
377 w = w_next;
378 } else {
379 w_previous = w;
380 w = w->next;
381 }
382 }
383 if (hect[ep].rx_first == NULL)
384 hect[ep].rx_last = NULL;
385 w = hect[ep].tx_first;
386 w_previous = NULL;
387 while (w != NULL) {
388 if (w->waiter->task == task) {
389 /* if (!shutdown)*/
390 mk_deliver_result(w->waiter, NW_ABORTED);
391 w_next = w->next;
392 if (w_previous == NULL)
393 hect[ep].tx_first = w_next;
394 else
395 w_previous->next = w_next;
396 w->next = nw_free_waiter;
397 nw_free_waiter = w;
398 w = w_next;
399 } else {
400 w_previous = w;
401 w = w->next;
402 }
403 }
404 if (hect[ep].tx_first == NULL)
405 hect[ep].tx_last = NULL;
406 if (hect[ep].pv == NULL) {
407 if (ect[ep].state != NW_UNCONNECTED) {
408 rc = (*(devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->
409 close)) (ep);
410 if (rc == NW_SYNCH) {
411 hect[ep].sig_waiter = current_thread();
412 assert_wait(0, TRUE);
413 simple_unlock(&nw_simple_lock);
414 thread_block((void (*)()) 0);
415 }
416 }
417 rc = nc_endpoint_deallocate(ep);
418 }
419 }
420 }
421 }
422 }
423 nw_unlock();
424 return rc;
425 }
426
427 nw_result mk_endpoint_deallocate(nw_ep ep) {
428
429 mk_endpoint_deallocate_internal(ep, current_task(), FALSE);
430 }
431
432
433 nw_buffer_t mk_buffer_allocate(nw_ep ep, u_int size) {
434 nw_buffer_t buf;
435 nw_pv_t pv;
436
437 nw_lock();
438 if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
439 buf = NW_BUFFER_ERROR;
440 } else {
441 while (pv != NULL && pv->owner != current_task())
442 pv = pv->next;
443 if (pv == NULL) {
444 buf = NW_BUFFER_ERROR;
445 } else {
446 buf = nc_buffer_allocate(ep, size);
447 if (buf != NULL) {
448 buf = (nw_buffer_t) ((char *) buf - ect[ep].buf_start + pv->buf_start);
449 }
450 }
451 }
452 nw_unlock();
453 return buf;
454 }
455
456
457
458 nw_result mk_buffer_deallocate(nw_ep ep, nw_buffer_t buffer) {
459 nw_result rc;
460 nw_pv_t pv;
461
462 nw_lock();
463 if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
464 rc = NW_BAD_EP;
465 } else {
466 while (pv != NULL && pv->owner != current_task())
467 pv = pv->next;
468 if (pv == NULL) {
469 rc = NW_PROT_VIOLATION;
470 } else {
471 if ((char *) buffer < pv->buf_start ||
472 (char *) buffer + sizeof(nw_buffer_s) > pv->buf_end ||
473 !buffer->buf_used ||
474 (char *) buffer + buffer->buf_length > pv->buf_end) {
475 rc = NW_BAD_BUFFER;
476 } else {
477 buffer = (nw_buffer_t) ((char *) buffer - pv->buf_start +
478 ect[ep].buf_start);
479 rc = nc_buffer_deallocate(ep, buffer);
480 }
481 }
482 }
483 nw_unlock();
484 return rc;
485 }
486
487
/*
 * mk_connection_open_internal: open a connection from local_ep to the
 * given remote address/endpoint via the owning device driver.  If the
 * driver answers NW_SYNCH the calling thread records itself as the
 * endpoint's signal waiter and blocks until nc_deliver_result wakes it.
 * NOTE(review): caller is expected to hold the nw lock -- the lock is
 * dropped here before blocking and not reacquired; confirm at call sites.
 */
nw_result mk_connection_open_internal(nw_ep local_ep, nw_address_1 rem_addr_1,
				      nw_address_2 rem_addr_2, nw_ep remote_ep) {
  nw_result rc;

  rc = (*devct[NW_DEVICE(rem_addr_1)].entry->open) (local_ep,
						    rem_addr_1, rem_addr_2,
						    remote_ep);
  if (rc == NW_SYNCH) {
    hect[local_ep].sig_waiter = current_thread();
    assert_wait(0, TRUE);
    simple_unlock(&nw_simple_lock);
    thread_block((void (*)()) 0);
  }
  return rc;
}
503
504 nw_result mk_connection_open(nw_ep local_ep, nw_address_1 rem_addr_1,
505 nw_address_2 rem_addr_2, nw_ep remote_ep) {
506 nw_result rc;
507 nw_pv_t pv;
508
509 nw_lock();
510 if (local_ep >= MAX_EP || (pv = hect[local_ep].pv) == NULL) {
511 rc = NW_BAD_EP;
512 } else {
513 while (pv != NULL && pv->owner != current_task())
514 pv = pv->next;
515 if (pv == NULL) {
516 rc = NW_PROT_VIOLATION;
517 } else {
518 rc = (*(devct[NW_DEVICE(rem_addr_1)].entry->open))
519 (local_ep, rem_addr_1, rem_addr_2, remote_ep);
520 if (rc == NW_SYNCH) {
521 hect[local_ep].sig_waiter = current_thread();
522 assert_wait(0, TRUE);
523 current_thread()->nw_ep_waited = NULL;
524 simple_unlock(&nw_simple_lock);
525 thread_block(mk_return);
526 }
527 }
528 }
529 nw_unlock();
530 return rc;
531 }
532
533
534 nw_result mk_connection_accept(nw_ep ep, nw_buffer_t msg,
535 nw_ep_t new_epp) {
536 nw_result rc;
537 nw_pv_t pv;
538
539 nw_lock();
540 if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
541 rc = NW_BAD_EP;
542 } else {
543 while (pv != NULL && pv->owner != current_task())
544 pv = pv->next;
545 if (pv == NULL) {
546 rc = NW_PROT_VIOLATION;
547 } else if ((char *) msg < pv->buf_start ||
548 (char *) msg + sizeof(nw_buffer_s) > pv->buf_end ||
549 !msg->buf_used ||
550 (char *) msg + msg->buf_length > pv->buf_end) {
551 rc = NW_BAD_BUFFER;
552 } else if (new_epp != NULL &&
553 (invalid_user_access(current_task()->map, (vm_offset_t) new_epp,
554 (vm_offset_t) new_epp + sizeof(nw_ep) - 1,
555 VM_PROT_READ | VM_PROT_WRITE) ||
556 (*new_epp != 0 && *new_epp != ep))) {
557 rc = NW_INVALID_ARGUMENT;
558 } else {
559 rc = (*(devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->accept))
560 (ep, msg, new_epp);
561 if (rc == NW_SYNCH) {
562 hect[ep].sig_waiter = current_thread();
563 assert_wait(0, TRUE);
564 current_thread()->nw_ep_waited = NULL;
565 simple_unlock(&nw_simple_lock);
566 thread_block(mk_return);
567 }
568 }
569 }
570 nw_unlock();
571 return rc;
572 }
573
574 nw_result mk_connection_close(nw_ep ep) {
575 nw_result rc;
576 nw_pv_t pv;
577
578 nw_lock();
579 if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
580 rc = NW_BAD_EP;
581 } else {
582 while (pv != NULL && pv->owner != current_task())
583 pv = pv->next;
584 if (pv == NULL) {
585 rc = NW_PROT_VIOLATION;
586 } else {
587 rc = (*devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->close)
588 (ep);
589 if (rc == NW_SYNCH) {
590 hect[ep].sig_waiter = current_thread();
591 assert_wait(0, TRUE);
592 current_thread()->nw_ep_waited = NULL;
593 simple_unlock(&nw_simple_lock);
594 thread_block(mk_return);
595 }
596 }
597 }
598 nw_unlock();
599 return rc;
600 }
601
602
603 nw_result mk_multicast_add(nw_ep local_ep, nw_address_1 rem_addr_1,
604 nw_address_2 rem_addr_2, nw_ep remote_ep) {
605 nw_result rc;
606 nw_pv_t pv;
607
608 nw_lock();
609 if (local_ep >= MAX_EP || (pv = hect[local_ep].pv) == NULL) {
610 rc = NW_BAD_EP;
611 } else {
612 while (pv != NULL && pv->owner != current_task())
613 pv = pv->next;
614 if (pv == NULL) {
615 rc = NW_PROT_VIOLATION;
616 } else {
617 rc = (*(devct[NW_DEVICE(rem_addr_1)].entry->add))
618 (local_ep, rem_addr_1, rem_addr_2, remote_ep);
619 if (rc == NW_SYNCH) {
620 hect[local_ep].sig_waiter = current_thread();
621 assert_wait(0, TRUE);
622 current_thread()->nw_ep_waited = NULL;
623 simple_unlock(&nw_simple_lock);
624 thread_block(mk_return);
625 }
626 }
627 }
628 nw_unlock();
629 return rc;
630 }
631
632
633 nw_result mk_multicast_drop(nw_ep local_ep, nw_address_1 rem_addr_1,
634 nw_address_2 rem_addr_2, nw_ep remote_ep) {
635 nw_result rc;
636 nw_pv_t pv;
637
638 nw_lock();
639 if (local_ep >= MAX_EP || (pv = hect[local_ep].pv) == NULL) {
640 rc = NW_BAD_EP;
641 } else {
642 while (pv != NULL && pv->owner != current_task())
643 pv = pv->next;
644 if (pv == NULL) {
645 rc = NW_PROT_VIOLATION;
646 } else {
647 rc = (*(devct[NW_DEVICE(rem_addr_1)].entry->drop))
648 (local_ep, rem_addr_1, rem_addr_2, remote_ep);
649 if (rc == NW_SYNCH) {
650 hect[local_ep].sig_waiter = current_thread();
651 assert_wait(0, TRUE);
652 current_thread()->nw_ep_waited = NULL;
653 simple_unlock(&nw_simple_lock);
654 thread_block(mk_return);
655 }
656 }
657 }
658 nw_unlock();
659 return rc;
660 }
661
662
663 nw_result mk_endpoint_status(nw_ep ep, nw_state_t state,
664 nw_peer_t peer) {
665 nw_result rc;
666 nw_pv_t pv;
667
668 nw_lock();
669 if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
670 rc = NW_BAD_EP;
671 } else {
672 while (pv != NULL && pv->owner != current_task())
673 pv = pv->next;
674 if (pv == NULL) {
675 rc = NW_PROT_VIOLATION;
676 } else {
677 if (invalid_user_access(current_task()->map, (vm_offset_t) state,
678 (vm_offset_t) state + sizeof(nw_state) - 1,
679 VM_PROT_WRITE) ||
680 invalid_user_access(current_task()->map, (vm_offset_t) peer,
681 (vm_offset_t) peer + sizeof(nw_peer_s) - 1,
682 VM_PROT_WRITE)) {
683 rc = NW_INVALID_ARGUMENT;
684 } else {
685 rc = nc_endpoint_status(ep, state, peer);
686 }
687 }
688 }
689 nw_unlock();
690 return rc;
691 }
692
693
/*
 * mk_send: transmit the (possibly chained) user buffer msg on endpoint
 * ep.  Each buffer in the chain is validated against the caller's
 * mapping and 4-byte alignment rules, then described by a tx header
 * whose addresses are translated back into kernel space.  For datagram
 * traffic over connection-oriented devices a "line" endpoint is looked
 * up and opened on demand.  If the device answers NW_SYNCH/NW_QUEUED
 * the thread joins the sender's tx wait queue and blocks; mk_return
 * later completes the syscall with the delivered result.
 */
nw_result mk_send(nw_ep ep, nw_buffer_t msg, nw_options options) {
  nw_result rc;
  nw_pv_t pv;
  nw_ep sender;
  int dev;
  nw_ecb_t ecb;
  nw_tx_header_t header, first_header, previous_header;
  nw_hecb_t hecb;
  nw_waiter_t w;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BAD_EP;
  } else {
    /* The caller must have a protection view of the endpoint. */
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_PROT_VIOLATION;
    } else {
      ecb = &ect[ep];
      if (ecb->state == NW_INEXISTENT ||
	  (ecb->protocol == NW_SEQ_PACKET && ecb->conn == NULL)) {
	rc = NW_BAD_EP;
      } else {
	/* Build one tx header per buffer in the user's chain. */
	first_header = header = nc_tx_header_allocate();
	previous_header = NULL;
	rc = NW_SUCCESS;
	while (header != NULL) {
	  if ((char *) msg < pv->buf_start ||
	      (char *) msg + sizeof(nw_buffer_s) > pv->buf_end ||
	      ((int) msg & 0x3) || (msg->block_offset & 0x3) ||
	      (msg->block_length & 0x3) || !msg->buf_used ||
	      (char *) msg + msg->buf_length > pv->buf_end ||
	      msg->block_offset + msg->block_length > msg->buf_length) {
	    rc = NW_BAD_BUFFER;
	    break;
	  } else {
	    if (previous_header == NULL) {
	      /* First fragment fixes the destination peer. */
	      if (ecb->protocol == NW_SEQ_PACKET)
		header->peer = ecb->conn->peer;
	      else
		header->peer = msg->peer;
	    } else {
	      previous_header->next = header;
	    }
	    /* Translate the buffer address back into kernel space. */
	    header->buffer = (nw_buffer_t) ((char *) msg - pv->buf_start +
					    ecb->buf_start);
	    header->block = (char *) header->buffer + msg->block_offset;
	    if (!msg->block_deallocate)
	      header->buffer = NULL;	/* caller keeps ownership */
	    header->msg_length = 0;
	    header->block_length = msg->block_length;
	    first_header->msg_length += header->block_length;
	    header->next = NULL;
	    if (msg->buf_next == NULL)
	      break;
	    msg = msg->buf_next;
	    previous_header = header;
	    header = nc_tx_header_allocate();
	  }
	}
	if (header == NULL) {
	  /* Ran out of tx headers mid-chain. */
	  nc_tx_header_deallocate(first_header);
	  rc = NW_NO_RESOURCES;
	} else if (rc == NW_SUCCESS) {
	  dev = NW_DEVICE(first_header->peer.rem_addr_1);
	  if (ecb->protocol != NW_DATAGRAM ||
	      devct[dev].type != NW_CONNECTION_ORIENTED) {
	    sender = first_header->peer.local_ep;
	    rc = NW_SUCCESS;
	  } else {
	    /* Datagram over a connection-oriented device: find (or
	       create) the shared "line" endpoint to the peer. */
	    sender = nc_line_lookup(&first_header->peer);
	    if (sender == -1) {
	      rc = NW_BAD_ADDRESS;
	    } else if (sender > 0) {
	      rc = NW_SUCCESS;
	    } else {
	      rc = mk_endpoint_allocate_internal(&sender, NW_LINE,
						 NW_AUTO_ACCEPT, 0, TRUE);
	      if (rc == NW_SUCCESS) {
		rc = mk_connection_open_internal(sender,
						 first_header->peer.rem_addr_1,
						 first_header->peer.rem_addr_2,
						 MASTER_LINE_EP);
		if (rc == NW_SUCCESS)
		  nc_line_update(&first_header->peer, sender);
	      }
	    }
	  }
	  if (rc == NW_SUCCESS) {
	    first_header->sender = sender;
	    first_header->options = options;
	    rc = (*(devct[dev].entry->send)) (sender, first_header, options);
	    if ((rc == NW_SYNCH || rc == NW_QUEUED) &&
		nw_free_waiter != NULL) {
	      /* Asynchronous send: append ourselves to the sender's tx
		 wait queue and block. */
	      w = nw_free_waiter;
	      nw_free_waiter = w->next;
	      w->waiter = current_thread();
	      w->next = NULL;
	      hecb = &hect[sender];
	      if (hecb->tx_last == NULL) {
		hecb->tx_first = hecb->tx_last = w;
	      } else {
		hecb->tx_last = hecb->tx_last->next = w;
	      }
	      assert_wait(0, TRUE);
	      current_thread()->nw_ep_waited = NULL;
	      simple_unlock(&nw_simple_lock);
	      thread_block(mk_return);
	    }
	  }
	}
      }
    }
  }
  nw_unlock();
  return rc;
}
812
813
/*
 * mk_receive: dequeue the next received buffer on endpoint ep,
 * translating its kernel address into the caller's mapping.  If nothing
 * is queued and time_out is non-zero, the thread enqueues itself on the
 * endpoint's rx wait queue and blocks (-1 waits forever; otherwise a
 * timeout in ticks is armed).  Returns NW_BUFFER_ERROR on a bad
 * endpoint or protection violation, NULL when nothing is available and
 * the caller chose not to wait (or wait resources are exhausted).
 */
nw_buffer_t mk_receive(nw_ep ep, int time_out) {
  nw_buffer_t rc;
  nw_pv_t pv;
  nw_ecb_t ecb;
  nw_rx_header_t header;
  nw_hecb_t hecb;
  nw_waiter_t w;
  nw_ep_owned_t waited;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BUFFER_ERROR;
  } else {
    /* The caller must have a protection view of the endpoint. */
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_BUFFER_ERROR;
    } else {
      ecb = &ect[ep];
      header = ecb->rx_first;
      if (header != NULL) {
	/* Message waiting: unlink it and map the buffer address into
	   the caller's projected-buffer window. */
	rc = (nw_buffer_t) ((char *) header->buffer - ecb->buf_start +
			    pv->buf_start);
	ecb->rx_first = header->next;
	if (ecb->rx_first == NULL)
	  ecb->rx_last = NULL;
	nc_rx_header_deallocate(header);
      } else if (time_out != 0 && nw_free_waiter != NULL &&
		 (time_out == -1 || nw_free_waited != NULL)) {
	/* Nothing queued: append ourselves to the rx wait queue. */
	w = nw_free_waiter;
	nw_free_waiter = w->next;
	w->waiter = current_thread();
	w->next = NULL;
	hecb = &hect[ep];
	if (hecb->rx_last == NULL)
	  hecb->rx_first = hecb->rx_last = w;
	else
	  hecb->rx_last = hecb->rx_last->next = w;
	assert_wait(0, TRUE);
	if (time_out != -1) {
	  /* Finite wait: record the endpoint so mk_waited_collect can
	     back us out of the queue if the timeout fires. */
	  waited = nw_free_waited;
	  nw_free_waited = waited->next;
	  waited->ep = ep;
	  waited->next = NULL;
	  current_thread()->nw_ep_waited = waited;
	  current_thread()->wait_result = NULL;
	  if (!current_thread()->timer.set)
	    thread_set_timeout(time_out);
	} else {
	  current_thread()->nw_ep_waited = NULL;
	}
	simple_unlock(&nw_simple_lock);
	thread_block(mk_return);	/* syscall completed by mk_return */
      } else {
	rc = NULL;
      }
    }
  }
  nw_unlock();
  return rc;
}
875
876
/*
 * mk_rpc: combined send-and-receive on endpoint ep.  Builds and
 * validates the tx header chain exactly as mk_send does (4-byte
 * alignment, chain inside the caller's mapping), resolves/creates a
 * line endpoint for datagrams over connection-oriented devices, then
 * calls the device rpc routine.  An immediate reply is translated into
 * the caller's mapping; otherwise, if time_out permits, the thread
 * queues on the rx wait queue and blocks (completed via mk_return).
 * Returns NW_BUFFER_ERROR on any validation or transmit failure, NULL
 * when no reply is available and the caller does not wait.
 */
nw_buffer_t mk_rpc(nw_ep ep, nw_buffer_t msg, nw_options options,
		   int time_out) {
  nw_buffer_t rc;
  nw_result nrc;
  nw_ep sender;
  int dev;
  nw_pv_t pv;
  nw_ecb_t ecb;
  nw_tx_header_t header, first_header, previous_header;
  nw_hecb_t hecb;
  nw_waiter_t w;
  nw_ep_owned_t waited;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BUFFER_ERROR;
  } else {
    /* The caller must have a protection view of the endpoint. */
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_BUFFER_ERROR;
    } else {
      ecb = &ect[ep];
      if (ecb->state == NW_INEXISTENT ||
	  (ecb->protocol == NW_SEQ_PACKET && ecb->conn == NULL)) {
	rc = NW_BUFFER_ERROR;
      } else {
	/* Build one tx header per buffer in the user's chain. */
	first_header = header = nc_tx_header_allocate();
	previous_header = NULL;
	rc = NULL;
	while (header != NULL) {
	  if ((char *) msg < pv->buf_start ||
	      (char *) msg + sizeof(nw_buffer_s) > pv->buf_end ||
	      ((int) msg & 0x3) || (msg->block_offset & 0x3) ||
	      (msg->block_length & 0x3) || !msg->buf_used ||
	      (char *) msg + msg->buf_length > pv->buf_end ||
	      msg->block_offset + msg->block_length > msg->buf_length) {
	    rc = NW_BUFFER_ERROR;
	    break;
	  } else {
	    if (previous_header == NULL) {
	      /* First fragment fixes the destination peer. */
	      if (ecb->protocol == NW_SEQ_PACKET)
		header->peer = ecb->conn->peer;
	      else
		header->peer = msg->peer;
	    } else {
	      previous_header->next = header;
	    }
	    /* Translate the buffer address back into kernel space. */
	    header->buffer = (nw_buffer_t) ((char *) msg - pv->buf_start +
					    ecb->buf_start);
	    header->block = (char *) header->buffer + msg->block_offset;
	    if (!msg->block_deallocate)
	      header->buffer = NULL;	/* caller keeps ownership */
	    header->msg_length = 0;
	    header->block_length = msg->block_length;
	    first_header->msg_length += header->block_length;
	    header->next = NULL;
	    if (msg->buf_next == NULL)
	      break;
	    msg = msg->buf_next;
	    previous_header = header;
	    header = nc_tx_header_allocate();
	  }
	}
	if (header == NULL) {
	  /* Ran out of tx headers mid-chain. */
	  nc_tx_header_deallocate(first_header);
	  rc = NW_BUFFER_ERROR;
	} else if (rc != NW_BUFFER_ERROR) {
	  dev = NW_DEVICE(first_header->peer.rem_addr_1);
	  if (ecb->protocol != NW_DATAGRAM ||
	      devct[dev].type != NW_CONNECTION_ORIENTED) {
	    sender = first_header->peer.local_ep;
	    nrc = NW_SUCCESS;
	  } else {
	    /* Datagram over a connection-oriented device: find (or
	       create) the shared "line" endpoint to the peer. */
	    sender = nc_line_lookup(&first_header->peer);
	    if (sender == -1) {
	      nrc = NW_BAD_ADDRESS;
	    } else if (sender > 0) {
	      nrc = NW_SUCCESS;
	    } else {
	      nrc = mk_endpoint_allocate_internal(&sender, NW_LINE,
						  NW_AUTO_ACCEPT, 0, TRUE);
	      if (nrc == NW_SUCCESS) {
		nrc = mk_connection_open_internal(sender,
						  first_header->peer.rem_addr_1,
						  first_header->peer.rem_addr_2,
						  MASTER_LINE_EP);
		if (nrc == NW_SUCCESS)
		  nc_line_update(&first_header->peer, sender);
	      }
	    }
	  }
	  if (nrc == NW_SUCCESS) {
	    first_header->sender = sender;
	    first_header->options = options;
	    rc = (*(devct[dev].entry->rpc)) (sender, first_header, options);
	    if (rc != NULL && rc != NW_BUFFER_ERROR) {
	      /* Immediate reply: translate into the caller's mapping. */
	      rc = (nw_buffer_t) ((char *) rc - ecb->buf_start +
				  pv->buf_start);
	    } else if (rc == NULL && time_out != 0 && nw_free_waiter != NULL &&
		       (time_out == -1 || nw_free_waited != NULL)) {
	      /* No reply yet: queue on the rx wait queue and block. */
	      w = nw_free_waiter;
	      nw_free_waiter = w->next;
	      w->waiter = current_thread();
	      w->next = NULL;
	      hecb = &hect[ep];
	      if (hecb->rx_last == NULL)
		hecb->rx_first = hecb->rx_last = w;
	      else
		hecb->rx_last = hecb->rx_last->next = w;
	      assert_wait(0, TRUE);
	      if (time_out != -1) {
		/* Finite wait: record the endpoint so the timeout path
		   can back us out (mk_waited_collect). */
		waited = nw_free_waited;
		nw_free_waited = waited->next;
		waited->ep = ep;
		waited->next = NULL;
		current_thread()->nw_ep_waited = waited;
		current_thread()->wait_result = NULL;
		if (!current_thread()->timer.set)
		  thread_set_timeout(time_out);
	      } else {
		current_thread()->nw_ep_waited = NULL;
	      }
	      simple_unlock(&nw_simple_lock);
	      thread_block(mk_return);
	    }
	  }
	}
      }
    }
  }
  nw_unlock();
  return rc;
}
1011
/*
 * mk_select: wait for a received buffer on any of the nep endpoints in
 * the user array epp.  The first endpoint with a queued message wins
 * and its buffer is returned translated into the caller's mapping.  If
 * none has data and time_out is non-zero, the thread enqueues one
 * waiter on EVERY listed endpoint (all-or-nothing from the free pools)
 * and blocks; the syscall is completed later by mk_return.  Returns
 * NW_BUFFER_ERROR on a bad argument/endpoint/protection failure or
 * exhausted pools, NULL when time_out is 0 and nothing is ready.
 */
nw_buffer_t mk_select(u_int nep, nw_ep_t epp, int time_out) {
  nw_buffer_t rc;
  nw_pv_t pv;
  int i;
  nw_ep ep;
  nw_ecb_t ecb;
  nw_rx_header_t header;
  nw_hecb_t hecb;
  nw_waiter_t w, w_next;
  nw_ep_owned_t waited;

  if (invalid_user_access(current_task()->map, (vm_offset_t) epp,
			  (vm_offset_t) epp + nep*sizeof(nw_ep) - 1,
			  VM_PROT_READ)) {
    rc = NW_BUFFER_ERROR;
  } else {
    nw_lock();
    /* First pass: poll each endpoint for an already-queued message. */
    for (i = 0; i < nep; i++) {
      ep = epp[i];
      if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
	rc = NW_BUFFER_ERROR;
	break;
      } else {
	while (pv != NULL && pv->owner != current_task())
	  pv = pv->next;
	if (pv == NULL) {
	  rc = NW_BUFFER_ERROR;
	  break;
	} else {
	  ecb = &ect[ep];
	  header = ecb->rx_first;
	  if (header != NULL) {
	    /* Found one: dequeue and translate for the caller. */
	    rc = (nw_buffer_t) ((char *) header->buffer - ecb->buf_start +
				pv->buf_start);
	    ecb->rx_first = header->next;
	    if (ecb->rx_first == NULL)
	      ecb->rx_last = NULL;
	    nc_rx_header_deallocate(header);
	    break;
	  }
	}
      }
    }
    if (i == nep) {		/* polled all endpoints, none ready */
      if (time_out == 0) {
	rc = NULL;
      } else {
	/* Reserve nep waiters and nep waited records atomically:
	   remember the old free-list heads so we can roll back. */
	w = nw_free_waiter;
	waited = nw_free_waited;
	i = 0;
	while (i < nep &&
	       nw_free_waiter != NULL && nw_free_waited != NULL) {
	  nw_free_waiter = nw_free_waiter->next;
	  nw_free_waited = nw_free_waited->next;
	  i++;
	}
	if (i < nep) {
	  /* Not enough resources: restore both free lists. */
	  nw_free_waiter = w;
	  nw_free_waited = waited;
	  rc = NW_BUFFER_ERROR;
	} else {
	  /* Enqueue one waiter per endpoint and record the waited set
	     on the thread for later cleanup (mk_waited_collect). */
	  current_thread()->nw_ep_waited = waited;
	  for (i = 0; i < nep; i++) {
	    ep = epp[i];
	    waited->ep = ep;
	    if (i < nep-1)
	      waited = waited->next;
	    else
	      waited->next = NULL;
	    w->waiter = current_thread();
	    w_next = w->next;
	    w->next = NULL;
	    hecb = &hect[ep];
	    if (hecb->rx_last == NULL)
	      hecb->rx_first = hecb->rx_last = w;
	    else
	      hecb->rx_last = hecb->rx_last->next = w;
	    w = w_next;
	  }
	  assert_wait(0, TRUE);
	  if (time_out != -1) {
	    current_thread()->wait_result = NULL;
	    if (!current_thread()->timer.set)
	      thread_set_timeout(time_out);
	  }
	  simple_unlock(&nw_simple_lock);
	  thread_block(mk_return);	/* syscall completed by mk_return */
	}
      }
    }
    nw_unlock();
  }
  return rc;
}
1106
1107
1108 /*** System-dependent support ***/
1109
1110 void mk_endpoint_collect(task_t task) {
1111
1112 while (task->nw_ep_owned != NULL) {
1113 mk_endpoint_deallocate_internal(task->nw_ep_owned->ep, task, TRUE);
1114 }
1115 }
1116
/*
 * mk_waited_collect: thread-teardown/timeout hook.  For every endpoint
 * the thread registered in nw_ep_waited (mk_receive/mk_rpc/mk_select),
 * remove the thread's waiter from that endpoint's rx queue, then return
 * the whole waited list to the free pool.
 */
void mk_waited_collect(thread_t thread) {
  nw_hecb_t hecb;
  nw_waiter_t w, w_previous;
  nw_ep_owned_t waited, waited_previous;

  waited = thread->nw_ep_waited;
  if (waited != NULL) {
    while (waited != NULL) {
      /* Unlink this thread's waiter from the endpoint's rx queue. */
      hecb = &hect[waited->ep];
      w = hecb->rx_first;
      w_previous = NULL;
      while (w != NULL && w->waiter != thread) {
	w_previous = w;
	w = w->next;
      }
      if (w != NULL) {
	if (w_previous == NULL)
	  hecb->rx_first = w->next;
	else
	  w_previous->next = w->next;
	if (w->next == NULL)
	  hecb->rx_last = w_previous;
	w->next = nw_free_waiter;
	nw_free_waiter = w;
      }
      waited_previous = waited;
      waited = waited->next;
    }
    /* waited_previous is the list tail (the outer if guarantees at
       least one iteration); splice the whole list onto the free pool. */
    waited_previous->next = nw_free_waited;
    nw_free_waited = thread->nw_ep_waited;
    thread->nw_ep_waited = NULL;
  }
}
1150
/*
 * mk_return: continuation used when a wrapper call blocks.  Completes
 * the suspended system call, handing the user the wait_result that
 * mk_deliver_result stored in the thread.
 */
void mk_return() {

  thread_syscall_return(current_thread()->wait_result);
}
1155
1156
/*
 * mk_deliver_result: store `result` as `thread`'s wait_result and wake
 * it if it is cleanly asleep.  Runs at splsched with the thread locked;
 * cancels any pending wakeup timeout first.  Returns TRUE only when the
 * thread was actually placed on a run queue here.
 */
boolean_t mk_deliver_result(thread_t thread, int result) {
  boolean_t rc;
  int state, s;

  s = splsched();
  thread_lock(thread);
  state = thread->state;

  reset_timeout_check(&thread->timer);

  switch (state & TH_SCHED_STATE) {
  case TH_WAIT | TH_SUSP | TH_UNINT:
  case TH_WAIT | TH_UNINT:
  case TH_WAIT:
    /*
     * Sleeping and not suspendable - put on run queue.
     */
    thread->state = (state &~ TH_WAIT) | TH_RUN;
    thread->wait_result = (kern_return_t) result;
    simpler_thread_setrun(thread, TRUE);
    rc = TRUE;
    break;

  case TH_WAIT | TH_SUSP:
  case TH_RUN | TH_WAIT:
  case TH_RUN | TH_WAIT | TH_SUSP:
  case TH_RUN | TH_WAIT | TH_UNINT:
  case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
    /*
     * Either already running, or suspended.  Clear the wait bit and
     * leave the result; whoever resumes the thread picks it up.
     */
    thread->state = state &~ TH_WAIT;
    thread->wait_result = (kern_return_t) result;
    rc = FALSE;
    break;

  default:
    /*
     * Not waiting.
     */
    rc = FALSE;
    break;
  }
  thread_unlock(thread);
  splx(s);
  return rc;
}
1204
1205
1206 boolean_t nc_deliver_result(nw_ep ep, nw_delivery type, int result) {
1207 boolean_t rc;
1208 nw_hecb_t hecb;
1209 nw_ecb_t ecb;
1210 nw_waiter_t w;
1211 thread_t thread;
1212 task_t task;
1213 nw_pv_t pv;
1214 nw_buffer_t buf;
1215 nw_rx_header_t rx_header;
1216 nw_tx_header_t tx_header;
1217 nw_ep lep;
1218
1219 hecb = &hect[ep];
1220 ecb = &ect[ep];
1221
1222 thread = NULL;
1223 if (type == NW_RECEIVE || type == NW_RECEIVE_URGENT) {
1224 w = hecb->rx_first;
1225 if (w != NULL) {
1226 thread = w->waiter;
1227 hecb->rx_first = w->next;
1228 if (hecb->rx_first == NULL)
1229 hecb->rx_last = NULL;
1230 w->next = nw_free_waiter;
1231 nw_free_waiter = w;
1232 task = thread->task;
1233 pv = hecb->pv;
1234 while (pv != NULL && pv->owner != task)
1235 pv = pv->next;
1236 if (pv == NULL) {
1237 rc = FALSE;
1238 } else {
1239 buf = (nw_buffer_t) ((char *) result - ecb->buf_start + pv->buf_start);
1240 rc = mk_deliver_result(thread, (int) buf);
1241 }
1242 } else {
1243 rx_header = nc_rx_header_allocate();
1244 if (rx_header == NULL) {
1245 rc = FALSE;
1246 } else {
1247 rx_header->buffer = (nw_buffer_t) result;
1248 if (type == NW_RECEIVE) {
1249 rx_header->next = NULL;
1250 if (ecb->rx_last == NULL)
1251 ecb->rx_first = rx_header;
1252 else
1253 ecb->rx_last->next = rx_header;
1254 ecb->rx_last = rx_header;
1255 } else {
1256 rx_header->next = ecb->rx_first;
1257 if (ecb->rx_first == NULL)
1258 ecb->rx_last = rx_header;
1259 ecb->rx_first = rx_header;
1260 }
1261 rc = TRUE;
1262 }
1263 }
1264 } else if (type == NW_SEND) {
1265 w = hecb->tx_first;
1266 if (w == NULL) {
1267 rc = FALSE;
1268 } else {
1269 thread = w->waiter;
1270 hecb->tx_first = w->next;
1271 if (hecb->tx_first == NULL)
1272 hecb->tx_last = NULL;
1273 w->next = nw_free_waiter;
1274 nw_free_waiter = w;
1275 rc = mk_deliver_result(thread, result);
1276 }
1277 tx_header = ect[ep].tx_initial;
1278 if (result == NW_SUCCESS) {
1279 lep = tx_header->peer.local_ep;
1280 while (tx_header != NULL) {
1281 if (tx_header->buffer != NULL)
1282 nc_buffer_deallocate(lep, tx_header->buffer);
1283 tx_header = tx_header->next;
1284 }
1285 }
1286 nc_tx_header_deallocate(ect[ep].tx_initial);
1287 ect[ep].tx_initial = ect[ep].tx_current = NULL;
1288 } else if (type == NW_SIGNAL) {
1289 thread = hecb->sig_waiter;
1290 hecb->sig_waiter = NULL;
1291 if (thread == NULL) {
1292 rc = FALSE;
1293 } else {
1294 rc = mk_deliver_result(thread, result);
1295 }
1296 }
1297 return rc;
1298 }
1299
/*
 * mk_fast_sweep: fast timer handler; runs the common layer's fast
 * sweep under the wrapper lock.  Always returns 0.
 */
int mk_fast_sweep() {

  nw_lock();
  nc_fast_sweep();
  nw_unlock();
  return 0;
}
1307
/*
 * h_fast_timer_set: arm the fast-sweep timer (one tick) unless it is
 * already pending.
 *
 * Consistency fix: use `#if PRODUCTION` to match the guard style in
 * h_initialize (`#if PRODUCTION` at the slow-timer arm); the previous
 * `#ifdef` diverged when PRODUCTION was defined as 0.
 */
void h_fast_timer_set() {

#if PRODUCTION
  if (!nw_fast_timer.set)
    set_timeout(&nw_fast_timer, 1);
#endif
}
1315
/*
 * h_fast_timer_reset: cancel a pending fast-sweep timeout, if armed.
 */
void h_fast_timer_reset() {

  if (nw_fast_timer.set)
    reset_timeout(&nw_fast_timer);
}
1321
/*
 * mk_slow_sweep: periodic slow-sweep timer handler; runs the common
 * layer's slow sweep under the wrapper lock and rearms itself for 2*hz
 * ticks later.  Always returns 0.
 *
 * Bug fix: the function is declared int but contained no return
 * statement at all when PRODUCTION was not set (undefined behavior);
 * `return 0;` is hoisted out of the conditional block.  The guard also
 * now uses `#if PRODUCTION` to match h_initialize.
 */
int mk_slow_sweep() {

#if PRODUCTION
  nw_lock();
  nc_slow_sweep();
  nw_unlock();
  set_timeout(&nw_slow_timer, 2*hz);
#endif
  return 0;
}
1332
Cache object: 7f3b1d02c7e46e40b163aff52f2646e0
|