The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/norma/ipc_net.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /* 
    2  * Mach Operating System
    3  * Copyright (c) 1991,1992 Carnegie Mellon University
    4  * All Rights Reserved.
    5  * 
    6  * Permission to use, copy, modify and distribute this software and its
    7  * documentation is hereby granted, provided that both the copyright
    8  * notice and this permission notice appear in all copies of the
    9  * software, derivative works or modified versions, and any portions
   10  * thereof, and that both notices appear in supporting documentation.
   11  * 
   12  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   13  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
   14  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   15  * 
   16  * Carnegie Mellon requests users of this software to return to
   17  * 
   18  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   19  *  School of Computer Science
   20  *  Carnegie Mellon University
   21  *  Pittsburgh PA 15213-3890
   22  * 
   23  * any improvements or extensions that they make and grant Carnegie Mellon
   24  * the rights to redistribute these changes.
   25  */
   26 /*
   27  * HISTORY
   28  * $Log:        ipc_net.c,v $
   29  * Revision 2.12  93/01/14  17:53:58  danner
   30  *      64bit cleanup. Proper spl typing.
   31  *      [92/12/01            af]
   32  * 
   33  * Revision 2.11  92/03/10  16:27:47  jsb
   34  *      Merged in norma branch changes as of NORMA_MK7.
   35  *      [92/03/09  12:49:32  jsb]
   36  * 
   37  * Revision 2.10.2.5  92/02/21  11:24:34  jsb
   38  *      Moved spl{on,off} definitions earlier in the file (before all uses).
   39  *      [92/02/18  08:03:01  jsb]
   40  * 
   41  *      Removed accidentally included lint declarations of panic, etc.
   42  *      [92/02/16  11:17:48  jsb]
   43  * 
   44  *      Eliminated netipc_thread_wakeup/netipc_replenish race.
   45  *      [92/02/09  14:16:24  jsb]
   46  * 
   47  * Revision 2.10.2.4  92/02/18  19:14:43  jeffreyh
   48  *      [intel] added support for callhere debug option of iPSC.
   49  *      [92/02/13  13:02:21  jeffreyh]
   50  * 
   51  * Revision 2.10.2.3  92/01/21  21:51:30  jsb
   52  *      From sjs@osf.org: moved node_incarnation declaration here from
   53  *      ipc_ether.c.
   54  *      [92/01/17  14:37:03  jsb]
   55  * 
   56  *      New implementation of netipc lock routines which uses sploff/splon
   57  *      and which releases spl before calling interrupt handlers (but after
   58  *      taking netipc lock).
   59  *      [92/01/16  22:20:30  jsb]
   60  * 
   61  *      Removed panic in netipc_copy_grab: callers can now deal with failure.
   62  *      [92/01/14  21:59:10  jsb]
   63  * 
   64  *      In netipc_drain_intr_request, decrement request counts before calling
   65  *      interrupt routines, not after. This preserves assertion that there is
   66  *      at most one outstanding send or receive interrupt.
   67  *      [92/01/13  20:17:18  jsb]
   68  * 
   69  *      De-linted.
   70  *      [92/01/13  10:15:50  jsb]
   71  * 
   72  *      Now contains locking, allocation, and debugging printf routines.
   73  *      Purged log.
   74  *      [92/01/11  17:38:28  jsb]
   75  * 
   76  */ 
   77 /*
   78  *      File:   norma/ipc_net.c
   79  *      Author: Joseph S. Barrera III
   80  *      Date:   1991
   81  *
   82  *      Routines for reliable delivery and flow control for NORMA_IPC.
   83  */
   84 
   85 #include <norma/ipc_net.h>
   86 
#if     i386 || i860
#else
/*
 * Platforms without native sploff/splon primitives get them mapped to
 * the classic spl interface: sploff() masks all interrupts (splhigh),
 * splon(s) restores the saved level (splx).
 */
#define sploff()        splhigh()
#define splon(s)        splx(s)
#endif
   92 
/*
 * Not proven to be multiprocessor-safe
 */

/* Stamped into protocol traffic so peers can detect a restart;
 * presumably must never be zero (see comment) -- set at boot. */
unsigned long node_incarnation = 1;             /* should never be zero */

decl_simple_lock_data(,netipc_lock)

/*
 * Owner of the netipc lock: THREAD_NULL when free, THREAD_INTR when an
 * interrupt handler holds it, otherwise the owning thread.  Threads
 * additionally serialize on netipc_lock itself (see netipc_thread_lock).
 */
thread_t netipc_lock_owner = THREAD_NULL;
#define THREAD_INTR     ((thread_t) 1)

/*
 * Interrupt work queued while the lock was busy; replayed by
 * netipc_drain_intr_request().  Send/recv are counts (at most one
 * outstanding each, by assertion); timeout is effectively a flag.
 */
int send_intr_request = 0;
int recv_intr_request = 0;
int timeout_intr_request = 0;

#if     iPSC386 || iPSC860
extern void     netipc_called_here();
#endif  iPSC386 || iPSC860
  111 
/*
 * Replay interrupt requests that were queued while the netipc lock was
 * held, then release the lock.
 *
 * Called with interrupts not explicitly disabled but with lock held.
 * Returns with interrupts as they were and with lock released.
 *
 * Each queued handler is invoked with the spl released (but with lock
 * ownership set to THREAD_INTR), so interrupts are not kept masked for
 * the duration of the handlers.
 *
 * NOTE(review): the "iPSC836" text after several #endifs below is a
 * typo for iPSC386; tokens after #endif are ignored, so it is harmless.
 */
int
netipc_drain_intr_request()
{
        spl_t   s;

        s = sploff();
#if     iPSC386 || iPSC860
        netipc_called_here(__FILE__, __LINE__, "netipc_drain_intr_request");
#endif  iPSC836 || iPSC860
        assert(netipc_lock_owner != THREAD_NULL);
        while (send_intr_request > 0 ||
               recv_intr_request > 0 ||
               timeout_intr_request > 0) {
                /*
                 * Send and receive interrupts are counting interrupts.
                 * Many timeout interrupts map into one.
                 */
                netipc_lock_owner = THREAD_INTR;
                if (send_intr_request > 0) {
                        /*
                         * Decrement before calling the handler; this
                         * preserves the assertion that there is at most
                         * one outstanding send interrupt request.
                         */
                        send_intr_request--;
                        splon(s);
#if     iPSC386 || iPSC860
                        { int spl = spldcm();
#endif  iPSC836 || iPSC860
                        _netipc_send_intr();
#if     iPSC386 || iPSC860
                        splx(spl);}
#endif  iPSC836 || iPSC860
                        s = sploff();
                } else if (recv_intr_request > 0) {
                        /* Same decrement-first protocol as the send case. */
                        recv_intr_request--;
                        splon(s);
#if     iPSC386 || iPSC860
                        { int spl = spldcm();
#endif  iPSC836 || iPSC860
                        _netipc_recv_intr();
#if     iPSC386 || iPSC860
                        splx(spl);}
#endif  iPSC836 || iPSC860
                        s = sploff();
                } else {
                        /* Any number of timeout requests collapse to one. */
                        assert(timeout_intr_request > 0);
                        timeout_intr_request = 0;
                        splon(s);
#if     iPSC386 || iPSC860
                        { int spl = splclock();
#endif  iPSC836 || iPSC860
                        _netipc_timeout_intr();
#if     iPSC386 || iPSC860
                        splx(spl);}
#endif  iPSC836 || iPSC860
                        s = sploff();
                }
        }
        netipc_lock_owner = THREAD_NULL;
        splon(s);
}
  173 
  174 /*
  175  * XXX
  176  * These testing functions should have spls.
  177  */
  178 
  179 boolean_t
  180 netipc_thread_locked()
  181 {
  182         return (netipc_lock_owner == current_thread());
  183 }
  184 
  185 boolean_t
  186 netipc_intr_locked()
  187 {
  188         return (netipc_lock_owner == THREAD_INTR);
  189 }
  190 
  191 boolean_t
  192 netipc_locked()
  193 {
  194         return (netipc_lock_owner != THREAD_NULL);
  195 }
  196 
  197 boolean_t
  198 netipc_unlocked()
  199 {
  200         return (netipc_lock_owner == THREAD_NULL);
  201 }
  202 
  203 void
  204 netipc_thread_lock()
  205 {
  206         spl_t s;
  207 
  208         /*
  209          * Threads fight among themselves.
  210          */
  211         simple_lock(&netipc_lock);
  212 
  213         /*
  214          * A single thread fights against interrupt handler.
  215          */
  216         s = sploff();
  217         assert(netipc_unlocked());
  218         netipc_lock_owner = current_thread();
  219         splon(s);
  220 }
  221 
/*
 * Release the netipc lock from thread context.
 *
 * Replays any interrupt work queued while the lock was held (this also
 * clears netipc_lock_owner), then drops the simple lock so other
 * threads may enter.
 */
void
netipc_thread_unlock()
{
        assert(netipc_thread_locked());

        /*
         * Process queued interrupts, and release simple lock.
         */
        netipc_drain_intr_request();
        simple_unlock(&netipc_lock);
}
  233 
/*
 * Send-interrupt entry point.
 *
 * If the netipc lock is free, take it (as THREAD_INTR), run the real
 * handler _netipc_send_intr(), and drain anything queued meanwhile
 * (which also releases the lock).  If the lock is busy, just record
 * the request; the current lock holder will replay it in
 * netipc_drain_intr_request().
 */
netipc_send_intr()
{
        spl_t   s;

        s = sploff();
        if (netipc_lock_owner == THREAD_NULL) {
                netipc_lock_owner = THREAD_INTR;
                splon(s);
#if     iPSC386 || iPSC860
                { int spl = spldcm();
#endif  iPSC836 || iPSC860
                _netipc_send_intr();
#if     iPSC386 || iPSC860
                splx(spl);}
#endif  iPSC836 || iPSC860
                netipc_drain_intr_request();
        } else {
                /* At most one send request may be outstanding. */
                assert(send_intr_request == 0);
                send_intr_request++;
                splon(s);
        }
}
  256 
/*
 * Receive-interrupt entry point.
 *
 * Same protocol as netipc_send_intr(): run _netipc_recv_intr()
 * immediately if the netipc lock is free, otherwise queue the request
 * for the lock holder to replay.
 */
netipc_recv_intr()
{
        spl_t s;

        s = sploff();
        if (netipc_lock_owner == THREAD_NULL) {
                netipc_lock_owner = THREAD_INTR;
                splon(s);
#if     iPSC386 || iPSC860
                { int spl = spldcm();
#endif  iPSC836 || iPSC860
                _netipc_recv_intr();
#if     iPSC386 || iPSC860
                splx(spl);}
#endif  iPSC836 || iPSC860
                netipc_drain_intr_request();
        } else {
                /* At most one receive request may be outstanding. */
                assert(recv_intr_request == 0);
                recv_intr_request++;
                splon(s);
        }
}
  279 
/*
 * Timeout-interrupt entry point.
 *
 * Same protocol as netipc_send_intr(), except that repeated timeout
 * requests collapse into a single flag rather than a count (note the
 * iPSC variant uses splclock, not spldcm, around the handler).
 */
netipc_timeout_intr()
{
        spl_t s;

        s = sploff();
        if (netipc_lock_owner == THREAD_NULL) {
                netipc_lock_owner = THREAD_INTR;
                splon(s);
#if     iPSC386 || iPSC860
                { int spl = splclock();
#endif  iPSC836 || iPSC860
                _netipc_timeout_intr();
#if     iPSC386 || iPSC860
                splx(spl);}
#endif  iPSC836 || iPSC860
                netipc_drain_intr_request();
        } else {
                /* Many timeouts map into one pending request. */
                timeout_intr_request = 1;
                splon(s);
        }
}
  301 
extern int      netipc_self_stopped;

/* Replenisher-thread state (see netipc_thread_continue). */
boolean_t       netipc_thread_awake = FALSE;
boolean_t       netipc_thread_reawaken = FALSE;
/* Count of awakenings -- apparently statistics only; not read here. */
int             netipc_thread_awaken = 0;
/* Free page list for interrupt-level allocation, linked through
 * each page's pageq.next field. */
vm_page_t       netipc_page_list = VM_PAGE_NULL;
int             netipc_page_list_count = 0;
int             netipc_page_list_low = 20;      /* mandatory refill target */
int             netipc_page_list_high = 30;     /* opportunistic refill target */

extern zone_t   vm_map_copy_zone;
/* Free list of vm_map_copy structures, linked through the type field. */
vm_map_copy_t   netipc_vm_map_copy_list = VM_MAP_COPY_NULL;
int             netipc_vm_map_copy_count = 0;
  315 
/*
 * Return page m to the netipc free page list.
 *
 * The page's pageq.next field is reused as the free-list link (hence
 * the cast).  If the local node had stopped accepting traffic for lack
 * of pages, restart it now that a page is available.
 * Called with the netipc lock held.
 */
netipc_page_put(m)
        vm_page_t m;
{
        assert(netipc_locked());

        * (vm_page_t *) &m->pageq.next = netipc_page_list;
        netipc_page_list = m;
        netipc_page_list_count++;
        if (netipc_self_stopped) {
                netipc_self_unstop();
        }
}
  328 
  329 vm_map_copy_t
  330 netipc_copy_grab()
  331 {
  332         vm_map_copy_t copy;
  333 
  334         assert(netipc_locked());
  335         copy = netipc_vm_map_copy_list;
  336         if (copy != VM_MAP_COPY_NULL) {
  337                 netipc_vm_map_copy_list = (vm_map_copy_t) copy->type;
  338                 netipc_vm_map_copy_count--;
  339                 copy->type = VM_MAP_COPY_PAGE_LIST;
  340         }
  341         return copy;
  342 }
  343 
  344 void
  345 netipc_copy_ungrab(copy)
  346         vm_map_copy_t copy;
  347 {
  348         assert(netipc_locked());
  349         copy->type = (int) netipc_vm_map_copy_list;
  350         netipc_vm_map_copy_list = copy;
  351         netipc_vm_map_copy_count++;
  352 }
  353 
  354 netipc_thread_wakeup()
  355 {
  356         assert(netipc_locked());
  357         if (netipc_thread_awake) {
  358                 netipc_thread_reawaken = TRUE;
  359         } else {
  360                 thread_wakeup((vm_offset_t) &netipc_thread_awake);
  361         }
  362 }
  363 
  364 /*
  365  * XXX
  366  * The wakeup protocol for this loop is not quite correct...
  367  *
  368  * XXX
  369  * We should move the lists out all at once, not one elt at a time.
  370  *
  371  * XXX
  372  * The locking here is farcical.
  373  */
  374 netipc_replenish(always)
  375         boolean_t always;
  376 {
  377         vm_page_t m;
  378 
  379         assert(netipc_unlocked());
  380         netipc_output_replenish();      /* XXX move somewhere else */
  381         while (netipc_vm_map_copy_count < 300) {
  382                 vm_map_copy_t copy;
  383 
  384                 copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
  385                 netipc_thread_lock();
  386                 copy->type = (int) netipc_vm_map_copy_list;
  387                 netipc_vm_map_copy_list = copy;
  388                 netipc_vm_map_copy_count++;
  389                 netipc_thread_unlock();
  390         }
  391         if (current_thread()->vm_privilege) {
  392                 return; /* we might allocate from the reserved pool */
  393         }
  394         while (netipc_page_list_count < netipc_page_list_high) {
  395                 m = vm_page_grab();
  396                 if (m == VM_PAGE_NULL) {
  397                         break;
  398                 }
  399                 m->tabled = FALSE;
  400                 vm_page_init(m, m->phys_addr);
  401 
  402                 netipc_thread_lock();
  403                 * (vm_page_t *) &m->pageq.next = netipc_page_list;
  404                 netipc_page_list = m;
  405                 netipc_page_list_count++;
  406                 if (netipc_self_stopped) {
  407                         netipc_self_unstop();
  408                 }
  409                 netipc_thread_unlock();
  410         }
  411         while (always && netipc_page_list_count < netipc_page_list_low) {
  412                 while ((m = vm_page_grab()) == VM_PAGE_NULL) {
  413                         vm_page_wait(0);
  414                 }
  415                 m->tabled = FALSE;
  416                 vm_page_init(m, m->phys_addr);
  417 
  418                 netipc_thread_lock();
  419                 * (vm_page_t *) &m->pageq.next = netipc_page_list;
  420                 netipc_page_list = m;
  421                 netipc_page_list_count++;
  422                 if (netipc_self_stopped) {
  423                         netipc_self_unstop();
  424                 }
  425                 netipc_thread_unlock();
  426         }
  427 }
  428 
  429 /*
  430  * Grab a vm_page_t at interrupt level. May return VM_PAGE_NULL.
  431  */
  432 vm_page_t
  433 netipc_page_grab()
  434 {
  435         vm_page_t m;
  436 
  437         assert(netipc_locked());
  438         if ((m = netipc_page_list) != VM_PAGE_NULL) {
  439                 netipc_page_list = (vm_page_t) m->pageq.next;
  440                 netipc_page_list_count--;
  441         } else {
  442                 netipc_thread_wakeup();
  443         }
  444         return m;
  445 }
  446 
/*
 * Main loop of the replenisher thread (entered from netipc_thread).
 * Loops forever: refill the free lists, then sleep on
 * &netipc_thread_awake until netipc_thread_wakeup() is called.
 * Holds the netipc lock except while replenishing or blocked.
 */
void
netipc_thread_continue()
{
        netipc_thread_lock();
        for (;;) {
                /*
                 * Record that we are awake.
                 * Look out for new awaken requests while we are out working.
                 */
                netipc_thread_awaken++;
                netipc_thread_awake = TRUE;
                netipc_thread_reawaken = FALSE;

                /*
                 * Call netipc_replenish with netipc lock unlocked.
                 */
                netipc_thread_unlock();
                netipc_replenish(TRUE);
                netipc_thread_lock();

                /*
                 * If we don't yet have enough pages, or someone
                 * came up with something new for us to do, then
                 * do more work before going to sleep.
                 */
                if (netipc_page_list_count < netipc_page_list_low ||
                    netipc_thread_reawaken) {
                        continue;
                }

                /*
                 * Nothing left for us to do right now.  Go to sleep.
                 * Continuation restarts this function after wakeup.
                 */
                netipc_thread_awake = FALSE;
                assert_wait((vm_offset_t) &netipc_thread_awake, FALSE);
                (void) netipc_thread_unlock();
                thread_block(netipc_thread_continue);
                netipc_thread_lock();
        }
}
  487 
  488 void
  489 netipc_thread()
  490 {
  491         thread_set_own_priority(0);     /* high priority */
  492         netipc_thread_continue();
  493 }
  494 
/*
 * Debugging noise levels: each flag gates one of the printfN routines
 * below.  Patchable at run time (e.g. from the kernel debugger).
 */
int Noise0 = 0; /* print netipc packets */      
int Noise1 = 0; /* notification and migration debugging */
int Noise2 = 0; /* synch and timeout printfs */
int Noise3 = 0; /* copy object continuation debugging */
int Noise4 = 0; /* multiple out-of-line section debugging */
int Noise5 = 0; /* obsolete acks */
int Noise6 = 0; /* short print of rcvd packets, including msgh_id */

/* Console character-output routine, used as the _doprnt sink. */
extern cnputc();
  504 
/* VARARGS */
/*
 * Conditional debugging printf: prints to the console via
 * _doprnt/cnputc iff Noise1 (notification and migration debugging)
 * is nonzero.  Old-style (va_dcl) variadic function.
 */
printf1(fmt, va_alist)
        char* fmt;
        va_dcl
{
        va_list listp;

        if (Noise1) {
                va_start(listp);
                _doprnt(fmt, &listp, cnputc, 0);
                va_end(listp);
        }
}
  518 
/* VARARGS */
/*
 * Conditional debugging printf: prints to the console via
 * _doprnt/cnputc iff Noise2 (synch and timeout printfs) is nonzero.
 * Old-style (va_dcl) variadic function.
 */
printf2(fmt, va_alist)
        char* fmt;
        va_dcl
{
        va_list listp;

        if (Noise2) {
                va_start(listp);
                _doprnt(fmt, &listp, cnputc, 0);
                va_end(listp);
        }
}
  532 
/* VARARGS */
/*
 * Conditional debugging printf: prints to the console via
 * _doprnt/cnputc iff Noise3 (copy object continuation debugging)
 * is nonzero.  Old-style (va_dcl) variadic function.
 */
printf3(fmt, va_alist)
        char* fmt;
        va_dcl
{
        va_list listp;

        if (Noise3) {
                va_start(listp);
                _doprnt(fmt, &listp, cnputc, 0);
                va_end(listp);
        }
}
  546 
/* VARARGS */
/*
 * Conditional debugging printf: prints to the console via
 * _doprnt/cnputc iff Noise4 (multiple out-of-line section debugging)
 * is nonzero.  Old-style (va_dcl) variadic function.
 */
printf4(fmt, va_alist)
        char* fmt;
        va_dcl
{
        va_list listp;

        if (Noise4) {
                va_start(listp);
                _doprnt(fmt, &listp, cnputc, 0);
                va_end(listp);
        }
}
  560 
/* VARARGS */
/*
 * Conditional debugging printf: prints to the console via
 * _doprnt/cnputc iff Noise5 (obsolete acks) is nonzero.
 * Old-style (va_dcl) variadic function.
 */
printf5(fmt, va_alist)
        char* fmt;
        va_dcl
{
        va_list listp;

        if (Noise5) {
                va_start(listp);
                _doprnt(fmt, &listp, cnputc, 0);
                va_end(listp);
        }
}
  574 
/* VARARGS */
/*
 * Conditional debugging printf: prints to the console via
 * _doprnt/cnputc iff Noise6 (short print of received packets,
 * including msgh_id) is nonzero.  Old-style (va_dcl) variadic function.
 */
printf6(fmt, va_alist)
        char* fmt;
        va_dcl
{
        va_list listp;

        if (Noise6) {
                va_start(listp);
                _doprnt(fmt, &listp, cnputc, 0);
                va_end(listp);
        }
}
  588 
#if     iPSC386 || iPSC860
/*
 * Ring buffer of "we were here" call-site records for iPSC debugging.
 * netipc_called_here() logs (file, line, note);
 * db_show_netipc_called_here() dumps the ring from the debugger.
 */
#define MAX_CALLS       256
char    *called_here_filename_buffer[MAX_CALLS];
char    *called_here_notation_buffer[MAX_CALLS];
int     called_here_line_buffer[MAX_CALLS];
int     called_here_next = 0;   /* next slot to fill */

/*
 * Record a call site in the ring buffer, with interrupts masked so
 * records are not interleaved.  After filling slot i, the following
 * slot's filename is cleared -- that null entry terminates the valid
 * records when the ring is dumped.
 */
void netipc_called_here(filename, line, notation)
        char    *filename, *notation;
        int     line;
{
        spl_t   s;
        int     i;

        s = sploff();
        i = called_here_next++;
        if (called_here_next >= MAX_CALLS) {
                called_here_next = 0;   /* wrap around */
        }
        called_here_filename_buffer[called_here_next] = 0;
        called_here_filename_buffer[i] = filename;
        called_here_notation_buffer[i] = notation;
        called_here_line_buffer[i] = line;
        splon(s);
}
  614 
  615 
/*
 * Kernel-debugger command: print the call-site ring, newest record
 * first, stopping at the first empty (terminator) slot.  Only the
 * basename of each filename is printed when it contains a '/'.
 */
void db_show_netipc_called_here()
{
        int     i, j;
        char    *s, *slash;

        kdbprintf(" #   Line File\n");
        j = called_here_next - 1;       /* newest record */
        for (i = 0; i < MAX_CALLS; i++) {
                if (j < 0) {
                        j = MAX_CALLS - 1;      /* wrap backwards */
                }
                if (called_here_filename_buffer[j]) {
                        /* Locate the character after the last '/'. */
                        slash = 0;
                        for (s = called_here_filename_buffer[j]; *s; s++) {
                                if (*s == '/')
                                        slash = s + 1;
                        }
                        kdbprintf("%3d %5d %s\t\t%s\n", j,
                                called_here_line_buffer[j],
                                slash ? slash : called_here_filename_buffer[j],
                                called_here_notation_buffer[j]);
                        j--;
                } else {
                        return; /* terminator: no more valid records */
                }
        }
}
  643 
  644 #endif  iPSC386 || iPSC860

Cache object: 3c532a4346d0acb0f42e692e83764a4a


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.