FreeBSD/Linux Kernel Cross Reference
sys/sqt/model_dep.c


    1 /* 
    2  * Mach Operating System
    3  * Copyright (c) 1992,1991 Carnegie Mellon University
    4  * Copyright (c) 1992,1991 Sequent Computer Systems
    5  * All Rights Reserved.
    6  * 
    7  * Permission to use, copy, modify and distribute this software and its
    8  * documentation is hereby granted, provided that both the copyright
    9  * notice and this permission notice appear in all copies of the
   10  * software, derivative works or modified versions, and any portions
   11  * thereof, and that both notices appear in supporting documentation.
   12  * 
   13  * CARNEGIE MELLON AND SEQUENT COMPUTER SYSTEMS ALLOW FREE USE OF
   14  * THIS SOFTWARE IN ITS "AS IS" CONDITION.  CARNEGIE MELLON AND
   15  * SEQUENT COMPUTER SYSTEMS DISCLAIM ANY LIABILITY OF ANY KIND FOR
   16  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   17  * 
   18  * Carnegie Mellon requests users of this software to return to
   19  * 
   20  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   21  *  School of Computer Science
   22  *  Carnegie Mellon University
   23  *  Pittsburgh PA 15213-3890
   24  * 
   25  * any improvements or extensions that they make and grant Carnegie Mellon 
   26  * the rights to redistribute these changes.
   27  */
   28 
   29 /*
   30  * HISTORY
   31  * $Log:        model_dep.c,v $
   32  * Revision 2.7  93/01/14  17:56:03  danner
   33  *      Added cpu_interrupt_to_db and nmi_intr.
   34  *      [92/10/25            dbg]
   35  *      Added cpu_interrupt_to_db and nmi_intr.
   36  *      [92/10/25            dbg]
   37  * 
   38  * Revision 2.6  92/01/03  20:28:07  dbg
   39  *      Rename kdb_init to ddb_init.  Remove esym.
   40  *      Allocate interrupt stacks before initial debugger breakpoint.
   41  *      [91/09/11            dbg]
   42  * 
   43  * Revision 2.5  91/07/31  18:03:05  dbg
   44  *      Changed copyright.
   45  * 
   46  *      Removed call to pcb_module_init (now called from
   47  *      machine-independent code).
   48  *      [91/07/26            dbg]
   49  * 
   50  *      Call interrupt_stack_alloc.
   51  *      [91/06/27            dbg]
   52  * 
   53  * Revision 2.4  91/05/20  22:23:05  rpd
   54  *      Fixed avail_remaining, avail_next initialization.
   55  * 
   56  * Revision 2.3  91/05/18  14:37:37  rpd
   57  *      Changed pmap_bootstrap arguments.
   58  *      Moved pmap_free_pages and pmap_next_page here.
   59  *      [91/05/15            rpd]
   60  * 
   61  * Revision 2.2  91/05/08  12:58:00  dbg
   62  *      Each CPU calls mp_desc_init on its own.
   63  *      [91/02/13            dbg]
   64  * 
   65  *      Add pmap_valid_page.
   66  *      [91/01/17            dbg]
   67  * 
   68  *      Altered for pure kernel.
   69  *      Created from pieces of sqt/machdep.c and sqt/startup.c.
   70  *      [90/10/03            dbg]
   71  *
   72  */
   73 
   74 /*
   75  * model_dep.c
   76  *      Various startup/initializations.  i386 version.
   77  *      Also has machine-dependent shutdown code and other
   78  *      similar stuff.
   79  */
   80 #include <mach_kdb.h>
   81 
   82 #include <mach/kern_return.h>
   83 #include <mach/processor_info.h>
   84 
   85 #include <mach/boolean.h>
   86 #include <mach/machine.h>
   87 #include <mach/vm_param.h>
   88 
   89 #include <vm/pmap.h>
   90 #include <vm/vm_kern.h>
   91 
   92 #include <kern/lock.h>
   93 #include <kern/time_out.h>
   94 
   95 #include <sys/reboot.h>
   96 
   97 #include <i386/proc_reg.h>
   98 
   99 #include <sqt/vm_defs.h>
  100 #include <sqt/ioconf.h>
  101 #include <sqt/intctl.h>
  102 #include <sqt/trap.h>
  103 #include <sqt/mutex.h>
  104 
  105 #include <sqt/SGSproc.h>
  106 #include <sqt/slic.h>
  107 #include <sqt/slicreg.h>
  108 #include <sqt/engine.h>
  109 #include <sqt/cfg.h>
  110 #include <sqt/clkarb.h>
  111 
  112 #include <sqtsec/sec.h>
  113 
  114 int             mono_P_eng;             /* engine for mono_p drivers */
  115 
  116 /*
  117  * Bootstrap memory allocator
  118  */
  119 char            *calloc();
  120 #define csalloc(n,type) (type *) calloc((int)(n)*(sizeof(type)))
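       /*
        * Illustrative use of csalloc (hypothetical example, not from the
        * original source): allocate four zeroed ctlr_desc structures at
        * boot time:
        *
        *      struct ctlr_desc *cd = csalloc(4, struct ctlr_desc);
        */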
  121 
  122 boolean_t       calloc_ok = TRUE;               /* flag for legal calloc's */
  123 
  124 extern  boolean_t   light_show;
  125 
  126 extern  int     mono_P_slic;
  127 extern  int     resphysmem;             /* reserved physical memory */
  128 
  129 extern  vm_offset_t topmem;             /* top of memory */
  130 extern  vm_size_t   totalmem;           /* total memory (topmem-holes) */
  131 
  132 extern  int     master_cpu;
  133 
  134 extern boolean_t upyet;
  135 
  136 vm_offset_t     loadpt = 0;
  137 vm_offset_t     avail_start;
  138 vm_offset_t     avail_end;
  139 vm_offset_t     virtual_avail;
  140 vm_offset_t     virtual_end;
  141 vm_size_t       mem_size;
  142 vm_offset_t     avail_next;
  143 unsigned int    avail_remaining;
  144 
  145 /*
  146  * Parameters passed from bootstrap loader/start.s
  147  */
  148 vm_offset_t     first_avail = 0;
  149 
  150 extern  struct  ctlr_desc *slic_to_config[];
  151 
  152 extern u_char           cons_scsi;      /* slic address of console scsi */
  153 struct sec_cib *        cbdcib;         /* address of console board device */
  154 struct sec_gmode        cbdgsm;         /* get/set modes command */
  155 
  156 struct sec_cib *        wdtcib;         /* address of watchdog timer cib */
  157 struct sec_smode        wdtsm;          /* for setmodes and startio commands */
  158 
  159 int     wdtreset();     /* forward */
  160 int     wdt_timeout;
  161 
  162 struct reboot           bootdata;
  163 
  164 int *   va_led = (int *)VA_LED;         /* processor LED */
  165 
  166 extern char             version[];      /* system version string */
  167 
  168 /*
  169  * machine_startup()
  170  *      Do basic system initializations.
  171  *
  172  * Called by first processor to start, very early after system is alive.
  173  * Runs with paging enabled.
  174  *
  175  * first_avail = first available physical address.
  176  */
  177 
  178 void
  179 machine_startup()
  180 {
  181         register int i;
  182         unsigned procid;
  183         extern char edata, end;
  184 
  185         /*
  186          * Zero BSS.
  187          */
  188         blkclr((char *)&edata, (char *)&end - (char *)&edata);
  189 
  190         /*
  191          * Set up the virtual address of the configuration table.
  192          */
  193         va_CD_LOC = PHYSTOKV(CD_LOC, struct config_desc *);
  194 
  195         /*
  196          * Map the rest of physical memory, and set up the
  197          * kernel virtual address space.
  198          */
  199         avail_start = first_avail;
  200         avail_end   = (vm_offset_t) (va_CD_LOC->c_maxmem);
  201         mem_size    = avail_end - loadpt;
  202 
  203         vm_set_page_size();
  204 
  205         pmap_bootstrap(loadpt);
  206 
  207         /*
  208          * Allocate and set up mapping for SLIC and LEDs.
  209          */
  210 
  211         map_slic_and_LEDs(&avail_start);
  212 
  213         /*
  214          * Enable NMIs on the processor.
  215          */
  216 
  217         wrslave(va_slic->sl_procid, PROC_CTL,
  218                 PROC_CTL_NO_SSTEP | PROC_CTL_NO_HOLD | PROC_CTL_NO_RESET);
  219 
  220         /*
  221          * Configure the HW and initialize interrupt table (int_bin_table).
  222          *
  223          * Configure allocates and fills out:
  224          *      engine[] array          ; one per processor
  225          *      Nengine                 ; # processors
  226          *      mono_P_slic             ; Slic addr for mono_P drivers
  227          * plus sets up device drivers/etc.
  228          */
  229 
  230         va_slic->sl_lmask = 0;                  /* insure interrupts OFF */
  231         configure();
  232 
   233         upyet = TRUE;                           /* controls less here than it did in DYNIX */
  234 
  235         /*
  236          * Fill out engine structures; these were allocated by
  237          * configure() who also filled out the e_slicaddr fields,
  238          * and turned on E_FPU387 and/or E_FPA in e_flags if appropriate.
  239          *
  240          * Figure out my procid from engine structures.
  241          *
  242          * If there were any mono-P drivers for existing HW, then
  243          * they were bound to 'me' (eg, booting processor); thus
  244          * set flag to avoid ever taking `me' offline.
  245          */
  246 
  247         procid = Nengine;
  248         for (i = 0; i < Nengine; i++) {
  249                 engine[i].e_flags |= E_OFFLINE; /* not up yet */
  250                 if (va_slic->sl_procid == engine[i].e_slicaddr) {
  251                         procid = i;
  252                         master_cpu = i;
   253                         printf("Master cpu is %d\n", i);
  254                         if (mono_P_slic >= 0) {
  255                                 mono_P_eng = i;
  256                                 engine[i].e_flags |= E_DRIVER;
  257                         }
  258                 }
  259         }
  260 
  261         /*
  262          * There should be NO allocations after this!
  263          * (at least not from calloc)
  264          */
  265 
  266         calloc_end();
  267 
  268         /*
  269          * Allocate the interrupt stacks, from 1-1 physical memory.
  270          * Uses avail_start.
  271          */
  272         interrupt_stack_alloc();
  273 
  274         /*
  275          * Allocate per-processor descriptor tables.
  276          */
  277         (void) mp_desc_init(master_cpu);
  278 
  279         /*
  280          *      Initialize for pmap_free_pages and pmap_next_page.
  281          *      This must happen after calls to calloc.
  282          */
  283 
  284         avail_remaining = atop(avail_end - avail_start);
  285         avail_next = avail_start;
  286 
  287 #if     MACH_KDB
  288         /*
  289          * Initialize the kernel debugger.
  290          */
  291         ddb_init();
  292 
  293         /*
  294          * Take a debug trap if user asked for it.
  295          */
  296         if (boothowto & RB_KDB)
  297             Debugger();
  298 
   299 #endif  /* MACH_KDB */
  300 
  301         /*
  302          * Print out system version number.
  303          */
  304         printf(version);
  305 
  306         /*
  307          * Start Mach.
  308          */
  309         setup_main();
  310         /*NOTREACHED*/
  311 }
  312 
  313 /*
  314  * machine_init()
  315  * Called with Mach running, but no threads.
  316  */
  317 machine_init()
  318 {
  319         register struct cntlrs * b8k;
  320 
  321         /*
  322          * System is mapped enough to do self-init's.
  323          */
  324 
  325         self_init();
  326 
  327         /*
  328          * Enable Ecc error reporting in memory controllers.
  329          *
  330          * Note: errors will merely be latched.  No interrupts
  331          * are generated other than NMI for uncorrectable errors.
  332          */
  333 
  334         memenable();
  335 
  336         /*
  337          * Initialize IO controller mapping.
  338          */
  339 
  340         for (b8k = b8k_cntlrs; b8k->conf_b8k != NULL; b8k++)
  341                 (*b8k->b8k_map)();
  342 
  343         /*
  344          * Get the time
  345          */
  346         inittodr();
  347 }
  348 
  349 void start_other_cpus()
  350 {
  351         decl_simple_lock_data(extern, start_lock)
  352         register int    i;
  353         register struct machine_slot *ms;
  354 
  355         /*
  356          * Allow other CPUs to run if started
  357          */
  358         simple_unlock(&start_lock);     /* see locore.s */
  359 
  360 #if 0
  361  /* do later */
  362         if (boothowto & RB_UNIPROC) {
  363             printf("Uni-processor boot; slaves will not be started.\n");
  364             return;
  365         }
  366 
  367         /*
  368          * Start up other CPUs
  369          */
  370         for (i = 0, ms = &machine_slot[0];
  371              i < NCPUS;
  372              i++, ms++)
  373         {
  374             if (ms->is_cpu && i != master_cpu && !ms->running) {
  375                 cpu_start(i);
  376             }
  377         }
   378 #endif  /* 0 */
  379 }
  380 
  381 /*
  382  * Slave comes up here.
  383  */
  384 slave_machine_init()
  385 {
  386         self_init();
  387 }
  388 
  389 
  390 /*
  391  * self_init()
  392  *      Do self init's.  Done by each processor as it comes alive.
  393  */
  394 
  395 self_init()
  396 {
  397         struct  engine  *eng;                   /* my engine structure */
  398         unsigned procid;                        /* logical processor # */
  399 
  400         procid = cpu_number();
  401         eng = &engine[procid];
  402 
  403         /*
  404          * Fill out relevant fields in "engine" structure.
  405          */
  406 
  407         eng->e_fpuon = (CR0_PG|CR0_PE);         /* how to turn FPU on */
  408         if (eng->e_flags & E_FPU387) {          /* if 387... */
  409             eng->e_fpuon |= CR0_ET|CR0_MP;          /*  ... set for 387 */
  410             eng->e_fpuoff = eng->e_fpuon | CR0_EM;  /* off ==> emulate math */
  411             init_fpu();                             /* 387 needs fninit */
  412         } else {
   413             eng->e_fpuon |= CR0_EM;                 /*  ... no 387: emulate */
  414             eng->e_fpuoff = eng->e_fpuon;           /* off ==> emulate math */
  415         }
  416 
  417         /*
  418          * Do other processor-local inits.
  419          */
  420 
  421         localinit();
  422 
  423         /*
  424          * Fill out the engine structure.
  425          */
  426 
  427         eng->e_flags &= ~E_OFFLINE;                     /* on-line, now! */
  428 
  429         /*
  430          * Say hello.
  431          *
  432          * Processor up -- turn processor LED on.
  433          */
  434 
  435         if (light_show) {
  436                 DISABLE();
  437                 if (fp_lights)
  438                         FP_LIGHTON(cpu_number());
  439                 *va_led = 1;
  440                 ENABLE();
  441         }
  442 
  443         enable_nmi();
  444 }
  445 
  446 /*
  447  * map_slic_and_LEDs(phys_addr_p)
  448  *
  449  *      Map in the SLIC and the per-processor LEDs, allocating
  450  *      page tables for them.
  451  *      Alters *phys_addr_p to allocate physical pages for page tables.
  452  *
  453  * These are shared by all processors.
  454  *
  455  * When/if need to map more such things, should make this table-driven.
  456  */
  457 
  458 /*
  459  * Return a pointer to the page-table entry that maps a given virtual
  460  * address.  May allocate a physical page for the page table.
  461  */
  462 extern pt_entry_t *     kpde;           /* kernel page directory VA */
  463 
  464 pt_entry_t *
  465 io_map_pte(va, phys_addr_p)
  466         vm_offset_t     va;             /* VA to map */
  467         vm_offset_t     *phys_addr_p;   /* PA to allocate from */
  468 {
  469         register pt_entry_t *pdp;
  470         register pt_entry_t *ptp;
  471 
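               /*
                * Locate the page-directory entry that covers va
                * (pdenum() presumably yields the directory index for va).
                */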
  472         pdp = kpde;
  473         pdp += pdenum(va);
  474 
  475         if ((*pdp & INTEL_PTE_VALID) == 0) {
  476             /*
  477              * Must allocate a page table page
  478              */
  479             vm_offset_t pa;
  480 
  481             pa = *phys_addr_p;          /* physical address */
  482             *phys_addr_p += I386_PGBYTES;
  483             bzero(PHYSTOKV(pa, char *), I386_PGBYTES);
  484             *pdp = pa_to_pte(pa) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
  485         }
  486         ptp = (pt_entry_t *)ptetokv(*pdp);      /* virtual address */
  487         return &ptp[ptenum(va)];
  488 }
  489 
  490 map_slic_and_LEDs(phys_addr_p)
  491         vm_offset_t     *phys_addr_p;
  492 {
  493         register pt_entry_t     *ptp;
  494 
  495 #if 0
  496         /*
  497          * Allocate mapping for FPA.
  498          * Not mapped in maplocalIO(); rather let page fault turn it on
  499          * per process.
  500          */
  501 
  502         alloc_fpa();
   503 #endif  /* 0 */
  504 
  505         /* Map SLIC - one page */
  506         ptp = io_map_pte(VA_SLIC, phys_addr_p);
  507         *ptp = pa_to_pte(PHYS_SLIC) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
  508 
  509         /*
  510          * Map processor LED.
  511          */
  512         ptp = io_map_pte(VA_LED, phys_addr_p);
  513         *ptp = pa_to_pte(PHYS_LED) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
  514 
  515         /*
   516  * Map elapsed-time counter.
  517          */
  518         ptp = io_map_pte(VA_ETC, phys_addr_p);
  519         *ptp = pa_to_pte(PHYS_ETC) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
  520 
  521 }
  522 
  523 /*
  524  * localinit()
  525  *      Init local processor resources.
  526  *
  527  * This involves:
  528  *      turning on the cache,
  529  *      setting up SLIC interrupt control,
  530  */
  531 
  532 localinit()
  533 {
  534         register struct cpuslic *sl = va_slic;
  535         register struct engine *eng = &engine[cpu_number()];
  536 
  537         /*
  538          * If processor has an FPA, initialize it.
  539          */
  540 
  541         if (eng->e_flags & E_FPA) {
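                       /*
                        * FPA-specific initialization is evidently not
                        * implemented in this version; the block is a no-op.
                        */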
  542         }
  543 
  544         /*
  545          * Set up SLIC interrupt control and start local clock.
  546          */
  547 
  548         (void) splhi();                         /* all intrs masked */
  549         ENABLE();                               /* but ON at processor */
  550 
  551 #ifdef  DEBUG
  552         if ((sl->sl_ictl & (SL_HARDINT|SL_SOFTINT)) != 0) {
  553                 printf("localinit: pending interrupts 0x%x\n", 
  554                                 sl->sl_ictl & (SL_HARDINT|SL_SOFTINT));
  555                 panic("localinit");
  556         }
   557 #endif  /* DEBUG */
  558         assert((sl->sl_ictl & (SL_HARDINT|SL_SOFTINT)) == 0);
  559         sl->sl_ictl = 0x00;                     /* not using `m' bit */
  560 
  561         sl->sl_procgrp = TMPOS_GROUP;           /* set group ID */
  562         setgm(sl->sl_procid, SL_GM_ALLON);      /* set self group-mask all ON */
  563 #ifdef  CHECKSLIC
  564         assert(sl->sl_gmask == SL_GM_ALLON);
   565 #endif  /* CHECKSLIC */
  566 
  567 }
  568 
  569 /*
  570  *      Fix up the virtual address space when all io_map calls are done.
  571  */
  572 void
  573 io_map_done()
  574 {
  575         virtual_avail = round_page(virtual_avail);
  576 }
  577 
  578 /*
  579  * calloc()
  580  *      Allocate zeroed memory at boot time.
  581  *
  582  * Done via bumping "curmem" value.
  583  *
  584  * Skips holes in physical memory after memory is configured (topmem != 0).
  585  * Assumes allocations to that point are in memory contiguous from physical 0.
  586  *
  587  * callocrnd() is used to round up so that next allocation occurs
  588  * on a given boundary.
  589  *
  590  * XXX These should be rewritten using pmap_steal_memory.
  591  */
  592 
  593 boolean_t       cmem_exists();  /* forward */
  594 
  595 caddr_t
  596 calloc(size)
  597         int     size;
  598 {
  599         char *  val;
  600 
  601         assert(calloc_ok);
  602 
   603         size = (size + (sizeof(int) - 1)) & ~(sizeof(int)-1);   /* round size up to int alignment */
  604 
  605         /*
  606          * If ok to check, insure memory exists and skip hole if necessary.
  607          * Skipping hole puts curmem on hole boundary, thus arbitrary alignment.
  608          */
  609 
  610         while (!cmem_exists(avail_start, size)) {
  611             avail_start = i386_round_page(avail_start);
  612             assert(avail_start < avail_end);
  613         }
  614 
  615         /*
  616          * Allocate and clear the memory.
  617          */
  618 
  619         val = PHYSTOKV(avail_start, char *);
  620         avail_start += size;
  621 
  622         bzero(val, (unsigned)size);
  623         return(val);
  624 }
  625 
  626 callocrnd(bound)
  627         int     bound;
  628 {
   629         avail_start = ((avail_start + bound - 1) / bound) * bound;  /* round up to a multiple of bound */
  630 }
  631 
  632 calloc_end()
  633 {
  634         callocrnd(PAGE_SIZE);
  635         calloc_ok = 0;
  636 }
  637 
  638 /*
  639  * cmem_exists()
  640  *      Check for existence of memory from a given address for a given size.
  641  */
  642 
  643 boolean_t
  644 cmem_exists(paddr, size)
  645         caddr_t paddr;
  646         register int    size;
  647 {
  648         register int    pg;
  649 
  650         size += (int) paddr & (I386_PGBYTES-1);
  651         for (pg = i386_btop(paddr); size > 0; size -= I386_PGBYTES, pg++)
  652                 if (!page_exists(pg))
  653                         return(0);
  654         return(1);
  655 }
  656 
  657 /*
  658  * Return a number to use in spin loops that takes into account
  659  * both the cpu rate and the mip rating.
  660  */
  661 
  662 calc_delay(x)
  663         unsigned int    x;
  664 {
  665         extern int      cpurate;
  666         extern   int    lcpuspeed;
  667 
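               /*
                * Before the system is fully up only the static cpurate
                * scale factor is available; afterward, scale by the
                * per-engine speed rating and lcpuspeed (presumably a
                * percentage-style calibration, hence the divide by 100).
                */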
  668         if (!upyet)
  669                 return (x*cpurate);
  670         else
  671                 return (x*engine[cpu_number()].e_cpu_speed*lcpuspeed)/100;
  672 }
  673 
  674 /*
  675  * halt_all_cpus()
  676  *      Reboot the machine.
  677  *
  678  * Boot routine returns to Firmware.  If called by panic it tries to sync
  679  * up disks and returns specifying that the alternate boot name is to be
  680  * booted.  This is normally the Memory dumper.
  681  *
  682  * Only ONE engine is alive at this point.
  683  */
  684 
  685 halt_all_cpus(do_reboot)
  686         boolean_t       do_reboot;
  687 {
  688         register struct sec_gmode *cbdmptr = &cbdgsm;
  689         register struct sec_smode *wdtsmptr = &wdtsm;
  690         register spl_t s_ipl;
  691         extern etext;
  692         extern boolean_t dblpanic;
  693 
  694         int     howto = 0;              /* XXX */
  695 
  696         return_fw(do_reboot);           /* XXX */
  697 
  698         if (!upyet)
  699                 return_fw(FALSE);
  700 
  701         /*
  702          * Get powerup reboot structure.
  703          */
  704 
  705         cbdmptr->gm_status = 0;
  706         bootdata.re_powerup = 1;        /* 0 booted data, 1 powerup values */
  707         cbdmptr->gm_un.gm_board.sec_reboot =
  708                                 KVTOPHYS(&bootdata, struct reboot *);
  709         cbdcib->cib_inst = SINST_GETMODE;
  710         cbdcib->cib_status = KVTOPHYS(&cbdgsm, int *);
  711         s_ipl = splhi();
  712         mIntr(cons_scsi, 7, SDEV_SCSIBOARD);
  713         splx(s_ipl);
  714 
  715         while ((cbdmptr->gm_status & SINST_INSDONE) == 0)
  716                 continue;
  717 
  718         if (cbdmptr->gm_status != SINST_INSDONE) {
  719                 printf("Cannot get Console Board modes\n");
  720                 return_fw(FALSE);
  721         }
  722 
  723         /*
  724          * Now tell FW how to reboot
  725          */
  726 
  727         bootdata.re_powerup = 0;        /* 0 booted data, 1 powerup values */
  728         cbdmptr->gm_un.gm_board.sec_dopoll = 0;         /* no change */
  729         cbdmptr->gm_un.gm_board.sec_powerup = 0;        /* no change */
  730         cbdmptr->gm_un.gm_board.sec_errlight = SERR_LIGHT_SAME;
  731 
  732         bootdata.re_boot_flag = howto;
  733 
  734         cbdmptr->gm_status = 0;
  735         cbdcib->cib_inst = SINST_SETMODE;
  736         cbdcib->cib_status = KVTOPHYS(&cbdgsm, int *);
  737         s_ipl = splhi();
  738         mIntr(cons_scsi, 7, SDEV_SCSIBOARD);
  739         splx(s_ipl);
  740 
  741         while ((cbdmptr->gm_status & SINST_INSDONE) == 0)
  742                 continue;
  743 
  744         if (cbdmptr->gm_status != SINST_INSDONE) {
  745                 printf("Cannot set Console Board modes\n");
  746                 return_fw(FALSE);
  747         }
  748 
  749         if (do_reboot) {
  750                 /*
  751                  * Set watchdog for 1 minute.
  752                  * Prevent ERROR light from going on...
  753                  */
  754 
  755                 untimeout(wdtreset, (caddr_t)0);        /* Stop wdt reset */
  756 
  757                 wdtsmptr->sm_status = 0;
  758                 wdtsmptr->sm_un.sm_wdt_mode = 60;       /* Set for minute! */
  759 
  760                 wdtcib->cib_inst = SINST_SETMODE;
  761                 wdtcib->cib_status = KVTOPHYS(&wdtsm, int *);
  762                 s_ipl = splhi();
  763                 mIntr(cons_scsi, 7, SDEV_WATCHDOG);
  764                 splx(s_ipl);
  765 
  766                 while ((wdtsmptr->sm_status & SINST_INSDONE) == 0)
  767                         continue;
  768 
  769                 if (wdtsmptr->sm_status != SINST_INSDONE) {
  770                         printf("Cannot Setmode Watchdog\n");
  771                         return_fw(FALSE);
  772                 }
  773 
  774                 wdtsmptr->sm_status = 0;
  775                 wdtcib->cib_inst = SINST_STARTIO;
  776                 wdtcib->cib_status = KVTOPHYS(&wdtsm, int *);
  777                 s_ipl = splhi();
  778                 mIntr(cons_scsi, 7, SDEV_WATCHDOG);
  779                 splx(s_ipl);
  780 
  781                 while ((wdtsmptr->sm_status & SINST_INSDONE) == 0)
  782                         continue;
  783 
  784                 if (wdtsmptr->sm_status != SINST_INSDONE) {
  785                         printf("Cannot Restart Watchdog\n");
  786                         return_fw(FALSE);
  787                 }
  788         }
  789 
  790         (void) spl1();
  791 
  792         return_fw(do_reboot);
  793 }
  794 
  795 /*
  796  * return_fw()
  797  *      Return to Firmware.
  798  */
  799 
  800 return_fw(do_reboot)
  801         boolean_t       do_reboot;
  802 {
  803         register struct ctlr_toc *toc;
  804         register int    i;
  805         extern  boolean_t       conscsi_yet;
  806         extern  int     light_show;
  807         extern  char    *panicstr;
  808         extern  int     (*cust_panics[])();
  809 
  810         if (upyet)
  811                 (void) splhi();
  812 
  813 #if 0
  814         /*
  815          * If a panic, call custom panic handlers.
  816          */
  817 
  818         if (panicstr != NULL)
  819                 for (i = 0; cust_panics[i] != NULL; i++)
  820                         (*cust_panics[i])();
   821 #endif  /* 0 */
  822         /*
  823          * Get table of contents pointer for processor board.
  824          */
  825 
  826         toc = PHYSTOKV(&va_CD_LOC->c_toc[SLB_SGSPROCBOARD],
  827                         struct ctlr_toc *);             /* SGS processors */
  828 
  829         /*
  830          * Turn off light show - if enabled.
  831          * Since panic may be called before initialization is complete,
  832          * all front panel processor lights are turned off.
  833          */
  834 
  835         if (light_show) {
  836                 if (fp_lights) {
  837                         for (i = 0; i < toc->ct_count; i++)
  838                                 FP_LIGHTOFF(i);
  839                 }
  840                 *va_led = 0;
  841         }
  842 
  843         /*
  844          * If the console scsi has not yet received its INIT command
  845          * then use the powerup cib.
  846          */
  847 
  848         if (!conscsi_yet) {
  849                 struct sec_powerup *scp;
  850                 scp = PHYSTOKV(CD_LOC->c_cons->cd_sc_init_queue,
  851                                 struct sec_powerup *);
  852                 scp->pu_cib.cib_inst = SINST_RETTODIAG;
  853                 scp->pu_cib.cib_status = SRD_BREAK;
  854         } else {
  855                 cbdcib->cib_inst = SINST_RETTODIAG;
  856                 cbdcib->cib_status = (!do_reboot) ? SRD_BREAK : SRD_REBOOT;
  857         }
  858 #if     defined(DEBUG) && defined(i386) && !defined(KXX)        /*XXX*/
  859         flush_cache();                                          /*XXX*/
   860 #endif  /* DEBUG && i386 && !KXX */                             /*XXX*/
  861         mIntr(cons_scsi, 7, SDEV_SCSIBOARD);
  862 
  863         /*
  864          * SCED will take control.
  865          */
  866 
  867         for (;;);
  868         /*NOTREACHED*/
  869 }
  870 
  871 /*
  872  * Watchdog timer routines.
  873  *
  874  * Hit watchdog timer every half second.
  875  */
  876 
  877 /*
  878  * wdtinit()
  879  *      Initialize watchdog timeout interval.
  880  */
  881 
  882 wdtinit()
  883 {
  884         register struct sec_smode *wdtsmptr = &wdtsm;
  885         spl_t s_ipl;
  886 
  887         if (wdt_timeout <= 0)
  888                 return;
  889         wdtsmptr->sm_status = 0;
  890         wdtsmptr->sm_un.sm_wdt_mode = wdt_timeout;
  891 
  892         wdtcib->cib_inst = SINST_SETMODE;
  893         wdtcib->cib_status = KVTOPHYS(&wdtsm, int *);
  894         s_ipl = splhi();
  895         mIntr(cons_scsi, 7, SDEV_WATCHDOG);
  896         splx(s_ipl);
  897 
  898         while ((wdtsmptr->sm_status & SINST_INSDONE) == 0)
  899                 continue;
  900 
  901         if (wdtsmptr->sm_status != SINST_INSDONE)
  902                 panic("Initializing Watchdog");
  903         timeout(wdtreset, (caddr_t)0, hz/2);
  904 }
  905 
  906 wdtreset()
  907 {
  908         register struct sec_smode *wdtsmptr = &wdtsm;
  909         spl_t s_ipl;
  910 
  911         wdtsmptr->sm_status = 0;
  912         wdtcib->cib_inst = SINST_STARTIO;
  913         wdtcib->cib_status = KVTOPHYS(&wdtsm, int *);
  914 
  915         /*
  916          * Tell SCED about the command.  Bin 3 is sufficient, helps avoid
  917          * SLIC-bus saturation/lockup (since SCED interrupts Dynix mostly on
  918          * bins 4-7, using bin 3 to interrupt SCED gives SCED -> Dynix priority
  919          * over Dynix -> SCED, thus SCED won't deadlock against Dynix).
  920          */
  921 
  922         s_ipl = splhi();
  923         mIntr(cons_scsi, 3, SDEV_WATCHDOG);
  924         splx(s_ipl);
  925 
  926         while ((wdtsmptr->sm_status & SINST_INSDONE) == 0)
  927                 continue;
  928 
  929         if (wdtsmptr->sm_status != SINST_INSDONE)
  930                 panic("Resetting Watchdog");
  931         timeout(wdtreset, (caddr_t)0, hz/2);
  932 }
  933 
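       /*
        * light_off(cpu_num) / light_on(cpu_num)
        *      Turn the front-panel light and processor LED off or on
        *      for the given processor, when the light show is enabled.
        */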
  934 light_off(cpu_num)
  935 {
  936         if (light_show) {
  937                 DISABLE();
  938                 if (fp_lights)
  939                         FP_LIGHTOFF(cpu_num);
  940                 *va_led = 0;
  941                 ENABLE();
  942         }
  943 }
  944 
  945 light_on(cpu_num)
  946 {
  947         if (light_show) {
  948                 if (fp_lights)
  949                         FP_LIGHTON(cpu_num);
  950                 *va_led = 1;
  951         }
  952 }
  953 
  954 
  955 /*
  956  * access_error()
  957  *      Access Error Reporting:
  958  *              Bus timeouts
  959  *              ECC Uncorrectable
  960  *              Processor fatal error (SGS only)
  961  *
  962  * Called from NMI handler, SEC_error and MBAd_error with
  963  * copy of Access error register.
  964  *
  965  * Called at SPLHI.
  966  */
  967 
  968 access_error(errval)
  969         u_char errval;
  970 {
  971         register int io_access;
  972         register char *s;
  973         u_char acctype;
  974         extern  memerr();
  975 
  976         printf("Access Error Register = 0x%x\n", errval);
   977         errval = ~errval;               /* register bits appear active-low; invert before decoding */
  978         acctype = errval & SLB_ATMSK;
  979         io_access = errval & SLB_AEIO;
  980 
  981         switch (acctype) {
  982         case SLB_AEFATAL:
   983                 s = "Fatal";
  984                 break;
  985         case SLB_AENONFAT:
  986                 if (io_access)
  987                         s = "Non-Fatal";
  988                 else
  989                         s = "Ecc Correctable";
  990                 break;
  991         case SLB_AETIMOUT:
  992                 s = "Timeout";
  993                 break;
  994         default:
  995                 s = "Unknown";
  996                 break;
  997         }
  998         printf("%s error on %s %s.\n", s,
  999                 (errval & SLB_AEIO) ? "I/O" : "memory",
 1000                 (errval & SLB_AERD) ? "read" : "write");
 1001 
 1002         /*
 1003          * If memory error get more data...
 1004          */
 1005 
 1006         if ((acctype == SLB_AEFATAL) && (io_access != SLB_AEIO)) {
 1007                 if (upyet) {
 1008                         /*
 1009                          * Avoid races with memory polling.
 1010                          */
 1011                         untimeout(memerr, (caddr_t) 0);
 1012 #ifdef  notdef
 1013                         /*
 1014                          * If concurrent access errors, the loser of the
 1015                          * race commits suicide.
 1016                          *
 1017                          * Since we are about to die, do not bother releasing
 1018                          * lock.
 1019                          */
 1020                         if (cp_lock(&uncmem_lock, SPLHI) == CPLOCKFAIL) {
 1021                                 printf("Concurrent ECC Uncorrectable Error\n");
 1022                                 pause_self();
 1023                                 /*NOTREACHED*/
 1024                         }
  1025 #endif  /* notdef */
 1026                 }
 1027                 memlog();
 1028         }
 1029 }
 1030 
 1031 /*
 1032  * cpu_start()
 1033  *      Start another processor by "unpausing" it.
 1034  *
 1035  * Called by tmp_ctl TMP_ONLINE command.
 1036  *
 1037  * The semaphore tmp_onoff is assumed to be held by the caller.
 1038  * This semaphore guarantees that only one on/off line transaction
 1039  * occurs at a time.  No real need to single-thread these on SGS,
 1040  * but doesn't hurt and provides some basic sanity (who knows, maybe
 1041  * there is some hidden reason! ;-)
 1042  */
 1043 kern_return_t
 1044 cpu_start(engno)
 1045 {
 1046         register struct engine *eng = &engine[engno];
 1047         spl_t   s = splhi();
 1048 
 1049 #ifdef  notyet                          /* SGS VLSI doesn't support flush yet */
 1050         u_char  bic_slic;
 1051         u_char  chan_ctl;
 1052 
 1053         /*
 1054          * Re-enable the appropriate BIC channel (this is left disabled
 1055          * by an offline).  This is a NOP on early rev BIC's.
 1056          *
 1057          * Note that only one SLIC on the processor board talks to the BIC.
 1058          */
 1059 
 1060         bic_slic = BIC_SLIC(eng->e_slicaddr,
 1061                         slic_to_config[eng->e_slicaddr]->cd_flags);
 1062         chan_ctl = (eng->e_slicaddr & 0x01) ? BIC_CDIAGCTL1 : BIC_CDIAGCTL0;
 1063 
 1064         wrSubslave(bic_slic, PROC_BIC, chan_ctl, 
 1065                 (u_char) (rdSubslave(bic_slic, PROC_BIC, chan_ctl) & ~BICCDC_DISABLE));
  1066 #endif  /* notyet -- SGS VLSI doesn't support flush yet */
 1067 
 1068         /*
 1069          * Un-hold the processor, turn on the LED, and *don't* reset.
 1070          * Also enable NMI's: it's ok for 1st online (don't expect any NMI
 1071          * sources) and subsequent online's need NMI's enabled here since they
 1072          * don't execute localinit() to enable NMI's.  This gives small risk
 1073          * of strange crash if NMI is asserted on 1st online (since processor
 1074          * is an 8086 at this time); if a problem, need to keep state in
 1075          * e_flags whether the processor has ever been online'd before, and
 1076          * initialize PROC_CTL differently here 1st time vs subsequent times.
 1077          */
 1078 
 1079         if (light_show && fp_lights) {
 1080                 FP_LIGHTON(engno);
 1081         }
 1082 
 1083         wrslave(eng->e_slicaddr, PROC_CTL,
 1084                 PROC_CTL_NO_SSTEP | PROC_CTL_NO_HOLD | PROC_CTL_NO_RESET);
 1085 
 1086         splx(s);
 1087 
 1088         return KERN_SUCCESS;
 1089 }
 1090 
 1091 /*
 1092  * halt_engine()
 1093  *      Halt processor via pause and reset.
 1094  *
 1095  * Turn off processor light.
 1096  * Done implicitly via reset on B8K.
 1097  * If fp_lights then done explicitly.
 1098  *
 1099  * Called by tmp_ctl with TMP_OFFLINE command to shutdown a processor.
 1100  */
 1101 
 1102 #ifdef  notyet                          /* SGS VLSI doesn't support flush yet */
 1103 static  struct  proc_cmcs {
 1104         u_char  pc_subaddr;             /* sub-slave address of CMC */
 1105         u_int   pc_diag_flag;           /* flags -- all zero if CMC in use */
 1106 }       proc_cmcs[] = {
 1107         { PROC_CMC_0, CFG_SP_CMC0|CFG_SP_DRAM_0|CFG_SP_TRAM_0|CFG_SP_SRAM_0 },
 1108         { PROC_CMC_1, CFG_SP_CMC1|CFG_SP_DRAM_1|CFG_SP_TRAM_1|CFG_SP_SRAM_1 },
 1109 };
  1110 #endif  /* notyet -- SGS VLSI doesn't support flush yet */
 1111 
 1112 halt_engine(engno)
 1113 {
 1114         register struct engine *eng = &engine[engno];
 1115         spl_t   s = splhi();
 1116 
 1117 #ifdef  notyet                          /* SGS VLSI doesn't support flush yet */
 1118         register struct ctlr_desc *cd = slic_to_config[eng->e_slicaddr];
 1119         register struct proc_cmcs *pc;
 1120         register int    i;
 1121         u_char          bic_slic;
 1122         u_char          chan_ctl;
 1123         u_char          cmc_mode;
  1124 #endif  /* notyet -- SGS VLSI doesn't support flush yet */
 1125         u_char          slicid = eng->e_slicaddr;
 1126 
 1127         /*
 1128          * HOLD the processor, but don't reset it (also turn OFF led).
 1129          * Wait for processor to be HELD.
 1130          */
 1131 
 1132         wrslave(slicid, PROC_CTL,
 1133                         PROC_CTL_LED_OFF | PROC_CTL_NO_NMI |
 1134                         PROC_CTL_NO_SSTEP | PROC_CTL_NO_RESET);
 1135 
 1136         while (rdslave(slicid, PROC_STAT) & PROC_STAT_NO_HOLDA)
 1137                 continue;
 1138 
 1139 #ifdef  notyet                          /* SGS VLSI doesn't support flush yet */
 1140         /*
 1141          * NOTE: the flush algorithm is probably WRONG!  Verify/fix
 1142          * when VLSI does support the flush function.
 1143          */
 1144         /*
 1145          * Flush the processor's cache.
 1146          *
 1147          * For each cache set, if it passed all power-up diagnostics then
 1148          * tell the CMC it's a "master", flush it, make it a "slave" again.
 1149          */
 1150 
 1151         for (pc = proc_cmcs, i = 0; i < cd->cd_p_nsets; i++, pc++) {
 1152 
 1153                 /*
 1154                  * If any diagnostic flag is on, this cache set wasn't in use.
 1155                  */
 1156 
 1157                 if (eng->e_diag_flag & pc->pc_diag_flag)
 1158                         continue;
 1159 
 1160                 /*
 1161                  * Make the CMC the "master" and start the flush.
 1162                  */
 1163 
 1164                 cmc_mode = rdSubslave(slicid, pc->pc_subaddr, CMC_MODE);
 1165                 wrSubslave(slicid, pc->pc_subaddr, CMC_MODE,
 1166                                 cmc_mode & ~(CMCM_SLAVE | CMCM_DISA_FLUSH));
 1167 
 1168                 /*
 1169                  * Wait for flush to finish.
 1170                  */
 1171 
 1172                 while (rdSubslave(slicid, pc->pc_subaddr, CMC_STATUS) & CMCS_FLUSH)
 1173                         continue;
 1174 
 1175                 /*
 1176                  * Make the CMC a "slave" again.
 1177                  */
 1178 
 1179                 wrSubslave(slicid, pc->pc_subaddr, CMC_MODE, cmc_mode);
 1180         }
 1181 
 1182         /*
 1183          * Isolate the processor from the bus by turning off the appropriate
 1184          * BIC channel.  This is a NOP on early rev BIC's.
 1185          *
 1186          * Note that only one SLIC on the processor board talks to the BIC.
 1187          */
 1188 
 1189         bic_slic = BIC_SLIC(slicid, cd->cd_flags);
 1190         chan_ctl = (slicid & 0x01) ? BIC_CDIAGCTL1 : BIC_CDIAGCTL0;
 1191 
 1192         wrSubslave(bic_slic, PROC_BIC, chan_ctl, 
 1193                 (u_char) (rdSubslave(bic_slic, PROC_BIC, chan_ctl) | BICCDC_DISABLE));
  1194 #endif  /* notyet -- SGS VLSI doesn't support flush yet */
 1195 
 1196         if (light_show && fp_lights)
 1197                 FP_LIGHTOFF(engno);
 1198 
 1199         splx(s);
 1200 }
 1201 
 1202 
 1203 /*
 1204  * halt_cpu()
 1205  *      Arrange that processor get turned off.
 1206  *
 1207  * Called from MACH processor shutdown thread.
 1208  * Processor is already running on its private stack.
 1209  */
 1210 
 1211 halt_cpu()
 1212 {
 1213         int     engno = cpu_number();
 1214         int     slicid = engine[engno].e_slicaddr;
 1215 
 1216         /*
 1217          * Interrupts have already been flushed.
 1218          * If we drop IPL to let interrupts in, the clock interrupt
 1219          * handler will die because we have no thread.
 1220          */
 1221 #if 0
 1222         /*
 1223          * Insure no interrupts are pending on this processor.
 1224          * This is essentially a NOP until MACH is more symmetric
 1225          * and distributes interrupts to any processor.
 1226          */
 1227 
 1228         (void) splhi();
 1229         flush_intr();
 1230 #endif
 1231 
 1232         if (light_show && fp_lights)
 1233                 FP_LIGHTOFF(cpu_number());
 1234 
 1235         /*
 1236          * HOLD the processor, but don't reset it (also turn OFF led).
 1237          * Wait for processor to be HELD.
 1238          */
 1239 
 1240         wrslave(slicid, PROC_CTL,
 1241                         PROC_CTL_LED_OFF | PROC_CTL_NO_NMI |
 1242                         PROC_CTL_NO_SSTEP | PROC_CTL_NO_RESET);
 1243 
 1244         while (rdslave(slicid, PROC_STAT) & PROC_STAT_NO_HOLDA)
 1245                 continue;
 1246 
 1247         /*
 1248          * There is no escape!
 1249          */
 1250 
 1251         for(;;);
 1252         /*NOTREACHED*/
 1253 }
 1254 
 1255 /*
 1256  * flush_intr()
 1257  *      Flush pending interrupts.
 1258  *
 1259  * Used when shutting down processor to insure pending interrupts
 1260  * are cleared (and handled).
 1261  */
 1262 
 1263 flush_intr()
 1264 {
 1265         int     counter;
 1266 
 1267         /*
 1268          * While there is a pending (HW or SW) interrupt, open
 1269          * a window to let it in.
 1270          *
 1271          * Need to loop (to get branch) until HW chip-workaround is
 1272          * removed (only admit interrupts on non-sequential fetch).
 1273          */
 1274 
 1275         SLICPRI(0);                             /* try not to win arbitration */
 1276 
 1277         for (;;) {
 1278                 for (counter=0; counter < 100; counter++)
 1279                         continue;
 1280                 if ((va_slic->sl_ictl & (SL_HARDINT|SL_SOFTINT)) == 0)
 1281                         break;
 1282                 (void) spl0();
 1283                 for (counter=0; counter < 10; counter++)
 1284                         continue;               /* window to take int */
 1285                 (void) splhi();
 1286         }
 1287 }
 1288 
 1289 /*
 1290  * Send cross cpu interrupt for pmap update
 1291  */
 1292 interrupt_processor(cpu)
 1293         int     cpu;
 1294 {
 1295         spl_t s;
 1296 
 1297         s = splhi();
 1298         sendsoft(engine[cpu].e_slicaddr, PMAPUPDATE);
 1299         splx(s);
 1300 }
 1301 
 1302 /*
 1303  * Send soft-clock interrupt to master
 1304  */
 1305 setsoftclock()
 1306 {
 1307         spl_t   s;
 1308 
 1309         s = splhi();
 1310         sendsoft(mono_P_slic, SOFTCLOCK);
 1311         splx(s);
 1312 }
 1313 
 1314 /*
 1315  * Send cross cpu interrupt for debugger entry.
 1316  * Use NMI since software interrupt is lowest priority.
 1317  */
 1318 cpu_interrupt_to_db(cpu)
 1319         int     cpu;
 1320 {
 1321         spl_t   s;
 1322 
 1323         s = splhi();
 1324         nmIntr(engine[cpu].e_slicaddr, PAUSESELF);
 1325         splx(s);
 1326 }
 1327 
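       /*
        * panic_others()
        *      NMI every other running processor so that it pauses itself
        *      (see nmi_intr); presumably invoked during panic handling.
        */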
 1328 panic_others()
 1329 {
 1330         int i, me = cpu_number();
 1331 
 1332         for (i = 0; i < NCPUS; i++) {
 1333                 if (i == me)
 1334                         continue;
 1335                 if (machine_slot[i].is_cpu && machine_slot[i].running) {
 1336                         spl_t s;
 1337 
 1338                         s = splhi();
 1339                         nmIntr(engine[i].e_slicaddr, PAUSESELF);
 1340                         splx(s);
 1341                 }
 1342         }
 1343 }
 1344 
 1345 /*
 1346  * NMI interrupt.
 1347  * If for remote debugger entry, trap to debugger.
 1348  * Otherwise, panic (cause unknown so far).
 1349  */
 1350 extern void     allow_nmi();
 1351 extern void     remote_db_enter();
 1352 
 1353 int     sqt_proc_stat[NCPUS];
 1354 void
 1355 nmi_intr()
 1356 {
 1357         int     proc_flt;
 1358 
 1359         /*
 1360          * Read processor fault register to get NMI reason
 1361          */
 1362         proc_flt = rdslave(va_slic->sl_procid, PROC_FLT);
 1363         if ((proc_flt & PROC_FLT_SLIC_NMI) == 0) {
 1364             /*
  1365              * NMI triggered from the SLIC (the fault bit reads active-low).
 1366              * Another CPU has entered the kernel debugger.
 1367              * Allow further NMIs, then stop.
 1368              */
 1369 
 1370             /*
 1371              * Reset processor fault register (any write will do)
 1372              */
 1373             wrslave(va_slic->sl_procid, PROC_FLT, 0xbb);
 1374 
 1375             /*
 1376              * XXX must re-enable NMIs at SLIC register
 1377              */
 1378             sqt_proc_stat[cpu_number()] = 
 1379                 rdslave(va_slic->sl_procid, PROC_STAT);
 1380 
 1381             /*
 1382              * Toggle nmi accept in the processor control register
 1383              * so an NMI that arrived concurrently will be seen
 1384              * when NMIs are reenabled.
 1385              */
 1386             wrslave(va_slic->sl_procid, PROC_CTL,
 1387                         PROC_CTL_NO_NMI | PROC_CTL_NO_SSTEP | 
 1388                         PROC_CTL_NO_HOLD | PROC_CTL_NO_RESET);
 1389             wrslave(va_slic->sl_procid, PROC_CTL,
 1390                         PROC_CTL_NO_SSTEP | PROC_CTL_NO_HOLD |
 1391                         PROC_CTL_NO_RESET);
 1392             allow_nmi();
 1393             remote_db_enter();
 1394             return;
 1395         }
 1396 
 1397         /*
 1398          * Unknown NMI.  Panic.
 1399          */
 1400         panic("NMI");
 1401 }
 1402 
 1403 /*ARGSUSED*/
 1404 kern_return_t
 1405 cpu_control(slot_num, info, count)
 1406         int                     slot_num;
 1407         processor_info_t        info;
 1408         long                    *count;
 1409 {
 1410         return (KERN_FAILURE);
 1411 }
 1412 
 1413 
 1414 #include <mach/vm_prot.h>
 1415 #include <vm/pmap.h>
 1416 #include <mach/time_value.h>
 1417 
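       /*
        * timemmap()
        *      Evidently backs the mapped-time device: refuse write
        *      mappings and return the physical page frame of the
        *      kernel's mtime structure.
        */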
 1418 timemmap(dev,off,prot)
 1419         vm_prot_t prot;
 1420 {
 1421         extern time_value_t *mtime;
 1422 
 1423 #ifdef  lint
 1424         dev++; off++;
  1425 #endif  /* lint */
 1426 
 1427         if (prot & VM_PROT_WRITE) return (-1);
 1428 
 1429         return (i386_btop(pmap_extract(pmap_kernel(), (vm_offset_t) mtime)));
 1430 }
 1431 
 1432 /*
 1433  *      XXX These functions assume physical memory is contiguous.
 1434  *      What about page_exists?
 1435  */
 1436 
 1437 unsigned int pmap_free_pages()
 1438 {
 1439         return avail_remaining;
 1440 }
 1441 
 1442 boolean_t pmap_next_page(addrp)
 1443         vm_offset_t *addrp;
 1444 {
 1445         if (avail_next == avail_end)
 1446                 return FALSE;
 1447 
 1448         *addrp = avail_next;
 1449         avail_next += PAGE_SIZE;
 1450         avail_remaining--;
 1451         return TRUE;
 1452 }
 1453 
 1454 boolean_t pmap_valid_page(x)
 1455         vm_offset_t x;
 1456 {
 1457         return (avail_start <= x) && (x < avail_end);
 1458 }
