The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/ia64/ia64/machdep.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2003,2004 Marcel Moolenaar
    3  * Copyright (c) 2000,2001 Doug Rabson
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD: releng/6.0/sys/ia64/ia64/machdep.c 150112 2005-09-13 21:07:14Z marcel $");
   30 
   31 #include "opt_compat.h"
   32 #include "opt_ddb.h"
   33 #include "opt_kstack_pages.h"
   34 #include "opt_msgbuf.h"
   35 
   36 #include <sys/param.h>
   37 #include <sys/proc.h>
   38 #include <sys/systm.h>
   39 #include <sys/bio.h>
   40 #include <sys/buf.h>
   41 #include <sys/bus.h>
   42 #include <sys/cons.h>
   43 #include <sys/cpu.h>
   44 #include <sys/eventhandler.h>
   45 #include <sys/exec.h>
   46 #include <sys/imgact.h>
   47 #include <sys/kdb.h>
   48 #include <sys/kernel.h>
   49 #include <sys/linker.h>
   50 #include <sys/lock.h>
   51 #include <sys/malloc.h>
   52 #include <sys/mbuf.h>
   53 #include <sys/msgbuf.h>
   54 #include <sys/pcpu.h>
   55 #include <sys/ptrace.h>
   56 #include <sys/random.h>
   57 #include <sys/reboot.h>
   58 #include <sys/sched.h>
   59 #include <sys/signalvar.h>
   60 #include <sys/syscall.h>
   61 #include <sys/sysctl.h>
   62 #include <sys/sysproto.h>
   63 #include <sys/ucontext.h>
   64 #include <sys/uio.h>
   65 #include <sys/uuid.h>
   66 #include <sys/vmmeter.h>
   67 #include <sys/vnode.h>
   68 
   69 #include <ddb/ddb.h>
   70 
   71 #include <net/netisr.h>
   72 
   73 #include <vm/vm.h>
   74 #include <vm/vm_extern.h>
   75 #include <vm/vm_kern.h>
   76 #include <vm/vm_page.h>
   77 #include <vm/vm_map.h>
   78 #include <vm/vm_object.h>
   79 #include <vm/vm_pager.h>
   80 
   81 #include <machine/bootinfo.h>
   82 #include <machine/clock.h>
   83 #include <machine/cpu.h>
   84 #include <machine/efi.h>
   85 #include <machine/elf.h>
   86 #include <machine/fpu.h>
   87 #include <machine/mca.h>
   88 #include <machine/md_var.h>
   89 #include <machine/mutex.h>
   90 #include <machine/pal.h>
   91 #include <machine/pcb.h>
   92 #include <machine/reg.h>
   93 #include <machine/sal.h>
   94 #include <machine/sigframe.h>
   95 #ifdef SMP
   96 #include <machine/smp.h>
   97 #endif
   98 #include <machine/unwind.h>
   99 #include <machine/vmparam.h>
  100 
  101 #include <i386/include/specialreg.h>
  102 
/* CPU core, front-side bus and interval time counter frequencies in Hz. */
u_int64_t processor_frequency;
u_int64_t bus_frequency;
u_int64_t itc_frequency;
int cold = 1;                   /* Still cold-booting; cleared once up. */

/* Physical address of the bootinfo block handed to us by the loader. */
u_int64_t pa_bootinfo;
struct bootinfo bootinfo;

struct pcpu early_pcpu;         /* Per-CPU data used during early boot. */
extern char kstack[];           /* Statically allocated stack for proc0. */
vm_offset_t proc0kstack;

/* Kernel image boundary symbols (provided by the linker script). */
extern u_int64_t kernel_text[], _end[];

/* Gateway page and signal trampoline entry points (defined in assembly). */
extern u_int64_t ia64_gateway_page[];
extern u_int64_t break_sigtramp[];
extern u_int64_t epc_sigtramp[];

/* Floating-point software assist interface, located via bootinfo. */
struct fpswa_iface *fpswa_iface;

/* PAL code base and I/O port space base, found in the EFI memory map. */
u_int64_t ia64_pal_base;
u_int64_t ia64_port_base;

/* hw.machine: the architecture name. */
char machine[] = MACHINE;
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

/* hw.model: CPU model string, filled in by identifycpu(). */
static char cpu_model[64];
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0,
    "The CPU model name");

/* hw.family: CPU family string, filled in by identifycpu(). */
static char cpu_family[64];
SYSCTL_STRING(_hw, OID_AUTO, family, CTLFLAG_RD, cpu_family, 0,
    "The CPU family name");

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;        /* Debugger symbol range. */
#endif

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

struct msgbuf *msgbufp=0;       /* Kernel message buffer; set in ia64_init(). */

long Maxmem = 0;                /* Page count of usable memory (= physmem). */
long realmem = 0;               /* Bytes of real memory; set in cpu_startup(). */

/* {start, end} physical address pairs; a zero entry terminates the list. */
vm_offset_t phys_avail[100];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

void mi_startup(void);          /* XXX should be in a MI header */

struct kva_md_info kmi;         /* Kernel submap layout, for vm_ksubmap_init(). */

/* Frequency helpers used by identifycpu(). */
#define Mhz     1000000L
#define Ghz     (1000L*Mhz)
  161 static void
  162 identifycpu(void)
  163 {
  164         char vendor[17];
  165         char *family_name, *model_name;
  166         u_int64_t features, tmp;
  167         int number, revision, model, family, archrev;
  168 
  169         /*
  170          * Assumes little-endian.
  171          */
  172         *(u_int64_t *) &vendor[0] = ia64_get_cpuid(0);
  173         *(u_int64_t *) &vendor[8] = ia64_get_cpuid(1);
  174         vendor[16] = '\0';
  175 
  176         tmp = ia64_get_cpuid(3);
  177         number = (tmp >> 0) & 0xff;
  178         revision = (tmp >> 8) & 0xff;
  179         model = (tmp >> 16) & 0xff;
  180         family = (tmp >> 24) & 0xff;
  181         archrev = (tmp >> 32) & 0xff;
  182 
  183         family_name = model_name = "unknown";
  184         switch (family) {
  185         case 0x07:
  186                 family_name = "Itanium";
  187                 model_name = "Merced";
  188                 break;
  189         case 0x1f:
  190                 family_name = "Itanium 2";
  191                 switch (model) {
  192                 case 0x00:
  193                         model_name = "McKinley";
  194                         break;
  195                 case 0x01:
  196                         /*
  197                          * Deerfield is a low-voltage variant based on the
  198                          * Madison core. We need circumstantial evidence
  199                          * (i.e. the clock frequency) to identify those.
  200                          * Allow for roughly 1% error margin.
  201                          */
  202                         tmp = processor_frequency >> 7;
  203                         if ((processor_frequency - tmp) < 1*Ghz &&
  204                             (processor_frequency + tmp) >= 1*Ghz)
  205                                 model_name = "Deerfield";
  206                         else
  207                                 model_name = "Madison";
  208                         break;
  209                 case 0x02:
  210                         model_name = "Madison II";
  211                         break;
  212                 }
  213                 break;
  214         }
  215         snprintf(cpu_family, sizeof(cpu_family), "%s", family_name);
  216         snprintf(cpu_model, sizeof(cpu_model), "%s", model_name);
  217 
  218         features = ia64_get_cpuid(4);
  219 
  220         printf("CPU: %s (", model_name);
  221         if (processor_frequency) {
  222                 printf("%ld.%02ld-Mhz ",
  223                     (processor_frequency + 4999) / Mhz,
  224                     ((processor_frequency + 4999) / (Mhz/100)) % 100);
  225         }
  226         printf("%s)\n", family_name);
  227         printf("  Origin = \"%s\"  Revision = %d\n", vendor, revision);
  228         printf("  Features = 0x%b\n", (u_int32_t) features,
  229             "\020"
  230             "\001LB"    /* long branch (brl) instruction. */
  231             "\002SD"    /* Spontaneous deferral. */
  232             "\003AO"    /* 16-byte atomic operations (ld, st, cmpxchg). */ );
  233 }
  234 
  235 static void
  236 cpu_startup(dummy)
  237         void *dummy;
  238 {
  239 
  240         /*
  241          * Good {morning,afternoon,evening,night}.
  242          */
  243         identifycpu();
  244 
  245         /* startrtclock(); */
  246 #ifdef PERFMON
  247         perfmon_init();
  248 #endif
  249         printf("real memory  = %ld (%ld MB)\n", ia64_ptob(Maxmem),
  250             ia64_ptob(Maxmem) / 1048576);
  251         realmem = ia64_ptob(Maxmem);
  252 
  253         /*
  254          * Display any holes after the first chunk of extended memory.
  255          */
  256         if (bootverbose) {
  257                 int indx;
  258 
  259                 printf("Physical memory chunk(s):\n");
  260                 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
  261                         int size1 = phys_avail[indx + 1] - phys_avail[indx];
  262 
  263                         printf("0x%08lx - 0x%08lx, %d bytes (%d pages)\n", phys_avail[indx],
  264                             phys_avail[indx + 1] - 1, size1, size1 / PAGE_SIZE);
  265                 }
  266         }
  267 
  268         vm_ksubmap_init(&kmi);
  269 
  270         printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
  271             ptoa(cnt.v_free_count) / 1048576);
  272  
  273         if (fpswa_iface == NULL)
  274                 printf("Warning: no FPSWA package supplied\n");
  275         else
  276                 printf("FPSWA Revision = 0x%lx, Entry = %p\n",
  277                     (long)fpswa_iface->if_rev, (void *)fpswa_iface->if_fpswa);
  278 
  279         /*
  280          * Set up buffers, so they can be used to read disk labels.
  281          */
  282         bufinit();
  283         vm_pager_bufferinit();
  284 
  285         /*
  286          * Traverse the MADT to discover IOSAPIC and Local SAPIC
  287          * information.
  288          */
  289         ia64_probe_sapics();
  290         ia64_mca_init();
  291 }
  292 
/*
 * Reboot the machine.  The howto bits are ignored here; we simply ask
 * the EFI runtime services to reset the system.
 */
void
cpu_boot(int howto)
{

        efi_reset_system();
}
  299 
  300 /* Get current clock frequency for the given cpu id. */
  301 int
  302 cpu_est_clockrate(int cpu_id, uint64_t *rate)
  303 {
  304 
  305         if (pcpu_find(cpu_id) == NULL || rate == NULL)
  306                 return (EINVAL);
  307         *rate = processor_frequency;
  308         return (0);
  309 }
  310 
  311 void
  312 cpu_halt()
  313 {
  314 
  315         efi_reset_system();
  316 }
  317 
  318 static void
  319 cpu_idle_default(void)
  320 {
  321         struct ia64_pal_result res;
  322 
  323         res = ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
  324 }
  325 
  326 void
  327 cpu_idle()
  328 {
  329         (*cpu_idle_hook)();
  330 }
  331 
  332 /* Other subsystems (e.g., ACPI) can hook this later. */
  333 void (*cpu_idle_hook)(void) = cpu_idle_default;
  334 
  335 void
  336 cpu_reset()
  337 {
  338 
  339         cpu_boot(0);
  340 }
  341 
/*
 * Switch from thread `old' to thread `new'.  Saves the outgoing
 * context with savectx(); when that context is later resumed,
 * savectx() returns non-zero and execution continues after this
 * function in the restored thread.  On the save path (savectx()
 * returning 0) we switch pmaps, publish the new curthread and jump
 * into it via restorectx(), which does not return.
 */
void
cpu_switch(struct thread *old, struct thread *new)
{
        struct pcb *oldpcb, *newpcb;

        oldpcb = old->td_pcb;
#if COMPAT_IA32
        /* Save the outgoing thread's ia32 (x86) register state. */
        ia32_savectx(oldpcb);
#endif
        /*
         * If the outgoing thread owns the FP state, set DFH in its
         * trapframe -- presumably so its next high-FP access traps and
         * the state is reloaded lazily (NOTE(review): confirm against
         * the FP trap handler).
         */
        if (PCPU_GET(fpcurthread) == old)
                old->td_frame->tf_special.psr |= IA64_PSR_DFH;
        if (!savectx(oldpcb)) {
                /* Switch address space before publishing the new thread. */
                newpcb = new->td_pcb;
                oldpcb->pcb_current_pmap =
                    pmap_switch(newpcb->pcb_current_pmap);
                PCPU_SET(curthread, new);
#if COMPAT_IA32
                ia32_restorectx(newpcb);
#endif
                /* Incoming thread already owns the FP state: clear DFH. */
                if (PCPU_GET(fpcurthread) == new)
                        new->td_frame->tf_special.psr &= ~IA64_PSR_DFH;
                restorectx(newpcb);
                /* We should not get here. */
                panic("cpu_switch: restorectx() returned");
                /* NOTREACHED */
        }
}
  369 
/*
 * Switch to thread `new' without saving the current context (the old
 * thread is being discarded).  Like the resume half of cpu_switch():
 * switch pmaps, publish curthread and enter the new thread through
 * restorectx().  Never returns.
 */
void
cpu_throw(struct thread *old __unused, struct thread *new)
{
        struct pcb *newpcb;

        newpcb = new->td_pcb;
        (void)pmap_switch(newpcb->pcb_current_pmap);
        PCPU_SET(curthread, new);
#if COMPAT_IA32
        /* Restore the incoming thread's ia32 (x86) register state. */
        ia32_restorectx(newpcb);
#endif
        restorectx(newpcb);
        /* We should not get here. */
        panic("cpu_throw: restorectx() returned");
        /* NOTREACHED */
}
  386 
  387 void
  388 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
  389 {
  390         size_t pcpusz;
  391 
  392         /*
  393          * Make sure the PCB is 16-byte aligned by making the PCPU
  394          * a multiple of 16 bytes. We assume the PCPU is 16-byte
  395          * aligned itself.
  396          */
  397         pcpusz = (sizeof(struct pcpu) + 15) & ~15;
  398         KASSERT(size >= pcpusz + sizeof(struct pcb),
  399             ("%s: too small an allocation for pcpu", __func__));
  400         pcpu->pc_pcb = (struct pcb *)((char*)pcpu + pcpusz);
  401         pcpu->pc_acpi_id = cpuid;
  402 }
  403 
/*
 * Enter a spinlock section.  On the outermost acquisition (per-thread
 * nesting count 0) interrupts are disabled and their previous state is
 * saved for spinlock_exit() to restore.  A critical section is also
 * entered to prevent preemption.  Order matters: interrupts must be
 * off before the count is bumped.
 */
void
spinlock_enter(void)
{
        struct thread *td;

        td = curthread;
        if (td->td_md.md_spinlock_count == 0)
                td->td_md.md_saved_intr = intr_disable();
        td->td_md.md_spinlock_count++;
        critical_enter();
}
  415 
/*
 * Leave a spinlock section; the mirror image of spinlock_enter().
 * The critical section is exited first, and the interrupt state saved
 * by the outermost spinlock_enter() is restored only when the nesting
 * count drops back to zero.
 */
void
spinlock_exit(void)
{
        struct thread *td;

        td = curthread;
        critical_exit();
        td->td_md.md_spinlock_count--;
        if (td->td_md.md_spinlock_count == 0)
                intr_restore(td->td_md.md_saved_intr);
}
  427 
/*
 * Pin a translation for the PAL code region into instruction and data
 * translation register slot 1, so PAL procedures can be called in
 * virtual mode.  ia64_pal_base is discovered from the EFI memory map
 * in ia64_init(); this is a no-op if it was not found.
 */
void
map_pal_code(void)
{
        pt_entry_t pte;
        uint64_t psr;

        if (ia64_pal_base == 0)
                return;

        /* Kernel-only, write-back cacheable, read/write/execute PTE. */
        pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
            PTE_PL_KERN | PTE_AR_RWX;
        pte |= ia64_pal_base & PTE_PPN_MASK;

        /* Purge any existing translations covering the range. */
        __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
            "r"(IA64_PHYS_TO_RR7(ia64_pal_base)), "r"(IA64_ID_PAGE_SHIFT<<2));

        /* Save PSR, then disable interruption collection and interrupts. */
        __asm __volatile("mov   %0=psr" : "=r"(psr));
        __asm __volatile("rsm   psr.ic|psr.i");
        __asm __volatile("srlz.i");
        /* Set the faulting address and page-size registers for the insert. */
        __asm __volatile("mov   cr.ifa=%0" ::
            "r"(IA64_PHYS_TO_RR7(ia64_pal_base)));
        __asm __volatile("mov   cr.itir=%0" :: "r"(IA64_ID_PAGE_SHIFT << 2));
        /* Insert the mapping into data and instruction TR slot 1. */
        __asm __volatile("itr.d dtr[%0]=%1" :: "r"(1), "r"(pte));
        __asm __volatile("srlz.d");             /* XXX not needed. */
        __asm __volatile("itr.i itr[%0]=%1" :: "r"(1), "r"(pte));
        /* Restore the saved PSR and serialize. */
        __asm __volatile("mov   psr.l=%0" :: "r" (psr));
        __asm __volatile("srlz.i");
}
  456 
/*
 * Pin a translation for the gateway page (containing the signal
 * trampolines) into instruction and data translation register slot 3
 * at VM_MAX_ADDRESS, executable from user mode, and publish its
 * address to userland via the ar.k5 kernel register.
 */
void
map_gateway_page(void)
{
        pt_entry_t pte;
        uint64_t psr;

        /* Kernel-privileged, cacheable, execute/read PTE for the page. */
        pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
            PTE_PL_KERN | PTE_AR_X_RX;
        pte |= (uint64_t)ia64_gateway_page & PTE_PPN_MASK;

        /* Purge any existing translations covering the address. */
        __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
            "r"(VM_MAX_ADDRESS), "r"(PAGE_SHIFT << 2));

        /* Save PSR, then disable interruption collection and interrupts. */
        __asm __volatile("mov   %0=psr" : "=r"(psr));
        __asm __volatile("rsm   psr.ic|psr.i");
        __asm __volatile("srlz.i");
        /* Set the faulting address and page-size registers for the insert. */
        __asm __volatile("mov   cr.ifa=%0" :: "r"(VM_MAX_ADDRESS));
        __asm __volatile("mov   cr.itir=%0" :: "r"(PAGE_SHIFT << 2));
        /* Insert the mapping into data and instruction TR slot 3. */
        __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(pte));
        __asm __volatile("srlz.d");             /* XXX not needed. */
        __asm __volatile("itr.i itr[%0]=%1" :: "r"(3), "r"(pte));
        /* Restore the saved PSR and serialize. */
        __asm __volatile("mov   psr.l=%0" :: "r" (psr));
        __asm __volatile("srlz.i");

        /* Expose the mapping to userland in ar.k5 */
        ia64_set_k5(VM_MAX_ADDRESS);
}
  484 
/*
 * Derive the processor, bus and interval time counter frequencies from
 * firmware: SAL_FREQ_BASE supplies the platform base frequency and
 * PAL_FREQ_RATIOS supplies per-clock ratios, each packed with the
 * numerator in the upper 32 bits and the denominator in the lower 32.
 * The frequency globals are left untouched (zero at boot) if either
 * firmware call fails.  NOTE(review): assumes PAL returns non-zero
 * denominators when its status is 0 -- a zero would fault here.
 */
static void
calculate_frequencies(void)
{
        struct ia64_sal_result sal;
        struct ia64_pal_result pal;

        sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
        pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);

        if (sal.sal_status == 0 && pal.pal_status == 0) {
                if (bootverbose) {
                        printf("Platform clock frequency %ld Hz\n",
                               sal.sal_result[0]);
                        printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
                               "ITC ratio %ld/%ld\n",
                               pal.pal_result[0] >> 32,
                               pal.pal_result[0] & ((1L << 32) - 1),
                               pal.pal_result[1] >> 32,
                               pal.pal_result[1] & ((1L << 32) - 1),
                               pal.pal_result[2] >> 32,
                               pal.pal_result[2] & ((1L << 32) - 1));
                }
                /* frequency = base * numerator / denominator */
                processor_frequency =
                        sal.sal_result[0] * (pal.pal_result[0] >> 32)
                        / (pal.pal_result[0] & ((1L << 32) - 1));
                bus_frequency =
                        sal.sal_result[0] * (pal.pal_result[1] >> 32)
                        / (pal.pal_result[1] & ((1L << 32) - 1));
                itc_frequency =
                        sal.sal_result[0] * (pal.pal_result[2] >> 32)
                        / (pal.pal_result[2] & ((1L << 32) - 1));
        }
}
  518 
/*
 * Early machine-dependent kernel bootstrap.  Copies the loader's
 * bootinfo block, scans the EFI memory map for the I/O port space,
 * PAL code and usable RAM (filling phys_avail[] and physmem), brings
 * up the console and firmware interfaces, sets up thread0/proc0 and
 * the bootstrap CPU's per-CPU data, bootstraps the pmap, and finally
 * enters mi_startup().  Does not return.
 */
void
ia64_init(void)
{
        int phys_avail_cnt;
        vm_offset_t kernstart, kernend;
        vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1;
        char *p;
        struct efi_md *md;
        int metadata_missing;

        /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */

        /*
         * TODO: Disable interrupts, floating point etc.
         * Maybe flush cache and tlb
         */
        ia64_set_fpsr(IA64_FPSR_DEFAULT);

        /*
         * TODO: Get critical system information (if possible, from the
         * information provided by the boot program).
         */

        /*
         * pa_bootinfo is the physical address of the bootinfo block as
         * passed to us by the loader and set in locore.s.
         */
        bootinfo = *(struct bootinfo *)(IA64_PHYS_TO_RR7(pa_bootinfo));

        /* Fall back to sane defaults if the loader gave us garbage. */
        if (bootinfo.bi_magic != BOOTINFO_MAGIC || bootinfo.bi_version != 1) {
                bzero(&bootinfo, sizeof(bootinfo));
                bootinfo.bi_kernend = (vm_offset_t) round_page(_end);
        }

        /*
         * Look for the I/O ports first - we need them for console
         * probing.
         */
        for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
                switch (md->md_type) {
                case EFI_MD_TYPE_IOPORT:
                        ia64_port_base = IA64_PHYS_TO_RR6(md->md_phys);
                        break;
                case EFI_MD_TYPE_PALCODE:
                        ia64_pal_base = md->md_phys;
                        break;
                }
        }

        metadata_missing = 0;
        if (bootinfo.bi_modulep)
                preload_metadata = (caddr_t)bootinfo.bi_modulep;
        else
                metadata_missing = 1;
        /* Prefer a statically compiled-in environment, if configured. */
        if (envmode == 1)
                kern_envp = static_env;
        else
                kern_envp = (caddr_t)bootinfo.bi_envp;

        /*
         * Look at arguments passed to us and compute boothowto.
         */
        boothowto = bootinfo.bi_boothowto;

        /*
         * Catch case of boot_verbose set in environment.
         */
        if ((p = getenv("boot_verbose")) != NULL) {
                if (strcmp(p, "yes") == 0 || strcmp(p, "YES") == 0) {
                        boothowto |= RB_VERBOSE;
                }
                freeenv(p);
        }

        if (boothowto & RB_VERBOSE)
                bootverbose = 1;

        /*
         * Initialize the console before we print anything out.
         */
        cninit();

        /* OUTPUT NOW ALLOWED */

        if (ia64_pal_base != 0) {
                ia64_pal_base &= ~IA64_ID_PAGE_MASK;
                /*
                 * We use a TR to map the first 256M of memory - this might
                 * cover the palcode too.
                 */
                if (ia64_pal_base == 0)
                        printf("PAL code mapped by the kernel's TR\n");
        } else
                printf("PAL code not found\n");

        /*
         * Wire things up so we can call the firmware.
         */
        map_pal_code();
        efi_boot_minimal(bootinfo.bi_systab);
        ia64_sal_init();
        calculate_frequencies();

        /*
         * Find the beginning and end of the kernel.
         */
        kernstart = trunc_page(kernel_text);
#ifdef DDB
        ksym_start = bootinfo.bi_symtab;
        ksym_end = bootinfo.bi_esymtab;
        kernend = (vm_offset_t)round_page(ksym_end);
#else
        kernend = (vm_offset_t)round_page(_end);
#endif

        /* But if the bootstrap tells us otherwise, believe it! */
        if (bootinfo.bi_kernend)
                kernend = round_page(bootinfo.bi_kernend);
        if (metadata_missing)
                printf("WARNING: loader(8) metadata is missing!\n");

        /* Get FPSWA interface */
        fpswa_iface = (bootinfo.bi_fpswa == 0) ? NULL :
            (struct fpswa_iface *)IA64_PHYS_TO_RR7(bootinfo.bi_fpswa);

        /* Init basic tunables, including hz */
        init_param1();

        p = getenv("kernelname");
        if (p) {
                strncpy(kernelname, p, sizeof(kernelname) - 1);
                freeenv(p);
        }

        kernstartpfn = atop(IA64_RR_MASK(kernstart));
        kernendpfn = atop(IA64_RR_MASK(kernend));

        /*
         * Size the memory regions and load phys_avail[] with the results.
         */

        /*
         * Find out how much memory is available, by looking at
         * the memory descriptors.
         */

#ifdef DEBUG_MD
        printf("Memory descriptor count: %d\n", mdcount);
#endif

        phys_avail_cnt = 0;
        for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
#ifdef DEBUG_MD
                printf("MD %p: type %d pa 0x%lx cnt 0x%lx\n", md,
                    md->md_type, md->md_phys, md->md_pages);
#endif

                /* md_pages is in 4KB EFI pages; convert to our page frames. */
                pfn0 = ia64_btop(round_page(md->md_phys));
                pfn1 = ia64_btop(trunc_page(md->md_phys + md->md_pages * 4096));
                if (pfn1 <= pfn0)
                        continue;

                if (md->md_type != EFI_MD_TYPE_FREE)
                        continue;

                /*
                 * Wimp out for now since we do not DTRT here with
                 * pci bus mastering (no bounce buffering, for example).
                 */
                if (pfn0 >= ia64_btop(0x100000000UL)) {
                        printf("Skipping memory chunk start 0x%lx\n",
                            md->md_phys);
                        continue;
                }
                if (pfn1 >= ia64_btop(0x100000000UL)) {
                        printf("Skipping memory chunk end 0x%lx\n",
                            md->md_phys + md->md_pages * 4096);
                        continue;
                }

                /*
                 * We have a memory descriptor that describes conventional
                 * memory that is for general use. We must determine if the
                 * loader has put the kernel in this region.
                 */
                physmem += (pfn1 - pfn0);
                if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) {
                        /*
                         * Must compute the location of the kernel
                         * within the segment.
                         */
#ifdef DEBUG_MD
                        printf("Descriptor %p contains kernel\n", mp);
#endif
                        if (pfn0 < kernstartpfn) {
                                /*
                                 * There is a chunk before the kernel.
                                 */
#ifdef DEBUG_MD
                                printf("Loading chunk before kernel: "
                                       "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
#endif
                                phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
                                phys_avail[phys_avail_cnt+1] = ia64_ptob(kernstartpfn);
                                phys_avail_cnt += 2;
                        }
                        if (kernendpfn < pfn1) {
                                /*
                                 * There is a chunk after the kernel.
                                 */
#ifdef DEBUG_MD
                                printf("Loading chunk after kernel: "
                                       "0x%lx / 0x%lx\n", kernendpfn, pfn1);
#endif
                                phys_avail[phys_avail_cnt] = ia64_ptob(kernendpfn);
                                phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
                                phys_avail_cnt += 2;
                        }
                } else {
                        /*
                         * Just load this cluster as one chunk.
                         */
#ifdef DEBUG_MD
                        printf("Loading descriptor %d: 0x%lx / 0x%lx\n", i,
                               pfn0, pfn1);
#endif
                        phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
                        phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
                        phys_avail_cnt += 2;
                        
                }
        }
        /* A zero entry terminates the phys_avail[] list. */
        phys_avail[phys_avail_cnt] = 0;

        Maxmem = physmem;
        init_param2(physmem);

        /*
         * Initialize error message buffer (at end of core).
         */
        msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
        msgbufinit(msgbufp, MSGBUF_SIZE);

        proc_linkup(&proc0, &ksegrp0, &thread0);
        /*
         * Init mapping for kernel stack for proc 0
         */
        proc0kstack = (vm_offset_t)kstack;
        thread0.td_kstack = proc0kstack;
        thread0.td_kstack_pages = KSTACK_PAGES;

        /*
         * Setup the global data for the bootstrap cpu.
         */
        pcpup = (struct pcpu *)pmap_steal_memory(PAGE_SIZE);
        ia64_set_k4((u_int64_t)pcpup);
        pcpu_init(pcpup, 0, PAGE_SIZE);
        PCPU_SET(curthread, &thread0);

        mutex_init();

        /*
         * Initialize the rest of proc 0's PCB.
         *
         * Set the kernel sp, reserving space for an (empty) trapframe,
         * and make proc0's trapframe pointer point to it for sanity.
         * Initialise proc0's backing store to start after u area.
         */
        cpu_thread_setup(&thread0);
        thread0.td_frame->tf_flags = FRAME_SYSCALL;
        thread0.td_pcb->pcb_special.sp =
            (u_int64_t)thread0.td_frame - 16;
        thread0.td_pcb->pcb_special.bspstore = thread0.td_kstack;

        /*
         * Initialize the virtual memory system.
         */
        pmap_bootstrap();

        /*
         * Initialize debuggers, and break into them if appropriate.
         */
        kdb_init();

#ifdef KDB
        if (boothowto & RB_KDB)
                kdb_enter("Boot flags requested debugger\n");
#endif

        ia64_set_tpr(0);

        /*
         * Save our current context so that we have a known (maybe even
         * sane) context as the initial context for new threads that are
         * forked from us. If any of those threads (including thread0)
         * does something wrong, we may be lucky and return here where
         * we're ready for them with a nice panic.
         */
        if (!savectx(thread0.td_pcb))
                mi_startup();

        /* We should not get here. */
        panic("ia64_init: Whooaa there!");
        /* NOTREACHED */
}
  824 
/*
 * Return the address of the HCDP (headless console/debug port) table
 * as recorded by the loader in the bootinfo block; 0 if none.
 */
uint64_t
ia64_get_hcdp(void)
{

        return (bootinfo.bi_hcdp);
}
  831 
  832 void
  833 bzero(void *buf, size_t len)
  834 {
  835         caddr_t p = buf;
  836 
  837         while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
  838                 *p++ = 0;
  839                 len--;
  840         }
  841         while (len >= sizeof(u_long) * 8) {
  842                 *(u_long*) p = 0;
  843                 *((u_long*) p + 1) = 0;
  844                 *((u_long*) p + 2) = 0;
  845                 *((u_long*) p + 3) = 0;
  846                 len -= sizeof(u_long) * 8;
  847                 *((u_long*) p + 4) = 0;
  848                 *((u_long*) p + 5) = 0;
  849                 *((u_long*) p + 6) = 0;
  850                 *((u_long*) p + 7) = 0;
  851                 p += sizeof(u_long) * 8;
  852         }
  853         while (len >= sizeof(u_long)) {
  854                 *(u_long*) p = 0;
  855                 len -= sizeof(u_long);
  856                 p += sizeof(u_long);
  857         }
  858         while (len) {
  859                 *p++ = 0;
  860                 len--;
  861         }
  862 }
  863 
/*
 * Busy-wait for at least 'n' microseconds by spinning on the
 * interval time counter (via ia64_get_itc()).
 */
void
DELAY(int n)
{
	u_int64_t start, end, now;

	start = ia64_get_itc();
	/* itc_frequency is in ticks per second; scale from microseconds. */
	end = start + (itc_frequency * n) / 1000000;
	/* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */
	/*
	 * The second clause of the condition keeps us spinning when
	 * 'end' wrapped around the 64-bit counter but 'now' hasn't yet.
	 */
	do {
		now = ia64_get_itc();
	} while (now < end || (now > start && end < start));
}
  876 
  877 /*
  878  * Send an interrupt (signal) to a process.
  879  */
  880 void
  881 sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
  882 {
  883         struct proc *p;
  884         struct thread *td;
  885         struct trapframe *tf;
  886         struct sigacts *psp;
  887         struct sigframe sf, *sfp;
  888         u_int64_t sbs, sp;
  889         int oonstack;
  890 
  891         td = curthread;
  892         p = td->td_proc;
  893         PROC_LOCK_ASSERT(p, MA_OWNED);
  894         psp = p->p_sigacts;
  895         mtx_assert(&psp->ps_mtx, MA_OWNED);
  896         tf = td->td_frame;
  897         sp = tf->tf_special.sp;
  898         oonstack = sigonstack(sp);
  899         sbs = 0;
  900 
  901         /* save user context */
  902         bzero(&sf, sizeof(struct sigframe));
  903         sf.sf_uc.uc_sigmask = *mask;
  904         sf.sf_uc.uc_stack = td->td_sigstk;
  905         sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
  906             ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
  907 
  908         /*
  909          * Allocate and validate space for the signal handler
  910          * context. Note that if the stack is in P0 space, the
  911          * call to grow() is a nop, and the useracc() check
  912          * will fail if the process has not already allocated
  913          * the space with a `brk'.
  914          */
  915         if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
  916             SIGISMEMBER(psp->ps_sigonstack, sig)) {
  917                 sbs = (u_int64_t)td->td_sigstk.ss_sp;
  918                 sbs = (sbs + 15) & ~15;
  919                 sfp = (struct sigframe *)(sbs + td->td_sigstk.ss_size);
  920 #if defined(COMPAT_43)
  921                 td->td_sigstk.ss_flags |= SS_ONSTACK;
  922 #endif
  923         } else
  924                 sfp = (struct sigframe *)sp;
  925         sfp = (struct sigframe *)((u_int64_t)(sfp - 1) & ~15);
  926 
  927         /* Fill in the siginfo structure for POSIX handlers. */
  928         if (SIGISMEMBER(psp->ps_siginfo, sig)) {
  929                 sf.sf_si.si_signo = sig;
  930                 sf.sf_si.si_code = code;
  931                 sf.sf_si.si_addr = (void*)tf->tf_special.ifa;
  932                 code = (u_int64_t)&sfp->sf_si;
  933         }
  934 
  935         mtx_unlock(&psp->ps_mtx);
  936         PROC_UNLOCK(p);
  937 
  938         get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
  939 
  940         /* Copy the frame out to userland. */
  941         if (copyout(&sf, sfp, sizeof(sf)) != 0) {
  942                 /*
  943                  * Process has trashed its stack; give it an illegal
  944                  * instruction to halt it in its tracks.
  945                  */
  946                 PROC_LOCK(p);
  947                 sigexit(td, SIGILL);
  948                 return;
  949         }
  950 
  951         if ((tf->tf_flags & FRAME_SYSCALL) == 0) {
  952                 tf->tf_special.psr &= ~IA64_PSR_RI;
  953                 tf->tf_special.iip = ia64_get_k5() +
  954                     ((uint64_t)break_sigtramp - (uint64_t)ia64_gateway_page);
  955         } else
  956                 tf->tf_special.iip = ia64_get_k5() +
  957                     ((uint64_t)epc_sigtramp - (uint64_t)ia64_gateway_page);
  958 
  959         /*
  960          * Setup the trapframe to return to the signal trampoline. We pass
  961          * information to the trampoline in the following registers:
  962          *
  963          *      gp      new backing store or NULL
  964          *      r8      signal number
  965          *      r9      signal code or siginfo pointer
  966          *      r10     signal handler (function descriptor)
  967          */
  968         tf->tf_special.sp = (u_int64_t)sfp - 16;
  969         tf->tf_special.gp = sbs;
  970         tf->tf_special.bspstore = sf.sf_uc.uc_mcontext.mc_special.bspstore;
  971         tf->tf_special.ndirty = 0;
  972         tf->tf_special.rnat = sf.sf_uc.uc_mcontext.mc_special.rnat;
  973         tf->tf_scratch.gr8 = sig;
  974         tf->tf_scratch.gr9 = code;
  975         tf->tf_scratch.gr10 = (u_int64_t)catcher;
  976 
  977         PROC_LOCK(p);
  978         mtx_lock(&psp->ps_mtx);
  979 }
  980 
  981 /*
  982  * Build siginfo_t for SA thread
  983  */
  984 void
  985 cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
  986 {
  987         struct proc *p;
  988         struct thread *td;
  989 
  990         td = curthread;
  991         p = td->td_proc;
  992         PROC_LOCK_ASSERT(p, MA_OWNED);
  993 
  994         bzero(si, sizeof(*si));
  995         si->si_signo = sig;
  996         si->si_code = code;
  997         /* XXXKSE fill other fields */
  998 }
  999 
 1000 /*
 1001  * System call to cleanup state after a signal
 1002  * has been taken.  Reset signal mask and
 1003  * stack state from context left by sendsig (above).
 1004  * Return to previous pc and psl as specified by
 1005  * context left by sendsig. Check carefully to
 1006  * make sure that the user has not modified the
 1007  * state to gain improper privileges.
 1008  *
 1009  * MPSAFE
 1010  */
 1011 int
 1012 sigreturn(struct thread *td,
 1013         struct sigreturn_args /* {
 1014                 ucontext_t *sigcntxp;
 1015         } */ *uap)
 1016 {
 1017         ucontext_t uc;
 1018         struct trapframe *tf;
 1019         struct proc *p;
 1020         struct pcb *pcb;
 1021 
 1022         tf = td->td_frame;
 1023         p = td->td_proc;
 1024         pcb = td->td_pcb;
 1025 
 1026         /*
 1027          * Fetch the entire context structure at once for speed.
 1028          * We don't use a normal argument to simplify RSE handling.
 1029          */
 1030         if (copyin(uap->sigcntxp, (caddr_t)&uc, sizeof(uc)))
 1031                 return (EFAULT);
 1032 
 1033         set_mcontext(td, &uc.uc_mcontext);
 1034 
 1035         PROC_LOCK(p);
 1036 #if defined(COMPAT_43)
 1037         if (sigonstack(tf->tf_special.sp))
 1038                 td->td_sigstk.ss_flags |= SS_ONSTACK;
 1039         else
 1040                 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
 1041 #endif
 1042         td->td_sigmask = uc.uc_sigmask;
 1043         SIG_CANTMASK(td->td_sigmask);
 1044         signotify(td);
 1045         PROC_UNLOCK(p);
 1046 
 1047         return (EJUSTRETURN);
 1048 }
 1049 
 1050 #ifdef COMPAT_FREEBSD4
 1051 int
 1052 freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
 1053 {
 1054 
 1055         return sigreturn(td, (struct sigreturn_args *)uap);
 1056 }
 1057 #endif
 1058 
 1059 /*
 1060  * Construct a PCB from a trapframe. This is called from kdb_trap() where
 1061  * we want to start a backtrace from the function that caused us to enter
 1062  * the debugger. We have the context in the trapframe, but base the trace
 1063  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 1064  * enough for a backtrace.
 1065  */
 1066 void
 1067 makectx(struct trapframe *tf, struct pcb *pcb)
 1068 {
 1069 
 1070         pcb->pcb_special = tf->tf_special;
 1071         pcb->pcb_special.__spare = ~0UL;        /* XXX see unwind.c */
 1072         save_callee_saved(&pcb->pcb_preserved);
 1073         save_callee_saved_fp(&pcb->pcb_preserved_fp);
 1074 }
 1075 
/*
 * Flush the thread's dirty register stack words from the kernel
 * backing store out to the thread's user backing store. On return,
 * r->bspstore has been advanced past the flushed words and r->ndirty
 * is zero. For curthread the flush is done with the RSE stopped
 * (ar.rsc=0) and copyout(); for any other thread the words are
 * written through proc_rwmem(). Returns 0 on success or an errno.
 */
int
ia64_flush_dirty(struct thread *td, struct _special *r)
{
	struct iovec iov;
	struct uio uio;
	uint64_t bspst, kstk, rnat;
	int error;

	if (r->ndirty == 0)
		return (0);

	/* The low 9 bits of bspstore give the offset into the kstack. */
	kstk = td->td_kstack + (r->bspstore & 0x1ffUL);
	if (td == curthread) {
		/* Stop the RSE while we read ar.bspstore/ar.rnat. */
		__asm __volatile("mov	ar.rsc=0;;");
		__asm __volatile("mov	%0=ar.bspstore" : "=r"(bspst));
		/* Make sure we have all the user registers written out. */
		if (bspst - kstk < r->ndirty) {
			__asm __volatile("flushrs;;");
			__asm __volatile("mov	%0=ar.bspstore" : "=r"(bspst));
		}
		__asm __volatile("mov	%0=ar.rnat;;" : "=r"(rnat));
		__asm __volatile("mov	ar.rsc=3");
		error = copyout((void*)kstk, (void*)r->bspstore, r->ndirty);
		kstk += r->ndirty;
		/*
		 * Take the NaT collection word from the backing store when
		 * one was spilled past the flushed region; otherwise
		 * ar.rnat still holds the bits we need.
		 */
		r->rnat = (bspst > kstk && (bspst & 0x1ffL) < (kstk & 0x1ffL))
		    ? *(uint64_t*)(kstk | 0x1f8L) : rnat;
	} else {
		/* Not curthread: write through the process VM instead. */
		iov.iov_base = (void*)(uintptr_t)kstk;
		iov.iov_len = r->ndirty;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = r->bspstore;
		uio.uio_resid = r->ndirty;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_WRITE;
		uio.uio_td = td;
		error = proc_rwmem(td->td_proc, &uio);
		/*
		 * XXX proc_rwmem() doesn't currently return ENOSPC,
		 * so I think it can bogusly return 0. Neither do
		 * we allow short writes.
		 */
		if (uio.uio_resid != 0 && error == 0)
			error = ENOSPC;
	}

	r->bspstore += r->ndirty;
	r->ndirty = 0;
	return (error);
}
 1126 
/*
 * Fill in '*mc' from the thread's trapframe. The dirty register
 * stack words are flushed to the user backing store first, so
 * mc_special describes a fully spilled register stack. With
 * GET_MC_CLEAR_RET in 'flags', the syscall return registers
 * (r8-r11) are zeroed in the copied-out context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
	struct trapframe *tf;
	int error;

	tf = td->td_frame;
	bzero(mc, sizeof(*mc));
	mc->mc_special = tf->tf_special;
	/* Flush dirty words; updates mc_special.{bspstore,ndirty,rnat}. */
	error = ia64_flush_dirty(td, &mc->mc_special);
	if (tf->tf_flags & FRAME_SYSCALL) {
		mc->mc_flags |= _MC_FLAGS_SYSCALL_CONTEXT;
		mc->mc_scratch = tf->tf_scratch;
		if (flags & GET_MC_CLEAR_RET) {
			/* Don't expose stale syscall return values. */
			mc->mc_scratch.gr8 = 0;
			mc->mc_scratch.gr9 = 0;
			mc->mc_scratch.gr10 = 0;
			mc->mc_scratch.gr11 = 0;
		}
	} else {
		/* Async (trap/interrupt) frame: FP scratch state is live. */
		mc->mc_flags |= _MC_FLAGS_ASYNC_CONTEXT;
		mc->mc_scratch = tf->tf_scratch;
		mc->mc_scratch_fp = tf->tf_scratch_fp;
		/*
		 * XXX If the thread never used the high FP registers, we
		 * probably shouldn't waste time saving them.
		 */
		ia64_highfp_save(td);
		mc->mc_flags |= _MC_FLAGS_HIGHFP_VALID;
		mc->mc_high_fp = td->td_pcb->pcb_high_fp;
	}
	save_callee_saved(&mc->mc_preserved);
	save_callee_saved_fp(&mc->mc_preserved_fp);
	return (error);
}
 1162 
/*
 * Install '*mc' as the thread's user context. Only the user-settable
 * PSR bits are taken from the new context; everything else in psr is
 * kept from the current trapframe. The new context is assumed to have
 * no dirty register stack words (see ia64_flush_dirty()). Always
 * returns 0.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mc)
{
	struct _special s;
	struct trapframe *tf;
	uint64_t psrmask;

	tf = td->td_frame;

	KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
	    ("Whoa there! We have more than 8KB of dirty registers!"));

	s = mc->mc_special;
	/*
	 * Only copy the user mask and the restart instruction bit from
	 * the new context.
	 */
	psrmask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
	    IA64_PSR_MFH | IA64_PSR_RI;
	s.psr = (tf->tf_special.psr & ~psrmask) | (s.psr & psrmask);
	/* We don't have any dirty registers of the new context. */
	s.ndirty = 0;
	if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
		/*
		 * We can get an async context passed to us while we
		 * entered the kernel through a syscall: sigreturn(2)
		 * and kse_switchin(2) both take contexts that could
		 * previously be the result of a trap or interrupt.
		 * Hence, we cannot assert that the trapframe is not
		 * a syscall frame, but we can assert that it's at
		 * least an expected syscall.
		 */
		if (tf->tf_flags & FRAME_SYSCALL) {
			KASSERT(tf->tf_scratch.gr15 == SYS_sigreturn ||
			    tf->tf_scratch.gr15 == SYS_kse_switchin, ("foo"));
			tf->tf_flags &= ~FRAME_SYSCALL;
		}
		tf->tf_scratch = mc->mc_scratch;
		tf->tf_scratch_fp = mc->mc_scratch_fp;
		if (mc->mc_flags & _MC_FLAGS_HIGHFP_VALID)
			td->td_pcb->pcb_high_fp = mc->mc_high_fp;
	} else {
		KASSERT((tf->tf_flags & FRAME_SYSCALL) != 0, ("foo"));
		if ((mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) == 0) {
			/* Synchronous non-syscall context: keep our own
			 * cfm/iip so the epc return path stays intact. */
			s.cfm = tf->tf_special.cfm;
			s.iip = tf->tf_special.iip;
			tf->tf_scratch.gr15 = 0;	/* Clear syscall nr. */
		} else
			tf->tf_scratch = mc->mc_scratch;
	}
	tf->tf_special = s;
	restore_callee_saved(&mc->mc_preserved);
	restore_callee_saved_fp(&mc->mc_preserved_fp);

	/* KSE mailbox update: store mc_special.isr at mc_special.ifa. */
	if (mc->mc_flags & _MC_FLAGS_KSE_SET_MBOX)
		suword((caddr_t)mc->mc_special.ifa, mc->mc_special.isr);

	return (0);
}
 1222 
 1223 /*
 1224  * Clear registers on exec.
 1225  */
 1226 void
 1227 exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
 1228 {
 1229         struct trapframe *tf;
 1230         uint64_t *ksttop, *kst;
 1231 
 1232         tf = td->td_frame;
 1233         ksttop = (uint64_t*)(td->td_kstack + tf->tf_special.ndirty +
 1234             (tf->tf_special.bspstore & 0x1ffUL));
 1235 
 1236         /*
 1237          * We can ignore up to 8KB of dirty registers by masking off the
 1238          * lower 13 bits in exception_restore() or epc_syscall(). This
 1239          * should be enough for a couple of years, but if there are more
 1240          * than 8KB of dirty registers, we lose track of the bottom of
 1241          * the kernel stack. The solution is to copy the active part of
 1242          * the kernel stack down 1 page (or 2, but not more than that)
 1243          * so that we always have less than 8KB of dirty registers.
 1244          */
 1245         KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
 1246             ("Whoa there! We have more than 8KB of dirty registers!"));
 1247 
 1248         bzero(&tf->tf_special, sizeof(tf->tf_special));
 1249         if ((tf->tf_flags & FRAME_SYSCALL) == 0) {      /* break syscalls. */
 1250                 bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
 1251                 bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
 1252                 tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
 1253                 tf->tf_special.bspstore = IA64_BACKINGSTORE;
 1254                 /*
 1255                  * Copy the arguments onto the kernel register stack so that
 1256                  * they get loaded by the loadrs instruction. Skip over the
 1257                  * NaT collection points.
 1258                  */
 1259                 kst = ksttop - 1;
 1260                 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
 1261                         *kst-- = 0;
 1262                 *kst-- = 0;
 1263                 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
 1264                         *kst-- = 0;
 1265                 *kst-- = ps_strings;
 1266                 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
 1267                         *kst-- = 0;
 1268                 *kst = stack;
 1269                 tf->tf_special.ndirty = (ksttop - kst) << 3;
 1270         } else {                                /* epc syscalls (default). */
 1271                 tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL;
 1272                 tf->tf_special.bspstore = IA64_BACKINGSTORE + 24;
 1273                 /*
 1274                  * Write values for out0, out1 and out2 to the user's backing
 1275                  * store and arrange for them to be restored into the user's
 1276                  * initial register frame.
 1277                  * Assumes that (bspstore & 0x1f8) < 0x1e0.
 1278                  */
 1279                 suword((caddr_t)tf->tf_special.bspstore - 24, stack);
 1280                 suword((caddr_t)tf->tf_special.bspstore - 16, ps_strings);
 1281                 suword((caddr_t)tf->tf_special.bspstore -  8, 0);
 1282         }
 1283 
 1284         tf->tf_special.iip = entry;
 1285         tf->tf_special.sp = (stack & ~15) - 16;
 1286         tf->tf_special.rsc = 0xf;
 1287         tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
 1288         tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
 1289             IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
 1290             IA64_PSR_CPL_USER;
 1291 }
 1292 
 1293 int
 1294 ptrace_set_pc(struct thread *td, unsigned long addr)
 1295 {
 1296         uint64_t slot;
 1297 
 1298         switch (addr & 0xFUL) {
 1299         case 0:
 1300                 slot = IA64_PSR_RI_0;
 1301                 break;
 1302         case 1:
 1303                 /* XXX we need to deal with MLX bundles here */
 1304                 slot = IA64_PSR_RI_1;
 1305                 break;
 1306         case 2:
 1307                 slot = IA64_PSR_RI_2;
 1308                 break;
 1309         default:
 1310                 return (EINVAL);
 1311         }
 1312 
 1313         td->td_frame->tf_special.iip = addr & ~0x0FULL;
 1314         td->td_frame->tf_special.psr =
 1315             (td->td_frame->tf_special.psr & ~IA64_PSR_RI) | slot;
 1316         return (0);
 1317 }
 1318 
 1319 int
 1320 ptrace_single_step(struct thread *td)
 1321 {
 1322         struct trapframe *tf;
 1323 
 1324         /*
 1325          * There's no way to set single stepping when we're leaving the
 1326          * kernel through the EPC syscall path. The way we solve this is
 1327          * by enabling the lower-privilege trap so that we re-enter the
 1328          * kernel as soon as the privilege level changes. See trap.c for
 1329          * how we proceed from there.
 1330          */
 1331         tf = td->td_frame;
 1332         if (tf->tf_flags & FRAME_SYSCALL)
 1333                 tf->tf_special.psr |= IA64_PSR_LP;
 1334         else
 1335                 tf->tf_special.psr |= IA64_PSR_SS;
 1336         return (0);
 1337 }
 1338 
 1339 int
 1340 ptrace_clear_single_step(struct thread *td)
 1341 {
 1342         struct trapframe *tf;
 1343 
 1344         /*
 1345          * Clear any and all status bits we may use to implement single
 1346          * stepping.
 1347          */
 1348         tf = td->td_frame;
 1349         tf->tf_special.psr &= ~IA64_PSR_SS;
 1350         tf->tf_special.psr &= ~IA64_PSR_LP;
 1351         tf->tf_special.psr &= ~IA64_PSR_TB;
 1352         return (0);
 1353 }
 1354 
 1355 int
 1356 fill_regs(struct thread *td, struct reg *regs)
 1357 {
 1358         struct trapframe *tf;
 1359 
 1360         tf = td->td_frame;
 1361         regs->r_special = tf->tf_special;
 1362         regs->r_scratch = tf->tf_scratch;
 1363         save_callee_saved(&regs->r_preserved);
 1364         return (0);
 1365 }
 1366 
/*
 * Replace the thread's general register state with 'regs'. Dirty
 * register stack words are flushed to the user backing store first
 * so the old state cannot leak into the new one.
 */
int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;
	int error;

	tf = td->td_frame;
	error = ia64_flush_dirty(td, &tf->tf_special);
	if (!error) {
		tf->tf_special = regs->r_special;
		/* Treat the supplied context as fully spilled. */
		tf->tf_special.bspstore += tf->tf_special.ndirty;
		tf->tf_special.ndirty = 0;
		tf->tf_scratch = regs->r_scratch;
		restore_callee_saved(&regs->r_preserved);
	}
	return (error);
}
 1384 
/* Hardware debug registers are not supported; always fails. */
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}
 1391 
/* Hardware debug registers are not supported; always fails. */
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}
 1398 
 1399 int
 1400 fill_fpregs(struct thread *td, struct fpreg *fpregs)
 1401 {
 1402         struct trapframe *frame = td->td_frame;
 1403         struct pcb *pcb = td->td_pcb;
 1404 
 1405         /* Save the high FP registers. */
 1406         ia64_highfp_save(td);
 1407 
 1408         fpregs->fpr_scratch = frame->tf_scratch_fp;
 1409         save_callee_saved_fp(&fpregs->fpr_preserved);
 1410         fpregs->fpr_high = pcb->pcb_high_fp;
 1411         return (0);
 1412 }
 1413 
 1414 int
 1415 set_fpregs(struct thread *td, struct fpreg *fpregs)
 1416 {
 1417         struct trapframe *frame = td->td_frame;
 1418         struct pcb *pcb = td->td_pcb;
 1419 
 1420         /* Throw away the high FP registers (should be redundant). */
 1421         ia64_highfp_drop(td);
 1422 
 1423         frame->tf_scratch_fp = fpregs->fpr_scratch;
 1424         restore_callee_saved_fp(&fpregs->fpr_preserved);
 1425         pcb->pcb_high_fp = fpregs->fpr_high;
 1426         return (0);
 1427 }
 1428 
 1429 /*
 1430  * High FP register functions.
 1431  */
 1432 
 1433 int
 1434 ia64_highfp_drop(struct thread *td)
 1435 {
 1436         struct pcb *pcb;
 1437         struct pcpu *cpu;
 1438         struct thread *thr;
 1439 
 1440         mtx_lock_spin(&td->td_md.md_highfp_mtx);
 1441         pcb = td->td_pcb;
 1442         cpu = pcb->pcb_fpcpu;
 1443         if (cpu == NULL) {
 1444                 mtx_unlock_spin(&td->td_md.md_highfp_mtx);
 1445                 return (0);
 1446         }
 1447         pcb->pcb_fpcpu = NULL;
 1448         thr = cpu->pc_fpcurthread;
 1449         cpu->pc_fpcurthread = NULL;
 1450         mtx_unlock_spin(&td->td_md.md_highfp_mtx);
 1451 
 1452         /* Post-mortem sanity checking. */
 1453         KASSERT(thr == td, ("Inconsistent high FP state"));
 1454         return (1);
 1455 }
 1456 
/*
 * Save the thread's high FP registers into its PCB. If (under SMP)
 * another CPU holds the state, ask it to save via IPI and wait for
 * the PCB to be disassociated. If the registers were never modified
 * (psr.mfh clear), the state is dropped instead of saved. Returns 0
 * if no CPU held the state, 1 otherwise.
 */
int
ia64_highfp_save(struct thread *td)
{
	struct pcb *pcb;
	struct pcpu *cpu;
	struct thread *thr;

	/* Don't save if the high FP registers weren't modified. */
	if ((td->td_frame->tf_special.psr & IA64_PSR_MFH) == 0)
		return (ia64_highfp_drop(td));

	mtx_lock_spin(&td->td_md.md_highfp_mtx);
	pcb = td->td_pcb;
	cpu = pcb->pcb_fpcpu;
	if (cpu == NULL) {
		/* No CPU holds the state; nothing to save. */
		mtx_unlock_spin(&td->td_md.md_highfp_mtx);
		return (0);
	}
#ifdef SMP
	if (td == curthread)
		sched_pin();
	if (cpu != pcpup) {
		/* A remote CPU has the registers; make it save them. */
		mtx_unlock_spin(&td->td_md.md_highfp_mtx);
		ipi_send(cpu, IPI_HIGH_FP);
		if (td == curthread)
			sched_unpin();
		/* Spin until the remote CPU disassociates the PCB. */
		while (pcb->pcb_fpcpu == cpu)
			DELAY(100);
		return (1);
	} else {
		save_high_fp(&pcb->pcb_high_fp);
		if (td == curthread)
			sched_unpin();
	}
#else
	save_high_fp(&pcb->pcb_high_fp);
#endif
	pcb->pcb_fpcpu = NULL;
	thr = cpu->pc_fpcurthread;
	cpu->pc_fpcurthread = NULL;
	mtx_unlock_spin(&td->td_md.md_highfp_mtx);

	/* Post-mortem sanity checking. */
	KASSERT(thr == td, ("Inconsistent high FP state"));
	return (1);
}
 1503 
/* No speaker hardware support here; beeping always fails. */
int
sysbeep(int pitch, int period)
{
	return (ENODEV);
}

Cache object: 0dc0f8b0b7329c7ae3aec7ce6dd1daef


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.