The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/ia64/ia64/machdep.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2003,2004 Marcel Moolenaar
    3  * Copyright (c) 2000,2001 Doug Rabson
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD: releng/6.4/sys/ia64/ia64/machdep.c 161844 2006-09-01 22:15:57Z marcel $");
   30 
   31 #include "opt_compat.h"
   32 #include "opt_ddb.h"
   33 #include "opt_kstack_pages.h"
   34 #include "opt_msgbuf.h"
   35 
   36 #include <sys/param.h>
   37 #include <sys/proc.h>
   38 #include <sys/systm.h>
   39 #include <sys/bio.h>
   40 #include <sys/buf.h>
   41 #include <sys/bus.h>
   42 #include <sys/cons.h>
   43 #include <sys/cpu.h>
   44 #include <sys/eventhandler.h>
   45 #include <sys/exec.h>
   46 #include <sys/imgact.h>
   47 #include <sys/kdb.h>
   48 #include <sys/kernel.h>
   49 #include <sys/linker.h>
   50 #include <sys/lock.h>
   51 #include <sys/malloc.h>
   52 #include <sys/mbuf.h>
   53 #include <sys/msgbuf.h>
   54 #include <sys/pcpu.h>
   55 #include <sys/ptrace.h>
   56 #include <sys/random.h>
   57 #include <sys/reboot.h>
   58 #include <sys/sched.h>
   59 #include <sys/signalvar.h>
   60 #include <sys/syscall.h>
   61 #include <sys/sysctl.h>
   62 #include <sys/sysproto.h>
   63 #include <sys/ucontext.h>
   64 #include <sys/uio.h>
   65 #include <sys/uuid.h>
   66 #include <sys/vmmeter.h>
   67 #include <sys/vnode.h>
   68 
   69 #include <ddb/ddb.h>
   70 
   71 #include <net/netisr.h>
   72 
   73 #include <vm/vm.h>
   74 #include <vm/vm_extern.h>
   75 #include <vm/vm_kern.h>
   76 #include <vm/vm_page.h>
   77 #include <vm/vm_map.h>
   78 #include <vm/vm_object.h>
   79 #include <vm/vm_pager.h>
   80 
   81 #include <machine/bootinfo.h>
   82 #include <machine/clock.h>
   83 #include <machine/cpu.h>
   84 #include <machine/efi.h>
   85 #include <machine/elf.h>
   86 #include <machine/fpu.h>
   87 #include <machine/mca.h>
   88 #include <machine/md_var.h>
   89 #include <machine/mutex.h>
   90 #include <machine/pal.h>
   91 #include <machine/pcb.h>
   92 #include <machine/reg.h>
   93 #include <machine/sal.h>
   94 #include <machine/sigframe.h>
   95 #ifdef SMP
   96 #include <machine/smp.h>
   97 #endif
   98 #include <machine/unwind.h>
   99 #include <machine/vmparam.h>
  100 
  101 #include <i386/include/specialreg.h>
  102 
  103 u_int64_t processor_frequency;
  104 u_int64_t bus_frequency;
  105 u_int64_t itc_frequency;
  106 int cold = 1;
  107 
  108 u_int64_t pa_bootinfo;
  109 struct bootinfo bootinfo;
  110 
  111 struct pcpu early_pcpu;
  112 extern char kstack[]; 
  113 vm_offset_t proc0kstack;
  114 
  115 extern u_int64_t kernel_text[], _end[];
  116 
  117 extern u_int64_t ia64_gateway_page[];
  118 extern u_int64_t break_sigtramp[];
  119 extern u_int64_t epc_sigtramp[];
  120 
  121 struct fpswa_iface *fpswa_iface;
  122 
  123 u_int64_t ia64_pal_base;
  124 u_int64_t ia64_port_base;
  125 
  126 char machine[] = MACHINE;
  127 SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
  128 
  129 static char cpu_model[64];
  130 SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0,
  131     "The CPU model name");
  132 
  133 static char cpu_family[64];
  134 SYSCTL_STRING(_hw, OID_AUTO, family, CTLFLAG_RD, cpu_family, 0,
  135     "The CPU family name");
  136 
  137 #ifdef DDB
  138 extern vm_offset_t ksym_start, ksym_end;
  139 #endif
  140 
  141 static void cpu_startup(void *);
  142 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
  143 
  144 struct msgbuf *msgbufp=0;
  145 
  146 long Maxmem = 0;
  147 long realmem = 0;
  148 
  149 vm_offset_t phys_avail[100];
  150 
  151 /* must be 2 less so 0 0 can signal end of chunks */
  152 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
  153 
  154 void mi_startup(void);          /* XXX should be in a MI header */
  155 
  156 struct kva_md_info kmi;
  157 
  158 #define Mhz     1000000L
  159 #define Ghz     (1000L*Mhz)
  160 
  161 static void
  162 identifycpu(void)
  163 {
  164         char vendor[17];
  165         char *family_name, *model_name;
  166         u_int64_t features, tmp;
  167         int number, revision, model, family, archrev;
  168 
  169         /*
  170          * Assumes little-endian.
  171          */
  172         *(u_int64_t *) &vendor[0] = ia64_get_cpuid(0);
  173         *(u_int64_t *) &vendor[8] = ia64_get_cpuid(1);
  174         vendor[16] = '\0';
  175 
  176         tmp = ia64_get_cpuid(3);
  177         number = (tmp >> 0) & 0xff;
  178         revision = (tmp >> 8) & 0xff;
  179         model = (tmp >> 16) & 0xff;
  180         family = (tmp >> 24) & 0xff;
  181         archrev = (tmp >> 32) & 0xff;
  182 
  183         family_name = model_name = "unknown";
  184         switch (family) {
  185         case 0x07:
  186                 family_name = "Itanium";
  187                 model_name = "Merced";
  188                 break;
  189         case 0x1f:
  190                 family_name = "Itanium 2";
  191                 switch (model) {
  192                 case 0x00:
  193                         model_name = "McKinley";
  194                         break;
  195                 case 0x01:
  196                         /*
  197                          * Deerfield is a low-voltage variant based on the
  198                          * Madison core. We need circumstantial evidence
  199                          * (i.e. the clock frequency) to identify those.
  200                          * Allow for roughly 1% error margin.
  201                          */
  202                         tmp = processor_frequency >> 7;
  203                         if ((processor_frequency - tmp) < 1*Ghz &&
  204                             (processor_frequency + tmp) >= 1*Ghz)
  205                                 model_name = "Deerfield";
  206                         else
  207                                 model_name = "Madison";
  208                         break;
  209                 case 0x02:
  210                         model_name = "Madison II";
  211                         break;
  212                 }
  213                 break;
  214         case 0x20:
  215                 family_name = "Itanium 2";
  216                 switch (model) {
  217                 case 0x00:
  218                         model_name = "Montecito";
  219                         break;
  220                 }
  221                 break;
  222         }
  223         snprintf(cpu_family, sizeof(cpu_family), "%s", family_name);
  224         snprintf(cpu_model, sizeof(cpu_model), "%s", model_name);
  225 
  226         features = ia64_get_cpuid(4);
  227 
  228         printf("CPU: %s (", model_name);
  229         if (processor_frequency) {
  230                 printf("%ld.%02ld-Mhz ",
  231                     (processor_frequency + 4999) / Mhz,
  232                     ((processor_frequency + 4999) / (Mhz/100)) % 100);
  233         }
  234         printf("%s)\n", family_name);
  235         printf("  Origin = \"%s\"  Revision = %d\n", vendor, revision);
  236         printf("  Features = 0x%b\n", (u_int32_t) features,
  237             "\020"
  238             "\001LB"    /* long branch (brl) instruction. */
  239             "\002SD"    /* Spontaneous deferral. */
  240             "\003AO"    /* 16-byte atomic operations (ld, st, cmpxchg). */ );
  241 }
  242 
  243 static void
  244 cpu_startup(dummy)
  245         void *dummy;
  246 {
  247 
  248         /*
  249          * Good {morning,afternoon,evening,night}.
  250          */
  251         identifycpu();
  252 
  253         /* startrtclock(); */
  254 #ifdef PERFMON
  255         perfmon_init();
  256 #endif
  257         printf("real memory  = %ld (%ld MB)\n", ia64_ptob(Maxmem),
  258             ia64_ptob(Maxmem) / 1048576);
  259         realmem = Maxmem;
  260 
  261         /*
  262          * Display any holes after the first chunk of extended memory.
  263          */
  264         if (bootverbose) {
  265                 int indx;
  266 
  267                 printf("Physical memory chunk(s):\n");
  268                 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
  269                         int size1 = phys_avail[indx + 1] - phys_avail[indx];
  270 
  271                         printf("0x%08lx - 0x%08lx, %d bytes (%d pages)\n", phys_avail[indx],
  272                             phys_avail[indx + 1] - 1, size1, size1 / PAGE_SIZE);
  273                 }
  274         }
  275 
  276         vm_ksubmap_init(&kmi);
  277 
  278         printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
  279             ptoa(cnt.v_free_count) / 1048576);
  280  
  281         if (fpswa_iface == NULL)
  282                 printf("Warning: no FPSWA package supplied\n");
  283         else
  284                 printf("FPSWA Revision = 0x%lx, Entry = %p\n",
  285                     (long)fpswa_iface->if_rev, (void *)fpswa_iface->if_fpswa);
  286 
  287         /*
  288          * Set up buffers, so they can be used to read disk labels.
  289          */
  290         bufinit();
  291         vm_pager_bufferinit();
  292 
  293         /*
  294          * Traverse the MADT to discover IOSAPIC and Local SAPIC
  295          * information.
  296          */
  297         ia64_probe_sapics();
  298         ia64_mca_init();
  299 }
  300 
/*
 * Reboot the machine.  The howto flags are ignored here; the EFI
 * runtime firmware performs the actual reset.
 */
void
cpu_boot(int howto)
{

        efi_reset_system();
}
  307 
  308 /* Get current clock frequency for the given cpu id. */
  309 int
  310 cpu_est_clockrate(int cpu_id, uint64_t *rate)
  311 {
  312 
  313         if (pcpu_find(cpu_id) == NULL || rate == NULL)
  314                 return (EINVAL);
  315         *rate = processor_frequency;
  316         return (0);
  317 }
  318 
  319 void
  320 cpu_halt()
  321 {
  322 
  323         efi_reset_system();
  324 }
  325 
  326 static void
  327 cpu_idle_default(void)
  328 {
  329         struct ia64_pal_result res;
  330 
  331         res = ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
  332 }
  333 
  334 void
  335 cpu_idle()
  336 {
  337         (*cpu_idle_hook)();
  338 }
  339 
  340 /* Other subsystems (e.g., ACPI) can hook this later. */
  341 void (*cpu_idle_hook)(void) = cpu_idle_default;
  342 
  343 void
  344 cpu_reset()
  345 {
  346 
  347         cpu_boot(0);
  348 }
  349 
/*
 * Switch CPU context from thread 'old' to thread 'new'.  The outgoing
 * context is saved with savectx() and the incoming thread resumed with
 * restorectx(), which does not return here: savectx() returns zero on
 * the save path (setjmp/longjmp style — compare the savectx() use in
 * ia64_init()).
 */
void
cpu_switch(struct thread *old, struct thread *new)
{
        struct pcb *oldpcb, *newpcb;

        oldpcb = old->td_pcb;
#ifdef COMPAT_IA32
        ia32_savectx(oldpcb);
#endif
        /*
         * If 'old' owns the FP state, set the DFH (disabled FP high)
         * bit in its saved PSR — presumably so the high FP register
         * state is reloaded lazily on its next FP use; confirm against
         * the trap handler.
         */
        if (PCPU_GET(fpcurthread) == old)
                old->td_frame->tf_special.psr |= IA64_PSR_DFH;
        if (!savectx(oldpcb)) {
                /* Save path: activate the new thread's pmap and PCB. */
                newpcb = new->td_pcb;
                oldpcb->pcb_current_pmap =
                    pmap_switch(newpcb->pcb_current_pmap);
                PCPU_SET(curthread, new);
#ifdef COMPAT_IA32
                ia32_restorectx(newpcb);
#endif
                /* 'new' still owns the FP state: clear DFH again. */
                if (PCPU_GET(fpcurthread) == new)
                        new->td_frame->tf_special.psr &= ~IA64_PSR_DFH;
                restorectx(newpcb);
                /* We should not get here. */
                panic("cpu_switch: restorectx() returned");
                /* NOTREACHED */
        }
}
  377 
/*
 * Like cpu_switch(), but the outgoing thread's context is discarded
 * instead of saved ('old' is unused).  Resumes 'new' via restorectx()
 * and therefore never returns.
 */
void
cpu_throw(struct thread *old __unused, struct thread *new)
{
        struct pcb *newpcb;

        newpcb = new->td_pcb;
        (void)pmap_switch(newpcb->pcb_current_pmap);
        PCPU_SET(curthread, new);
#ifdef COMPAT_IA32
        ia32_restorectx(newpcb);
#endif
        restorectx(newpcb);
        /* We should not get here. */
        panic("cpu_throw: restorectx() returned");
        /* NOTREACHED */
}
  394 
  395 void
  396 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
  397 {
  398         size_t pcpusz;
  399 
  400         /*
  401          * Make sure the PCB is 16-byte aligned by making the PCPU
  402          * a multiple of 16 bytes. We assume the PCPU is 16-byte
  403          * aligned itself.
  404          */
  405         pcpusz = (sizeof(struct pcpu) + 15) & ~15;
  406         KASSERT(size >= pcpusz + sizeof(struct pcb),
  407             ("%s: too small an allocation for pcpu", __func__));
  408         pcpu->pc_pcb = (struct pcb *)((char*)pcpu + pcpusz);
  409         pcpu->pc_acpi_id = cpuid;
  410 }
  411 
  412 void
  413 spinlock_enter(void)
  414 {
  415         struct thread *td;
  416 
  417         td = curthread;
  418         if (td->td_md.md_spinlock_count == 0)
  419                 td->td_md.md_saved_intr = intr_disable();
  420         td->td_md.md_spinlock_count++;
  421         critical_enter();
  422 }
  423 
  424 void
  425 spinlock_exit(void)
  426 {
  427         struct thread *td;
  428 
  429         td = curthread;
  430         critical_exit();
  431         td->td_md.md_spinlock_count--;
  432         if (td->td_md.md_spinlock_count == 0)
  433                 intr_restore(td->td_md.md_saved_intr);
  434 }
  435 
/*
 * Insert translation register pair 1 (itr/dtr) mapping the PAL code at
 * its region-7 (kernel) virtual address so the firmware can be called.
 * No-op when no PAL code descriptor was found (ia64_pal_base == 0).
 */
void
map_pal_code(void)
{
        pt_entry_t pte;
        uint64_t psr;

        if (ia64_pal_base == 0)
                return;

        /* Kernel-privileged RWX mapping, write-back cacheable. */
        pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
            PTE_PL_KERN | PTE_AR_RWX;
        pte |= ia64_pal_base & PTE_PPN_MASK;

        /* Purge any existing translations for the target address. */
        __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
            "r"(IA64_PHYS_TO_RR7(ia64_pal_base)), "r"(IA64_ID_PAGE_SHIFT<<2));

        /*
         * Insert the translation with interruption collection and
         * interrupts disabled (rsm psr.ic|psr.i), then restore the
         * saved PSR.  The statement order below is mandatory.
         */
        __asm __volatile("mov   %0=psr" : "=r"(psr));
        __asm __volatile("rsm   psr.ic|psr.i");
        __asm __volatile("srlz.i");
        __asm __volatile("mov   cr.ifa=%0" ::
            "r"(IA64_PHYS_TO_RR7(ia64_pal_base)));
        __asm __volatile("mov   cr.itir=%0" :: "r"(IA64_ID_PAGE_SHIFT << 2));
        __asm __volatile("itr.d dtr[%0]=%1" :: "r"(1), "r"(pte));
        __asm __volatile("srlz.d");             /* XXX not needed. */
        __asm __volatile("itr.i itr[%0]=%1" :: "r"(1), "r"(pte));
        __asm __volatile("mov   psr.l=%0" :: "r" (psr));
        __asm __volatile("srlz.i");
}
  464 
/*
 * Map the gateway page (shared kernel/user trampoline code — see the
 * break/epc signal trampoline symbols above) at VM_MAX_ADDRESS using
 * translation register pair 3, and publish the address to userland in
 * the ar.k5 kernel register.
 */
void
map_gateway_page(void)
{
        pt_entry_t pte;
        uint64_t psr;

        /* Kernel-privileged, executable mapping of the gateway page. */
        pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
            PTE_PL_KERN | PTE_AR_X_RX;
        pte |= (uint64_t)ia64_gateway_page & PTE_PPN_MASK;

        /* Purge any existing translations at VM_MAX_ADDRESS. */
        __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
            "r"(VM_MAX_ADDRESS), "r"(PAGE_SHIFT << 2));

        /*
         * Insert the translation with interruption collection and
         * interrupts disabled, then restore the saved PSR.  The
         * statement order below is mandatory.
         */
        __asm __volatile("mov   %0=psr" : "=r"(psr));
        __asm __volatile("rsm   psr.ic|psr.i");
        __asm __volatile("srlz.i");
        __asm __volatile("mov   cr.ifa=%0" :: "r"(VM_MAX_ADDRESS));
        __asm __volatile("mov   cr.itir=%0" :: "r"(PAGE_SHIFT << 2));
        __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(pte));
        __asm __volatile("srlz.d");             /* XXX not needed. */
        __asm __volatile("itr.i itr[%0]=%1" :: "r"(3), "r"(pte));
        __asm __volatile("mov   psr.l=%0" :: "r" (psr));
        __asm __volatile("srlz.i");

        /* Expose the mapping to userland in ar.k5 */
        ia64_set_k5(VM_MAX_ADDRESS);
}
  492 
  493 static void
  494 calculate_frequencies(void)
  495 {
  496         struct ia64_sal_result sal;
  497         struct ia64_pal_result pal;
  498 
  499         sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
  500         pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);
  501 
  502         if (sal.sal_status == 0 && pal.pal_status == 0) {
  503                 if (bootverbose) {
  504                         printf("Platform clock frequency %ld Hz\n",
  505                                sal.sal_result[0]);
  506                         printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
  507                                "ITC ratio %ld/%ld\n",
  508                                pal.pal_result[0] >> 32,
  509                                pal.pal_result[0] & ((1L << 32) - 1),
  510                                pal.pal_result[1] >> 32,
  511                                pal.pal_result[1] & ((1L << 32) - 1),
  512                                pal.pal_result[2] >> 32,
  513                                pal.pal_result[2] & ((1L << 32) - 1));
  514                 }
  515                 processor_frequency =
  516                         sal.sal_result[0] * (pal.pal_result[0] >> 32)
  517                         / (pal.pal_result[0] & ((1L << 32) - 1));
  518                 bus_frequency =
  519                         sal.sal_result[0] * (pal.pal_result[1] >> 32)
  520                         / (pal.pal_result[1] & ((1L << 32) - 1));
  521                 itc_frequency =
  522                         sal.sal_result[0] * (pal.pal_result[2] >> 32)
  523                         / (pal.pal_result[2] & ((1L << 32) - 1));
  524         }
  525 }
  526 
/*
 * Machine-dependent kernel initialization, called from the locore
 * startup code.  Reads the loader's bootinfo block, brings up the
 * console and firmware interfaces, builds the phys_avail[] map from
 * the EFI memory descriptors, sets up proc0/thread0 and the VM system,
 * and finally enters mi_startup().  Does not return (panics if the
 * saved context is ever resumed).
 */
void
ia64_init(void)
{
        int phys_avail_cnt;
        vm_offset_t kernstart, kernend;
        vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1;
        char *p;
        struct efi_md *md;
        int metadata_missing;

        /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */

        /*
         * TODO: Disable interrupts, floating point etc.
         * Maybe flush cache and tlb
         */
        ia64_set_fpsr(IA64_FPSR_DEFAULT);

        /*
         * TODO: Get critical system information (if possible, from the
         * information provided by the boot program).
         */

        /*
         * pa_bootinfo is the physical address of the bootinfo block as
         * passed to us by the loader and set in locore.s.
         */
        bootinfo = *(struct bootinfo *)(IA64_PHYS_TO_RR7(pa_bootinfo));

        /* Fall back to sane defaults if the loader gave us garbage. */
        if (bootinfo.bi_magic != BOOTINFO_MAGIC || bootinfo.bi_version != 1) {
                bzero(&bootinfo, sizeof(bootinfo));
                bootinfo.bi_kernend = (vm_offset_t) round_page(_end);
        }

        /*
         * Look for the I/O ports first - we need them for console
         * probing.
         */
        for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
                switch (md->md_type) {
                case EFI_MD_TYPE_IOPORT:
                        ia64_port_base = IA64_PHYS_TO_RR6(md->md_phys);
                        break;
                case EFI_MD_TYPE_PALCODE:
                        ia64_pal_base = md->md_phys;
                        break;
                }
        }

        metadata_missing = 0;
        if (bootinfo.bi_modulep)
                preload_metadata = (caddr_t)bootinfo.bi_modulep;
        else
                metadata_missing = 1;
        if (envmode == 1)
                kern_envp = static_env;
        else
                kern_envp = (caddr_t)bootinfo.bi_envp;

        /*
         * Look at arguments passed to us and compute boothowto.
         */
        boothowto = bootinfo.bi_boothowto;

        /*
         * Catch case of boot_verbose set in environment.
         */
        if ((p = getenv("boot_verbose")) != NULL) {
                if (strcmp(p, "yes") == 0 || strcmp(p, "YES") == 0) {
                        boothowto |= RB_VERBOSE;
                }
                freeenv(p);
        }

        if (boothowto & RB_VERBOSE)
                bootverbose = 1;

        /*
         * Initialize the console before we print anything out.
         */
        cninit();

        /* OUTPUT NOW ALLOWED */

        if (ia64_pal_base != 0) {
                ia64_pal_base &= ~IA64_ID_PAGE_MASK;
                /*
                 * We use a TR to map the first 256M of memory - this might
                 * cover the palcode too.
                 */
                if (ia64_pal_base == 0)
                        printf("PAL code mapped by the kernel's TR\n");
        } else
                printf("PAL code not found\n");

        /*
         * Wire things up so we can call the firmware.
         */
        map_pal_code();
        efi_boot_minimal(bootinfo.bi_systab);
        ia64_sal_init();
        calculate_frequencies();

        /*
         * Find the beginning and end of the kernel.
         */
        kernstart = trunc_page(kernel_text);
#ifdef DDB
        ksym_start = bootinfo.bi_symtab;
        ksym_end = bootinfo.bi_esymtab;
        kernend = (vm_offset_t)round_page(ksym_end);
#else
        kernend = (vm_offset_t)round_page(_end);
#endif

        /* But if the bootstrap tells us otherwise, believe it! */
        if (bootinfo.bi_kernend)
                kernend = round_page(bootinfo.bi_kernend);
        if (metadata_missing)
                printf("WARNING: loader(8) metadata is missing!\n");

        /* Get FPSWA interface */
        fpswa_iface = (bootinfo.bi_fpswa == 0) ? NULL :
            (struct fpswa_iface *)IA64_PHYS_TO_RR7(bootinfo.bi_fpswa);

        /* Init basic tunables, including hz */
        init_param1();

        p = getenv("kernelname");
        if (p) {
                strncpy(kernelname, p, sizeof(kernelname) - 1);
                freeenv(p);
        }

        kernstartpfn = atop(IA64_RR_MASK(kernstart));
        kernendpfn = atop(IA64_RR_MASK(kernend));

        /*
         * Size the memory regions and load phys_avail[] with the results.
         */

        /*
         * Find out how much memory is available, by looking at
         * the memory descriptors.
         */

#ifdef DEBUG_MD
        printf("Memory descriptor count: %d\n", mdcount);
#endif

        phys_avail_cnt = 0;
        for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
#ifdef DEBUG_MD
                printf("MD %p: type %d pa 0x%lx cnt 0x%lx\n", md,
                    md->md_type, md->md_phys, md->md_pages);
#endif

                pfn0 = ia64_btop(round_page(md->md_phys));
                pfn1 = ia64_btop(trunc_page(md->md_phys + md->md_pages * 4096));
                if (pfn1 <= pfn0)
                        continue;

                if (md->md_type != EFI_MD_TYPE_FREE)
                        continue;

                /*
                 * Wimp out for now since we do not DTRT here with
                 * pci bus mastering (no bounce buffering, for example).
                 */
                if (pfn0 >= ia64_btop(0x100000000UL)) {
                        printf("Skipping memory chunk start 0x%lx\n",
                            md->md_phys);
                        continue;
                }
                if (pfn1 >= ia64_btop(0x100000000UL)) {
                        printf("Skipping memory chunk end 0x%lx\n",
                            md->md_phys + md->md_pages * 4096);
                        continue;
                }

                /*
                 * We have a memory descriptor that describes conventional
                 * memory that is for general use. We must determine if the
                 * loader has put the kernel in this region.
                 */
                physmem += (pfn1 - pfn0);
                if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) {
                        /*
                         * Must compute the location of the kernel
                         * within the segment.
                         */
#ifdef DEBUG_MD
                        printf("Descriptor %p contains kernel\n", mp);
#endif
                        if (pfn0 < kernstartpfn) {
                                /*
                                 * There is a chunk before the kernel.
                                 */
#ifdef DEBUG_MD
                                printf("Loading chunk before kernel: "
                                       "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
#endif
                                phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
                                phys_avail[phys_avail_cnt+1] = ia64_ptob(kernstartpfn);
                                phys_avail_cnt += 2;
                        }
                        if (kernendpfn < pfn1) {
                                /*
                                 * There is a chunk after the kernel.
                                 */
#ifdef DEBUG_MD
                                printf("Loading chunk after kernel: "
                                       "0x%lx / 0x%lx\n", kernendpfn, pfn1);
#endif
                                phys_avail[phys_avail_cnt] = ia64_ptob(kernendpfn);
                                phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
                                phys_avail_cnt += 2;
                        }
                } else {
                        /*
                         * Just load this cluster as one chunk.
                         */
#ifdef DEBUG_MD
                        printf("Loading descriptor %d: 0x%lx / 0x%lx\n", i,
                               pfn0, pfn1);
#endif
                        phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
                        phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
                        phys_avail_cnt += 2;
                        
                }
        }
        /* A zero start address terminates the phys_avail[] list. */
        phys_avail[phys_avail_cnt] = 0;

        Maxmem = physmem;
        init_param2(physmem);

        /*
         * Initialize error message buffer (at end of core).
         */
        msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
        msgbufinit(msgbufp, MSGBUF_SIZE);

        proc_linkup(&proc0, &ksegrp0, &thread0);
        /*
         * Init mapping for kernel stack for proc 0
         */
        proc0kstack = (vm_offset_t)kstack;
        thread0.td_kstack = proc0kstack;
        thread0.td_kstack_pages = KSTACK_PAGES;

        /*
         * Setup the global data for the bootstrap cpu.
         */
        pcpup = (struct pcpu *)pmap_steal_memory(PAGE_SIZE);
        ia64_set_k4((u_int64_t)pcpup);
        pcpu_init(pcpup, 0, PAGE_SIZE);
        PCPU_SET(curthread, &thread0);

        mutex_init();

        /*
         * Initialize the rest of proc 0's PCB.
         *
         * Set the kernel sp, reserving space for an (empty) trapframe,
         * and make proc0's trapframe pointer point to it for sanity.
         * Initialise proc0's backing store to start after u area.
         */
        cpu_thread_setup(&thread0);
        thread0.td_frame->tf_flags = FRAME_SYSCALL;
        thread0.td_pcb->pcb_special.sp =
            (u_int64_t)thread0.td_frame - 16;
        thread0.td_pcb->pcb_special.bspstore = thread0.td_kstack;

        /*
         * Initialize the virtual memory system.
         */
        pmap_bootstrap();

        /*
         * Initialize debuggers, and break into them if appropriate.
         */
        kdb_init();

#ifdef KDB
        if (boothowto & RB_KDB)
                kdb_enter("Boot flags requested debugger\n");
#endif

        ia64_set_tpr(0);

        /*
         * Save our current context so that we have a known (maybe even
         * sane) context as the initial context for new threads that are
         * forked from us. If any of those threads (including thread0)
         * does something wrong, we may be lucky and return here where
         * we're ready for them with a nice panic.
         */
        if (!savectx(thread0.td_pcb))
                mi_startup();

        /* We should not get here. */
        panic("ia64_init: Whooaa there!");
        /* NOTREACHED */
}
  832 
/*
 * Return the HCDP table address recorded in the bootinfo block
 * handed to us by the loader.
 */
uint64_t
ia64_get_hcdp(void)
{

        return (bootinfo.bi_hcdp);
}
  839 
  840 void
  841 bzero(void *buf, size_t len)
  842 {
  843         caddr_t p = buf;
  844 
  845         while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
  846                 *p++ = 0;
  847                 len--;
  848         }
  849         while (len >= sizeof(u_long) * 8) {
  850                 *(u_long*) p = 0;
  851                 *((u_long*) p + 1) = 0;
  852                 *((u_long*) p + 2) = 0;
  853                 *((u_long*) p + 3) = 0;
  854                 len -= sizeof(u_long) * 8;
  855                 *((u_long*) p + 4) = 0;
  856                 *((u_long*) p + 5) = 0;
  857                 *((u_long*) p + 6) = 0;
  858                 *((u_long*) p + 7) = 0;
  859                 p += sizeof(u_long) * 8;
  860         }
  861         while (len >= sizeof(u_long)) {
  862                 *(u_long*) p = 0;
  863                 len -= sizeof(u_long);
  864                 p += sizeof(u_long);
  865         }
  866         while (len) {
  867                 *p++ = 0;
  868                 len--;
  869         }
  870 }
  871 
  872 void
  873 DELAY(int n)
  874 {
  875         u_int64_t start, end, now;
  876 
  877         start = ia64_get_itc();
  878         end = start + (itc_frequency * n) / 1000000;
  879         /* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */
  880         do {
  881                 now = ia64_get_itc();
  882         } while (now < end || (now > start && end < start));
  883 }
  884 
  885 /*
  886  * Send an interrupt (signal) to a process.
  887  */
  888 void
  889 sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
  890 {
  891         struct proc *p;
  892         struct thread *td;
  893         struct trapframe *tf;
  894         struct sigacts *psp;
  895         struct sigframe sf, *sfp;
  896         u_int64_t sbs, sp;
  897         int oonstack;
  898 
  899         td = curthread;
  900         p = td->td_proc;
  901         PROC_LOCK_ASSERT(p, MA_OWNED);
  902         psp = p->p_sigacts;
  903         mtx_assert(&psp->ps_mtx, MA_OWNED);
  904         tf = td->td_frame;
  905         sp = tf->tf_special.sp;
  906         oonstack = sigonstack(sp);
  907         sbs = 0;
  908 
  909         /* save user context */
  910         bzero(&sf, sizeof(struct sigframe));
  911         sf.sf_uc.uc_sigmask = *mask;
  912         sf.sf_uc.uc_stack = td->td_sigstk;
  913         sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
  914             ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
  915 
  916         /*
  917          * Allocate and validate space for the signal handler
  918          * context. Note that if the stack is in P0 space, the
  919          * call to grow() is a nop, and the useracc() check
  920          * will fail if the process has not already allocated
  921          * the space with a `brk'.
  922          */
  923         if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
  924             SIGISMEMBER(psp->ps_sigonstack, sig)) {
  925                 sbs = (u_int64_t)td->td_sigstk.ss_sp;
  926                 sbs = (sbs + 15) & ~15;
  927                 sfp = (struct sigframe *)(sbs + td->td_sigstk.ss_size);
  928 #if defined(COMPAT_43)
  929                 td->td_sigstk.ss_flags |= SS_ONSTACK;
  930 #endif
  931         } else
  932                 sfp = (struct sigframe *)sp;
  933         sfp = (struct sigframe *)((u_int64_t)(sfp - 1) & ~15);
  934 
  935         /* Fill in the siginfo structure for POSIX handlers. */
  936         if (SIGISMEMBER(psp->ps_siginfo, sig)) {
  937                 sf.sf_si.si_signo = sig;
  938                 sf.sf_si.si_code = code;
  939                 sf.sf_si.si_addr = (void*)tf->tf_special.ifa;
  940                 code = (u_int64_t)&sfp->sf_si;
  941         }
  942 
  943         mtx_unlock(&psp->ps_mtx);
  944         PROC_UNLOCK(p);
  945 
  946         get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
  947 
  948         /* Copy the frame out to userland. */
  949         if (copyout(&sf, sfp, sizeof(sf)) != 0) {
  950                 /*
  951                  * Process has trashed its stack; give it an illegal
  952                  * instruction to halt it in its tracks.
  953                  */
  954                 PROC_LOCK(p);
  955                 sigexit(td, SIGILL);
  956                 return;
  957         }
  958 
  959         if ((tf->tf_flags & FRAME_SYSCALL) == 0) {
  960                 tf->tf_special.psr &= ~IA64_PSR_RI;
  961                 tf->tf_special.iip = ia64_get_k5() +
  962                     ((uint64_t)break_sigtramp - (uint64_t)ia64_gateway_page);
  963         } else
  964                 tf->tf_special.iip = ia64_get_k5() +
  965                     ((uint64_t)epc_sigtramp - (uint64_t)ia64_gateway_page);
  966 
  967         /*
  968          * Setup the trapframe to return to the signal trampoline. We pass
  969          * information to the trampoline in the following registers:
  970          *
  971          *      gp      new backing store or NULL
  972          *      r8      signal number
  973          *      r9      signal code or siginfo pointer
  974          *      r10     signal handler (function descriptor)
  975          */
  976         tf->tf_special.sp = (u_int64_t)sfp - 16;
  977         tf->tf_special.gp = sbs;
  978         tf->tf_special.bspstore = sf.sf_uc.uc_mcontext.mc_special.bspstore;
  979         tf->tf_special.ndirty = 0;
  980         tf->tf_special.rnat = sf.sf_uc.uc_mcontext.mc_special.rnat;
  981         tf->tf_scratch.gr8 = sig;
  982         tf->tf_scratch.gr9 = code;
  983         tf->tf_scratch.gr10 = (u_int64_t)catcher;
  984 
  985         PROC_LOCK(p);
  986         mtx_lock(&psp->ps_mtx);
  987 }
  988 
  989 /*
  990  * Build siginfo_t for SA thread
  991  */
  992 void
  993 cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
  994 {
  995         struct proc *p;
  996         struct thread *td;
  997 
  998         td = curthread;
  999         p = td->td_proc;
 1000         PROC_LOCK_ASSERT(p, MA_OWNED);
 1001 
 1002         bzero(si, sizeof(*si));
 1003         si->si_signo = sig;
 1004         si->si_code = code;
 1005         /* XXXKSE fill other fields */
 1006 }
 1007 
 1008 /*
 1009  * System call to cleanup state after a signal
 1010  * has been taken.  Reset signal mask and
 1011  * stack state from context left by sendsig (above).
 1012  * Return to previous pc and psl as specified by
 1013  * context left by sendsig. Check carefully to
 1014  * make sure that the user has not modified the
 1015  * state to gain improper privileges.
 1016  *
 1017  * MPSAFE
 1018  */
int
sigreturn(struct thread *td,
        struct sigreturn_args /* {
                ucontext_t *sigcntxp;
        } */ *uap)
{
        ucontext_t uc;
        struct trapframe *tf;
        struct proc *p;
        struct pcb *pcb;

        tf = td->td_frame;
        p = td->td_proc;
        pcb = td->td_pcb;       /* XXX unused below; candidate for removal */

        /*
         * Fetch the entire context structure at once for speed.
         * We don't use a normal argument to simplify RSE handling.
         */
        if (copyin(uap->sigcntxp, (caddr_t)&uc, sizeof(uc)))
                return (EFAULT);

        /* Install the saved machine state back into the trapframe. */
        set_mcontext(td, &uc.uc_mcontext);

        PROC_LOCK(p);
#if defined(COMPAT_43)
        /* Keep the old-style on-stack flag in sync with the restored sp. */
        if (sigonstack(tf->tf_special.sp))
                td->td_sigstk.ss_flags |= SS_ONSTACK;
        else
                td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
        /* Restore the signal mask, but never allow unmaskable signals. */
        td->td_sigmask = uc.uc_sigmask;
        SIG_CANTMASK(td->td_sigmask);
        signotify(td);
        PROC_UNLOCK(p);

        return (EJUSTRETURN);
}
 1057 
 1058 #ifdef COMPAT_FREEBSD4
 1059 int
 1060 freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
 1061 {
 1062 
 1063         return sigreturn(td, (struct sigreturn_args *)uap);
 1064 }
 1065 #endif
 1066 
 1067 /*
 1068  * Construct a PCB from a trapframe. This is called from kdb_trap() where
 1069  * we want to start a backtrace from the function that caused us to enter
 1070  * the debugger. We have the context in the trapframe, but base the trace
 1071  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 1072  * enough for a backtrace.
 1073  */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

        /* Special registers come straight from the trapframe. */
        pcb->pcb_special = tf->tf_special;
        pcb->pcb_special.__spare = ~0UL;        /* XXX see unwind.c */
        /* Callee-saved (preserved) registers are still live; capture now. */
        save_callee_saved(&pcb->pcb_preserved);
        save_callee_saved_fp(&pcb->pcb_preserved_fp);
}
 1083 
/*
 * Flush the dirty stacked registers held in the kernel RSE backing
 * store out to the thread's user backing store.  On success, bspstore
 * in *r is advanced past the flushed registers and ndirty is reset to
 * zero.  Returns 0 on success or an errno from the copyout/proc_rwmem
 * path.
 */
int
ia64_flush_dirty(struct thread *td, struct _special *r)
{
        struct iovec iov;
        struct uio uio;
        uint64_t bspst, kstk, rnat;
        int error;

        if (r->ndirty == 0)
                return (0);

        /*
         * Dirty registers start at the same offset within a 512-byte
         * span as the user bspstore (keeps RNaT slot positions lined
         * up between the two backing stores).
         */
        kstk = td->td_kstack + (r->bspstore & 0x1ffUL);
        if (td == curthread) {
                /* Stop the RSE while we inspect/flush its state. */
                __asm __volatile("mov   ar.rsc=0;;");
                __asm __volatile("mov   %0=ar.bspstore" : "=r"(bspst));
                /* Make sure we have all the user registers written out. */
                if (bspst - kstk < r->ndirty) {
                        __asm __volatile("flushrs;;");
                        __asm __volatile("mov   %0=ar.bspstore" : "=r"(bspst));
                }
                __asm __volatile("mov   %0=ar.rnat;;" : "=r"(rnat));
                /* Re-enable the RSE (eager mode). */
                __asm __volatile("mov   ar.rsc=3");
                error = copyout((void*)kstk, (void*)r->bspstore, r->ndirty);
                kstk += r->ndirty;
                /*
                 * If a NaT collection slot for the flushed region already
                 * landed in the kernel backing store, take the RNaT bits
                 * from there; otherwise use the live ar.rnat value.
                 */
                r->rnat = (bspst > kstk && (bspst & 0x1ffL) < (kstk & 0x1ffL))
                    ? *(uint64_t*)(kstk | 0x1f8L) : rnat;
        } else {
                /* Not our own thread: write via the VM system instead. */
                PHOLD(td->td_proc);
                iov.iov_base = (void*)(uintptr_t)kstk;
                iov.iov_len = r->ndirty;
                uio.uio_iov = &iov;
                uio.uio_iovcnt = 1;
                uio.uio_offset = r->bspstore;
                uio.uio_resid = r->ndirty;
                uio.uio_segflg = UIO_SYSSPACE;
                uio.uio_rw = UIO_WRITE;
                uio.uio_td = td;
                error = proc_rwmem(td->td_proc, &uio);
                /*
                 * XXX proc_rwmem() doesn't currently return ENOSPC,
                 * so I think it can bogusly return 0. Neither do
                 * we allow short writes.
                 */
                if (uio.uio_resid != 0 && error == 0)
                        error = ENOSPC;
                PRELE(td->td_proc);
        }

        r->bspstore += r->ndirty;
        r->ndirty = 0;
        return (error);
}
 1136 
/*
 * Take a snapshot of the thread's machine context into *mc.  Dirty
 * stacked registers are flushed to the user backing store first.  For
 * syscall frames only the scratch registers are recorded (optionally
 * with the return-value registers cleared); for async (trap/interrupt)
 * frames the FP scratch registers and the high FP partition are saved
 * as well.  Returns 0 or the error from ia64_flush_dirty().
 */
int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
        struct trapframe *tf;
        int error;

        tf = td->td_frame;
        bzero(mc, sizeof(*mc));
        mc->mc_special = tf->tf_special;
        /* Flush updates mc_special's bspstore/ndirty/rnat in place. */
        error = ia64_flush_dirty(td, &mc->mc_special);
        if (tf->tf_flags & FRAME_SYSCALL) {
                mc->mc_flags |= _MC_FLAGS_SYSCALL_CONTEXT;
                mc->mc_scratch = tf->tf_scratch;
                if (flags & GET_MC_CLEAR_RET) {
                        /* Zap the syscall return registers (r8-r11). */
                        mc->mc_scratch.gr8 = 0;
                        mc->mc_scratch.gr9 = 0;
                        mc->mc_scratch.gr10 = 0;
                        mc->mc_scratch.gr11 = 0;
                }
        } else {
                mc->mc_flags |= _MC_FLAGS_ASYNC_CONTEXT;
                mc->mc_scratch = tf->tf_scratch;
                mc->mc_scratch_fp = tf->tf_scratch_fp;
                /*
                 * XXX If the thread never used the high FP registers, we
                 * probably shouldn't waste time saving them.
                 */
                ia64_highfp_save(td);
                mc->mc_flags |= _MC_FLAGS_HIGHFP_VALID;
                mc->mc_high_fp = td->td_pcb->pcb_high_fp;
        }
        save_callee_saved(&mc->mc_preserved);
        save_callee_saved_fp(&mc->mc_preserved_fp);
        return (error);
}
 1172 
/*
 * Install the machine context *mc into the thread's trapframe.  Only
 * the user-modifiable PSR bits are taken from the new context, and
 * the dirty register count is reset since the incoming context has no
 * dirty registers in our kernel backing store.  Always returns 0.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mc)
{
        struct _special s;
        struct trapframe *tf;
        uint64_t psrmask;

        tf = td->td_frame;

        KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
            ("Whoa there! We have more than 8KB of dirty registers!"));

        s = mc->mc_special;
        /*
         * Only copy the user mask and the restart instruction bit from
         * the new context.
         */
        psrmask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
            IA64_PSR_MFH | IA64_PSR_RI;
        s.psr = (tf->tf_special.psr & ~psrmask) | (s.psr & psrmask);
        /* We don't have any dirty registers of the new context. */
        s.ndirty = 0;
        if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
                /*
                 * We can get an async context passed to us while we
                 * entered the kernel through a syscall: sigreturn(2)
                 * and kse_switchin(2) both take contexts that could
                 * previously be the result of a trap or interrupt.
                 * Hence, we cannot assert that the trapframe is not
                 * a syscall frame, but we can assert that it's at
                 * least an expected syscall.
                 */
                if (tf->tf_flags & FRAME_SYSCALL) {
                        KASSERT(tf->tf_scratch.gr15 == SYS_sigreturn ||
                            tf->tf_scratch.gr15 == SYS_kse_switchin, ("foo"));
                        tf->tf_flags &= ~FRAME_SYSCALL;
                }
                tf->tf_scratch = mc->mc_scratch;
                tf->tf_scratch_fp = mc->mc_scratch_fp;
                if (mc->mc_flags & _MC_FLAGS_HIGHFP_VALID)
                        td->td_pcb->pcb_high_fp = mc->mc_high_fp;
        } else {
                KASSERT((tf->tf_flags & FRAME_SYSCALL) != 0, ("foo"));
                if ((mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) == 0) {
                        /*
                         * Synchronous context without syscall scratch
                         * state: keep the frame marker and resume
                         * address we already have.
                         */
                        s.cfm = tf->tf_special.cfm;
                        s.iip = tf->tf_special.iip;
                        tf->tf_scratch.gr15 = 0;        /* Clear syscall nr. */
                } else
                        tf->tf_scratch = mc->mc_scratch;
        }
        tf->tf_special = s;
        restore_callee_saved(&mc->mc_preserved);
        restore_callee_saved_fp(&mc->mc_preserved_fp);

        /* KSE: write the mailbox word the context asks for. */
        if (mc->mc_flags & _MC_FLAGS_KSE_SET_MBOX)
                suword((caddr_t)mc->mc_special.ifa, mc->mc_special.isr);

        return (0);
}
 1232 
 1233 /*
 1234  * Clear registers on exec.
 1235  */
 1236 void
 1237 exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
 1238 {
 1239         struct trapframe *tf;
 1240         uint64_t *ksttop, *kst;
 1241 
 1242         tf = td->td_frame;
 1243         ksttop = (uint64_t*)(td->td_kstack + tf->tf_special.ndirty +
 1244             (tf->tf_special.bspstore & 0x1ffUL));
 1245 
 1246         /*
 1247          * We can ignore up to 8KB of dirty registers by masking off the
 1248          * lower 13 bits in exception_restore() or epc_syscall(). This
 1249          * should be enough for a couple of years, but if there are more
 1250          * than 8KB of dirty registers, we lose track of the bottom of
 1251          * the kernel stack. The solution is to copy the active part of
 1252          * the kernel stack down 1 page (or 2, but not more than that)
 1253          * so that we always have less than 8KB of dirty registers.
 1254          */
 1255         KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
 1256             ("Whoa there! We have more than 8KB of dirty registers!"));
 1257 
 1258         bzero(&tf->tf_special, sizeof(tf->tf_special));
 1259         if ((tf->tf_flags & FRAME_SYSCALL) == 0) {      /* break syscalls. */
 1260                 bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
 1261                 bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
 1262                 tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
 1263                 tf->tf_special.bspstore = IA64_BACKINGSTORE;
 1264                 /*
 1265                  * Copy the arguments onto the kernel register stack so that
 1266                  * they get loaded by the loadrs instruction. Skip over the
 1267                  * NaT collection points.
 1268                  */
 1269                 kst = ksttop - 1;
 1270                 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
 1271                         *kst-- = 0;
 1272                 *kst-- = 0;
 1273                 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
 1274                         *kst-- = 0;
 1275                 *kst-- = ps_strings;
 1276                 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
 1277                         *kst-- = 0;
 1278                 *kst = stack;
 1279                 tf->tf_special.ndirty = (ksttop - kst) << 3;
 1280         } else {                                /* epc syscalls (default). */
 1281                 tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL;
 1282                 tf->tf_special.bspstore = IA64_BACKINGSTORE + 24;
 1283                 /*
 1284                  * Write values for out0, out1 and out2 to the user's backing
 1285                  * store and arrange for them to be restored into the user's
 1286                  * initial register frame.
 1287                  * Assumes that (bspstore & 0x1f8) < 0x1e0.
 1288                  */
 1289                 suword((caddr_t)tf->tf_special.bspstore - 24, stack);
 1290                 suword((caddr_t)tf->tf_special.bspstore - 16, ps_strings);
 1291                 suword((caddr_t)tf->tf_special.bspstore -  8, 0);
 1292         }
 1293 
 1294         tf->tf_special.iip = entry;
 1295         tf->tf_special.sp = (stack & ~15) - 16;
 1296         tf->tf_special.rsc = 0xf;
 1297         tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
 1298         tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
 1299             IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
 1300             IA64_PSR_CPL_USER;
 1301 }
 1302 
 1303 int
 1304 ptrace_set_pc(struct thread *td, unsigned long addr)
 1305 {
 1306         uint64_t slot;
 1307 
 1308         switch (addr & 0xFUL) {
 1309         case 0:
 1310                 slot = IA64_PSR_RI_0;
 1311                 break;
 1312         case 1:
 1313                 /* XXX we need to deal with MLX bundles here */
 1314                 slot = IA64_PSR_RI_1;
 1315                 break;
 1316         case 2:
 1317                 slot = IA64_PSR_RI_2;
 1318                 break;
 1319         default:
 1320                 return (EINVAL);
 1321         }
 1322 
 1323         td->td_frame->tf_special.iip = addr & ~0x0FULL;
 1324         td->td_frame->tf_special.psr =
 1325             (td->td_frame->tf_special.psr & ~IA64_PSR_RI) | slot;
 1326         return (0);
 1327 }
 1328 
 1329 int
 1330 ptrace_single_step(struct thread *td)
 1331 {
 1332         struct trapframe *tf;
 1333 
 1334         /*
 1335          * There's no way to set single stepping when we're leaving the
 1336          * kernel through the EPC syscall path. The way we solve this is
 1337          * by enabling the lower-privilege trap so that we re-enter the
 1338          * kernel as soon as the privilege level changes. See trap.c for
 1339          * how we proceed from there.
 1340          */
 1341         tf = td->td_frame;
 1342         if (tf->tf_flags & FRAME_SYSCALL)
 1343                 tf->tf_special.psr |= IA64_PSR_LP;
 1344         else
 1345                 tf->tf_special.psr |= IA64_PSR_SS;
 1346         return (0);
 1347 }
 1348 
 1349 int
 1350 ptrace_clear_single_step(struct thread *td)
 1351 {
 1352         struct trapframe *tf;
 1353 
 1354         /*
 1355          * Clear any and all status bits we may use to implement single
 1356          * stepping.
 1357          */
 1358         tf = td->td_frame;
 1359         tf->tf_special.psr &= ~IA64_PSR_SS;
 1360         tf->tf_special.psr &= ~IA64_PSR_LP;
 1361         tf->tf_special.psr &= ~IA64_PSR_TB;
 1362         return (0);
 1363 }
 1364 
 1365 int
 1366 fill_regs(struct thread *td, struct reg *regs)
 1367 {
 1368         struct trapframe *tf;
 1369 
 1370         tf = td->td_frame;
 1371         regs->r_special = tf->tf_special;
 1372         regs->r_scratch = tf->tf_scratch;
 1373         save_callee_saved(&regs->r_preserved);
 1374         return (0);
 1375 }
 1376 
 1377 int
 1378 set_regs(struct thread *td, struct reg *regs)
 1379 {
 1380         struct trapframe *tf;
 1381         int error;
 1382 
 1383         tf = td->td_frame;
 1384         error = ia64_flush_dirty(td, &tf->tf_special);
 1385         if (!error) {
 1386                 tf->tf_special = regs->r_special;
 1387                 tf->tf_special.bspstore += tf->tf_special.ndirty;
 1388                 tf->tf_special.ndirty = 0;
 1389                 tf->tf_scratch = regs->r_scratch;
 1390                 restore_callee_saved(&regs->r_preserved);
 1391         }
 1392         return (error);
 1393 }
 1394 
/*
 * Debug registers are not supported; tell ptrace(2) so.
 */
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

        return (ENOSYS);
}
 1401 
/*
 * Debug registers are not supported; tell ptrace(2) so.
 */
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

        return (ENOSYS);
}
 1408 
 1409 int
 1410 fill_fpregs(struct thread *td, struct fpreg *fpregs)
 1411 {
 1412         struct trapframe *frame = td->td_frame;
 1413         struct pcb *pcb = td->td_pcb;
 1414 
 1415         /* Save the high FP registers. */
 1416         ia64_highfp_save(td);
 1417 
 1418         fpregs->fpr_scratch = frame->tf_scratch_fp;
 1419         save_callee_saved_fp(&fpregs->fpr_preserved);
 1420         fpregs->fpr_high = pcb->pcb_high_fp;
 1421         return (0);
 1422 }
 1423 
 1424 int
 1425 set_fpregs(struct thread *td, struct fpreg *fpregs)
 1426 {
 1427         struct trapframe *frame = td->td_frame;
 1428         struct pcb *pcb = td->td_pcb;
 1429 
 1430         /* Throw away the high FP registers (should be redundant). */
 1431         ia64_highfp_drop(td);
 1432 
 1433         frame->tf_scratch_fp = fpregs->fpr_scratch;
 1434         restore_callee_saved_fp(&fpregs->fpr_preserved);
 1435         pcb->pcb_high_fp = fpregs->fpr_high;
 1436         return (0);
 1437 }
 1438 
 1439 /*
 1440  * High FP register functions.
 1441  */
 1442 
/*
 * Discard the thread's high FP register contents: disassociate the
 * thread from whichever CPU currently holds them, without saving.
 * Returns 0 if no CPU held the state, 1 if the binding was dropped.
 */
int
ia64_highfp_drop(struct thread *td)
{
        struct pcb *pcb;
        struct pcpu *cpu;
        struct thread *thr;

        mtx_lock_spin(&td->td_md.md_highfp_mtx);
        pcb = td->td_pcb;
        cpu = pcb->pcb_fpcpu;
        if (cpu == NULL) {
                /* Nothing cached anywhere; done. */
                mtx_unlock_spin(&td->td_md.md_highfp_mtx);
                return (0);
        }
        /* Break the thread<->CPU binding in both directions. */
        pcb->pcb_fpcpu = NULL;
        thr = cpu->pc_fpcurthread;
        cpu->pc_fpcurthread = NULL;
        mtx_unlock_spin(&td->td_md.md_highfp_mtx);

        /* Post-mortem sanity checking. */
        KASSERT(thr == td, ("Inconsistent high FP state"));
        return (1);
}
 1466 
/*
 * Save the thread's high FP registers into its PCB and release the
 * CPU that held them.  If the registers were never modified (PSR.mfh
 * clear) the state is simply dropped instead.  Under SMP the owning
 * CPU may be remote, in which case an IPI asks it to do the save and
 * we spin until it has finished.  Returns 0 if no CPU held the state,
 * 1 otherwise.
 */
int
ia64_highfp_save(struct thread *td)
{
        struct pcb *pcb;
        struct pcpu *cpu;
        struct thread *thr;

        /* Don't save if the high FP registers weren't modified. */
        if ((td->td_frame->tf_special.psr & IA64_PSR_MFH) == 0)
                return (ia64_highfp_drop(td));

        mtx_lock_spin(&td->td_md.md_highfp_mtx);
        pcb = td->td_pcb;
        cpu = pcb->pcb_fpcpu;
        if (cpu == NULL) {
                mtx_unlock_spin(&td->td_md.md_highfp_mtx);
                return (0);
        }
#ifdef SMP
        /* Pin so that "which CPU are we on" stays valid while we check. */
        if (td == curthread)
                sched_pin();
        if (cpu != pcpup) {
                /* Remote CPU owns the state: ask it to save, then wait. */
                mtx_unlock_spin(&td->td_md.md_highfp_mtx);
                ipi_send(cpu, IPI_HIGH_FP);
                if (td == curthread)
                        sched_unpin();
                /* The remote CPU clears pcb_fpcpu when it has saved. */
                while (pcb->pcb_fpcpu == cpu)
                        DELAY(100);
                return (1);
        } else {
                save_high_fp(&pcb->pcb_high_fp);
                if (td == curthread)
                        sched_unpin();
        }
#else
        save_high_fp(&pcb->pcb_high_fp);
#endif
        /* Break the thread<->CPU binding in both directions. */
        pcb->pcb_fpcpu = NULL;
        thr = cpu->pc_fpcurthread;
        cpu->pc_fpcurthread = NULL;
        mtx_unlock_spin(&td->td_md.md_highfp_mtx);

        /* Post-mortem sanity checking. */
        KASSERT(thr == td, ("Inconsistent high FP state"));
        return (1);
}
 1513 
/*
 * No speaker hardware on this platform; report ENODEV.
 */
int
sysbeep(int pitch, int period)
{
        return (ENODEV);
}

Cache object: b12a488e5e825bf27ce48190f6e1d7a9


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.