FreeBSD/Linux Kernel Cross Reference
sys/ia64/ia64/machdep.c

    1 /*-
    2  * Copyright (c) 2003,2004 Marcel Moolenaar
    3  * Copyright (c) 2000,2001 Doug Rabson
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  *
   27  * $FreeBSD: releng/5.3/sys/ia64/ia64/machdep.c 134976 2004-09-09 10:03:21Z julian $
   28  */
   29 
   30 #include "opt_compat.h"
   31 #include "opt_ddb.h"
   32 #include "opt_kstack_pages.h"
   33 #include "opt_msgbuf.h"
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/eventhandler.h>
   38 #include <sys/kdb.h>
   39 #include <sys/sysproto.h>
   40 #include <sys/signalvar.h>
   41 #include <sys/imgact.h>
   42 #include <sys/kernel.h>
   43 #include <sys/proc.h>
   44 #include <sys/lock.h>
   45 #include <sys/pcpu.h>
   46 #include <sys/malloc.h>
   47 #include <sys/reboot.h>
   48 #include <sys/bio.h>
   49 #include <sys/buf.h>
   50 #include <sys/mbuf.h>
   51 #include <sys/vmmeter.h>
   52 #include <sys/msgbuf.h>
   53 #include <sys/exec.h>
   54 #include <sys/sysctl.h>
   55 #include <sys/uio.h>
   56 #include <sys/linker.h>
   57 #include <sys/random.h>
   58 #include <sys/cons.h>
   59 #include <sys/uuid.h>
   60 #include <sys/syscall.h>
   61 #include <net/netisr.h>
   62 #include <vm/vm.h>
   63 #include <vm/vm_kern.h>
   64 #include <vm/vm_page.h>
   65 #include <vm/vm_map.h>
   66 #include <vm/vm_extern.h>
   67 #include <vm/vm_object.h>
   68 #include <vm/vm_pager.h>
   69 #include <sys/user.h>
   70 #include <sys/ptrace.h>
   71 #include <machine/clock.h>
   72 #include <machine/cpu.h>
   73 #include <machine/md_var.h>
   74 #include <machine/reg.h>
   75 #include <machine/fpu.h>
   76 #include <machine/mca.h>
   77 #include <machine/pal.h>
   78 #include <machine/sal.h>
   79 #ifdef SMP
   80 #include <machine/smp.h>
   81 #endif
   82 #include <machine/bootinfo.h>
   83 #include <machine/mutex.h>
   84 #include <machine/vmparam.h>
   85 #include <machine/elf.h>
   86 #include <ddb/ddb.h>
   87 #include <sys/vnode.h>
   88 #include <sys/ucontext.h>
   89 #include <machine/sigframe.h>
   90 #include <machine/efi.h>
   91 #include <machine/unwind.h>
   92 #include <i386/include/specialreg.h>
   93 
   94 u_int64_t processor_frequency;
   95 u_int64_t bus_frequency;
   96 u_int64_t itc_frequency;
   97 int cold = 1;
   98 
   99 u_int64_t pa_bootinfo;
  100 struct bootinfo bootinfo;
  101 
  102 struct pcpu early_pcpu;
  103 extern char kstack[]; 
  104 struct user *proc0uarea;
  105 vm_offset_t proc0kstack;
  106 
  107 extern u_int64_t kernel_text[], _end[];
  108 
  109 extern u_int64_t ia64_gateway_page[];
  110 extern u_int64_t break_sigtramp[];
  111 extern u_int64_t epc_sigtramp[];
  112 
  113 FPSWA_INTERFACE *fpswa_interface;
  114 
  115 u_int64_t ia64_pal_base;
  116 u_int64_t ia64_port_base;
  117 
  118 char machine[] = MACHINE;
  119 SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
  120 
  121 static char cpu_model[64];
  122 SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0,
  123     "The CPU model name");
  124 
  125 static char cpu_family[64];
  126 SYSCTL_STRING(_hw, OID_AUTO, family, CTLFLAG_RD, cpu_family, 0,
  127     "The CPU family name");
  128 
  129 #ifdef DDB
  130 extern vm_offset_t ksym_start, ksym_end;
  131 #endif
  132 
  133 static void cpu_startup(void *);
  134 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
  135 
  136 struct msgbuf *msgbufp=0;
  137 
  138 long Maxmem = 0;
  139 
  140 vm_offset_t phys_avail[100];
  141 
  142 /* must be 2 less so 0 0 can signal end of chunks */
  143 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
  144 
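phys_avail[] holds (start, end) address pairs terminated by a pair of
zeroes, which is why PHYS_AVAIL_ARRAY_END leaves two entries spare;
consumers walk the array in steps of two, as cpu_startup() does below.
A minimal userland sketch of that walk, using hypothetical chunk values:

#include <stdio.h>

int
main(void)
{
	/* Hypothetical chunk list: two regions, zero-terminated. */
	unsigned long phys_avail[] =
	    { 0x1000, 0x9f000, 0x100000, 0x4000000, 0, 0 };
	unsigned long total = 0;
	int i;

	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		total += phys_avail[i + 1] - phys_avail[i];
	printf("%lu bytes available\n", total);
	return (0);
}
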
  145 void mi_startup(void);          /* XXX should be in a MI header */
  146 
  147 struct kva_md_info kmi;
  148 
  149 #define Mhz     1000000L
  150 #define Ghz     (1000L*Mhz)
  151 
  152 static void
  153 identifycpu(void)
  154 {
  155         char vendor[17];
  156         char *family_name, *model_name;
  157         u_int64_t features, tmp;
  158         int number, revision, model, family, archrev;
  159 
  160         /*
  161          * Assumes little-endian.
  162          */
  163         *(u_int64_t *) &vendor[0] = ia64_get_cpuid(0);
  164         *(u_int64_t *) &vendor[8] = ia64_get_cpuid(1);
  165         vendor[16] = '\0';
  166 
  167         tmp = ia64_get_cpuid(3);
  168         number = (tmp >> 0) & 0xff;
  169         revision = (tmp >> 8) & 0xff;
  170         model = (tmp >> 16) & 0xff;
  171         family = (tmp >> 24) & 0xff;
  172         archrev = (tmp >> 32) & 0xff;
  173 
  174         family_name = model_name = "unknown";
  175         switch (family) {
  176         case 0x07:
  177                 family_name = "Itanium";
  178                 model_name = "Merced";
  179                 break;
  180         case 0x1f:
  181                 family_name = "Itanium 2";
  182                 switch (model) {
  183                 case 0x00:
  184                         model_name = "McKinley";
  185                         break;
  186                 case 0x01:
  187                         /*
  188                          * Deerfield is a low-voltage variant based on the
  189                          * Madison core. We need circumstantial evidence
  190                          * (i.e. the clock frequency) to identify those.
  191                          * Allow for roughly 1% error margin.
  192                          */
  193                         tmp = processor_frequency >> 7;
  194                         if ((processor_frequency - tmp) < 1*Ghz &&
  195                             (processor_frequency + tmp) >= 1*Ghz)
  196                                 model_name = "Deerfield";
  197                         else
  198                                 model_name = "Madison";
  199                         break;
  200                 }
  201                 break;
  202         }
  203         snprintf(cpu_family, sizeof(cpu_family), "%s", family_name);
  204         snprintf(cpu_model, sizeof(cpu_model), "%s", model_name);
  205 
  206         features = ia64_get_cpuid(4);
  207 
  208         printf("CPU: %s (", model_name);
  209         if (processor_frequency) {
  210                 printf("%ld.%02ld-Mhz ",
  211                     (processor_frequency + 4999) / Mhz,
  212                     ((processor_frequency + 4999) / (Mhz/100)) % 100);
  213         }
  214         printf("%s)\n", family_name);
  215         printf("  Origin = \"%s\"  Revision = %d\n", vendor, revision);
  216         printf("  Features = 0x%b\n", (u_int32_t) features,
  217             "\020"
  218             "\001LB"    /* long branch (brl) instruction. */
  219             "\002SD"    /* Spontaneous deferral. */
  220             "\003AO"    /* 16-byte atomic operations (ld, st, cmpxchg). */ );
  221 }
  222 
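The decode above follows the IA-64 CPUID layout: registers 0 and 1 hold
the 16-byte vendor string, and register 3 packs number, revision, model,
family and architecture revision into consecutive bytes. A standalone
sketch of the same unpacking (the sample value is hypothetical, chosen
to match the Itanium 2 family code in the switch above):

#include <stdint.h>
#include <stdio.h>

/* Unpack CPUID register 3 into its byte-wide fields, mirroring the
 * shifts in identifycpu() above. */
static void
decode_cpuid3(uint64_t tmp)
{
	int number = (tmp >> 0) & 0xff;
	int revision = (tmp >> 8) & 0xff;
	int model = (tmp >> 16) & 0xff;
	int family = (tmp >> 24) & 0xff;
	int archrev = (tmp >> 32) & 0xff;

	printf("family 0x%02x, model 0x%02x, revision %d, "
	    "archrev %d, number %d\n", family, model, revision,
	    archrev, number);
}

int
main(void)
{
	decode_cpuid3(0x21f000000ULL);	/* family 0x1f: "Itanium 2" */
	return (0);
}
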
  223 static void
  224 cpu_startup(dummy)
  225         void *dummy;
  226 {
  227 
  228         /*
  229          * Good {morning,afternoon,evening,night}.
  230          */
  231         identifycpu();
  232 
  233         /* startrtclock(); */
  234 #ifdef PERFMON
  235         perfmon_init();
  236 #endif
  237         printf("real memory  = %ld (%ld MB)\n", ia64_ptob(Maxmem),
  238             ia64_ptob(Maxmem) / 1048576);
  239 
  240         /*
  241          * Display any holes after the first chunk of extended memory.
  242          */
  243         if (bootverbose) {
  244                 int indx;
  245 
  246                 printf("Physical memory chunk(s):\n");
  247                 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
  248                         int size1 = phys_avail[indx + 1] - phys_avail[indx];
  249 
  250                         printf("0x%08lx - 0x%08lx, %d bytes (%d pages)\n", phys_avail[indx],
  251                             phys_avail[indx + 1] - 1, size1, size1 / PAGE_SIZE);
  252                 }
  253         }
  254 
  255         vm_ksubmap_init(&kmi);
  256 
  257         printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
  258             ptoa(cnt.v_free_count) / 1048576);
  259  
  260         if (fpswa_interface == NULL)
  261                 printf("Warning: no FPSWA package supplied\n");
  262         else
  263                 printf("FPSWA Revision = 0x%lx, Entry = %p\n",
  264                     (long)fpswa_interface->Revision,
  265                     (void *)fpswa_interface->Fpswa);
  266 
  267         /*
  268          * Set up buffers, so they can be used to read disk labels.
  269          */
  270         bufinit();
  271         vm_pager_bufferinit();
  272 
  273         /*
  274          * Traverse the MADT to discover IOSAPIC and Local SAPIC
  275          * information.
  276          */
  277         ia64_probe_sapics();
  278         ia64_mca_init();
  279 }
  280 
  281 void
  282 cpu_boot(int howto)
  283 {
  284 
  285         ia64_efi_runtime->ResetSystem(EfiResetWarm, EFI_SUCCESS, 0, 0);
  286 }
  287 
  288 void
  289 cpu_halt()
  290 {
  291 
  292         ia64_efi_runtime->ResetSystem(EfiResetWarm, EFI_SUCCESS, 0, 0);
  293 }
  294 
  295 static void
  296 cpu_idle_default(void)
  297 {
  298         struct ia64_pal_result res;
  299 
  300         res = ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
  301 }
  302 
  303 void
  304 cpu_idle()
  305 {
  306         (*cpu_idle_hook)();
  307 }
  308 
  309 /* Other subsystems (e.g., ACPI) can hook this later. */
  310 void (*cpu_idle_hook)(void) = cpu_idle_default;
  311 
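cpu_idle() indirects through a function pointer so that a later
subsystem can substitute its own idle method for the PAL halt at run
time. A userland sketch of the same default-plus-override pattern (all
names here are illustrative):

#include <stdio.h>

static void
idle_default(void)
{
	puts("PAL_HALT_LIGHT");		/* stand-in for the PAL call */
}

/* The hook starts out pointing at the default, exactly as
 * cpu_idle_hook does above; another module may overwrite it later. */
static void (*idle_hook)(void) = idle_default;

static void
idle_acpi(void)
{
	puts("enter ACPI C-state");
}

int
main(void)
{
	(*idle_hook)();			/* default behaviour */
	idle_hook = idle_acpi;		/* e.g. ACPI attaches */
	(*idle_hook)();			/* overridden behaviour */
	return (0);
}
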
  312 void
  313 cpu_reset()
  314 {
  315 
  316         cpu_boot(0);
  317 }
  318 
  319 void
  320 cpu_switch(struct thread *old, struct thread *new)
  321 {
  322         struct pcb *oldpcb, *newpcb;
  323 
  324         oldpcb = old->td_pcb;
  325 #if COMPAT_IA32
  326         ia32_savectx(oldpcb);
  327 #endif
  328         if (PCPU_GET(fpcurthread) == old)
  329                 old->td_frame->tf_special.psr |= IA64_PSR_DFH;
  330         if (!savectx(oldpcb)) {
  331                 newpcb = new->td_pcb;
  332                 oldpcb->pcb_current_pmap =
  333                     pmap_switch(newpcb->pcb_current_pmap);
  334                 PCPU_SET(curthread, new);
  335 #if COMPAT_IA32
  336                 ia32_restorectx(newpcb);
  337 #endif
  338                 if (PCPU_GET(fpcurthread) == new)
  339                         new->td_frame->tf_special.psr &= ~IA64_PSR_DFH;
  340                 restorectx(newpcb);
  341                 /* We should not get here. */
  342                 panic("cpu_switch: restorectx() returned");
  343                 /* NOTREACHED */
  344         }
  345 }
  346 
  347 void
  348 cpu_throw(struct thread *old __unused, struct thread *new)
  349 {
  350         struct pcb *newpcb;
  351 
  352         newpcb = new->td_pcb;
  353         (void)pmap_switch(newpcb->pcb_current_pmap);
  354         PCPU_SET(curthread, new);
  355 #if COMPAT_IA32
  356         ia32_restorectx(newpcb);
  357 #endif
  358         restorectx(newpcb);
  359         /* We should not get here. */
  360         panic("cpu_throw: restorectx() returned");
  361         /* NOTREACHED */
  362 }
  363 
  364 void
  365 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
  366 {
  367         size_t pcpusz;
  368 
  369         /*
  370          * Make sure the PCB is 16-byte aligned by making the PCPU
  371          * a multiple of 16 bytes. We assume the PCPU is 16-byte
  372          * aligned itself.
  373          */
  374         pcpusz = (sizeof(struct pcpu) + 15) & ~15;
  375         KASSERT(size >= pcpusz + sizeof(struct pcb),
  376             ("%s: too small an allocation for pcpu", __func__));
  377         pcpu->pc_pcb = (struct pcb *)((char*)pcpu + pcpusz);
  378         pcpu->pc_acpi_id = cpuid;
  379 }
  380 
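The expression (sizeof(struct pcpu) + 15) & ~15 rounds up to the next
multiple of 16; the masking idiom works for any power-of-two alignment.
A generic sketch under that assumption:

#include <assert.h>
#include <stddef.h>

/* Round sz up to a multiple of align, which must be a power of two;
 * this is the same trick cpu_pcpu_init() uses with 16. */
static size_t
roundup_pow2(size_t sz, size_t align)
{
	assert((align & (align - 1)) == 0);
	return ((sz + align - 1) & ~(align - 1));
}

With align = 16, sizes 1 through 16 all map to 16, and 17 maps to 32.
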
  381 void
  382 map_pal_code(void)
  383 {
  384         struct ia64_pte pte;
  385         u_int64_t psr;
  386 
  387         if (ia64_pal_base == 0)
  388                 return;
  389 
  390         bzero(&pte, sizeof(pte));
  391         pte.pte_p = 1;
  392         pte.pte_ma = PTE_MA_WB;
  393         pte.pte_a = 1;
  394         pte.pte_d = 1;
  395         pte.pte_pl = PTE_PL_KERN;
  396         pte.pte_ar = PTE_AR_RWX;
  397         pte.pte_ppn = ia64_pal_base >> 12;
  398 
  399         __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
  400             "r"(IA64_PHYS_TO_RR7(ia64_pal_base)), "r"(IA64_ID_PAGE_SHIFT<<2));
  401 
  402         __asm __volatile("mov   %0=psr" : "=r"(psr));
  403         __asm __volatile("rsm   psr.ic|psr.i");
  404         __asm __volatile("srlz.i");
  405         __asm __volatile("mov   cr.ifa=%0" ::
  406             "r"(IA64_PHYS_TO_RR7(ia64_pal_base)));
  407         __asm __volatile("mov   cr.itir=%0" :: "r"(IA64_ID_PAGE_SHIFT << 2));
  408         __asm __volatile("itr.d dtr[%0]=%1" :: "r"(1), "r"(*(u_int64_t*)&pte));
  409         __asm __volatile("srlz.d");             /* XXX not needed. */
  410         __asm __volatile("itr.i itr[%0]=%1" :: "r"(1), "r"(*(u_int64_t*)&pte));
  411         __asm __volatile("mov   psr.l=%0" :: "r" (psr));
  412         __asm __volatile("srlz.i");
  413 }
  414 
  415 void
  416 map_gateway_page(void)
  417 {
  418         struct ia64_pte pte;
  419         u_int64_t psr;
  420 
  421         bzero(&pte, sizeof(pte));
  422         pte.pte_p = 1;
  423         pte.pte_ma = PTE_MA_WB;
  424         pte.pte_a = 1;
  425         pte.pte_d = 1;
  426         pte.pte_pl = PTE_PL_KERN;
  427         pte.pte_ar = PTE_AR_X_RX;
  428         pte.pte_ppn = IA64_RR_MASK((u_int64_t)ia64_gateway_page) >> 12;
  429 
  430         __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
  431             "r"(VM_MAX_ADDRESS), "r"(PAGE_SHIFT << 2));
  432 
  433         __asm __volatile("mov   %0=psr" : "=r"(psr));
  434         __asm __volatile("rsm   psr.ic|psr.i");
  435         __asm __volatile("srlz.i");
  436         __asm __volatile("mov   cr.ifa=%0" :: "r"(VM_MAX_ADDRESS));
  437         __asm __volatile("mov   cr.itir=%0" :: "r"(PAGE_SHIFT << 2));
  438         __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(*(u_int64_t*)&pte));
  439         __asm __volatile("srlz.d");             /* XXX not needed. */
  440         __asm __volatile("itr.i itr[%0]=%1" :: "r"(3), "r"(*(u_int64_t*)&pte));
  441         __asm __volatile("mov   psr.l=%0" :: "r" (psr));
  442         __asm __volatile("srlz.i");
  443 
  444         /* Expose the mapping to userland in ar.k5 */
  445         ia64_set_k5(VM_MAX_ADDRESS);
  446 }
  447 
  448 static void
  449 calculate_frequencies(void)
  450 {
  451         struct ia64_sal_result sal;
  452         struct ia64_pal_result pal;
  453 
  454         sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
  455         pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);
  456 
  457         if (sal.sal_status == 0 && pal.pal_status == 0) {
  458                 if (bootverbose) {
  459                         printf("Platform clock frequency %ld Hz\n",
  460                                sal.sal_result[0]);
  461                         printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
  462                                "ITC ratio %ld/%ld\n",
  463                                pal.pal_result[0] >> 32,
  464                                pal.pal_result[0] & ((1L << 32) - 1),
  465                                pal.pal_result[1] >> 32,
  466                                pal.pal_result[1] & ((1L << 32) - 1),
  467                                pal.pal_result[2] >> 32,
  468                                pal.pal_result[2] & ((1L << 32) - 1));
  469                 }
  470                 processor_frequency =
  471                         sal.sal_result[0] * (pal.pal_result[0] >> 32)
  472                         / (pal.pal_result[0] & ((1L << 32) - 1));
  473                 bus_frequency =
  474                         sal.sal_result[0] * (pal.pal_result[1] >> 32)
  475                         / (pal.pal_result[1] & ((1L << 32) - 1));
  476                 itc_frequency =
  477                         sal.sal_result[0] * (pal.pal_result[2] >> 32)
  478                         / (pal.pal_result[2] & ((1L << 32) - 1));
  479         }
  480 }
  481 
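SAL supplies the platform base clock in Hz and PAL supplies each derived
clock as a ratio packed into one 64-bit word, numerator in the upper
half and denominator in the lower half. A sketch of the same arithmetic
with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* Derive a frequency from a base clock and a ratio packed as
 * (numerator << 32) | denominator, as calculate_frequencies() does. */
static uint64_t
derive_freq(uint64_t base_hz, uint64_t ratio)
{
	return (base_hz * (ratio >> 32) / (ratio & ((1ULL << 32) - 1)));
}

int
main(void)
{
	/* Hypothetical 200 MHz base with a 15/2 processor ratio. */
	printf("%llu Hz\n", (unsigned long long)
	    derive_freq(200000000UL, (15ULL << 32) | 2));	/* 1.5 GHz */
	return (0);
}
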
  482 void
  483 ia64_init(void)
  484 {
  485         int phys_avail_cnt;
  486         vm_offset_t kernstart, kernend;
  487         vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1;
  488         char *p;
  489         EFI_MEMORY_DESCRIPTOR *md, *mdp;
  490         int mdcount, i, metadata_missing;
  491 
  492         /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */
  493 
  494         /*
  495          * TODO: Disable interrupts, floating point etc.
  496          * Maybe flush cache and tlb
  497          */
  498         ia64_set_fpsr(IA64_FPSR_DEFAULT);
  499 
  500         /*
  501          * TODO: Get critical system information (if possible, from the
  502          * information provided by the boot program).
  503          */
  504 
  505         /*
  506          * pa_bootinfo is the physical address of the bootinfo block as
  507          * passed to us by the loader and set in locore.s.
  508          */
  509         bootinfo = *(struct bootinfo *)(IA64_PHYS_TO_RR7(pa_bootinfo));
  510 
  511         if (bootinfo.bi_magic != BOOTINFO_MAGIC || bootinfo.bi_version != 1) {
  512                 bzero(&bootinfo, sizeof(bootinfo));
  513                 bootinfo.bi_kernend = (vm_offset_t) round_page(_end);
  514         }
  515 
  516         /*
  517          * Look for the I/O ports first - we need them for console
  518          * probing.
  519          */
  520         mdcount = bootinfo.bi_memmap_size / bootinfo.bi_memdesc_size;
  521         md = (EFI_MEMORY_DESCRIPTOR *) IA64_PHYS_TO_RR7(bootinfo.bi_memmap);
  522 
  523         for (i = 0, mdp = md; i < mdcount; i++,
  524             mdp = NextMemoryDescriptor(mdp, bootinfo.bi_memdesc_size)) {
  525                 if (mdp->Type == EfiMemoryMappedIOPortSpace)
  526                         ia64_port_base = IA64_PHYS_TO_RR6(mdp->PhysicalStart);
  527                 else if (mdp->Type == EfiPalCode)
  528                         ia64_pal_base = mdp->PhysicalStart;
  529         }
  530 
  531         metadata_missing = 0;
  532         if (bootinfo.bi_modulep)
  533                 preload_metadata = (caddr_t)bootinfo.bi_modulep;
  534         else
  535                 metadata_missing = 1;
  536         if (envmode == 1)
  537                 kern_envp = static_env;
  538         else
  539                 kern_envp = (caddr_t)bootinfo.bi_envp;
  540 
  541         /*
  542          * Look at arguments passed to us and compute boothowto.
  543          */
  544         boothowto = bootinfo.bi_boothowto;
  545 
  546         /*
  547          * Catch case of boot_verbose set in environment.
  548          */
  549         if ((p = getenv("boot_verbose")) != NULL) {
  550                 if (strcmp(p, "yes") == 0 || strcmp(p, "YES") == 0) {
  551                         boothowto |= RB_VERBOSE;
  552                 }
  553                 freeenv(p);
  554         }
  555 
  556         if (boothowto & RB_VERBOSE)
  557                 bootverbose = 1;
  558 
  559         /*
  560          * Initialize the console before we print anything out.
  561          */
  562         cninit();
  563 
  564         /* OUTPUT NOW ALLOWED */
  565 
  566         if (ia64_pal_base != 0) {
  567                 ia64_pal_base &= ~IA64_ID_PAGE_MASK;
  568                 /*
  569                  * We use a TR to map the first 256M of memory - this might
  570                  * cover the palcode too.
  571                  */
  572                 if (ia64_pal_base == 0)
  573                         printf("PAL code mapped by the kernel's TR\n");
  574         } else
  575                 printf("PAL code not found\n");
  576 
  577         /*
  578          * Wire things up so we can call the firmware.
  579          */
  580         map_pal_code();
  581         ia64_efi_init();
  582         calculate_frequencies();
  583 
  584         /*
  585          * Find the beginning and end of the kernel.
  586          */
  587         kernstart = trunc_page(kernel_text);
  588 #ifdef DDB
  589         ksym_start = bootinfo.bi_symtab;
  590         ksym_end = bootinfo.bi_esymtab;
  591         kernend = (vm_offset_t)round_page(ksym_end);
  592 #else
  593         kernend = (vm_offset_t)round_page(_end);
  594 #endif
  595 
  596         /* But if the bootstrap tells us otherwise, believe it! */
  597         if (bootinfo.bi_kernend)
  598                 kernend = round_page(bootinfo.bi_kernend);
  599         if (metadata_missing)
  600                 printf("WARNING: loader(8) metadata is missing!\n");
  601 
  602         /* Get FPSWA interface */
  603         fpswa_interface = (FPSWA_INTERFACE*)IA64_PHYS_TO_RR7(bootinfo.bi_fpswa);
  604 
  605         /* Init basic tunables, including hz */
  606         init_param1();
  607 
  608         p = getenv("kernelname");
  609         if (p) {
  610                 strncpy(kernelname, p, sizeof(kernelname) - 1);
  611                 freeenv(p);
  612         }
  613 
  614         kernstartpfn = atop(IA64_RR_MASK(kernstart));
  615         kernendpfn = atop(IA64_RR_MASK(kernend));
  616 
  617         /*
  618          * Size the memory regions and load phys_avail[] with the results.
  619          */
  620 
  621         /*
  622          * Find out how much memory is available, by looking at
  623          * the memory descriptors.
  624          */
  625 
  626 #ifdef DEBUG_MD
  627         printf("Memory descriptor count: %d\n", mdcount);
  628 #endif
  629 
  630         phys_avail_cnt = 0;
  631         for (i = 0, mdp = md; i < mdcount; i++,
  632                  mdp = NextMemoryDescriptor(mdp, bootinfo.bi_memdesc_size)) {
  633 #ifdef DEBUG_MD
  634                 printf("MD %d: type %d pa 0x%lx cnt 0x%lx\n", i,
  635                        mdp->Type,
  636                        mdp->PhysicalStart,
  637                        mdp->NumberOfPages);
  638 #endif
  639 
  640                 pfn0 = ia64_btop(round_page(mdp->PhysicalStart));
  641                 pfn1 = ia64_btop(trunc_page(mdp->PhysicalStart
  642                                             + mdp->NumberOfPages * 4096));
  643                 if (pfn1 <= pfn0)
  644                         continue;
  645 
  646                 if (mdp->Type != EfiConventionalMemory)
  647                         continue;
  648 
  649                 /*
  650                  * Wimp out for now since we do not DTRT here with
  651                  * pci bus mastering (no bounce buffering, for example).
  652                  */
  653                 if (pfn0 >= ia64_btop(0x100000000UL)) {
  654                         printf("Skipping memory chunk start 0x%lx\n",
  655                             mdp->PhysicalStart);
  656                         continue;
  657                 }
  658                 if (pfn1 >= ia64_btop(0x100000000UL)) {
  659                         printf("Skipping memory chunk end 0x%lx\n",
  660                             mdp->PhysicalStart + mdp->NumberOfPages * 4096);
  661                         continue;
  662                 }
  663 
  664                 /*
  665                  * We have a memory descriptor that describes conventional
  666                  * memory that is for general use. We must determine if the
  667                  * loader has put the kernel in this region.
  668                  */
  669                 physmem += (pfn1 - pfn0);
  670                 if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) {
  671                         /*
  672                          * Must compute the location of the kernel
  673                          * within the segment.
  674                          */
  675 #ifdef DEBUG_MD
  676                         printf("Descriptor %d contains kernel\n", i);
  677 #endif
  678                         if (pfn0 < kernstartpfn) {
  679                                 /*
  680                                  * There is a chunk before the kernel.
  681                                  */
  682 #ifdef DEBUG_MD
  683                                 printf("Loading chunk before kernel: "
  684                                        "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
  685 #endif
  686                                 phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
  687                                 phys_avail[phys_avail_cnt+1] = ia64_ptob(kernstartpfn);
  688                                 phys_avail_cnt += 2;
  689                         }
  690                         if (kernendpfn < pfn1) {
  691                                 /*
  692                                  * There is a chunk after the kernel.
  693                                  */
  694 #ifdef DEBUG_MD
  695                                 printf("Loading chunk after kernel: "
  696                                        "0x%lx / 0x%lx\n", kernendpfn, pfn1);
  697 #endif
  698                                 phys_avail[phys_avail_cnt] = ia64_ptob(kernendpfn);
  699                                 phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
  700                                 phys_avail_cnt += 2;
  701                         }
  702                 } else {
  703                         /*
  704                          * Just load this cluster as one chunk.
  705                          */
  706 #ifdef DEBUG_MD
  707                         printf("Loading descriptor %d: 0x%lx / 0x%lx\n", i,
  708                                pfn0, pfn1);
  709 #endif
  710                         phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
  711                         phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
  712                         phys_avail_cnt += 2;
  713                         
  714                 }
  715         }
  716         phys_avail[phys_avail_cnt] = 0;
  717 
  718         Maxmem = physmem;
  719         init_param2(physmem);
  720 
  721         /*
  722          * Initialize error message buffer (at end of core).
  723          */
  724         msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
  725         msgbufinit(msgbufp, MSGBUF_SIZE);
  726 
  727         proc_linkup(&proc0, &ksegrp0, &thread0);
  728         /*
  729          * Init mapping for u page(s) for proc 0
  730          */
  731         proc0uarea = (struct user *)pmap_steal_memory(UAREA_PAGES * PAGE_SIZE);
  732         proc0kstack = (vm_offset_t)kstack;
  733         proc0.p_uarea = proc0uarea;
  734         thread0.td_kstack = proc0kstack;
  735         thread0.td_pcb = (struct pcb *)
  736             (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
  737         /*
  738          * Setup the global data for the bootstrap cpu.
  739          */
  740         pcpup = (struct pcpu *)pmap_steal_memory(PAGE_SIZE);
  741         ia64_set_k4((u_int64_t)pcpup);
  742         pcpu_init(pcpup, 0, PAGE_SIZE);
  743         PCPU_SET(curthread, &thread0);
  744 
  745         /*
  746          * Initialize the rest of proc 0's PCB.
  747          *
  748          * Set the kernel sp, reserving space for an (empty) trapframe,
  749          * and make proc0's trapframe pointer point to it for sanity.
  750          * Initialise proc0's backing store to start after u area.
  751          */
  752         thread0.td_frame = (struct trapframe *)thread0.td_pcb - 1;
  753         thread0.td_frame->tf_length = sizeof(struct trapframe);
  754         thread0.td_frame->tf_flags = FRAME_SYSCALL;
  755         thread0.td_pcb->pcb_special.sp =
  756             (u_int64_t)thread0.td_frame - 16;
  757         thread0.td_pcb->pcb_special.bspstore = (u_int64_t)proc0kstack;
  758 
  759         mutex_init();
  760 
  761         /*
  762          * Initialize the virtual memory system.
  763          */
  764         pmap_bootstrap();
  765 
  766         /*
  767          * Initialize debuggers, and break into them if appropriate.
  768          */
  769         kdb_init();
  770 
  771 #ifdef KDB
  772         if (boothowto & RB_KDB)
  773                 kdb_enter("Boot flags requested debugger\n");
  774 #endif
  775 
  776         ia64_set_tpr(0);
  777 
  778         /*
  779          * Save our current context so that we have a known (maybe even
  780          * sane) context as the initial context for new threads that are
  781          * forked from us. If any of those threads (including thread0)
  782          * does something wrong, we may be lucky and return here where
  783          * we're ready for them with a nice panic.
  784          */
  785         if (!savectx(thread0.td_pcb))
  786                 mi_startup();
  787 
  788         /* We should not get here. */
  789         panic("ia64_init: Whooaa there!");
  790         /* NOTREACHED */
  791 }
  792 
  793 void
  794 bzero(void *buf, size_t len)
  795 {
  796         caddr_t p = buf;
  797 
  798         while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
  799                 *p++ = 0;
  800                 len--;
  801         }
  802         while (len >= sizeof(u_long) * 8) {
  803                 *(u_long*) p = 0;
  804                 *((u_long*) p + 1) = 0;
  805                 *((u_long*) p + 2) = 0;
  806                 *((u_long*) p + 3) = 0;
  807                 len -= sizeof(u_long) * 8;
  808                 *((u_long*) p + 4) = 0;
  809                 *((u_long*) p + 5) = 0;
  810                 *((u_long*) p + 6) = 0;
  811                 *((u_long*) p + 7) = 0;
  812                 p += sizeof(u_long) * 8;
  813         }
  814         while (len >= sizeof(u_long)) {
  815                 *(u_long*) p = 0;
  816                 len -= sizeof(u_long);
  817                 p += sizeof(u_long);
  818         }
  819         while (len) {
  820                 *p++ = 0;
  821                 len--;
  822         }
  823 }
  824 
  825 void
  826 DELAY(int n)
  827 {
  828         u_int64_t start, end, now;
  829 
  830         start = ia64_get_itc();
  831         end = start + (itc_frequency * n) / 1000000;
  832         /* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */
  833         do {
  834                 now = ia64_get_itc();
  835         } while (now < end || (now > start && end < start));
  836 }
  837 
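DELAY() converts microseconds to ITC ticks (end = start +
itc_frequency * n / 1000000; at a 1.5 GHz ITC, DELAY(10) spins for
15,000 ticks) and busy-waits until the counter passes the target, with
an extra clause to survive a counter wraparound. A portable sketch of
the same shape, with CLOCK_MONOTONIC standing in for ar.itc:

#include <stdint.h>
#include <time.h>

static uint64_t
ticks(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec);
}

/* Busy-wait n microseconds; this tick source runs at 1 GHz, so the
 * microsecond-to-tick conversion is a multiply by 1000. */
static void
delay_us(int n)
{
	uint64_t end;

	end = ticks() + (uint64_t)n * 1000ULL;
	while (ticks() < end)
		;
}
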
  838 /*
  839  * Send an interrupt (signal) to a process.
  840  */
  841 void
  842 sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
  843 {
  844         struct proc *p;
  845         struct thread *td;
  846         struct trapframe *tf;
  847         struct sigacts *psp;
  848         struct sigframe sf, *sfp;
  849         u_int64_t sbs, sp;
  850         int oonstack;
  851 
  852         td = curthread;
  853         p = td->td_proc;
  854         PROC_LOCK_ASSERT(p, MA_OWNED);
  855         psp = p->p_sigacts;
  856         mtx_assert(&psp->ps_mtx, MA_OWNED);
  857         tf = td->td_frame;
  858         sp = tf->tf_special.sp;
  859         oonstack = sigonstack(sp);
  860         sbs = 0;
  861 
  862         /* save user context */
  863         bzero(&sf, sizeof(struct sigframe));
  864         sf.sf_uc.uc_sigmask = *mask;
  865         sf.sf_uc.uc_stack = td->td_sigstk;
  866         sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
  867             ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
  868 
  869         /*
  870          * Allocate and validate space for the signal handler
  871          * context. Note that if the stack is in P0 space, the
  872          * call to grow() is a nop, and the useracc() check
  873          * will fail if the process has not already allocated
  874          * the space with a `brk'.
  875          */
  876         if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
  877             SIGISMEMBER(psp->ps_sigonstack, sig)) {
  878                 sbs = (u_int64_t)td->td_sigstk.ss_sp;
  879                 sbs = (sbs + 15) & ~15;
  880                 sfp = (struct sigframe *)(sbs + td->td_sigstk.ss_size);
  881 #if defined(COMPAT_43)
  882                 td->td_sigstk.ss_flags |= SS_ONSTACK;
  883 #endif
  884         } else
  885                 sfp = (struct sigframe *)sp;
  886         sfp = (struct sigframe *)((u_int64_t)(sfp - 1) & ~15);
  887 
  888         /* Fill in the siginfo structure for POSIX handlers. */
  889         if (SIGISMEMBER(psp->ps_siginfo, sig)) {
  890                 sf.sf_si.si_signo = sig;
  891                 sf.sf_si.si_code = code;
  892                 sf.sf_si.si_addr = (void*)tf->tf_special.ifa;
  893                 code = (u_int64_t)&sfp->sf_si;
  894         }
  895 
  896         mtx_unlock(&psp->ps_mtx);
  897         PROC_UNLOCK(p);
  898 
  899         get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
  900 
  901         /* Copy the frame out to userland. */
  902         if (copyout(&sf, sfp, sizeof(sf)) != 0) {
  903                 /*
  904                  * Process has trashed its stack; give it an illegal
  905                  * instruction to halt it in its tracks.
  906                  */
  907                 PROC_LOCK(p);
  908                 sigexit(td, SIGILL);
  909                 return;
  910         }
  911 
  912         if ((tf->tf_flags & FRAME_SYSCALL) == 0) {
  913                 tf->tf_special.psr &= ~IA64_PSR_RI;
  914                 tf->tf_special.iip = ia64_get_k5() +
  915                     ((uint64_t)break_sigtramp - (uint64_t)ia64_gateway_page);
  916         } else
  917                 tf->tf_special.iip = ia64_get_k5() +
  918                     ((uint64_t)epc_sigtramp - (uint64_t)ia64_gateway_page);
  919 
  920         /*
  921          * Setup the trapframe to return to the signal trampoline. We pass
  922          * information to the trampoline in the following registers:
  923          *
  924          *      gp      new backing store or NULL
  925          *      r8      signal number
  926          *      r9      signal code or siginfo pointer
  927          *      r10     signal handler (function descriptor)
  928          */
  929         tf->tf_special.sp = (u_int64_t)sfp - 16;
  930         tf->tf_special.gp = sbs;
  931         tf->tf_special.bspstore = sf.sf_uc.uc_mcontext.mc_special.bspstore;
  932         tf->tf_special.ndirty = 0;
  933         tf->tf_special.rnat = sf.sf_uc.uc_mcontext.mc_special.rnat;
  934         tf->tf_scratch.gr8 = sig;
  935         tf->tf_scratch.gr9 = code;
  936         tf->tf_scratch.gr10 = (u_int64_t)catcher;
  937 
  938         PROC_LOCK(p);
  939         mtx_lock(&psp->ps_mtx);
  940 }
  941 
  942 /*
  943  * Build siginfo_t for SA thread
  944  */
  945 void
  946 cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
  947 {
  948         struct proc *p;
  949         struct thread *td;
  950 
  951         td = curthread;
  952         p = td->td_proc;
  953         PROC_LOCK_ASSERT(p, MA_OWNED);
  954 
  955         bzero(si, sizeof(*si));
  956         si->si_signo = sig;
  957         si->si_code = code;
  958         /* XXXKSE fill other fields */
  959 }
  960 
  961 /*
  962  * System call to cleanup state after a signal
  963  * has been taken.  Reset signal mask and
  964  * stack state from context left by sendsig (above).
  965  * Return to previous pc and psl as specified by
  966  * context left by sendsig. Check carefully to
  967  * make sure that the user has not modified the
  968  * state to gain improper privileges.
  969  *
  970  * MPSAFE
  971  */
  972 int
  973 sigreturn(struct thread *td,
  974         struct sigreturn_args /* {
  975                 ucontext_t *sigcntxp;
  976         } */ *uap)
  977 {
  978         ucontext_t uc;
  979         struct trapframe *tf;
  980         struct proc *p;
  981         struct pcb *pcb;
  982 
  983         tf = td->td_frame;
  984         p = td->td_proc;
  985         pcb = td->td_pcb;
  986 
  987         /*
  988          * Fetch the entire context structure at once for speed.
  989          * We don't use a normal argument to simplify RSE handling.
  990          */
  991         if (copyin(uap->sigcntxp, (caddr_t)&uc, sizeof(uc)))
  992                 return (EFAULT);
  993 
  994         set_mcontext(td, &uc.uc_mcontext);
  995 
  996         PROC_LOCK(p);
  997 #if defined(COMPAT_43)
  998         if (sigonstack(tf->tf_special.sp))
  999                 td->td_sigstk.ss_flags |= SS_ONSTACK;
 1000         else
 1001                 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
 1002 #endif
 1003         td->td_sigmask = uc.uc_sigmask;
 1004         SIG_CANTMASK(td->td_sigmask);
 1005         signotify(td);
 1006         PROC_UNLOCK(p);
 1007 
 1008         return (EJUSTRETURN);
 1009 }
 1010 
 1011 #ifdef COMPAT_FREEBSD4
 1012 int
 1013 freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
 1014 {
 1015 
 1016         return sigreturn(td, (struct sigreturn_args *)uap);
 1017 }
 1018 #endif
 1019 
 1020 /*
 1021  * Construct a PCB from a trapframe. This is called from kdb_trap() where
 1022  * we want to start a backtrace from the function that caused us to enter
 1023  * the debugger. We have the context in the trapframe, but base the trace
 1024  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 1025  * enough for a backtrace.
 1026  */
 1027 void
 1028 makectx(struct trapframe *tf, struct pcb *pcb)
 1029 {
 1030 
 1031         pcb->pcb_special = tf->tf_special;
 1032         pcb->pcb_special.__spare = ~0UL;        /* XXX see unwind.c */
 1033         save_callee_saved(&pcb->pcb_preserved);
 1034         save_callee_saved_fp(&pcb->pcb_preserved_fp);
 1035 }
 1036 
 1037 void
 1038 ia64_flush_dirty(struct thread *td, struct _special *r)
 1039 {
 1040         uint64_t bspst, kstk, rnat;
 1041 
 1042         if (r->ndirty == 0)
 1043                 return;
 1044 
 1045         kstk = td->td_kstack + (r->bspstore & 0x1ffUL);
 1046         __asm __volatile("mov   ar.rsc=0;;");
 1047         __asm __volatile("mov   %0=ar.bspstore" : "=r"(bspst));
 1048         /* Make sure we have all the user registers written out. */
 1049         if (bspst - kstk < r->ndirty) {
 1050                 __asm __volatile("flushrs;;");
 1051                 __asm __volatile("mov   %0=ar.bspstore" : "=r"(bspst));
 1052         }
 1053         __asm __volatile("mov   %0=ar.rnat;;" : "=r"(rnat));
 1054         __asm __volatile("mov   ar.rsc=3");
 1055         copyout((void*)kstk, (void*)r->bspstore, r->ndirty);
 1056         kstk += r->ndirty;
 1057         r->rnat = (bspst > kstk && (bspst & 0x1ffUL) < (kstk & 0x1ffUL))
 1058             ? *(uint64_t*)(kstk | 0x1f8UL) : rnat;
 1059         r->bspstore += r->ndirty;
 1060         r->ndirty = 0;
 1061 }
 1062 
 1063 int
 1064 get_mcontext(struct thread *td, mcontext_t *mc, int flags)
 1065 {
 1066         struct trapframe *tf;
 1067 
 1068         tf = td->td_frame;
 1069         bzero(mc, sizeof(*mc));
 1070         mc->mc_special = tf->tf_special;
 1071         ia64_flush_dirty(td, &mc->mc_special);
 1072         if (tf->tf_flags & FRAME_SYSCALL) {
 1073                 mc->mc_flags |= _MC_FLAGS_SYSCALL_CONTEXT;
 1074                 mc->mc_scratch = tf->tf_scratch;
 1075                 if (flags & GET_MC_CLEAR_RET) {
 1076                         mc->mc_scratch.gr8 = 0;
 1077                         mc->mc_scratch.gr9 = 0;
 1078                         mc->mc_scratch.gr10 = 0;
 1079                         mc->mc_scratch.gr11 = 0;
 1080                 }
 1081         } else {
 1082                 mc->mc_flags |= _MC_FLAGS_ASYNC_CONTEXT;
 1083                 mc->mc_scratch = tf->tf_scratch;
 1084                 mc->mc_scratch_fp = tf->tf_scratch_fp;
 1085                 /*
 1086                  * XXX If the thread never used the high FP registers, we
 1087                  * probably shouldn't waste time saving them.
 1088                  */
 1089                 ia64_highfp_save(td);
 1090                 mc->mc_flags |= _MC_FLAGS_HIGHFP_VALID;
 1091                 mc->mc_high_fp = td->td_pcb->pcb_high_fp;
 1092         }
 1093         save_callee_saved(&mc->mc_preserved);
 1094         save_callee_saved_fp(&mc->mc_preserved_fp);
 1095         return (0);
 1096 }
 1097 
 1098 int
 1099 set_mcontext(struct thread *td, const mcontext_t *mc)
 1100 {
 1101         struct _special s;
 1102         struct trapframe *tf;
 1103         uint64_t psrmask;
 1104 
 1105         tf = td->td_frame;
 1106 
 1107         KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
 1108             ("Whoa there! We have more than 8KB of dirty registers!"));
 1109 
 1110         s = mc->mc_special;
 1111         /*
 1112          * Only copy the user mask and the restart instruction bit from
 1113          * the new context.
 1114          */
 1115         psrmask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
 1116             IA64_PSR_MFH | IA64_PSR_RI;
 1117         s.psr = (tf->tf_special.psr & ~psrmask) | (s.psr & psrmask);
 1118         /* We don't have any dirty registers of the new context. */
 1119         s.ndirty = 0;
 1120         if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
 1121                 /*
 1122                  * We can get an async context passed to us while we
 1123                  * entered the kernel through a syscall: sigreturn(2)
 1124                  * and kse_switchin(2) both take contexts that could
 1125                  * previously be the result of a trap or interrupt.
 1126                  * Hence, we cannot assert that the trapframe is not
 1127                  * a syscall frame, but we can assert that it's at
 1128                  * least an expected syscall.
 1129                  */
 1130                 if (tf->tf_flags & FRAME_SYSCALL) {
 1131                         KASSERT(tf->tf_scratch.gr15 == SYS_sigreturn ||
 1132                             tf->tf_scratch.gr15 == SYS_kse_switchin, ("foo"));
 1133                         tf->tf_flags &= ~FRAME_SYSCALL;
 1134                 }
 1135                 tf->tf_scratch = mc->mc_scratch;
 1136                 tf->tf_scratch_fp = mc->mc_scratch_fp;
 1137                 if (mc->mc_flags & _MC_FLAGS_HIGHFP_VALID)
 1138                         td->td_pcb->pcb_high_fp = mc->mc_high_fp;
 1139         } else {
 1140                 KASSERT((tf->tf_flags & FRAME_SYSCALL) != 0, ("foo"));
 1141                 if ((mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) == 0) {
 1142                         s.cfm = tf->tf_special.cfm;
 1143                         s.iip = tf->tf_special.iip;
 1144                         tf->tf_scratch.gr15 = 0;        /* Clear syscall nr. */
 1145                 } else
 1146                         tf->tf_scratch = mc->mc_scratch;
 1147         }
 1148         tf->tf_special = s;
 1149         restore_callee_saved(&mc->mc_preserved);
 1150         restore_callee_saved_fp(&mc->mc_preserved_fp);
 1151 
 1152         if (mc->mc_flags & _MC_FLAGS_KSE_SET_MBOX)
 1153                 suword((caddr_t)mc->mc_special.ifa, mc->mc_special.isr);
 1154 
 1155         return (0);
 1156 }
 1157 
 1158 /*
 1159  * Clear registers on exec.
 1160  */
 1161 void
 1162 exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
 1163 {
 1164         struct trapframe *tf;
 1165         uint64_t *ksttop, *kst;
 1166 
 1167         tf = td->td_frame;
 1168         ksttop = (uint64_t*)(td->td_kstack + tf->tf_special.ndirty +
 1169             (tf->tf_special.bspstore & 0x1ffUL));
 1170 
 1171         /*
 1172          * We can ignore up to 8KB of dirty registers by masking off the
 1173          * lower 13 bits in exception_restore() or epc_syscall(). This
 1174          * should be enough for a couple of years, but if there are more
 1175          * than 8KB of dirty registers, we lose track of the bottom of
 1176          * the kernel stack. The solution is to copy the active part of
 1177          * the kernel stack down 1 page (or 2, but not more than that)
 1178          * so that we always have less than 8KB of dirty registers.
 1179          */
 1180         KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
 1181             ("Whoa there! We have more than 8KB of dirty registers!"));
 1182 
 1183         bzero(&tf->tf_special, sizeof(tf->tf_special));
 1184         if ((tf->tf_flags & FRAME_SYSCALL) == 0) {      /* break syscalls. */
 1185                 bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
 1186                 bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
 1187                 tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
 1188                 tf->tf_special.bspstore = IA64_BACKINGSTORE;
 1189                 /*
 1190                  * Copy the arguments onto the kernel register stack so that
 1191                  * they get loaded by the loadrs instruction. Skip over the
 1192                  * NaT collection points.
 1193                  */
 1194                 kst = ksttop - 1;
 1195                 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
 1196                         *kst-- = 0;
 1197                 *kst-- = 0;
 1198                 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
 1199                         *kst-- = 0;
 1200                 *kst-- = ps_strings;
 1201                 if (((uintptr_t)kst & 0x1ff) == 0x1f8)
 1202                         *kst-- = 0;
 1203                 *kst = stack;
 1204                 tf->tf_special.ndirty = (ksttop - kst) << 3;
 1205         } else {                                /* epc syscalls (default). */
 1206                 tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL;
 1207                 tf->tf_special.bspstore = IA64_BACKINGSTORE + 24;
 1208                 /*
 1209                  * Write values for out0, out1 and out2 to the user's backing
 1210                  * store and arrange for them to be restored into the user's
 1211                  * initial register frame.
 1212                  * Assumes that (bspstore & 0x1f8) < 0x1e0.
 1213                  */
 1214                 suword((caddr_t)tf->tf_special.bspstore - 24, stack);
 1215                 suword((caddr_t)tf->tf_special.bspstore - 16, ps_strings);
 1216                 suword((caddr_t)tf->tf_special.bspstore -  8, 0);
 1217         }
 1218 
 1219         tf->tf_special.iip = entry;
 1220         tf->tf_special.sp = (stack & ~15) - 16;
 1221         tf->tf_special.rsc = 0xf;
 1222         tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
 1223         tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
 1224             IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
 1225             IA64_PSR_CPL_USER;
 1226 }
 1227 
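On the IA-64 register stack, every 64th 8-byte slot (any address whose
low 9 bits equal 0x1f8) is a NaT collection point that holds NaT bits
rather than a register value, which is why the stores above test and
skip those addresses. A sketch of the same downward-growing store with
collection slots skipped (names are illustrative):

#include <stdint.h>

/* Store nargs values downward from kst, writing a zero into each
 * NaT collection slot encountered, as exec_setregs() does above. */
static uint64_t *
push_args(uint64_t *kst, const uint64_t *args, int nargs)
{
	int i;

	for (i = 0; i < nargs; i++) {
		if (((uintptr_t)kst & 0x1ff) == 0x1f8)
			*kst-- = 0;	/* NaT collection point */
		*kst-- = args[i];
	}
	return (kst + 1);	/* address of the last value stored */
}
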
 1228 int
 1229 ptrace_set_pc(struct thread *td, unsigned long addr)
 1230 {
 1231         uint64_t slot;
 1232 
 1233         switch (addr & 0xFUL) {
 1234         case 0:
 1235                 slot = IA64_PSR_RI_0;
 1236                 break;
 1237         case 1:
 1238                 /* XXX we need to deal with MLX bundles here */
 1239                 slot = IA64_PSR_RI_1;
 1240                 break;
 1241         case 2:
 1242                 slot = IA64_PSR_RI_2;
 1243                 break;
 1244         default:
 1245                 return (EINVAL);
 1246         }
 1247 
 1248         td->td_frame->tf_special.iip = addr & ~0x0FULL;
 1249         td->td_frame->tf_special.psr =
 1250             (td->td_frame->tf_special.psr & ~IA64_PSR_RI) | slot;
 1251         return (0);
 1252 }
 1253 
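iip holds the address of a 16-byte instruction bundle and psr.ri selects
one of its three slots, so the routine above splits the requested byte
address into a bundle address and a slot number. A sketch of that
decomposition with a hypothetical address:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t addr = 0x2000000000001232ULL;	/* hypothetical */
	uint64_t bundle = addr & ~0xfULL;	/* what goes into iip */
	int slot = addr & 0xf;			/* valid slots: 0, 1, 2 */

	printf("bundle 0x%llx, slot %d\n",
	    (unsigned long long)bundle, slot);
	return (0);
}
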
 1254 int
 1255 ptrace_single_step(struct thread *td)
 1256 {
 1257         struct trapframe *tf;
 1258 
 1259         /*
 1260          * There's no way to set single stepping when we're leaving the
 1261          * kernel through the EPC syscall path. The way we solve this is
 1262          * by enabling the lower-privilege trap so that we re-enter the
 1263          * kernel as soon as the privilege level changes. See trap.c for
 1264          * how we proceed from there.
 1265          */
 1266         tf = td->td_frame;
 1267         if (tf->tf_flags & FRAME_SYSCALL)
 1268                 tf->tf_special.psr |= IA64_PSR_LP;
 1269         else
 1270                 tf->tf_special.psr |= IA64_PSR_SS;
 1271         return (0);
 1272 }
 1273 
 1274 int
 1275 ptrace_clear_single_step(struct thread *td)
 1276 {
 1277         struct trapframe *tf;
 1278 
 1279         /*
 1280          * Clear any and all status bits we may use to implement single
 1281          * stepping.
 1282          */
 1283         tf = td->td_frame;
 1284         tf->tf_special.psr &= ~IA64_PSR_SS;
 1285         tf->tf_special.psr &= ~IA64_PSR_LP;
 1286         tf->tf_special.psr &= ~IA64_PSR_TB;
 1287         return (0);
 1288 }
 1289 
 1290 int
 1291 fill_regs(struct thread *td, struct reg *regs)
 1292 {
 1293         struct trapframe *tf;
 1294 
 1295         tf = td->td_frame;
 1296         regs->r_special = tf->tf_special;
 1297         regs->r_scratch = tf->tf_scratch;
 1298         save_callee_saved(&regs->r_preserved);
 1299         return (0);
 1300 }
 1301 
 1302 int
 1303 set_regs(struct thread *td, struct reg *regs)
 1304 {
 1305         struct trapframe *tf;
 1306 
 1307         tf = td->td_frame;
 1308         ia64_flush_dirty(td, &tf->tf_special);
 1309         tf->tf_special = regs->r_special;
 1310         tf->tf_special.bspstore += tf->tf_special.ndirty;
 1311         tf->tf_special.ndirty = 0;
 1312         tf->tf_scratch = regs->r_scratch;
 1313         restore_callee_saved(&regs->r_preserved);
 1314         return (0);
 1315 }
 1316 
 1317 int
 1318 fill_dbregs(struct thread *td, struct dbreg *dbregs)
 1319 {
 1320 
 1321         return (ENOSYS);
 1322 }
 1323 
 1324 int
 1325 set_dbregs(struct thread *td, struct dbreg *dbregs)
 1326 {
 1327 
 1328         return (ENOSYS);
 1329 }
 1330 
 1331 int
 1332 fill_fpregs(struct thread *td, struct fpreg *fpregs)
 1333 {
 1334         struct trapframe *frame = td->td_frame;
 1335         struct pcb *pcb = td->td_pcb;
 1336 
 1337         /* Save the high FP registers. */
 1338         ia64_highfp_save(td);
 1339 
 1340         fpregs->fpr_scratch = frame->tf_scratch_fp;
 1341         save_callee_saved_fp(&fpregs->fpr_preserved);
 1342         fpregs->fpr_high = pcb->pcb_high_fp;
 1343         return (0);
 1344 }
 1345 
 1346 int
 1347 set_fpregs(struct thread *td, struct fpreg *fpregs)
 1348 {
 1349         struct trapframe *frame = td->td_frame;
 1350         struct pcb *pcb = td->td_pcb;
 1351 
 1352         /* Throw away the high FP registers (should be redundant). */
 1353         ia64_highfp_drop(td);
 1354 
 1355         frame->tf_scratch_fp = fpregs->fpr_scratch;
 1356         restore_callee_saved_fp(&fpregs->fpr_preserved);
 1357         pcb->pcb_high_fp = fpregs->fpr_high;
 1358         return (0);
 1359 }
 1360 
 1361 /*
 1362  * High FP register functions.
 1363  * XXX no synchronization yet.
 1364  */
 1365 
 1366 int
 1367 ia64_highfp_drop(struct thread *td)
 1368 {
 1369         struct pcb *pcb;
 1370         struct pcpu *cpu;
 1371         struct thread *thr;
 1372 
 1373         pcb = td->td_pcb;
 1374         cpu = pcb->pcb_fpcpu;
 1375         if (cpu == NULL)
 1376                 return (0);
 1377         pcb->pcb_fpcpu = NULL;
 1378         thr = cpu->pc_fpcurthread;
 1379         cpu->pc_fpcurthread = NULL;
 1380 
 1381         /* Post-mortem sanity checking. */
 1382         KASSERT(thr == td, ("Inconsistent high FP state"));
 1383         return (1);
 1384 }
 1385 
 1386 int
 1387 ia64_highfp_save(struct thread *td)
 1388 {
 1389         struct pcb *pcb;
 1390         struct pcpu *cpu;
 1391         struct thread *thr;
 1392 
 1393         /* Don't save if the high FP registers weren't modified. */
 1394         if ((td->td_frame->tf_special.psr & IA64_PSR_MFH) == 0)
 1395                 return (ia64_highfp_drop(td));
 1396 
 1397         pcb = td->td_pcb;
 1398         cpu = pcb->pcb_fpcpu;
 1399         if (cpu == NULL)
 1400                 return (0);
 1401 #ifdef SMP
 1402         if (cpu != pcpup) {
 1403                 ipi_send(cpu->pc_lid, IPI_HIGH_FP);
 1404                 while (pcb->pcb_fpcpu != cpu)
 1405                         DELAY(100);
 1406                 return (1);
 1407         }
 1408 #endif
 1409         save_high_fp(&pcb->pcb_high_fp);
 1410         pcb->pcb_fpcpu = NULL;
 1411         thr = cpu->pc_fpcurthread;
 1412         cpu->pc_fpcurthread = NULL;
 1413 
  1414         /* Post-mortem sanity checking. */
 1415         KASSERT(thr == td, ("Inconsistent high FP state"));
 1416         return (1);
 1417 }
 1418 
 1419 int
 1420 sysbeep(int pitch, int period)
 1421 {
 1422         return (ENODEV);
 1423 }
