FreeBSD/Linux Kernel Cross Reference
sys/powerpc/aim/aim_machdep.c

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *      $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/endian.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

#ifdef __powerpc64__
#include "mmu_oea64.h"
#endif

#ifndef __powerpc64__
struct bat      battable[16];
#endif

int radix_mmu = 0;

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void     *testppc64, *testppc64size;
extern void     *restorebridge, *restorebridgesize;
extern void     *rfid_patch, *rfi_patch1, *rfi_patch2;
extern void     *trapcode64;

extern Elf_Addr _GLOBAL_OFFSET_TABLE_[];
#endif

extern void     *rstcode, *rstcodeend;
extern void     *trapcode, *trapcodeend;
extern void     *hypertrapcode, *hypertrapcodeend;
extern void     *generictrap, *generictrap64;
extern void     *alitrap, *aliend;
extern void     *dsitrap, *dsiend;
extern void     *decrint, *decrsize;
extern void     *extint, *extsize;
extern void     *dblow, *dbend;
extern void     *imisstrap, *imisssize;
extern void     *dlmisstrap, *dlmisssize;
extern void     *dsmisstrap, *dsmisssize;

extern void *ap_pcpu;
extern void __restartkernel(vm_offset_t, vm_offset_t, vm_offset_t, void *, uint32_t, register_t offset, register_t msr);
extern void __restartkernel_virtual(vm_offset_t, vm_offset_t, vm_offset_t, void *, uint32_t, register_t offset, register_t msr);

void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
    void *mdp, uint32_t mdp_cookie);
void aim_cpu_init(vm_offset_t toc);

void
aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
    uint32_t mdp_cookie)
{
        register_t      scratch;

        /*
         * If running from an FDT, make sure we are in real mode to avoid
         * tromping on firmware page tables. Everything in the kernel assumes
         * 1:1 mappings out of firmware, so this won't break anything not
         * already broken. This doesn't work if there is live OF, since OF
         * may internally use non-1:1 mappings.
         */
        if (ofentry == 0)
                mtmsr(mfmsr() & ~(PSL_IR | PSL_DR));

#ifdef __powerpc64__
        /*
         * Relocate to high memory so that the kernel
         * can execute from the direct map.
         *
         * If we are in virtual mode already, use a special entry point
         * that sets up a temporary DMAP to execute from until we can
         * properly set up the MMU.
         */
        if ((vm_offset_t)&aim_early_init < DMAP_BASE_ADDRESS) {
                if (mfmsr() & PSL_DR) {
                        __restartkernel_virtual(fdt, 0, ofentry, mdp,
                            mdp_cookie, DMAP_BASE_ADDRESS, mfmsr());
                } else {
                        __restartkernel(fdt, 0, ofentry, mdp, mdp_cookie,
                            DMAP_BASE_ADDRESS, mfmsr());
                }
        }
#endif

        /* Various very early CPU fix ups */
        switch (mfpvr() >> 16) {
                /*
                 * PowerPC 970 CPUs have a misfeature requested by Apple that
                 * makes them pretend they have a 32-byte cacheline. Turn this
                 * off before we measure the cacheline size.
                 */
                case IBM970:
                case IBM970FX:
                case IBM970MP:
                case IBM970GX:
                        scratch = mfspr(SPR_HID5);
                        scratch &= ~HID5_970_DCBZ_SIZE_HI;
                        mtspr(SPR_HID5, scratch);
                        break;
        #ifdef __powerpc64__
                case IBMPOWER7:
                case IBMPOWER7PLUS:
                case IBMPOWER8:
                case IBMPOWER8E:
                case IBMPOWER8NVL:
                case IBMPOWER9:
                        /* XXX: get from ibm,slb-size in device tree */
                        n_slbs = 32;
                        break;
        #endif
        }
}

void
aim_cpu_init(vm_offset_t toc)
{
        size_t          trap_offset, trapsize;
        vm_offset_t     trap;
        register_t      msr;
        uint8_t         *cache_check;
        int             cacheline_warn;
#ifndef __powerpc64__
        register_t      scratch;
        int             ppc64;
#endif

        trap_offset = 0;
        cacheline_warn = 0;

        /* General setup for AIM CPUs */
        psl_kernset = PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI;

#ifdef __powerpc64__
        psl_kernset |= PSL_SF;
        if (mfmsr() & PSL_HV)
                psl_kernset |= PSL_HV;

#if BYTE_ORDER == LITTLE_ENDIAN
        psl_kernset |= PSL_LE;
#endif

#endif
        psl_userset = psl_kernset | PSL_PR;
#ifdef __powerpc64__
        psl_userset32 = psl_userset & ~PSL_SF;
#endif

        /*
         * Zeroed bits in this variable signify that the value of the bit
         * in its position is allowed to vary between userspace contexts.
         *
         * All other bits are required to be identical for every userspace
         * context. The actual *value* of the bit is determined by
         * psl_userset and/or psl_userset32, and is not allowed to change.
         *
         * Remember to update this set when implementing support for
         * *conditionally* enabling a processor facility. Failing to do
         * this will cause swapcontext() in userspace to break when a
         * process uses a conditionally-enabled facility.
         *
         * When *unconditionally* implementing support for a processor
         * facility, update psl_userset / psl_userset32 instead.
         *
         * See the access control check in set_mcontext().
         */
        psl_userstatic = ~(PSL_VSX | PSL_VEC | PSL_FP | PSL_FE0 | PSL_FE1);
        /*
         * Mask bits from the SRR1 that aren't really the MSR:
         * Bits 1-4, 10-15 (ppc32), 33-36, 42-47 (ppc64)
         */
        psl_userstatic &= ~0x783f0000UL;
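
        /*
         * Illustrative sketch of the check referred to above (the name
         * "new_srr1" is a placeholder, not the actual set_mcontext() code):
         * a user-supplied MSR is acceptable only if it agrees with the
         * current one in every bit that psl_userstatic marks as fixed:
         *
         *      if ((new_srr1 & psl_userstatic) !=
         *          (td->td_frame->srr1 & psl_userstatic))
         *              return (EINVAL);
         */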

        /*
         * Initialize the interrupt tables and figure out our cache line
         * size and whether or not we need the 64-bit bridge code.
         */

        /*
         * Disable translation in case the vector area hasn't been
         * mapped (G5). Note that no OFW calls can be made until
         * translation is re-enabled.
         */

        msr = mfmsr();
        mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

        /*
         * Measure the cacheline size using dcbz
         *
         * Use EXC_PGM as a playground. We are about to overwrite it
         * anyway, we know it exists, and we know it is cache-aligned.
         */

        cache_check = (void *)EXC_PGM;

        for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
                cache_check[cacheline_size] = 0xff;

        __asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

        /* Find the first byte dcbz did not zero to get the cache line size */
        for (cacheline_size = 0; cacheline_size < 0x100 &&
            cache_check[cacheline_size] == 0; cacheline_size++);

        /* Work around psim bug */
        if (cacheline_size == 0) {
                cacheline_warn = 1;
                cacheline_size = 32;
        }
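
        /*
         * Worked example: dcbz zeroes exactly one data cache line starting
         * at the (aligned) target address. On a CPU with 128-byte lines
         * (e.g. a 970 once the 32-byte emulation above is turned off),
         * bytes 0..127 of the 0xff-filled buffer read back as zero, so the
         * scan stops at index 128 and cacheline_size becomes 128.
         */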

        #ifndef __powerpc64__
        /*
         * Figure out whether we need to use the 64 bit PMAP. This works by
         * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
         * and setting ppc64 = 0 if that causes a trap.
         */

        ppc64 = 1;

        bcopy(&testppc64, (void *)EXC_PGM,  (size_t)&testppc64size);
        __syncicache((void *)EXC_PGM, (size_t)&testppc64size);

        __asm __volatile("\
                mfmsr %0;       \
                mtsprg2 %1;     \
                                \
                mtmsrd %0;      \
                mfsprg2 %1;"
            : "=r"(scratch), "=r"(ppc64));

        if (ppc64)
                cpu_features |= PPC_FEATURE_64;

        /*
         * Now copy restorebridge into all the handlers, if necessary,
         * and set up the trap tables.
         */

        if (cpu_features & PPC_FEATURE_64) {
                /* Patch the two instances of rfi -> rfid */
                bcopy(&rfid_patch,&rfi_patch1,4);
        #ifdef KDB
                /* rfi_patch2 is at the end of dbleave */
                bcopy(&rfid_patch,&rfi_patch2,4);
        #endif
        }
        #else /* powerpc64 */
        cpu_features |= PPC_FEATURE_64;
        #endif

        trapsize = (size_t)&trapcodeend - (size_t)&trapcode;

        /*
         * Copy generic handler into every possible trap. Special cases will get
         * different ones in a minute.
         */
        for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
                bcopy(&trapcode, (void *)trap, trapsize);
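
        /*
         * For reference: the architected AIM vectors sit at the usual
         * PowerPC offsets (EXC_RST at 0x100, EXC_MCHK at 0x200, EXC_DSI at
         * 0x300, EXC_ISI at 0x400, EXC_PGM at 0x700, ...). Stepping by 0x20
         * rather than 0x100 also covers the more closely spaced
         * implementation-specific vectors between them.
         */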

        #ifndef __powerpc64__
        if (cpu_features & PPC_FEATURE_64) {
                /*
                 * Copy a code snippet to restore 32-bit bridge mode
                 * to the top of every non-generic trap handler
                 */

                trap_offset += (size_t)&restorebridgesize;
                bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
                bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
                bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
                bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
                bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
                bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
                bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
        } else {
                /*
                 * Use an IBAT and a DBAT to map the bottom 256M segment.
                 *
                 * It is very important to do it *now* to avoid taking a
                 * fault in .text / .data before the MMU is bootstrapped,
                 * because until then, the translation data has not been
                 * copied over from OpenFirmware, so our DSI/ISI will fail
                 * to find a match.
                 */

                battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
                battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

                __asm (".balign 32; \n"
                    "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
                    "mtdbatu 0,%0; mtdbatl 0,%1; isync"
                    :: "r"(battable[0].batu), "r"(battable[0].batl));
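
                /*
                 * Decoded, that single BAT pair says: effective addresses
                 * 0x00000000-0x0fffffff translate 1:1 to the same physical
                 * range (256 MB block length), memory-coherent (BAT_M),
                 * read/write, and valid in supervisor state only (BAT_Vs),
                 * for both instruction and data accesses.
                 */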
        }
        #else
        trapsize = (size_t)&hypertrapcodeend - (size_t)&hypertrapcode;
        bcopy(&hypertrapcode, (void *)(EXC_HEA + trap_offset), trapsize);
        bcopy(&hypertrapcode, (void *)(EXC_HMI + trap_offset), trapsize);
        bcopy(&hypertrapcode, (void *)(EXC_HVI + trap_offset), trapsize);
        bcopy(&hypertrapcode, (void *)(EXC_SOFT_PATCH + trap_offset), trapsize);
        #endif

        bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend -
            (size_t)&rstcode);

#ifdef KDB
        bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend -
            (size_t)&dblow);
        bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend -
            (size_t)&dblow);
        bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend -
            (size_t)&dblow);
        bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend -
            (size_t)&dblow);
#endif
        bcopy(&alitrap,  (void *)(EXC_ALI + trap_offset),  (size_t)&aliend -
            (size_t)&alitrap);
        bcopy(&dsitrap,  (void *)(EXC_DSI + trap_offset),  (size_t)&dsiend -
            (size_t)&dsitrap);

        /* Set address of generictrap for self-reloc calculations */
        *((void **)TRAP_GENTRAP) = &generictrap;
        #ifdef __powerpc64__
        /* Set TOC base so that the interrupt code can get at it */
        *((void **)TRAP_ENTRY) = &generictrap;
        *((register_t *)TRAP_TOCBASE) = toc;
        #else
        /* Set branch address for trap code */
        if (cpu_features & PPC_FEATURE_64)
                *((void **)TRAP_ENTRY) = &generictrap64;
        else
                *((void **)TRAP_ENTRY) = &generictrap;
        *((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_;

        /* G2-specific TLB miss helper handlers */
        bcopy(&imisstrap, (void *)EXC_IMISS,  (size_t)&imisssize);
        bcopy(&dlmisstrap, (void *)EXC_DLMISS,  (size_t)&dlmisssize);
        bcopy(&dsmisstrap, (void *)EXC_DSMISS,  (size_t)&dsmisssize);
        #endif
        __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

        /*
         * Restore MSR
         */
        mtmsr(msr);

        /* Warn if cacheline size was not determined */
        if (cacheline_warn == 1) {
                printf("WARNING: cacheline size undetermined, setting to 32\n");
        }

        /*
         * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
         * in case the platform module had a better idea of what we
         * should do.
         */
        if (radix_mmu)
                pmap_mmu_install(MMU_TYPE_RADIX, BUS_PROBE_GENERIC);
        else if (cpu_features & PPC_FEATURE_64)
                pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
        else
                pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
}

/*
 * Shutdown the CPU as much as possible.
 */
void
cpu_halt(void)
{

        OF_exit();
}

int
ptrace_single_step(struct thread *td)
{
        struct trapframe *tf;

        tf = td->td_frame;
        tf->srr1 |= PSL_SE;

        return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
        struct trapframe *tf;

        tf = td->td_frame;
        tf->srr1 &= ~PSL_SE;

        return (0);
}

void
kdb_cpu_clear_singlestep(void)
{

        kdb_frame->srr1 &= ~PSL_SE;
}

void
kdb_cpu_set_singlestep(void)
{

        kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
        /* Copy the SLB contents from the current CPU */
        memcpy(pcpu->pc_aim.slb, PCPU_GET(aim.slb), sizeof(pcpu->pc_aim.slb));
#endif
}

/* Return 0 on handled success, otherwise signal number. */
int
cpu_machine_check(struct thread *td, struct trapframe *frame, int *ucode)
{
#ifdef __powerpc64__
        /*
         * This block is 64-bit CPU specific currently.  Punt running in 32-bit
         * mode on 64-bit CPUs.
         */
        /* Check if the important information is in DSISR */
        if ((frame->srr1 & SRR1_MCHK_DATA) != 0) {
                printf("Machine check, DSISR: %016lx\n", frame->cpu.aim.dsisr);
                /* SLB multi-hit is recoverable. */
                if ((frame->cpu.aim.dsisr & DSISR_MC_SLB_MULTIHIT) != 0)
                        return (0);
                if ((frame->cpu.aim.dsisr &
                    (DSISR_MC_DERAT_MULTIHIT | DSISR_MC_TLB_MULTIHIT)) != 0) {
                        pmap_tlbie_all();
                        return (0);
                }
                /* TODO: Add other machine check recovery procedures. */
        } else {
                if ((frame->srr1 & SRR1_MCHK_IFETCH_M) == SRR1_MCHK_IFETCH_SLBMH)
                        return (0);
        }
#endif
        *ucode = BUS_OBJERR;
        return (SIGBUS);
}

#ifndef __powerpc64__
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
        return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}
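
/*
 * Example: on 32-bit OEA CPUs the 4 GB effective address space is divided
 * into sixteen 256 MB segments selected by the top four address bits, so
 * va 0xdeadbeef falls in segment 0xd and its VSID comes from pm->pm_sr[13].
 */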

#endif

void
pmap_early_io_map_init(void)
{
        if ((cpu_features2 & PPC_FEATURE2_ARCH_3_00) == 0)
                radix_mmu = 0;
        else {
                radix_mmu = 1;
                TUNABLE_INT_FETCH("radix_mmu", &radix_mmu);
        }

        /*
         * When using Radix, set the start and end of KVA early so that
         * pmap_early_io_map() can hand out KVAs, avoiding issues when
         * remapping them later.
         */
        if (radix_mmu) {
                virtual_avail = VM_MIN_KERNEL_ADDRESS;
                virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
        }
}
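
/*
 * Example: on an ISA 3.0 (PPC_FEATURE2_ARCH_3_00) machine such as POWER9,
 * Radix is the default; booting with the loader/kenv tunable "radix_mmu"
 * set to 0 forces the hashed-page-table pmap instead. On older CPUs the
 * tunable is ignored and the hashed page table is always used.
 */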

/*
 * These functions need to provide addresses that both (a) work in real mode
 * (or whatever mode/circumstances the kernel is in during early boot (now))
 * and (b) can still, in principle, work once the kernel is going. Because
 * these rely on existing mappings/real mode, unmap is a no-op.
 */
vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{
        KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));

        /*
         * If we have the MMU up in early boot, assume it is 1:1. Otherwise,
         * try to get the address in a memory region compatible with the
         * direct map for efficiency later.
         * The exception is the Radix MMU, whose current implementation
         * doesn't support mapping arbitrary virtual addresses, such as the
         * ones generated by "direct mapping" I/O addresses. In that case,
         * use addresses from the KVA area.
         */
        if (mfmsr() & PSL_DR)
                return (pa);
        else if (radix_mmu) {
                vm_offset_t va;

                va = virtual_avail;
                virtual_avail += round_page(size + pa - trunc_page(pa));
                return (va);
        } else
                return (DMAP_BASE_ADDRESS + pa);
}
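
/*
 * Sketch of a typical use (the device address below is made up purely for
 * illustration): an early console driver running before the pmap is
 * bootstrapped could do
 *
 *      vm_offset_t va = pmap_early_io_map(0xf0001000, PAGE_SIZE);
 *      ... access device registers through va ...
 *      pmap_early_io_unmap(va, PAGE_SIZE);
 *
 * In real mode or with a 1:1 direct map the returned va is usable
 * immediately; with Radix it is a KVA that the real mapping code takes
 * over once the pmap is bootstrapped.
 */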

void
pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
{

        KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));
}

/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
{
        register_t msr;
        register_t msscr0;
        register_t cache_reg;
        volatile uint32_t *memp;
        int i;
        int x;

        msr = mfmsr();
        powerpc_sync();
        mtmsr(msr & ~(PSL_EE | PSL_DR));
        msscr0 = mfspr(SPR_MSSCR0);
        msscr0 &= ~MSSCR0_L2PFE;
        mtspr(SPR_MSSCR0, msscr0);
        powerpc_sync();
        isync();
        /* 7e00066c: dssall */
        __asm__ __volatile__(".long 0x7e00066c; sync");
        powerpc_sync();
        isync();
        __asm__ __volatile__("dcbf 0,%0" :: "r"(0));
        __asm__ __volatile__("dcbf 0,%0" :: "r"(0));
        __asm__ __volatile__("dcbf 0,%0" :: "r"(0));

        /* Lock the L1 Data cache. */
        mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
        powerpc_sync();
        isync();

        mtspr(SPR_LDSTCR, 0);

        /*
         * Perform this in two stages: Flush the cache starting in RAM, then do it
         * from ROM.
         */
        memp = (volatile uint32_t *)0x00000000;
        for (i = 0; i < 128 * 1024; i++) {
                (void)*memp;
                __asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
                memp += 32/sizeof(*memp);
        }

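        /*
         * The ROM stage below walks the L1 ways one at a time: each pass
         * through the outer loop leaves exactly one of the eight way-lock
         * bits in LDSTCR clear (0xfe, 0xfd, ..., 0x7f), so loads allocate
         * into a single way, and the 128 ROM-backed lines it touches
         * (128 * 32 bytes = 4 KB, one way of the 32 KB 8-way L1 data cache)
         * push out whatever dirty data remains in that way.
         */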
        memp = (volatile uint32_t *)0xfff00000;
        x = 0xfe;

        for (; x != 0xff;) {
                mtspr(SPR_LDSTCR, x);
                for (i = 0; i < 128; i++) {
                        (void)*memp;
                        __asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
                        memp += 32/sizeof(*memp);
                }
                x = ((x << 1) | 1) & 0xff;
        }
        mtspr(SPR_LDSTCR, 0);

        cache_reg = mfspr(SPR_L2CR);
        if (cache_reg & L2CR_L2E) {
                cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
                mtspr(SPR_L2CR, cache_reg);
                powerpc_sync();
                mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
                while (mfspr(SPR_L2CR) & L2CR_L2HWF)
                        ; /* Busy wait for cache to flush */
                powerpc_sync();
                cache_reg &= ~L2CR_L2E;
                mtspr(SPR_L2CR, cache_reg);
                powerpc_sync();
                mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
                powerpc_sync();
                while (mfspr(SPR_L2CR) & L2CR_L2I)
                        ; /* Busy wait for L2 cache invalidate */
                powerpc_sync();
        }

        cache_reg = mfspr(SPR_L3CR);
        if (cache_reg & L3CR_L3E) {
                cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
                mtspr(SPR_L3CR, cache_reg);
                powerpc_sync();
                mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
                while (mfspr(SPR_L3CR) & L3CR_L3HWF)
                        ; /* Busy wait for cache to flush */
                powerpc_sync();
                cache_reg &= ~L3CR_L3E;
                mtspr(SPR_L3CR, cache_reg);
                powerpc_sync();
                mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
                powerpc_sync();
                while (mfspr(SPR_L3CR) & L3CR_L3I)
                        ; /* Busy wait for L3 cache invalidate */
                powerpc_sync();
        }

        mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
        powerpc_sync();
        isync();

        mtmsr(msr);
}

#ifndef __powerpc64__
void
mpc745x_sleep(void)
{
        static u_quad_t timebase = 0;
        static register_t sprgs[4];
        static register_t srrs[2];

        jmp_buf resetjb;
        struct thread *fputd;
        struct thread *vectd;
        register_t hid0;
        register_t msr;
        register_t saved_msr;

        ap_pcpu = pcpup;

        PCPU_SET(restore, &resetjb);

        saved_msr = mfmsr();
        fputd = PCPU_GET(fputhread);
        vectd = PCPU_GET(vecthread);
        if (fputd != NULL)
                save_fpu(fputd);
        if (vectd != NULL)
                save_vec(vectd);
        if (setjmp(resetjb) == 0) {
                sprgs[0] = mfspr(SPR_SPRG0);
                sprgs[1] = mfspr(SPR_SPRG1);
                sprgs[2] = mfspr(SPR_SPRG2);
                sprgs[3] = mfspr(SPR_SPRG3);
                srrs[0] = mfspr(SPR_SRR0);
                srrs[1] = mfspr(SPR_SRR1);
                timebase = mftb();
                powerpc_sync();
                flush_disable_caches();
                hid0 = mfspr(SPR_HID0);
                hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
                powerpc_sync();
                isync();
                msr = mfmsr() | PSL_POW;
                mtspr(SPR_HID0, hid0);
                powerpc_sync();

                while (1)
                        mtmsr(msr);
        }
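        /*
         * Execution continues here after wake-up, when the early reset code
         * longjmp()s to the jump buffer registered via PCPU_SET(restore, ...)
         * above; the code below then rebuilds the timebase, SPRGs, SRRs, MSR
         * and FPU/AltiVec state that the sleep discarded.
         */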
        /* XXX: The mttb() means this *only* works on single-CPU systems. */
        mttb(timebase);
        PCPU_SET(curthread, curthread);
        PCPU_SET(curpcb, curthread->td_pcb);
        pmap_activate(curthread);
        powerpc_sync();
        mtspr(SPR_SPRG0, sprgs[0]);
        mtspr(SPR_SPRG1, sprgs[1]);
        mtspr(SPR_SPRG2, sprgs[2]);
        mtspr(SPR_SPRG3, sprgs[3]);
        mtspr(SPR_SRR0, srrs[0]);
        mtspr(SPR_SRR1, srrs[1]);
        mtmsr(saved_msr);
        if (fputd == curthread)
                enable_fpu(curthread);
        if (vectd == curthread)
                enable_vec(curthread);
        powerpc_sync();
}
#endif
