FreeBSD/Linux Kernel Cross Reference
sys/powerpc/aim/mp_cpudep.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2008 Marcel Moolenaar
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  *
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #include <sys/param.h>
   33 #include <sys/systm.h>
   34 #include <sys/kernel.h>
   35 #include <sys/bus.h>
   36 #include <sys/pcpu.h>
   37 #include <sys/proc.h>
   38 #include <sys/sched.h>
   39 #include <sys/smp.h>
   40 
   41 #include <machine/bus.h>
   42 #include <machine/cpu.h>
   43 #include <machine/hid.h>
   44 #include <machine/intr_machdep.h>
   45 #include <machine/pcb.h>
   46 #include <machine/psl.h>
   47 #include <machine/smp.h>
   48 #include <machine/spr.h>
   49 #include <machine/trap.h>
   50 
   51 #include <dev/ofw/openfirm.h>
   52 #include <machine/ofw_machdep.h>
   53 
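       /*
        * Per-CPU data pointer for the AP currently being started; set by the
        * MP startup code before each AP is released.
        */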
   54 void *ap_pcpu;
   55 
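       /*
        * Snapshot of boot-processor configuration registers (HID, L2CR,
        * L3CR, ...), saved by cpudep_save_config() and replayed on the APs.
        */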
   56 static register_t bsp_state[8] __aligned(8);
   57 
   58 static void cpudep_save_config(void *dummy);
   59 SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);
   60 
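       /*
        * First C code run on an AP.  Restores the HID registers needed by the
        * MMU on 970-class CPUs, resets LPCR/FSCR on POWER8/POWER9 when the
        * kernel runs in hypervisor mode, and publishes the pcpu pointer in
        * SPRG0.
        */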
   61 void
   62 cpudep_ap_early_bootstrap(void)
   63 {
   64 #ifndef __powerpc64__
   65         register_t reg;
   66 #endif
   67 
   68         switch (mfpvr() >> 16) {
   69         case IBM970:
   70         case IBM970FX:
   71         case IBM970MP:
   72                 /* Set HIOR to 0 */
   73                 __asm __volatile("mtspr 311,%0" :: "r"(0));
   74                 powerpc_sync();
   75 
   76                 /* Restore HID4 and HID5, which are necessary for the MMU */
   77 
   78 #ifdef __powerpc64__
   79                 mtspr(SPR_HID4, bsp_state[2]); powerpc_sync(); isync();
   80                 mtspr(SPR_HID5, bsp_state[3]); powerpc_sync(); isync();
   81 #else
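               /*
                * On a 32-bit kernel the 64-bit HID4/HID5 images were saved as
                * high/low word pairs, so reload each one with a single 'ld'
                * from bsp_state (HID4 at offset 16, HID5 at offset 24).
                */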
   82                 __asm __volatile("ld %0, 16(%2); sync; isync;   \
   83                     mtspr %1, %0; sync; isync;"
   84                     : "=r"(reg) : "K"(SPR_HID4), "b"(bsp_state));
   85                 __asm __volatile("ld %0, 24(%2); sync; isync;   \
   86                     mtspr %1, %0; sync; isync;"
   87                     : "=r"(reg) : "K"(SPR_HID5), "b"(bsp_state));
   88 #endif
   89                 powerpc_sync();
   90                 break;
   91         case IBMPOWER8:
   92         case IBMPOWER8E:
   93         case IBMPOWER8NVL:
   94         case IBMPOWER9:
   95 #ifdef __powerpc64__
   96                 if (mfmsr() & PSL_HV) {
   97                         isync();
   98                         /*
   99                          * Direct interrupts to SRR instead of HSRR and
  100                          * reset LPCR otherwise
  101                          */
  102                         mtspr(SPR_LPID, 0);
  103                         isync();
  104 
  105                         mtspr(SPR_LPCR, lpcr);
  106                         isync();
  107 
  108                         /*
  109                          * Nuke FSCR, to be managed on a per-process basis
  110                          * later.
  111                          */
  112                         mtspr(SPR_FSCR, 0);
  113                 }
  114 #endif
  115                 break;
  116         }
  117 
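               /* Publish this AP's pcpu pointer in SPRG0. */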
  118         __asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
  119         powerpc_sync();
  120 }
  121 
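       /*
        * Per-AP bootstrap: mask external interrupts, install this CPU's idle
        * thread as curthread, call schedinit_ap(), and return the idle
        * thread's saved stack pointer for the startup code to switch onto.
        */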
  122 uintptr_t
  123 cpudep_ap_bootstrap(void)
  124 {
  125         register_t msr, sp;
  126 
  127         msr = psl_kernset & ~PSL_EE;
  128         mtmsr(msr);
  129 
  130         pcpup->pc_curthread = pcpup->pc_idlethread;
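               /*
                * Load curthread into the dedicated register (r13 on 64-bit,
                * r2 on 32-bit kernels).
                */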
  131 #ifdef __powerpc64__
  132         __asm __volatile("mr 13,%0" :: "r"(pcpup->pc_curthread));
  133 #else
  134         __asm __volatile("mr 2,%0" :: "r"(pcpup->pc_curthread));
  135 #endif
  136         pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
  137         sp = pcpup->pc_curpcb->pcb_sp;
  138         schedinit_ap();
  139 
  140         return (sp);
  141 }
  142 
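       /*
        * Enable the L2 cache on 74xx-class CPUs using the BSP's saved L2CR
        * value: invalidate, wait for the invalidate bit to clear (the bit
        * differs between MPC7400/7410 and later parts), then enable.
        */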
  143 static register_t
  144 mpc74xx_l2_enable(register_t l2cr_config)
  145 {
  146         register_t ccr, bit;
  147         uint16_t        vers;
  148 
  149         vers = mfpvr() >> 16;
  150         switch (vers) {
  151         case MPC7400:
  152         case MPC7410:
  153                 bit = L2CR_L2IP;
  154                 break;
  155         default:
  156                 bit = L2CR_L2I;
  157                 break;
  158         }
  159 
  160         ccr = mfspr(SPR_L2CR);
  161         if (ccr & L2CR_L2E)
  162                 return (ccr);
  163 
  164         /* Configure L2 cache. */
  165         ccr = l2cr_config & ~L2CR_L2E;
  166         mtspr(SPR_L2CR, ccr | L2CR_L2I);
  167         do {
  168                 ccr = mfspr(SPR_L2CR);
  169         } while (ccr & bit);
  170         powerpc_sync();
  171         mtspr(SPR_L2CR, l2cr_config);
  172         powerpc_sync();
  173 
  174         return (l2cr_config);
  175 }
  176 
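       /*
        * Enable the L3 cache on MPC745x CPUs using the BSP's saved L3CR
        * value: program the configuration, start the L3 clock, globally
        * invalidate, then set the enable bit.
        */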
  177 static register_t
  178 mpc745x_l3_enable(register_t l3cr_config)
  179 {
  180         register_t ccr;
  181 
  182         ccr = mfspr(SPR_L3CR);
  183         if (ccr & L3CR_L3E)
  184                 return (ccr);
  185 
  186         /* Configure L3 cache. */
  187         ccr = l3cr_config & ~(L3CR_L3E | L3CR_L3I | L3CR_L3PE | L3CR_L3CLKEN);
  188         mtspr(SPR_L3CR, ccr);
  189         ccr |= 0x4000000;       /* Magic, but documented. */
  190         mtspr(SPR_L3CR, ccr);
  191         ccr |= L3CR_L3CLKEN;
  192         mtspr(SPR_L3CR, ccr);
  193         mtspr(SPR_L3CR, ccr | L3CR_L3I);
  194         while (mfspr(SPR_L3CR) & L3CR_L3I)
  195                 ;
  196         mtspr(SPR_L3CR, ccr & ~L3CR_L3CLKEN);
  197         powerpc_sync();
  198         DELAY(100);
  199         mtspr(SPR_L3CR, ccr);
  200         powerpc_sync();
  201         DELAY(100);
  202         ccr |= L3CR_L3E;
  203         mtspr(SPR_L3CR, ccr);
  204         powerpc_sync();
  205 
  206         return(ccr);
  207 }
  208 
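       /* Enable the L1 data cache with a flash invalidate. */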
  209 static register_t
  210 mpc74xx_l1d_enable(void)
  211 {
  212         register_t hid;
  213 
  214         hid = mfspr(SPR_HID0);
  215         if (hid & HID0_DCE)
  216                 return (hid);
  217 
  218         /* Enable L1 D-cache */
  219         hid |= HID0_DCE;
  220         powerpc_sync();
  221         mtspr(SPR_HID0, hid | HID0_DCFI);
  222         powerpc_sync();
  223 
  224         return (hid);
  225 }
  226 
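       /* Enable the L1 instruction cache with a flash invalidate. */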
  227 static register_t
  228 mpc74xx_l1i_enable(void)
  229 {
  230         register_t hid;
  231 
  232         hid = mfspr(SPR_HID0);
  233         if (hid & HID0_ICE)
  234                 return (hid);
  235 
  236         /* Enable L1 I-cache */
  237         hid |= HID0_ICE;
  238         isync();
  239         mtspr(SPR_HID0, hid | HID0_ICFI);
  240         isync();
  241 
  242         return (hid);
  243 }
  244 
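       /*
        * SYSINIT hook run on the boot processor: capture the HID and cache
        * control registers into bsp_state so cpudep_ap_setup() can replicate
        * the BSP's configuration on each AP.  On a 32-bit kernel the 64-bit
        * 970 HID registers are stored as high/low word pairs.
        */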
  245 static void
  246 cpudep_save_config(void *dummy)
  247 {
  248         uint16_t        vers;
  249 
  250         vers = mfpvr() >> 16;
  251 
  252         switch(vers) {
  253         case IBM970:
  254         case IBM970FX:
  255         case IBM970MP:
  256                 #ifdef __powerpc64__
  257                 bsp_state[0] = mfspr(SPR_HID0);
  258                 bsp_state[1] = mfspr(SPR_HID1);
  259                 bsp_state[2] = mfspr(SPR_HID4);
  260                 bsp_state[3] = mfspr(SPR_HID5);
  261                 #else
  262                 __asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
  263                     : "=r" (bsp_state[0]),"=r" (bsp_state[1]) : "K" (SPR_HID0));
  264                 __asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
  265                     : "=r" (bsp_state[2]),"=r" (bsp_state[3]) : "K" (SPR_HID1));
  266                 __asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
  267                     : "=r" (bsp_state[4]),"=r" (bsp_state[5]) : "K" (SPR_HID4));
  268                 __asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
  269                     : "=r" (bsp_state[6]),"=r" (bsp_state[7]) : "K" (SPR_HID5));
  270                 #endif
  271 
  272                 powerpc_sync();
  273 
  274                 break;
  275         case IBMCELLBE:
  276                 #ifdef NOTYET /* Causes problems if in instruction stream on 970 */
  277                 if (mfmsr() & PSL_HV) {
  278                         bsp_state[0] = mfspr(SPR_HID0);
  279                         bsp_state[1] = mfspr(SPR_HID1);
  280                         bsp_state[2] = mfspr(SPR_HID4);
  281                         bsp_state[3] = mfspr(SPR_HID6);
  282 
  283                         bsp_state[4] = mfspr(SPR_CELL_TSCR);
  284                 }
  285                 #endif
  286 
  287                 bsp_state[5] = mfspr(SPR_CELL_TSRL);
  288 
  289                 break;
  290         case MPC7450:
  291         case MPC7455:
  292         case MPC7457:
  293                 /* Only MPC745x CPUs have an L3 cache. */
  294                 bsp_state[3] = mfspr(SPR_L3CR);
  295 
  296                 /* Fallthrough */
  297         case MPC7400:
  298         case MPC7410:
  299         case MPC7447A:
  300         case MPC7448:
  301                 bsp_state[2] = mfspr(SPR_L2CR);
  302                 bsp_state[1] = mfspr(SPR_HID1);
  303                 bsp_state[0] = mfspr(SPR_HID0);
  304                 break;
  305         }
  306 }
  307 
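       /*
        * Final CPU-dependent AP setup: copy the BSP's HID and cache
        * configuration onto this AP (970 and 74xx families), or program LPCR
        * on POWER7/POWER8/POWER9 when running in hypervisor mode.
        */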
  308 void
  309 cpudep_ap_setup()
  310 {
  311 #ifndef __powerpc64__
  312         register_t      reg;
  313 #endif
  314         uint16_t        vers;
  315 
  316         vers = mfpvr() >> 16;
  317 
  318         switch(vers) {
  319         case IBM970:
  320         case IBM970FX:
  321         case IBM970MP:
  322                 /*
  323                  * The 970 has strange rules about how to update HID registers.
  324                  * See Table 2-3, 970MP manual
  325                  *
  326                  * Note: HID4 and HID5 restored already in
  327                  * cpudep_ap_early_bootstrap()
  328                  */
  329 
  330                 __asm __volatile("mtasr %0; sync" :: "r"(0));
  331         #ifdef __powerpc64__
  332                 __asm __volatile(" \
  333                         sync; isync;                                    \
  334                         mtspr   %1, %0;                                 \
  335                         mfspr   %0, %1; mfspr   %0, %1; mfspr   %0, %1; \
  336                         mfspr   %0, %1; mfspr   %0, %1; mfspr   %0, %1; \
  337                         sync; isync" 
  338                     :: "r"(bsp_state[0]), "K"(SPR_HID0));
  339                 __asm __volatile("sync; isync;  \
  340                     mtspr %1, %0; mtspr %1, %0; sync; isync"
  341                     :: "r"(bsp_state[1]), "K"(SPR_HID1));
  342         #else
  343                 __asm __volatile(" \
  344                         ld      %0,0(%2);                               \
  345                         sync; isync;                                    \
  346                         mtspr   %1, %0;                                 \
  347                         mfspr   %0, %1; mfspr   %0, %1; mfspr   %0, %1; \
  348                         mfspr   %0, %1; mfspr   %0, %1; mfspr   %0, %1; \
  349                         sync; isync" 
  350                     : "=r"(reg) : "K"(SPR_HID0), "b"(bsp_state));
  351                 __asm __volatile("ld %0, 8(%2); sync; isync;    \
  352                     mtspr %1, %0; mtspr %1, %0; sync; isync"
  353                     : "=r"(reg) : "K"(SPR_HID1), "b"(bsp_state));
  354         #endif
  355 
  356                 powerpc_sync();
  357                 break;
  358         case IBMCELLBE:
  359                 #ifdef NOTYET /* Causes problems if in instruction stream on 970 */
  360                 if (mfmsr() & PSL_HV) {
  361                         mtspr(SPR_HID0, bsp_state[0]);
  362                         mtspr(SPR_HID1, bsp_state[1]);
  363                         mtspr(SPR_HID4, bsp_state[2]);
  364                         mtspr(SPR_HID6, bsp_state[3]);
  365 
  366                         mtspr(SPR_CELL_TSCR, bsp_state[4]);
  367                 }
  368                 #endif
  369 
  370                 mtspr(SPR_CELL_TSRL, bsp_state[5]);
  371 
  372                 break;
  373         case MPC7400:
  374         case MPC7410:
  375         case MPC7447A:
  376         case MPC7448:
  377         case MPC7450:
  378         case MPC7455:
  379         case MPC7457:
  380                 /* XXX: Program the CPU ID into PIR */
  381                 __asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));
  382 
  383                 powerpc_sync();
  384                 isync();
  385 
  386                 mtspr(SPR_HID0, bsp_state[0]); isync();
  387                 mtspr(SPR_HID1, bsp_state[1]); isync();
  388 
  389                 /* Now enable the L3 cache. */
  390                 switch (vers) {
  391                 case MPC7450:
  392                 case MPC7455:
  393                 case MPC7457:
  394                         /* Only MPC745x CPUs have an L3 cache. */
  395                         mpc745x_l3_enable(bsp_state[3]);
  396                 default:
  397                         break;
  398                 }
  399                 
  400                 mpc74xx_l2_enable(bsp_state[2]);
  401                 mpc74xx_l1d_enable();
  402                 mpc74xx_l1i_enable();
  403 
  404                 break;
  405         case IBMPOWER7:
  406         case IBMPOWER7PLUS:
  407         case IBMPOWER8:
  408         case IBMPOWER8E:
  409         case IBMPOWER8NVL:
  410         case IBMPOWER9:
  411 #ifdef __powerpc64__
  412                 if (mfmsr() & PSL_HV) {
  413                         mtspr(SPR_LPCR, mfspr(SPR_LPCR) | lpcr |
  414                             LPCR_PECE_WAKESET);
  415                         isync();
  416                 }
  417 #endif
  418                 break;
  419         default:
  420 #ifdef __powerpc64__
  421                 if (!(mfmsr() & PSL_HV)) /* Rely on HV to have set things up */
  422                         break;
  423 #endif
   424                 printf("WARNING: Unknown CPU type. Cache performance may be "
  425                     "suboptimal.\n");
  426                 break;
  427         }
  428 }

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.