FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/vfp.c


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2014 Ian Lepore <ian@freebsd.org>
 * Copyright (c) 2012 Mark Tinguely
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef VFP
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>

#include <machine/armreg.h>
#include <machine/elf.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/undefined.h>
#include <machine/vfp.h>

/* function prototypes */
static int vfp_bounce(u_int, u_int, struct trapframe *, int);
static void vfp_restore(struct vfp_state *);

extern int vfp_exists;
static struct undefined_handler vfp10_uh, vfp11_uh;
/* If true the VFP unit has 32 double registers, otherwise it has 16 */
static int is_d32;

/*
 * About .fpu directives in this file...
 *
 * We should need only .fpu vfpv3, but clang 3.5 has a quirk where setting
 * vfpv3 doesn't imply that vfp2 features are also available -- both have to be
 * explicitly set to get all the features of both.  This is probably a bug in
 * clang, so it may get fixed and require changes here some day.  Other changes
 * are probably coming in clang too, because there are email threads and open
 * PRs indicating they want to completely disable the ability to use .fpu and
 * similar directives in inline asm.  That would be catastrophic for us;
 * hopefully they come to their senses.  There was also some discussion of a
 * new syntax such as .push fpu=vfpv3; ...; .pop fpu; that would be ideal for
 * us, and better than what we have now.
 *
 * For gcc, each .fpu directive completely overrides the prior directive,
 * unlike with clang, but luckily with gcc saying vfpv3 implies all the vfpv2
 * features as well.
 */

#define fmxr(reg, val) \
    __asm __volatile("  .fpu vfpv2\n .fpu vfpv3\n"                      \
                     "  vmsr    " __STRING(reg) ", %0"   :: "r"(val));

#define fmrx(reg) \
({ u_int val = 0;\
    __asm __volatile(" .fpu vfpv2\n .fpu vfpv3\n"                       \
                     "  vmrs    %0, " __STRING(reg) : "=r"(val));       \
    val; \
})

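/*
 * Illustrative sketch (added; not part of the original file): how the fmrx()
 * and fmxr() accessors above are typically used as a read-modify-write pair,
 * here toggling the FPSCR flush-to-zero bit.  VFPSCR_FZ comes from
 * <machine/vfp.h>; the helper name is hypothetical and the block is disabled.
 */
#if 0
static void
vfp_example_set_fz(int enable)
{
        u_int fpscr;

        fpscr = fmrx(fpscr);            /* read the current FPSCR */
        if (enable)
                fpscr |= VFPSCR_FZ;     /* flush denormals to zero */
        else
                fpscr &= ~VFPSCR_FZ;    /* full denormal arithmetic */
        fmxr(fpscr, fpscr);             /* write the new value back */
}
#endif
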
/* Read the CP15 Coprocessor Access Control Register (CPACR). */
static u_int
get_coprocessorACR(void)
{
        u_int val;
        __asm __volatile("mrc p15, 0, %0, c1, c0, 2" : "=r" (val) : : "cc");
        return val;
}

/* Write the CPACR; the isb ensures the new access rights take effect. */
static void
set_coprocessorACR(u_int val)
{
        __asm __volatile("mcr p15, 0, %0, c1, c0, 2\n\t"
         : : "r" (val) : "cc");
        isb();
}

        /* called for each cpu */
void
vfp_init(void)
{
        u_int fpsid, fpexc, tmp;
        u_int coproc, vfp_arch;

        coproc = get_coprocessorACR();
        coproc |= COPROC10 | COPROC11;
        set_coprocessorACR(coproc);

        fpsid = fmrx(fpsid);            /* read the vfp system id */
        fpexc = fmrx(fpexc);            /* read the vfp exception reg */

        if (!(fpsid & VFPSID_HARDSOFT_IMP)) {
                vfp_exists = 1;
                is_d32 = 0;
                PCPU_SET(vfpsid, fpsid);        /* save the fpsid */
                elf_hwcap |= HWCAP_VFP;

                vfp_arch =
                    (fpsid & VFPSID_SUBVERSION2_MASK) >> VFPSID_SUBVERSION_OFF;

                if (vfp_arch >= VFP_ARCH3) {
                        tmp = fmrx(mvfr0);
                        PCPU_SET(vfpmvfr0, tmp);
                        elf_hwcap |= HWCAP_VFPv3;

                        if ((tmp & VMVFR0_RB_MASK) == 2) {
                                elf_hwcap |= HWCAP_VFPD32;
                                is_d32 = 1;
                        } else
                                elf_hwcap |= HWCAP_VFPv3D16;

                        tmp = fmrx(mvfr1);
                        PCPU_SET(vfpmvfr1, tmp);

                        if (PCPU_GET(cpuid) == 0) {
                                if ((tmp & VMVFR1_FZ_MASK) == 0x1) {
                                        /* Denormals arithmetic support */
                                        initial_fpscr &= ~VFPSCR_FZ;
                                        thread0.td_pcb->pcb_vfpstate.fpscr =
                                            initial_fpscr;
                                }
                        }

                        if ((tmp & VMVFR1_LS_MASK) >> VMVFR1_LS_OFF == 1 &&
                            (tmp & VMVFR1_I_MASK) >> VMVFR1_I_OFF == 1 &&
                            (tmp & VMVFR1_SP_MASK) >> VMVFR1_SP_OFF == 1)
                                elf_hwcap |= HWCAP_NEON;
                        if ((tmp & VMVFR1_FMAC_MASK) >>  VMVFR1_FMAC_OFF == 1)
                                elf_hwcap |= HWCAP_VFPv4;
                }

                /* Initialize the coprocessor 10 and 11 handlers.
                 * These are called to restore the registers and enable
                 * the VFP hardware.
                 */
                if (vfp10_uh.uh_handler == NULL) {
                        vfp10_uh.uh_handler = vfp_bounce;
                        vfp11_uh.uh_handler = vfp_bounce;
                        install_coproc_handler_static(10, &vfp10_uh);
                        install_coproc_handler_static(11, &vfp11_uh);
                }
        }
}

SYSINIT(vfp, SI_SUB_CPU, SI_ORDER_ANY, vfp_init, NULL);

/*
 * Start the VFP unit, restore the VFP registers from the PCB, and retry
 * the instruction.
 */
static int
vfp_bounce(u_int addr, u_int insn, struct trapframe *frame, int code)
{
        u_int cpu, fpexc;
        struct pcb *curpcb;
        ksiginfo_t ksi;

        if ((code & FAULT_USER) == 0)
                panic("undefined floating point instruction in supervisor mode");

        critical_enter();

        /*
         * If the VFP is already on and we got an undefined instruction, then
         * something tried to execute a truly invalid instruction that maps to
         * the VFP.
         */
        fpexc = fmrx(fpexc);
        if (fpexc & VFPEXC_EN) {
                /* Clear any exceptions */
                fmxr(fpexc, fpexc & ~(VFPEXC_EX | VFPEXC_FP2V));

                /* kill the process - we do not handle emulation */
                critical_exit();

                if (fpexc & VFPEXC_EX) {
                        /* We have an exception, signal a SIGFPE */
                        ksiginfo_init_trap(&ksi);
                        ksi.ksi_signo = SIGFPE;
                        if (fpexc & VFPEXC_UFC)
                                ksi.ksi_code = FPE_FLTUND;
                        else if (fpexc & VFPEXC_OFC)
                                ksi.ksi_code = FPE_FLTOVF;
                        else if (fpexc & VFPEXC_IOC)
                                ksi.ksi_code = FPE_FLTINV;
                        ksi.ksi_addr = (void *)addr;
                        trapsignal(curthread, &ksi);
                        return 0;
                }

                return 1;
        }

        /*
         * If the last time this thread used the VFP it was on this core, and
         * the last thread to use the VFP on this core was this thread, then
         * the VFP state is valid; otherwise, restore this thread's state to
         * the VFP.
         */
        fmxr(fpexc, fpexc | VFPEXC_EN);
        curpcb = curthread->td_pcb;
        cpu = PCPU_GET(cpuid);
        if (curpcb->pcb_vfpcpu != cpu || curthread != PCPU_GET(fpcurthread)) {
                vfp_restore(&curpcb->pcb_vfpstate);
                curpcb->pcb_vfpcpu = cpu;
                PCPU_SET(fpcurthread, curthread);
        }

        critical_exit();
        return (0);
}

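/*
 * Added explanatory note (not in the original file): vfp_bounce() is installed
 * above as the undefined-instruction handler for coprocessors 10 and 11.
 * Returning 0 tells the dispatcher that the fault was handled, so the trapping
 * VFP instruction is retried with the unit now enabled; the "return 1" above
 * leaves the fault unclaimed, which ends up killing the process, as the
 * comment there notes.
 */
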
/*
 * Restore the given state to the VFP hardware.
 */
static void
vfp_restore(struct vfp_state *vfpsave)
{
        uint32_t fpexc;

        /* On vfpv3 we may need to restore FPINST and FPINST2 */
        fpexc = vfpsave->fpexec;
        if (fpexc & VFPEXC_EX) {
                fmxr(fpinst, vfpsave->fpinst);
                if (fpexc & VFPEXC_FP2V)
                        fmxr(fpinst2, vfpsave->fpinst2);
        }
        fmxr(fpscr, vfpsave->fpscr);

        __asm __volatile(
            " .fpu      vfpv2\n"
            " .fpu      vfpv3\n"
            " vldmia    %0!, {d0-d15}\n"        /* d0-d15 */
            " cmp       %1, #0\n"               /* -D16 or -D32? */
            " vldmiane  %0!, {d16-d31}\n"       /* d16-d31 */
            " addeq     %0, %0, #128\n"         /* skip missing regs */
            : "+&r" (vfpsave) : "r" (is_d32) : "cc"
            );

        fmxr(fpexc, fpexc);
}

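/*
 * Added explanatory note (not in the original file): struct vfp_state reserves
 * room for all 32 double registers.  On a D16 unit only d0-d15 are transferred
 * by the sequences above and below, so the pointer is advanced past the 16
 * unimplemented registers (16 * 8 bytes = 128) and ends up past the whole
 * register array either way.
 */
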
/*
 * If the VFP is on, save its current state and turn it off if requested to do
 * so.  If the VFP is not on, the values at *vfpsave are left unchanged.  The
 * caller is responsible for preventing a context switch while this is running.
 */
void
vfp_store(struct vfp_state *vfpsave, boolean_t disable_vfp)
{
        uint32_t fpexc;

        fpexc = fmrx(fpexc);            /* Is the vfp enabled? */
        if (fpexc & VFPEXC_EN) {
                vfpsave->fpexec = fpexc;
                vfpsave->fpscr = fmrx(fpscr);

                /* On vfpv3 we may need to save FPINST and FPINST2 */
                if (fpexc & VFPEXC_EX) {
                        vfpsave->fpinst = fmrx(fpinst);
                        if (fpexc & VFPEXC_FP2V)
                                vfpsave->fpinst2 = fmrx(fpinst2);
                        fpexc &= ~VFPEXC_EX;
                }

                __asm __volatile(
                    " .fpu      vfpv2\n"
                    " .fpu      vfpv3\n"
                    " vstmia    %0!, {d0-d15}\n"        /* d0-d15 */
                    " cmp       %1, #0\n"               /* -D16 or -D32? */
                    " vstmiane  %0!, {d16-d31}\n"       /* d16-d31 */
                    " addeq     %0, %0, #128\n"         /* skip missing regs */
                    : "+&r" (vfpsave) : "r" (is_d32) : "cc"
                    );

                if (disable_vfp)
                        fmxr(fpexc, fpexc & ~VFPEXC_EN);
        }
}

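/*
 * Illustrative sketch (added; not part of the original file): the caller-side
 * pattern the comment above vfp_store() asks for -- save a thread's VFP state
 * with context switches blocked.  The helper name is hypothetical; the block
 * is disabled and only shows the intended usage.
 */
#if 0
static void
vfp_example_save(struct thread *td, boolean_t disable)
{

        critical_enter();               /* no context switch while saving */
        vfp_store(&td->td_pcb->pcb_vfpstate, disable);
        critical_exit();
}
#endif
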
/*
 * The current thread is dying.  If the state currently in the hardware belongs
 * to the current thread, set fpcurthread to NULL to indicate that the VFP
 * hardware state does not belong to any thread.  If the VFP is on, turn it off.
 * Called only from cpu_throw(), so we don't have to worry about a context
 * switch here.
 */
void
vfp_discard(struct thread *td)
{
        u_int tmp;

        if (PCPU_GET(fpcurthread) == td)
                PCPU_SET(fpcurthread, NULL);

        tmp = fmrx(fpexc);
        if (tmp & VFPEXC_EN)
                fmxr(fpexc, tmp & ~VFPEXC_EN);
}

#endif
