The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/swtch.S

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $       */
    2 
    3 /*-
    4  * Copyright 2003 Wasabi Systems, Inc.
    5  * All rights reserved.
    6  *
    7  * Written by Steve C. Woodford for Wasabi Systems, Inc.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed for the NetBSD Project by
   20  *      Wasabi Systems, Inc.
   21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
   22  *    or promote products derived from this software without specific prior
   23  *    written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
   26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  */
   37 /*-
   38  * Copyright (c) 1994-1998 Mark Brinicombe.
   39  * Copyright (c) 1994 Brini.
   40  * All rights reserved.
   41  *
   42  * This code is derived from software written for Brini by Mark Brinicombe
   43  *
   44  * Redistribution and use in source and binary forms, with or without
   45  * modification, are permitted provided that the following conditions
   46  * are met:
   47  * 1. Redistributions of source code must retain the above copyright
   48  *    notice, this list of conditions and the following disclaimer.
   49  * 2. Redistributions in binary form must reproduce the above copyright
   50  *    notice, this list of conditions and the following disclaimer in the
   51  *    documentation and/or other materials provided with the distribution.
   52  * 3. All advertising materials mentioning features or use of this software
   53  *    must display the following acknowledgement:
   54  *      This product includes software developed by Brini.
   55  * 4. The name of the company nor the name of the author may be used to
   56  *    endorse or promote products derived from this software without specific
   57  *    prior written permission.
   58  *
   59  * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
   60  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
   61  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   62  * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
   63  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   64  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   65  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   66  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   67  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   68  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   69  * SUCH DAMAGE.
   70  *
   71  * RiscBSD kernel project
   72  *
   73  * cpuswitch.S
   74  *
   75  * cpu switching functions
   76  *
   77  * Created      : 15/10/94
   78  *
   79  */
   80 
   81 #include "assym.s"
   82 #include "opt_sched.h"
   83 
   84 #include <machine/asm.h>
   85 #include <machine/asmacros.h>
   86 #include <machine/armreg.h>
   87 #include <machine/vfp.h>
   88 
   89 __FBSDID("$FreeBSD: releng/10.2/sys/arm/arm/swtch.S 283339 2015-05-23 23:27:00Z ian $");
   90 
    91 #define DOMAIN_CLIENT   0x01    /* ARM DACR "client" access encoding for a domain */
    92 
       /*
        * GET_PCPU(tmp, tmp2): leave the address of this CPU's pcpu data in tmp.
        * SMP: index the __pcpu array by the CPU id (low 4 bits of the cp15
        * MPIDR register), scaled by PCPU_SIZE (stored at .Lcurpcpu+4); tmp2 is
        * scratch.  UP: a single load of the pcpu address; tmp2 is unused.
        */
    93 #if defined(_ARM_ARCH_6) && defined(SMP)
    94 #define GET_PCPU(tmp, tmp2) \
    95         mrc     p15, 0, tmp, c0, c0, 5; \
    96         and     tmp, tmp, #0xf;         \
    97         ldr     tmp2, .Lcurpcpu+4;      \
    98         mul     tmp, tmp, tmp2;         \
    99         ldr     tmp2, .Lcurpcpu;        \
   100         add     tmp, tmp, tmp2;
   101 #else
   102 
   103 #define GET_PCPU(tmp, tmp2) \
   104         ldr     tmp, .Lcurpcpu
   105 #endif
   106 
   107 #ifdef VFP
   108         .fpu vfp        /* allow VFP instructions */
   109 #endif
   110 
       /* Literal pool: addresses of C symbols referenced by the code below. */
   111 .Lcurpcpu:
   112         .word   _C_LABEL(__pcpu)        /* base of the per-CPU data */
   113         .word   PCPU_SIZE               /* per-CPU stride, used by SMP GET_PCPU */
   114 .Lcpufuncs:     
   115         .word   _C_LABEL(cpufuncs)      /* table of CPU-specific function pointers */
   116 .Lblocked_lock:
   117         .word   _C_LABEL(blocked_lock)  /* used by the SCHED_ULE/SMP handoff below */
  118 
  119 /*
  120  * cpu_throw(oldtd, newtd)
  121  *
  122  * Remove current thread state, then select the next thread to run
  123  * and load its state.
  124  * r0 = oldtd
  125  * r1 = newtd
  126  */
   127 ENTRY(cpu_throw)
   128         mov     r5, r1                  /* stash newtd; r0/r1 are clobbered below */
   129 
   130         /*
   131          * r0 = oldtd
   132          * r5 = newtd
   133          */
   134 
   135 #ifdef VFP                              /* This thread is dying, disable */
   136         bl      _C_LABEL(vfp_discard)   /* VFP without preserving state. */
   137 #endif
   138 
   139         GET_PCPU(r7, r9) /* NOTE(review): r7 is overwritten by the next load -- confirm this is intentional */
   140         ldr     r7, [r5, #(TD_PCB)]             /* r7 = new thread's PCB */
   141   
   142         /* Switch to the new thread's context. */
   143 
   144         ldr     r9, .Lcpufuncs
   145 #if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B) && !defined(CPU_KRAIT)
   146         mov     lr, pc                  /* indirect call: write back the cache */
   147         ldr     pc, [r9, #CF_IDCACHE_WBINV_ALL]
   148 #endif
   149         ldr     r0, [r7, #(PCB_PL1VEC)]
   150         ldr     r1, [r7, #(PCB_DACR)]
   151         /*
   152          * r0 = Pointer to L1 slot for vector_page (or NULL)
   153          * r1 = new thread's DACR
   154          * r5 = newtd
   155          * r7 = new thread's PCB
   156          * r9 = cpufuncs
   157          */
   158 
   159         /*
   160          * Ensure the vector table is accessible by fixing up the new L1
   161          */
   162         cmp     r0, #0                  /* No need to fixup vector table? */
   163         ldrne   r3, [r0]                /* But if yes, fetch current value */
   164         ldrne   r2, [r7, #(PCB_L1VEC)]  /* Fetch new vector_page value */
   165         mcr     p15, 0, r1, c3, c0, 0   /* Update DACR for new context */
   166         cmpne   r3, r2                  /* Stuffing the same value? */
   167         strne   r2, [r0]                /* Store if not. */
   168 
   169 #ifdef PMAP_INCLUDE_PTE_SYNC
   170         /*
   171          * Need to sync the cache to make sure that last store is
   172          * visible to the MMU.
   173          */
   174         movne   r1, #4
   175         movne   lr, pc
   176         ldrne   pc, [r9, #CF_DCACHE_WB_RANGE]
   177 #endif /* PMAP_INCLUDE_PTE_SYNC */
   178 
   179         /*
   180          * Note: We don't do the same optimisation as cpu_switch() with
   181          * respect to avoiding flushing the TLB if we're switching to
   182          * the same L1 since this process' VM space may be about to go
   183          * away, so we don't want *any* turds left in the TLB.
   184          */
   185 
   186         /* Switch the memory to the new process */
   187         ldr     r0, [r7, #(PCB_PAGEDIR)]
   188         mov     lr, pc                  /* indirect call: load new TTB, flush TLB */
   189         ldr     pc, [r9, #CF_CONTEXT_SWITCH]
   190 
   191         GET_PCPU(r6, r4)
   192         /* Hook in a new pcb */
   193         str     r7, [r6, #PC_CURPCB]
   194         /* We have a new curthread now so make a note of it */
   195         str     r5, [r6, #PC_CURTHREAD]
   196 #ifndef ARM_TP_ADDRESS
   197         mcr     p15, 0, r5, c13, c0, 4  /* cp15 per-CPU/curthread register */
   198 #endif
   199         /* Set the new tp */
   200         ldr     r6, [r5, #(TD_MD + MD_TP)]
   201 #ifdef ARM_TP_ADDRESS
   202         ldr     r4, =ARM_TP_ADDRESS
   203         str     r6, [r4]
   204         ldr     r6, [r5, #(TD_MD + MD_RAS_START)]
   205         str     r6, [r4, #4] /* ARM_RAS_START */
   206         ldr     r6, [r5, #(TD_MD + MD_RAS_END)]
   207         str     r6, [r4, #8] /* ARM_RAS_END */
   208 #else
   209         mcr p15, 0, r6, c13, c0, 3      /* thread-pointer (TLS) register */
   210 #endif
   211         /* Restore all the saved registers and exit */
   212         add     r3, r7, #PCB_R4
   213         ldmia   r3, {r4-r12, sp, pc}    /* resumes at the pc saved in the new pcb */
   214 END(cpu_throw)
  215 
  216 /*
  217  * cpu_switch(oldtd, newtd, lock)
  218  *
  219  * Save the current thread state, then select the next thread to run
  220  * and load its state.
  221  * r0 = oldtd
  222  * r1 = newtd
  223  * r2 = lock (new lock for old thread)
  224  */
   225 ENTRY(cpu_switch)
   226         /* Interrupts are disabled. */
   227         /* Save all the registers in the old thread's pcb. */
   228         ldr     r3, [r0, #(TD_PCB)]
   229 
   230         /* Save r4-r12, sp, lr and pc into the old thread's pcb. */
   231         add     r3, #(PCB_R4)
   232         stmia   r3, {r4-r12, sp, lr, pc}
   233 
   234         mov     r6, r2 /* Save the mutex */
   235 
   236         /* rem: r0 = old lwp */
   237         /* rem: interrupts are disabled */
   238 
   239         /* Process is now on a processor. */
   240         /* We have a new curthread now so make a note of it */
   241         GET_PCPU(r7, r2)
   242         str     r1, [r7, #PC_CURTHREAD]
   243 #ifndef ARM_TP_ADDRESS
   244         mcr     p15, 0, r1, c13, c0, 4  /* cp15 per-CPU/curthread register */
   245 #endif
   246 
   247         /* Hook in a new pcb */
   248         ldr     r2, [r1, #TD_PCB]
   249         str     r2, [r7, #PC_CURPCB]
   250 
   251         /* Stage two : Save old context */
   252 
   253         /* Get the user structure for the old thread. */
   254         ldr     r2, [r0, #(TD_PCB)]
   255         mov     r4, r0 /* Save the old thread. */
   256 
   257 #ifdef ARM_TP_ADDRESS
   258         /* Store the old tp; userland can change it on armv4. */
   259         ldr     r3, =ARM_TP_ADDRESS
   260         ldr     r9, [r3]
   261         str     r9, [r0, #(TD_MD + MD_TP)]
   262         ldr     r9, [r3, #4]
   263         str     r9, [r0, #(TD_MD + MD_RAS_START)]
   264         ldr     r9, [r3, #8]
   265         str     r9, [r0, #(TD_MD + MD_RAS_END)]
   266 
   267         /* Set the new tp */
   268         ldr     r9, [r1, #(TD_MD + MD_TP)]
   269         str     r9, [r3]
   270         ldr     r9, [r1, #(TD_MD + MD_RAS_START)]
   271         str     r9, [r3, #4]
   272         ldr     r9, [r1, #(TD_MD + MD_RAS_END)]
   273         str     r9, [r3, #8]
   274 #else
   275         /* 
   276          * Set new tp.  No need to store the old one first, userland can't 
   277          * change it directly on armv6.
   278          */
   279         ldr     r9, [r1, #(TD_MD + MD_TP)]
   280         mcr p15, 0, r9, c13, c0, 3      /* thread-pointer (TLS) register */
   281 #endif
   282         
   283         /* Get the user structure for the new process in r9 */
   284         ldr     r9, [r1, #(TD_PCB)]
   285 
   286         /* rem: r2 = old PCB */
   287         /* rem: r9 = new PCB */
   288         /* rem: interrupts are enabled (NOTE(review): contradicts the "disabled" note above; nothing here re-enables them -- confirm) */
   289 
   290 #ifdef VFP
   291         fmrx    r0, fpexc               /* If the VFP is enabled */
   292         tst     r0, #(VFPEXC_EN)        /* the current thread has */
   293         movne   r1, #1                  /* used it, so go save */
   294         addne   r0, r2, #(PCB_VFPSTATE) /* the state into the PCB */
   295         blne    _C_LABEL(vfp_store)     /* and disable the VFP. */
   296 #endif
   297 
   298         /* r0-r3 now free! */
   299 
   300         /* Third phase : restore saved context */
   301 
   302         /* rem: r2 = old PCB */
   303         /* rem: r9 = new PCB */
   304 
   305         ldr     r5, [r9, #(PCB_DACR)]           /* r5 = new DACR */
   306         mov     r2, #DOMAIN_CLIENT
   307         cmp     r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
   308         beq     .Lcs_context_switched        /* Yup. Don't flush cache */
   309         mrc     p15, 0, r0, c3, c0, 0           /* r0 = old DACR */
   310         /*
   311          * Get the new L1 table pointer into r11.  If we're switching to
   312          * an LWP with the same address space as the outgoing one, we can
   313          * skip the cache purge and the TTB load.
   314          *
   315          * To avoid data dep stalls that would happen anyway, we try
   316          * and get some useful work done in the mean time.
   317          */
   318         mrc     p15, 0, r10, c2, c0, 0          /* r10 = old L1 */
   319         ldr     r11, [r9, #(PCB_PAGEDIR)]       /* r11 = new L1 */
   320 
   321         teq     r10, r11                        /* Same L1? */
   322         cmpeq   r0, r5                          /* Same DACR? */
   323         beq     .Lcs_context_switched           /* yes! */
   324 
   325 #if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B) && !defined(CPU_KRAIT)
   326         /*
   327          * Definitely need to flush the cache.
   328          */
   329 
   330         ldr     r1, .Lcpufuncs
   331         mov     lr, pc                  /* indirect call via cpufuncs table */
   332         ldr     pc, [r1, #CF_IDCACHE_WBINV_ALL]
   333 #endif
   334 .Lcs_cache_purge_skipped:
   335         /* rem: r6 = lock */
   336         /* rem: r9 = new PCB */
   337         /* rem: r10 = old L1 */
   338         /* rem: r11 = new L1 */
   339 
   340         mov     r2, #0x00000000
   341         ldr     r7, [r9, #(PCB_PL1VEC)]
   342 
   343         /*
   344          * Ensure the vector table is accessible by fixing up the L1
   345          */
   346         cmp     r7, #0                  /* No need to fixup vector table? */
   347         ldrne   r2, [r7]                /* But if yes, fetch current value */
   348         ldrne   r0, [r9, #(PCB_L1VEC)]  /* Fetch new vector_page value */
   349         mcr     p15, 0, r5, c3, c0, 0   /* Update DACR for new context */
   350         cmpne   r2, r0                  /* Stuffing the same value? */
   351 #ifndef PMAP_INCLUDE_PTE_SYNC
   352         strne   r0, [r7]                /* Nope, update it */
   353 #else
   354         beq     .Lcs_same_vector
   355         str     r0, [r7]                /* Otherwise, update it */
   356 
   357         /*
   358          * Need to sync the cache to make sure that last store is
   359          * visible to the MMU.
   360          */
   361         ldr     r2, .Lcpufuncs
   362         mov     r0, r7
   363         mov     r1, #4
   364         mov     lr, pc
   365         ldr     pc, [r2, #CF_DCACHE_WB_RANGE]
   366 
   367 .Lcs_same_vector:
   368 #endif /* PMAP_INCLUDE_PTE_SYNC */
   369 
   370         cmp     r10, r11                /* Switching to the same L1? */
   371         ldr     r10, .Lcpufuncs
   372         beq     .Lcs_same_l1            /* Yup. */
   373         /*
   374          * Do a full context switch, including full TLB flush.
   375          */
   376         mov     r0, r11
   377         mov     lr, pc
   378         ldr     pc, [r10, #CF_CONTEXT_SWITCH]
   379 
   380         b       .Lcs_context_switched
   381 
   382         /*
   383          * We're switching to a different process in the same L1.
   384          * In this situation, we only need to flush the TLB for the
   385          * vector_page mapping, and even then only if r7 is non-NULL.
   386          */
   387 .Lcs_same_l1:
   388         cmp     r7, #0
   389         movne   r0, #0                  /* We *know* vector_page's VA is 0x0 */
   390         movne   lr, pc
   391         ldrne   pc, [r10, #CF_TLB_FLUSHID_SE]
   392 
   393 .Lcs_context_switched:
   394 
   395         /* Release the old thread */
   396         str     r6, [r4, #TD_LOCK]
   397 #if defined(SCHED_ULE) && defined(SMP)
   398         ldr     r6, .Lblocked_lock
   399         GET_CURTHREAD_PTR(r3)
   400 1:
   401         ldr     r4, [r3, #TD_LOCK]      /* spin until our lock is no longer */
   402         cmp     r4, r6                  /* the transition blocked_lock */
   403         beq     1b
   404 #endif
   405         
   406         /* XXXSCW: Safe to re-enable FIQs here */
   407 
   408         /* rem: r9 = new PCB */
   409 
   410         /* Restore all the saved registers and exit */
   411         add     r3, r9, #PCB_R4
   412         ldmia   r3, {r4-r12, sp, pc}    /* resumes at the pc saved in the new pcb */
   413 END(cpu_switch)
  414 
   415 ENTRY(savectx)
   416         stmfd   sp!, {lr}               /* preserve the return address */
   417         sub     sp, sp, #4              /* pad; presumably keeps sp 8-byte aligned across the call below -- confirm */
   418         
   419         /* Store all the registers in the thread's pcb */
   420         add     r3, r0, #(PCB_R4)       /* r0 = pcb pointer */
   421         stmia   r3, {r4-r12, sp, lr, pc}
   422 #ifdef VFP
   423         fmrx    r2, fpexc               /* If the VFP is enabled */
   424         tst     r2, #(VFPEXC_EN)        /* the current thread has */
   425         movne   r1, #1                  /* used it, so go save */
   426         addne   r0, r0, #(PCB_VFPSTATE) /* the state into the PCB */
   427         blne    _C_LABEL(vfp_store)     /* and disable the VFP. */
   428 #endif
   429         add     sp, sp, #4;
   430         ldmfd   sp!, {pc}               /* return via the saved lr */
   431 END(savectx)
  432 
   433 ENTRY(fork_trampoline)
   434         STOP_UNWINDING  /* EABI: Don't unwind beyond the thread entry point. */
   435         mov     fp, #0  /* OABI: Stack traceback via fp stops here. */
   436         mov     r2, sp                  /* arg 2: presumably the trapframe on the stack -- confirm */
   437         mov     r1, r5                  /* arg 1: value staged in r5 before first dispatch */
   438         mov     r0, r4                  /* arg 0: value staged in r4 before first dispatch */
   439         ldr     lr, =swi_exit           /* Go finish forking, then return */
   440         b       _C_LABEL(fork_exit)     /* to userland via swi_exit code. */
   441 END(fork_trampoline)
  442 

Cache object: 01a1a7596f3379ed59caeae4c9f7ede1


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.