The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/cpufunc_asm_xscale.S

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $  */
    2 
    3 /*-
    4  * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
    5  * All rights reserved.
    6  *
    7  * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed for the NetBSD Project by
   20  *      Wasabi Systems, Inc.
   21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
   22  *    or promote products derived from this software without specific prior
   23  *    written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
   26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  *
   37  */
   38 
   39 /*-
   40  * Copyright (c) 2001 Matt Thomas.
   41  * Copyright (c) 1997,1998 Mark Brinicombe.
   42  * Copyright (c) 1997 Causality Limited
   43  * All rights reserved.
   44  *
   45  * Redistribution and use in source and binary forms, with or without
   46  * modification, are permitted provided that the following conditions
   47  * are met:
   48  * 1. Redistributions of source code must retain the above copyright
   49  *    notice, this list of conditions and the following disclaimer.
   50  * 2. Redistributions in binary form must reproduce the above copyright
   51  *    notice, this list of conditions and the following disclaimer in the
   52  *    documentation and/or other materials provided with the distribution.
   53  * 3. All advertising materials mentioning features or use of this software
   54  *    must display the following acknowledgement:
   55  *      This product includes software developed by Causality Limited.
   56  * 4. The name of Causality Limited may not be used to endorse or promote
   57  *    products derived from this software without specific prior written
   58  *    permission.
   59  *
   60  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
   61  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   62  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   63  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
   64  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   65  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   66  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   67  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   68  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   69  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   70  * SUCH DAMAGE.
   71  *
   72  * XScale assembly functions for CPU / MMU / TLB specific operations
   73  */
   74 #include <machine/armreg.h>
   75 #include <machine/asm.h>
   76 __FBSDID("$FreeBSD: releng/10.2/sys/arm/arm/cpufunc_asm_xscale.S 278613 2015-02-12 03:50:33Z ian $");
   77 
   78 /*
   79  * Size of the XScale core D-cache.
   80  */
   81 #define DCACHE_SIZE             0x00008000
   82 
   83 .Lblock_userspace_access:
   84         .word   _C_LABEL(block_userspace_access)
   85 
   86 /*
   87  * CPWAIT -- Canonical method to wait for CP15 update.
   88  * From: Intel 80200 manual, section 2.3.3.
   89  *
   90  * NOTE: Clobbers the specified temp reg.
   91  */
   92 #define CPWAIT_BRANCH                                                    \
   93         sub     pc, pc, #4
   94 
   95 #define CPWAIT(tmp)                                                      \
   96         mrc     p15, 0, tmp, c2, c0, 0  /* arbitrary read of CP15 */    ;\
   97         mov     tmp, tmp                /* wait for it to complete */   ;\
   98         CPWAIT_BRANCH                   /* branch to next insn */
   99 
  100 #define CPWAIT_AND_RETURN_SHIFTER       lsr #32
  101 
  102 #define CPWAIT_AND_RETURN(tmp)                                           \
  103         mrc     p15, 0, tmp, c2, c0, 0  /* arbitrary read of CP15 */    ;\
  104         /* Wait for it to complete and branch to the return address */   \
  105         sub     pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER
  106 
/*
 * void xscale_cpwait(void)
 *
 * Serialize on any pending CP15 update, then return.
 * Clobbers: r0.
 */
ENTRY(xscale_cpwait)
        CPWAIT_AND_RETURN(r0)
END(xscale_cpwait)
  110 
  111 /*
  112  * We need a separate cpu_control() entry point, since we have to
  113  * invalidate the Branch Target Buffer in the event the BPRD bit
  114  * changes in the control register.
  115  */
  116 ENTRY(xscale_control)
  117         mrc     p15, 0, r3, c1, c0, 0   /* Read the control register */
  118         bic     r2, r3, r0              /* Clear bits */
  119         eor     r2, r2, r1              /* XOR bits */
  120 
  121         teq     r2, r3                  /* Only write if there was a change */
  122         mcrne   p15, 0, r0, c7, c5, 6   /* Invalidate the BTB */
  123         mcrne   p15, 0, r2, c1, c0, 0   /* Write new control register */
  124         mov     r0, r3                  /* Return old value */
  125 
  126         CPWAIT_AND_RETURN(r1)
  127 END(xscale_control)
  128 
  129 /*
  130  * Functions to set the MMU Translation Table Base register
  131  *
  132  * We need to clean and flush the cache as it uses virtual
  133  * addresses that are about to change.
  134  */
  135 ENTRY(xscale_setttb)
  136 #ifdef CACHE_CLEAN_BLOCK_INTR
  137         mrs     r3, cpsr
  138         orr     r1, r3, #(PSR_I | PSR_F)
  139         msr     cpsr_fsxc, r1
  140 #else
  141         ldr     r3, .Lblock_userspace_access
  142         ldr     r2, [r3]
  143         orr     r1, r2, #1
  144         str     r1, [r3]
  145 #endif
  146         stmfd   sp!, {r0-r3, lr}
  147         bl      _C_LABEL(xscale_cache_cleanID)
  148         mcr     p15, 0, r0, c7, c5, 0   /* invalidate I$ and BTB */
  149         mcr     p15, 0, r0, c7, c10, 4  /* drain write and fill buffer */
  150 
  151         CPWAIT(r0)
  152 
  153         ldmfd   sp!, {r0-r3, lr}
  154 
  155         /* Write the TTB */
  156         mcr     p15, 0, r0, c2, c0, 0
  157 
  158         /* If we have updated the TTB we must flush the TLB */
  159         mcr     p15, 0, r0, c8, c7, 0   /* invalidate I+D TLB */
  160 
  161         /* The cleanID above means we only need to flush the I cache here */
  162         mcr     p15, 0, r0, c7, c5, 0   /* invalidate I$ and BTB */
  163 
  164         CPWAIT(r0)
  165 
  166 #ifdef CACHE_CLEAN_BLOCK_INTR
  167         msr     cpsr_fsxc, r3
  168 #else
  169         str     r2, [r3]
  170 #endif
  171         RET
  172 END(xscale_setttb)
  173 
  174 /*
  175  * TLB functions
  176  *
  177  */
  178 ENTRY(xscale_tlb_flushID_SE)
  179         mcr     p15, 0, r0, c8, c6, 1   /* flush D tlb single entry */
  180         mcr     p15, 0, r0, c8, c5, 1   /* flush I tlb single entry */
  181         CPWAIT_AND_RETURN(r0)
  182 END(xscale_tlb_flushID_SE)
  183 
  184 /*
  185  * Cache functions
  186  */
  187 ENTRY(xscale_cache_flushID)
  188         mcr     p15, 0, r0, c7, c7, 0   /* flush I+D cache */
  189         CPWAIT_AND_RETURN(r0)
  190 END(xscale_cache_flushID)
  191 
/* Invalidate the entire I cache. */
ENTRY(xscale_cache_flushI)
        mcr     p15, 0, r0, c7, c5, 0   /* flush I cache */
        CPWAIT_AND_RETURN(r0)
END(xscale_cache_flushI)
  196 
/* Invalidate (without cleaning) the entire D cache. */
ENTRY(xscale_cache_flushD)
        mcr     p15, 0, r0, c7, c6, 0   /* flush D cache */
        CPWAIT_AND_RETURN(r0)
END(xscale_cache_flushD)
  201 
/* Invalidate the single I-cache line containing the VA in r0. */
ENTRY(xscale_cache_flushI_SE)
        mcr     p15, 0, r0, c7, c5, 1   /* flush I cache single entry */
        CPWAIT_AND_RETURN(r0)
END(xscale_cache_flushI_SE)
  206 
/* Invalidate the single D-cache line containing the VA in r0. */
ENTRY(xscale_cache_flushD_SE)
        /*
         * Errata (rev < 2): Must clean-dcache-line to an address
         * before invalidate-dcache-line to an address, or dirty
         * bits will not be cleared in the dcache array.
         */
        mcr     p15, 0, r0, c7, c10, 1  /* clean line first (errata) */
        mcr     p15, 0, r0, c7, c6, 1   /* flush D cache single entry */
        CPWAIT_AND_RETURN(r0)
END(xscale_cache_flushD_SE)
  217 
/* Clean (write back, without invalidating) the D-cache line for the VA in r0. */
ENTRY(xscale_cache_cleanD_E)
        mcr     p15, 0, r0, c7, c10, 1  /* clean D cache entry */
        CPWAIT_AND_RETURN(r0)
END(xscale_cache_cleanD_E)
  222 
  223 /*
  224  * Information for the XScale cache clean/purge functions:
  225  *
  226  *      * Virtual address of the memory region to use
  227  *      * Size of memory region
  228  *
  229  * Note the virtual address for the Data cache clean operation
  230  * does not need to be backed by physical memory, since no loads
  231  * will actually be performed by the allocate-line operation.
  232  *
  233  * Note that the Mini-Data cache MUST be cleaned by executing
  234  * loads from memory mapped into a region reserved exclusively
  235  * for cleaning of the Mini-Data cache.
  236  */
  237         .data
  238 
  239         .global _C_LABEL(xscale_cache_clean_addr)
  240 _C_LABEL(xscale_cache_clean_addr):
  241         .word   0x00000000
  242 
  243         .global _C_LABEL(xscale_cache_clean_size)
  244 _C_LABEL(xscale_cache_clean_size):
  245         .word   DCACHE_SIZE
  246 
  247         .global _C_LABEL(xscale_minidata_clean_addr)
  248 _C_LABEL(xscale_minidata_clean_addr):
  249         .word   0x00000000
  250 
  251         .global _C_LABEL(xscale_minidata_clean_size)
  252 _C_LABEL(xscale_minidata_clean_size):
  253         .word   0x00000800
  254 
  255         .text
  256 
  257 .Lxscale_cache_clean_addr:
  258         .word   _C_LABEL(xscale_cache_clean_addr)
  259 .Lxscale_cache_clean_size:
  260         .word   _C_LABEL(xscale_cache_clean_size)
  261 
  262 .Lxscale_minidata_clean_addr:
  263         .word   _C_LABEL(xscale_minidata_clean_addr)
  264 .Lxscale_minidata_clean_size:
  265         .word   _C_LABEL(xscale_minidata_clean_size)
  266 
#ifdef CACHE_CLEAN_BLOCK_INTR
/*
 * Guard a cache clean by masking IRQ+FIQ.  r3 holds the saved CPSR
 * across the clean and is restored by the UNBLOCK macro, so the code
 * between the two must preserve r3.
 */
#define XSCALE_CACHE_CLEAN_BLOCK                                        \
        mrs     r3, cpsr                                        ;       \
        orr     r0, r3, #(PSR_I | PSR_F)                        ;       \
        msr     cpsr_fsxc, r0

#define XSCALE_CACHE_CLEAN_UNBLOCK                                      \
        msr     cpsr_fsxc, r3
#else
/*
 * Guard a cache clean by setting the low bit of
 * block_userspace_access instead.  r3 holds the variable's address
 * and ip its previous value across the clean; both must be preserved
 * until the UNBLOCK macro restores the old value.
 */
#define XSCALE_CACHE_CLEAN_BLOCK                                        \
        ldr     r3, .Lblock_userspace_access                    ;       \
        ldr     ip, [r3]                                        ;       \
        orr     r0, ip, #1                                      ;       \
        str     r0, [r3]

#define XSCALE_CACHE_CLEAN_UNBLOCK                                      \
        str     ip, [r3]
#endif /* CACHE_CLEAN_BLOCK_INTR */

/*
 * On exit from the prologue: r0 = end (highest address) of the clean
 * region to use this pass, r1 = region size in bytes.  The eor/str
 * pair flips to the alternate half of the (2x DCACHE_SIZE) area and
 * records it for the next invocation.  The ldmia relies on
 * xscale_cache_clean_size immediately following xscale_cache_clean_addr
 * in .data.
 */
#define XSCALE_CACHE_CLEAN_PROLOGUE                                     \
        XSCALE_CACHE_CLEAN_BLOCK                                ;       \
        ldr     r2, .Lxscale_cache_clean_addr                   ;       \
        ldmia   r2, {r0, r1}                                    ;       \
        /*                                                              \
         * BUG ALERT!                                                   \
         *                                                              \
         * The XScale core has a strange cache eviction bug, which      \
         * requires us to use 2x the cache size for the cache clean     \
         * and for that area to be aligned to 2 * cache size.           \
         *                                                              \
         * The work-around is to use 2 areas for cache clean, and to    \
         * alternate between them whenever this is done.  No one knows  \
         * why the work-around works (mmm!).                            \
         */                                                             \
        eor     r0, r0, #(DCACHE_SIZE)                          ;       \
        str     r0, [r2]                                        ;       \
        add     r0, r0, r1

#define XSCALE_CACHE_CLEAN_EPILOGUE                                     \
        XSCALE_CACHE_CLEAN_UNBLOCK
  307 
/*
 * Clean the entire D cache (and for the purge/sync entry points,
 * invalidate the I cache too).  Five entry points share one body:
 *
 *   xscale_cache_syncI / xscale_cache_purgeID:
 *       flush I$ + BTB, then clean D$
 *   xscale_cache_cleanID / xscale_cache_purgeD / xscale_cache_cleanD:
 *       clean D$ only
 *
 * The D cache is cleaned by line-allocating through a dedicated
 * region (see XSCALE_CACHE_CLEAN_PROLOGUE), which evicts any dirty
 * data without performing real loads.
 */
ENTRY_NP(xscale_cache_syncI)

EENTRY_NP(xscale_cache_purgeID)
        mcr     p15, 0, r0, c7, c5, 0   /* flush I cache (D cleaned below) */
EENTRY_NP(xscale_cache_cleanID)
EENTRY_NP(xscale_cache_purgeD)
EENTRY(xscale_cache_cleanD)
        XSCALE_CACHE_CLEAN_PROLOGUE     /* r0 = region end, r1 = size */

        /* Walk downwards one 32-byte line at a time until r1 hits 0. */
1:      subs    r0, r0, #32
        mcr     p15, 0, r0, c7, c2, 5   /* allocate cache line */
        subs    r1, r1, #32
        bne     1b

        CPWAIT(r0)

        mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */

        CPWAIT(r0)

        XSCALE_CACHE_CLEAN_EPILOGUE
        RET
EEND(xscale_cache_cleanD)
EEND(xscale_cache_purgeD)
EEND(xscale_cache_cleanID)
EEND(xscale_cache_purgeID)
END(xscale_cache_syncI)
  335 
  336 /*
  337  * Clean the mini-data cache.
  338  *
  339  * It's expected that we only use the mini-data cache for
  340  * kernel addresses, so there is no need to purge it on
  341  * context switch, and no need to prevent userspace access
  342  * while we clean it.
  343  */
  344 ENTRY(xscale_cache_clean_minidata)
  345         ldr     r2, .Lxscale_minidata_clean_addr
  346         ldmia   r2, {r0, r1}
  347 1:      ldr     r3, [r0], #32
  348         subs    r1, r1, #32
  349         bne     1b
  350 
  351         mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
  352 
  353         CPWAIT_AND_RETURN(r1)
  354 END(xscale_cache_clean_minidata)
  355 
/*
 * Purge (clean, then invalidate) the I and D cache lines for the
 * single VA in r0: clean D line, drain, then invalidate both lines.
 * Clobbers r1.
 */
ENTRY(xscale_cache_purgeID_E)
        mcr     p15, 0, r0, c7, c10, 1  /* clean D cache entry */
        CPWAIT(r1)
        mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
        mcr     p15, 0, r0, c7, c5, 1   /* flush I cache single entry */
        mcr     p15, 0, r0, c7, c6, 1   /* flush D cache single entry */
        CPWAIT_AND_RETURN(r1)
END(xscale_cache_purgeID_E)
  364 
/*
 * Purge (clean, then invalidate) the D cache line for the single VA
 * in r0.  Clobbers r1.
 */
ENTRY(xscale_cache_purgeD_E)
        mcr     p15, 0, r0, c7, c10, 1  /* clean D cache entry */
        CPWAIT(r1)
        mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
        mcr     p15, 0, r0, c7, c6, 1   /* flush D cache single entry */
        CPWAIT_AND_RETURN(r1)
END(xscale_cache_purgeD_E)
  372 
  373 /*
  374  * Soft functions
  375  */
  376 /* xscale_cache_syncI is identical to xscale_cache_purgeID */
  377 
  378 EENTRY(xscale_cache_cleanID_rng)
  379 ENTRY(xscale_cache_cleanD_rng)
  380         cmp     r1, #0x4000
  381         bcs     _C_LABEL(xscale_cache_cleanID)
  382 
  383         and     r2, r0, #0x1f
  384         add     r1, r1, r2
  385         bic     r0, r0, #0x1f
  386 
  387 1:      mcr     p15, 0, r0, c7, c10, 1  /* clean D cache entry */
  388         add     r0, r0, #32
  389         subs    r1, r1, #32
  390         bhi     1b
  391 
  392         CPWAIT(r0)
  393 
  394         mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
  395 
  396         CPWAIT_AND_RETURN(r0)
  397 /*END(xscale_cache_cleanID_rng)*/
  398 END(xscale_cache_cleanD_rng)
  399 
/*
 * Purge (clean + invalidate D, invalidate I) a range of the caches.
 *
 * In: r0 = start VA, r1 = length in bytes.
 * Ranges >= 16 KB fall through to the whole-cache purge; otherwise
 * the range is aligned to 32-byte lines and purged per line.
 * Clobbers r0-r2.
 */
ENTRY(xscale_cache_purgeID_rng)
        cmp     r1, #0x4000             /* >= 16 KB? purge everything */
        bcs     _C_LABEL(xscale_cache_purgeID)

        and     r2, r0, #0x1f           /* align start down to a line, */
        add     r1, r1, r2              /* extending the length to match */
        bic     r0, r0, #0x1f

1:      mcr     p15, 0, r0, c7, c10, 1  /* clean D cache entry */
        mcr     p15, 0, r0, c7, c6, 1   /* flush D cache single entry */
        mcr     p15, 0, r0, c7, c5, 1   /* flush I cache single entry */
        add     r0, r0, #32
        subs    r1, r1, #32
        bhi     1b

        CPWAIT(r0)

        mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */

        CPWAIT_AND_RETURN(r0)
END(xscale_cache_purgeID_rng)
  421 
/*
 * Purge (clean + invalidate) a range of the D cache.
 *
 * In: r0 = start VA, r1 = length in bytes.
 * Ranges >= 16 KB fall through to the whole-cache purge; otherwise
 * the range is aligned to 32-byte lines and purged per line.
 * Clobbers r0-r2.
 */
ENTRY(xscale_cache_purgeD_rng)
        cmp     r1, #0x4000             /* >= 16 KB? purge everything */
        bcs     _C_LABEL(xscale_cache_purgeD)

        and     r2, r0, #0x1f           /* align start down to a line, */
        add     r1, r1, r2              /* extending the length to match */
        bic     r0, r0, #0x1f

1:      mcr     p15, 0, r0, c7, c10, 1  /* clean D cache entry */
        mcr     p15, 0, r0, c7, c6, 1   /* flush D cache single entry */
        add     r0, r0, #32
        subs    r1, r1, #32
        bhi     1b

        CPWAIT(r0)

        mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */

        CPWAIT_AND_RETURN(r0)
END(xscale_cache_purgeD_rng)
  442 
/*
 * Synchronize the I cache with a range of the D cache (after code has
 * been written as data): clean the D lines, invalidate the I lines.
 *
 * In: r0 = start VA, r1 = length in bytes.
 * Ranges >= 16 KB fall through to the whole-cache sync.
 * Clobbers r0-r2.
 */
ENTRY(xscale_cache_syncI_rng)
        cmp     r1, #0x4000             /* >= 16 KB? sync everything */
        bcs     _C_LABEL(xscale_cache_syncI)

        and     r2, r0, #0x1f           /* align start down to a line, */
        add     r1, r1, r2              /* extending the length to match */
        bic     r0, r0, #0x1f

1:      mcr     p15, 0, r0, c7, c10, 1  /* clean D cache entry */
        mcr     p15, 0, r0, c7, c5, 1   /* flush I cache single entry */
        add     r0, r0, #32
        subs    r1, r1, #32
        bhi     1b

        CPWAIT(r0)

        mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */

        CPWAIT_AND_RETURN(r0)
END(xscale_cache_syncI_rng)
  463 
/*
 * Invalidate (without cleaning) a range of the D cache.
 *
 * In: r0 = start VA, r1 = length in bytes.
 * No whole-cache shortcut here; the range is aligned to 32-byte
 * lines and each line invalidated.  Clobbers r0-r2.
 */
ENTRY(xscale_cache_flushD_rng)
        and     r2, r0, #0x1f           /* align start down to a line, */
        add     r1, r1, r2              /* extending the length to match */
        bic     r0, r0, #0x1f

1:      mcr     p15, 0, r0, c7, c6, 1   /* flush D cache single entry */
        add     r0, r0, #32
        subs    r1, r1, #32
        bhi     1b

        mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */

        CPWAIT_AND_RETURN(r0)
END(xscale_cache_flushD_rng)
  478 
  479 /*
  480  * Context switch.
  481  *
  482  * These is the CPU-specific parts of the context switcher cpu_switch()
  483  * These functions actually perform the TTB reload.
  484  *
  485  * NOTE: Special calling convention
  486  *      r1, r4-r13 must be preserved
  487  */
  488 ENTRY(xscale_context_switch)
  489         /*
  490          * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
  491          * Thus the data cache will contain only kernel data and the
  492          * instruction cache will contain only kernel code, and all
  493          * kernel mappings are shared by all processes.
  494          */
  495 
  496         /* Write the TTB */
  497         mcr     p15, 0, r0, c2, c0, 0
  498 
  499         /* If we have updated the TTB we must flush the TLB */
  500         mcr     p15, 0, r0, c8, c7, 0   /* flush the I+D tlb */
  501 
  502         CPWAIT_AND_RETURN(r0)
  503 END(xscale_context_switch)
  504 
  505 /*
  506  * xscale_cpu_sleep
  507  *
  508  * This is called when there is nothing on any of the run queues.
  509  * We go into IDLE mode so that any IRQ or FIQ will awaken us.
  510  *
  511  * If this is called with anything other than ARM_SLEEP_MODE_IDLE,
  512  * ignore it.
  513  */
  514 ENTRY(xscale_cpu_sleep)
  515         tst     r0, #0x00000000
  516         bne     1f
  517         mov     r0, #0x1
  518         mcr     p14, 0, r0, c7, c0, 0
  519 
  520 1:
  521         RET
  522 END(xscale_cpu_sleep)
  523 

Cache object: 5e95bffef42f513883e22d0ca1c71154


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.