The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/locore-v6.S

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright 2004-2014 Olivier Houchard <cognet@FreeBSD.org>
    3  * Copyright 2012-2014 Ian Lepore <ian@FreeBSD.org>
    4  * Copyright 2013-2014 Andrew Turner <andrew@FreeBSD.org>
    5  * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
    6  * Copyright 2014 Michal Meloun <meloun@miracle.cz>
    7  * All rights reserved.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  */
   30 
   31 #include "assym.inc"
   32 #include <sys/syscall.h>
   33 #include <machine/asm.h>
   34 #include <machine/asmacros.h>
   35 #include <machine/armreg.h>
   36 #include <machine/sysreg.h>
   37 #include <machine/pte-v6.h>
   38 
   39 __FBSDID("$FreeBSD$");
   40 
   41 /* We map 64MB of kernel unless overridden in assym.inc by the kernel option. */
   42 #ifndef LOCORE_MAP_MB
   43 #define LOCORE_MAP_MB   64
   44 #endif
   45 
   46 #if __ARM_ARCH >= 7
   47 #if defined(__ARM_ARCH_7VE__) || defined(__clang__)
   48 /*
   49  * HYP support is in bintuils >= 2.21 and gcc >= 4.9 defines __ARM_ARCH_7VE__
   50  * when enabled. llvm >= 3.6 supports it too.
   51  */
   52 .arch_extension virt
   53 #endif
   54 #endif /* __ARM_ARCH >= 7 */
   55 
   56 /* A small statically-allocated stack used only during initarm() and AP startup. */
   57 #define INIT_ARM_STACK_SIZE     2048
   58 
   59         .text
   60         .align  2
   61 
   62         .globl kernbase
   63         .set kernbase,KERNVIRTADDR
   64 
   65 #if __ARM_ARCH >= 7
   66 #define HANDLE_HYP                                                      \
   67         /* Leave HYP mode */                                            ;\
   68         mrs     r0, cpsr                                                ;\
   69         and     r0, r0, #(PSR_MODE)   /* Mode is in the low 5 bits of CPSR */ ;\
   70         teq     r0, #(PSR_HYP32_MODE) /* Hyp Mode? */                   ;\
   71         bne     1f                                                      ;\
   72         /* Install Hypervisor Stub Exception Vector */                  ;\
   73         bl hypervisor_stub_vect_install                                 ;\
   74         mov     r0, 0                                                   ;\
   75         adr     r1, hypmode_enabled                                     ;\
   76         str     r0, [r1]                                                ;\
   77         /* Ensure that IRQ, FIQ and Aborts will be disabled after eret */ ;\
   78         mrs     r0, cpsr                                                ;\
   79         bic     r0, r0, #(PSR_MODE)                                     ;\
   80         orr     r0, r0, #(PSR_SVC32_MODE)                               ;\
   81         orr     r0, r0, #(PSR_I | PSR_F | PSR_A)                        ;\
   82         msr     spsr_cxsf, r0                                           ;\
   83         /* Exit hypervisor mode */                                      ;\
   84         adr     lr, 2f                                                  ;\
   85         MSR_ELR_HYP(14)                                                 ;\
   86         ERET                                                            ;\
   87 1:                                                                      ;\
   88         mov     r0, -1                                                  ;\
   89         adr     r1, hypmode_enabled                                     ;\
   90         str     r0, [r1]                                                ;\
   91 2:
   92 #else
   93 #define HANDLE_HYP
   94 #endif /* __ARM_ARCH >= 7 */
   95 
   96 /*
   97  * On entry for FreeBSD boot ABI:
   98  *      r0 - metadata pointer or 0 (boothowto on AT91's boot2)
   99  *      r1 - if (r0 == 0) then metadata pointer
  100  * On entry for Linux boot ABI:
  101  *      r0 - 0
  102  *      r1 - machine type (passed as arg2 to initarm)
  103  *      r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
  104  *
  105  * For both types of boot we gather up the args, put them in a struct arm_boot_params
  106  * structure and pass that to initarm.
  107  */
  108         .globl  btext
  109 btext:
  110 ASENTRY_NP(_start)
  111         STOP_UNWINDING          /* Can't unwind into the bootloader! */
  112 
  113         /* Make sure interrupts are disabled. */
  114         cpsid   ifa
  115 
  116         mov     r8, r0          /* 0 or boot mode from boot2 */
  117         mov     r9, r1          /* Save Machine type */
  118         mov     r10, r2         /* Save meta data */
  119         mov     r11, r3         /* Future expansion */
  120 
  121         # If HYP-MODE is active, install an exception vector stub
  122         HANDLE_HYP
  123 
  124         /*
  125          * Check whether data cache is enabled.  If it is, then we know
  126          * current tags are valid (not power-on garbage values) and there
  127          * might be dirty lines that need cleaning.  Disable cache to prevent
  128          * new lines being allocated, then call wbinv_poc_all to clean it.
  129          */
  130         mrc     CP15_SCTLR(r7)
  131         tst     r7, #CPU_CONTROL_DC_ENABLE
  132         blne    dcache_wbinv_poc_all
  133 
  134         /* ! Do not write to memory between wbinv and disabling cache ! */
  135 
  136         /*
  137          * Now there are no dirty lines, but there may still be lines marked
  138          * valid.  Disable all caches and the MMU, and invalidate everything
  139          * before setting up new page tables and re-enabling the mmu.
  140          */
  141 1:
  142         bic     r7, #CPU_CONTROL_DC_ENABLE
  143         bic     r7, #CPU_CONTROL_AFLT_ENABLE
  144         bic     r7, #CPU_CONTROL_MMU_ENABLE
  145         bic     r7, #CPU_CONTROL_IC_ENABLE
  146         bic     r7, #CPU_CONTROL_BPRD_ENABLE
  147         bic     r7, #CPU_CONTROL_SW_ENABLE
  148         orr     r7, #CPU_CONTROL_UNAL_ENABLE
  149         orr     r7, #CPU_CONTROL_VECRELOC
  150         mcr     CP15_SCTLR(r7)
  151         DSB
  152         ISB
  153         bl      dcache_inv_poc_all
  154         mcr     CP15_ICIALLU
  155         DSB
  156         ISB
  157 
  158         /*
  159          * Build page table from scratch.
  160          */
  161 
  162         /* 
  163          * Figure out the physical address we're loaded at by assuming this
  164          * entry point code is in the first L1 section and so if we clear the
  165          * offset bits of the pc that will give us the section-aligned load
  166          * address, which remains in r5 throughout all the following code.
  167          */
  168         ldr     r2, =(L1_S_OFFSET)
  169         bic     r5, pc, r2
  170 
  171         /* Find the delta between VA and PA, result stays in r0 throughout. */
  172         adr     r0, Lpagetable
  173         bl      translate_va_to_pa
  174 
  175         /* 
  176          * First map the entire 4GB address space as VA=PA.  It's mapped as
  177          * normal (cached) memory because it's for things like accessing the
  178          * parameters passed in from the bootloader, which might be at any
  179          * physical address, different for every platform.
  180          */
  181         mov     r1, #0
  182         mov     r2, #0
  183         mov     r3, #4096
  184         bl      build_pagetables
  185 
  186         /* 
  187          * Next we map the kernel starting at the physical load address, mapped
  188          * to the VA the kernel is linked for.  The default size we map is 64MiB
  189          * but it can be overridden with a kernel option.
  190          */
  191         mov     r1, r5
  192         ldr     r2, =(KERNVIRTADDR)
  193         ldr     r3, =(LOCORE_MAP_MB)
  194         bl      build_pagetables
  195 
  196         /* Create a device mapping for early_printf if specified. */
  197 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
  198         ldr     r1, =SOCDEV_PA
  199         ldr     r2, =SOCDEV_VA
  200         mov     r3, #1
  201         bl      build_device_pagetables
  202 #endif
  203         bl      init_mmu
  204 
  205         /* Transition the PC from physical to virtual addressing. */
  206         ldr     pc, =1f
  207 1:
  208 
  209         /* Setup stack, clear BSS */
  210         ldr     r1, =.Lstart
  211         ldmia   r1, {r1, r2, sp}        /* Set initial stack and */
  212         add     sp, sp, #INIT_ARM_STACK_SIZE
  213         sub     r2, r2, r1              /* get zero init data */
  214         mov     r3, #0
  215 2:
  216         str     r3, [r1], #0x0004       /* get zero init data */
  217         subs    r2, r2, #4
  218         bgt     2b
  219 
  220         mov     r1, #28                 /* loader info size is 28 bytes also second arg */
  221         subs    sp, sp, r1              /* allocate arm_boot_params struct on stack */
  222         mov     r0, sp                  /* loader info pointer is first arg */
  223         bic     sp, sp, #7              /* align stack to 8 bytes */
  224         str     r1, [r0]                /* Store length of loader info */
  225         str     r8, [r0, #4]            /* Store r0 from boot loader */
  226         str     r9, [r0, #8]            /* Store r1 from boot loader */
  227         str     r10, [r0, #12]          /* store r2 from boot loader */
  228         str     r11, [r0, #16]          /* store r3 from boot loader */
  229         str     r5, [r0, #20]           /* store the physical address */
  230         adr     r4, Lpagetable          /* load the pagetable address */
  231         ldr     r5, [r4, #4]
  232         str     r5, [r0, #24]           /* store the pagetable address */
  233         mov     fp, #0                  /* trace back starts here */
  234         bl      _C_LABEL(initarm)       /* Off we go */
  235 
  236         /* init arm will return the new stack pointer. */
  237         mov     sp, r0
  238 
  239         bl      _C_LABEL(mi_startup)    /* call mi_startup()! */
  240 
  241         ldr     r0, =.Lmainreturned
  242         b       _C_LABEL(panic)
  243         /* NOTREACHED */
  244 END(_start)
  245 
  246 #define VA_TO_PA_POINTER(name, table)    \
  247 name:                                   ;\
  248         .word   .                       ;\
  249         .word   table
  250 
  251 /*
  252  * Returns the physical address of a magic va to pa pointer.
  253  * r0     - The pagetable data pointer. This must be built using the
  254  *          VA_TO_PA_POINTER macro.
  255  *          e.g.
  256  *            VA_TO_PA_POINTER(Lpagetable, pagetable)
  257  *            ...
  258  *            adr  r0, Lpagetable
  259  *            bl   translate_va_to_pa
  260  *            r0 will now contain the physical address of pagetable
  261  * r1, r2 - Trashed
  262  */
  263 translate_va_to_pa:
  264         ldr     r1, [r0]
  265         sub     r2, r1, r0
  266         /* At this point: r2 = VA - PA */
  267 
  268         /*
  269          * Find the physical address of the table. After these two
  270          * instructions:
  271          * r1 = va(pagetable)
  272          *
  273          * r0 = va(pagetable) - (VA - PA)
  274          *    = va(pagetable) - VA + PA
  275          *    = pa(pagetable)
  276          */
  277         ldr     r1, [r0, #4]
  278         sub     r0, r1, r2
  279         mov     pc, lr
  280 
  281 /*
  282  * Init MMU
  283  * r0 - the table base address
  284  */
  285 
  286 ASENTRY_NP(init_mmu)
  287 
  288         /* Setup TLB and MMU registers */
  289         mcr     CP15_TTBR0(r0)          /* Set TTB */
  290         mov     r0, #0
  291         mcr     CP15_CONTEXTIDR(r0)     /* Set ASID to 0 */
  292 
  293         /* Set the Domain Access register */
  294         mov     r0, #DOMAIN_CLIENT      /* Only domain #0 is used */
  295         mcr     CP15_DACR(r0)
  296 
  297         /*
  298          * Set TEX remap registers
  299          *  - All is set to uncacheable memory
  300          */
  301         ldr     r0, =0xAAAAA
  302         mcr     CP15_PRRR(r0)
  303         mov     r0, #0
  304         mcr     CP15_NMRR(r0)
  305         mcr     CP15_TLBIALL            /* Flush TLB */
  306         DSB
  307         ISB
  308 
  309         /* Enable MMU */
  310         mrc     CP15_SCTLR(r0)
  311         orr     r0, r0, #CPU_CONTROL_MMU_ENABLE
  312         orr     r0, r0, #CPU_CONTROL_V6_EXTPAGE
  313         orr     r0, r0, #CPU_CONTROL_TR_ENABLE
  314         orr     r0, r0, #CPU_CONTROL_AF_ENABLE
  315         mcr     CP15_SCTLR(r0)
  316         DSB
  317         ISB
  318         mcr     CP15_TLBIALL            /* Flush TLB */
  319         mcr     CP15_BPIALL             /* Flush Branch predictor */
  320         DSB
  321         ISB
  322 
  323         mov     pc, lr
  324 END(init_mmu)
  325 
  326 
  327 /*
  328  * Init SMP coherent mode, enable caching and switch to final MMU table.
  329  * Called with disabled caches
  330  * r0 - The table base address
  331  * r1 - clear bits for aux register
  332  * r2 - set bits for aux register
  333  */
  334 ASENTRY_NP(reinit_mmu)
  335         push    {r4-r11, lr}
  336         mov     r4, r0
  337         mov     r5, r1
  338         mov     r6, r2
  339 
  340         /* !! Be very paranoid here !! */
  341         /* !! We cannot write single bit here !! */
  342 
  343 #if 0   /* XXX writeback shouldn't be necessary */
  344         /* Write back and invalidate all integrated caches */
  345         bl      dcache_wbinv_poc_all
  346 #else
  347         bl      dcache_inv_pou_all
  348 #endif
  349         mcr     CP15_ICIALLU
  350         DSB
  351         ISB
  352 
  353         /* Set auxiliary register */
  354         mrc     CP15_ACTLR(r7)
  355         bic     r8, r7, r5              /* Mask bits */
  356         eor     r8, r8, r6              /* Set bits */
  357         teq     r7, r8
  358         mcrne   CP15_ACTLR(r8)
  359         DSB
  360         ISB
  361 
  362         /* Enable caches. */
  363         mrc     CP15_SCTLR(r7)
  364         orr     r7, #CPU_CONTROL_DC_ENABLE
  365         orr     r7, #CPU_CONTROL_IC_ENABLE
  366         orr     r7, #CPU_CONTROL_BPRD_ENABLE
  367         mcr     CP15_SCTLR(r7)
  368         DSB
  369 
  370         mcr     CP15_TTBR0(r4)          /* Set new TTB */
  371         DSB
  372         ISB
  373 
  374         mcr     CP15_TLBIALL            /* Flush TLB */
  375         mcr     CP15_BPIALL             /* Flush Branch predictor */
  376         DSB
  377         ISB
  378 
  379 #if 0 /* XXX writeback shouldn't be necessary */
  380         /* Write back and invalidate all integrated caches */
  381         bl      dcache_wbinv_poc_all
  382 #else
  383         bl      dcache_inv_pou_all
  384 #endif
  385         mcr     CP15_ICIALLU
  386         DSB
  387         ISB
  388 
  389         pop     {r4-r11, pc}
  390 END(reinit_mmu)
  391 
  392 
  393 /*
  394  * Builds the page table
  395  * r0 - The table base address
  396  * r1 - The physical address (trashed)
  397  * r2 - The virtual address (trashed)
  398  * r3 - The number of 1MiB sections
  399  * r4 - Trashed
  400  *
  401  * Addresses must be 1MiB aligned
  402  */
  403 build_device_pagetables:
  404         ldr     r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
  405         b       1f
  406 build_pagetables:
  407         /* Set the required page attributed */
  408         ldr     r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
  409 1:
  410         orr     r1, r4
  411 
  412         /* Move the virtual address to the correct bit location */
  413         lsr     r2, #(PTE1_SHIFT - 2)
  414 
  415         mov     r4, r3
  416 2:
  417         str     r1, [r0, r2]
  418         add     r2, r2, #4
  419         add     r1, r1, #(PTE1_SIZE)
  420         adds    r4, r4, #-1
  421         bhi     2b
  422 
  423         mov     pc, lr
  424 
  425 VA_TO_PA_POINTER(Lpagetable, boot_pt1)
  426 
  427         .global _C_LABEL(hypmode_enabled)
  428 _C_LABEL(hypmode_enabled):
  429         .word 0
  430 
  431 .Lstart:
  432         .word   _edata                  /* Note that these three items are */
  433         .word   _ebss                   /* loaded with a single ldmia and */
  434         .word   svcstk                  /* must remain in order together. */
  435 
  436 .Lmainreturned:
  437         .asciz  "main() returned"
  438         .align  2
  439 
  440         .bss
  441 svcstk:
  442         .space  INIT_ARM_STACK_SIZE * MAXCPU
  443 
  444 /*
  445  * Memory for the initial pagetable. We are unable to place this in
  446  * the bss as this will be cleared after the table is loaded.
  447  */
  448         .section ".init_pagetable", "aw", %nobits
  449         .align  14 /* 16KiB aligned */
  450         .globl  boot_pt1
  451 boot_pt1:
  452         .space  L1_TABLE_SIZE
  453 
  454         .text
  455         .align  2
  456 
  457 #if defined(SMP)
  458 
  459 ASENTRY_NP(mpentry)
  460         /* Make sure interrupts are disabled. */
  461         cpsid   ifa
  462 
  463         HANDLE_HYP
  464 
  465         /* Setup core, disable all caches. */
  466         mrc     CP15_SCTLR(r0)
  467         bic     r0, #CPU_CONTROL_MMU_ENABLE
  468         bic     r0, #CPU_CONTROL_AFLT_ENABLE
  469         bic     r0, #CPU_CONTROL_DC_ENABLE
  470         bic     r0, #CPU_CONTROL_IC_ENABLE
  471         bic     r0, #CPU_CONTROL_BPRD_ENABLE
  472         bic     r0, #CPU_CONTROL_SW_ENABLE
  473         orr     r0, #CPU_CONTROL_UNAL_ENABLE
  474         orr     r0, #CPU_CONTROL_VECRELOC
  475         mcr     CP15_SCTLR(r0)
  476         DSB
  477         ISB
  478 
  479         /* Invalidate L1 cache I+D cache */
  480         bl      dcache_inv_pou_all
  481         mcr     CP15_ICIALLU
  482         DSB
  483         ISB
  484 
  485         /* Find the delta between VA and PA */
  486         adr     r0, Lpagetable
  487         bl      translate_va_to_pa
  488 
  489         bl      init_mmu
  490 
  491         adr     r1, .Lstart+8           /* Get initstack pointer from */
  492         ldr     sp, [r1]                /* startup data. */
  493         mrc     CP15_MPIDR(r0)          /* Get processor id number. */
  494         and     r0, r0, #0x0f
  495         mov     r1, #INIT_ARM_STACK_SIZE
  496         mul     r2, r1, r0              /* Point sp to initstack */
  497         add     sp, sp, r2              /* area for this processor. */
  498 
  499         /* Switch to virtual addresses. */
  500         ldr     pc, =1f
  501 1:
  502         mov     fp, #0                  /* trace back starts here */
  503         bl      _C_LABEL(init_secondary)/* Off we go, cpu id in r0. */
  504 
  505         adr     r0, .Lmpreturned
  506         b       _C_LABEL(panic)
  507         /* NOTREACHED */
  508 END(mpentry)
  509 
  510 .Lmpreturned:
  511         .asciz  "init_secondary() returned"
  512         .align  2
  513 #endif
  514 
  515 ENTRY_NP(cpu_halt)
  516 
  517         /* XXX re-implement !!! */
  518         cpsid   ifa
  519         bl      dcache_wbinv_poc_all
  520 
  521         ldr     r4, .Lcpu_reset_address
  522         ldr     r4, [r4]
  523         teq     r4, #0
  524         movne   pc, r4
  525 1:
  526         WFI
  527         b       1b
  528 
  529         /*
  530          * _cpu_reset_address contains the address to branch to, to complete
  531          * the cpu reset after turning the MMU off
  532          * This variable is provided by the hardware specific code
  533          */
  534 .Lcpu_reset_address:
  535         .word   _C_LABEL(cpu_reset_address)
  536 END(cpu_halt)
  537 
  538 
  539 /*
  540  * setjump + longjmp
  541  */
  542 ENTRY(setjmp)
  543         stmia   r0, {r4-r14}
  544         mov     r0, #0x00000000
  545         RET
  546 END(setjmp)
  547 
  548 ENTRY(longjmp)
  549         ldmia   r0, {r4-r14}
  550         mov     r0, #0x00000001
  551         RET
  552 END(longjmp)
  553 
  554         .data
  555         .global _C_LABEL(esym)
  556 _C_LABEL(esym): .word   _C_LABEL(end)
  557 
  558 ENTRY_NP(abort)
  559         b       _C_LABEL(abort)
  560 END(abort)
  561 
  562 ENTRY_NP(sigcode)
  563         mov     r0, sp
  564         add     r0, r0, #SIGF_UC
  565 
  566         /*
  567          * Call the sigreturn system call.
  568          *
  569          * We have to load r7 manually rather than using
  570          * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
  571          * correct. Using the alternative places esigcode at the address
  572          * of the data rather than the address one past the data.
  573          */
  574 
  575         ldr     r7, [pc, #12]   /* Load SYS_sigreturn */
  576         swi     SYS_sigreturn
  577 
  578         /* Well if that failed we better exit quick ! */
  579 
  580         ldr     r7, [pc, #8]    /* Load SYS_exit */
  581         swi     SYS_exit
  582 
  583         /* Branch back to retry SYS_sigreturn */
  584         b       . - 16
  585 END(sigcode)
  586         .word   SYS_sigreturn
  587         .word   SYS_exit
  588 
  589         .align  2
  590         .global _C_LABEL(esigcode)
  591                 _C_LABEL(esigcode):
  592 
  593         .data
  594         .global szsigcode
  595 szsigcode:
  596         .long esigcode-sigcode
  597 
  598 /* End of locore.S */

Cache object: 5c3c6edba04ddf5636bb5111fc585562


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.