FreeBSD/Linux Kernel Cross Reference
sys/i386/isa/apic_ipl.s

/*-
 * Copyright (c) 1997, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */


        .data
        ALIGN_DATA

/*
 * Routines used by splz_unpend to build an interrupt frame from a
 * trap frame.  The _vec[] routines build the proper frame on the stack,
 * then call one of _Xintr0 thru _XintrNN.
 *
 * used by:
 *   i386/isa/apic_ipl.s (this file):   splz_unpend JUMPs to HWIs.
 *   i386/isa/clock.c:                  setup _vec[clock] to point at _vec8254.
 */
        .globl _vec
_vec:
        .long    vec0,  vec1,  vec2,  vec3,  vec4,  vec5,  vec6,  vec7
        .long    vec8,  vec9, vec10, vec11, vec12, vec13, vec14, vec15
        .long   vec16, vec17, vec18, vec19, vec20, vec21, vec22, vec23

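/*
 * For orientation only (not assembled): per the comment above, clock.c
 * overrides the clock IRQ's slot in this table so that splz_unpend
 * dispatches a pending clock interrupt through the private vec8254 stub.
 * Rough C sketch of that hookup; the function and variable names other
 * than vec/vec8254 are illustrative assumptions:
 *
 *      extern u_int vec[];
 *      extern void vec8254(void);
 *
 *      static void
 *      hook_clock_vec(int clock_irq)
 *      {
 *              vec[clock_irq] = (u_int)vec8254;
 *      }
 */
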
/*
 * Note:
 *      This is the APIC equivalent of the UP _imen.
 *      It is OPAQUE, and must NOT be accessed directly.
 *      It MUST be accessed along with the IO APIC as a 'critical region'.
 *      Accessed by:
 *              INTREN()
 *              INTRDIS()
 *              MAYBE_MASK_IRQ
 *              MAYBE_UNMASK_IRQ
 *              imen_dump()
 */
        .p2align 2                              /* MUST be 32bit aligned */
        .globl _apic_imen
_apic_imen:
        .long   HWI_MASK


/*
 *
 */
        .text
        SUPERALIGN_TEXT

/*
 * splz() -     dispatch pending interrupts after cpl reduced
 *
 * Interrupt priority mechanism
 *      -- soft splXX masks with group mechanism (cpl)
 *      -- h/w masks for currently active or unused interrupts (imen)
 *      -- ipending = active interrupts currently masked by cpl
 */

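/*
 * For orientation only (not assembled): splz() is normally reached from
 * the spl unwind path once cpl has been lowered.  A rough sketch of that
 * caller in C, loosely following the UP flavour of the i386 spl inlines
 * of this era (the SMP variant differs, e.g. around the MP lock):
 *
 *      extern volatile u_int cpl, ipending;
 *      extern void splz(void);
 *
 *      static __inline void
 *      splx(u_int ncpl)
 *      {
 *              cpl = ncpl;
 *              if (ipending & ~ncpl)
 *                      splz();
 *      }
 */
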
ENTRY(splz)
        /*
         * The caller has restored cpl and checked that (ipending & ~cpl)
         * is nonzero.  However, since ipending can change at any time
         * (by an interrupt or, with SMP, by another cpu), we have to
         * repeat the check.  At the moment we must own the MP lock in
         * the SMP case because the interrupt handlers require it.  We
         * loop until no unmasked pending interrupts remain.
         *
         * No new unmasked pending interrupts will be added during the
         * loop because, being unmasked, the interrupt code will be able
         * to execute the interrupts.
         *
         * Interrupts come in two flavors:  Hardware interrupts and software
         * interrupts.  We have to detect the type of interrupt (based on the
         * position of the interrupt bit) and call the appropriate dispatch
         * routine.
         *
         * NOTE: "bsfl %ecx,%ecx" is undefined when %ecx is 0 so we can't
         * rely on the secondary btrl tests.
         */
        movl    _cpl,%eax
splz_next:
        /*
         * We don't need any locking here.  (ipending & ~cpl) cannot grow
         * while we're looking at it - any interrupt will shrink it to 0.
         */
        movl    %eax,%ecx
        notl    %ecx                    /* set bit = unmasked level */
        andl    _ipending,%ecx          /* set bit = unmasked pending INT */
        jne     splz_unpend
        ret

        ALIGN_TEXT
splz_unpend:
        bsfl    %ecx,%ecx
        lock
        btrl    %ecx,_ipending
        jnc     splz_next
        cmpl    $NHWI,%ecx
        jae     splz_swi
        /*
         * We would prefer to call the intr handler directly here but that
         * doesn't work for badly behaved handlers that want the interrupt
         * frame.  Also, there's a problem determining the unit number.
         * We should change the interface so that the unit number is not
         * determined at config time.
         *
         * The vec[] routines build the proper frame on the stack,
         * then call one of _Xintr0 thru _XintrNN.
         */
        jmp     *_vec(,%ecx,4)

        ALIGN_TEXT
splz_swi:
        pushl   %eax
        orl     imasks(,%ecx,4),%eax
        movl    %eax,_cpl
        call    *_ihandlers(,%ecx,4)
        popl    %eax
        movl    %eax,_cpl
        jmp     splz_next

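/*
 * For orientation only (not assembled): a rough C rendition of the
 * dispatch loop above.  NHWI, imasks[] and ihandlers[] come from the
 * code; ffs() stands in for bsfl, and atomic_clear_bit() and
 * hardware_unpend() are hypothetical helpers standing in for the
 * "lock; btrl" step and for the tail-jump through _vec[] (which hands
 * control to _XintrNN and does not return here):
 *
 *      extern volatile u_int cpl, ipending;
 *      extern u_int imasks[];
 *      extern void (*ihandlers[])(void);
 *
 *      void
 *      splz_sketch(void)
 *      {
 *              u_int irq, ocpl;
 *
 *              while ((ipending & ~cpl) != 0) {
 *                      irq = ffs(ipending & ~cpl) - 1;
 *                      if (!atomic_clear_bit(&ipending, irq))
 *                              continue;
 *                      if (irq < NHWI) {
 *                              hardware_unpend(irq);
 *                              return;
 *                      }
 *                      ocpl = cpl;
 *                      cpl = ocpl | imasks[irq];
 *                      (*ihandlers[irq])();
 *                      cpl = ocpl;
 *              }
 *      }
 */
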
/*
 * Fake clock interrupt(s) so that they appear to come from our caller instead
 * of from here, so that system profiling works.
 * XXX do this more generally (for all vectors; look up the C entry point).
 * XXX frame bogusness stops us from just jumping to the C entry point.
 * We have to clear iactive since this is an unpend call, and it will be
 * set from the time of the original INT.
 */

/*
 * The 'generic' vector stubs.
 */

#define BUILD_VEC(irq_num)                                              \
        ALIGN_TEXT ;                                                    \
__CONCAT(vec,irq_num): ;                                                \
        popl    %eax ;                                                  \
        pushfl ;                                                        \
        pushl   $KCSEL ;                                                \
        pushl   %eax ;                                                  \
        cli ;                                                           \
        lock ;                                  /* MP-safe */           \
        andl    $~IRQ_BIT(irq_num), iactive ;   /* lazy masking */      \
        MEXITCOUNT ;                                                    \
        APIC_ITRACE(apic_itrace_splz, irq_num, APIC_ITRACE_SPLZ) ;      \
        jmp     __CONCAT(_Xintr,irq_num)

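/*
 * For orientation only (not assembled): the popl/pushfl/pushl/pushl
 * sequence above rewrites the top of the stack so that _XintrNN sees
 * what a real INT would have pushed: if_eip is splz()'s return address
 * (popped into %eax), if_cs is $KCSEL, and if_eflags comes from pushfl.
 * Sketch of the resulting layout, lowest address first (the struct name
 * is only for illustration):
 *
 *      struct fake_intrframe {
 *              u_int   if_eip;
 *              u_int   if_cs;
 *              u_int   if_eflags;
 *      };
 */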

        BUILD_VEC(0)
        BUILD_VEC(1)
        BUILD_VEC(2)
        BUILD_VEC(3)
        BUILD_VEC(4)
        BUILD_VEC(5)
        BUILD_VEC(6)
        BUILD_VEC(7)
        BUILD_VEC(8)
        BUILD_VEC(9)
        BUILD_VEC(10)
        BUILD_VEC(11)
        BUILD_VEC(12)
        BUILD_VEC(13)
        BUILD_VEC(14)
        BUILD_VEC(15)
        BUILD_VEC(16)                   /* 8 additional INTs in IO APIC */
        BUILD_VEC(17)
        BUILD_VEC(18)
        BUILD_VEC(19)
        BUILD_VEC(20)
        BUILD_VEC(21)
        BUILD_VEC(22)
        BUILD_VEC(23)


/******************************************************************************
 * XXX FIXME: figure out where these belong.
 */

/* this nonsense is to verify that masks ALWAYS have 1 and only 1 bit set */
#define QUALIFY_MASKS_NOT

#ifdef QUALIFY_MASKS
#define QUALIFY_MASK            \
        btrl    %ecx, %eax ;    \
        andl    %eax, %eax ;    \
        jz      1f ;            \
        pushl   $bad_mask ;     \
        call    _panic ;        \
1:

bad_mask:       .asciz  "bad mask"
#else
#define QUALIFY_MASK
#endif

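/*
 * For orientation only (not assembled): QUALIFY_MASK panics unless the
 * caller's mask had exactly one bit set -- bsfl has already found the
 * lowest set bit, btrl clears it, and anything left over means the mask
 * was bad.  The equivalent single-bit test in rough C:
 *
 *      static void
 *      qualify_mask(u_int mask)
 *      {
 *              if ((mask & (mask - 1)) != 0)
 *                      panic("bad mask");
 *      }
 */
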
/*
 * (soon to be) MP-safe function to clear ONE INT mask bit.
 * The passed arg is a 32bit u_int MASK.
 * It clears the associated bit in _apic_imen.
 * It clears the mask bit of the associated IO APIC register.
 */
ENTRY(INTREN)
        pushfl                          /* save state of EI flag */
        cli                             /* prevent recursion */
        IMASK_LOCK                      /* enter critical reg */

        movl    8(%esp), %eax           /* mask into %eax */
        bsfl    %eax, %ecx              /* get pin index */
        btrl    %ecx, _apic_imen        /* update _apic_imen */

        QUALIFY_MASK

        shll    $4, %ecx
        movl    CNAME(int_to_apicintpin) + 8(%ecx), %edx
        movl    CNAME(int_to_apicintpin) + 12(%ecx), %ecx
        testl   %edx, %edx
        jz      1f

        movl    %ecx, (%edx)            /* write the target register index */
        movl    16(%edx), %eax          /* read the target register data */
        andl    $~IOART_INTMASK, %eax   /* clear mask bit */
        movl    %eax, 16(%edx)          /* write the APIC register data */
1:
        IMASK_UNLOCK                    /* exit critical reg */
        popfl                           /* restore old state of EI flag */
        ret

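/*
 * For orientation only (not assembled): what INTREN() above does, in
 * rough C (INTRDIS() is the mirror image; the real routines also disable
 * interrupts and take IMASK_LOCK around the sequence).  The IO APIC is
 * programmed indirectly: the register index is written at offset 0 of
 * the per-APIC window and the data is read/written at offset 0x10, which
 * is what the "(%edx)" and "16(%edx)" accesses above do.  The struct and
 * field names here are illustrative assumptions standing in for the real
 * int_to_apicintpin[] layout (16 bytes per entry, window address at
 * offset 8, redirection-entry index at offset 12):
 *
 *      struct apic_pininfo {
 *              int             io_num;
 *              int             int_pin;
 *              volatile u_int  *apic_address;
 *              u_int           redirindex;
 *      };
 *      extern struct apic_pininfo int_to_apicintpin[];
 *      extern u_int apic_imen;
 *
 *      static void
 *      intren_sketch(u_int mask)
 *      {
 *              int pin = ffs(mask) - 1;
 *              volatile u_int *win = int_to_apicintpin[pin].apic_address;
 *              u_int val;
 *
 *              apic_imen &= ~(1 << pin);
 *              if (win == NULL)
 *                      return;
 *              win[0] = int_to_apicintpin[pin].redirindex;
 *              val = win[4];
 *              val &= ~IOART_INTMASK;
 *              win[4] = val;
 *      }
 */
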
/*
 * (soon to be) MP-safe function to set ONE INT mask bit.
 * The passed arg is a 32bit u_int MASK.
 * It sets the associated bit in _apic_imen.
 * It sets the mask bit of the associated IO APIC register.
 */
ENTRY(INTRDIS)
        pushfl                          /* save state of EI flag */
        cli                             /* prevent recursion */
        IMASK_LOCK                      /* enter critical reg */

        movl    8(%esp), %eax           /* mask into %eax */
        bsfl    %eax, %ecx              /* get pin index */
        btsl    %ecx, _apic_imen        /* update _apic_imen */

        QUALIFY_MASK

        shll    $4, %ecx
        movl    CNAME(int_to_apicintpin) + 8(%ecx), %edx
        movl    CNAME(int_to_apicintpin) + 12(%ecx), %ecx
        testl   %edx, %edx
        jz      1f

        movl    %ecx, (%edx)            /* write the target register index */
        movl    16(%edx), %eax          /* read the target register data */
        orl     $IOART_INTMASK, %eax    /* set mask bit */
        movl    %eax, 16(%edx)          /* write the APIC register data */
1:
        IMASK_UNLOCK                    /* exit critical reg */
        popfl                           /* restore old state of EI flag */
        ret


/******************************************************************************
 *
 */


/*
 * void write_ioapic_mask(int apic, u_int mask);
 */

#define _INT_MASK       0x00010000
#define _PIN_MASK       0x00ffffff

#define _OLD_ESI          0(%esp)
#define _OLD_EBX          4(%esp)
#define _RETADDR          8(%esp)
#define _APIC            12(%esp)
#define _MASK            16(%esp)

        ALIGN_TEXT
write_ioapic_mask:
        pushl %ebx                      /* scratch */
        pushl %esi                      /* scratch */

        movl    _apic_imen, %ebx
        xorl    _MASK, %ebx             /* %ebx = _apic_imen ^ mask */
        andl    $_PIN_MASK, %ebx        /* %ebx = diffs & 0x00ffffff */
        jz      all_done                /* no change, return */

        movl    _APIC, %esi             /* APIC # */
        movl    _ioapic, %ecx
        movl    (%ecx,%esi,4), %esi     /* %esi holds APIC base address */

next_loop:                              /* %ebx = diffs, %esi = APIC base */
        bsfl    %ebx, %ecx              /* %ecx = index of 1st/next set bit */
        jz      all_done

        btrl    %ecx, %ebx              /* clear this bit in diffs */
        leal    16(,%ecx,2), %edx       /* calculate register index */

        movl    %edx, (%esi)            /* write the target register index */
        movl    16(%esi), %eax          /* read the target register data */

        btl     %ecx, _MASK             /* test for mask or unmask */
        jnc     clear                   /* bit is clear */
        orl     $_INT_MASK, %eax        /* set mask bit */
        jmp     write
clear:  andl    $~_INT_MASK, %eax       /* clear mask bit */

write:  movl    %eax, 16(%esi)          /* write the APIC register data */

        jmp     next_loop               /* try another pass */

all_done:
        popl    %esi
        popl    %ebx
        ret

#undef _OLD_ESI
#undef _OLD_EBX
#undef _RETADDR
#undef _APIC
#undef _MASK

#undef _PIN_MASK
#undef _INT_MASK

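/*
 * For orientation only (not assembled): rough C rendition of
 * write_ioapic_mask() above.  It visits every pin whose mask bit differs
 * between _apic_imen and the requested mask, selects that pin's low
 * redirection-entry dword (register 0x10 + 2 * pin) through the index
 * window, and sets or clears the 0x00010000 mask bit:
 *
 *      extern u_int apic_imen;
 *      extern volatile u_int *ioapic[];
 *
 *      static void
 *      write_ioapic_mask_sketch(int apic, u_int mask)
 *      {
 *              volatile u_int *win = ioapic[apic];
 *              u_int diffs = (apic_imen ^ mask) & 0x00ffffff;
 *              u_int val;
 *              int pin;
 *
 *              while (diffs != 0) {
 *                      pin = ffs(diffs) - 1;
 *                      diffs &= ~(1 << pin);
 *                      win[0] = 0x10 + 2 * pin;
 *                      val = win[4];
 *                      if (mask & (1 << pin))
 *                              val |= 0x00010000;
 *                      else
 *                              val &= ~0x00010000;
 *                      win[4] = val;
 *              }
 *      }
 */
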
#ifdef oldcode

_INTREN:
        movl 4(%esp), %eax              /* mask into %eax */
        notl %eax                       /* mask = ~mask */
        andl _apic_imen, %eax           /* %eax = _apic_imen & ~mask */

        pushl %eax                      /* new (future) _apic_imen value */
        pushl $0                        /* APIC# arg */
        call write_ioapic_mask          /* modify the APIC registers */

        addl $4, %esp                   /* remove APIC# arg from stack */
        popl _apic_imen                 /* _apic_imen &= ~mask */
        ret

_INTRDIS:
        movl _apic_imen, %eax
        orl 4(%esp), %eax               /* %eax = _apic_imen | mask */

        pushl %eax                      /* new (future) _apic_imen value */
        pushl $0                        /* APIC# arg */
        call write_ioapic_mask          /* modify the APIC registers */

        addl $4, %esp                   /* remove APIC# arg from stack */
        popl _apic_imen                 /* _apic_imen |= mask */
        ret

#endif /* oldcode */


#ifdef ready

/*
 * u_int read_io_apic_mask(int apic);
 */
        ALIGN_TEXT
read_io_apic_mask:
        ret

/*
 * Set INT mask bit for each bit set in 'mask'.
 * Ignore INT mask bit for all others.
 *
 * void set_io_apic_mask(int apic, u_int32_t bits);
 */
        ALIGN_TEXT
set_io_apic_mask:
        ret

/*
 * void set_ioapic_maskbit(int apic, int bit);
 */
        ALIGN_TEXT
set_ioapic_maskbit:
        ret

/*
 * Clear INT mask bit for each bit set in 'mask'.
 * Ignore INT mask bit for all others.
 *
 * void clr_io_apic_mask(int apic, u_int32_t bits);
 */
        ALIGN_TEXT
clr_io_apic_mask:
        ret

/*
 * void clr_ioapic_maskbit(int apic, int bit);
 */
        ALIGN_TEXT
clr_ioapic_maskbit:
        ret

#endif /** ready */

/******************************************************************************
 *
 */

/*
 * u_int io_apic_read(int apic, int select);
 */
ENTRY(io_apic_read)
        movl    4(%esp), %ecx           /* APIC # */
        movl    _ioapic, %eax
        movl    (%eax,%ecx,4), %edx     /* APIC base register address */
        movl    8(%esp), %eax           /* target register index */
        movl    %eax, (%edx)            /* write the target register index */
        movl    16(%edx), %eax          /* read the APIC register data */
        ret                             /* %eax = register value */

/*
 * void io_apic_write(int apic, int select, int value);
 */
ENTRY(io_apic_write)
        movl    4(%esp), %ecx           /* APIC # */
        movl    _ioapic, %eax
        movl    (%eax,%ecx,4), %edx     /* APIC base register address */
        movl    8(%esp), %eax           /* target register index */
        movl    %eax, (%edx)            /* write the target register index */
        movl    12(%esp), %eax          /* target register value */
        movl    %eax, 16(%edx)          /* write the APIC register data */
        ret                             /* %eax = void */

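/*
 * For orientation only (not assembled): C equivalents of io_apic_read()
 * and io_apic_write() above.  ioapic[] here stands for the kernel's
 * _ioapic table of per-APIC window addresses; the select register sits
 * at offset 0 of the window and the data register at offset 0x10:
 *
 *      extern volatile u_int *ioapic[];
 *
 *      u_int
 *      io_apic_read_sketch(int apic, int select)
 *      {
 *              volatile u_int *win = ioapic[apic];
 *
 *              win[0] = select;
 *              return (win[4]);
 *      }
 *
 *      void
 *      io_apic_write_sketch(int apic, int select, int value)
 *      {
 *              volatile u_int *win = ioapic[apic];
 *
 *              win[0] = select;
 *              win[4] = value;
 *      }
 */
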
/*
 * Send an EOI to the local APIC.
 */
ENTRY(apic_eoi)
        movl    $0, _lapic+0xb0
        ret
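
/*
 * For orientation only (not assembled): apic_eoi() stores 0 into the
 * local APIC's EOI register at offset 0xb0 of the register page that
 * _lapic maps.  Rough C, treating the mapping as a flat array of 32-bit
 * registers (the C side may declare it differently):
 *
 *      extern volatile u_int lapic[];
 *
 *      static void
 *      apic_eoi_sketch(void)
 *      {
 *              lapic[0xb0 / 4] = 0;
 *      }
 */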
