FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/cpu_switch.S

/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/8.0/sys/amd64/amd64/cpu_switch.S 195486 2009-07-09 09:34:11Z kib $
 */

#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "assym.s"
#include "opt_sched.h"

/*****************************************************************************/
/* Scheduling                                                                */
/*****************************************************************************/

        .text

#ifdef SMP
#define LK      lock ;
#else
#define LK
#endif

#if defined(SCHED_ULE) && defined(SMP)
#define SETLK   xchgq
#else
#define SETLK   movq
#endif
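
The LK and SETLK macros above tailor the lock handling to the kernel configuration: LK adds a lock prefix to the pm_active bit operations on SMP kernels, and SETLK releases the old thread's lock with xchgq (an implicitly locked, full-barrier swap) when SCHED_ULE runs on SMP, falling back to a plain movq otherwise. A rough C11 analogue of that distinction is sketched below; toy_thread, release_old_thread and the lock representation are invented for illustration, not kernel API.

    #include <stdatomic.h>
    #include <stdint.h>

    /* Hypothetical stand-in for a thread; in the kernel td_lock is a struct mtx *. */
    struct toy_thread {
            _Atomic(uintptr_t) td_lock;
    };

    /*
     * Hand the old thread back to the scheduler by storing the new lock pointer.
     * With SCHED_ULE on SMP this must be a full-barrier swap (SETLK = xchgq);
     * otherwise a plain store suffices (SETLK = movq).
     */
    static inline void
    release_old_thread(struct toy_thread *oldtd, uintptr_t mtx, int ule_smp)
    {
            if (ule_smp)
                    (void)atomic_exchange_explicit(&oldtd->td_lock, mtx,
                        memory_order_seq_cst);
            else
                    atomic_store_explicit(&oldtd->td_lock, mtx,
                        memory_order_relaxed);
    }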

/*
 * cpu_throw()
 *
 * This is the second half of cpu_switch(). It is used when the current
 * thread is either a dummy or slated to die, and we no longer care
 * about its state.  This is only a slight optimization and is probably
 * not worth it anymore.  Note that we need to clear the pm_active bits so
 * we do need the old proc if it still exists.
 * %rdi = oldtd
 * %rsi = newtd
 */
ENTRY(cpu_throw)
        testq   %rdi,%rdi
        jnz     1f
        movq    PCPU(IDLETHREAD),%rdi
1:
        movq    TD_PCB(%rdi),%r8                /* Old pcb */
        movl    PCPU(CPUID), %eax
        /* release bit from old pm_active */
        movq    TD_PROC(%rdi), %rdx             /* oldtd->td_proc */
        movq    P_VMSPACE(%rdx), %rdx           /* proc->p_vmspace */
        LK btrl %eax, VM_PMAP+PM_ACTIVE(%rdx)   /* clear old */
        movq    TD_PCB(%rsi),%r8                /* newtd->td_pcb */
        movq    PCB_CR3(%r8),%rdx
        movq    %rdx,%cr3                       /* new address space */
        jmp     swact
END(cpu_throw)
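
At the C level, cpu_throw's pmap handling amounts to clearing this CPU's bit in the old vmspace's pm_active mask and setting it in the new one before jumping to swact; the LK prefix makes those bit operations atomic on SMP. A minimal sketch with C11 atomics follows, using an invented mask type and function names rather than the kernel's pmap structures.

    #include <stdatomic.h>
    #include <stdint.h>

    /* Invented stand-in for pmap->pm_active; the real field is a CPU mask. */
    typedef _Atomic(uint64_t) toy_cpumask_t;

    /* LK btrl: atomically clear this CPU's bit in the old address space's mask. */
    static inline void
    toy_pmap_deactivate(toy_cpumask_t *pm_active, int cpuid)
    {
            atomic_fetch_and_explicit(pm_active, ~(UINT64_C(1) << cpuid),
                memory_order_seq_cst);
    }

    /* LK btsl: atomically set this CPU's bit in the new address space's mask. */
    static inline void
    toy_pmap_activate(toy_cpumask_t *pm_active, int cpuid)
    {
            atomic_fetch_or_explicit(pm_active, UINT64_C(1) << cpuid,
                memory_order_seq_cst);
    }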

/*
 * cpu_switch(old, new, mtx)
 *
 * Save the current thread state, then select the next thread to run
 * and load its state.
 * %rdi = oldtd
 * %rsi = newtd
 * %rdx = mtx
 */
ENTRY(cpu_switch)
        /* Switch to new thread.  First, save context. */
        movq    TD_PCB(%rdi),%r8
        movb    $1,PCB_FULL_IRET(%r8)

        movq    (%rsp),%rax                     /* Hardware registers */
        movq    %r15,PCB_R15(%r8)
        movq    %r14,PCB_R14(%r8)
        movq    %r13,PCB_R13(%r8)
        movq    %r12,PCB_R12(%r8)
        movq    %rbp,PCB_RBP(%r8)
        movq    %rsp,PCB_RSP(%r8)
        movq    %rbx,PCB_RBX(%r8)
        movq    %rax,PCB_RIP(%r8)

        testl   $PCB_DBREGS,PCB_FLAGS(%r8)
        jnz     store_dr                        /* static predict not taken */
done_store_dr:

        /* have we used fp, and need a save? */
        cmpq    %rdi,PCPU(FPCURTHREAD)
        jne     1f
        addq    $PCB_SAVEFPU,%r8
        clts
        fxsave  (%r8)
        smsw    %ax
        orb     $CR0_TS,%al
        lmsw    %ax
        xorl    %eax,%eax
        movq    %rax,PCPU(FPCURTHREAD)
1:

        /* Save is done.  Now fire up new thread. Leave old vmspace. */
        movq    TD_PCB(%rsi),%r8

        /* switch address space */
        movq    PCB_CR3(%r8),%rcx
        movq    %cr3,%rax
        cmpq    %rcx,%rax                       /* Same address space? */
        jne     swinact
        SETLK   %rdx, TD_LOCK(%rdi)             /* Release the old thread */
        jmp     sw1
swinact:
        movq    %rcx,%cr3                       /* new address space */
        movl    PCPU(CPUID), %eax
        /* Release bit from old pmap->pm_active */
        movq    TD_PROC(%rdi), %rcx             /* oldproc */
        movq    P_VMSPACE(%rcx), %rcx
        LK btrl %eax, VM_PMAP+PM_ACTIVE(%rcx)   /* clear old */
        SETLK   %rdx, TD_LOCK(%rdi)             /* Release the old thread */
swact:
        /* Set bit in new pmap->pm_active */
        movq    TD_PROC(%rsi),%rdx              /* newproc */
        movq    P_VMSPACE(%rdx), %rdx
        LK btsl %eax, VM_PMAP+PM_ACTIVE(%rdx)   /* set new */

sw1:
#if defined(SCHED_ULE) && defined(SMP)
        /* Wait for the new thread to become unblocked */
        movq    $blocked_lock, %rdx
1:
        movq    TD_LOCK(%rsi),%rcx
        cmpq    %rcx, %rdx
        pause
        je      1b
#endif
        /*
         * At this point, we've switched address spaces and are ready
         * to load up the rest of the next context.
         */

        /* Skip loading user fsbase/gsbase for kthreads */
        testl   $TDP_KTHREAD,TD_PFLAGS(%rsi)
        jnz     do_kthread

        /*
         * Load ldt register
         */
        movq    TD_PROC(%rsi),%rcx
        cmpq    $0, P_MD+MD_LDT(%rcx)
        jne     do_ldt
        xorl    %eax,%eax
ld_ldt: lldt    %ax

        /* Restore fs base in GDT */
        movl    PCB_FSBASE(%r8),%eax
        movq    PCPU(FS32P),%rdx
        movw    %ax,2(%rdx)
        shrl    $16,%eax
        movb    %al,4(%rdx)
        shrl    $8,%eax
        movb    %al,7(%rdx)

        /* Restore gs base in GDT */
        movl    PCB_GSBASE(%r8),%eax
        movq    PCPU(GS32P),%rdx
        movw    %ax,2(%rdx)
        shrl    $16,%eax
        movb    %al,4(%rdx)
        shrl    $8,%eax
        movb    %al,7(%rdx)

do_kthread:
        /* Do we need to reload tss ? */
        movq    PCPU(TSSP),%rax
        movq    PCB_TSSP(%r8),%rdx
        testq   %rdx,%rdx
        cmovzq  PCPU(COMMONTSSP),%rdx
        cmpq    %rax,%rdx
        jne     do_tss
done_tss:
        movq    %r8,PCPU(RSP0)
        movq    %r8,PCPU(CURPCB)
        /* Update the TSS_RSP0 pointer for the next interrupt */
        movq    %r8,COMMON_TSS_RSP0(%rdx)
        movq    %rsi,PCPU(CURTHREAD)            /* into next thread */

        /* Test if debug registers should be restored. */
        testl   $PCB_DBREGS,PCB_FLAGS(%r8)
        jnz     load_dr                         /* static predict not taken */
done_load_dr:

        /* Restore context. */
        movq    PCB_R15(%r8),%r15
        movq    PCB_R14(%r8),%r14
        movq    PCB_R13(%r8),%r13
        movq    PCB_R12(%r8),%r12
        movq    PCB_RBP(%r8),%rbp
        movq    PCB_RSP(%r8),%rsp
        movq    PCB_RBX(%r8),%rbx
        movq    PCB_RIP(%r8),%rax
        movq    %rax,(%rsp)
        ret

        /*
         * We order these strangely for several reasons.
         * 1: I wanted to use static branch prediction hints
         * 2: Most athlon64/opteron cpus don't have them.  They define
         *    a forward branch as 'predict not taken'.  Intel cores have
         *    the 'rep' prefix to invert this.
         * So, to make it work on both forms of cpu we do the detour.
         * We use jumps rather than call in order to avoid the stack.
         */

store_dr:
        movq    %dr7,%rax                       /* yes, do the save */
        movq    %dr0,%r15
        movq    %dr1,%r14
        movq    %dr2,%r13
        movq    %dr3,%r12
        movq    %dr6,%r11
        andq    $0x0000fc00, %rax               /* disable all watchpoints */
        movq    %r15,PCB_DR0(%r8)
        movq    %r14,PCB_DR1(%r8)
        movq    %r13,PCB_DR2(%r8)
        movq    %r12,PCB_DR3(%r8)
        movq    %r11,PCB_DR6(%r8)
        movq    %rax,PCB_DR7(%r8)
        movq    %rax,%dr7
        jmp     done_store_dr

load_dr:
        movq    %dr7,%rax
        movq    PCB_DR0(%r8),%r15
        movq    PCB_DR1(%r8),%r14
        movq    PCB_DR2(%r8),%r13
        movq    PCB_DR3(%r8),%r12
        movq    PCB_DR6(%r8),%r11
        movq    PCB_DR7(%r8),%rcx
        movq    %r15,%dr0
        movq    %r14,%dr1
        /* Preserve reserved bits in %dr7 */
        andq    $0x0000fc00,%rax
        andq    $~0x0000fc00,%rcx
        movq    %r13,%dr2
        movq    %r12,%dr3
        orq     %rcx,%rax
        movq    %r11,%dr6
        movq    %rax,%dr7
        jmp     done_load_dr

do_tss: movq    %rdx,PCPU(TSSP)
        movq    %rdx,%rcx
        movq    PCPU(TSS),%rax
        movw    %cx,2(%rax)
        shrq    $16,%rcx
        movb    %cl,4(%rax)
        shrq    $8,%rcx
        movb    %cl,7(%rax)
        shrq    $8,%rcx
        movl    %ecx,8(%rax)
        movb    $0x89,5(%rax)   /* unset busy */
        movl    $TSSSEL,%eax
        ltr     %ax
        jmp     done_tss

do_ldt: movq    PCPU(LDT),%rax
        movq    P_MD+MD_LDT_SD(%rcx),%rdx
        movq    %rdx,(%rax)
        movq    P_MD+MD_LDT_SD+8(%rcx),%rdx
        movq    %rdx,8(%rax)
        movl    $LDTSEL,%eax
        jmp     ld_ldt
END(cpu_switch)
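
The loop at sw1 covers a SCHED_ULE handoff race: the CPU that queued the new thread may still have its td_lock pointing at the blocked_lock sentinel, so we spin with pause until the real lock pointer is published before touching the thread's state. Below is a hedged C model of that wait, with a toy thread type and __builtin_ia32_pause() standing in for the pause instruction.

    #include <stdatomic.h>
    #include <stdint.h>

    extern char blocked_lock[];             /* sentinel address, as in the assembly */

    struct toy_thread {
            _Atomic(uintptr_t) td_lock;
    };

    /* Spin (with pause) until another CPU replaces the blocked_lock sentinel. */
    static inline void
    wait_for_unblock(struct toy_thread *newtd)
    {
            while (atomic_load_explicit(&newtd->td_lock, memory_order_acquire) ==
                (uintptr_t)blocked_lock)
                    __builtin_ia32_pause();
    }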

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
        /* Fetch PCB. */
        movq    %rdi,%rcx

        /* Save caller's return address. */
        movq    (%rsp),%rax
        movq    %rax,PCB_RIP(%rcx)

        movq    %cr3,%rax
        movq    %rax,PCB_CR3(%rcx)

        movq    %rbx,PCB_RBX(%rcx)
        movq    %rsp,PCB_RSP(%rcx)
        movq    %rbp,PCB_RBP(%rcx)
        movq    %r12,PCB_R12(%rcx)
        movq    %r13,PCB_R13(%rcx)
        movq    %r14,PCB_R14(%rcx)
        movq    %r15,PCB_R15(%rcx)

        /*
         * If fpcurthread == NULL, then the fpu h/w state is irrelevant and the
         * state had better already be in the pcb.  This is true for forks
         * but not for dumps (the old book-keeping with FP flags in the pcb
         * always lost for dumps because the dump pcb has 0 flags).
         *
         * If fpcurthread != NULL, then we have to save the fpu h/w state to
         * fpcurthread's pcb and copy it to the requested pcb, or save to the
         * requested pcb and reload.  Copying is easier because we would
         * have to handle h/w bugs for reloading.  We used to lose the
         * parent's fpu state for forks by forgetting to reload.
         */
        pushfq
        cli
        movq    PCPU(FPCURTHREAD),%rax
        testq   %rax,%rax
        je      1f

        movq    TD_PCB(%rax),%rdi
        leaq    PCB_SAVEFPU(%rdi),%rdi
        clts
        fxsave  (%rdi)
        smsw    %ax
        orb     $CR0_TS,%al
        lmsw    %ax

        movq    $PCB_SAVEFPU_SIZE,%rdx  /* arg 3 */
        leaq    PCB_SAVEFPU(%rcx),%rsi  /* arg 2 */
        /* arg 1 (%rdi) already loaded */
        call    bcopy
1:
        popfq

        ret
END(savectx)
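
The block comment inside savectx describes its FPU policy: if fpcurthread is non-NULL this CPU owns live FPU state, so that state is saved into the owning thread's pcb with fxsave and then copied (via bcopy) into the pcb the caller passed in; if fpcurthread is NULL the caller's pcb is expected to hold the state already. A simplified C sketch of just that branch structure follows, with placeholder types and a fake_fxsave that merely stands in for the real instruction.

    #include <string.h>

    #define SAVEFPU_SIZE 512                /* size of an fxsave area */

    struct toy_pcb {
            unsigned char pcb_savefpu[SAVEFPU_SIZE];
    };

    /* Placeholder that just clears the area; the real fxsave dumps FPU/SSE state. */
    static void fake_fxsave(void *area) { memset(area, 0, SAVEFPU_SIZE); }

    /*
     * Mirror of savectx's FPU logic: save live hardware state through the
     * owning thread's pcb, then copy it into the pcb we were asked to fill.
     */
    static void
    save_fpu_into(struct toy_pcb *target, struct toy_pcb *fpcur_pcb)
    {
            if (fpcur_pcb != NULL) {
                    fake_fxsave(fpcur_pcb->pcb_savefpu);    /* fxsave (%rdi) */
                    memcpy(target->pcb_savefpu, fpcur_pcb->pcb_savefpu,
                        SAVEFPU_SIZE);                      /* call bcopy */
            }
            /* else: the pcb already contains the state (e.g. the fork path). */
    }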

/*
 * savectx2(xpcb)
 * Update xpcb, saving current processor state.
 */
ENTRY(savectx2)
        /* Fetch XPCB. */
        movq    %rdi,%r8

        /* Save caller's return address. */
        movq    (%rsp),%rax
        movq    %rax,PCB_RIP(%r8)

        movq    %rbx,PCB_RBX(%r8)
        movq    %rsp,PCB_RSP(%r8)
        movq    %rbp,PCB_RBP(%r8)
        movq    %r12,PCB_R12(%r8)
        movq    %r13,PCB_R13(%r8)
        movq    %r14,PCB_R14(%r8)
        movq    %r15,PCB_R15(%r8)

        movq    %cr0,%rax
        movq    %rax,XPCB_CR0(%r8)
        movq    %cr2,%rax
        movq    %rax,XPCB_CR2(%r8)
        movq    %cr4,%rax
        movq    %rax,XPCB_CR4(%r8)

        movq    %dr0,%rax
        movq    %rax,PCB_DR0(%r8)
        movq    %dr1,%rax
        movq    %rax,PCB_DR1(%r8)
        movq    %dr2,%rax
        movq    %rax,PCB_DR2(%r8)
        movq    %dr3,%rax
        movq    %rax,PCB_DR3(%r8)
        movq    %dr6,%rax
        movq    %rax,PCB_DR6(%r8)
        movq    %dr7,%rax
        movq    %rax,PCB_DR7(%r8)

        sgdt    XPCB_GDT(%r8)
        sidt    XPCB_IDT(%r8)
        sldt    XPCB_LDT(%r8)
        str     XPCB_TR(%r8)

        movl    $MSR_FSBASE,%ecx
        rdmsr
        shlq    $32,%rdx
        leaq    (%rax,%rdx),%rax
        movq    %rax,PCB_FSBASE(%r8)
        movl    $MSR_GSBASE,%ecx
        rdmsr
        shlq    $32,%rdx
        leaq    (%rax,%rdx),%rax
        movq    %rax,PCB_GSBASE(%r8)
        movl    $MSR_KGSBASE,%ecx
        rdmsr
        shlq    $32,%rdx
        leaq    (%rax,%rdx),%rax
        movq    %rax,XPCB_KGSBASE(%r8)

        movl    $1, %eax
        ret
END(savectx2)
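
savectx2 reads MSR_FSBASE, MSR_GSBASE and MSR_KGSBASE with rdmsr, which returns the low half of the MSR in %eax and the high half in %edx; the shlq/leaq pairs then reassemble the 64-bit value. The same arithmetic in C is shown below, assuming hypothetical hi/lo inputs rather than a real rdmsr wrapper.

    #include <stdint.h>

    /* Combine the %edx:%eax halves returned by rdmsr into one 64-bit value. */
    static inline uint64_t
    msr_from_halves(uint32_t lo, uint32_t hi)
    {
            return ((uint64_t)hi << 32) | lo;   /* shlq $32,%rdx; leaq (%rax,%rdx),%rax */
    }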

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.