FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/locore.s


    1 /*-
    2  * Copyright (c) 1990 The Regents of the University of California.
    3  * All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * William Jolitz.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      from: @(#)locore.s      7.3 (Berkeley) 5/13/91
   33  * $FreeBSD$
   34  *
   35  *              originally from: locore.s, by William F. Jolitz
   36  *
   37  *              Substantially rewritten by David Greenman, Rod Grimes,
   38  *                      Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
   39  *                      and many others.
   40  */
   41 
   42 #include "opt_bootp.h"
   43 #include "opt_compat.h"
   44 #include "opt_nfsroot.h"
   45 #include "opt_pmap.h"
   46 
   47 #include <sys/syscall.h>
   48 #include <sys/reboot.h>
   49 
   50 #include <machine/asmacros.h>
   51 #include <machine/cputypes.h>
   52 #include <machine/psl.h>
   53 #include <machine/pmap.h>
   54 #include <machine/specialreg.h>
   55 
   56 #include "assym.s"
   57 
   58 /*
   59  *      XXX
   60  *
    61  * Note: This version was greatly munged to avoid various assembler errors
    62  * that may be fixed in newer versions of gas. Perhaps newer versions
    63  * will have a more pleasant appearance.
   64  */
   65 
   66 /*
   67  * PTmap is recursive pagemap at top of virtual address space.
   68  * Within PTmap, the page directory can be found (third indirection).
   69  */
   70         .globl  PTmap,PTD,PTDpde
   71         .set    PTmap,(PTDPTDI << PDRSHIFT)
   72         .set    PTD,PTmap + (PTDPTDI * PAGE_SIZE)
   73         .set    PTDpde,PTD + (PTDPTDI * PDESIZE)
   74 
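/*
 * A rough C illustration of how the recursive map is used (sketch only;
 * vtopte_sketch/vtopde_sketch are made-up names, though they mirror the
 * vtopte()/vtopde() idiom of the i386 pmap): once create_pagetables below
 * installs the self-referencing PDE, the PTE and PDE for any virtual
 * address can be read as ordinary memory through PTmap and PTD.
 */
static pt_entry_t *
vtopte_sketch(vm_offset_t va)
{
        return ((pt_entry_t *)PTmap + (va >> PAGE_SHIFT));
}

static pd_entry_t *
vtopde_sketch(vm_offset_t va)
{
        return ((pd_entry_t *)PTD + (va >> PDRSHIFT));
}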
   75 /*
   76  * Compiled KERNBASE location and the kernel load address
   77  */
   78         .globl  kernbase
   79         .set    kernbase,KERNBASE
   80         .globl  kernload
   81         .set    kernload,KERNLOAD
   82 
   83 /*
   84  * Globals
   85  */
   86         .data
   87         ALIGN_DATA                      /* just to be sure */
   88 
   89         .space  0x2000                  /* space for tmpstk - temporary stack */
   90 tmpstk:
   91 
   92         .globl  bootinfo
   93 bootinfo:       .space  BOOTINFO_SIZE   /* bootinfo that we can handle */
   94 
   95                 .globl KERNend
   96 KERNend:        .long   0               /* phys addr end of kernel (just after bss) */
   97 physfree:       .long   0               /* phys addr of next free page */
   98 
   99         .globl  IdlePTD
  100 IdlePTD:        .long   0               /* phys addr of kernel PTD */
  101 
  102 #ifdef PAE
  103         .globl  IdlePDPT
  104 IdlePDPT:       .long   0               /* phys addr of kernel PDPT */
  105 #endif
  106 
  107         .globl  KPTmap
  108 KPTmap:         .long   0               /* address of kernel page tables */
  109 
  110         .globl  KPTphys
  111 KPTphys:        .long   0               /* phys addr of kernel page tables */
  112 
  113         .globl  proc0kstack
  114 proc0uarea:     .long   0               /* address of proc 0 uarea (unused)*/
  115 proc0kstack:    .long   0               /* address of proc 0 kstack space */
  116 p0upa:          .long   0               /* phys addr of proc0 UAREA (unused) */
  117 p0kpa:          .long   0               /* phys addr of proc0's STACK */
  118 
  119 vm86phystk:     .long   0               /* PA of vm86/bios stack */
  120 
  121         .globl  vm86paddr, vm86pa
  122 vm86paddr:      .long   0               /* address of vm86 region */
  123 vm86pa:         .long   0               /* phys addr of vm86 region */
  124 
  125 #ifdef PC98
  126         .globl  pc98_system_parameter
  127 pc98_system_parameter:
  128         .space  0x240
  129 #endif
  130 
  131 /**********************************************************************
  132  *
  133  * Some handy macros
  134  *
  135  */
  136 
  137 #define R(foo) ((foo)-KERNBASE)
  138 
  139 #define ALLOCPAGES(foo) \
  140         movl    R(physfree), %esi ; \
  141         movl    $((foo)*PAGE_SIZE), %eax ; \
  142         addl    %esi, %eax ; \
  143         movl    %eax, R(physfree) ; \
  144         movl    %esi, %edi ; \
  145         movl    $((foo)*PAGE_SIZE),%ecx ; \
  146         xorl    %eax,%eax ; \
  147         cld ; \
  148         rep ; \
  149         stosb
  150 
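/*
 * The two macros above carry most of the early bootstrap conventions:
 * R(foo) turns the linked (KERNBASE-relative) address of a symbol into
 * the physical address it occupies before paging is enabled, and
 * ALLOCPAGES(n) is a bump allocator that carves n zeroed pages off the
 * free physical memory following the kernel.  A C sketch of the latter
 * (hypothetical names; the macro leaves the base address in %esi):
 */
static vm_paddr_t physfree_sketch;              /* stands in for physfree */

static vm_paddr_t
allocpages_sketch(int npages)
{
        vm_paddr_t base = physfree_sketch;      /* next free physical page */

        physfree_sketch += npages * PAGE_SIZE;  /* bump the free pointer */
        memset((void *)base, 0, npages * PAGE_SIZE);    /* the rep stosb */
        return (base);
}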
  151 /*
  152  * fillkpt
  153  *      eax = page frame address
  154  *      ebx = index into page table
  155  *      ecx = how many pages to map
  156  *      base = base address of page dir/table
  157  *      prot = protection bits
  158  */
  159 #define fillkpt(base, prot)               \
  160         shll    $PTESHIFT,%ebx          ; \
  161         addl    base,%ebx               ; \
  162         orl     $PG_V,%eax              ; \
  163         orl     prot,%eax               ; \
  164 1:      movl    %eax,(%ebx)             ; \
  165         addl    $PAGE_SIZE,%eax         ; /* increment physical address */ \
  166         addl    $PTESIZE,%ebx           ; /* next pte */ \
  167         loop    1b
  168 
  169 /*
  170  * fillkptphys(prot)
  171  *      eax = physical address
  172  *      ecx = how many pages to map
  173  *      prot = protection bits
  174  */
  175 #define fillkptphys(prot)                 \
  176         movl    %eax, %ebx              ; \
  177         shrl    $PAGE_SHIFT, %ebx       ; \
  178         fillkpt(R(KPTphys), prot)
  179 
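/*
 * C sketches of the two mapping macros (names are illustrative).
 * fillkpt() writes 'count' consecutive PTEs into the table at 'base',
 * starting at entry 'index', mapping consecutive physical pages from
 * 'pa'; fillkptphys() does the same into the kernel page tables at
 * KPTphys, using the page frame number of 'pa' as the index.
 */
static void
fillkpt_sketch(pt_entry_t *base, u_int index, u_int count, vm_paddr_t pa,
    u_int prot)
{
        pt_entry_t *pte = base + index;

        while (count-- != 0) {
                *pte++ = pa | prot | PG_V;      /* valid + caller's bits */
                pa += PAGE_SIZE;                /* next physical page */
        }
}

static void
fillkptphys_sketch(vm_paddr_t pa, u_int count, u_int prot)
{
        fillkpt_sketch((pt_entry_t *)KPTphys, pa >> PAGE_SHIFT, count, pa,
            prot);
}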
  180         .text
  181 /**********************************************************************
  182  *
  183  * This is where the bootblocks start us, set the ball rolling...
  184  *
  185  */
  186 NON_GPROF_ENTRY(btext)
  187 
  188 #ifdef PC98
  189         /* save SYSTEM PARAMETER for resume (NS/T or other) */
  190         movl    $0xa1400,%esi
  191         movl    $R(pc98_system_parameter),%edi
  192         movl    $0x0240,%ecx
  193         cld
  194         rep
  195         movsb
  196 #else   /* IBM-PC */
  197 /* Tell the bios to warmboot next time */
  198         movw    $0x1234,0x472
  199 #endif  /* PC98 */
  200 
  201 /* Set up a real frame in case the double return in newboot is executed. */
  202         pushl   %ebp
  203         movl    %esp, %ebp
  204 
  205 /* Don't trust what the BIOS gives for eflags. */
  206         pushl   $PSL_KERNEL
  207         popfl
  208 
  209 /*
  210  * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
  211  * to set %cs, %ds, %es and %ss.
  212  */
  213         mov     %ds, %ax
  214         mov     %ax, %fs
  215         mov     %ax, %gs
  216 
  217 /*
  218  * Clear the bss.  Not all boot programs do it, and it is our job anyway.
  219  *
  220  * XXX we don't check that there is memory for our bss and page tables
  221  * before using it.
  222  *
   223  * Note: we must be careful not to overwrite an active gdt or idt.  They
   224  * are inactive from now until we switch to new ones, since we don't load
   225  * any more segment registers or permit interrupts until after the switch.
  226  */
  227         movl    $R(end),%ecx
  228         movl    $R(edata),%edi
  229         subl    %edi,%ecx
  230         xorl    %eax,%eax
  231         cld
  232         rep
  233         stosb
  234 
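/*
 * In C this would simply be (edata and end are the linker-provided
 * symbols delimiting the bss, addressed through R() because paging is
 * still off):
 *
 *      memset((void *)R(edata), 0, R(end) - R(edata));
 */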
  235         call    recover_bootinfo
  236 
  237 /* Get onto a stack that we can trust. */
  238 /*
  239  * XXX this step is delayed in case recover_bootinfo needs to return via
  240  * the old stack, but it need not be, since recover_bootinfo actually
  241  * returns via the old frame.
  242  */
  243         movl    $R(tmpstk),%esp
  244 
  245 #ifdef PC98
  246         /* pc98_machine_type & M_EPSON_PC98 */
  247         testb   $0x02,R(pc98_system_parameter)+220
  248         jz      3f
  249         /* epson_machine_id <= 0x0b */
  250         cmpb    $0x0b,R(pc98_system_parameter)+224
  251         ja      3f
  252 
  253         /* count up memory */
   254         movl    $0x100000,%eax          /* next, tally remaining memory */
  255         movl    $0xFFF-0x100,%ecx
  256 1:      movl    0(%eax),%ebx            /* save location to check */
  257         movl    $0xa55a5aa5,0(%eax)     /* write test pattern */
  258         cmpl    $0xa55a5aa5,0(%eax)     /* does not check yet for rollover */
  259         jne     2f
  260         movl    %ebx,0(%eax)            /* restore memory */
  261         addl    $PAGE_SIZE,%eax
  262         loop    1b
  263 2:      subl    $0x100000,%eax
  264         shrl    $17,%eax
  265         movb    %al,R(pc98_system_parameter)+1
  266 3:
  267 
  268         movw    R(pc98_system_parameter+0x86),%ax
  269         movw    %ax,R(cpu_id)
  270 #endif
  271 
  272         call    identify_cpu
  273         call    create_pagetables
  274 
  275 /*
  276  * If the CPU has support for VME, turn it on.
  277  */ 
  278         testl   $CPUID_VME, R(cpu_feature)
  279         jz      1f
  280         movl    %cr4, %eax
  281         orl     $CR4_VME, %eax
  282         movl    %eax, %cr4
  283 1:
  284 
  285 /* Now enable paging */
  286 #ifdef PAE
  287         movl    R(IdlePDPT), %eax
  288         movl    %eax, %cr3
  289         movl    %cr4, %eax
  290         orl     $CR4_PAE, %eax
  291         movl    %eax, %cr4
  292 #else
  293         movl    R(IdlePTD), %eax
  294         movl    %eax,%cr3               /* load ptd addr into mmu */
  295 #endif
  296         movl    %cr0,%eax               /* get control word */
  297         orl     $CR0_PE|CR0_PG,%eax     /* enable paging */
  298         movl    %eax,%cr0               /* and let's page NOW! */
  299 
  300         pushl   $begin                  /* jump to high virtualized address */
  301         ret
  302 
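/*
 * What this sequence amounts to, sketched with <machine/cpufunc.h>-style
 * register accessors (illustrative only; it must be done in assembly
 * here because no C environment exists yet):
 *
 *      load_cr3(IdlePTD);                      (or IdlePDPT, plus
 *      load_cr4(rcr4() | CR4_PAE);              CR4_PAE, for PAE)
 *      load_cr0(rcr0() | CR0_PE | CR0_PG);     turn paging on
 *
 * The "pushl $begin; ret" pair then transfers control from the low,
 * identity-mapped address the CPU is still fetching from to the
 * KERNBASE-relative address the kernel was linked at; both addresses are
 * mapped by the page tables built in create_pagetables.
 */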
  303 /* now running relocated at KERNBASE where the system is linked to run */
  304 begin:
  305         /* set up bootstrap stack */
  306         movl    proc0kstack,%eax        /* location of in-kernel stack */
  307                         /* bootstrap stack end location */
  308         leal    (KSTACK_PAGES*PAGE_SIZE-PCB_SIZE)(%eax),%esp
  309 
  310         xorl    %ebp,%ebp               /* mark end of frames */
  311 
  312 #ifdef PAE
  313         movl    IdlePDPT,%esi
  314 #else
  315         movl    IdlePTD,%esi
  316 #endif
  317         movl    %esi,(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE+PCB_CR3)(%eax)
  318 
  319         pushl   physfree                /* value of first for init386(first) */
  320         call    init386                 /* wire 386 chip for unix operation */
  321 
  322         /*
  323          * Clean up the stack in a way that db_numargs() understands, so
  324          * that backtraces in ddb don't underrun the stack.  Traps for
  325          * inaccessible memory are more fatal than usual this early.
  326          */
  327         addl    $4,%esp
  328 
  329         call    mi_startup              /* autoconfiguration, mountroot etc */
  330         /* NOTREACHED */
  331         addl    $0,%esp                 /* for db_numargs() again */
  332 
  333 /*
  334  * Signal trampoline, copied to top of user stack
  335  */
  336 NON_GPROF_ENTRY(sigcode)
  337         calll   *SIGF_HANDLER(%esp)
  338         leal    SIGF_UC(%esp),%eax      /* get ucontext */
  339         pushl   %eax
  340         testl   $PSL_VM,UC_EFLAGS(%eax)
  341         jne     1f
  342         movl    UC_GS(%eax),%gs         /* restore %gs */
  343 1:
  344         movl    $SYS_sigreturn,%eax
  345         pushl   %eax                    /* junk to fake return addr. */
  346         int     $0x80                   /* enter kernel with args */
  347                                         /* on stack */
  348 1:
  349         jmp     1b
  350 
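/*
 * A C rendering of the trampoline (the struct below is a simplified
 * stand-in for the real struct sigframe from <machine/sigframe.h>; the
 * vm86 %gs handling is omitted):
 */
struct sigframe_sketch {
        int     sf_signum;                      /* handler argument 0 */
        void    *sf_siginfo;                    /* handler argument 1 */
        void    *sf_ucontext;                   /* handler argument 2 */
        void    (*sf_handler)(int, void *, void *);     /* SIGF_HANDLER */
        ucontext_t sf_uc;                       /* SIGF_UC */
};

static void
sigcode_sketch(struct sigframe_sketch *sfp)     /* %esp points here */
{
        sfp->sf_handler(sfp->sf_signum, sfp->sf_siginfo, sfp->sf_ucontext);
        sigreturn((ucontext_t *)&sfp->sf_uc);   /* SYS_sigreturn */
        for (;;)
                ;                               /* the "1: jmp 1b" loop */
}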
  351 #ifdef COMPAT_FREEBSD4
  352         ALIGN_TEXT
  353 freebsd4_sigcode:
  354         calll   *SIGF_HANDLER(%esp)
  355         leal    SIGF_UC4(%esp),%eax     /* get ucontext */
  356         pushl   %eax
  357         testl   $PSL_VM,UC4_EFLAGS(%eax)
  358         jne     1f
  359         movl    UC4_GS(%eax),%gs        /* restore %gs */
  360 1:
  361         movl    $344,%eax               /* 4.x SYS_sigreturn */
  362         pushl   %eax                    /* junk to fake return addr. */
  363         int     $0x80                   /* enter kernel with args */
  364                                         /* on stack */
  365 1:
  366         jmp     1b
  367 #endif
  368 
  369 #ifdef COMPAT_43
  370         ALIGN_TEXT
  371 osigcode:
  372         call    *SIGF_HANDLER(%esp)     /* call signal handler */
  373         lea     SIGF_SC(%esp),%eax      /* get sigcontext */
  374         pushl   %eax
  375         testl   $PSL_VM,SC_PS(%eax)
  376         jne     9f
  377         movl    SC_GS(%eax),%gs         /* restore %gs */
  378 9:
  379         movl    $103,%eax               /* 3.x SYS_sigreturn */
  380         pushl   %eax                    /* junk to fake return addr. */
  381         int     $0x80                   /* enter kernel with args */
  382 0:      jmp     0b
  383 #endif /* COMPAT_43 */
  384 
  385         ALIGN_TEXT
  386 esigcode:
  387 
  388         .data
  389         .globl  szsigcode
  390 szsigcode:
  391         .long   esigcode-sigcode
  392 #ifdef COMPAT_FREEBSD4
  393         .globl  szfreebsd4_sigcode
  394 szfreebsd4_sigcode:
  395         .long   esigcode-freebsd4_sigcode
  396 #endif
  397 #ifdef COMPAT_43
  398         .globl  szosigcode
  399 szosigcode:
  400         .long   esigcode-osigcode
  401 #endif
  402         .text
  403 
  404 /**********************************************************************
  405  *
  406  * Recover the bootinfo passed to us from the boot program
  407  *
  408  */
  409 recover_bootinfo:
  410         /*
  411          * This code is called in different ways depending on what loaded
  412          * and started the kernel.  This is used to detect how we get the
  413          * arguments from the other code and what we do with them.
  414          *
  415          * Old disk boot blocks:
  416          *      (*btext)(howto, bootdev, cyloffset, esym);
  417          *      [return address == 0, and can NOT be returned to]
  418          *      [cyloffset was not supported by the FreeBSD boot code
  419          *       and always passed in as 0]
  420          *      [esym is also known as total in the boot code, and
  421          *       was never properly supported by the FreeBSD boot code]
  422          *
  423          * Old diskless netboot code:
  424          *      (*btext)(0,0,0,0,&nfsdiskless,0,0,0);
  425          *      [return address != 0, and can NOT be returned to]
  426          *      If we are being booted by this code it will NOT work,
  427          *      so we are just going to halt if we find this case.
  428          *
  429          * New uniform boot code:
  430          *      (*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
  431          *      [return address != 0, and can be returned to]
  432          *
  433          * There may seem to be a lot of wasted arguments in here, but
  434          * that is so the newer boot code can still load very old kernels
  435          * and old boot code can load new kernels.
  436          */
  437 
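/*
 * A sketch of the dispatch below (illustrative C).  With the frame set
 * up in btext, 4(%ebp) is the saved return address, 8(%ebp) is the first
 * argument, and each later argument is 4 bytes higher, so 24(%ebp) is
 * the fifth argument and 28(%ebp) is &bootinfo.
 */
enum bootconv { OLD_DISK, OLD_NETBOOT, NEW_UNIFORM };

static enum bootconv
classify_boot_sketch(u_int retaddr, u_int arg5)
{
        if (retaddr == 0)
                return (OLD_DISK);      /* old disk boot blocks */
        if (arg5 != 0)
                return (OLD_NETBOOT);   /* old diskless netboot: halt */
        return (NEW_UNIFORM);           /* (*btext)(howto, bootdev, 0, 0, 0, &bootinfo) */
}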
  438         /*
  439          * The old style disk boot blocks fake a frame on the stack and
  440          * did an lret to get here.  The frame on the stack has a return
  441          * address of 0.
  442          */
  443         cmpl    $0,4(%ebp)
  444         je      olddiskboot
  445 
  446         /*
  447          * We have some form of return address, so this is either the
  448          * old diskless netboot code, or the new uniform code.  That can
  449          * be detected by looking at the 5th argument, if it is 0
  450          * we are being booted by the new uniform boot code.
  451          */
  452         cmpl    $0,24(%ebp)
  453         je      newboot
  454 
  455         /*
   456          * It seems we have been loaded by the old diskless boot code.  We
   457          * don't stand a chance of running, as the diskless structure has
   458          * changed considerably between the two, so just halt.
  459          */
  460          hlt
  461 
  462         /*
  463          * We have been loaded by the new uniform boot code.
  464          * Let's check the bootinfo version, and if we do not understand
  465          * it we return to the loader with a status of 1 to indicate this error
  466          */
  467 newboot:
  468         movl    28(%ebp),%ebx           /* &bootinfo.version */
  469         movl    BI_VERSION(%ebx),%eax
  470         cmpl    $1,%eax                 /* We only understand version 1 */
  471         je      1f
  472         movl    $1,%eax                 /* Return status */
  473         leave
  474         /*
  475          * XXX this returns to our caller's caller (as is required) since
  476          * we didn't set up a frame and our caller did.
  477          */
  478         ret
  479 
  480 1:
  481         /*
  482          * If we have a kernelname copy it in
  483          */
  484         movl    BI_KERNELNAME(%ebx),%esi
  485         cmpl    $0,%esi
  486         je      2f                      /* No kernelname */
  487         movl    $MAXPATHLEN,%ecx        /* Brute force!!! */
  488         movl    $R(kernelname),%edi
  489         cmpb    $'/',(%esi)             /* Make sure it starts with a slash */
  490         je      1f
  491         movb    $'/',(%edi)
  492         incl    %edi
  493         decl    %ecx
  494 1:
  495         cld
  496         rep
  497         movsb
  498 
  499 2:
  500         /*
  501          * Determine the size of the boot loader's copy of the bootinfo
  502          * struct.  This is impossible to do properly because old versions
  503          * of the struct don't contain a size field and there are 2 old
  504          * versions with the same version number.
  505          */
  506         movl    $BI_ENDCOMMON,%ecx      /* prepare for sizeless version */
  507         testl   $RB_BOOTINFO,8(%ebp)    /* bi_size (and bootinfo) valid? */
  508         je      got_bi_size             /* no, sizeless version */
  509         movl    BI_SIZE(%ebx),%ecx
  510 got_bi_size:
  511 
  512         /*
  513          * Copy the common part of the bootinfo struct
  514          */
  515         movl    %ebx,%esi
  516         movl    $R(bootinfo),%edi
  517         cmpl    $BOOTINFO_SIZE,%ecx
  518         jbe     got_common_bi_size
  519         movl    $BOOTINFO_SIZE,%ecx
  520 got_common_bi_size:
  521         cld
  522         rep
  523         movsb
  524 
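/*
 * In effect (illustrative C; bi is the loader's copy at 28(%ebp) and
 * howto is the first argument at 8(%ebp)):
 *
 *      n = (howto & RB_BOOTINFO) ? bi->bi_size : BI_ENDCOMMON;
 *      if (n > BOOTINFO_SIZE)
 *              n = BOOTINFO_SIZE;
 *      memcpy(&bootinfo, bi, n);
 */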
  525 #ifdef NFS_ROOT
  526 #ifndef BOOTP_NFSV3
  527         /*
  528          * If we have a nfs_diskless structure copy it in
  529          */
  530         movl    BI_NFS_DISKLESS(%ebx),%esi
  531         cmpl    $0,%esi
  532         je      olddiskboot
  533         movl    $R(nfs_diskless),%edi
  534         movl    $NFSDISKLESS_SIZE,%ecx
  535         cld
  536         rep
  537         movsb
  538         movl    $R(nfs_diskless_valid),%edi
  539         movl    $1,(%edi)
  540 #endif
  541 #endif
  542 
  543         /*
  544          * The old style disk boot.
  545          *      (*btext)(howto, bootdev, cyloffset, esym);
  546          * Note that the newer boot code just falls into here to pick
  547          * up howto and bootdev, cyloffset and esym are no longer used
  548          */
  549 olddiskboot:
  550         movl    8(%ebp),%eax
  551         movl    %eax,R(boothowto)
  552         movl    12(%ebp),%eax
  553         movl    %eax,R(bootdev)
  554 
  555         ret
  556 
  557 
  558 /**********************************************************************
  559  *
  560  * Identify the CPU and initialize anything special about it
  561  *
  562  */
  563 identify_cpu:
  564 
  565         /* Try to toggle alignment check flag; does not exist on 386. */
  566         pushfl
  567         popl    %eax
  568         movl    %eax,%ecx
  569         orl     $PSL_AC,%eax
  570         pushl   %eax
  571         popfl
  572         pushfl
  573         popl    %eax
  574         xorl    %ecx,%eax
  575         andl    $PSL_AC,%eax
  576         pushl   %ecx
  577         popfl
  578 
  579         testl   %eax,%eax
  580         jnz     try486
  581 
   582         /* NexGen CPU does not have alignment check flag. */
  583         pushfl
  584         movl    $0x5555, %eax
  585         xorl    %edx, %edx
  586         movl    $2, %ecx
  587         clc
  588         divl    %ecx
  589         jz      trynexgen
  590         popfl
  591         movl    $CPU_386,R(cpu)
  592         jmp     3f
  593 
  594 trynexgen:
  595         popfl
  596         movl    $CPU_NX586,R(cpu)
  597         movl    $0x4778654e,R(cpu_vendor)       # store vendor string
  598         movl    $0x72446e65,R(cpu_vendor+4)
  599         movl    $0x6e657669,R(cpu_vendor+8)
  600         movl    $0,R(cpu_vendor+12)
  601         jmp     3f
  602 
  603 try486: /* Try to toggle identification flag; does not exist on early 486s. */
  604         pushfl
  605         popl    %eax
  606         movl    %eax,%ecx
  607         xorl    $PSL_ID,%eax
  608         pushl   %eax
  609         popfl
  610         pushfl
  611         popl    %eax
  612         xorl    %ecx,%eax
  613         andl    $PSL_ID,%eax
  614         pushl   %ecx
  615         popfl
  616 
  617         testl   %eax,%eax
  618         jnz     trycpuid
  619         movl    $CPU_486,R(cpu)
  620 
  621         /*
  622          * Check Cyrix CPU
  623          * Cyrix CPUs do not change the undefined flags following
  624          * execution of the divide instruction which divides 5 by 2.
  625          *
  626          * Note: CPUID is enabled on M2, so it passes another way.
  627          */
  628         pushfl
  629         movl    $0x5555, %eax
  630         xorl    %edx, %edx
  631         movl    $2, %ecx
  632         clc
  633         divl    %ecx
  634         jnc     trycyrix
  635         popfl
  636         jmp     3f              /* You may use Intel CPU. */
  637 
  638 trycyrix:
  639         popfl
  640         /*
   641          * The IBM Blue Lightning CPU also doesn't change the undefined
   642          * flags.  Because IBM doesn't disclose the information for the
   643          * Blue Lightning, we can't distinguish it from Cyrix's CPUs
   644          * (including the IBM brand of Cyrix CPUs).
  645          */
  646         movl    $0x69727943,R(cpu_vendor)       # store vendor string
  647         movl    $0x736e4978,R(cpu_vendor+4)
  648         movl    $0x64616574,R(cpu_vendor+8)
  649         jmp     3f
  650 
  651 trycpuid:       /* Use the `cpuid' instruction. */
  652         xorl    %eax,%eax
  653         cpuid                                   # cpuid 0
  654         movl    %eax,R(cpu_high)                # highest capability
  655         movl    %ebx,R(cpu_vendor)              # store vendor string
  656         movl    %edx,R(cpu_vendor+4)
  657         movl    %ecx,R(cpu_vendor+8)
  658         movb    $0,R(cpu_vendor+12)
  659 
  660         movl    $1,%eax
  661         cpuid                                   # cpuid 1
  662         movl    %eax,R(cpu_id)                  # store cpu_id
  663         movl    %ebx,R(cpu_procinfo)            # store cpu_procinfo
  664         movl    %edx,R(cpu_feature)             # store cpu_feature
  665         movl    %ecx,R(cpu_feature2)            # store cpu_feature2
  666         rorl    $8,%eax                         # extract family type
  667         andl    $15,%eax
  668         cmpl    $5,%eax
  669         jae     1f
  670 
  671         /* less than Pentium; must be 486 */
  672         movl    $CPU_486,R(cpu)
  673         jmp     3f
  674 1:
  675         /* a Pentium? */
  676         cmpl    $5,%eax
  677         jne     2f
  678         movl    $CPU_586,R(cpu)
  679         jmp     3f
  680 2:
  681         /* Greater than Pentium...call it a Pentium Pro */
  682         movl    $CPU_686,R(cpu)
  683 3:
  684         ret
  685 
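/*
 * The 386/486/cpuid discrimination above hinges on one trick: try to
 * flip a specific EFLAGS bit and see whether the change sticks.  A C
 * sketch using the read_eflags()/write_eflags() accessors from
 * <machine/cpufunc.h> (the function name itself is made up):
 */
static int
eflags_bit_sticks(u_int bit)
{
        u_int saved, toggled;

        saved = read_eflags();
        write_eflags(saved ^ bit);              /* try to flip the bit */
        toggled = read_eflags();
        write_eflags(saved);                    /* restore EFLAGS */
        return (((saved ^ toggled) & bit) != 0);
}

/*
 * PSL_AC cannot be set on an 80386 and PSL_ID cannot be toggled without
 * cpuid support, which is exactly what the two pushfl/popfl sequences
 * above test.  Once cpuid is known to work, the family field in bits
 * 11:8 of the leaf-1 %eax (the "rorl $8; andl $15" above) selects
 * CPU_486, CPU_586, or CPU_686.
 */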
  686 
  687 /**********************************************************************
  688  *
  689  * Create the first page directory and its page tables.
  690  *
  691  */
  692 
  693 create_pagetables:
  694 
  695 /* Find end of kernel image (rounded up to a page boundary). */
  696         movl    $R(_end),%esi
  697 
  698 /* Include symbols, if any. */
  699         movl    R(bootinfo+BI_ESYMTAB),%edi
  700         testl   %edi,%edi
  701         je      over_symalloc
  702         movl    %edi,%esi
  703         movl    $KERNBASE,%edi
  704         addl    %edi,R(bootinfo+BI_SYMTAB)
  705         addl    %edi,R(bootinfo+BI_ESYMTAB)
  706 over_symalloc:
  707 
  708 /* If we are told where the end of the kernel space is, believe it. */
  709         movl    R(bootinfo+BI_KERNEND),%edi
  710         testl   %edi,%edi
  711         je      no_kernend
  712         movl    %edi,%esi
  713 no_kernend:
  714 
  715         addl    $PDRMASK,%esi           /* Play conservative for now, and */
  716         andl    $~PDRMASK,%esi          /*   ... wrap to next 4M. */
  717         movl    %esi,R(KERNend)         /* save end of kernel */
  718         movl    %esi,R(physfree)        /* next free page is at end of kernel */
  719 
  720 /* Allocate Kernel Page Tables */
  721         ALLOCPAGES(NKPT)
  722         movl    %esi,R(KPTphys)
  723         addl    $(KERNBASE-(KPTDI<<(PDRSHIFT-PAGE_SHIFT+PTESHIFT))),%esi
  724         movl    %esi,R(KPTmap)
  725 
  726 /* Allocate Page Table Directory */
  727 #ifdef PAE
  728         /* XXX only need 32 bytes (easier for now) */
  729         ALLOCPAGES(1)
  730         movl    %esi,R(IdlePDPT)
  731 #endif
  732         ALLOCPAGES(NPGPTD)
  733         movl    %esi,R(IdlePTD)
  734 
  735 /* Allocate KSTACK */
  736         ALLOCPAGES(KSTACK_PAGES)
  737         movl    %esi,R(p0kpa)
  738         addl    $KERNBASE, %esi
  739         movl    %esi, R(proc0kstack)
  740 
  741         ALLOCPAGES(1)                   /* vm86/bios stack */
  742         movl    %esi,R(vm86phystk)
  743 
  744         ALLOCPAGES(3)                   /* pgtable + ext + IOPAGES */
  745         movl    %esi,R(vm86pa)
  746         addl    $KERNBASE, %esi
  747         movl    %esi, R(vm86paddr)
  748 
  749 /*
  750  * Enable PSE and PGE.
  751  */
  752 #ifndef DISABLE_PSE
  753         testl   $CPUID_PSE, R(cpu_feature)
  754         jz      1f
  755         movl    $PG_PS, R(pseflag)
  756         movl    %cr4, %eax
  757         orl     $CR4_PSE, %eax
  758         movl    %eax, %cr4
  759 1:
  760 #endif
  761 #ifndef DISABLE_PG_G
  762         testl   $CPUID_PGE, R(cpu_feature)
  763         jz      2f
  764         movl    $PG_G, R(pgeflag)
  765         movl    %cr4, %eax
  766         orl     $CR4_PGE, %eax
  767         movl    %eax, %cr4
  768 2:
  769 #endif
  770 
  771 /*
  772  * Initialize page table pages mapping physical address zero through the
  773  * end of the kernel.  All of the page table entries allow read and write
  774  * access.  Write access to the first physical page is required by bios32
  775  * calls, and write access to the first 1 MB of physical memory is required
  776  * by ACPI for implementing suspend and resume.  We do this even
   777  * if we've enabled PSE above; we'll just switch the corresponding kernel
  778  * PDEs before we turn on paging.
  779  *
  780  * XXX: We waste some pages here in the PSE case!  DON'T BLINDLY REMOVE
  781  * THIS!  SMP needs the page table to be there to map the kernel P==V.
  782  */
  783         xorl    %eax, %eax
  784         movl    R(KERNend),%ecx
  785         shrl    $PAGE_SHIFT,%ecx
  786         fillkptphys($PG_RW)
  787 
  788 /* Map page table pages. */
  789         movl    R(KPTphys),%eax
  790         movl    $NKPT,%ecx
  791         fillkptphys($PG_RW)
  792 
  793 /* Map page directory. */
  794 #ifdef PAE
  795         movl    R(IdlePDPT), %eax
  796         movl    $1, %ecx
  797         fillkptphys($PG_RW)
  798 #endif
  799 
  800         movl    R(IdlePTD), %eax
  801         movl    $NPGPTD, %ecx
  802         fillkptphys($PG_RW)
  803 
  804 /* Map proc0's KSTACK in the physical way ... */
  805         movl    R(p0kpa), %eax
  806         movl    $(KSTACK_PAGES), %ecx
  807         fillkptphys($PG_RW)
  808 
  809 /* Map ISA hole */
  810         movl    $ISA_HOLE_START, %eax
  811         movl    $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
  812         fillkptphys($PG_RW)
  813 
  814 /* Map space for the vm86 region */
  815         movl    R(vm86phystk), %eax
  816         movl    $4, %ecx
  817         fillkptphys($PG_RW)
  818 
  819 /* Map page 0 into the vm86 page table */
  820         movl    $0, %eax
  821         movl    $0, %ebx
  822         movl    $1, %ecx
  823         fillkpt(R(vm86pa), $PG_RW|PG_U)
  824 
  825 /* ...likewise for the ISA hole */
  826         movl    $ISA_HOLE_START, %eax
  827         movl    $ISA_HOLE_START>>PAGE_SHIFT, %ebx
  828         movl    $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
  829         fillkpt(R(vm86pa), $PG_RW|PG_U)
  830 
  831 /*
  832  * Create an identity mapping for low physical memory, including the kernel.
  833  * The part of this mapping that covers the first 1 MB of physical memory
  834  * becomes a permanent part of the kernel's address space.  The rest of this
  835  * mapping is destroyed in pmap_bootstrap().  Ordinarily, the same page table
  836  * pages are shared by the identity mapping and the kernel's native mapping.
  837  * However, the permanent identity mapping cannot contain PG_G mappings.
  838  * Thus, if the kernel is loaded within the permanent identity mapping, that
  839  * page table page must be duplicated and not shared.
  840  *
  841  * N.B. Due to errata concerning large pages and physical address zero,
  842  * a PG_PS mapping is not used.
  843  */
  844         movl    R(KPTphys), %eax
  845         xorl    %ebx, %ebx
  846         movl    $NKPT, %ecx
  847         fillkpt(R(IdlePTD), $PG_RW)
  848 #if KERNLOAD < (1 << PDRSHIFT)
  849         testl   $PG_G, R(pgeflag)
  850         jz      1f
  851         ALLOCPAGES(1)
  852         movl    %esi, %edi
  853         movl    R(IdlePTD), %eax
  854         movl    (%eax), %esi
  855         movl    %edi, (%eax)
  856         movl    $PAGE_SIZE, %ecx
  857         cld
  858         rep
  859         movsb
  860 1:      
  861 #endif
  862 
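/*
 * Net effect of the identity mapping: the fillkpt() just above and the
 * one below at index KPTDI install the same physical page table pages in
 * two ranges of PDE slots.  Until pmap_bootstrap() tears the low
 * mappings down, a low physical address pa is therefore reachable both
 * as va == pa and as va == KERNBASE + pa, and (except for the PG_G
 * duplication handled in the block above) the two aliases are backed by
 * the very same physical PTE.
 */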
  863 /*
  864  * For the non-PSE case, install PDEs for PTs covering the KVA.
  865  * For the PSE case, do the same, but clobber the ones corresponding
  866  * to the kernel (from btext to KERNend) with 4M (2M for PAE) ('PS')
  867  * PDEs immediately after.
  868  */
  869         movl    R(KPTphys), %eax
  870         movl    $KPTDI, %ebx
  871         movl    $NKPT, %ecx
  872         fillkpt(R(IdlePTD), $PG_RW)
  873         cmpl    $0,R(pseflag)
  874         je      done_pde
  875 
  876         movl    R(KERNend), %ecx
  877         movl    $KERNLOAD, %eax
  878         subl    %eax, %ecx
  879         shrl    $PDRSHIFT, %ecx
  880         movl    $(KPTDI+(KERNLOAD/(1 << PDRSHIFT))), %ebx
  881         shll    $PDESHIFT, %ebx
  882         addl    R(IdlePTD), %ebx
  883         orl     $(PG_V|PG_RW|PG_PS), %eax
  884 1:      movl    %eax, (%ebx)
  885         addl    $(1 << PDRSHIFT), %eax
  886         addl    $PDESIZE, %ebx
  887         loop    1b
  888 
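/*
 * A C rendering of the loop above (illustrative names): overwrite the
 * PDEs that cover the kernel image, from KERNLOAD up to KERNend, with
 * 4MB (2MB under PAE) superpage mappings.
 */
static void
install_pse_pdes_sketch(pd_entry_t *ptd, vm_paddr_t kernend)
{
        pd_entry_t *pde = ptd + KPTDI + (KERNLOAD >> PDRSHIFT);
        pd_entry_t pa = KERNLOAD | PG_V | PG_RW | PG_PS;
        u_int n = (kernend - KERNLOAD) >> PDRSHIFT;

        while (n-- != 0) {
                *pde++ = pa;                    /* one superpage mapping */
                pa += 1 << PDRSHIFT;
        }
}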
  889 done_pde:
  890 /* install a pde recursively mapping page directory as a page table */
  891         movl    R(IdlePTD), %eax
  892         movl    $PTDPTDI, %ebx
  893         movl    $NPGPTD,%ecx
  894         fillkpt(R(IdlePTD), $PG_RW)
  895 
  896 #ifdef PAE
  897         movl    R(IdlePTD), %eax
  898         xorl    %ebx, %ebx
  899         movl    $NPGPTD, %ecx
  900         fillkpt(R(IdlePDPT), $0x0)
  901 #endif
  902 
  903         ret
