FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/locore.s
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD$
 *
 * originally from: locore.s, by William F. Jolitz
 *
 * Substantially rewritten by David Greenman, Rod Grimes,
 * Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 * and many others.
 */

#include "opt_bootp.h"
#include "opt_compat.h"
#include "opt_nfsroot.h"
#include "opt_pmap.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 * XXX
 *
 * Note: this version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will allow a more pleasant appearance.
 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
        .globl  PTmap,PTD,PTDpde
        .set    PTmap,(PTDPTDI << PDRSHIFT)
        .set    PTD,PTmap + (PTDPTDI * PAGE_SIZE)
        .set    PTDpde,PTD + (PTDPTDI * PDESIZE)
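
/*
 * A worked example of the recursion (assuming the usual non-PAE values
 * PDRSHIFT = 22, PAGE_SIZE = 4096 and PDESIZE = 4, which this file does
 * not itself guarantee): installing the page directory as a PDE at
 * index PTDPTDI makes every page table appear as an ordinary 4K page
 * within the 4MB window starting at PTmap = PTDPTDI << 22.  The PTE for
 * virtual address va is then the word at PTmap + (va >> 12) * 4, the
 * directory itself shows up at PTD, and PTDpde is the PDE that creates
 * the loop.
 */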

#ifdef SMP
/*
 * Define layout of per-cpu address space.
 * This is "constructed" in locore.s on the BSP and in mp_machdep.c
 * for each AP.  DO NOT REORDER THESE WITHOUT UPDATING THE REST!
 */
        .globl  SMP_prvspace
        .set    SMP_prvspace,(MPPTDI << PDRSHIFT)
#endif /* SMP */

/*
 * Compiled KERNBASE location and the kernel load address
 */
        .globl  kernbase
        .set    kernbase,KERNBASE
        .globl  kernload
        .set    kernload,KERNLOAD

/*
 * Globals
 */
        .data
        ALIGN_DATA                      /* just to be sure */

        .space  0x2000                  /* space for tmpstk - temporary stack */
tmpstk:

        .globl  bootinfo
bootinfo:       .space  BOOTINFO_SIZE   /* bootinfo that we can handle */

        .globl  KERNend
KERNend:        .long   0               /* phys addr end of kernel (just after bss) */
physfree:       .long   0               /* phys addr of next free page */

#ifdef SMP
        .globl  cpu0prvpage
cpu0pp:         .long   0               /* phys addr cpu0 private pg */
cpu0prvpage:    .long   0               /* relocated version */

        .globl  SMPpt
SMPptpa:        .long   0               /* phys addr SMP page table */
SMPpt:          .long   0               /* relocated version */
#endif /* SMP */

        .globl  IdlePTD
IdlePTD:        .long   0               /* phys addr of kernel PTD */

#ifdef PAE
        .globl  IdlePDPT
IdlePDPT:       .long   0               /* phys addr of kernel PDPT */
#endif

#ifdef SMP
        .globl  KPTphys
#endif
KPTphys:        .long   0               /* phys addr of kernel page tables */

        .globl  proc0kstack
proc0uarea:     .long   0               /* address of proc 0 uarea (unused) */
proc0kstack:    .long   0               /* address of proc 0 kstack space */
p0upa:          .long   0               /* phys addr of proc0 UAREA (unused) */
p0kpa:          .long   0               /* phys addr of proc0's STACK */

vm86phystk:     .long   0               /* PA of vm86/bios stack */

        .globl  vm86paddr, vm86pa
vm86paddr:      .long   0               /* address of vm86 region */
vm86pa:         .long   0               /* phys addr of vm86 region */

#ifdef PC98
        .globl  pc98_system_parameter
pc98_system_parameter:
        .space  0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

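/*
 * R(foo): translate the linked (virtual, KERNBASE-relative) address of
 * a symbol into the physical address where it actually sits before
 * paging is turned on.  Everything below that runs before the jump to
 * `begin' must reference memory through R().
 */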
#define R(foo) ((foo)-KERNBASE)

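/*
 * ALLOCPAGES(foo)
 *	Bump-allocate `foo' pages from the physical free pointer:
 *	%esi gets the physical address of the allocation, physfree is
 *	advanced past it, and the pages are zeroed with rep stosb.
 *	(Clobbers %eax, %ecx and %edi.)
 */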
#define ALLOCPAGES(foo) \
        movl    R(physfree), %esi ;             \
        movl    $((foo)*PAGE_SIZE), %eax ;      \
        addl    %esi, %eax ;                    \
        movl    %eax, R(physfree) ;             \
        movl    %esi, %edi ;                    \
        movl    $((foo)*PAGE_SIZE),%ecx ;       \
        xorl    %eax,%eax ;                     \
        cld ;                                   \
        rep ;                                   \
        stosb

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define fillkpt(base, prot) \
        shll    $PTESHIFT,%ebx                  ; \
        addl    base,%ebx                       ; \
        orl     $PG_V,%eax                      ; \
        orl     prot,%eax                       ; \
1:      movl    %eax,(%ebx)                     ; /* store pte */ \
        addl    $PAGE_SIZE,%eax                 ; /* increment physical address */ \
        addl    $PTESIZE,%ebx                   ; /* next pte */ \
        loop    1b
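
/*
 * Example (illustrative only, assuming the non-PAE constants
 * PTESHIFT = 2, PTESIZE = 4 and PAGE_SIZE = 4096): with
 *	%eax = 0x400000 (start at physical 4MB),
 *	%ebx = 0x10 (PTE index 16), and
 *	%ecx = 3 (three pages),
 * fillkpt(R(KPTphys), $PG_RW) writes 0x400003, 0x401003 and 0x402003
 * (PA | PG_V | PG_RW) into the three words at KPTphys + 0x40.
 */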

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define fillkptphys(prot) \
        movl    %eax, %ebx ;                    \
        shrl    $PAGE_SHIFT, %ebx ;             \
        fillkpt(R(KPTphys), prot)
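
/*
 * Indexing by PA >> PAGE_SHIFT places each PTE at the slot for its own
 * physical address, so once KPTphys is installed both at PDE 0 and at
 * KPTDI (see create_pagetables below) the page is reachable at
 * VA == PA and at VA == KERNBASE + PA.
 */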

        .text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
        /* save SYSTEM PARAMETER for resume (NS/T or other) */
        movl    $0xa1400,%esi
        movl    $R(pc98_system_parameter),%edi
        movl    $0x0240,%ecx
        cld
        rep
        movsb
#else   /* IBM-PC */
        /* Tell the bios to warmboot next time */
        movw    $0x1234,0x472
#endif  /* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
        pushl   %ebp
        movl    %esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
        pushl   $PSL_KERNEL
        popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
        mov     %ds, %ax
        mov     %ax, %fs
        mov     %ax, %gs

/*
 * Clear the bss.  Not all boot programs do it, and it is our job anyway.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * Note: we must be careful not to overwrite an active gdt or idt.  They
 * are inactive from now until we switch to new ones, since we don't load
 * any more segment registers or permit interrupts until after the switch.
 */
        movl    $R(end),%ecx
        movl    $R(edata),%edi
        subl    %edi,%ecx
        xorl    %eax,%eax
        cld
        rep
        stosb

        call    recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
        movl    $R(tmpstk),%esp

#ifdef PC98
        /* pc98_machine_type & M_EPSON_PC98 */
        testb   $0x02,R(pc98_system_parameter)+220
        jz      3f
        /* epson_machine_id <= 0x0b */
        cmpb    $0x0b,R(pc98_system_parameter)+224
        ja      3f

        /* count up memory */
        movl    $0x100000,%eax          /* next, tally remaining memory */
        movl    $0xFFF-0x100,%ecx
1:      movl    0(%eax),%ebx            /* save location to check */
        movl    $0xa55a5aa5,0(%eax)     /* write test pattern */
        cmpl    $0xa55a5aa5,0(%eax)     /* does not check yet for rollover */
        jne     2f
        movl    %ebx,0(%eax)            /* restore memory */
        addl    $PAGE_SIZE,%eax
        loop    1b
2:      subl    $0x100000,%eax
        shrl    $17,%eax
        movb    %al,R(pc98_system_parameter)+1
3:

        movw    R(pc98_system_parameter+0x86),%ax
        movw    %ax,R(cpu_id)
#endif

        call    identify_cpu
        call    create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
        testl   $CPUID_VME, R(cpu_feature)
        jz      1f
        movl    %cr4, %eax
        orl     $CR4_VME, %eax
        movl    %eax, %cr4
1:

/* Now enable paging */
#ifdef PAE
        movl    R(IdlePDPT), %eax
        movl    %eax, %cr3
        movl    %cr4, %eax
        orl     $CR4_PAE, %eax
        movl    %eax, %cr4
#else
        movl    R(IdlePTD), %eax
        movl    %eax,%cr3               /* load ptd addr into mmu */
#endif
        movl    %cr0,%eax               /* get control word */
        orl     $CR0_PE|CR0_PG,%eax     /* enable paging */
        movl    %eax,%cr0               /* and let's page NOW! */

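/*
 * We are still executing at the identity-mapped load address, but the
 * kernel is linked to run above KERNBASE.  Pushing the linked address
 * of `begin' and executing ret uses the new page tables to transfer
 * control to the high (virtual) copy of this code.
 */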
        pushl   $begin                  /* jump to high virtualized address */
        ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
        /* set up bootstrap stack */
        movl    proc0kstack,%eax        /* location of in-kernel stack */
        /* bootstrap stack end location */
        leal    (KSTACK_PAGES*PAGE_SIZE-PCB_SIZE)(%eax),%esp

        xorl    %ebp,%ebp               /* mark end of frames */

#ifdef PAE
        movl    IdlePDPT,%esi
#else
        movl    IdlePTD,%esi
#endif
        movl    %esi,(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE+PCB_CR3)(%eax)

        pushl   physfree                /* value of first for init386(first) */
        call    init386                 /* wire 386 chip for unix operation */

/*
 * Clean up the stack in a way that db_numargs() understands, so
 * that backtraces in ddb don't underrun the stack.  Traps for
 * inaccessible memory are more fatal than usual this early.
 */
        addl    $4,%esp

        call    mi_startup              /* autoconfiguration, mountroot etc */
        /* NOTREACHED */
        addl    $0,%esp                 /* for db_numargs() again */

/*
 * Signal trampoline, copied to top of user stack
 */
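/*
 * How it works: the kernel builds a sigframe on the user stack and
 * returns here.  The trampoline calls the handler through SIGF_HANDLER,
 * pushes a pointer to the saved ucontext, and enters the kernel with
 * int $0x80 / SYS_sigreturn to restore the interrupted context.  The
 * trampoline itself reloads %gs from the saved context, except for
 * vm86 contexts (PSL_VM set), whose segment state is restored by other
 * means.
 */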
NON_GPROF_ENTRY(sigcode)
        calll   *SIGF_HANDLER(%esp)
        leal    SIGF_UC(%esp),%eax      /* get ucontext */
        pushl   %eax
        testl   $PSL_VM,UC_EFLAGS(%eax)
        jne     1f
        movl    UC_GS(%eax),%gs         /* restore %gs */
1:
        movl    $SYS_sigreturn,%eax
        pushl   %eax                    /* junk to fake return addr. */
        int     $0x80                   /* enter kernel with args */
                                        /* on stack */
1:
        jmp     1b

#ifdef COMPAT_FREEBSD4
        ALIGN_TEXT
freebsd4_sigcode:
        calll   *SIGF_HANDLER(%esp)
        leal    SIGF_UC4(%esp),%eax     /* get ucontext */
        pushl   %eax
        testl   $PSL_VM,UC4_EFLAGS(%eax)
        jne     1f
        movl    UC4_GS(%eax),%gs        /* restore %gs */
1:
        movl    $344,%eax               /* 4.x SYS_sigreturn */
        pushl   %eax                    /* junk to fake return addr. */
        int     $0x80                   /* enter kernel with args */
                                        /* on stack */
1:
        jmp     1b
#endif

#ifdef COMPAT_43
        ALIGN_TEXT
osigcode:
        call    *SIGF_HANDLER(%esp)     /* call signal handler */
        lea     SIGF_SC(%esp),%eax      /* get sigcontext */
        pushl   %eax
        testl   $PSL_VM,SC_PS(%eax)
        jne     9f
        movl    SC_GS(%eax),%gs         /* restore %gs */
9:
        movl    $103,%eax               /* 3.x SYS_sigreturn */
        pushl   %eax                    /* junk to fake return addr. */
        int     $0x80                   /* enter kernel with args */
0:      jmp     0b
#endif /* COMPAT_43 */

        ALIGN_TEXT
esigcode:

        .data
        .globl  szsigcode
szsigcode:
        .long   esigcode-sigcode
#ifdef COMPAT_FREEBSD4
        .globl  szfreebsd4_sigcode
szfreebsd4_sigcode:
        .long   esigcode-freebsd4_sigcode
#endif
#ifdef COMPAT_43
        .globl  szosigcode
szosigcode:
        .long   esigcode-osigcode
#endif
        .text

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
/*
 * This code is called in different ways depending on what loaded
 * and started the kernel.  It is used to detect how we got our
 * arguments from the code that started us, and what to do with them.
 *
 * Old disk boot blocks:
 *	(*btext)(howto, bootdev, cyloffset, esym);
 *	[return address == 0, and can NOT be returned to]
 *	[cyloffset was not supported by the FreeBSD boot code
 *	 and always passed in as 0]
 *	[esym is also known as total in the boot code, and
 *	 was never properly supported by the FreeBSD boot code]
 *
 * Old diskless netboot code:
 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
 *	[return address != 0, and can NOT be returned to]
 *	If we are being booted by this code it will NOT work,
 *	so we are just going to halt if we find this case.
 *
 * New uniform boot code:
 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
 *	[return address != 0, and can be returned to]
 *
 * There may seem to be a lot of wasted arguments in here, but
 * that is so the newer boot code can still load very old kernels
 * and old boot code can load new kernels.
 */

/*
 * The old style disk boot blocks faked a frame on the stack and
 * did an lret to get here.  The frame on the stack has a return
 * address of 0.
 */
        cmpl    $0,4(%ebp)
        je      olddiskboot

/*
 * We have some form of return address, so this is either the
 * old diskless netboot code or the new uniform boot code.  That can
 * be detected by looking at the 5th argument: if it is 0, we are
 * being booted by the new uniform boot code.
 */
        cmpl    $0,24(%ebp)
        je      newboot

/*
 * It seems we have been loaded by the old diskless boot code; we
 * don't stand a chance of running, as the diskless structure
 * changed considerably between the two versions, so just halt.
 */
        hlt

/*
 * We have been loaded by the new uniform boot code.
 * Let's check the bootinfo version, and if we do not understand
 * it we return to the loader with a status of 1 to indicate the error.
 */
newboot:
        movl    28(%ebp),%ebx           /* &bootinfo.version */
        movl    BI_VERSION(%ebx),%eax
        cmpl    $1,%eax                 /* We only understand version 1 */
        je      1f
        movl    $1,%eax                 /* Return status */
        leave
        /*
         * XXX this returns to our caller's caller (as is required) since
         * we didn't set up a frame and our caller did.
         */
        ret

1:
        /*
         * If we have a kernelname copy it in
         */
        movl    BI_KERNELNAME(%ebx),%esi
        cmpl    $0,%esi
        je      2f                      /* No kernelname */
        movl    $MAXPATHLEN,%ecx        /* Brute force!!! */
        movl    $R(kernelname),%edi
        cmpb    $'/',(%esi)             /* Make sure it starts with a slash */
        je      1f
        movb    $'/',(%edi)
        incl    %edi
        decl    %ecx
1:
        cld
        rep
        movsb

2:
/*
 * Determine the size of the boot loader's copy of the bootinfo
 * struct.  This is impossible to do properly because old versions
 * of the struct don't contain a size field and there are 2 old
 * versions with the same version number.
 */
        movl    $BI_ENDCOMMON,%ecx      /* prepare for sizeless version */
        testl   $RB_BOOTINFO,8(%ebp)    /* bi_size (and bootinfo) valid? */
        je      got_bi_size             /* no, sizeless version */
        movl    BI_SIZE(%ebx),%ecx
got_bi_size:

/*
 * Copy the common part of the bootinfo struct
 */
        movl    %ebx,%esi
        movl    $R(bootinfo),%edi
        cmpl    $BOOTINFO_SIZE,%ecx
        jbe     got_common_bi_size
        movl    $BOOTINFO_SIZE,%ecx
got_common_bi_size:
        cld
        rep
        movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
/*
 * If we have a nfs_diskless structure copy it in
 */
        movl    BI_NFS_DISKLESS(%ebx),%esi
        cmpl    $0,%esi
        je      olddiskboot
        movl    $R(nfs_diskless),%edi
        movl    $NFSDISKLESS_SIZE,%ecx
        cld
        rep
        movsb
        movl    $R(nfs_diskless_valid),%edi
        movl    $1,(%edi)
#endif
#endif

/*
 * The old style disk boot.
 *	(*btext)(howto, bootdev, cyloffset, esym);
 * Note that the newer boot code just falls into here to pick
 * up howto and bootdev; cyloffset and esym are no longer used.
 */
olddiskboot:
        movl    8(%ebp),%eax
        movl    %eax,R(boothowto)
        movl    12(%ebp),%eax
        movl    %eax,R(bootdev)

        ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

        /* Try to toggle alignment check flag; does not exist on 386. */
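        /*
         * The classic EFLAGS probe: copy EFLAGS, flip the bit of
         * interest, write it back, and read EFLAGS again.  If the bit
         * did not stick, this CPU predates the bit.  PSL_AC (bit 18)
         * first appeared on the 486, and PSL_ID below is implemented
         * only by CPUs that support the cpuid instruction.
         */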
        pushfl
        popl    %eax
        movl    %eax,%ecx
        orl     $PSL_AC,%eax
        pushl   %eax
        popfl
        pushfl
        popl    %eax
        xorl    %ecx,%eax
        andl    $PSL_AC,%eax
        pushl   %ecx
        popfl

        testl   %eax,%eax
        jnz     try486

        /* NexGen CPU does not have alignment check flag. */
        pushfl
        movl    $0x5555, %eax
        xorl    %edx, %edx
        movl    $2, %ecx
        clc
        divl    %ecx
        jz      trynexgen
        popfl
        movl    $CPU_386,R(cpu)
        jmp     3f

trynexgen:
        popfl
        movl    $CPU_NX586,R(cpu)
        movl    $0x4778654e,R(cpu_vendor)       # store vendor string
        movl    $0x72446e65,R(cpu_vendor+4)
        movl    $0x6e657669,R(cpu_vendor+8)
        movl    $0,R(cpu_vendor+12)
        jmp     3f

try486: /* Try to toggle identification flag; does not exist on early 486s. */
        pushfl
        popl    %eax
        movl    %eax,%ecx
        xorl    $PSL_ID,%eax
        pushl   %eax
        popfl
        pushfl
        popl    %eax
        xorl    %ecx,%eax
        andl    $PSL_ID,%eax
        pushl   %ecx
        popfl

        testl   %eax,%eax
        jnz     trycpuid
        movl    $CPU_486,R(cpu)

/*
 * Check for a Cyrix CPU.
 * Cyrix CPUs do not change the undefined flags following
 * execution of the divide instruction which divides 5 by 2.
 *
 * Note: CPUID is enabled on M2, so it passes another way.
 */
        pushfl
        movl    $0x5555, %eax
        xorl    %edx, %edx
        movl    $2, %ecx
        clc
        divl    %ecx
        jnc     trycyrix
        popfl
        jmp     3f              /* Probably an Intel CPU. */

trycyrix:
        popfl
/*
 * The IBM Blue Lightning CPU also doesn't change the undefined flags.
 * Because IBM doesn't disclose information about this CPU, we cannot
 * distinguish it from Cyrix's (including the IBM brand of Cyrix CPUs).
 */
        movl    $0x69727943,R(cpu_vendor)       # store vendor string
        movl    $0x736e4978,R(cpu_vendor+4)
        movl    $0x64616574,R(cpu_vendor+8)
        jmp     3f

trycpuid:       /* Use the `cpuid' instruction. */
        xorl    %eax,%eax
        cpuid                                   # cpuid 0
        movl    %eax,R(cpu_high)                # highest capability
        movl    %ebx,R(cpu_vendor)              # store vendor string
        movl    %edx,R(cpu_vendor+4)
        movl    %ecx,R(cpu_vendor+8)
        movb    $0,R(cpu_vendor+12)

        movl    $1,%eax
        cpuid                                   # cpuid 1
        movl    %eax,R(cpu_id)                  # store cpu_id
        movl    %ebx,R(cpu_procinfo)            # store cpu_procinfo
        movl    %edx,R(cpu_feature)             # store cpu_feature
        movl    %ecx,R(cpu_feature2)            # store cpu_feature2
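        /*
         * cpuid leaf 1 returns the processor signature in %eax:
         * stepping in bits 3..0, model in bits 7..4, family in bits
         * 11..8.  Rotating right by 8 brings the family field down to
         * the low bits, where masking with 15 isolates it.
         */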
        rorl    $8,%eax                         # extract family type
        andl    $15,%eax
        cmpl    $5,%eax
        jae     1f

        /* less than Pentium; must be 486 */
        movl    $CPU_486,R(cpu)
        jmp     3f
1:
        /* a Pentium? */
        cmpl    $5,%eax
        jne     2f
        movl    $CPU_586,R(cpu)
        jmp     3f
2:
        /* Greater than Pentium...call it a Pentium Pro */
        movl    $CPU_686,R(cpu)
3:
        ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

        /* Find end of kernel image (rounded up to a page boundary). */
        movl    $R(_end),%esi

        /* Include symbols, if any. */
        movl    R(bootinfo+BI_ESYMTAB),%edi
        testl   %edi,%edi
        je      over_symalloc
        movl    %edi,%esi
        movl    $KERNBASE,%edi
        addl    %edi,R(bootinfo+BI_SYMTAB)
        addl    %edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

        /* If we are told where the end of the kernel space is, believe it. */
        movl    R(bootinfo+BI_KERNEND),%edi
        testl   %edi,%edi
        je      no_kernend
        movl    %edi,%esi
no_kernend:

        addl    $PDRMASK,%esi           /* Play conservative for now, and */
        andl    $~PDRMASK,%esi          /* ... round up to the next 4M boundary. */
        movl    %esi,R(KERNend)         /* save end of kernel */
        movl    %esi,R(physfree)        /* next free page is at end of kernel */

        /* Allocate Kernel Page Tables */
        ALLOCPAGES(NKPT)
        movl    %esi,R(KPTphys)

        /* Allocate Page Table Directory */
#ifdef PAE
        /* XXX only need 32 bytes (easier for now) */
        ALLOCPAGES(1)
        movl    %esi,R(IdlePDPT)
#endif
        ALLOCPAGES(NPGPTD)
        movl    %esi,R(IdlePTD)

        /* Allocate KSTACK */
        ALLOCPAGES(KSTACK_PAGES)
        movl    %esi,R(p0kpa)
        addl    $KERNBASE, %esi
        movl    %esi, R(proc0kstack)

        ALLOCPAGES(1)                   /* vm86/bios stack */
        movl    %esi,R(vm86phystk)

        ALLOCPAGES(3)                   /* pgtable + ext + IOPAGES */
        movl    %esi,R(vm86pa)
        addl    $KERNBASE, %esi
        movl    %esi, R(vm86paddr)

#ifdef SMP
        /* Allocate cpu0's private data page */
        ALLOCPAGES(1)
        movl    %esi,R(cpu0pp)
        addl    $KERNBASE, %esi
        movl    %esi, R(cpu0prvpage)    /* relocated to KVM space */

        /* Allocate SMP page table page */
        ALLOCPAGES(1)
        movl    %esi,R(SMPptpa)
        addl    $KERNBASE, %esi
        movl    %esi, R(SMPpt)          /* relocated to KVM space */
#endif /* SMP */

/*
 * Enable PSE and PGE.
 */
#ifndef DISABLE_PSE
        testl   $CPUID_PSE, R(cpu_feature)
        jz      1f
        movl    $PG_PS, R(pseflag)
        movl    %cr4, %eax
        orl     $CR4_PSE, %eax
        movl    %eax, %cr4
1:
#endif
#ifndef DISABLE_PG_G
        testl   $CPUID_PGE, R(cpu_feature)
        jz      2f
        movl    $PG_G, R(pgeflag)
        movl    %cr4, %eax
        orl     $CR4_PGE, %eax
        movl    %eax, %cr4
2:
#endif

/*
 * Initialize page table pages mapping physical address zero through the
 * end of the kernel.  All of the page table entries allow read and write
 * access.  Write access to the first physical page is required by bios32
 * calls, and write access to the first 1 MB of physical memory is required
 * by ACPI for implementing suspend and resume.  We do this even if we've
 * enabled PSE above; we'll just switch the corresponding kernel PDEs
 * before we turn on paging.
 *
 * XXX: We waste some pages here in the PSE case!  DON'T BLINDLY REMOVE
 * THIS!  SMP needs the page table to be there to map the kernel P==V.
 */
        xorl    %eax, %eax
        movl    R(KERNend),%ecx
        shrl    $PAGE_SHIFT,%ecx
        fillkptphys($PG_RW)

        /* Map page directory. */
#ifdef PAE
        movl    R(IdlePDPT), %eax
        movl    $1, %ecx
        fillkptphys($PG_RW)
#endif

        movl    R(IdlePTD), %eax
        movl    $NPGPTD, %ecx
        fillkptphys($PG_RW)

        /* Map proc0's KSTACK in the physical way ... */
        movl    R(p0kpa), %eax
        movl    $(KSTACK_PAGES), %ecx
        fillkptphys($PG_RW)

        /* Map ISA hole */
        movl    $ISA_HOLE_START, %eax
        movl    $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
        fillkptphys($PG_RW)

        /* Map space for the vm86 region */
        movl    R(vm86phystk), %eax
        movl    $4, %ecx
        fillkptphys($PG_RW)

        /* Map page 0 into the vm86 page table */
        movl    $0, %eax
        movl    $0, %ebx
        movl    $1, %ecx
        fillkpt(R(vm86pa), $PG_RW|PG_U)

        /* ...likewise for the ISA hole */
        movl    $ISA_HOLE_START, %eax
        movl    $ISA_HOLE_START>>PAGE_SHIFT, %ebx
        movl    $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
        fillkpt(R(vm86pa), $PG_RW|PG_U)

#ifdef SMP
        /* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
        movl    R(cpu0pp), %eax
        movl    $1, %ecx
        fillkptphys($PG_RW)

        /* Map SMP page table page into global kmem FWIW */
        movl    R(SMPptpa), %eax
        movl    $1, %ecx
        fillkptphys($PG_RW)

        /* Map the private page into the SMP page table */
        movl    R(cpu0pp), %eax
        movl    $0, %ebx                /* pte offset = 0 */
        movl    $1, %ecx                /* one private page coming right up */
        fillkpt(R(SMPptpa), $PG_RW)

        /* ... and put the page table page in the pde. */
        movl    R(SMPptpa), %eax
        movl    $MPPTDI, %ebx
        movl    $1, %ecx
        fillkpt(R(IdlePTD), $PG_RW)

        /* Fakeup VA for the local apic to allow early traps. */
        ALLOCPAGES(1)
        movl    %esi, %eax
        movl    $(NPTEPG-1), %ebx       /* pte offset = NPTEPG-1 */
        movl    $1, %ecx                /* one private pt coming right up */
        fillkpt(R(SMPptpa), $PG_RW)
#endif /* SMP */

/*
 * Create an identity mapping for low physical memory, including the kernel.
 * The part of this mapping that covers the first 1 MB of physical memory
 * becomes a permanent part of the kernel's address space.  The rest of this
 * mapping is destroyed in pmap_bootstrap().  Ordinarily, the same page table
 * pages are shared by the identity mapping and the kernel's native mapping.
 * However, the permanent identity mapping cannot contain PG_G mappings.
 * Thus, if the kernel is loaded within the permanent identity mapping, that
 * page table page must be duplicated and not shared.
 *
 * N.B. Due to errata concerning large pages and physical address zero,
 * a PG_PS mapping is not used.
 */
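/*
 * Concretely (assuming the usual non-PAE layout, where one PDE covers
 * 4MB): the fillkpt below installs the NKPT kernel page table pages at
 * PDE indices 0..NKPT-1, which together with the copies installed at
 * KPTDI further down makes VA x and VA KERNBASE+x reach the same
 * physical page.  The duplication step that follows runs only when the
 * kernel is loaded below the first PDE boundary while global pages are
 * enabled; it gives the identity mapping its own copy of the shared
 * page table page, so that copy can be kept free of PG_G.
 */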
        movl    R(KPTphys), %eax
        xorl    %ebx, %ebx
        movl    $NKPT, %ecx
        fillkpt(R(IdlePTD), $PG_RW)
#if KERNLOAD < (1 << PDRSHIFT)
        testl   $PG_G, R(pgeflag)
        jz      1f
        ALLOCPAGES(1)
        movl    %esi, %edi
        movl    R(IdlePTD), %eax
        movl    (%eax), %esi
        movl    %edi, (%eax)
        movl    $PAGE_SIZE, %ecx
        cld
        rep
        movsb
1:
#endif

/*
 * For the non-PSE case, install PDEs for PTs covering the KVA.
 * For the PSE case, do the same, but clobber the ones corresponding
 * to the kernel (from btext to KERNend) with 4M (2M for PAE) ('PS')
 * PDEs immediately after.
 */
        movl    R(KPTphys), %eax
        movl    $KPTDI, %ebx
        movl    $NKPT, %ecx
        fillkpt(R(IdlePTD), $PG_RW)
        cmpl    $0,R(pseflag)
        je      done_pde

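/*
 * The arithmetic of the superpage loop below: each PG_PS PDE maps
 * 1 << PDRSHIFT bytes, so (KERNend - KERNLOAD) >> PDRSHIFT entries are
 * needed, starting at directory index KPTDI + KERNLOAD/(1 << PDRSHIFT).
 * Each iteration advances the physical address by one superpage and
 * the PDE pointer by one entry.
 */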
        movl    R(KERNend), %ecx
        movl    $KERNLOAD, %eax
        subl    %eax, %ecx
        shrl    $PDRSHIFT, %ecx
        movl    $(KPTDI+(KERNLOAD/(1 << PDRSHIFT))), %ebx
        shll    $PDESHIFT, %ebx
        addl    R(IdlePTD), %ebx
        orl     $(PG_V|PG_RW|PG_PS), %eax
1:      movl    %eax, (%ebx)
        addl    $(1 << PDRSHIFT), %eax
        addl    $PDESIZE, %ebx
        loop    1b

done_pde:
        /* install a pde recursively mapping page directory as a page table */
        movl    R(IdlePTD), %eax
        movl    $PTDPTDI, %ebx
        movl    $NPGPTD,%ecx
        fillkpt(R(IdlePTD), $PG_RW)

#ifdef PAE
        movl    R(IdlePTD), %eax
        xorl    %ebx, %ebx
        movl    $NPGPTD, %ecx
        fillkpt(R(IdlePDPT), $0x0)
#endif

        ret