FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/locore.s
1 /*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * William Jolitz.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)locore.s 7.3 (Berkeley) 5/13/91
37 * $FreeBSD: releng/5.2/sys/i386/i386/locore.s 121986 2003-11-03 21:53:38Z jhb $
38 *
39 * originally from: locore.s, by William F. Jolitz
40 *
41 * Substantially rewritten by David Greenman, Rod Grimes,
42 * Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
43 * and many others.
44 */
45
46 #include "opt_bootp.h"
47 #include "opt_compat.h"
48 #include "opt_nfsroot.h"
49 #include "opt_pmap.h"
50
51 #include <sys/syscall.h>
52 #include <sys/reboot.h>
53
54 #include <machine/asmacros.h>
55 #include <machine/cputypes.h>
56 #include <machine/psl.h>
57 #include <machine/pmap.h>
58 #include <machine/specialreg.h>
59
60 #include "assym.s"
61
62 /*
63 * XXX
64 *
65 * Note: This version is greatly munged to avoid various assembler errors
66 * that may be fixed in newer versions of gas.  Perhaps newer versions
67 * will allow a more pleasant appearance.
68 */
69
70 /*
71 * PTmap is recursive pagemap at top of virtual address space.
72 * Within PTmap, the page directory can be found (third indirection).
73 */
74 .globl PTmap,PTD,PTDpde
75 .set PTmap,(PTDPTDI << PDRSHIFT)
76 .set PTD,PTmap + (PTDPTDI * PAGE_SIZE)
77 .set PTDpde,PTD + (PTDPTDI * PDESIZE)
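/*
 * Roughly, with this recursive slot installed, the non-PAE layout works
 * out to the following C sketch (names loosely follow <machine/pmap.h>;
 * vtopte/vtopde here are illustrative, not literal copies):
 *
 *	pt_entry_t *PTmap = (pt_entry_t *)(PTDPTDI << PDRSHIFT);
 *	pd_entry_t *PTD   = (pd_entry_t *)((PTDPTDI << PDRSHIFT) +
 *	                        PTDPTDI * PAGE_SIZE);
 *
 *	pt_entry_t *vtopte(vm_offset_t va) { return (&PTmap[va >> PAGE_SHIFT]); }
 *	pd_entry_t *vtopde(vm_offset_t va) { return (&PTD[va >> PDRSHIFT]); }
 */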
78
79 #ifdef SMP
80 /*
81 * Define layout of per-cpu address space.
82 * This is "constructed" in locore.s on the BSP and in mp_machdep.c
83 * for each AP. DO NOT REORDER THESE WITHOUT UPDATING THE REST!
84 */
85 .globl SMP_prvspace
86 .set SMP_prvspace,(MPPTDI << PDRSHIFT)
87 #endif /* SMP */
88
89 /*
90 * Compiled KERNBASE location and the kernel load address
91 */
92 .globl kernbase
93 .set kernbase,KERNBASE
94 .globl kernload
95 .set kernload,KERNLOAD
96
97 /*
98 * Globals
99 */
100 .data
101 ALIGN_DATA /* just to be sure */
102
103 .space 0x2000 /* space for tmpstk - temporary stack */
104 tmpstk:
105
106 .globl bootinfo
107 bootinfo: .space BOOTINFO_SIZE /* bootinfo that we can handle */
108
109 .globl KERNend
110 KERNend: .long 0 /* phys addr end of kernel (just after bss) */
111 physfree: .long 0 /* phys addr of next free page */
112
113 #ifdef SMP
114 .globl cpu0prvpage
115 cpu0pp: .long 0 /* phys addr cpu0 private pg */
116 cpu0prvpage: .long 0 /* relocated version */
117
118 .globl SMPpt
119 SMPptpa: .long 0 /* phys addr SMP page table */
120 SMPpt: .long 0 /* relocated version */
121 #endif /* SMP */
122
123 .globl IdlePTD
124 IdlePTD: .long 0 /* phys addr of kernel PTD */
125
126 #ifdef PAE
127 .globl IdlePDPT
128 IdlePDPT: .long 0 /* phys addr of kernel PDPT */
129 #endif
130
131 #ifdef SMP
132 .globl KPTphys
133 #endif
134 KPTphys: .long 0 /* phys addr of kernel page tables */
135
136 .globl proc0uarea, proc0kstack
137 proc0uarea: .long 0 /* address of proc 0 uarea space */
138 proc0kstack: .long 0 /* address of proc 0 kstack space */
139 p0upa: .long 0 /* phys addr of proc0's UAREA */
140 p0kpa: .long 0 /* phys addr of proc0's STACK */
141
142 vm86phystk: .long 0 /* PA of vm86/bios stack */
143
144 .globl vm86paddr, vm86pa
145 vm86paddr: .long 0 /* address of vm86 region */
146 vm86pa: .long 0 /* phys addr of vm86 region */
147
148 #ifdef PC98
149 .globl pc98_system_parameter
150 pc98_system_parameter:
151 .space 0x240
152 #endif
153
154 /**********************************************************************
155 *
156 * Some handy macros
157 *
158 */
159
160 #define R(foo) ((foo)-KERNBASE)
161
162 #define ALLOCPAGES(foo) \
163 movl R(physfree), %esi ; \
164 movl $((foo)*PAGE_SIZE), %eax ; \
165 addl %esi, %eax ; \
166 movl %eax, R(physfree) ; \
167 movl %esi, %edi ; \
168 movl $((foo)*PAGE_SIZE),%ecx ; \
169 xorl %eax,%eax ; \
170 cld ; \
171 rep ; \
172 stosb
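/*
 * ALLOCPAGES(n) is a boot-time bump allocator over physical memory, used
 * while paging is still off.  In rough C terms (an illustrative sketch;
 * allocpages is not a real symbol here):
 *
 *	static vm_paddr_t
 *	allocpages(int n)
 *	{
 *		vm_paddr_t pa = physfree;		// next free physical page
 *		physfree += n * PAGE_SIZE;		// bump the free pointer
 *		bzero((void *)pa, n * PAGE_SIZE);	// pages are handed out zeroed
 *		return (pa);				// the start PA is left in %esi
 *	}
 */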
173
174 /*
175 * fillkpt
176 * eax = page frame address
177 * ebx = index into page table
178 * ecx = how many pages to map
179 * base = base address of page dir/table
180 * prot = protection bits
181 */
182 #define fillkpt(base, prot) \
183 shll $PTESHIFT,%ebx ; \
184 addl base,%ebx ; \
185 orl $PG_V,%eax ; \
186 orl prot,%eax ; \
187 1: movl %eax,(%ebx) ; \
188 addl $PAGE_SIZE,%eax ; /* increment physical address */ \
189 addl $PTESIZE,%ebx ; /* next pte */ \
190 loop 1b
191
192 /*
193 * fillkptphys(prot)
194 * eax = physical address
195 * ecx = how many pages to map
196 * prot = protection bits
197 */
198 #define fillkptphys(prot) \
199 movl %eax, %ebx ; \
200 shrl $PAGE_SHIFT, %ebx ; \
201 fillkpt(R(KPTphys), prot)
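/*
 * In rough C terms the two macros amount to the following (illustrative
 * sketch; these functions are not real symbols here):
 *
 *	static void
 *	fillkpt(pt_entry_t *base, u_int idx, vm_paddr_t pa, int npages, int prot)
 *	{
 *		while (npages-- > 0) {
 *			base[idx++] = pa | PG_V | prot;	// one PTE per page
 *			pa += PAGE_SIZE;
 *		}
 *	}
 *
 *	static void
 *	fillkptphys(vm_paddr_t pa, int npages, int prot)
 *	{
 *		// identity map: the PTE index is the physical page number
 *		fillkpt((pt_entry_t *)KPTphys, pa >> PAGE_SHIFT, pa, npages, prot);
 *	}
 */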
202
203 .text
204 /**********************************************************************
205 *
206 * This is where the bootblocks start us, set the ball rolling...
207 *
208 */
209 NON_GPROF_ENTRY(btext)
210
211 #ifdef PC98
212 /* save SYSTEM PARAMETER for resume (NS/T or other) */
213 movl $0xa1400,%esi
214 movl $R(pc98_system_parameter),%edi
215 movl $0x0240,%ecx
216 cld
217 rep
218 movsb
219 #else /* IBM-PC */
220 /* Tell the bios to warmboot next time */
221 movw $0x1234,0x472
222 #endif /* PC98 */
223
224 /* Set up a real frame in case the double return in newboot is executed. */
225 pushl %ebp
226 movl %esp, %ebp
227
228 /* Don't trust what the BIOS gives for eflags. */
229 pushl $PSL_KERNEL
230 popfl
231
232 /*
233 * Don't trust what the BIOS gives for %fs and %gs. Trust the bootstrap
234 * to set %cs, %ds, %es and %ss.
235 */
236 mov %ds, %ax
237 mov %ax, %fs
238 mov %ax, %gs
239
240 /*
241 * Clear the bss. Not all boot programs do it, and it is our job anyway.
242 *
243 * XXX we don't check that there is memory for our bss and page tables
244 * before using it.
245 *
246 * Note: we must be careful not to overwrite an active gdt or idt.  They are
247 * inactive from now until we switch to new ones, since we don't load any
248 * more segment registers or permit interrupts until after the switch.
249 */
250 movl $R(end),%ecx
251 movl $R(edata),%edi
252 subl %edi,%ecx
253 xorl %eax,%eax
254 cld
255 rep
256 stosb
257
258 call recover_bootinfo
259
260 /* Get onto a stack that we can trust. */
261 /*
262 * XXX this step is delayed in case recover_bootinfo needs to return via
263 * the old stack, but it need not be, since recover_bootinfo actually
264 * returns via the old frame.
265 */
266 movl $R(tmpstk),%esp
267
268 #ifdef PC98
269 /* pc98_machine_type & M_EPSON_PC98 */
270 testb $0x02,R(pc98_system_parameter)+220
271 jz 3f
272 /* epson_machine_id <= 0x0b */
273 cmpb $0x0b,R(pc98_system_parameter)+224
274 ja 3f
275
276 /* count up memory */
277 movl $0x100000,%eax /* next, tally remaining memory */
278 movl $0xFFF-0x100,%ecx
279 1: movl 0(%eax),%ebx /* save location to check */
280 movl $0xa55a5aa5,0(%eax) /* write test pattern */
281 cmpl $0xa55a5aa5,0(%eax) /* does not check yet for rollover */
282 jne 2f
283 movl %ebx,0(%eax) /* restore memory */
284 addl $PAGE_SIZE,%eax
285 loop 1b
286 2: subl $0x100000,%eax
287 shrl $17,%eax
288 movb %al,R(pc98_system_parameter)+1
289 3:
290
291 movw R(pc98_system_parameter+0x86),%ax
292 movw %ax,R(cpu_id)
293 #endif
294
295 call identify_cpu
296 call create_pagetables
297
298 /*
299 * If the CPU has support for VME, turn it on.
300 */
301 testl $CPUID_VME, R(cpu_feature)
302 jz 1f
303 movl %cr4, %eax
304 orl $CR4_VME, %eax
305 movl %eax, %cr4
306 1:
307
308 /* Now enable paging */
309 #ifdef PAE
310 movl R(IdlePDPT), %eax
311 movl %eax, %cr3
312 movl %cr4, %eax
313 orl $CR4_PAE, %eax
314 movl %eax, %cr4
315 #else
316 movl R(IdlePTD), %eax
317 movl %eax,%cr3 /* load ptd addr into mmu */
318 #endif
319 movl %cr0,%eax /* get control word */
320 orl $CR0_PE|CR0_PG,%eax /* enable paging */
321 movl %eax,%cr0 /* and let's page NOW! */
322
323 pushl $begin /* jump to high virtualized address */
324 ret
325
326 /* now running relocated at KERNBASE where the system is linked to run */
327 begin:
328 /* set up bootstrap stack */
329 movl proc0kstack,%eax /* location of in-kernel stack */
330 /* bootstrap stack end location */
331 leal (KSTACK_PAGES*PAGE_SIZE-PCB_SIZE)(%eax),%esp
332
333 xorl %ebp,%ebp /* mark end of frames */
334
335 #ifdef PAE
336 movl IdlePDPT,%esi
337 #else
338 movl IdlePTD,%esi
339 #endif
340 movl %esi,(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE+PCB_CR3)(%eax)
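/*
 * The stack being set up here, as a rough C sketch (IdlePTD_pa is
 * illustrative shorthand for the physical address stored in IdlePTD, or
 * in IdlePDPT under PAE): proc0's kernel stack is KSTACK_PAGES pages,
 * the PCB occupies the top PCB_SIZE bytes, %esp starts just below it,
 * and pcb_cr3 is seeded with the initial page directory:
 *
 *	struct pcb *pcb = (struct pcb *)
 *	    (proc0kstack + KSTACK_PAGES * PAGE_SIZE - PCB_SIZE);
 *	esp = (uintptr_t)pcb;		// initial bootstrap stack pointer
 *	pcb->pcb_cr3 = IdlePTD_pa;	// physical address, loaded on context switch
 */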
341
342 pushl physfree /* value of first for init386(first) */
343 call init386 /* wire 386 chip for unix operation */
344
345 /*
346 * Clean up the stack in a way that db_numargs() understands, so
347 * that backtraces in ddb don't underrun the stack. Traps for
348 * inaccessible memory are more fatal than usual this early.
349 */
350 addl $4,%esp
351
352 call mi_startup /* autoconfiguration, mountroot etc */
353 /* NOTREACHED */
354 addl $0,%esp /* for db_numargs() again */
355
356 /*
357 * Signal trampoline, copied to top of user stack
358 */
359 NON_GPROF_ENTRY(sigcode)
360 calll *SIGF_HANDLER(%esp)
361 leal SIGF_UC(%esp),%eax /* get ucontext */
362 pushl %eax
363 testl $PSL_VM,UC_EFLAGS(%eax)
364 jne 1f
365 movl UC_GS(%eax),%gs /* restore %gs */
366 1:
367 movl $SYS_sigreturn,%eax
368 pushl %eax /* junk to fake return addr. */
369 int $0x80 /* enter kernel with args */
370 /* on stack */
371 1:
372 jmp 1b
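/*
 * What the trampoline does, in rough C terms (illustrative sketch; the
 * handler arguments are whatever the kernel placed in the sigframe):
 *
 *	handler(...);		// args already set up on the stack by the kernel
 *	sigreturn(&uc);		// via int $0x80; %gs restored first unless vm86
 *	for (;;)
 *		;		// not reached: sigreturn does not return here
 */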
373
374 #ifdef COMPAT_FREEBSD4
375 ALIGN_TEXT
376 freebsd4_sigcode:
377 calll *SIGF_HANDLER(%esp)
378 leal SIGF_UC4(%esp),%eax /* get ucontext */
379 pushl %eax
380 testl $PSL_VM,UC4_EFLAGS(%eax)
381 jne 1f
382 movl UC4_GS(%eax),%gs /* restore %gs */
383 1:
384 movl $344,%eax /* 4.x SYS_sigreturn */
385 pushl %eax /* junk to fake return addr. */
386 int $0x80 /* enter kernel with args */
387 /* on stack */
388 1:
389 jmp 1b
390 #endif
391
392 #ifdef COMPAT_43
393 ALIGN_TEXT
394 osigcode:
395 call *SIGF_HANDLER(%esp) /* call signal handler */
396 lea SIGF_SC(%esp),%eax /* get sigcontext */
397 pushl %eax
398 testl $PSL_VM,SC_PS(%eax)
399 jne 9f
400 movl SC_GS(%eax),%gs /* restore %gs */
401 9:
402 movl $103,%eax /* 3.x SYS_sigreturn */
403 pushl %eax /* junk to fake return addr. */
404 int $0x80 /* enter kernel with args */
405 0: jmp 0b
406 #endif /* COMPAT_43 */
407
408 ALIGN_TEXT
409 esigcode:
410
411 .data
412 .globl szsigcode
413 szsigcode:
414 .long esigcode-sigcode
415 #ifdef COMPAT_FREEBSD4
416 .globl szfreebsd4_sigcode
417 szfreebsd4_sigcode:
418 .long esigcode-freebsd4_sigcode
419 #endif
420 #ifdef COMPAT_43
421 .globl szosigcode
422 szosigcode:
423 .long esigcode-osigcode
424 #endif
425 .text
426
427 /**********************************************************************
428 *
429 * Recover the bootinfo passed to us from the boot program
430 *
431 */
432 recover_bootinfo:
433 /*
434 * This code is called in different ways depending on what loaded
435 * and started the kernel. This is used to detect how we get the
436 * arguments from the other code and what we do with them.
437 *
438 * Old disk boot blocks:
439 * (*btext)(howto, bootdev, cyloffset, esym);
440 * [return address == 0, and can NOT be returned to]
441 * [cyloffset was not supported by the FreeBSD boot code
442 * and always passed in as 0]
443 * [esym is also known as total in the boot code, and
444 * was never properly supported by the FreeBSD boot code]
445 *
446 * Old diskless netboot code:
447 * (*btext)(0,0,0,0,&nfsdiskless,0,0,0);
448 * [return address != 0, and can NOT be returned to]
449 * If we are being booted by this code it will NOT work,
450 * so we are just going to halt if we find this case.
451 *
452 * New uniform boot code:
453 * (*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
454 * [return address != 0, and can be returned to]
455 *
456 * There may seem to be a lot of wasted arguments in here, but
457 * that is so the newer boot code can still load very old kernels
458 * and old boot code can load new kernels.
459 */
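/*
 * The detection below boils down to this C sketch (return_address and
 * arg5 are illustrative names for 4(%ebp) and 24(%ebp)):
 *
 *	if (return_address == 0)
 *		goto olddiskboot;	// old disk boot blocks
 *	else if (arg5 != 0)
 *		halt();			// old diskless netboot: cannot run
 *	else
 *		goto newboot;		// new uniform boot code, &bootinfo is arg 6
 */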
460
461 /*
462 * The old style disk boot blocks faked a frame on the stack and
463 * did an lret to get here.  The frame on the stack has a return
464 * address of 0.
465 */
466 cmpl $0,4(%ebp)
467 je olddiskboot
468
469 /*
470 * We have some form of return address, so this is either the
471 * old diskless netboot code, or the new uniform code. That can
472 * be detected by looking at the 5th argument, if it is 0
473 * we are being booted by the new uniform boot code.
474 */
475 cmpl $0,24(%ebp)
476 je newboot
477
478 /*
479 * It seems we have been loaded by the old diskless boot code.  We
480 * don't stand a chance of running, as the diskless structure has
481 * changed considerably between the two, so just halt.
482 */
483 hlt
484
485 /*
486 * We have been loaded by the new uniform boot code.
487 * Let's check the bootinfo version; if we do not understand it, we
488 * return to the loader with a status of 1 to indicate the error.
489 */
490 newboot:
491 movl 28(%ebp),%ebx /* &bootinfo.version */
492 movl BI_VERSION(%ebx),%eax
493 cmpl $1,%eax /* We only understand version 1 */
494 je 1f
495 movl $1,%eax /* Return status */
496 leave
497 /*
498 * XXX this returns to our caller's caller (as is required) since
499 * we didn't set up a frame and our caller did.
500 */
501 ret
502
503 1:
504 /*
505 * If we have a kernelname copy it in
506 */
507 movl BI_KERNELNAME(%ebx),%esi
508 cmpl $0,%esi
509 je 2f /* No kernelname */
510 movl $MAXPATHLEN,%ecx /* Brute force!!! */
511 movl $R(kernelname),%edi
512 cmpb $'/',(%esi) /* Make sure it starts with a slash */
513 je 1f
514 movb $'/',(%edi)
515 incl %edi
516 decl %ecx
517 1:
518 cld
519 rep
520 movsb
521
522 2:
523 /*
524 * Determine the size of the boot loader's copy of the bootinfo
525 * struct. This is impossible to do properly because old versions
526 * of the struct don't contain a size field and there are 2 old
527 * versions with the same version number.
528 */
529 movl $BI_ENDCOMMON,%ecx /* prepare for sizeless version */
530 testl $RB_BOOTINFO,8(%ebp) /* bi_size (and bootinfo) valid? */
531 je got_bi_size /* no, sizeless version */
532 movl BI_SIZE(%ebx),%ecx
533 got_bi_size:
534
535 /*
536 * Copy the common part of the bootinfo struct
537 */
538 movl %ebx,%esi
539 movl $R(bootinfo),%edi
540 cmpl $BOOTINFO_SIZE,%ecx
541 jbe got_common_bi_size
542 movl $BOOTINFO_SIZE,%ecx
543 got_common_bi_size:
544 cld
545 rep
546 movsb
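/*
 * The size logic and copy above are roughly (illustrative C sketch; bi
 * is the loader's bootinfo pointer from 28(%ebp)):
 *
 *	size = (howto & RB_BOOTINFO) ? bi->bi_size : BI_ENDCOMMON;
 *	if (size > BOOTINFO_SIZE)		// never overrun our own copy
 *		size = BOOTINFO_SIZE;
 *	bcopy(bi, &bootinfo, size);
 */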
547
548 #ifdef NFS_ROOT
549 #ifndef BOOTP_NFSV3
550 /*
551 * If we have a nfs_diskless structure copy it in
552 */
553 movl BI_NFS_DISKLESS(%ebx),%esi
554 cmpl $0,%esi
555 je olddiskboot
556 movl $R(nfs_diskless),%edi
557 movl $NFSDISKLESS_SIZE,%ecx
558 cld
559 rep
560 movsb
561 movl $R(nfs_diskless_valid),%edi
562 movl $1,(%edi)
563 #endif
564 #endif
565
566 /*
567 * The old style disk boot.
568 * (*btext)(howto, bootdev, cyloffset, esym);
569 * Note that the newer boot code just falls into here to pick up
570 * howto and bootdev; cyloffset and esym are no longer used.
571 */
572 olddiskboot:
573 movl 8(%ebp),%eax
574 movl %eax,R(boothowto)
575 movl 12(%ebp),%eax
576 movl %eax,R(bootdev)
577
578 ret
579
580
581 /**********************************************************************
582 *
583 * Identify the CPU and initialize anything special about it
584 *
585 */
586 identify_cpu:
587
588 /* Try to toggle alignment check flag; does not exist on 386. */
589 pushfl
590 popl %eax
591 movl %eax,%ecx
592 orl $PSL_AC,%eax
593 pushl %eax
594 popfl
595 pushfl
596 popl %eax
597 xorl %ecx,%eax
598 andl $PSL_AC,%eax
599 pushl %ecx
600 popfl
601
602 testl %eax,%eax
603 jnz try486
604
605 /* NexGen CPU does not have the alignment check flag. */
606 pushfl
607 movl $0x5555, %eax
608 xorl %edx, %edx
609 movl $2, %ecx
610 clc
611 divl %ecx
612 jz trynexgen
613 popfl
614 movl $CPU_386,R(cpu)
615 jmp 3f
616
617 trynexgen:
618 popfl
619 movl $CPU_NX586,R(cpu)
620 movl $0x4778654e,R(cpu_vendor) # store vendor string
621 movl $0x72446e65,R(cpu_vendor+4)
622 movl $0x6e657669,R(cpu_vendor+8)
623 movl $0,R(cpu_vendor+12)
624 jmp 3f
625
626 try486: /* Try to toggle identification flag; does not exist on early 486s. */
627 pushfl
628 popl %eax
629 movl %eax,%ecx
630 xorl $PSL_ID,%eax
631 pushl %eax
632 popfl
633 pushfl
634 popl %eax
635 xorl %ecx,%eax
636 andl $PSL_ID,%eax
637 pushl %ecx
638 popfl
639
640 testl %eax,%eax
641 jnz trycpuid
642 movl $CPU_486,R(cpu)
643
644 /*
645 * Check Cyrix CPU
646 * Cyrix CPUs do not change the undefined flags following
647 * execution of the divide instruction which divides 5 by 2.
648 *
649 * Note: CPUID is enabled on the M2, so it is detected another way.
650 */
651 pushfl
652 movl $0x5555, %eax
653 xorl %edx, %edx
654 movl $2, %ecx
655 clc
656 divl %ecx
657 jnc trycyrix
658 popfl
659 jmp 3f /* Probably an Intel CPU. */
660
661 trycyrix:
662 popfl
663 /*
664 * The IBM Blue Lightning CPU also doesn't change the undefined flags.
665 * Because IBM doesn't disclose information about the Blue Lightning,
666 * we can't distinguish it from Cyrix CPUs (including the IBM-branded
667 * Cyrix parts).
668 */
669 movl $0x69727943,R(cpu_vendor) # store vendor string
670 movl $0x736e4978,R(cpu_vendor+4)
671 movl $0x64616574,R(cpu_vendor+8)
672 jmp 3f
673
674 trycpuid: /* Use the `cpuid' instruction. */
675 xorl %eax,%eax
676 cpuid # cpuid 0
677 movl %eax,R(cpu_high) # highest capability
678 movl %ebx,R(cpu_vendor) # store vendor string
679 movl %edx,R(cpu_vendor+4)
680 movl %ecx,R(cpu_vendor+8)
681 movb $0,R(cpu_vendor+12)
682
683 movl $1,%eax
684 cpuid # cpuid 1
685 movl %eax,R(cpu_id) # store cpu_id
686 movl %ebx,R(cpu_procinfo) # store cpu_procinfo
687 movl %edx,R(cpu_feature) # store cpu_feature
688 rorl $8,%eax # extract family type
689 andl $15,%eax
690 cmpl $5,%eax
691 jae 1f
692
693 /* less than Pentium; must be 486 */
694 movl $CPU_486,R(cpu)
695 jmp 3f
696 1:
697 /* a Pentium? */
698 cmpl $5,%eax
699 jne 2f
700 movl $CPU_586,R(cpu)
701 jmp 3f
702 2:
703 /* Greater than Pentium...call it a Pentium Pro */
704 movl $CPU_686,R(cpu)
705 3:
706 ret
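/*
 * The CPUID-based classification above is roughly (illustrative C sketch):
 *
 *	u_int family = (cpu_id >> 8) & 0xf;	// CPUID leaf 1, %eax bits 11:8
 *	if (family < 5)
 *		cpu = CPU_486;
 *	else if (family == 5)
 *		cpu = CPU_586;			// Pentium
 *	else
 *		cpu = CPU_686;			// Pentium Pro or later
 */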
707
708
709 /**********************************************************************
710 *
711 * Create the first page directory and its page tables.
712 *
713 */
714
715 create_pagetables:
716
717 /* Find end of kernel image (rounded up to a page boundary). */
718 movl $R(_end),%esi
719
720 /* Include symbols, if any. */
721 movl R(bootinfo+BI_ESYMTAB),%edi
722 testl %edi,%edi
723 je over_symalloc
724 movl %edi,%esi
725 movl $KERNBASE,%edi
726 addl %edi,R(bootinfo+BI_SYMTAB)
727 addl %edi,R(bootinfo+BI_ESYMTAB)
728 over_symalloc:
729
730 /* If we are told where the end of the kernel space is, believe it. */
731 movl R(bootinfo+BI_KERNEND),%edi
732 testl %edi,%edi
733 je no_kernend
734 movl %edi,%esi
735 no_kernend:
736
737 addl $PDRMASK,%esi /* Play conservative for now, and */
738 andl $~PDRMASK,%esi /* ... round up to the next 4M. */
739 movl %esi,R(KERNend) /* save end of kernel */
740 movl %esi,R(physfree) /* next free page is at end of kernel */
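/*
 * That is, as a C sketch: round the chosen end-of-kernel address up to a
 * 4M (PDR) boundary and start the boot-time allocator there
 * (end_of_kernel stands for the value in %esi above):
 *
 *	KERNend  = (end_of_kernel + PDRMASK) & ~PDRMASK;
 *	physfree = KERNend;
 */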
741
742 /* Allocate Kernel Page Tables */
743 ALLOCPAGES(NKPT)
744 movl %esi,R(KPTphys)
745
746 /* Allocate Page Table Directory */
747 #ifdef PAE
748 /* XXX only need 32 bytes (easier for now) */
749 ALLOCPAGES(1)
750 movl %esi,R(IdlePDPT)
751 #endif
752 ALLOCPAGES(NPGPTD)
753 movl %esi,R(IdlePTD)
754
755 /* Allocate UPAGES */
756 ALLOCPAGES(UAREA_PAGES)
757 movl %esi,R(p0upa)
758 addl $KERNBASE, %esi
759 movl %esi, R(proc0uarea)
760
761 ALLOCPAGES(KSTACK_PAGES)
762 movl %esi,R(p0kpa)
763 addl $KERNBASE, %esi
764 movl %esi, R(proc0kstack)
765
766 ALLOCPAGES(1) /* vm86/bios stack */
767 movl %esi,R(vm86phystk)
768
769 ALLOCPAGES(3) /* pgtable + ext + IOPAGES */
770 movl %esi,R(vm86pa)
771 addl $KERNBASE, %esi
772 movl %esi, R(vm86paddr)
773
774 #ifdef SMP
775 /* Allocate cpu0's private data page */
776 ALLOCPAGES(1)
777 movl %esi,R(cpu0pp)
778 addl $KERNBASE, %esi
779 movl %esi, R(cpu0prvpage) /* relocated to KVM space */
780
781 /* Allocate SMP page table page */
782 ALLOCPAGES(1)
783 movl %esi,R(SMPptpa)
784 addl $KERNBASE, %esi
785 movl %esi, R(SMPpt) /* relocated to KVM space */
786 #endif /* SMP */
787
788 /* Map page zero read-write so bios32 calls can use it */
789 xorl %eax, %eax
790 movl $PG_RW,%edx
791 movl $1,%ecx
792 fillkptphys(%edx)
793
794 /* Map read-only from page 1 to the beginning of the kernel text section */
795 movl $PAGE_SIZE, %eax
796 xorl %edx,%edx
797 movl $R(btext),%ecx
798 addl $PAGE_MASK,%ecx
799 subl %eax,%ecx
800 shrl $PAGE_SHIFT,%ecx
801 fillkptphys(%edx)
802
803 /*
804 * Enable PSE and PGE.
805 */
806 #ifndef DISABLE_PSE
807 testl $CPUID_PSE, R(cpu_feature)
808 jz 1f
809 movl $PG_PS, R(pseflag)
810 movl %cr4, %eax
811 orl $CR4_PSE, %eax
812 movl %eax, %cr4
813 1:
814 #endif
815 #ifndef DISABLE_PG_G
816 testl $CPUID_PGE, R(cpu_feature)
817 jz 2f
818 movl $PG_G, R(pgeflag)
819 movl %cr4, %eax
820 orl $CR4_PGE, %eax
821 movl %eax, %cr4
822 2:
823 #endif
824
825 /*
826 * Write page table entries for the kernel from btext to the end of
827 * the kernel.  Make sure to map them read+write.  We do this even if
828 * we've enabled PSE above; we'll just switch the corresponding kernel
829 * PDEs before we turn on paging.
830 *
831 * XXX: We waste some pages here in the PSE case! DON'T BLINDLY REMOVE
832 * THIS! SMP needs the page table to be there to map the kernel P==V.
833 */
834 movl $R(btext),%eax
835 addl $PAGE_MASK, %eax
836 andl $~PAGE_MASK, %eax
837 movl $PG_RW,%edx
838 movl R(KERNend),%ecx
839 subl %eax,%ecx
840 shrl $PAGE_SHIFT,%ecx
841 fillkptphys(%edx)
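/*
 * The block above, as a rough C sketch (btext_pa is illustrative
 * shorthand for the physical address of btext):
 *
 *	vm_paddr_t pa = (btext_pa + PAGE_MASK) & ~PAGE_MASK;	// first text page
 *	fillkptphys(pa, (KERNend - pa) >> PAGE_SHIFT, PG_RW);
 */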
842
843 /* Map page directory. */
844 #ifdef PAE
845 movl R(IdlePDPT), %eax
846 movl $1, %ecx
847 fillkptphys($PG_RW)
848 #endif
849
850 movl R(IdlePTD), %eax
851 movl $NPGPTD, %ecx
852 fillkptphys($PG_RW)
853
854 /* Map proc0's UPAGES in the physical way ... */
855 movl R(p0upa), %eax
856 movl $(UAREA_PAGES), %ecx
857 fillkptphys($PG_RW)
858
859 /* Map proc0's KSTACK in the physical way ... */
860 movl R(p0kpa), %eax
861 movl $(KSTACK_PAGES), %ecx
862 fillkptphys($PG_RW)
863
864 /* Map ISA hole */
865 movl $ISA_HOLE_START, %eax
866 movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
867 fillkptphys($PG_RW)
868
869 /* Map space for the vm86 region */
870 movl R(vm86phystk), %eax
871 movl $4, %ecx
872 fillkptphys($PG_RW)
873
874 /* Map page 0 into the vm86 page table */
875 movl $0, %eax
876 movl $0, %ebx
877 movl $1, %ecx
878 fillkpt(R(vm86pa), $PG_RW|PG_U)
879
880 /* ...likewise for the ISA hole */
881 movl $ISA_HOLE_START, %eax
882 movl $ISA_HOLE_START>>PAGE_SHIFT, %ebx
883 movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
884 fillkpt(R(vm86pa), $PG_RW|PG_U)
885
886 #ifdef SMP
887 /* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
888 movl R(cpu0pp), %eax
889 movl $1, %ecx
890 fillkptphys($PG_RW)
891
892 /* Map SMP page table page into global kmem FWIW */
893 movl R(SMPptpa), %eax
894 movl $1, %ecx
895 fillkptphys($PG_RW)
896
897 /* Map the private page into the SMP page table */
898 movl R(cpu0pp), %eax
899 movl $0, %ebx /* pte offset = 0 */
900 movl $1, %ecx /* one private page coming right up */
901 fillkpt(R(SMPptpa), $PG_RW)
902
903 /* ... and put the SMP page table page in the pde. */
904 movl R(SMPptpa), %eax
905 movl $MPPTDI, %ebx
906 movl $1, %ecx
907 fillkpt(R(IdlePTD), $PG_RW)
908
909 /* Fake up a VA for the local apic to allow early traps. */
910 ALLOCPAGES(1)
911 movl %esi, %eax
912 movl $(NPTEPG-1), %ebx /* pte offset = NPTEPG-1 */
913 movl $1, %ecx /* one private pt coming right up */
914 fillkpt(R(SMPptpa), $PG_RW)
915 #endif /* SMP */
916
917 /* install a pde for temporary double map of bottom of VA */
918 movl R(KPTphys), %eax
919 xorl %ebx, %ebx
920 movl $NKPT, %ecx
921 fillkpt(R(IdlePTD), $PG_RW)
922
923 /*
924 * For the non-PSE case, install PDEs for PTs covering the kernel.
925 * For the PSE case, do the same, but clobber the ones corresponding
926 * to the kernel (from btext to KERNend) with 4M ('PS') PDEs immediately
927 * after.
928 */
929 movl R(KPTphys), %eax
930 movl $KPTDI, %ebx
931 movl $NKPT, %ecx
932 fillkpt(R(IdlePTD), $PG_RW)
933 cmpl $0,R(pseflag)
934 je done_pde
935
936 movl R(KERNend), %ecx
937 movl $KERNLOAD, %eax
938 subl %eax, %ecx
939 shrl $PDRSHIFT, %ecx
940 movl $(KPTDI+(KERNLOAD/(1 << PDRSHIFT))), %ebx
941 shll $PDESHIFT, %ebx
942 addl R(IdlePTD), %ebx
943 orl $(PG_V|PG_RW|PG_PS), %eax
944 1: movl %eax, (%ebx)
945 addl $(1 << PDRSHIFT), %eax
946 addl $PDESIZE, %ebx
947 loop 1b
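/*
 * The loop above replaces the 4K kernel mappings with 4M "PS" page
 * directory entries; roughly (illustrative C sketch, pd is the PTD
 * still addressed physically here):
 *
 *	pd_entry_t *pd = (pd_entry_t *)IdlePTD;
 *	int nchunks = (KERNend - KERNLOAD) >> PDRSHIFT;
 *	vm_paddr_t pa = KERNLOAD;
 *	for (int i = 0; i < nchunks; i++, pa += (1 << PDRSHIFT))
 *		pd[KPTDI + (KERNLOAD >> PDRSHIFT) + i] = pa | PG_V | PG_RW | PG_PS;
 */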
948
949 done_pde:
950 /* install a pde recursively mapping page directory as a page table */
951 movl R(IdlePTD), %eax
952 movl $PTDPTDI, %ebx
953 movl $NPGPTD,%ecx
954 fillkpt(R(IdlePTD), $PG_RW)
955
956 #ifdef PAE
957 movl R(IdlePTD), %eax
958 xorl %ebx, %ebx
959 movl $NPGPTD, %ecx
960 fillkpt(R(IdlePDPT), $0x0)
961 #endif
962
963 ret