FreeBSD/Linux Kernel Cross Reference
sys/i386/start.s
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993-1990 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: start.s,v $
29 * Revision 2.18 93/05/10 17:46:38 rvb
30 * Use C comments
31 * [93/05/04 17:17:00 rvb]
32 *
33 * Revision 2.17 93/02/04 07:58:02 danner
34 * Replace lost ps2 asm_startup include.
35 *
36 * Revision 2.16 93/01/14 17:29:39 danner
37 * Boot_info changed.
38 * [92/12/10 17:43:44 af]
39 *
40 * Revision 2.15 92/04/04 11:51:04 rpd
41 * Changed #-style comments to /-style, for ANSI preprocessors.
42 *
43 * Revision 2.14 92/01/03 20:08:53 dbg
44 * Move symbol table and bootstrap image out of BSS.
45 * [91/08/02 dbg]
46 *
47 * Revision 2.13 91/07/31 17:40:55 dbg
48 * Add pointers to interrupt stack (for uniprocessor).
49 * [91/07/30 16:57:13 dbg]
50 *
51 * Revision 2.12 91/06/19 11:55:39 rvb
52 * cputypes.h->platforms.h
53 * [91/06/12 13:45:27 rvb]
54 *
55 * Revision 2.11 91/05/14 16:16:59 mrt
56 * Correcting copyright
57 *
58 * Revision 2.10 91/05/08 12:42:43 dbg
59 * Put parentheses around substituted immediate expressions, so
60 * that they will pass through the GNU preprocessor.
61 *
62 * Moved model-specific code to machine-dependent directories.
63 * Added startup code for multiple CPUs.
64 * [91/04/26 14:38:55 dbg]
65 *
66 * Revision 2.9 91/02/05 17:14:50 mrt
67 * Changed to new Mach copyright
68 * [91/02/01 17:38:15 mrt]
69 *
70 * Revision 2.8 90/12/20 16:37:02 jeffreyh
71 * Changes for __STDC__
72 * [90/12/07 15:43:21 jeffreyh]
73 *
74 *
75 * Revision 2.7 90/12/04 14:46:38 jsb
76 * iPSC2 -> iPSC386; ipsc2_foo -> ipsc_foo;
77 * changes for merged intel/pmap.{c,h}.
78 * [90/12/04 11:20:35 jsb]
79 *
80 * Revision 2.6 90/11/24 15:14:56 jsb
81 * Added AT386 conditional around "BIOS/DOS hack".
82 * [90/11/24 11:44:47 jsb]
83 *
84 * Revision 2.5 90/11/05 14:27:51 rpd
85 * Since we steal pages after esym for page tables, use first_avail
86 * to record the last page +1 that we stole.
87 * Tell bios to warm boot on reboot.
88 * [90/09/05 rvb]
89 *
90 * Revision 2.4 90/09/23 17:45:20 jsb
91 * Added support for iPSC386.
92 * [90/09/21 16:42:34 jsb]
93 *
94 * Revision 2.3 90/08/27 21:58:29 dbg
95 * Change fix_desc to match new fake_descriptor format.
96 * [90/07/25 dbg]
97 *
98 * Revision 2.2 90/05/03 15:37:40 dbg
99 * Created.
100 * [90/02/14 dbg]
101 *
102 */
103
104 #include <platforms.h>
105 #include <cpus.h>
106 #include <mach_kdb.h>
107 
108 #include <i386/asm.h>
109 #include <i386/proc_reg.h>
110 #include <assym.s>
111 
112 #if NCPUS > 1
113 
114 #ifdef SYMMETRY
115 #include <sqt/asm_macros.h>
116 #endif
117 
118 #endif /* NCPUS > 1 */
119 
120 /*
121 * GAS won't handle an intersegment jump with a relocatable offset.
 * Hand-assemble the far jump: opcode 0xea, then the 32-bit offset,
 * then the 16-bit segment selector.
122 */
123 #define LJMP(segment,address) \
124 .byte 0xea ;\
125 .long address ;\
126 .word segment
127 
128 
/*
 * Address-space conversion.  The kernel is linked at KERNELBASE but
 * begins executing before paging is enabled, so a physical address is
 * the link-time (virtual) address minus KERNELBASE.
 */
129 #define KVTOPHYS (-KERNELBASE)
130 #define KVTOLINEAR (0)
131 
132 #define PA(addr) (addr)+KVTOPHYS /* kernel virtual -> physical */
133 #define VA(addr) (addr)-KVTOPHYS /* physical -> kernel virtual */
134
135 .data
136 /*
137 * interrupt and bootup stack for initial processor.
 *
 * 4096 bytes are reserved by advancing the location counter; the
 * stack grows downward from _eintstack toward _intstack.
138 */
139 .align 4
140 .globl _intstack
141 _intstack:
142 .set ., .+4096 /* reserve one 4K page of stack */
143 .globl _eintstack
144 _eintstack:
145 
146 #if NCPUS == 1
/* Uniprocessor only: exported markers for the single interrupt stack. */
147 .globl _int_stack_high /* all interrupt stacks */
148 _int_stack_high: /* must lie below this */
149 .long _eintstack /* address */
150 
151 .globl _int_stack_top /* top of interrupt stack */
152 _int_stack_top:
153 .long _eintstack
154 
155 #endif
156
157 /*
158 * Pointers to GDT and IDT. These contain linear addresses.
 * Each is a 6-byte pseudo-descriptor for lgdt/lidt:
 * 16-bit limit (table size in bytes - 1) followed by 32-bit base.
159 */
160 .align 8
161 .globl EXT(gdtptr)
162 LEXT(gdtptr)
163 .word Times(8,GDTSZ)-1 /* limit: GDTSZ 8-byte entries - 1 */
164 .long EXT(gdt)+KVTOLINEAR /* linear base address */
165 
166 .align 8
167 .globl EXT(idtptr)
168 LEXT(idtptr)
169 .word Times(8,IDTSZ)-1 /* limit: IDTSZ 8-byte entries - 1 */
170 .long EXT(idt)+KVTOLINEAR /* linear base address */
171 
172 #if NCPUS > 1
/* Multiprocessor startup state, shared by pstart/slave_start below. */
173 .data
174 .globl _start_lock
175 .align 2
176 _start_lock:
177 .long 0 /* lock for 'upyet' */
178 .globl _upyet
179 _upyet:
180 .long 0 /* 1 when OK for other processors */
181 /* to start */
182 .globl _mp_boot_pde
183 _mp_boot_pde:
184 .long 0 /* phys addr of bootstrap page directory */
185 #endif /* NCPUS > 1 */
186
187 /*
188 * All CPUs start here.
189 *
190 * Environment:
191 * protected mode, no paging, flat 32-bit address space.
192 * (Code/data/stack segments have base == 0, limit == 4G)
 *
 * Cannot use the stack or any virtual (KERNELBASE-relative) address
 * until paging is enabled below; all data references go through PA().
193 */
194 .text
195 .align 2
196 .globl EXT(pstart)
197 LEXT(pstart)
198 mov $0,%ax /* fs, gs must be zeroed; */
199 mov %ax,%fs /* some bootstrappers don`t do this */
200 mov %ax,%gs /* for us */
201 
202 #if NCPUS > 1
/*
 * Serialize CPU startup: spin until _start_lock looks free, then try
 * to take it with an atomic exchange (test-and-test-and-set).
 */
203 jmp 1f
204 0: cmpl $0,PA(_start_lock)
205 jne 0b
206 1: movl $1,%eax
207 xchgl %eax,PA(_start_lock) /* locked */
208 testl %eax,%eax
209 jnz 0b /* lost the race - spin again */
210 
211 cmpl $0,PA(_upyet) /* are we first? */
212 jne _slave_start /* no -- system already up. */
213 #endif /* NCPUS > 1 */
214
215 /*
216 * Move symbol table out of the way of BSS.
217 *
218 * When kernel is loaded, at the start of BSS we have a struct boot_info:
219 * _edata:
220 * .long magic_number
221 * .long sym_size
222 * .long boot_size
223 * .long load_info_size
224 * sym_start:
225 * kernel symbols
226 * .align 2
227 * boot_start:
228 * bootstrap image
229 * .align 2
230 * load_info_start:
231 * bootstrap load information
232 *
233 * all of which must be moved somewhere else, since it
234 * is sitting in the kernel BSS. In addition, the bootstrap
235 * image must be moved to a machine page boundary, so that we get:
236 *
237 * _edata:
238 * BSS
239 * _end: <- kern_sym_start (VA)
240 * kernel symbols .
241 * .align 2 . (kern_sym_size)
242 * .
243 * .align VAX_PAGE_SIZE
244 * <- boot_start (VA)
245 * bootstrap image
246 * <- load_info_start (VA)
247 * load information
248 * <- %ebx (PA)
249 *
 * Both copies below run backwards (std) because source and
 * destination regions may overlap with dst > src.
250 */
251 
252 lea PA(_edata),%esi /* point to symbol size word */
253 movl BI_SYM_SIZE(%esi),%edx /* get symbol size */
254 
255 lea PA(_end)+NBPG-1(%edx),%edi
256 /* point after BSS, add symbol */
257 /* size, and round up to */
258 andl $-NBPG,%edi /* machine page boundary */
259 
260 lea -KVTOPHYS(%edi),%eax /* save virtual address */
261 movl %eax,PA(_boot_start) /* of start of bootstrap */
262 movl BI_BOOT_SIZE(%esi),%ecx /* get size of bootstrap */
263 movl %ecx,PA(_boot_size) /* save size of bootstrap */
264 lea -KVTOPHYS(%edi,%ecx),%eax
265 movl %eax,PA(_load_info_start)
266 /* save virtual address */
267 /* of start of loader info */
268 movl BI_LOAD_INFO_SIZE(%esi),%eax /* get size of loader info */
269 movl %eax,PA(_load_info_size)
270 /* save size of loader info */
271 addl %eax,%ecx /* get total size to move */
272 
273 leal BI_SIZE(%esi,%edx),%esi /* point to start of boot image - source */
274 
275 leal (%edi,%ecx),%ebx /* point to new location of */
276 /* end of bootstrap - next */
277 /* available physical address */
278 
/* copy bootstrap + load info backwards, one long at a time */
279 lea -4(%esi,%ecx),%esi /* point to end of src - 4 */
280 lea -4(%edi,%ecx),%edi /* point to end of dst - 4 */
281 shrl $2,%ecx /* move by longs */
282 std /* move backwards */
283 rep
284 movsl /* move bootstrap and loader_info */
285 
286 movl $_end,PA(_kern_sym_start)
287 /* save virtual address */
288 /* of start of symbols */
289 movl %edx,PA(_kern_sym_size) /* save symbol table size */
290 testl %edx,%edx /* any symbols? */
291 jz 0f /* no symbols - skip the copy */
292 
293 /* %esi already points to start of boot-4 */
294 /* == end of symbol table (source) - 4 */
295 leal PA(_end)-4(%edx),%edi /* point to end of dst - 4 */
296 movl %edx,%ecx /* copy size */
297 shrl $2,%ecx /* move by longs */
298 std /* move backwards */
299 rep
300 movsl /* move symbols */
301 0:
302 cld /* reset direction flag */
303
304 /*
305 * Get startup parameters.
 *
 * Exactly one machine-dependent asm_startup fragment is included here.
 * On exit %ebx holds the first available physical address (consumed by
 * the page-table code below).
306 */
307 
308 #ifdef SYMMETRY
309 #include <sqt/asm_startup.h>
310 #endif
311 #ifdef AT386
312 #include <i386at/asm_startup.h>
313 #endif
314 #ifdef iPSC386
315 #include <i386ipsc/asm_startup.h>
316 #endif
317 #ifdef PS2
318 #include <i386ps2/asm_startup.h>
319 #endif
320
321 /*
322 * Build initial page table directory and page tables.
323 * %ebx holds first available physical address.
 *
 * Register roles in this section:
 *  %ebx - first the page directory phys addr, then the pde slot cursor
 *  %edi - next pte slot to fill (physical)
 *  %esi - end of the current pte page
 *  %eax - pte being written (phys page | INTEL_PTE_KERNEL)
324 */
325 
326 addl $(NBPG-1),%ebx /* round first avail physical addr */
327 andl $(-NBPG),%ebx /* to machine page size */
328 leal -KVTOPHYS(%ebx),%eax /* convert to virtual address */
329 movl %eax,PA(_kpde) /* save as kernel page table directory */
330 movl %ebx,%cr3 /* set physical address in CR3 now */
331 
332 movl %ebx,%edi /* clear page table directory */
333 movl $(PTES_PER_PAGE),%ecx /* one page of ptes */
334 xorl %eax,%eax
335 cld
336 rep
337 stosl /* edi now points to next page */
338 
339 /*
340 * Use next few pages for page tables.
341 */
342 addl $(KERNELBASEPDE),%ebx /* point to pde for kernel base */
343 movl %edi,%esi /* point to end of current pte page */
344 
345 /*
346 * Enter 1-1 mappings for kernel and for kernel page tables.
 * Loop invariant: %eax maps every physical page up to (but not
 * including) the pte page currently being filled; whenever %edi
 * crosses into a fresh pte page, a new pde is installed for it.
347 */
348 movl $(INTEL_PTE_KERNEL),%eax /* set up pte prototype */
349 0:
350 cmpl %esi,%edi /* at end of pte page? */
351 jb 1f /* no - still room in this pte page */
352 movl %edi,%edx /* get pte address (physical) */
353 andl $(-NBPG),%edx /* mask out offset in page */
354 orl $(INTEL_PTE_KERNEL),%edx /* add pte bits */
355 movl %edx,(%ebx) /* set pde */
356 addl $4,%ebx /* point to next pde */
357 movl %edi,%esi /* point to */
358 addl $(NBPG),%esi /* end of new pte page */
359 1:
360 movl %eax,(%edi) /* set pte */
361 addl $4,%edi /* advance to next pte */
362 addl $(NBPG),%eax /* advance to next phys page */
363 cmpl %edi,%eax /* have we mapped this pte page yet? */
364 jb 0b /* loop if not */
365 
366 /*
367 * Zero rest of last pte page.
368 */
369 xor %eax,%eax /* don`t map yet */
370 2: cmpl %esi,%edi /* at end of pte page? */
371 jae 3f
372 movl %eax,(%edi) /* zero mapping */
373 addl $4,%edi
374 jmp 2b
375 3:
376
377 #if NCPUS > 1
378 /*
379 * Grab (waste?) another page for a bootstrap page directory
380 * for the other CPUs. We don't want the running CPUs to see
381 * addresses 0..3fffff mapped 1-1.
 *
 * Relies on %eax still being zero and DF still clear from the
 * zero-fill loop just above.
382 */
383 movl %edi,PA(_mp_boot_pde) /* save its physical address */
384 movl $(PTES_PER_PAGE),%ecx /* and clear it */
385 rep
386 stosl
387 #endif /* NCPUS > 1 */
388 movl %edi,PA(_first_avail) /* save first available phys addr */
389
390 /*
391 * pmap_bootstrap will enter rest of mappings.
392 */
393 
394 /*
395 * Fix initial descriptor tables.
 *
 * No usable stack yet, so each call passes its return address in %ebx
 * (fix_desc's assembly-call linkage): %esi = table, %ecx = entry count.
396 */
397 lea PA(_idt),%esi /* fix IDT */
398 movl $(IDTSZ),%ecx
399 movl $(PA(fix_idt_ret)),%ebx
400 jmp fix_desc /* (cannot use stack) */
401 fix_idt_ret:
402 
403 lea PA(_gdt),%esi /* fix GDT */
404 movl $(GDTSZ),%ecx
405 movl $(PA(fix_gdt_ret)),%ebx
406 jmp fix_desc /* (cannot use stack) */
407 fix_gdt_ret:
408 
409 lea PA(_ldt),%esi /* fix LDT */
410 movl $(LDTSZ),%ecx
411 movl $(PA(fix_ldt_ret)),%ebx
412 jmp fix_desc /* (cannot use stack) */
413 fix_ldt_ret:
414
415 /*
416 * Turn on paging.
 *
 * A temporary identity mapping of 0..3fffff is installed first so the
 * instructions that flip CR0.PG keep executing at the same addresses
 * across the transition; the jump through %ebx then lands in
 * KERNELBASE-relative code.
417 */
418 movl %cr3,%eax /* retrieve kernel PDE phys address */
419 movl KERNELBASEPDE(%eax),%ecx
420 /* point to pte for KERNELBASE */
421 movl %ecx,(%eax) /* set it also as pte for location */
422 /* 0..3fffff, so that the code */
423 /* that enters paged mode is mapped */
424 /* to identical addresses after */
425 /* paged mode is enabled */
426 
427 movl $_pag_start,%ebx /* first paged code address */
428 
429 movl %cr0,%eax
430 orl $(CR0_PG),%eax /* set PG bit in CR0 */
431 movl %eax,%cr0 /* to enable paging */
432 
433 jmp *%ebx /* flush prefetch queue */
434 
435 /*
436 * We are now paging, and can run with correct addresses.
437 */
438 _pag_start:
439 lgdt EXT(gdtptr) /* load GDT */
440 lidt EXT(idtptr) /* load IDT */
441 LJMP(KERNEL_CS,_vstart) /* switch to kernel code segment */
442 
443 /*
444 * Master is now running with correct addresses.
 * Load the remaining segment state, switch to the bootup stack,
 * and enter C.
445 */
446 _vstart:
447 mov $(KERNEL_DS),%ax /* set kernel data segment */
448 mov %ax,%ds
449 mov %ax,%es
450 mov %ax,%ss
451 mov %ax,EXT(ktss)+TSS_SS0 /* set kernel stack segment */
452 /* for traps to kernel */
453 
454 movw $(KERNEL_LDT),%ax /* get LDT segment */
455 lldt %ax /* load LDT */
456 movw $(KERNEL_TSS),%ax
457 ltr %ax /* set up KTSS */
458 
459 lea EXT(eintstack),%esp /* switch to the bootup stack */
460 call EXT(machine_startup) /* run C code */
461 /*NOTREACHED*/
462 hlt
463
464 #if NCPUS > 1
465 /*
466 * We aren't the first. Call slave_main to initialize the processor
467 * and get Mach going on it.
 *
 * Same paging-transition trick as the master, but through the
 * separate bootstrap page directory so the already-running CPUs
 * never see 0..3fffff identity-mapped in the real kernel PDE.
468 */
469 .align 2
470 .globl EXT(slave_start)
471 LEXT(slave_start)
472 cli /* disable interrupts, so we don`t */
473 /* need IDT for a while */
474 movl PA(_kpde),%ebx /* get PDE virtual address */
475 addl $(KVTOPHYS),%ebx /* convert to physical address */
476 
477 movl PA(_mp_boot_pde),%edx /* point to the bootstrap PDE */
478 movl KERNELBASEPDE(%ebx),%eax
479 /* point to pte for KERNELBASE */
480 movl %eax,KERNELBASEPDE(%edx)
481 /* set in bootstrap PDE */
482 movl %eax,(%edx) /* set it also as pte for location */
483 /* 0..3fffff, so that the code */
484 /* that enters paged mode is mapped */
485 /* to identical addresses after */
486 /* paged mode is enabled */
487 movl %edx,%cr3 /* use bootstrap PDE to enable paging */
488 
489 movl $_spag_start,%edx /* first paged code address */
490 
491 movl %cr0,%eax
492 orl $(CR0_PG),%eax /* set PG bit in CR0 */
493 movl %eax,%cr0 /* to enable paging */
494 
495 jmp *%edx /* flush prefetch queue. */
496 
497 /*
498 * We are now paging, and can run with correct addresses.
499 */
500 _spag_start:
501 movl %ebx,%cr3 /* switch to the real kernel PDE */
502 
503 lgdt EXT(gdtptr) /* load GDT */
504 lidt EXT(idtptr) /* load IDT */
505 LJMP(KERNEL_CS,_svstart) /* switch to kernel code segment */
506 
507 /*
508 * Slave is now running with correct addresses.
509 */
510 _svstart:
511 mov $(KERNEL_DS),%ax /* set kernel data segment */
512 mov %ax,%ds
513 mov %ax,%es
514 mov %ax,%ss
515 
516 CPU_NUMBER(%eax) /* get CPU number */
517 movl EXT(interrupt_stack)(,%eax,4),%esp
518 /* get stack */
519 addl $(INTSTACK_SIZE),%esp /* point to top */
520 xorl %ebp,%ebp /* for completeness */
521 
522 movl $0,%ecx /* unlock start_lock */
523 xchgl %ecx,_start_lock /* since we are no longer using */
524 /* bootstrap stack */
525 
526 /*
527 * switch to the per-cpu descriptor tables
 * (a 6-byte lgdt/lidt pseudo-descriptor is built on the stack:
 * word at 0(%esp) = limit, long at 2(%esp) = linear base)
528 */
529 pushl %eax /* pass CPU number */
530 call _mp_desc_init /* set up local table */
531 /* pointer returned in %eax */
532 subl $4,%esp /* get space to build pseudo-descriptors */
533 
534 movw $(GDTSZ*8-1),0(%esp) /* set GDT size in GDT descriptor */
535 lea MP_GDT+KVTOLINEAR(%eax),%edx
536 movl %edx,2(%esp) /* point to local GDT (linear address) */
537 lgdt 0(%esp) /* load new GDT */
538 
539 movw $(IDTSZ*8-1),0(%esp) /* set IDT size in IDT descriptor */
540 lea MP_IDT+KVTOLINEAR(%eax),%edx
541 movl %edx,2(%esp) /* point to local IDT (linear address) */
542 lidt 0(%esp) /* load new IDT */
543 
544 movw $(KERNEL_LDT),%ax
545 lldt %ax /* load new LDT */
546 
547 movw $(KERNEL_TSS),%ax
548 ltr %ax /* load new KTSS */
549 
550 call _slave_main /* start MACH */
551 /*NOTREACHED*/
552 hlt
553 #endif /* NCPUS > 1 */
554
555 /*
556 * Convert a descriptor from fake to real format.
557 *
558 * Calls from assembly code:
559 * %ebx = return address (physical) CANNOT USE STACK
560 * %esi = descriptor table address (physical)
561 * %ecx = number of descriptors
562 *
563 * Calls from C:
564 * 0(%esp) = return address
565 * 4(%esp) = descriptor table address (physical)
566 * 8(%esp) = number of descriptors
567 *
568 * Fake descriptor format:
569 * bytes 0..3 base 31..0
570 * bytes 4..5 limit 15..0
571 * byte 6 access byte 2 | limit 19..16
572 * byte 7 access byte 1
573 *
574 * Real descriptor format:
575 * bytes 0..1 limit 15..0
576 * bytes 2..3 base 15..0
577 * byte 4 base 23..16
578 * byte 5 access byte 1
579 * byte 6 access byte 2 | limit 19..16
580 * byte 7 base 31..24
581 *
582 * Fake gate format:
583 * bytes 0..3 offset
584 * bytes 4..5 selector
585 * byte 6 word count << 4 (to match fake descriptor)
586 * byte 7 access byte 1
587 *
588 * Real gate format:
589 * bytes 0..1 offset 15..0
590 * bytes 2..3 selector
591 * byte 4 word count
592 * byte 5 access byte 1
593 * bytes 6..7 offset 31..16
594 */
/*
 * C-callable wrapper: sets up the %esi/%ecx/%ebx register linkage
 * expected by the internal fix_desc routine below.
 * B_ARG0/B_ARG1 are frame-relative argument macros - presumably from
 * i386/asm.h; confirm there.
 */
595 .globl EXT(fix_desc)
596 LEXT(fix_desc)
597 pushl %ebp /* set up */
598 movl %esp,%ebp /* stack frame */
599 pushl %esi /* save registers */
600 pushl %ebx
601 movl B_ARG0,%esi /* point to first descriptor */
602 movl B_ARG1,%ecx /* get number of descriptors */
603 lea 0f,%ebx /* get return address */
604 jmp fix_desc /* call internal routine */
605 0: popl %ebx /* restore registers */
606 popl %esi
607 leave /* pop stack frame */
608 ret /* return */
609
/*
 * Internal routine: rewrite %ecx descriptors at %esi in place from
 * "fake" to "real" format (layouts documented above), then jump to
 * the return address in %ebx.  Never touches the stack.
 * Clobbers %eax, %ecx, %edx, %esi, flags.
 */
610 fix_desc:
611 0:
612 movw 6(%esi),%dx /* get access byte */
613 movb %dh,%al
614 andb $0x14,%al /* keep S bit (0x10) and type bit 2 (0x04) */
615 cmpb $0x04,%al /* gate or descriptor? */
616 je 1f /* S=0 with type bit 2 set -> gate */
617 
618 /* descriptor */
619 movl 0(%esi),%eax /* get base in eax */
620 rol $16,%eax /* swap 15..0 with 31..16 */
621 /* (15..0 in correct place) */
622 movb %al,%dl /* combine bits 23..16 with ACC1 */
623 /* in dh/dl */
624 movb %ah,7(%esi) /* store bits 31..24 in correct place */
625 movw 4(%esi),%ax /* move limit bits 0..15 to word 0 */
626 movl %eax,0(%esi) /* store (bytes 0..3 correct) */
627 movw %dx,4(%esi) /* store bytes 4..5 */
628 jmp 2f
629 
630 /* gate */
631 1:
632 movw 4(%esi),%ax /* get selector */
633 shrb $4,%dl /* shift word count to proper place */
634 movw %dx,4(%esi) /* store word count / ACC1 */
635 movw 2(%esi),%dx /* get offset 16..31 */
636 movw %dx,6(%esi) /* store in correct place */
637 movw %ax,2(%esi) /* store selector in correct place */
638 2:
639 addl $8,%esi /* bump to next descriptor */
640 loop 0b /* repeat */
641 jmp *%ebx /* all done */
642
643 /*
644 * put arg in kbd leds and spin a while
645 * eats eax, ecx, edx
 * (edx is not used in the visible code - possibly a stale note or
 * clobbered by S_ARG0; confirm against i386/asm.h)
 *
 * Debugging aid: writes the LED-set command (0xed) then the LED bitmask
 * to the 8042 keyboard controller, polling the status port around each
 * byte, then busy-waits so the LEDs stay visible.
646 */
647 #define K_RDWR 0x60
648 #define K_CMD_LEDS 0xed
649 #define K_STATUS 0x64
650 #define K_IBUF_FULL 0x02 /* input (to kbd) buffer full */
651 #define K_OBUF_FULL 0x01 /* output (from kbd) buffer full */
652 
653 ENTRY(set_kbd_leds)
654 mov S_ARG0,%cl /* save led value */
655 
656 0: inb $(K_STATUS),%al /* get kbd status */
657 testb $(K_IBUF_FULL),%al /* input busy? */
658 jne 0b /* loop until not */
659 
660 mov $(K_CMD_LEDS),%al /* K_CMD_LEDS */
661 outb %al,$(K_RDWR) /* to kbd */
662 
663 0: inb $(K_STATUS),%al /* get kbd status */
664 testb $(K_OBUF_FULL),%al /* output present? */
665 je 0b /* loop if not */
666 
667 inb $(K_RDWR),%al /* read status (and discard) */
668 
669 0: inb $(K_STATUS),%al /* get kbd status */
670 testb $(K_IBUF_FULL),%al /* input busy? */
671 jne 0b /* loop until not */
672 
673 mov %cl,%al /* move led value */
674 outb %al,$(K_RDWR) /* to kbd */
675 
676 movl $10000000,%ecx /* spin */
677 0: nop
678 nop
679 loop 0b /* a while */
680 
681 ret
682
Cache object: ad76ce05ca14fc3b53e8e1fac48dd602
|