FreeBSD/Linux Kernel Cross Reference
sys/osfmk/i386/mp_desc.c


    1 /*
    2  * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
    7  * 
    8  * This file contains Original Code and/or Modifications of Original Code
    9  * as defined in and that are subject to the Apple Public Source License
   10  * Version 2.0 (the 'License'). You may not use this file except in
   11  * compliance with the License. Please obtain a copy of the License at
   12  * http://www.opensource.apple.com/apsl/ and read it before using this
   13  * file.
   14  * 
   15  * The Original Code and all software distributed under the License are
   16  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   17  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   18  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   19  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
   20  * Please see the License for the specific language governing rights and
   21  * limitations under the License.
   22  * 
   23  * @APPLE_LICENSE_HEADER_END@
   24  */
   25 /*
   26  * @OSF_COPYRIGHT@
   27  */
   28 /* 
   29  * Mach Operating System
   30  * Copyright (c) 1991,1990 Carnegie Mellon University
   31  * All Rights Reserved.
   32  * 
   33  * Permission to use, copy, modify and distribute this software and its
   34  * documentation is hereby granted, provided that both the copyright
   35  * notice and this permission notice appear in all copies of the
   36  * software, derivative works or modified versions, and any portions
   37  * thereof, and that both notices appear in supporting documentation.
   38  * 
   39  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   40  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
   41  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   42  * 
   43  * Carnegie Mellon requests users of this software to return to
   44  * 
   45  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   46  *  School of Computer Science
   47  *  Carnegie Mellon University
   48  *  Pittsburgh PA 15213-3890
   49  * 
   50  * any improvements or extensions that they make and grant Carnegie Mellon
   51  * the rights to redistribute these changes.
   52  */
   53 
   54 /*
   55  */
   56 
   57 #include <cpus.h>
   58 
   59 #if     NCPUS > 1
   60 
   61 #include <kern/cpu_number.h>
   62 #include <kern/cpu_data.h>
   63 #include <mach/machine.h>
   64 #include <vm/vm_kern.h>
   65 
   66 #include <i386/mp_desc.h>
   67 #include <i386/lock.h>
   68 #include <i386/misc_protos.h>
   69 #include <i386/mp.h>
   70 
   71 #include <kern/misc_protos.h>
   72 
   73 #include <mach_kdb.h>
   74 
   75 /*
   76  * The i386 needs an interrupt stack to keep the PCB stack from being
   77  * overrun by interrupts.  All interrupt stacks MUST lie at lower addresses
    78  * than any thread's kernel stack.
   79  */
   80 
   81 /*
   82  * Addresses of bottom and top of interrupt stacks.
   83  */
   84 vm_offset_t     interrupt_stack[NCPUS];
   85 vm_offset_t     int_stack_top[NCPUS];
   86 
   87 /*
   88  * Barrier address.
   89  */
   90 vm_offset_t     int_stack_high;
   91 
   92 /*
    93  * First CPU's interrupt stack.
   94  */
   95 extern char             intstack[];     /* bottom */
   96 extern char             eintstack[];    /* top */
   97 
   98 /*
   99  * We allocate interrupt stacks from physical memory.
  100  */
  101 extern
  102 vm_offset_t     avail_start;
  103 
  104 /*
  105  * Multiprocessor i386/i486 systems use a separate copy of the
  106  * GDT, IDT, LDT, and kernel TSS per processor.  The first three
  107  * are separate to avoid lock contention: the i386 uses locked
  108  * memory cycles to access the descriptor tables.  The TSS is
  109  * separate since each processor needs its own kernel stack,
  110  * and since using a TSS marks it busy.
  111  */
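/*
 * [Editorial sketch, not part of the original file.]  The per-CPU bundle
 * manipulated below is declared in i386/mp_desc.h; its shape, as inferred
 * from the field accesses in this file, is roughly the following (the array
 * sizes IDTSZ/GDTSZ/LDTSZ come from the i386 segment headers, and dbtss is
 * only meaningful when MACH_KDB is configured):
 *
 *	struct mp_desc_table {
 *		struct fake_descriptor	idt[IDTSZ];	// per-CPU IDT
 *		struct fake_descriptor	gdt[GDTSZ];	// per-CPU GDT
 *		struct fake_descriptor	ldt[LDTSZ];	// per-CPU LDT
 *		struct i386_tss		ktss;		// per-CPU kernel TSS
 *		struct i386_tss		dbtss;		// per-CPU debugger TSS
 *	};
 *
 * One such table is carved out of low physical memory per slave CPU in
 * interrupt_stack_alloc() below; the master CPU keeps the boot-time tables.
 */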
  112 
  113 /*
  114  * Allocated descriptor tables.
  115  */
  116 struct mp_desc_table    *mp_desc_table[NCPUS] = { 0 };
  117 
  118 /*
  119  * Pointer to TSS for access in load_context.
  120  */
  121 struct i386_tss         *mp_ktss[NCPUS] = { 0 };
  122 
  123 #if     MACH_KDB
  124 /*
  125  * Pointer to TSS for debugger use.
  126  */
  127 struct i386_tss         *mp_dbtss[NCPUS] = { 0 };
  128 #endif  /* MACH_KDB */
  129 
  130 /*
  131  * Pointer to GDT to reset the KTSS busy bit.
  132  */
  133 struct fake_descriptor  *mp_gdt[NCPUS] = { 0 };
  134 struct fake_descriptor  *mp_idt[NCPUS] = { 0 };
  135 struct fake_descriptor  *mp_ldt[NCPUS] = { 0 };
  136 
  137 /*
  138  * Allocate and initialize the per-processor descriptor tables.
  139  */
  140 
  141 struct fake_descriptor ldt_desc_pattern = {
  142         (unsigned int) 0,
  143         LDTSZ * sizeof(struct fake_descriptor) - 1,
  144         0,
  145         ACC_P|ACC_PL_K|ACC_LDT
  146 };
  147 struct fake_descriptor tss_desc_pattern = {
  148         (unsigned int) 0,
  149         sizeof(struct i386_tss),
  150         0,
  151         ACC_P|ACC_PL_K|ACC_TSS
  152 };
  153 
  154 struct fake_descriptor cpudata_desc_pattern = {
  155         (unsigned int) 0,
  156         sizeof(cpu_data_t)-1,
  157         SZ_32,
  158         ACC_P|ACC_PL_K|ACC_DATA_W
  159 };
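/*
 * [Editorial sketch, not part of the original file.]  The patterns above are
 * fake_descriptors: a flat {base, limit, size, access} layout that is easy to
 * initialize from C.  fix_desc() (defined elsewhere in the i386 code) rewrites
 * such entries in place into the hardware i386 descriptor format, which
 * scatters the base and limit across the 8 bytes and folds the size bits
 * (e.g. SZ_32 above) into the flags nibble.  Purely as an illustration of
 * that packing, with made-up names:
 */
#if 0	/* illustrative only -- not the kernel's fix_desc() */
static void
pack_descriptor(unsigned int base, unsigned int limit,
		unsigned int flags, unsigned int access,
		unsigned char hw[8])
{
	hw[0] = limit & 0xff;			/* limit  7..0  */
	hw[1] = (limit >> 8) & 0xff;		/* limit 15..8  */
	hw[2] = base & 0xff;			/* base   7..0  */
	hw[3] = (base >> 8) & 0xff;		/* base  15..8  */
	hw[4] = (base >> 16) & 0xff;		/* base  23..16 */
	hw[5] = access & 0xff;			/* P, DPL, type */
	hw[6] = ((flags & 0xf) << 4) |		/* G, D/B, AVL  */
		((limit >> 16) & 0xf);		/* limit 19..16 */
	hw[7] = (base >> 24) & 0xff;		/* base  31..24 */
}
#endif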
  160 
  161 struct mp_desc_table *
  162 mp_desc_init(
  163         int     mycpu)
  164 {
  165         register struct mp_desc_table *mpt;
  166 
  167         if (mycpu == master_cpu) {
  168             /*
  169              * Master CPU uses the tables built at boot time.
  170              * Just set the TSS and GDT pointers.
  171              */
  172             mp_ktss[mycpu] = &ktss;
  173 #if     MACH_KDB
  174             mp_dbtss[mycpu] = &dbtss;
  175 #endif  /* MACH_KDB */
  176             mp_gdt[mycpu] = gdt;
  177             mp_idt[mycpu] = idt;
  178             mp_ldt[mycpu] = ldt;
  179             return 0;
  180         }
  181         else {
  182             mpt = mp_desc_table[mycpu];
  183             mp_ktss[mycpu] = &mpt->ktss;
  184             mp_gdt[mycpu] = mpt->gdt;
  185             mp_idt[mycpu] = mpt->idt;
  186             mp_ldt[mycpu] = mpt->ldt;
  187 
  188             /*
  189              * Copy the tables
  190              */
  191             bcopy((char *)idt,
  192                   (char *)mpt->idt,
  193                   sizeof(idt));
  194             bcopy((char *)gdt,
  195                   (char *)mpt->gdt,
  196                   sizeof(gdt));
  197             bcopy((char *)ldt,
  198                   (char *)mpt->ldt,
  199                   sizeof(ldt));
  200             bzero((char *)&mpt->ktss,
  201                   sizeof(struct i386_tss));
  202 #if 0
  203             bzero((char *)&cpu_data[mycpu],
  204                   sizeof(cpu_data_t));
  205 #endif
  206             /* I am myself */
  207             cpu_data[mycpu].cpu_number = mycpu;
  208 
  209 #if     MACH_KDB
  210             mp_dbtss[mycpu] = &mpt->dbtss;
  211             bcopy((char *)&dbtss,
  212                   (char *)&mpt->dbtss,
  213                   sizeof(struct i386_tss));
  214 #endif  /* MACH_KDB */
  215 
  216             /*
  217              * Fix up the entries in the GDT to point to
  218              * this LDT and this TSS.
  219              */
  220             mpt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
  221             mpt->gdt[sel_idx(KERNEL_LDT)].offset =
  222                 LINEAR_KERNEL_ADDRESS + (unsigned int) mpt->ldt;
  223             fix_desc(&mpt->gdt[sel_idx(KERNEL_LDT)], 1);
  224 
  225             mpt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
  226             mpt->gdt[sel_idx(KERNEL_TSS)].offset =
  227                 LINEAR_KERNEL_ADDRESS + (unsigned int) &mpt->ktss;
  228             fix_desc(&mpt->gdt[sel_idx(KERNEL_TSS)], 1);
  229 
  230             mpt->gdt[sel_idx(CPU_DATA)] = cpudata_desc_pattern;
  231             mpt->gdt[sel_idx(CPU_DATA)].offset =
  232                 LINEAR_KERNEL_ADDRESS + (unsigned int) &cpu_data[mycpu];
  233             fix_desc(&mpt->gdt[sel_idx(CPU_DATA)], 1);
  234 
  235 #if     MACH_KDB
  236             mpt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
  237             mpt->gdt[sel_idx(DEBUG_TSS)].offset =
  238                     LINEAR_KERNEL_ADDRESS + (unsigned int) &mpt->dbtss;
  239             fix_desc(&mpt->gdt[sel_idx(DEBUG_TSS)], 1);
  240 
  241             mpt->dbtss.esp0 = (int)(db_task_stack_store +
  242                     (INTSTACK_SIZE * (mycpu + 1)) - sizeof (natural_t));
  243             mpt->dbtss.esp = mpt->dbtss.esp0;
  244             mpt->dbtss.eip = (int)&db_task_start;
  245 #endif  /* MACH_KDB */
  246 
  247             mpt->ktss.ss0 = KERNEL_DS;
  248             mpt->ktss.io_bit_map_offset = 0x0FFF;       /* no IO bitmap */
  249 
  250             return mpt;
  251         }
  252 }
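/*
 * [Editorial sketch, not part of the original file.]  mp_desc_init() only
 * builds the per-CPU tables; once a slave CPU is running it still has to load
 * them into its own GDTR, IDTR, LDTR and TR.  The real code for that lives in
 * the i386 start-up path (i386/mp.c and the low-level assembly), but
 * conceptually each slave does something like the following (helper name and
 * details are illustrative; the LINEAR_KERNEL_ADDRESS offset follows the same
 * convention as the descriptor fixups above):
 */
#if 0	/* illustrative only */
struct pseudo_descriptor {
	unsigned short	limit;
	unsigned int	base;
} __attribute__((packed));

static void
slave_load_descriptors(int mycpu)
{
	struct pseudo_descriptor pd;

	pd.limit = GDTSZ * sizeof(struct fake_descriptor) - 1;
	pd.base  = LINEAR_KERNEL_ADDRESS + (unsigned int) mp_gdt[mycpu];
	__asm__ volatile("lgdt %0" : : "m" (pd));	/* per-CPU GDT */

	pd.limit = IDTSZ * sizeof(struct fake_descriptor) - 1;
	pd.base  = LINEAR_KERNEL_ADDRESS + (unsigned int) mp_idt[mycpu];
	__asm__ volatile("lidt %0" : : "m" (pd));	/* per-CPU IDT */

	__asm__ volatile("lldt %0" : : "r" ((unsigned short) KERNEL_LDT));
	__asm__ volatile("ltr %0"  : : "r" ((unsigned short) KERNEL_TSS));
}
#endif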
  253 
  254 /*
  255  * Called after all CPUs have been found, but before the VM system
  256  * is running.  The machine array must show which CPUs exist.
  257  */
  258 void
  259 interrupt_stack_alloc(void)
  260 {
  261         register int            i;
  262         int                     cpu_count;
  263         vm_offset_t             stack_start;
  264         struct mp_desc_table    *mpt;
  265 
  266         /*
  267          * Number of CPUs possible.
  268          */
  269         cpu_count = wncpu;
  270 
  271         /*
  272          * Allocate an interrupt stack for each CPU except for
  273          * the master CPU (which uses the bootstrap stack)
  274          */
  275         stack_start = phystokv(avail_start);
  276         avail_start = round_page(avail_start + INTSTACK_SIZE*(cpu_count-1));
  277         bzero((char *)stack_start, INTSTACK_SIZE*(cpu_count-1));
  278 
  279         /*
  280          * Set up pointers to the top of the interrupt stack.
  281          */
  282         for (i = 0; i < cpu_count; i++) {
  283             if (i == master_cpu) {
  284                 interrupt_stack[i] = (vm_offset_t) intstack;
  285                 int_stack_top[i]   = (vm_offset_t) eintstack;
  286             }
  287             else {
  288                 interrupt_stack[i] = stack_start;
  289                 int_stack_top[i]   = stack_start + INTSTACK_SIZE;
  290 
  291                 stack_start += INTSTACK_SIZE;
  292             }
  293         }
  294 
  295         /*
  296          * Allocate descriptor tables for each CPU except for
  297          * the master CPU (which already has them initialized)
  298          */
  299 
  300         mpt = (struct mp_desc_table *) phystokv(avail_start);
  301         avail_start = round_page((vm_offset_t)avail_start +
  302                                  sizeof(struct mp_desc_table)*(cpu_count-1));
  303         for (i = 0; i < cpu_count; i++)
  304             if (i != master_cpu)
  305                 mp_desc_table[i] = mpt++;
  306 
  307 
  308         /*
  309          * Set up the barrier address.  All thread stacks MUST
  310          * be above this address.
  311          */
  312         /*
   313  * intstack is at a higher address than stack_start for AT MPs,
   314  * so int_stack_high must point at eintstack.
   315  * XXX
   316  * But what happens if a kernel stack gets allocated below
   317  * 1 MB?  Probably never happens; there is only 640 KB available
   318  * there.
  319          */
  320         int_stack_high = (vm_offset_t) eintstack;
  321 }
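/*
 * [Editorial sketch, not part of the original file.]  int_stack_high is the
 * barrier consulted on interrupt entry: thread kernel stacks all live above
 * it and interrupt stacks below it, so a single compare of the interrupted
 * stack pointer tells the low-level handler whether it must switch to this
 * CPU's interrupt stack or is already running on one.  The real test is done
 * in the assembly interrupt path; in C it would amount to (name illustrative):
 */
#if 0	/* illustrative only */
static int
already_on_int_stack(vm_offset_t esp)
{
	return esp < int_stack_high;	/* below the barrier => interrupt stack */
}
#endif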
  322 
  323 #endif /* NCPUS > 1 */
