FreeBSD/Linux Kernel Cross Reference
sys/i386/mp_desc.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1991,1990 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26
27 /*
28 * HISTORY
29 * $Log: mp_desc.c,v $
30 * Revision 2.5 91/11/12 11:50:51 rvb
31 * Added simple_lock_pause.
32 * [91/11/12 rpd]
33 *
34 * Revision 2.4 91/07/31 17:39:10 dbg
35 * Move interrupt-stack allocation for multiprocessors here.
36 * [91/07/30 16:54:10 dbg]
37 *
38 * Revision 2.3 91/05/14 16:12:08 mrt
39 * Correcting copyright
40 *
41 * Revision 2.2 91/05/08 12:39:33 dbg
42 * Created.
43 * [91/03/21 dbg]
44 *
45 */
46
47 #include <cpus.h>
48
49 #if NCPUS > 1
50
51 #include <kern/cpu_number.h>
52 #include <mach/machine.h>
53 #include <vm/vm_kern.h>
54
55 #include <i386/mp_desc.h>
56 #include <i386/lock.h>
57
/*
 * The i386 needs an interrupt stack to keep the PCB stack from being
 * overrun by interrupts.  All interrupt stacks MUST lie at lower addresses
 * than any thread's kernel stack.
 */

/*
 * Addresses of the bottom and top of each processor's interrupt
 * stack, indexed by cpu number.  Filled in by interrupt_stack_alloc.
 */
vm_offset_t interrupt_stack[NCPUS];
vm_offset_t int_stack_top[NCPUS];

/*
 * Barrier address: every interrupt stack lies below this address,
 * and every thread kernel stack must lie above it (set to the end
 * of the last allocated interrupt stack).
 */
vm_offset_t int_stack_high;
74
/*
 * First cpu's interrupt stack — bottom and top.  The storage itself
 * is defined elsewhere (presumably the boot assembly — confirm
 * against locore).  Declared extern: without it, these are tentative
 * definitions whose incomplete array type completes to char[1],
 * relying on legacy common-symbol linking to merge with the real
 * definition; modern linkers (-fno-common, the default since
 * GCC 10) reject that as a duplicate symbol.
 */
extern char intstack[];	/* bottom */
extern char eintstack[];	/* top */
80
/*
 * Interrupt stacks for the slave CPUs are allocated straight from
 * physical memory starting at avail_start (see
 * interrupt_stack_alloc below).
 */
extern
vm_offset_t avail_start;

/*
 * Multiprocessor i386/i486 systems use a separate copy of the
 * GDT, IDT, LDT, and kernel TSS per processor.  The first three
 * are separate to avoid lock contention: the i386 uses locked
 * memory cycles to access the descriptor tables.  The TSS is
 * separate since each processor needs its own kernel stack,
 * and since using a TSS marks it busy.
 */

/*
 * Per-processor descriptor tables, set up by mp_desc_init.  The
 * master CPU keeps using the boot-time tables, so its entry here
 * remains 0.
 */
struct mp_desc_table *mp_desc_table[NCPUS] = { 0 };

/*
 * Pointer to each processor's TSS, for access in load_context.
 */
struct i386_tss *mp_ktss[NCPUS] = { 0 };

/*
 * Pointer to each processor's GDT, to reset the KTSS busy bit.
 */
struct fake_descriptor *mp_gdt[NCPUS] = { 0 };

/*
 * Boot-time tables, used for initialization and by the master
 * processor.
 */
extern struct fake_descriptor idt[IDTSZ];
extern struct fake_descriptor gdt[GDTSZ];
extern struct fake_descriptor ldt[LDTSZ];
extern struct i386_tss ktss;
118
119 /*
120 * Allocate and initialize the per-processor descriptor tables.
121 */
122
123 struct fake_descriptor ldt_desc_pattern = {
124 (unsigned int) 0,
125 LDTSZ * sizeof(struct fake_descriptor) - 1,
126 0,
127 ACC_P|ACC_PL_K|ACC_LDT
128 };
129 struct fake_descriptor tss_desc_pattern = {
130 (unsigned int) 0,
131 sizeof(struct i386_tss),
132 0,
133 ACC_P|ACC_PL_K|ACC_TSS
134 };
135
136 struct mp_desc_table *
137 mp_desc_init(mycpu)
138 register int mycpu;
139 {
140 register struct mp_desc_table *mpt;
141
142 if (mycpu == master_cpu) {
143 /*
144 * Master CPU uses the tables built at boot time.
145 * Just set the TSS and GDT pointers.
146 */
147 mp_ktss[mycpu] = &ktss;
148 mp_gdt[mycpu] = gdt;
149 return 0;
150 }
151 else {
152 /*
153 * Other CPUs allocate the table from the bottom of
154 * the interrupt stack.
155 */
156 mpt = (struct mp_desc_table *) interrupt_stack[mycpu];
157
158 mp_desc_table[mycpu] = mpt;
159 mp_ktss[mycpu] = &mpt->ktss;
160 mp_gdt[mycpu] = mpt->gdt;
161
162 /*
163 * Copy the tables
164 */
165 bcopy((char *)idt,
166 (char *)mpt->idt,
167 sizeof(idt));
168 bcopy((char *)gdt,
169 (char *)mpt->gdt,
170 sizeof(gdt));
171 bcopy((char *)ldt,
172 (char *)mpt->ldt,
173 sizeof(ldt));
174 bzero((char *)&mpt->ktss,
175 sizeof(struct i386_tss));
176
177 /*
178 * Fix up the entries in the GDT to point to
179 * this LDT and this TSS.
180 */
181 mpt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
182 mpt->gdt[sel_idx(KERNEL_LDT)].offset =
183 (unsigned int) mpt->ldt;
184 fix_desc(&mpt->gdt[sel_idx(KERNEL_LDT)], 1);
185
186 mpt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
187 mpt->gdt[sel_idx(KERNEL_TSS)].offset =
188 (unsigned int) &mpt->ktss;
189 fix_desc(&mpt->gdt[sel_idx(KERNEL_TSS)], 1);
190
191 mpt->ktss.ss0 = KERNEL_DS;
192 mpt->ktss.io_bit_map_offset = 0x0FFF; /* no IO bitmap */
193
194 return mpt;
195 }
196 }
197
198
199 /*
200 * Called after all CPUs have been found, but before the VM system
201 * is running. The machine array must show which CPUs exist.
202 */
203 void
204 interrupt_stack_alloc()
205 {
206 register int i;
207 int cpu_count;
208 vm_offset_t stack_start;
209
210 /*
211 * Count the number of CPUs.
212 */
213 cpu_count = 0;
214 for (i = 0; i < NCPUS; i++)
215 if (machine_slot[i].is_cpu)
216 cpu_count++;
217
218 /*
219 * Allocate an interrupt stack for each CPU except for
220 * the master CPU (which uses the bootstrap stack)
221 */
222 stack_start = phystokv(avail_start);
223 avail_start = round_page(avail_start + INTSTACK_SIZE*(cpu_count-1));
224
225 /*
226 * Set up pointers to the top of the interrupt stack.
227 */
228 for (i = 0; i < NCPUS; i++) {
229 if (i == master_cpu) {
230 interrupt_stack[i] = (vm_offset_t) intstack;
231 int_stack_top[i] = (vm_offset_t) eintstack;
232 }
233 else if (machine_slot[i].is_cpu) {
234 interrupt_stack[i] = stack_start;
235 int_stack_top[i] = stack_start + INTSTACK_SIZE;
236
237 stack_start += INTSTACK_SIZE;
238 }
239 }
240
241 /*
242 * Set up the barrier address. All thread stacks MUST
243 * be above this address.
244 */
245 int_stack_high = stack_start;
246 }
247
/* XXX should be adjusted per CPU speed */
int simple_lock_pause_loop = 100;

unsigned int simple_lock_pause_count = 0;	/* debugging */

/*
 * Burn a short, fixed amount of CPU time between lock attempts.
 * Used in loops that are trying to acquire locks out-of-order.
 * Bumps simple_lock_pause_count on every call for debugging.
 */
void
simple_lock_pause()
{
	static volatile int sink;
	register int spin;

	simple_lock_pause_count++;

	/*
	 * Spin simple_lock_pause_loop times; the volatile sink
	 * keeps the compiler from deleting the delay loop.
	 */
	for (spin = simple_lock_pause_loop; spin > 0; spin--)
		sink++;
}
268 #endif /* NCPUS > 1 */
Cache object: 993220a95b6eb4e5b2a2fbfbf578e82e
|