FreeBSD/Linux Kernel Cross Reference
sys/i386/mp_desc.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993,1991,1990 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26
27 /*
28 * HISTORY
29 * $Log: mp_desc.c,v $
30 * Revision 2.6 93/11/17 16:36:48 dbg
31 * Added ANSI function prototypes.
32 * [93/11/03 dbg]
33 *
34 * Revision 2.5 91/11/12 11:50:51 rvb
35 * Added simple_lock_pause.
36 * [91/11/12 rpd]
37 *
38 * Revision 2.4 91/07/31 17:39:10 dbg
39 * Move interrupt-stack allocation for multiprocessors here.
40 * [91/07/30 16:54:10 dbg]
41 *
42 * Revision 2.3 91/05/14 16:12:08 mrt
43 * Correcting copyright
44 *
45 * Revision 2.2 91/05/08 12:39:33 dbg
46 * Created.
47 * [91/03/21 dbg]
48 *
49 */
50
51 #include <cpus.h>
52
53 #if NCPUS > 1
54
55 #include <kern/cpu_number.h>
56 #include <kern/machine.h>
57 #include <kern/memory.h>
58 #include <vm/vm_kern.h>
59
60 #include <i386/mp_desc.h>
61 #include <i386/lock.h>
62
/*
 * Routine to convert descriptors from "fake" format
 * (one that the compiler and linker can handle) to
 * "real" format (that the machine understands).
 *
 * Converts `count' consecutive entries of `desc_array'
 * in place (see the uses in mp_desc_init below).
 */

extern void fix_desc(
	struct fake_descriptor *desc_array,	/* descriptors to convert, in place */
	int count);				/* number of entries to convert */
73 /*
74 * The i386 needs an interrupt stack to keep the PCB stack from being
75 * overrun by interrupts. All interrupt stacks MUST lie at lower addresses
76 * than any thread`s kernel stack.
77 */
78
79 /*
80 * Addresses of bottom and top of interrupt stacks.
81 */
82 vm_offset_t interrupt_stack[NCPUS];
83 vm_offset_t int_stack_top[NCPUS];
84
85 /*
86 * Barrier address.
87 */
88 vm_offset_t int_stack_high;
89
90 /*
91 * First cpu`s interrupt stack.
92 */
93 char intstack[]; /* bottom */
94 char eintstack[]; /* top */
95
96 /*
97 * We allocate interrupt stacks from physical memory.
98 */
99 extern
100 vm_offset_t avail_start;
101
/*
 * Multiprocessor i386/i486 systems use a separate copy of the
 * GDT, IDT, LDT, and kernel TSS per processor.  The first three
 * are separate to avoid lock contention: the i386 uses locked
 * memory cycles to access the descriptor tables.  The TSS is
 * separate since each processor needs its own kernel stack,
 * and since using a TSS marks it busy.
 */

/*
 * Allocated descriptor tables, indexed by cpu number.
 * Slave entries are filled in by mp_desc_init; the master
 * entry stays 0 (it uses the boot-time tables below).
 */
struct mp_desc_table *mp_desc_table[NCPUS] = { 0 };

/*
 * Pointer to each cpu's TSS for access in load_context.
 */
struct i386_tss *mp_ktss[NCPUS] = { 0 };

/*
 * Pointer to each cpu's GDT, used to reset the KTSS busy bit
 * before reloading the task register.
 */
struct fake_descriptor *mp_gdt[NCPUS] = { 0 };

/*
 * Boot-time tables, for initialization and master processor.
 * Defined elsewhere; copied per-cpu in mp_desc_init.
 */
extern struct fake_descriptor idt[IDTSZ];
extern struct fake_descriptor gdt[GDTSZ];
extern struct fake_descriptor ldt[LDTSZ];
extern struct i386_tss ktss;
133
134 /*
135 * Allocate and initialize the per-processor descriptor tables.
136 */
137
138 struct fake_descriptor ldt_desc_pattern = {
139 (unsigned int) 0,
140 LDTSZ * sizeof(struct fake_descriptor) - 1,
141 0,
142 ACC_P|ACC_PL_K|ACC_LDT
143 };
144 struct fake_descriptor tss_desc_pattern = {
145 (unsigned int) 0,
146 sizeof(struct i386_tss),
147 0,
148 ACC_P|ACC_PL_K|ACC_TSS
149 };
150
/*
 * Set up the descriptor tables for cpu `mycpu'.
 *
 * Master cpu: record pointers to the boot-time tables and
 * return 0.  Slave cpu: carve a table block out of the base of
 * that cpu's interrupt stack (interrupt_stack_alloc must have
 * run first), copy the boot-time IDT/GDT/LDT into it, give the
 * copy its own LDT and TSS descriptors, and return the block.
 */
struct mp_desc_table *
mp_desc_init(
	register int	mycpu)
{
	register struct mp_desc_table *mpt;

	if (mycpu == master_cpu) {
		/*
		 * Master CPU uses the tables built at boot time.
		 * Just set the TSS and GDT pointers.
		 */
		mp_ktss[mycpu] = &ktss;
		mp_gdt[mycpu] = gdt;
		return 0;
	}
	else {
		/*
		 * Other CPUs allocate the table from the bottom of
		 * the interrupt stack.
		 */
		mpt = (struct mp_desc_table *) interrupt_stack[mycpu];

		mp_desc_table[mycpu] = mpt;
		mp_ktss[mycpu] = &mpt->ktss;
		mp_gdt[mycpu] = mpt->gdt;

		/*
		 * Copy the boot-time tables into the per-cpu
		 * block; the GDT copy is patched below.
		 */
		bcopy((char *)idt,
		      (char *)mpt->idt,
		      sizeof(idt));
		bcopy((char *)gdt,
		      (char *)mpt->gdt,
		      sizeof(gdt));
		bcopy((char *)ldt,
		      (char *)mpt->ldt,
		      sizeof(ldt));
		bzero((char *)&mpt->ktss,
		      sizeof(struct i386_tss));

		/*
		 * Fix up the entries in the GDT to point to
		 * this LDT and this TSS: install the template,
		 * fill in the address, then convert the entry
		 * to real machine format.
		 */
		mpt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
		mpt->gdt[sel_idx(KERNEL_LDT)].offset =
			(unsigned int) mpt->ldt;
		fix_desc(&mpt->gdt[sel_idx(KERNEL_LDT)], 1);

		mpt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
		mpt->gdt[sel_idx(KERNEL_TSS)].offset =
			(unsigned int) &mpt->ktss;
		fix_desc(&mpt->gdt[sel_idx(KERNEL_TSS)], 1);

		/* ring-0 stack segment; the stack pointer (esp0)
		   is set later, per thread */
		mpt->ktss.ss0 = KERNEL_DS;
		mpt->ktss.io_bit_map_offset = 0x0FFF;	/* no IO bitmap */

		return mpt;
	}
}
212
213
/*
 * Called after all CPUs have been found, but before the VM system
 * is running.  The machine array must show which CPUs exist.
 *
 * Carves one INTSTACK_SIZE interrupt stack per slave cpu out of
 * physical memory at avail_start (the master keeps the bootstrap
 * stack), records the bottom/top of each stack, and sets the
 * int_stack_high barrier.
 */
void
interrupt_stack_alloc(void)
{
	register int	i;
	int		cpu_count;
	vm_offset_t	stack_start;

	/*
	 * Count the number of CPUs.
	 */
	cpu_count = 0;
	for (i = 0; i < NCPUS; i++)
	    if (machine_slot[i].is_cpu)
		cpu_count++;

	/*
	 * Allocate an interrupt stack for each CPU except for
	 * the master CPU (which uses the bootstrap stack).
	 * avail_start is advanced (page-rounded) past the
	 * physical memory claimed here.
	 */
	stack_start = phystokv(avail_start);
	avail_start = round_page(avail_start + INTSTACK_SIZE*(cpu_count-1));

	/*
	 * Set up pointers to the top of the interrupt stack.
	 */
	for (i = 0; i < NCPUS; i++) {
	    if (i == master_cpu) {
		interrupt_stack[i] = (vm_offset_t) intstack;
		int_stack_top[i] = (vm_offset_t) eintstack;
	    }
	    else if (machine_slot[i].is_cpu) {
		interrupt_stack[i] = stack_start;
		int_stack_top[i] = stack_start + INTSTACK_SIZE;

		stack_start += INTSTACK_SIZE;
	    }
	}

	/*
	 * Set up the barrier address.  All thread stacks MUST
	 * be above this address.
	 * NOTE(review): the master's bootstrap stack (intstack) is
	 * not part of the region carved here; presumably the link
	 * layout places it below this barrier too — confirm.
	 */
	int_stack_high = stack_start;
}
262
/* XXX should be adjusted per CPU speed */
int simple_lock_pause_loop = 100;

unsigned int simple_lock_pause_count = 0;	/* debugging */

/*
 * Brief busy-wait back-off, used in loops that are trying to
 * acquire locks out-of-order.  Burns a short, tunable amount
 * of time (simple_lock_pause_loop iterations) without sleeping,
 * and counts each call for debugging.
 */
void
simple_lock_pause(void)
{
	static volatile int sink;
	register int spins;

	simple_lock_pause_count++;

	spins = simple_lock_pause_loop;
	while (spins-- > 0)
		sink++;		/* volatile store keeps the delay loop alive */
}
283 #endif /* NCPUS > 1 */
Cache object: 1497d9faea5247bb64a9fe103e6be2b2
|