FreeBSD/Linux Kernel Cross Reference
sys/i386/user_ldt.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1991 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26
27 /*
28 * HISTORY
29 * $Log: user_ldt.c,v $
30 * Revision 2.3 92/02/23 19:45:12 elf
31 * Eliminate keep_wired argument from vm_map_copyin().
32 * [92/02/23 danner]
33 *
34 * Revision 2.2 92/01/03 20:10:02 dbg
35 * Created.
36 * [91/08/20 dbg]
37 *
38 */
39
40 /*
41 * User LDT management.
42 * Each thread in a task may have its own LDT.
43 */
44
45 #include <kern/kalloc.h>
46 #include <kern/thread.h>
47
48 #include <vm/vm_kern.h>
49
50 #include <i386/seg.h>
51 #include <i386/thread.h>
52 #include <i386/user_ldt.h>
53
/*
 * Permitted uses for each user descriptor type.
 *
 * Rows are indexed by bits 3..1 of the descriptor access byte
 * (see selector_check: acc_type[(access & 0xe) >> 1][type]);
 * columns by the intended use: S_CODE, S_STACK, S_DATA.
 * A nonzero entry means the descriptor type is acceptable
 * for that use.
 */
char acc_type[8][3] = {
    /*	code	stack	data */
    {	0,	0,	1	},	/* data */
    {	0,	1,	1	},	/* data, writable */
    {	0,	0,	1	},	/* data, expand-down */
    {	0,	1,	1	},	/* data, writable, expand-down */
    {	1,	0,	0	},	/* code */
    {	1,	0,	1	},	/* code, readable */
    {	1,	0,	0	},	/* code, conforming */
    {	1,	0,	1	},	/* code, readable, conforming */
};
65
66 extern struct fake_descriptor ldt[]; /* for system call gate */
67
68 boolean_t selector_check(thread, sel, type)
69 thread_t thread;
70 int sel;
71 int type; /* code, stack, data */
72 {
73 struct user_ldt *ldt;
74 int access;
75
76 ldt = thread->pcb->ims.ldt;
77 if (ldt == 0) {
78 switch (type) {
79 case S_CODE:
80 return sel == USER_CS;
81 case S_STACK:
82 return sel == USER_DS;
83 case S_DATA:
84 return sel == 0 ||
85 sel == USER_CS ||
86 sel == USER_DS;
87 }
88 }
89
90 if (type != S_DATA && sel == 0)
91 return FALSE;
92 if ((sel & (SEL_LDT|SEL_PL)) != (SEL_LDT|SEL_PL_U)
93 || sel > ldt->desc.limit_low)
94 return FALSE;
95
96 access = ldt->ldt[sel_idx(sel)].access;
97
98 if ((access & (ACC_P|ACC_PL|ACC_TYPE_USER))
99 != (ACC_P|ACC_PL_U|ACC_TYPE_USER))
100 return FALSE;
101 /* present, pl == pl.user, not system */
102
103 return acc_type[(access & 0xe)>>1][type];
104 }
105
106 /*
107 * Add the descriptors to the LDT, starting with
108 * the descriptor for 'first_selector'.
109 */
110 kern_return_t
111 i386_set_ldt(thread, first_selector, desc_list, count, desc_list_inline)
112 thread_t thread;
113 int first_selector;
114 struct real_descriptor *desc_list;
115 unsigned int count;
116 boolean_t desc_list_inline;
117 {
118 user_ldt_t new_ldt, old_ldt, temp;
119 struct real_descriptor *dp;
120 int i;
121 pcb_t pcb;
122 vm_size_t ldt_size_needed;
123 int first_desc = sel_idx(first_selector);
124 vm_map_copy_t old_copy_object;
125
126 if (thread == THREAD_NULL)
127 return KERN_INVALID_ARGUMENT;
128 if (first_desc < 0 || first_desc > 8191)
129 return KERN_INVALID_ARGUMENT;
130 if (first_desc + count >= 8192)
131 return KERN_INVALID_ARGUMENT;
132
133 /*
134 * If desc_list is not inline, it is in copyin form.
135 * We must copy it out to the kernel map, and wire
136 * it down (we touch it while the PCB is locked).
137 *
138 * We make a copy of the copyin object, and clear
139 * out the old one, so that returning KERN_INVALID_ARGUMENT
140 * will not try to deallocate the data twice.
141 */
142 if (!desc_list_inline) {
143 kern_return_t kr;
144 vm_offset_t dst_addr;
145
146 old_copy_object = (vm_map_copy_t) desc_list;
147
148 kr = vm_map_copyout(ipc_kernel_map, &dst_addr,
149 vm_map_copy_copy(old_copy_object));
150 if (kr != KERN_SUCCESS)
151 return kr;
152
153 (void) vm_map_pageable(ipc_kernel_map,
154 dst_addr,
155 dst_addr + count * sizeof(struct real_descriptor),
156 VM_PROT_READ|VM_PROT_WRITE);
157 desc_list = (struct real_descriptor *)dst_addr;
158 }
159
160 for (i = 0, dp = desc_list;
161 i < count;
162 i++, dp++)
163 {
164 switch (dp->access & ~ACC_A) {
165 case 0:
166 case ACC_P:
167 /* valid empty descriptor */
168 break;
169 case ACC_P | ACC_CALL_GATE:
170 /* Mach kernel call */
171 *dp = *(struct real_descriptor *)
172 &ldt[sel_idx(USER_SCALL)];
173 break;
174 case ACC_P | ACC_PL_U | ACC_DATA:
175 case ACC_P | ACC_PL_U | ACC_DATA_W:
176 case ACC_P | ACC_PL_U | ACC_DATA_E:
177 case ACC_P | ACC_PL_U | ACC_DATA_EW:
178 case ACC_P | ACC_PL_U | ACC_CODE:
179 case ACC_P | ACC_PL_U | ACC_CODE_R:
180 case ACC_P | ACC_PL_U | ACC_CODE_C:
181 case ACC_P | ACC_PL_U | ACC_CODE_CR:
182 case ACC_P | ACC_PL_U | ACC_CALL_GATE_16:
183 case ACC_P | ACC_PL_U | ACC_CALL_GATE:
184 break;
185 default:
186 return KERN_INVALID_ARGUMENT;
187 }
188 }
189 ldt_size_needed = sizeof(struct real_descriptor)
190 * (first_desc + count - 1);
191
192 pcb = thread->pcb;
193 new_ldt = 0;
194 Retry:
195 simple_lock(&pcb->lock);
196 old_ldt = pcb->ims.ldt;
197 if (old_ldt == 0 ||
198 old_ldt->desc.limit_low + 1 < ldt_size_needed)
199 {
200 /*
201 * No old LDT, or not big enough
202 */
203 if (new_ldt == 0) {
204 simple_unlock(&pcb->lock);
205
206 new_ldt = (user_ldt_t) kalloc(ldt_size_needed
207 + sizeof(struct real_descriptor));
208 new_ldt->desc.limit_low = ldt_size_needed - 1;
209 new_ldt->desc.limit_high = 0;
210 new_ldt->desc.base_low = ((vm_offset_t)new_ldt) & 0xffff;
211 new_ldt->desc.base_med = (((vm_offset_t)new_ldt) >> 16)
212 & 0xff;
213 new_ldt->desc.base_high = ((vm_offset_t)new_ldt) >> 24;
214 new_ldt->desc.access = ACC_P | ACC_LDT;
215 new_ldt->desc.granularity = 0;
216
217 goto Retry;
218 }
219
220 /*
221 * Have new LDT. Copy descriptors from old to new.
222 */
223 if (old_ldt)
224 bcopy((char *)&old_ldt->ldt[0],
225 (char *)&new_ldt->ldt[0],
226 old_ldt->desc.limit_low + 1);
227
228 temp = old_ldt;
229 old_ldt = new_ldt; /* use new LDT from now on */
230 new_ldt = temp; /* discard old LDT */
231
232 pcb->ims.ldt = old_ldt; /* new LDT for thread */
233 }
234
235 /*
236 * Install new descriptors.
237 */
238 bcopy((char *)desc_list,
239 (char *)&old_ldt->ldt[first_desc],
240 count * sizeof(struct real_descriptor));
241
242 simple_unlock(&pcb->lock);
243
244 if (old_ldt)
245 kfree((char *)old_ldt,
246 old_ldt->desc.limit_low + 1
247 + sizeof(struct real_descriptor));
248
249 /*
250 * Free the descriptor list, if it was
251 * out-of-line. Also discard the original
252 * copy object for it.
253 */
254 if (!desc_list_inline) {
255 (void) kmem_free(ipc_kernel_map,
256 (vm_offset_t) desc_list,
257 count * sizeof(struct real_descriptor));
258 vm_map_copy_discard(old_copy_object);
259 }
260
261 return KERN_SUCCESS;
262 }
263
264 kern_return_t
265 i386_get_ldt(thread, first_selector, selector_count, desc_list, count)
266 thread_t thread;
267 int first_selector;
268 int selector_count; /* number wanted */
269 struct real_descriptor **desc_list; /* in/out */
270 unsigned int *count; /* in/out */
271 {
272 struct user_ldt *user_ldt;
273 pcb_t pcb;
274 int first_desc = sel_idx(first_selector);
275 unsigned int ldt_count;
276 vm_size_t ldt_size;
277 vm_size_t size, size_needed;
278 vm_offset_t addr;
279
280 if (thread == THREAD_NULL)
281 return KERN_INVALID_ARGUMENT;
282 if (first_desc < 0 || first_desc > 8191)
283 return KERN_INVALID_ARGUMENT;
284 if (first_desc + selector_count >= 8192)
285 return KERN_INVALID_ARGUMENT;
286
287 addr = 0;
288 size = 0;
289
290 for (;;) {
291 simple_lock(&pcb->lock);
292 user_ldt = pcb->ims.ldt;
293 if (user_ldt == 0) {
294 simple_unlock(&pcb->lock);
295 if (addr)
296 kmem_free(ipc_kernel_map, addr, size);
297 *count = 0;
298 return KERN_SUCCESS;
299 }
300
301 /*
302 * Find how many descriptors we should return.
303 */
304 ldt_count = (user_ldt->desc.limit_low + 1) /
305 sizeof (struct real_descriptor);
306 ldt_count -= first_desc;
307 if (ldt_count > selector_count)
308 ldt_count = selector_count;
309
310 ldt_size = ldt_count * sizeof(struct real_descriptor);
311
312 /*
313 * Do we have the memory we need?
314 */
315 if (ldt_count <= *count)
316 break; /* fits in-line */
317
318 size_needed = round_page(ldt_size);
319 if (size_needed <= size)
320 break;
321
322 /*
323 * Unlock the pcb and allocate more memory
324 */
325 simple_unlock(&pcb->lock);
326
327 if (size != 0)
328 kmem_free(ipc_kernel_map, addr, size);
329
330 size = size_needed;
331
332 if (kmem_alloc(ipc_kernel_map, &addr, size)
333 != KERN_SUCCESS)
334 return KERN_RESOURCE_SHORTAGE;
335 }
336
337 /*
338 * copy out the descriptors
339 */
340 bcopy((char *)&user_ldt[first_desc],
341 (char *)*desc_list,
342 ldt_size);
343 *count = ldt_count;
344 simple_unlock(&pcb->lock);
345
346 if (addr) {
347 vm_size_t size_used, size_left;
348 vm_map_copy_t memory;
349
350 /*
351 * Free any unused memory beyond the end of the last page used
352 */
353 size_used = round_page(ldt_size);
354 if (size_used != size)
355 kmem_free(ipc_kernel_map,
356 addr + size_used, size - size_used);
357
358 /*
359 * Zero the remainder of the page being returned.
360 */
361 size_left = size_used - ldt_size;
362 if (size_left > 0)
363 bzero((char *)addr + ldt_size, size_left);
364
365 /*
366 * Make memory into copyin form - this unwires it.
367 */
368 (void) vm_map_copyin(ipc_kernel_map, addr, size_used, TRUE, &memory);
369 *desc_list = (struct real_descriptor *)memory;
370 }
371
372 return KERN_SUCCESS;
373 }
374
375 void
376 user_ldt_free(user_ldt)
377 user_ldt_t user_ldt;
378 {
379 kfree((char *)user_ldt,
380 user_ldt->desc.limit_low + 1
381 + sizeof(struct real_descriptor));
382 }
Cache object: 2ed581383141310b5fbfdb3df5f91891
|