1 /*-
2 * Copyright (c) 1990 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/5.3/sys/i386/i386/sys_machdep.c 145951 2005-05-06 02:40:32Z cperciva $");
34
35 #include "opt_kstack_pages.h"
36 #include "opt_mac.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/lock.h>
41 #include <sys/mac.h>
42 #include <sys/malloc.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/smp.h>
46 #include <sys/sysproto.h>
47 #include <sys/user.h>
48
49 #include <vm/vm.h>
50 #include <vm/pmap.h>
51 #include <vm/vm_map.h>
52 #include <vm/vm_extern.h>
53
54 #include <machine/cpu.h>
55 #include <machine/pcb_ext.h> /* pcb.h included by sys/user.h */
56 #include <machine/proc.h>
57 #include <machine/sysarch.h>
58
59 #include <vm/vm_kern.h> /* for kernel_map */
60
#define MAX_LD 8192		/* hard cap on LDT slots a process may own */
#define LD_PER_PAGE 512		/* 8-byte descriptors per 4K page */
/* Round num up to an LD_PER_PAGE boundary (adds a full page when aligned). */
#define NEW_MAX_LD(num) ((num + LD_PER_PAGE) & ~(LD_PER_PAGE-1))
/* Byte size of a table holding NEW_MAX_LD(num) 8-byte descriptors. */
#define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
65
66
67
/* Forward declarations for the sysarch() operation handlers below. */
static int i386_get_ldt(struct thread *, char *);
static int i386_set_ldt(struct thread *, char *);
static int i386_set_ldt_data(struct thread *, int start, int num,
	union descriptor *descs);
static int i386_ldt_grow(struct thread *td, int len);
static int i386_get_ioperm(struct thread *, char *);
static int i386_set_ioperm(struct thread *, char *);
#ifdef SMP
static void set_user_ldt_rv(struct thread *);
#endif

#ifndef _SYS_SYSPROTO_H_
/* Fallback definition when sysproto.h does not supply the args struct. */
struct sysarch_args {
	int op;		/* I386_* operation selector */
	char *parms;	/* user pointer to the op-specific argument struct */
};
#endif
85
86 int
87 sysarch(td, uap)
88 struct thread *td;
89 register struct sysarch_args *uap;
90 {
91 int error;
92
93 mtx_lock(&Giant);
94 switch(uap->op) {
95 case I386_GET_LDT:
96 error = i386_get_ldt(td, uap->parms);
97 break;
98
99 case I386_SET_LDT:
100 error = i386_set_ldt(td, uap->parms);
101 break;
102 case I386_GET_IOPERM:
103 error = i386_get_ioperm(td, uap->parms);
104 break;
105 case I386_SET_IOPERM:
106 error = i386_set_ioperm(td, uap->parms);
107 break;
108 case I386_VM86:
109 error = vm86_sysarch(td, uap->parms);
110 break;
111 default:
112 error = EINVAL;
113 break;
114 }
115 mtx_unlock(&Giant);
116 return (error);
117 }
118
/*
 * Allocate and install an extended pcb for "td": a private TSS with an
 * i/o permission bitmap and a vm86 interrupt redirection map, so the
 * process can later be granted direct i/o port access.
 *
 * Returns 0 on success, EINVAL for KSE (M:N threaded) processes, or
 * ENOMEM if the wired allocation fails.  The new TSS is activated
 * lazily at the next reschedule.
 */
int
i386_extend_pcb(struct thread *td)
{
	int i, offset;
	u_long *addr;
	struct pcb_ext *ext;
	struct soft_segment_descriptor ssd = {
		0,			/* segment base address (overwritten) */
		ctob(IOPAGES + 1) - 1,	/* length */
		SDT_SYS386TSS,		/* segment type */
		0,			/* priority level */
		1,			/* descriptor present */
		0, 0,
		0,			/* default 32 size */
		0			/* granularity */
	};

	if (td->td_proc->p_flag & P_SA)
		return (EINVAL);		/* XXXKSE */
	/* XXXKSE All the code below only works in 1:1 needs changing */
	ext = (struct pcb_ext *)kmem_alloc(kernel_map, ctob(IOPAGES+1));
	if (ext == 0)
		return (ENOMEM);
	bzero(ext, sizeof(struct pcb_ext));
	/* -16 is so we can convert a trapframe into vm86trapframe inplace */
	ext->ext_tss.tss_esp0 = td->td_kstack + ctob(KSTACK_PAGES) -
	    sizeof(struct pcb) - 16;
	ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	/*
	 * The last byte of the i/o map must be followed by an 0xff byte.
	 * We arbitrarily allocate 16 bytes here, to keep the starting
	 * address on a doubleword boundary.
	 */
	offset = PAGE_SIZE - 16;
	/* tss_ioopt high word = offset of the i/o bitmap within the TSS. */
	ext->ext_tss.tss_ioopt =
	    (offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16;
	ext->ext_iomap = (caddr_t)ext + offset;
	/* vm86 int redirection bitmap sits just below the i/o bitmap. */
	ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32;

	/* Fill both bitmaps with ones: deny all ports / redirect no ints. */
	addr = (u_long *)ext->ext_vm86.vm86_intmap;
	for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)
		*addr++ = ~0;

	ssd.ssd_base = (unsigned)&ext->ext_tss;
	ssd.ssd_limit -= ((unsigned)&ext->ext_tss - (unsigned)ext);
	ssdtosd(&ssd, &ext->ext_tssd);

	KASSERT(td->td_proc == curthread->td_proc, ("giving TSS to !curproc"));
	KASSERT(td->td_pcb->pcb_ext == 0, ("already have a TSS!"));
	mtx_lock_spin(&sched_lock);
	td->td_pcb->pcb_ext = ext;

	/* switch to the new TSS after syscall completes */
	td->td_flags |= TDF_NEEDRESCHED;
	mtx_unlock_spin(&sched_lock);

	return 0;
}
177
178 static int
179 i386_set_ioperm(td, args)
180 struct thread *td;
181 char *args;
182 {
183 int i, error;
184 struct i386_ioperm_args ua;
185 char *iomap;
186
187 if ((error = copyin(args, &ua, sizeof(struct i386_ioperm_args))) != 0)
188 return (error);
189
190 #ifdef MAC
191 if ((error = mac_check_sysarch_ioperm(td->td_ucred)) != 0)
192 return (error);
193 #endif
194 if ((error = suser(td)) != 0)
195 return (error);
196 if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
197 return (error);
198 /*
199 * XXX
200 * While this is restricted to root, we should probably figure out
201 * whether any other driver is using this i/o address, as so not to
202 * cause confusion. This probably requires a global 'usage registry'.
203 */
204
205 if (td->td_pcb->pcb_ext == 0)
206 if ((error = i386_extend_pcb(td)) != 0)
207 return (error);
208 iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;
209
210 if (ua.start + ua.length > IOPAGES * PAGE_SIZE * NBBY)
211 return (EINVAL);
212
213 for (i = ua.start; i < ua.start + ua.length; i++) {
214 if (ua.enable)
215 iomap[i >> 3] &= ~(1 << (i & 7));
216 else
217 iomap[i >> 3] |= (1 << (i & 7));
218 }
219 return (error);
220 }
221
222 static int
223 i386_get_ioperm(td, args)
224 struct thread *td;
225 char *args;
226 {
227 int i, state, error;
228 struct i386_ioperm_args ua;
229 char *iomap;
230
231 if ((error = copyin(args, &ua, sizeof(struct i386_ioperm_args))) != 0)
232 return (error);
233 if (ua.start >= IOPAGES * PAGE_SIZE * NBBY)
234 return (EINVAL);
235
236 if (td->td_pcb->pcb_ext == 0) {
237 ua.length = 0;
238 goto done;
239 }
240
241 iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;
242
243 i = ua.start;
244 state = (iomap[i >> 3] >> (i & 7)) & 1;
245 ua.enable = !state;
246 ua.length = 1;
247
248 for (i = ua.start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
249 if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
250 break;
251 ua.length++;
252 }
253
254 done:
255 error = copyout(&ua, args, sizeof(struct i386_ioperm_args));
256 return (error);
257 }
258
/*
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * current process.
 *
 * This must be called with sched_lock held.  Unfortunately, we can't use a
 * mtx_assert() here because cpu_switch() calls this function after changing
 * curproc but before sched_lock's owner is updated in mi_switch().
 */
void
set_user_ldt(struct mdproc *mdp)
{
	struct proc_ldt *pldt;

	pldt = mdp->md_ldt;
#ifdef SMP
	/* Each cpu owns a slice of the GDT; patch this cpu's user-LDT slot. */
	gdt[PCPU_GET(cpuid) * NGDT + GUSERLDT_SEL].sd = pldt->ldt_sd;
#else
	gdt[GUSERLDT_SEL].sd = pldt->ldt_sd;
#endif
	/* Load the new LDT and cache the selector for context switches. */
	lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
	PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));
}
281
#ifdef SMP
/*
 * smp_rendezvous() callback: reload the LDT register on this cpu, but
 * only when the target thread's process is the one running here.
 */
static void
set_user_ldt_rv(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	if (curthread->td_proc != p)
		return;
	set_user_ldt(&p->p_md);
}
#endif
293
/*
 * Must be called with either sched_lock free or held but not recursed.
 * If it does not return NULL, it will return with it owned.
 */
struct proc_ldt *
user_ldt_alloc(struct mdproc *mdp, int len)
{
	struct proc_ldt *pldt, *new_ldt;

	/* Drop sched_lock across the sleeping allocations below. */
	if (mtx_owned(&sched_lock))
		mtx_unlock_spin(&sched_lock);
	mtx_assert(&sched_lock, MA_NOTOWNED);
	MALLOC(new_ldt, struct proc_ldt *, sizeof(struct proc_ldt),
	    M_SUBPROC, M_WAITOK);

	/* Round the request up to a whole page worth of descriptors. */
	new_ldt->ldt_len = len = NEW_MAX_LD(len);
	new_ldt->ldt_base = (caddr_t)kmem_alloc(kernel_map,
	    len * sizeof(union descriptor));
	if (new_ldt->ldt_base == NULL) {
		FREE(new_ldt, M_SUBPROC);
		return NULL;
	}
	new_ldt->ldt_refcnt = 1;
	new_ldt->ldt_active = 0;

	/* Reacquired here and intentionally still held on success. */
	mtx_lock_spin(&sched_lock);
	gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)new_ldt->ldt_base;
	gdt_segs[GUSERLDT_SEL].ssd_limit = len * sizeof(union descriptor) - 1;
	ssdtosd(&gdt_segs[GUSERLDT_SEL], &new_ldt->ldt_sd);

	/* Seed the new table from the old per-process LDT, or the default. */
	if ((pldt = mdp->md_ldt)) {
		if (len > pldt->ldt_len)
			len = pldt->ldt_len;
		bcopy(pldt->ldt_base, new_ldt->ldt_base,
		    len * sizeof(union descriptor));
	} else {
		bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));
	}
	return new_ldt;		/* sched_lock held */
}
334
/*
 * Must be called either with sched_lock free or held but not recursed.
 * If md_ldt is not NULL, it will return with sched_lock released.
 */
void
user_ldt_free(struct thread *td)
{
	struct mdproc *mdp = &td->td_proc->p_md;
	struct proc_ldt *pldt = mdp->md_ldt;

	if (pldt == NULL)
		return;

	if (!mtx_owned(&sched_lock))
		mtx_lock_spin(&sched_lock);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
	/* If this cpu is running on the dying LDT, fall back to the default. */
	if (td == PCPU_GET(curthread)) {
		lldt(_default_ldt);
		PCPU_SET(currentldt, _default_ldt);
	}

	mdp->md_ldt = NULL;
	/* Drop our reference; the last holder frees the descriptor memory. */
	if (--pldt->ldt_refcnt == 0) {
		mtx_unlock_spin(&sched_lock);
		kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
		    pldt->ldt_len * sizeof(union descriptor));
		FREE(pldt, M_SUBPROC);
	} else
		mtx_unlock_spin(&sched_lock);
}
365
366 static int
367 i386_get_ldt(td, args)
368 struct thread *td;
369 char *args;
370 {
371 int error = 0;
372 struct proc_ldt *pldt = td->td_proc->p_md.md_ldt;
373 int nldt, num;
374 union descriptor *lp;
375 struct i386_ldt_args ua, *uap = &ua;
376
377 if ((error = copyin(args, uap, sizeof(struct i386_ldt_args))) < 0)
378 return(error);
379
380 #ifdef DEBUG
381 printf("i386_get_ldt: start=%d num=%d descs=%p\n",
382 uap->start, uap->num, (void *)uap->descs);
383 #endif
384
385 if (pldt) {
386 nldt = pldt->ldt_len;
387 num = min(uap->num, nldt);
388 lp = &((union descriptor *)(pldt->ldt_base))[uap->start];
389 } else {
390 nldt = sizeof(ldt)/sizeof(ldt[0]);
391 num = min(uap->num, nldt);
392 lp = &ldt[uap->start];
393 }
394
395 if ((uap->start > (unsigned int)nldt) ||
396 ((unsigned int)num > (unsigned int)nldt) ||
397 ((unsigned int)(uap->start + num) > (unsigned int)nldt))
398 return(EINVAL);
399
400 error = copyout(lp, uap->descs, num * sizeof(union descriptor));
401 if (!error)
402 td->td_retval[0] = num;
403
404 return(error);
405 }
406
/* Rate-limit the deprecation warning for static LDT slot allocation. */
static int ldt_warnings;
#define NUM_LDT_WARNINGS 10
409
410 static int
411 i386_set_ldt(td, args)
412 struct thread *td;
413 char *args;
414 {
415 int error = 0, i;
416 int largest_ld;
417 struct mdproc *mdp = &td->td_proc->p_md;
418 struct proc_ldt *pldt = 0;
419 struct i386_ldt_args ua, *uap = &ua;
420 union descriptor *descs, *dp;
421 int descs_size;
422
423 if ((error = copyin(args, uap, sizeof(struct i386_ldt_args))) < 0)
424 return(error);
425
426 #ifdef DEBUG
427 printf("i386_set_ldt: start=%d num=%d descs=%p\n",
428 uap->start, uap->num, (void *)uap->descs);
429 #endif
430
431 if (uap->descs == NULL) {
432 /* Free descriptors */
433 if (uap->start == 0 && uap->num == 0) {
434 /*
435 * Treat this as a special case, so userland needn't
436 * know magic number NLDT.
437 */
438 uap->start = NLDT;
439 uap->num = MAX_LD - NLDT;
440 }
441 if (uap->start <= LUDATA_SEL || uap->num <= 0)
442 return (EINVAL);
443 mtx_lock_spin(&sched_lock);
444 pldt = mdp->md_ldt;
445 if (pldt == NULL || uap->start >= pldt->ldt_len) {
446 mtx_unlock_spin(&sched_lock);
447 return (0);
448 }
449 largest_ld = uap->start + uap->num;
450 if (largest_ld > pldt->ldt_len)
451 largest_ld = pldt->ldt_len;
452 i = largest_ld - uap->start;
453 bzero(&((union descriptor *)(pldt->ldt_base))[uap->start],
454 sizeof(union descriptor) * i);
455 mtx_unlock_spin(&sched_lock);
456 return (0);
457 }
458
459 if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
460 /* complain a for a while if using old methods */
461 if (ldt_warnings++ < NUM_LDT_WARNINGS) {
462 printf("Warning: pid %d used static ldt allocation.\n",
463 td->td_proc->p_pid);
464 printf("See the i386_set_ldt man page for more info\n");
465 }
466 /* verify range of descriptors to modify */
467 largest_ld = uap->start + uap->num;
468 if (uap->start >= MAX_LD ||
469 uap->num < 0 || largest_ld > MAX_LD) {
470 return (EINVAL);
471 }
472 }
473
474 descs_size = uap->num * sizeof(union descriptor);
475 descs = (union descriptor *)kmem_alloc(kernel_map, descs_size);
476 if (descs == NULL)
477 return (ENOMEM);
478 error = copyin(uap->descs, descs, descs_size);
479 if (error) {
480 kmem_free(kernel_map, (vm_offset_t)descs, descs_size);
481 return (error);
482 }
483
484 /* Check descriptors for access violations */
485 for (i = 0; i < uap->num; i++) {
486 dp = &descs[i];
487
488 switch (dp->sd.sd_type) {
489 case SDT_SYSNULL: /* system null */
490 dp->sd.sd_p = 0;
491 break;
492 case SDT_SYS286TSS: /* system 286 TSS available */
493 case SDT_SYSLDT: /* system local descriptor table */
494 case SDT_SYS286BSY: /* system 286 TSS busy */
495 case SDT_SYSTASKGT: /* system task gate */
496 case SDT_SYS286IGT: /* system 286 interrupt gate */
497 case SDT_SYS286TGT: /* system 286 trap gate */
498 case SDT_SYSNULL2: /* undefined by Intel */
499 case SDT_SYS386TSS: /* system 386 TSS available */
500 case SDT_SYSNULL3: /* undefined by Intel */
501 case SDT_SYS386BSY: /* system 386 TSS busy */
502 case SDT_SYSNULL4: /* undefined by Intel */
503 case SDT_SYS386IGT: /* system 386 interrupt gate */
504 case SDT_SYS386TGT: /* system 386 trap gate */
505 case SDT_SYS286CGT: /* system 286 call gate */
506 case SDT_SYS386CGT: /* system 386 call gate */
507 /* I can't think of any reason to allow a user proc
508 * to create a segment of these types. They are
509 * for OS use only.
510 */
511 kmem_free(kernel_map, (vm_offset_t)descs, descs_size);
512 return (EACCES);
513 /*NOTREACHED*/
514
515 /* memory segment types */
516 case SDT_MEMEC: /* memory execute only conforming */
517 case SDT_MEMEAC: /* memory execute only accessed conforming */
518 case SDT_MEMERC: /* memory execute read conforming */
519 case SDT_MEMERAC: /* memory execute read accessed conforming */
520 /* Must be "present" if executable and conforming. */
521 if (dp->sd.sd_p == 0) {
522 kmem_free(kernel_map, (vm_offset_t)descs,
523 descs_size);
524 return (EACCES);
525 }
526 break;
527 case SDT_MEMRO: /* memory read only */
528 case SDT_MEMROA: /* memory read only accessed */
529 case SDT_MEMRW: /* memory read write */
530 case SDT_MEMRWA: /* memory read write accessed */
531 case SDT_MEMROD: /* memory read only expand dwn limit */
532 case SDT_MEMRODA: /* memory read only expand dwn lim accessed */
533 case SDT_MEMRWD: /* memory read write expand dwn limit */
534 case SDT_MEMRWDA: /* memory read write expand dwn lim acessed */
535 case SDT_MEME: /* memory execute only */
536 case SDT_MEMEA: /* memory execute only accessed */
537 case SDT_MEMER: /* memory execute read */
538 case SDT_MEMERA: /* memory execute read accessed */
539 break;
540 default:
541 kmem_free(kernel_map, (vm_offset_t)descs, descs_size);
542 return(EINVAL);
543 /*NOTREACHED*/
544 }
545
546 /* Only user (ring-3) descriptors may be present. */
547 if ((dp->sd.sd_p != 0) && (dp->sd.sd_dpl != SEL_UPL)) {
548 kmem_free(kernel_map, (vm_offset_t)descs, descs_size);
549 return (EACCES);
550 }
551 }
552
553 if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
554 /* Allocate a free slot */
555 pldt = mdp->md_ldt;
556 if (pldt == NULL) {
557 error = i386_ldt_grow(td, NLDT+1);
558 if (error) {
559 kmem_free(kernel_map, (vm_offset_t)descs,
560 descs_size);
561 return (error);
562 }
563 pldt = mdp->md_ldt;
564 }
565 again:
566 mtx_lock_spin(&sched_lock);
567 /*
568 * start scanning a bit up to leave room for NVidia and
569 * Wine, which still user the "Blat" method of allocation.
570 */
571 dp = &((union descriptor *)(pldt->ldt_base))[NLDT];
572 for (i = NLDT; i < pldt->ldt_len; ++i) {
573 if (dp->sd.sd_type == SDT_SYSNULL)
574 break;
575 dp++;
576 }
577 if (i >= pldt->ldt_len) {
578 mtx_unlock_spin(&sched_lock);
579 error = i386_ldt_grow(td, pldt->ldt_len+1);
580 if (error) {
581 kmem_free(kernel_map, (vm_offset_t)descs,
582 descs_size);
583 return (error);
584 }
585 goto again;
586 }
587 uap->start = i;
588 error = i386_set_ldt_data(td, i, 1, descs);
589 mtx_unlock_spin(&sched_lock);
590 } else {
591 largest_ld = uap->start + uap->num;
592 error = i386_ldt_grow(td, largest_ld);
593 if (error == 0) {
594 mtx_lock_spin(&sched_lock);
595 error = i386_set_ldt_data(td, uap->start, uap->num,
596 descs);
597 mtx_unlock_spin(&sched_lock);
598 }
599 }
600 kmem_free(kernel_map, (vm_offset_t)descs, descs_size);
601 if (error == 0)
602 td->td_retval[0] = uap->start;
603 return (error);
604 }
605
606 static int
607 i386_set_ldt_data(struct thread *td, int start, int num,
608 union descriptor *descs)
609 {
610 struct mdproc *mdp = &td->td_proc->p_md;
611 struct proc_ldt *pldt = mdp->md_ldt;
612
613 mtx_assert(&sched_lock, MA_OWNED);
614
615 /* Fill in range */
616 bcopy(descs,
617 &((union descriptor *)(pldt->ldt_base))[start],
618 num * sizeof(union descriptor));
619 return (0);
620 }
621
/*
 * Ensure the process LDT can hold at least "len" descriptors, growing
 * (never shrinking) it as needed.  Copes with other threads racing the
 * same growth: the loser's freshly-allocated table is discarded.  On any
 * change, all cpus that run this process reload their LDT register.
 * Returns 0 on success or ENOMEM.
 */
static int
i386_ldt_grow(struct thread *td, int len)
{
	struct mdproc *mdp = &td->td_proc->p_md;
	struct proc_ldt *pldt;
	caddr_t old_ldt_base;
	int old_ldt_len;

	if (len > MAX_LD)
		return (ENOMEM);
	/* Never go below the default table size plus one free slot. */
	if (len < NLDT+1)
		len = NLDT+1;
	pldt = mdp->md_ldt;
	/* allocate user ldt */
	if (!pldt || len > pldt->ldt_len) {
		struct proc_ldt *new_ldt = user_ldt_alloc(mdp, len);
		if (new_ldt == NULL)
			return (ENOMEM);
		pldt = mdp->md_ldt;
		/* sched_lock was held by user_ldt_alloc */
		if (pldt) {
			if (new_ldt->ldt_len > pldt->ldt_len) {
				/* Swap the bigger table in, free the old one. */
				old_ldt_base = pldt->ldt_base;
				old_ldt_len = pldt->ldt_len;
				pldt->ldt_sd = new_ldt->ldt_sd;
				pldt->ldt_base = new_ldt->ldt_base;
				pldt->ldt_len = new_ldt->ldt_len;
				mtx_unlock_spin(&sched_lock);
				kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
				    old_ldt_len * sizeof(union descriptor));
				FREE(new_ldt, M_SUBPROC);
				mtx_lock_spin(&sched_lock);
			} else {
				/*
				 * If other threads already did the work,
				 * do nothing
				 */
				mtx_unlock_spin(&sched_lock);
				kmem_free(kernel_map,
				    (vm_offset_t)new_ldt->ldt_base,
				    new_ldt->ldt_len * sizeof(union descriptor));
				FREE(new_ldt, M_SUBPROC);
				return (0);
			}
		} else {
			mdp->md_ldt = pldt = new_ldt;
		}
#ifdef SMP
		mtx_unlock_spin(&sched_lock);
		/* signal other cpus to reload ldt */
		smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
		    NULL, td);
#else
		set_user_ldt(mdp);
		mtx_unlock_spin(&sched_lock);
#endif
	}
	return (0);
}
/* Cache object: 0498bf344938de1e0fd373553a8e6d63 */