/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)sys_machdep.c	5.5 (Berkeley) 1/19/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.2/sys/amd64/amd64/sys_machdep.c 331722 2018-03-29 02:50:57Z eadler $");

#include "opt_capsicum.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>		/* for kernel_map */
#include <vm/vm_extern.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <machine/tss.h>
#include <machine/vmparam.h>

#include <security/audit/audit.h>

static void user_ldt_deref(struct proc_ldt *pldt);
static void user_ldt_derefl(struct proc_ldt *pldt);

#define	MAX_LD	8192

int max_ldt_segment = 512;
SYSCTL_INT(_machdep, OID_AUTO, max_ldt_segment, CTLFLAG_RDTUN,
    &max_ldt_segment, 0,
    "Maximum number of allowed LDT segments in a single address space");

static void
max_ldt_segment_init(void *arg __unused)
{

	if (max_ldt_segment <= 0)
		max_ldt_segment = 1;
	if (max_ldt_segment > MAX_LD)
		max_ldt_segment = MAX_LD;
}
SYSINIT(maxldt, SI_SUB_VM_CONF, SI_ORDER_ANY, max_ldt_segment_init, NULL);
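
/*
 * machdep.max_ldt_segment is a loader tunable (CTLFLAG_RDTUN), clamped
 * above to [1, MAX_LD].  Illustrative loader.conf(5) setting:
 *
 *	machdep.max_ldt_segment="1024"
 */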

#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
	int op;
	char *parms;
};
#endif

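/*
 * Handle the LDT-related sysarch(2) operations.  uap_space says whether
 * uap->parms points at user memory (UIO_USERSPACE, the native sysarch(2)
 * path) or at a structure already in kernel space, as a compat layer may
 * pass.
 */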
int
sysarch_ldt(struct thread *td, struct sysarch_args *uap, int uap_space)
{
	struct i386_ldt_args *largs, la;
	struct user_segment_descriptor *lp;
	int error = 0;

	/*
	 * XXXKIB check that the BSM generation code knows to encode
	 * the op argument.
	 */
	AUDIT_ARG_CMD(uap->op);
	if (uap_space == UIO_USERSPACE) {
		error = copyin(uap->parms, &la, sizeof(struct i386_ldt_args));
		if (error != 0)
			return (error);
		largs = &la;
	} else
		largs = (struct i386_ldt_args *)uap->parms;

	switch (uap->op) {
	case I386_GET_LDT:
		error = amd64_get_ldt(td, largs);
		break;
	case I386_SET_LDT:
		if (largs->descs != NULL && largs->num > max_ldt_segment)
			return (EINVAL);
		set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
		if (largs->descs != NULL) {
			lp = malloc(largs->num *
			    sizeof(struct user_segment_descriptor),
			    M_TEMP, M_WAITOK);
			error = copyin(largs->descs, lp, largs->num *
			    sizeof(struct user_segment_descriptor));
			if (error == 0)
				error = amd64_set_ldt(td, largs, lp);
			free(lp, M_TEMP);
		} else {
			error = amd64_set_ldt(td, largs, NULL);
		}
		break;
	}
	return (error);
}

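/*
 * Keep the per-CPU GDT descriptor backing the 32-bit %gs (and, below,
 * %fs) segment in sync with a new base, so that a 32-bit process sees
 * the updated base on its next segment reload.  Only curthread is
 * handled here; for other threads the PCB value is installed when the
 * thread is switched in.
 */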
void
update_gdt_gsbase(struct thread *td, uint32_t base)
{
	struct user_segment_descriptor *sd;

	if (td != curthread)
		return;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	critical_enter();
	sd = PCPU_GET(gs32p);
	sd->sd_lobase = base & 0xffffff;
	sd->sd_hibase = (base >> 24) & 0xff;
	critical_exit();
}

void
update_gdt_fsbase(struct thread *td, uint32_t base)
{
	struct user_segment_descriptor *sd;

	if (td != curthread)
		return;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	critical_enter();
	sd = PCPU_GET(fs32p);
	sd->sd_lobase = base & 0xffffff;
	sd->sd_hibase = (base >> 24) & 0xff;
	critical_exit();
}

int
sysarch(struct thread *td, struct sysarch_args *uap)
{
	int error = 0;
	struct pcb *pcb = curthread->td_pcb;
	uint32_t i386base;
	uint64_t a64base;
	struct i386_ioperm_args iargs;
	struct i386_get_xfpustate i386xfpu;
	struct amd64_get_xfpustate a64xfpu;

#ifdef CAPABILITY_MODE
	/*
	 * When adding new operations, add a new case statement here to
	 * explicitly indicate whether or not the operation is safe to
	 * perform in capability mode.
	 */
	if (IN_CAPABILITY_MODE(td)) {
		switch (uap->op) {
		case I386_GET_LDT:
		case I386_SET_LDT:
		case I386_GET_IOPERM:
		case I386_GET_FSBASE:
		case I386_SET_FSBASE:
		case I386_GET_GSBASE:
		case I386_SET_GSBASE:
		case I386_GET_XFPUSTATE:
		case AMD64_GET_FSBASE:
		case AMD64_SET_FSBASE:
		case AMD64_GET_GSBASE:
		case AMD64_SET_GSBASE:
		case AMD64_GET_XFPUSTATE:
			break;

		case I386_SET_IOPERM:
		default:
#ifdef KTRACE
			if (KTRPOINT(td, KTR_CAPFAIL))
				ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL);
#endif
			return (ECAPMODE);
		}
	}
#endif

	if (uap->op == I386_GET_LDT || uap->op == I386_SET_LDT)
		return (sysarch_ldt(td, uap, UIO_USERSPACE));
	/*
	 * XXXKIB check that the BSM generation code knows to encode
	 * the op argument.
	 */
	AUDIT_ARG_CMD(uap->op);
	switch (uap->op) {
	case I386_GET_IOPERM:
	case I386_SET_IOPERM:
		if ((error = copyin(uap->parms, &iargs,
		    sizeof(struct i386_ioperm_args))) != 0)
			return (error);
		break;
	case I386_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &i386xfpu,
		    sizeof(struct i386_get_xfpustate))) != 0)
			return (error);
		a64xfpu.addr = (void *)(uintptr_t)i386xfpu.addr;
		a64xfpu.len = i386xfpu.len;
		break;
	case AMD64_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &a64xfpu,
		    sizeof(struct amd64_get_xfpustate))) != 0)
			return (error);
		break;
	default:
		break;
	}

	switch (uap->op) {
	case I386_GET_IOPERM:
		error = amd64_get_ioperm(td, &iargs);
		if (error == 0)
			error = copyout(&iargs, uap->parms,
			    sizeof(struct i386_ioperm_args));
		break;
	case I386_SET_IOPERM:
		error = amd64_set_ioperm(td, &iargs);
		break;
	case I386_GET_FSBASE:
		update_pcb_bases(pcb);
		i386base = pcb->pcb_fsbase;
		error = copyout(&i386base, uap->parms, sizeof(i386base));
		break;
	case I386_SET_FSBASE:
		error = copyin(uap->parms, &i386base, sizeof(i386base));
		if (!error) {
			set_pcb_flags(pcb, PCB_FULL_IRET);
			pcb->pcb_fsbase = i386base;
			td->td_frame->tf_fs = _ufssel;
			update_gdt_fsbase(td, i386base);
		}
		break;
	case I386_GET_GSBASE:
		update_pcb_bases(pcb);
		i386base = pcb->pcb_gsbase;
		error = copyout(&i386base, uap->parms, sizeof(i386base));
		break;
	case I386_SET_GSBASE:
		error = copyin(uap->parms, &i386base, sizeof(i386base));
		if (!error) {
			set_pcb_flags(pcb, PCB_FULL_IRET);
			pcb->pcb_gsbase = i386base;
			td->td_frame->tf_gs = _ugssel;
			update_gdt_gsbase(td, i386base);
		}
		break;
	case AMD64_GET_FSBASE:
		update_pcb_bases(pcb);
		error = copyout(&pcb->pcb_fsbase, uap->parms,
		    sizeof(pcb->pcb_fsbase));
		break;

	case AMD64_SET_FSBASE:
		error = copyin(uap->parms, &a64base, sizeof(a64base));
		if (!error) {
			if (a64base < VM_MAXUSER_ADDRESS) {
				set_pcb_flags(pcb, PCB_FULL_IRET);
				pcb->pcb_fsbase = a64base;
				td->td_frame->tf_fs = _ufssel;
			} else
				error = EINVAL;
		}
		break;

	case AMD64_GET_GSBASE:
		update_pcb_bases(pcb);
		error = copyout(&pcb->pcb_gsbase, uap->parms,
		    sizeof(pcb->pcb_gsbase));
		break;

	case AMD64_SET_GSBASE:
		error = copyin(uap->parms, &a64base, sizeof(a64base));
		if (!error) {
			if (a64base < VM_MAXUSER_ADDRESS) {
				set_pcb_flags(pcb, PCB_FULL_IRET);
				pcb->pcb_gsbase = a64base;
				td->td_frame->tf_gs = _ugssel;
			} else
				error = EINVAL;
		}
		break;

	case I386_GET_XFPUSTATE:
	case AMD64_GET_XFPUSTATE:
		if (a64xfpu.len > cpu_max_ext_state_size -
		    sizeof(struct savefpu))
			return (EINVAL);
		fpugetregs(td);
		error = copyout((char *)(get_pcb_user_save_td(td) + 1),
		    a64xfpu.addr, a64xfpu.len);
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}
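
/*
 * Illustrative use from userland (a sketch, not part of this file): a
 * threading library pointing %fs at a thread control block would do
 *
 *	uint64_t base = (uint64_t)tcb;
 *	sysarch(AMD64_SET_FSBASE, &base);
 *
 * where a base at or above VM_MAXUSER_ADDRESS fails with EINVAL, as
 * enforced above.
 */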

int
amd64_set_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	char *iomap;
	struct amd64tss *tssp;
	struct system_segment_descriptor *tss_sd;
	struct pcb *pcb;
	u_int i;
	int error;

	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	if (uap->start > uap->start + uap->length ||
	    uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);

	/*
	 * XXX
	 * While this is restricted to root, we should probably figure out
	 * whether any other driver is using this i/o address, so as not to
	 * cause confusion.  This probably requires a global 'usage registry'.
	 */
	pcb = td->td_pcb;
	if (pcb->pcb_tssp == NULL) {
		tssp = (struct amd64tss *)kmem_malloc(kernel_arena,
		    ctob(IOPAGES + 1), M_WAITOK);
		pmap_pti_add_kva((vm_offset_t)tssp, (vm_offset_t)tssp +
		    ctob(IOPAGES + 1), false);
		iomap = (char *)&tssp[1];
		memset(iomap, 0xff, IOPERM_BITMAP_SIZE);
		critical_enter();
		/* Takes care of tss_rsp0. */
		memcpy(tssp, &common_tss[PCPU_GET(cpuid)],
		    sizeof(struct amd64tss));
		tssp->tss_iobase = sizeof(*tssp);
		pcb->pcb_tssp = tssp;
		tss_sd = PCPU_GET(tss);
		tss_sd->sd_lobase = (u_long)tssp & 0xffffff;
		tss_sd->sd_hibase = ((u_long)tssp >> 24) & 0xfffffffffful;
		tss_sd->sd_type = SDT_SYSTSS;
		ltr(GSEL(GPROC0_SEL, SEL_KPL));
		PCPU_SET(tssp, tssp);
		critical_exit();
	} else
		iomap = (char *)&pcb->pcb_tssp[1];
	for (i = uap->start; i < uap->start + uap->length; i++) {
		if (uap->enable)
			iomap[i >> 3] &= ~(1 << (i & 7));
		else
			iomap[i >> 3] |= (1 << (i & 7));
	}
	return (error);
}

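/*
 * Illustrative use from userland (a sketch, not part of this file):
 * request direct access to I/O ports 0x378-0x37a and read it back:
 *
 *	struct i386_ioperm_args io;
 *
 *	io.start = 0x378;
 *	io.length = 3;
 *	io.enable = 1;
 *	sysarch(I386_SET_IOPERM, &io);
 *	sysarch(I386_GET_IOPERM, &io);
 *
 * A clear bit in the TSS bitmap grants access, hence the inversion
 * between 'enable' and the stored bits in both functions.  The getter
 * below reports the state at 'start' and the length of the run of
 * consecutive ports sharing that state.
 */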
int
amd64_get_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	int i, state;
	char *iomap;

	if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);
	if (td->td_pcb->pcb_tssp == NULL) {
		uap->length = 0;
		goto done;
	}

	iomap = (char *)&td->td_pcb->pcb_tssp[1];

	i = uap->start;
	state = (iomap[i >> 3] >> (i & 7)) & 1;
	uap->enable = !state;
	uap->length = 1;

	for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
		if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
			break;
		uap->length++;
	}

done:
	return (0);
}

/*
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * current process.
 */
static void
set_user_ldt(struct mdproc *mdp)
{

	*PCPU_GET(ldt) = mdp->md_ldt_sd;
	lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
}

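/*
 * Rendezvous action for user_ldt_alloc(): reload the LDT register on
 * every CPU that is currently running a thread belonging to the given
 * vmspace.
 */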
static void
set_user_ldt_rv(struct vmspace *vmsp)
{
	struct thread *td;

	td = curthread;
	if (vmsp != td->td_proc->p_vmspace)
		return;

	set_user_ldt(&td->td_proc->p_md);
}

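/*
 * Allocate or find the LDT of process p; with force set, a fresh LDT
 * is installed even if one already exists.  Called with dt_lock held;
 * the lock is dropped across the allocations and reacquired, so
 * md_ldt is rechecked, and the lock is still held on return.
 */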
struct proc_ldt *
user_ldt_alloc(struct proc *p, int force)
{
	struct proc_ldt *pldt, *new_ldt;
	struct mdproc *mdp;
	struct soft_segment_descriptor sldt;
	vm_offset_t sva;
	vm_size_t sz;

	mtx_assert(&dt_lock, MA_OWNED);
	mdp = &p->p_md;
	if (!force && mdp->md_ldt != NULL)
		return (mdp->md_ldt);
	mtx_unlock(&dt_lock);
	new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
	sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
	sva = kmem_malloc(kernel_arena, sz, M_WAITOK | M_ZERO);
	new_ldt->ldt_base = (caddr_t)sva;
	pmap_pti_add_kva(sva, sva + sz, false);
	new_ldt->ldt_refcnt = 1;
	sldt.ssd_base = sva;
	sldt.ssd_limit = sz - 1;
	sldt.ssd_type = SDT_SYSLDT;
	sldt.ssd_dpl = SEL_KPL;
	sldt.ssd_p = 1;
	sldt.ssd_long = 0;
	sldt.ssd_def32 = 0;
	sldt.ssd_gran = 0;
	mtx_lock(&dt_lock);
	pldt = mdp->md_ldt;
	if (pldt != NULL && !force) {
		pmap_pti_remove_kva(sva, sva + sz);
		kmem_free(kernel_arena, sva, sz);
		free(new_ldt, M_SUBPROC);
		return (pldt);
	}

	if (pldt != NULL) {
		bcopy(pldt->ldt_base, new_ldt->ldt_base, max_ldt_segment *
		    sizeof(struct user_segment_descriptor));
		user_ldt_derefl(pldt);
	}
	critical_enter();
	ssdtosyssd(&sldt, &p->p_md.md_ldt_sd);
	atomic_thread_fence_rel();
	mdp->md_ldt = new_ldt;
	critical_exit();
	smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv, NULL,
	    p->p_vmspace);

	return (mdp->md_ldt);
}

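/*
 * Detach the process LDT and drop its reference.  Called with dt_lock
 * held; the lock is released before returning.  If td is the current
 * thread, the null LDT selector is loaded immediately.
 */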
void
user_ldt_free(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct mdproc *mdp = &p->p_md;
	struct proc_ldt *pldt;

	mtx_assert(&dt_lock, MA_OWNED);
	if ((pldt = mdp->md_ldt) == NULL) {
		mtx_unlock(&dt_lock);
		return;
	}

	critical_enter();
	mdp->md_ldt = NULL;
	atomic_thread_fence_rel();
	bzero(&mdp->md_ldt_sd, sizeof(mdp->md_ldt_sd));
	if (td == curthread)
		lldt(GSEL(GNULL_SEL, SEL_KPL));
	critical_exit();
	user_ldt_deref(pldt);
}

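/*
 * Drop one reference and, on the last one, release the LDT's backing
 * memory.  The caller manages dt_lock itself here, unlike
 * user_ldt_deref() below, which also releases the lock.
 */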
static void
user_ldt_derefl(struct proc_ldt *pldt)
{
	vm_offset_t sva;
	vm_size_t sz;

	if (--pldt->ldt_refcnt == 0) {
		sva = (vm_offset_t)pldt->ldt_base;
		sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
		pmap_pti_remove_kva(sva, sva + sz);
		kmem_free(kernel_arena, sva, sz);
		free(pldt, M_SUBPROC);
	}
}

static void
user_ldt_deref(struct proc_ldt *pldt)
{

	mtx_assert(&dt_lock, MA_OWNED);
	user_ldt_derefl(pldt);
	mtx_unlock(&dt_lock);
}

/*
 * Note for authors of compat layers (linux, etc.): copyout() in
 * the function below is not a problem since it presents data in an
 * arch-specific format (i.e., i386-specific in this case), not in
 * an OS-specific one.
 */
int
amd64_get_ldt(struct thread *td, struct i386_ldt_args *uap)
{
	struct proc_ldt *pldt;
	struct user_segment_descriptor *lp;
	uint64_t *data;
	u_int i, num;
	int error;

#ifdef DEBUG
	printf("amd64_get_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif

	pldt = td->td_proc->p_md.md_ldt;
	if (pldt == NULL || uap->start >= max_ldt_segment || uap->num == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	num = min(uap->num, max_ldt_segment - uap->start);
	lp = &((struct user_segment_descriptor *)(pldt->ldt_base))[uap->start];
	data = malloc(num * sizeof(struct user_segment_descriptor), M_TEMP,
	    M_WAITOK);
	mtx_lock(&dt_lock);
	for (i = 0; i < num; i++)
		data[i] = ((volatile uint64_t *)lp)[i];
	mtx_unlock(&dt_lock);
	error = copyout(data, uap->descs, num *
	    sizeof(struct user_segment_descriptor));
	free(data, M_TEMP);
	if (error == 0)
		td->td_retval[0] = num;
	return (error);
}

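/*
 * Illustrative use from userland (a sketch; the field assignments
 * assume the i386_ldt_args layout declared in <machine/sysarch.h>):
 * ask the kernel to pick a free slot for one descriptor 'sd':
 *
 *	struct i386_ldt_args la;
 *
 *	la.start = LDT_AUTO_ALLOC;
 *	la.descs = &sd;
 *	la.num = 1;
 *	sysarch(I386_SET_LDT, &la);
 *
 * On success the chosen slot is returned in td_retval[0], i.e. as the
 * sysarch(2) return value.
 */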
int
amd64_set_ldt(struct thread *td, struct i386_ldt_args *uap,
    struct user_segment_descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	struct user_segment_descriptor *dp;
	struct proc *p;
	u_int largest_ld, i;
	int error;

#ifdef DEBUG
	printf("amd64_set_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif
	mdp = &td->td_proc->p_md;
	error = 0;

	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	p = td->td_proc;
	if (descs == NULL) {
		/* Free descriptors. */
		if (uap->start == 0 && uap->num == 0)
			uap->num = max_ldt_segment;
		if (uap->num == 0)
			return (EINVAL);
		if ((pldt = mdp->md_ldt) == NULL ||
		    uap->start >= max_ldt_segment)
			return (0);
		largest_ld = uap->start + uap->num;
		if (largest_ld > max_ldt_segment)
			largest_ld = max_ldt_segment;
		if (largest_ld < uap->start)
			return (EINVAL);
		mtx_lock(&dt_lock);
		for (i = uap->start; i < largest_ld; i++)
			((volatile uint64_t *)(pldt->ldt_base))[i] = 0;
		mtx_unlock(&dt_lock);
		return (0);
	}

	if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
		/* Verify the range of descriptors to modify. */
		largest_ld = uap->start + uap->num;
		if (uap->start >= max_ldt_segment ||
		    largest_ld > max_ldt_segment ||
		    largest_ld < uap->start)
			return (EINVAL);
	}

	/* Check descriptors for access violations. */
	for (i = 0; i < uap->num; i++) {
		dp = &descs[i];

		switch (dp->sd_type) {
		case SDT_SYSNULL:	/* system null */
			dp->sd_p = 0;
			break;
		case SDT_SYS286TSS:
		case SDT_SYSLDT:
		case SDT_SYS286BSY:
		case SDT_SYS286CGT:
		case SDT_SYSTASKGT:
		case SDT_SYS286IGT:
		case SDT_SYS286TGT:
		case SDT_SYSNULL2:
		case SDT_SYSTSS:
		case SDT_SYSNULL3:
		case SDT_SYSBSY:
		case SDT_SYSCGT:
		case SDT_SYSNULL4:
		case SDT_SYSIGT:
		case SDT_SYSTGT:
			return (EACCES);

		/* memory segment types */
		case SDT_MEMEC:	  /* memory execute only conforming */
		case SDT_MEMEAC:  /* memory execute only accessed conforming */
		case SDT_MEMERC:  /* memory execute read conforming */
		case SDT_MEMERAC: /* memory execute read accessed conforming */
			/* Must be "present" if executable and conforming. */
			if (dp->sd_p == 0)
				return (EACCES);
			break;
		case SDT_MEMRO:	  /* memory read only */
		case SDT_MEMROA:  /* memory read only accessed */
		case SDT_MEMRW:	  /* memory read write */
		case SDT_MEMRWA:  /* memory read write accessed */
		case SDT_MEMROD:  /* memory read only expand dwn limit */
		case SDT_MEMRODA: /* memory read only expand dwn lim accessed */
		case SDT_MEMRWD:  /* memory read write expand dwn limit */
		case SDT_MEMRWDA: /* memory read write expand dwn lim accessed */
		case SDT_MEME:	  /* memory execute only */
		case SDT_MEMEA:	  /* memory execute only accessed */
		case SDT_MEMER:	  /* memory execute read */
		case SDT_MEMERA:  /* memory execute read accessed */
			break;
		default:
			return (EINVAL);
		}

		/* Only user (ring-3) descriptors may be present. */
		if ((dp->sd_p != 0) && (dp->sd_dpl != SEL_UPL))
			return (EACCES);
	}

	if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
		/* Allocate a free slot. */
		mtx_lock(&dt_lock);
		pldt = user_ldt_alloc(p, 0);
		if (pldt == NULL) {
			mtx_unlock(&dt_lock);
			return (ENOMEM);
		}

		/*
		 * Start scanning a bit up to leave room for NVidia and
		 * Wine, which still use the "Blat" method of allocation.
		 */
		i = 16;
		dp = &((struct user_segment_descriptor *)(pldt->ldt_base))[i];
		for (; i < max_ldt_segment; ++i, ++dp) {
			if (dp->sd_type == SDT_SYSNULL)
				break;
		}
		if (i >= max_ldt_segment) {
			mtx_unlock(&dt_lock);
			return (ENOSPC);
		}
		uap->start = i;
		error = amd64_set_ldt_data(td, i, 1, descs);
		mtx_unlock(&dt_lock);
	} else {
		largest_ld = uap->start + uap->num;
		if (largest_ld > max_ldt_segment)
			return (EINVAL);
		mtx_lock(&dt_lock);
		if (user_ldt_alloc(p, 0) != NULL) {
			error = amd64_set_ldt_data(td, uap->start, uap->num,
			    descs);
		}
		mtx_unlock(&dt_lock);
	}
	if (error == 0)
		td->td_retval[0] = uap->start;
	return (error);
}

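/*
 * Install the (already validated) descriptors.  dt_lock must be held;
 * each descriptor is copied with a single aligned 64-bit store so that
 * a CPU concurrently using the LDT never observes a half-written entry.
 */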
int
amd64_set_ldt_data(struct thread *td, int start, int num,
    struct user_segment_descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	volatile uint64_t *dst, *src;
	int i;

	mtx_assert(&dt_lock, MA_OWNED);

	mdp = &td->td_proc->p_md;
	pldt = mdp->md_ldt;
	dst = (volatile uint64_t *)(pldt->ldt_base);
	src = (volatile uint64_t *)descs;
	for (i = 0; i < num; i++)
		dst[start + i] = src[i];
	return (0);
}