/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_isa.h"
#include "opt_npx.h"
#include "opt_reset.h"
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

_Static_assert(__OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
    "__OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");

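/*
 * Layout of the top of the i386 kernel stack, as implied by the
 * helpers below and by cpu_thread_alloc():
 *
 *	+-------------------------------------+ td_kstack +
 *	| FPU user save area                  |   td_kstack_pages * PAGE_SIZE
 *	| (cpu_max_ext_state_size, rounded up |
 *	|  to XSAVE_AREA_ALIGN)               |
 *	+-------------------------------------+ <- get_pcb_user_save_td()
 *	| struct pcb                          |
 *	+-------------------------------------+ <- get_pcb_td()
 *	| VM86_STACK_SPACE (room for the      |
 *	|  trapframe to grow when entering    |
 *	|  vm86 mode)                         |
 *	| struct trapframe                    |
 *	+-------------------------------------+ <- td_frame
 *	| ...          kernel stack       ... |
 *
 * get_pcb_user_save_pcb() relies on the save area starting immediately
 * after the pcb.
 */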
union savefpu *
get_pcb_user_save_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
	KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
	return ((union savefpu *)p);
}

union savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
	vm_offset_t p;

	p = (vm_offset_t)(pcb + 1);
	return ((union savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
	    sizeof(struct pcb);
	return ((struct pcb *)p);
}

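/*
 * Allocate a detached FPU save area, e.g. for a kernel thread that is
 * allowed to use the FPU.  When XSAVE is in use, the extended-state
 * header must be made valid for XRSTOR, so zero it and mark every
 * enabled component in xstate_bv.
 */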
void *
alloc_fpusave(int flags)
{
	void *res;
	struct savefpu_ymm *sf;

	res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
	if (use_xsave) {
		sf = (struct savefpu_ymm *)res;
		bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
		sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
	}
	return (res);
}

/*
 * Common code shared between cpu_fork() and cpu_copy_thread() for
 * initializing a thread.
 */
static void
copy_thread(struct thread *td1, struct thread *td2)
{
	struct pcb *pcb2;

	pcb2 = td2->td_pcb;

	/* Ensure that td1's pcb is up to date for user threads. */
	if ((td2->td_pflags & TDP_KTHREAD) == 0) {
		MPASS(td1 == curthread);
		td1->td_pcb->pcb_gs = rgs();
		critical_enter();
		if (PCPU_GET(fpcurthread) == td1)
			npxsave(td1->td_pcb->pcb_save);
		critical_exit();
	}

	/* Copy td1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Properly initialize pcb_save */
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);

	/* Kernel threads start with clean NPX and segment bases. */
	if ((td2->td_pflags & TDP_KTHREAD) != 0) {
		pcb2->pcb_gs = _udatasel;
		set_fsbase(td2, 0);
		set_gsbase(td2, 0);
		pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE |
		    PCB_KERNNPX | PCB_KERNNPX_THR);
	} else {
		MPASS((pcb2->pcb_flags & (PCB_KERNNPX | PCB_KERNNPX_THR)) == 0);
		bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
		    cpu_max_ext_state_size);
	}

	/*
	 * Set registers for the trampoline's return to user mode.
	 * Leave space for the return address on the stack.  These are
	 * the kernel mode register values.
	 */
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td2;		/* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline + setidt_disp;
	/*
	 * If we didn't copy the pcb, we'd need to set the following
	 * registers here:
	 * pcb2->pcb_cr3:	cloned above.
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;

	/* Set up to release the spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt, *pldt1;

			mtx_lock_spin(&dt_lock);
			if ((pldt1 = mdp1->md_ldt) != NULL &&
			    pldt1->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_deref(pldt1);
			} else
				mtx_unlock_spin(&dt_lock);
		}
		return;
	}

	/* Point the pcb to the top of the stack */
	pcb2 = get_pcb_td(td2);
	td2->td_pcb = pcb2;

	copy_thread(td1, td2);

	/* Reset debug registers in the new process */
	x86_clear_dbregs(pcb2);

	/* Point mdp2 at p2's mdproc and copy over p1's contents */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The -VM86_STACK_SPACE (-16) is so we can expand the trapframe
	 * if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb -
	    VM86_STACK_SPACE) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

	/* Set child return values. */
	p2->p_sysent->sv_set_fork_retval(td2);

	/*
	 * If the parent process has the trap bit set (i.e. a debugger
	 * had single stepped the process to the system call), we need
	 * to clear the trap flag from the new frame.
	 */
	td2->td_frame->tf_eflags &= ~PSL_T;

	/* Set cr3 for the new process. */
	pcb2->pcb_cr3 = pmap_get_cr3(vmspace_pmap(p2->p_vmspace));

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = NULL;

	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&dt_lock);
	if (mdp2->md_ldt != NULL) {
		if (flags & RFMEM) {
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&dt_lock);

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}

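/*
 * Set the fork() return values in the child's trap frame, as called
 * through sv_set_fork_retval above: %eax = 0 and a cleared carry flag
 * indicate success, and %edx = 1 follows the System V convention for
 * telling the child apart from the parent.
 */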
void
x86_set_fork_retval(struct thread *td)
{
	struct trapframe *frame = td->td_frame;

	frame->tf_eax = 0;		/* Child returns zero */
	frame->tf_eflags &= ~PSL_C;	/* success */
	frame->tf_edx = 1;		/* System V emulation */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	td->td_pcb->pcb_esi = (int)func;	/* function */
	td->td_pcb->pcb_ebx = (int)arg;		/* first arg */
}

void
cpu_exit(struct thread *td)
{

	/*
	 * If this process has a custom LDT, release it.  Reset pcb->pcb_gs
	 * and %gs before we free it in case they refer to an LDT entry.
	 */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	} else
		mtx_unlock_spin(&dt_lock);
}

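/*
 * MD thread-exit cleanup: give up ownership of the FPU if this thread
 * still holds it, and disable any hardware breakpoints the thread had
 * programmed so they do not fire in an unrelated context.
 */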
void
cpu_thread_exit(struct thread *td)
{

	critical_enter();
	if (td == PCPU_GET(fpcurthread))
		npxdrop();
	critical_exit();

	/* Disable any hardware breakpoints. */
	if (td->td_pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		td->td_pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

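/*
 * Release the optional pcb extension (TSS and I/O permission bitmap)
 * so that a recycled thread looks like a freshly allocated one.
 */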
void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != NULL) {
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		pmap_trm_free(pcb->pcb_ext, ctob(IOPAGES + 1));
		pcb->pcb_ext = NULL;
	}
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

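/*
 * Lay out the pcb, trap frame and FPU save area on a newly allocated
 * thread's kernel stack; see the stack layout diagram above.  When
 * XSAVE is used, initialize the extended-state header the same way
 * alloc_fpusave() does.
 */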
void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;
	struct xstate_hdr *xhdr;

	td->td_pcb = pcb = get_pcb_td(td);
	td->td_frame = (struct trapframe *)((caddr_t)pcb -
	    VM86_STACK_SPACE) - 1;
	pcb->pcb_ext = NULL;
	pcb->pcb_save = get_pcb_user_save_pcb(pcb);
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
		bzero(xhdr, sizeof(*xhdr));
		xhdr->xstate_bv = xsave_mask;
	}
}

void
cpu_thread_free(struct thread *td)
{

	cpu_thread_clean(td);
}

bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{

	return (true);
}

int
cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
    int com __unused, void *data __unused)
{

	return (EINVAL);
}

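/*
 * Store the syscall result in the trap frame according to the i386
 * convention: results go in %eax/%edx and the carry flag distinguishes
 * error from success.  ERESTART rewinds %eip over the syscall
 * instruction so that it is re-executed.
 */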
void
cpu_set_syscall_retval(struct thread *td, int error)
{

	switch (error) {
	case 0:
		td->td_frame->tf_eax = td->td_retval[0];
		td->td_frame->tf_edx = td->td_retval[1];
		td->td_frame->tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
		 * 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		td->td_frame->tf_eip -= td->td_frame->tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
		td->td_frame->tf_eax = error;
		td->td_frame->tf_eflags |= PSL_C;
		break;
	}
}

/*
 * Initialize machine state, mostly the pcb and trap frame, for a new
 * thread that is about to return to userspace.  Put enough state in
 * the new thread's PCB to get it to go back to fork_return(), which
 * finalizes the thread state and handles the peculiarities of the
 * first return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	copy_thread(td0, td);

	/*
	 * Copy user general-purpose registers.
	 *
	 * Some of these registers are rewritten by cpu_set_upcall()
	 * and linux_set_upcall().
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * If the current thread has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame.  Otherwise, the new thread will
	 * receive a (likely unexpected) SIGTRAP when it executes the first
	 * instruction after returning to userland.
	 */
	td->td_frame->tf_eflags &= ~PSL_T;
}

/*
 * Set up the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the entry
	 * function.
	 */
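	/*
	 * The %esp computation below leaves the stack positioned as if
	 * entry had been reached by a call on a 16-byte aligned stack:
	 * the sentinel return address sits at %esp and the argument at
	 * %esp + 4, so (%esp + 4) is 16-byte aligned at entry.
	 */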
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
	td->td_frame->tf_eip = (int)entry;

	/* Return address sentinel value to stop stack unwinding. */
	suword((void *)td->td_frame->tf_esp, 0);

	/* Pass the argument to the entry point. */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)arg);
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct segment_descriptor sd;
	uint32_t base;

	/*
	 * Construct a descriptor and store it in the pcb for
	 * the next context switch.  Also store it in the gdt
	 * so that the reload of %gs will activate it at
	 * return to userland.
	 */
	base = (uint32_t)tls_base;
	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sd.sd_hilimit = 0xf;
	sd.sd_type = SDT_MEMRWA;
	sd.sd_dpl = SEL_UPL;
	sd.sd_p = 1;
	sd.sd_xx = 0;
	sd.sd_def32 = 1;
	sd.sd_gran = 1;
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	if (td == curthread) {
		PCPU_GET(fsgs_gdt)[1] = sd;
		load_gs(GSEL(GUGS_SEL, SEL_UPL));
	}
	critical_exit();
	return (0);
}

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

/*
 * MD part of sf_buf_alloc(): map the sf_buf's page at its KVA and,
 * under SMP, shoot down any stale mappings of that KVA on other CPUs.
 */
void
sf_buf_map(struct sf_buf *sf, int flags)
{

	pmap_sf_buf_map(sf);
#ifdef SMP
	sf_buf_shootdown(sf, flags);
#endif
}

#ifdef SMP
static void
sf_buf_shootdown_curcpu_cb(pmap_t pmap __unused,
    vm_offset_t addr1 __unused, vm_offset_t addr2 __unused)
{
}

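/*
 * Invalidate the sf_buf's KVA on CPUs that may still cache an old
 * translation: the local TLB entry is flushed directly, and unless the
 * mapping is CPU-private (SFB_CPUPRIVATE), the CPUs not yet recorded
 * in sf->cpumask are handled with a targeted IPI shootdown.
 */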
void
sf_buf_shootdown(struct sf_buf *sf, int flags)
{
	cpuset_t other_cpus;
	u_int cpuid;

	sched_pin();
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sf->cpumask)) {
		CPU_SET(cpuid, &sf->cpumask);
		invlpg(sf->kva);
	}
	if ((flags & SFB_CPUPRIVATE) == 0) {
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_ANDNOT(&other_cpus, &other_cpus, &sf->cpumask);
		if (!CPU_EMPTY(&other_cpus)) {
			CPU_OR(&sf->cpumask, &sf->cpumask, &other_cpus);
			smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap,
			    sf_buf_shootdown_curcpu_cb);
		}
	}
	sched_unpin();
}
#endif

/*
 * MD part of sf_buf_free().
 */
int
sf_buf_unmap(struct sf_buf *sf)
{

	return (0);
}

static void
sf_buf_invalidate(struct sf_buf *sf)
{
	vm_page_t m = sf->m;

	/*
	 * Use pmap_qenter to update the pte for
	 * existing mapping, in particular, the PAT
	 * settings are recalculated.
	 */
	pmap_qenter(sf->kva, &m, 1);
	pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE);
}

/*
 * Invalidate the cache lines that may belong to the page, if a
 * (possibly stale) sf_buf mapping of the page exists.  Returns TRUE
 * when a mapping was found and the cache was invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{

	return (sf_buf_process_page(m, sf_buf_invalidate));
}