/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_isa.h"
#include "opt_npx.h"
#include "opt_reset.h"
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

_Static_assert(__OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
    "__OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");

union savefpu *
get_pcb_user_save_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
	KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
	return ((union savefpu *)p);
}

union savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
	vm_offset_t p;

	p = (vm_offset_t)(pcb + 1);
	return ((union savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
	    sizeof(struct pcb);
	return ((struct pcb *)p);
}
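
/*
 * A sketch of the stack layout the helpers above imply, from the top
 * of a thread's kernel stack downward (exact sizes depend on
 * cpu_max_ext_state_size and XSAVE_AREA_ALIGN; the trap frame is
 * placed in cpu_thread_alloc() below):
 *
 *	XSAVE-aligned FPU user save area	(get_pcb_user_save_td())
 *	struct pcb				(get_pcb_td())
 *	VM86_STACK_SPACE, struct trapframe	(td_frame)
 *	... remainder of the kernel stack ...
 */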

void *
alloc_fpusave(int flags)
{
	void *res;
	struct savefpu_ymm *sf;

	res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
	if (use_xsave) {
		sf = (struct savefpu_ymm *)res;
		bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
		sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
	}
	return (res);
}
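
/*
 * Illustrative use (a sketch; the callers live elsewhere, e.g. in the
 * npx(4) code): the returned buffer is sized and initialized to act as
 * a complete FPU save image for every component in xsave_mask, and is
 * released with free(9) using the same malloc type:
 *
 *	union savefpu *sv;
 *
 *	sv = alloc_fpusave(M_WAITOK);
 *	...
 *	free(sv, M_DEVBUF);
 */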

/*
 * Common code shared between cpu_fork() and cpu_copy_thread() for
 * initializing a thread.
 */
static void
copy_thread(struct thread *td1, struct thread *td2)
{
	struct pcb *pcb2;

	pcb2 = td2->td_pcb;

	/* Ensure that td1's pcb is up to date for user threads. */
	if ((td2->td_pflags & TDP_KTHREAD) == 0) {
		MPASS(td1 == curthread);
		td1->td_pcb->pcb_gs = rgs();
		critical_enter();
		if (PCPU_GET(fpcurthread) == td1)
			npxsave(td1->td_pcb->pcb_save);
		critical_exit();
	}

	/* Copy td1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Properly initialize pcb_save */
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);

	/* Kernel threads start with clean NPX and segment bases. */
	if ((td2->td_pflags & TDP_KTHREAD) != 0) {
		pcb2->pcb_gs = _udatasel;
		set_fsbase(td2, 0);
		set_gsbase(td2, 0);
		pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE |
		    PCB_KERNNPX | PCB_KERNNPX_THR);
	} else {
		MPASS((pcb2->pcb_flags & (PCB_KERNNPX | PCB_KERNNPX_THR)) == 0);
		bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
		    cpu_max_ext_state_size);
	}

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td2;		/* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline + setidt_disp;
	/*
	 * If we didn't copy the pcb, we'd need to do the following registers:
	 * pcb2->pcb_cr3:	cloned above.
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}
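
/*
 * A sketch of the new thread's first pass through the scheduler
 * (assuming the standard fork_trampoline()/fork_exit() path; the
 * details live in swtch.s and kern_fork.c): cpu_switch() loads
 * %esp/%eip from pcb_esp/pcb_eip and "returns" into fork_trampoline(),
 * which hands %esi (the callout, fork_return), %ebx (the new thread)
 * and the trap frame to fork_exit(callout, arg, frame); fork_exit()
 * then completes the switch and arranges the return to user mode
 * through the copied trap frame.
 */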

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt, *pldt1;

			mtx_lock_spin(&dt_lock);
			if ((pldt1 = mdp1->md_ldt) != NULL &&
			    pldt1->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_deref(pldt1);
			} else
				mtx_unlock_spin(&dt_lock);
		}
		return;
	}

	/* Point the pcb to the top of the stack */
	pcb2 = get_pcb_td(td2);
	td2->td_pcb = pcb2;

	copy_thread(td1, td2);

	/* Reset debug registers in the new process */
	x86_clear_dbregs(pcb2);

	/* Point mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The -VM86_STACK_SPACE (-16) is so we can expand the trapframe
	 * if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb -
	    VM86_STACK_SPACE) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

	td2->td_frame->tf_eax = 0;		/* Child returns zero */
	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
	/* Historical fork() convention: %edx is 1 in the child. */
	td2->td_frame->tf_edx = 1;

	/*
	 * If the parent process has the trap bit set (i.e. a debugger
	 * had single stepped the process to the system call), we need
	 * to clear the trap flag from the new frame.
	 */
	td2->td_frame->tf_eflags &= ~PSL_T;

	/* Set cr3 for the new process. */
	pcb2->pcb_cr3 = pmap_get_cr3(vmspace_pmap(p2->p_vmspace));

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = NULL;

	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&dt_lock);
	if (mdp2->md_ldt != NULL) {
		if (flags & RFMEM) {
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&dt_lock);

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	td->td_pcb->pcb_esi = (int) func;	/* function */
	td->td_pcb->pcb_ebx = (int) arg;	/* first arg */
}
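
/*
 * Illustrative use (a sketch; the real callers are in kern_kthread.c):
 * kernel thread creation forks a stopped thread and then points it at
 * its handler before letting it run, roughly:
 *
 *	error = fork1(&thread0, RFSTOPPED | ..., 0, &p2, ...);
 *	td = FIRST_THREAD_IN_PROC(p2);
 *	cpu_fork_kthread_handler(td, func, arg);
 *	sched_add(td, SRQ_BORING);
 */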

void
cpu_exit(struct thread *td)
{

	/*
	 * If this process has a custom LDT, release it.  Reset pcb_gs
	 * and %gs before we free it in case they refer to an LDT entry.
	 */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	} else
		mtx_unlock_spin(&dt_lock);
}

void
cpu_thread_exit(struct thread *td)
{

	critical_enter();
	if (td == PCPU_GET(fpcurthread))
		npxdrop();
	critical_exit();

	/* Disable any hardware breakpoints. */
	if (td->td_pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		td->td_pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != NULL) {
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		pmap_trm_free(pcb->pcb_ext, ctob(IOPAGES + 1));
		pcb->pcb_ext = NULL;
	}
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;
	struct xstate_hdr *xhdr;

	td->td_pcb = pcb = get_pcb_td(td);
	td->td_frame = (struct trapframe *)((caddr_t)pcb -
	    VM86_STACK_SPACE) - 1;
	pcb->pcb_ext = NULL;
	pcb->pcb_save = get_pcb_user_save_pcb(pcb);
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
		bzero(xhdr, sizeof(*xhdr));
		xhdr->xstate_bv = xsave_mask;
	}
}

void
cpu_thread_free(struct thread *td)
{

	cpu_thread_clean(td);
}

bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{

	return (true);
}

int
cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
    int com __unused, void *data __unused)
{

	return (EINVAL);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{

	switch (error) {
	case 0:
		td->td_frame->tf_eax = td->td_retval[0];
		td->td_frame->tf_edx = td->td_retval[1];
		td->td_frame->tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
		 * 0x80 is 2 bytes.  We saved this in tf_err.  For
		 * example, after an "int $0x80" tf_err is 2, so backing
		 * %eip up by 2 points it at the instruction again and
		 * the syscall is re-executed on return to user mode.
		 */
		td->td_frame->tf_eip -= td->td_frame->tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
		td->td_frame->tf_eax = error;
		td->td_frame->tf_eflags |= PSL_C;
		break;
	}
}

/*
 * Initialize machine state, mostly pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go back to the fork_return(), which
 * finalizes the thread state and handles peculiarities of the first
 * return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	copy_thread(td0, td);

	/*
	 * Copy user general-purpose registers.
	 *
	 * Some of these registers are rewritten by cpu_set_upcall()
	 * and linux_set_upcall().
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * If the current thread has the trap bit set (i.e. a debugger
	 * had single stepped the process to the system call), we need
	 * to clear the trap flag from the new frame.  Otherwise, the
	 * new thread will receive a (likely unexpected) SIGTRAP when
	 * it executes the first instruction after returning to
	 * userland.
	 */
	td->td_frame->tf_eflags &= ~PSL_T;
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the entry
	 * function.
	 */
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
	td->td_frame->tf_eip = (int)entry;
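
	/*
	 * Worked example of the %esp arithmetic above: with, say,
	 * ss_sp == 0x20000000 and ss_size == 0x10000, the top word of
	 * the stack is at 0x2000fffc; rounding down to a 16-byte
	 * boundary gives 0x2000fff0, and the final -4 leaves room for
	 * the fake return address, so tf_esp == 0x2000ffec.  The entry
	 * function thus starts exactly as if it had been reached by a
	 * CALL from an aligned stack: *esp holds the (NULL) return
	 * address stored below and *(esp + 4) holds its argument.
	 */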

	/* Return address sentinel value to stop stack unwinding. */
	suword((void *)td->td_frame->tf_esp, 0);

	/* Pass the argument to the entry point. */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)arg);
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct segment_descriptor sd;
	uint32_t base;

	/*
	 * Construct a descriptor and store it in the pcb for
	 * the next context switch.  Also store it in the gdt
	 * so that the load of tf_gs into %gs will activate it
	 * at return to userland.
	 */
	base = (uint32_t)tls_base;
	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sd.sd_hilimit = 0xf;
	sd.sd_type = SDT_MEMRWA;
	sd.sd_dpl = SEL_UPL;
	sd.sd_p = 1;
	sd.sd_xx = 0;
	sd.sd_def32 = 1;
	sd.sd_gran = 1;
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	if (td == curthread) {
		PCPU_GET(fsgs_gdt)[1] = sd;
		load_gs(GSEL(GUGS_SEL, SEL_UPL));
	}
	critical_exit();
	return (0);
}
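
/*
 * A sketch of why the base address is split above: an x86 segment
 * descriptor scatters the 32-bit base across two fields, so for, say,
 * tls_base == 0x12345678 the code stores 0x345678 in sd_lobase and
 * 0x12 in sd_hibase, while the limit fields (0xfffff units of 4KB
 * pages, since sd_gran is set) describe the full 4GB address space.
 */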

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

/*
 * MD part of sf_buf_alloc().  Map the sf_buf's KVA to its page and
 * shoot down any stale TLB entries for that mapping.
 */
void
sf_buf_map(struct sf_buf *sf, int flags)
{

	pmap_sf_buf_map(sf);
#ifdef SMP
	sf_buf_shootdown(sf, flags);
#endif
}

#ifdef SMP
static void
sf_buf_shootdown_curcpu_cb(pmap_t pmap __unused,
    vm_offset_t addr1 __unused, vm_offset_t addr2 __unused)
{
}

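/*
 * Ensure the sf_buf's KVA mapping is visible on this CPU and, unless
 * the mapping is CPU-private (SFB_CPUPRIVATE), on every other CPU
 * whose TLB may still hold a stale entry for it.  sf->cpumask tracks
 * the CPUs that already have the current mapping, so only the
 * remaining ones are sent an invalidation IPI.
 */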
void
sf_buf_shootdown(struct sf_buf *sf, int flags)
{
	cpuset_t other_cpus;
	u_int cpuid;

	sched_pin();
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sf->cpumask)) {
		CPU_SET(cpuid, &sf->cpumask);
		invlpg(sf->kva);
	}
	if ((flags & SFB_CPUPRIVATE) == 0) {
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_ANDNOT(&other_cpus, &sf->cpumask);
		if (!CPU_EMPTY(&other_cpus)) {
			CPU_OR(&sf->cpumask, &other_cpus);
			smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap,
			    sf_buf_shootdown_curcpu_cb);
		}
	}
	sched_unpin();
}
#endif

/*
 * MD part of sf_buf_free().
 */
int
sf_buf_unmap(struct sf_buf *sf)
{

	return (0);
}

static void
sf_buf_invalidate(struct sf_buf *sf)
{
	vm_page_t m = sf->m;

	/*
	 * Use pmap_qenter to update the pte for
	 * existing mapping, in particular, the PAT
	 * settings are recalculated.
	 */
	pmap_qenter(sf->kva, &m, 1);
	pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE);
}

/*
 * Invalidate the cache lines that may belong to the page, if a
 * (possibly stale) sf_buf mapping of the page exists.  Returns TRUE
 * when a mapping was found and the cache was invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{

	return (sf_buf_process_page(m, sf_buf_invalidate));
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return (0);
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return (1);
}