/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_isa.h"
#include "opt_npx.h"
#include "opt_reset.h"
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

_Static_assert(__OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
    "__OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");

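/*
 * The per-thread kernel stack is laid out as follows; the accessors
 * below just do the arithmetic against the stack top (a sketch derived
 * from get_pcb_td(), get_pcb_user_save_td() and the trap frame setup
 * in cpu_fork()/cpu_thread_alloc()):
 *
 *	td_kstack + td_kstack_pages * PAGE_SIZE:
 *		[ user FPU/XSAVE save area (XSAVE_AREA_ALIGN aligned) ]
 *		[ struct pcb ]
 *		[ VM86_STACK_SPACE spare for trap frame expansion ]
 *		[ struct trapframe ]
 *		[ ... remainder of the kernel stack, growing down ... ]
 */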
union savefpu *
get_pcb_user_save_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
	KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
	return ((union savefpu *)p);
}

union savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
	vm_offset_t p;

	p = (vm_offset_t)(pcb + 1);
	return ((union savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
	    sizeof(struct pcb);
	return ((struct pcb *)p);
}

void *
alloc_fpusave(int flags)
{
	void *res;
	struct savefpu_ymm *sf;

	res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
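	/*
	 * XRSTOR faults if reserved fields of the XSAVE header are
	 * non-zero, so the header must start out zeroed; setting
	 * xstate_bv to xsave_mask marks every enabled state component
	 * as present in the buffer.
	 */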
	if (use_xsave) {
		sf = (struct savefpu_ymm *)res;
		bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
		sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
	}
	return (res);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt, *pldt1;

			mtx_lock_spin(&dt_lock);
			if ((pldt1 = mdp1->md_ldt) != NULL &&
			    pldt1->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				set_user_ldt(mdp1);
				user_ldt_deref(pldt1);
			} else
				mtx_unlock_spin(&dt_lock);
		}
		return;
	}

	/* Ensure that td1's pcb is up to date. */
	if (td1 == curthread)
		td1->td_pcb->pcb_gs = rgs();
	critical_enter();
	if (PCPU_GET(fpcurthread) == td1)
		npxsave(td1->td_pcb->pcb_save);
	critical_exit();
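	/*
	 * If td1 owned the FPU, npxsave() above flushed the live
	 * hardware state into td1's save area, so the pcb and save
	 * area copies below capture up-to-date FPU contents.
	 */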

	/* Point the pcb to the top of the stack */
	pcb2 = get_pcb_td(td2);
	td2->td_pcb = pcb2;

	/* Copy td1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Properly initialize pcb_save */
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
	    cpu_max_ext_state_size);

	/* Point mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The -VM86_STACK_SPACE (-16) is so we can expand the trapframe
	 * if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb -
	    VM86_STACK_SPACE) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

	td2->td_frame->tf_eax = 0;		/* Child returns zero */
	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
	td2->td_frame->tf_edx = 1;

	/*
	 * If the parent process has the trap bit set (i.e. a debugger
	 * had single stepped the process to the system call), we need
	 * to clear the trap flag from the new frame.
	 */
	td2->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_cr3 = pmap_get_cr3(vmspace_pmap(p2->p_vmspace));
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
	pcb2->pcb_ebx = (int)td2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline + setidt_disp;
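	/*
	 * setidt_disp is believed to be the displacement of the
	 * relocated low-level entry code (non-zero when the i386 4/4
	 * split PTI mitigation is active), so the child resumes at the
	 * copy of fork_trampoline() that is actually mapped.
	 */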
	/*-
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&dt_lock);
	if (mdp2->md_ldt != NULL) {
		if (flags & RFMEM) {
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&dt_lock);

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}
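
/*
 * A sketch of the child's first run, assuming the usual path through
 * the i386 assembler glue (swtch.s/exception.s):
 *
 *	cpu_switch()		loads pcb_esp/pcb_eip prepared above
 *	 -> fork_trampoline()	passes %esi (callout), %ebx (arg) and
 *				the trap frame to fork_exit()
 *	  -> fork_exit()	drops the spin count, calls
 *				fork_return(td2, frame)
 *	   -> return to user mode through the copied trap frame
 */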

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	td->td_pcb->pcb_esi = (int) func;	/* function */
	td->td_pcb->pcb_ebx = (int) arg;	/* first arg */
}
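
/*
 * A typical caller is kproc_create() (a sketch; see kern_kthread.c):
 * after fork1() returns the new process, it runs
 *
 *	cpu_fork_kthread_handler(FIRST_THREAD_IN_PROC(p2), func, arg);
 *
 * so that fork_exit() invokes func(arg, frame) in place of
 * fork_return(), and the thread never returns to user mode.
 */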

void
cpu_exit(struct thread *td)
{

	/*
	 * If this process has a custom LDT, release it.  Reset pcb_gs
	 * and %gs before we free it in case they refer to an LDT entry.
	 */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	} else
		mtx_unlock_spin(&dt_lock);
}
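
/*
 * Note the asymmetric unlocking above: user_ldt_free(), like
 * user_ldt_deref() in cpu_fork(), is expected to drop dt_lock itself,
 * so only the LDT-less branch unlocks explicitly.
 */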

void
cpu_thread_exit(struct thread *td)
{

	critical_enter();
	if (td == PCPU_GET(fpcurthread))
		npxdrop();
	critical_exit();

	/* Disable any hardware breakpoints. */
	if (td->td_pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		td->td_pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != NULL) {
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		pmap_trm_free(pcb->pcb_ext, ctob(IOPAGES + 1));
		pcb->pcb_ext = NULL;
	}
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;
	struct xstate_hdr *xhdr;

	td->td_pcb = pcb = get_pcb_td(td);
	td->td_frame = (struct trapframe *)((caddr_t)pcb -
	    VM86_STACK_SPACE) - 1;
	pcb->pcb_ext = NULL;
	pcb->pcb_save = get_pcb_user_save_pcb(pcb);
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
		bzero(xhdr, sizeof(*xhdr));
		xhdr->xstate_bv = xsave_mask;
	}
}

void
cpu_thread_free(struct thread *td)
{

	cpu_thread_clean(td);
}

bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{

	return (true);
}

int
cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
    int com __unused, void *data __unused)
{

	return (EINVAL);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{

	switch (error) {
	case 0:
		td->td_frame->tf_eax = td->td_retval[0];
		td->td_frame->tf_edx = td->td_retval[1];
		td->td_frame->tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
		 * 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		td->td_frame->tf_eip -= td->td_frame->tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
		td->td_frame->tf_eax = error;
		td->td_frame->tf_eflags |= PSL_C;
		break;
	}
}
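
/*
 * For the ERESTART case above: the i386 syscall entry paths record the
 * length of the trapping instruction in tf_err (2 for "int $0x80", 7
 * for the old lcall gate), so subtracting it from tf_eip re-arms the
 * same syscall instruction for when the thread next returns to user
 * mode.
 */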

/*
 * Initialize machine state, mostly the pcb and trap frame, for a new
 * thread that is about to return to userspace.  Put enough state in
 * the new thread's PCB to get it to go back to fork_return(), which
 * finalizes the thread state and handles the peculiarities of the
 * first return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE |
	    PCB_KERNNPX);
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
	    cpu_max_ext_state_size);

	/*
	 * Create a new fresh stack for the new thread.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * If the current thread has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame.  Otherwise, the new thread will
	 * receive a (likely unexpected) SIGTRAP when it executes the first
	 * instruction after returning to userland.
	 */
	td->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_edi = 0;
	pcb2->pcb_esi = (int)fork_return;	/* trampoline arg */
	pcb2->pcb_ebp = 0;
	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td;		/* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline + setidt_disp;
	pcb2->pcb_gs = rgs();
	/*
	 * If we didn't copy the pcb, we'd need to do the following registers:
	 * pcb2->pcb_cr3:	cloned above.
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the entry
	 * function.
	 */
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
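	/*
	 * The arithmetic above yields the conventional call-site layout:
	 * the word at %esp will hold a fake return address and the
	 * argument lands at %esp + 4, which is 16-byte aligned; this
	 * matches the stack alignment compilers commonly assume on
	 * entry to an i386 function.
	 */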
	td->td_frame->tf_eip = (int)entry;

	/* Return address sentinel value to stop stack unwinding. */
	suword((void *)td->td_frame->tf_esp, 0);

	/* Pass the argument to the entry point. */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)arg);
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct segment_descriptor sd;
	uint32_t base;

	/*
	 * Construct a descriptor and store it in the pcb for
	 * the next context switch.  Also store it in the gdt
	 * so that the load of tf_gs into %gs will activate it
	 * at return to userland.
	 */
	base = (uint32_t)tls_base;
	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sd.sd_hilimit = 0xf;
	sd.sd_type = SDT_MEMRWA;
	sd.sd_dpl = SEL_UPL;
	sd.sd_p = 1;
	sd.sd_xx = 0;
	sd.sd_def32 = 1;
	sd.sd_gran = 1;
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	if (td == curthread) {
		PCPU_GET(fsgs_gdt)[1] = sd;
		load_gs(GSEL(GUGS_SEL, SEL_UPL));
	}
	critical_exit();
	return (0);
}
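
/*
 * Userland reaches this through sysarch(2); an illustrative flow:
 *
 *	sysarch(I386_SET_GSBASE, &base) -> cpu_set_user_tls(td, base)
 *
 * after which thread-local storage is addressed relative to %gs,
 * e.g. "movl %gs:0, %eax" to fetch the TCB pointer.
 */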

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

/*
 * MD part of sf_buf_alloc(): establish the kernel mapping for the
 * buffer and, on SMP, invalidate any stale TLB entries for its KVA.
 */
void
sf_buf_map(struct sf_buf *sf, int flags)
{

	pmap_sf_buf_map(sf);
#ifdef SMP
	sf_buf_shootdown(sf, flags);
#endif
}

#ifdef SMP
static void
sf_buf_shootdown_curcpu_cb(pmap_t pmap __unused,
    vm_offset_t addr1 __unused, vm_offset_t addr2 __unused)
{
}

void
sf_buf_shootdown(struct sf_buf *sf, int flags)
{
	cpuset_t other_cpus;
	u_int cpuid;

	sched_pin();
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sf->cpumask)) {
		CPU_SET(cpuid, &sf->cpumask);
		invlpg(sf->kva);
	}
	if ((flags & SFB_CPUPRIVATE) == 0) {
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_ANDNOT(&other_cpus, &sf->cpumask);
		if (!CPU_EMPTY(&other_cpus)) {
			CPU_OR(&sf->cpumask, &other_cpus);
			smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap,
			    sf_buf_shootdown_curcpu_cb);
		}
	}
	sched_unpin();
}
#endif
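
/*
 * The shootdown is lazy: sf->cpumask tracks the CPUs whose TLBs are
 * known to be coherent for sf->kva.  sf_buf_shootdown() only
 * invalidates locally when this CPU is missing from the mask, and
 * SFB_CPUPRIVATE callers skip the cross-CPU IPI entirely; remote CPUs
 * are brought up to date by the next non-private mapping of the
 * buffer.
 */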

/*
 * MD part of sf_buf_free().
 */
int
sf_buf_unmap(struct sf_buf *sf)
{

	return (0);
}

static void
sf_buf_invalidate(struct sf_buf *sf)
{
	vm_page_t m = sf->m;

	/*
	 * Use pmap_qenter to update the pte for
	 * existing mapping, in particular, the PAT
	 * settings are recalculated.
	 */
	pmap_qenter(sf->kva, &m, 1);
	pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE);
}

/*
 * Invalidate the cache lines that may belong to the page, if a
 * (possibly old) sf_buf mapping of the page exists.  Returns TRUE
 * when a mapping was found and the cache was invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{

	return (sf_buf_process_page(m, sf_buf_invalidate));
}
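
/*
 * This matters when a page's memory attributes change while an old
 * sf_buf mapping may still be cached with the previous PAT setting;
 * flushing here keeps the aliases coherent.  (The exact call sites
 * live in the pmap layer, not in this file.)
 */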

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
	/* The ISA ``memory hole'' (legacy VGA/ROM/BIOS space, 640K-1M). */
	if (addr >= 0xa0000 && addr < 0x100000)
		return (0);
#endif

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return (1);
}