FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/vm_machdep.c

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_machdep.c  7.3 (Berkeley) 5/13/91
 *      Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/amd64/vm_machdep.c 345228 2019-03-16 11:44:33Z kib $");

#include "opt_isa.h"
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/wait.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

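/*
 * Compile-time checks that the OFFSETOF_* constants used by low-level
 * code still match the layout of struct pcpu.
 */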
_Static_assert(OFFSETOF_CURTHREAD == offsetof(struct pcpu, pc_curthread),
    "OFFSETOF_CURTHREAD does not correspond with offset of pc_curthread.");
_Static_assert(OFFSETOF_CURPCB == offsetof(struct pcpu, pc_curpcb),
    "OFFSETOF_CURPCB does not correspond with offset of pc_curpcb.");
_Static_assert(OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
    "OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");

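/*
 * Kernel stack layout: the user FPU/XSAVE save area occupies the top of
 * the stack, aligned to XSAVE_AREA_ALIGN; the pcb sits immediately below
 * it and the trap frame immediately below the pcb (see cpu_thread_alloc()).
 * The helpers below compute these addresses.
 */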
struct savefpu *
get_pcb_user_save_td(struct thread *td)
{
        vm_offset_t p;

        p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
            roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
        KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area"));
        return ((struct savefpu *)p);
}

struct savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
        vm_offset_t p;

        p = (vm_offset_t)(pcb + 1);
        return ((struct savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{
        vm_offset_t p;

        p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
            roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) -
            sizeof(struct pcb);
        return ((struct pcb *)p);
}

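/*
 * Allocate a buffer large enough for the machine's full extended FPU
 * state.  When XSAVE is in use, initialize the extended-state header so
 * that xstate_bv covers every component enabled in xsave_mask.
 */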
void *
alloc_fpusave(int flags)
{
        void *res;
        struct savefpu_ymm *sf;

        res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
        if (use_xsave) {
                sf = (struct savefpu_ymm *)res;
                bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
                sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
        }
        return (res);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
        struct proc *p1;
        struct pcb *pcb2;
        struct mdproc *mdp1, *mdp2;
        struct proc_ldt *pldt;

        p1 = td1->td_proc;
        if ((flags & RFPROC) == 0) {
                if ((flags & RFMEM) == 0) {
                        /* unshare user LDT */
                        mdp1 = &p1->p_md;
                        mtx_lock(&dt_lock);
                        if ((pldt = mdp1->md_ldt) != NULL &&
                            pldt->ldt_refcnt > 1 &&
                            user_ldt_alloc(p1, 1) == NULL)
                                panic("could not copy LDT");
                        mtx_unlock(&dt_lock);
                }
                return;
        }

        /* Ensure that td1's pcb is up to date. */
        fpuexit(td1);
        update_pcb_bases(td1->td_pcb);

        /* Point the pcb to the top of the stack */
        pcb2 = get_pcb_td(td2);
        td2->td_pcb = pcb2;

        /* Copy td1's pcb */
        bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

        /* Properly initialize pcb_save */
        pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
        bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
            cpu_max_ext_state_size);

        /* Point mdproc and then copy over td1's contents */
        mdp2 = &p2->p_md;
        bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

        /*
         * Create a new fresh stack for the new process.
         * Copy the trap frame for the return to user mode as if from a
         * syscall.  This copies most of the user mode register values.
         */
        td2->td_frame = (struct trapframe *)td2->td_pcb - 1;
        bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

        td2->td_frame->tf_rax = 0;              /* Child returns zero */
        td2->td_frame->tf_rflags &= ~PSL_C;     /* success */
        td2->td_frame->tf_rdx = 1;

        /*
         * If the parent process has the trap bit set (i.e. a debugger had
         * single stepped the process to the system call), we need to clear
         * the trap flag from the new frame unless the debugger had set PF_FORK
         * on the parent.  Otherwise, the child will receive a (likely
         * unexpected) SIGTRAP when it executes the first instruction after
         * returning to userland.
         */
        if ((p1->p_pfsflags & PF_FORK) == 0)
                td2->td_frame->tf_rflags &= ~PSL_T;

        /*
         * Set registers for trampoline to user mode.  Leave space for the
         * return address on stack.  These are the kernel mode register values.
         */
        pcb2->pcb_r12 = (register_t)fork_return;        /* fork_trampoline argument */
        pcb2->pcb_rbp = 0;
        pcb2->pcb_rsp = (register_t)td2->td_frame - sizeof(void *);
        pcb2->pcb_rbx = (register_t)td2;                /* fork_trampoline argument */
        pcb2->pcb_rip = (register_t)fork_trampoline;
        /*-
         * pcb2->pcb_dr*:       cloned above.
         * pcb2->pcb_savefpu:   cloned above.
         * pcb2->pcb_flags:     cloned above.
         * pcb2->pcb_onfault:   cloned above (always NULL here?).
         * pcb2->pcb_[fg]sbase: cloned above
         */

        /* Setup to release spin count in fork_exit(). */
        td2->td_md.md_spinlock_count = 1;
        td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
        td2->td_md.md_invl_gen.gen = 0;

        /* As on i386, do not copy the I/O permission bitmap. */
        pcb2->pcb_tssp = NULL;

        /* New segment registers. */
        set_pcb_flags_raw(pcb2, PCB_FULL_IRET);

        /* Copy the LDT, if necessary. */
        mdp1 = &td1->td_proc->p_md;
        mdp2 = &p2->p_md;
        if (mdp1->md_ldt == NULL) {
                mdp2->md_ldt = NULL;
                return;
        }
        mtx_lock(&dt_lock);
        if (mdp1->md_ldt != NULL) {
                if (flags & RFMEM) {
                        mdp1->md_ldt->ldt_refcnt++;
                        mdp2->md_ldt = mdp1->md_ldt;
                        bcopy(&mdp1->md_ldt_sd, &mdp2->md_ldt_sd, sizeof(struct
                            system_segment_descriptor));
                } else {
                        mdp2->md_ldt = NULL;
                        mdp2->md_ldt = user_ldt_alloc(p2, 0);
                        if (mdp2->md_ldt == NULL)
                                panic("could not copy LDT");
                        amd64_set_ldt_data(td2, 0, max_ldt_segment,
                            (struct user_segment_descriptor *)
                            mdp1->md_ldt->ldt_base);
                }
        } else
                mdp2->md_ldt = NULL;
        mtx_unlock(&dt_lock);

        /*
         * Now, cpu_switch() can schedule the new process.
         * pcb_rsp is loaded pointing to the cpu_switch() stack frame
         * containing the return address when exiting cpu_switch.
         * This will normally be to fork_trampoline(), which will have
         * %rbx loaded with the new thread's pointer.  fork_trampoline()
         * will set up a stack to call fork_return(td, frame); to complete
         * the return to user mode.
         */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
        /*
         * Note that the trap frame follows the args, so the function
         * is really called like this:  func(arg, frame);
         */
        td->td_pcb->pcb_r12 = (long) func;      /* function */
        td->td_pcb->pcb_rbx = (long) arg;       /* first arg */
}

void
cpu_exit(struct thread *td)
{

        /*
         * If this process has a custom LDT, release it.
         */
        if (td->td_proc->p_md.md_ldt != NULL)
                user_ldt_free(td);
}

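/*
 * Machine-dependent thread exit work: give up FPU ownership if this
 * thread currently holds it, and disable any hardware debug registers
 * the thread had enabled.
 */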
void
cpu_thread_exit(struct thread *td)
{
        struct pcb *pcb;

        critical_enter();
        if (td == PCPU_GET(fpcurthread))
                fpudrop();
        critical_exit();

        pcb = td->td_pcb;

        /* Disable any hardware breakpoints. */
        if (pcb->pcb_flags & PCB_DBREGS) {
                reset_dbregs();
                clear_pcb_flags(pcb, PCB_DBREGS);
        }
}

void
cpu_thread_clean(struct thread *td)
{
        struct pcb *pcb;

        pcb = td->td_pcb;

        /*
         * Clean TSS/iomap
         */
        if (pcb->pcb_tssp != NULL) {
                pmap_pti_remove_kva((vm_offset_t)pcb->pcb_tssp,
                    (vm_offset_t)pcb->pcb_tssp + ctob(IOPAGES + 1));
                kmem_free((vm_offset_t)pcb->pcb_tssp, ctob(IOPAGES + 1));
                pcb->pcb_tssp = NULL;
        }
}

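/*
 * Kernel-stack swap-in/swap-out hooks; no machine-dependent work is
 * needed on amd64, so they are empty.
 */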
void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

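/*
 * Lay out the machine-dependent parts of a newly allocated thread: point
 * td_pcb and td_frame into the kernel stack, hook up the user FPU save
 * area, and initialize its XSAVE header when XSAVE is in use.
 */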
void
cpu_thread_alloc(struct thread *td)
{
        struct pcb *pcb;
        struct xstate_hdr *xhdr;

        td->td_pcb = pcb = get_pcb_td(td);
        td->td_frame = (struct trapframe *)pcb - 1;
        pcb->pcb_save = get_pcb_user_save_pcb(pcb);
        if (use_xsave) {
                xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
                bzero(xhdr, sizeof(*xhdr));
                xhdr->xstate_bv = xsave_mask;
        }
}

void
cpu_thread_free(struct thread *td)
{

        cpu_thread_clean(td);
}

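/*
 * exec() may reuse the current vmspace only if the pmap's KPTI setup
 * (user CR3 present or not) matches the KPTI mode requested for the
 * process via P_MD_KPTI.
 */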
bool
cpu_exec_vmspace_reuse(struct proc *p, vm_map_t map)
{

        return (((curproc->p_md.md_flags & P_MD_KPTI) != 0) ==
            (vm_map_pmap(map)->pm_ucr3 != PMAP_NO_CR3));
}

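/*
 * Back end for the PROC_KPTI_CTL and PROC_KPTI_STATUS procctl(2)
 * commands: update the process's on-exec KPTI flag, or report the
 * configured mode and whether KPTI is active for its current pmap.
 */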
static void
cpu_procctl_kpti(struct proc *p, int com, int *val)
{

        if (com == PROC_KPTI_CTL) {
                if (pti && *val == PROC_KPTI_CTL_ENABLE_ON_EXEC)
                        p->p_md.md_flags |= P_MD_KPTI;
                if (*val == PROC_KPTI_CTL_DISABLE_ON_EXEC)
                        p->p_md.md_flags &= ~P_MD_KPTI;
        } else /* PROC_KPTI_STATUS */ {
                *val = (p->p_md.md_flags & P_MD_KPTI) != 0 ?
                    PROC_KPTI_CTL_ENABLE_ON_EXEC:
                    PROC_KPTI_CTL_DISABLE_ON_EXEC;
                if (vmspace_pmap(p->p_vmspace)->pm_ucr3 != PMAP_NO_CR3)
                        *val |= PROC_KPTI_STATUS_ACTIVE;
        }
}

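/*
 * Machine-dependent procctl(2) handler.  Only the KPTI control and
 * status commands are implemented; changing the setting requires
 * PRIV_IO privilege, and the target process is looked up with pget().
 */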
int
cpu_procctl(struct thread *td, int idtype, id_t id, int com, void *data)
{
        struct proc *p;
        int error, val;

        switch (com) {
        case PROC_KPTI_CTL:
        case PROC_KPTI_STATUS:
                if (idtype != P_PID) {
                        error = EINVAL;
                        break;
                }
                if (com == PROC_KPTI_CTL) {
                        /* sad but true and not a joke */
                        error = priv_check(td, PRIV_IO);
                        if (error != 0)
                                break;
                        error = copyin(data, &val, sizeof(val));
                        if (error != 0)
                                break;
                        if (val != PROC_KPTI_CTL_ENABLE_ON_EXEC &&
                            val != PROC_KPTI_CTL_DISABLE_ON_EXEC) {
                                error = EINVAL;
                                break;
                        }
                }
                error = pget(id, PGET_CANSEE | PGET_NOTWEXIT | PGET_NOTID, &p);
                if (error == 0) {
                        cpu_procctl_kpti(p, com, &val);
                        PROC_UNLOCK(p);
                        if (com == PROC_KPTI_STATUS)
                                error = copyout(&val, data, sizeof(val));
                }
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

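/*
 * Deliver a system call result to user mode: on success the return
 * values go to %rax/%rdx with the carry flag cleared; ERESTART backs
 * %rip up over the system call instruction (its length was saved in
 * tf_err); EJUSTRETURN leaves the frame untouched; any other error is
 * translated to the ABI's errno value and returned in %rax with the
 * carry flag set.
 */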
void
cpu_set_syscall_retval(struct thread *td, int error)
{
        struct trapframe *frame;

        frame = td->td_frame;
        if (__predict_true(error == 0)) {
                frame->tf_rax = td->td_retval[0];
                frame->tf_rdx = td->td_retval[1];
                frame->tf_rflags &= ~PSL_C;
                return;
        }

        switch (error) {
        case ERESTART:
                /*
                 * Reconstruct pc, we know that 'syscall' is 2 bytes,
                 * lcall $X,y is 7 bytes, int 0x80 is 2 bytes.
                 * We saved this in tf_err.
                 * %r10 (which was holding the value of %rcx) is restored
                 * for the next iteration.
                 * %r10 restore is only required for freebsd/amd64 processes,
                 * but shall be innocent for any ia32 ABI.
                 *
                 * Require full context restore to get the arguments
                 * in the registers reloaded at return to usermode.
                 */
                frame->tf_rip -= frame->tf_err;
                frame->tf_r10 = frame->tf_rcx;
                set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
                break;

        case EJUSTRETURN:
                break;

        default:
                frame->tf_rax = SV_ABI_ERRNO(td->td_proc, error);
                frame->tf_rflags |= PSL_C;
                break;
        }
}

/*
 * Initialize machine state (mostly the pcb and trap frame) for a new
 * thread that is about to return to userspace.  Put enough state in the
 * new thread's PCB to get it to go back to fork_return(), which finalizes
 * the thread state and handles the peculiarities of the first return to
 * userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
        struct pcb *pcb2;

        /* Point the pcb to the top of the stack. */
        pcb2 = td->td_pcb;

        /*
         * Copy the upcall pcb.  This loads kernel regs.
         * Those not loaded individually below get their default
         * values here.
         */
        update_pcb_bases(td0->td_pcb);
        bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
        clear_pcb_flags(pcb2, PCB_FPUINITDONE | PCB_USERFPUINITDONE |
            PCB_KERNFPU);
        pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
        bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
            cpu_max_ext_state_size);
        set_pcb_flags_raw(pcb2, PCB_FULL_IRET);

        /*
         * Create a new fresh stack for the new thread.
         */
        bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

        /*
         * If the current thread has the trap bit set (i.e. a debugger had
         * single stepped the process to the system call), we need to clear
         * the trap flag from the new frame. Otherwise, the new thread will
         * receive a (likely unexpected) SIGTRAP when it executes the first
         * instruction after returning to userland.
         */
        td->td_frame->tf_rflags &= ~PSL_T;

        /*
         * Set registers for trampoline to user mode.  Leave space for the
         * return address on stack.  These are the kernel mode register values.
         */
        pcb2->pcb_r12 = (register_t)fork_return;            /* trampoline arg */
        pcb2->pcb_rbp = 0;
        pcb2->pcb_rsp = (register_t)td->td_frame - sizeof(void *);      /* trampoline arg */
        pcb2->pcb_rbx = (register_t)td;                     /* trampoline arg */
        pcb2->pcb_rip = (register_t)fork_trampoline;
        /*
         * If we didn't copy the pcb, we'd need to do the following registers:
         * pcb2->pcb_dr*:       cloned above.
         * pcb2->pcb_savefpu:   cloned above.
         * pcb2->pcb_onfault:   cloned above (always NULL here?).
         * pcb2->pcb_[fg]sbase: cloned above
         */

        /* Setup to release spin count in fork_exit(). */
        td->td_md.md_spinlock_count = 1;
        td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

        /*
         * Do any extra cleaning that needs to be done.
         * The thread may have optional components
         * that are not present in a fresh thread.
         * This may be a recycled thread so make it look
         * as though it's newly allocated.
         */
        cpu_thread_clean(td);

#ifdef COMPAT_FREEBSD32
        if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
                /*
                 * Set the trap frame to point at the beginning of the entry
                 * function.
                 */
                td->td_frame->tf_rbp = 0;
                td->td_frame->tf_rsp =
                   (((uintptr_t)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
                td->td_frame->tf_rip = (uintptr_t)entry;

                /* Return address sentinel value to stop stack unwinding. */
                suword32((void *)td->td_frame->tf_rsp, 0);

                /* Pass the argument to the entry point. */
                suword32((void *)(td->td_frame->tf_rsp + sizeof(int32_t)),
                    (uint32_t)(uintptr_t)arg);

                return;
        }
#endif

        /*
         * Set the trap frame to point at the beginning of the uts
         * function.
         */
        td->td_frame->tf_rbp = 0;
        td->td_frame->tf_rsp =
            ((register_t)stack->ss_sp + stack->ss_size) & ~0x0f;
        td->td_frame->tf_rsp -= 8;
        td->td_frame->tf_rip = (register_t)entry;
        td->td_frame->tf_ds = _udatasel;
        td->td_frame->tf_es = _udatasel;
        td->td_frame->tf_fs = _ufssel;
        td->td_frame->tf_gs = _ugssel;
        td->td_frame->tf_flags = TF_HASSEGS;

        /* Return address sentinel value to stop stack unwinding. */
        suword((void *)td->td_frame->tf_rsp, 0);

        /* Pass the argument to the entry point. */
        td->td_frame->tf_rdi = (register_t)arg;
}

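/*
 * Install a new user TLS base for the thread: the %fs base for native
 * 64-bit processes, or the %gs base for 32-bit (ILP32) processes, and
 * force a full IRET so the new base is loaded on return to user mode.
 */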
int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
        struct pcb *pcb;

        if ((u_int64_t)tls_base >= VM_MAXUSER_ADDRESS)
                return (EINVAL);

        pcb = td->td_pcb;
        set_pcb_flags(pcb, PCB_FULL_IRET);
#ifdef COMPAT_FREEBSD32
        if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
                pcb->pcb_gsbase = (register_t)tls_base;
                return (0);
        }
#endif
        pcb->pcb_fsbase = (register_t)tls_base;
        return (0);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
        if (busdma_swi_pending != 0)
                busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
        /* The ISA ``memory hole''. */
        if (addr >= 0xa0000 && addr < 0x100000)
                return 0;
#endif

        /*
         * stuff other tests for known memory-mapped devices (PCI?)
         * here
         */

        return 1;
}

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.