FreeBSD/Linux Kernel Cross Reference
sys/x86/xen/hvm.c


    1 /*
    2  * Copyright (c) 2008, 2013 Citrix Systems, Inc.
    3  * Copyright (c) 2012 Spectra Logic Corporation
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
    15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD: releng/10.4/sys/x86/xen/hvm.c 305672 2016-09-09 19:57:32Z jhb $");
   30 
   31 #include <sys/param.h>
   32 #include <sys/bus.h>
   33 #include <sys/kernel.h>
   34 #include <sys/malloc.h>
   35 #include <sys/proc.h>
   36 #include <sys/smp.h>
   37 #include <sys/systm.h>
   38 
   39 #include <vm/vm.h>
   40 #include <vm/pmap.h>
   41 
   42 #include <dev/pci/pcivar.h>
   43 
   44 #include <machine/cpufunc.h>
   45 #include <machine/cpu.h>
   46 #include <machine/smp.h>
   47 
   48 #include <x86/apicreg.h>
   49 
   50 #include <xen/xen-os.h>
   51 #include <xen/features.h>
   52 #include <xen/gnttab.h>
   53 #include <xen/hypervisor.h>
   54 #include <xen/hvm.h>
   55 #include <xen/xen_intr.h>
   56 
   57 #include <xen/interface/hvm/params.h>
   58 #include <xen/interface/vcpu.h>
   59 
   60 /*--------------------------- Forward Declarations ---------------------------*/
   61 #ifdef SMP
   62 static driver_filter_t xen_smp_rendezvous_action;
   63 static driver_filter_t xen_invltlb;
   64 static driver_filter_t xen_invlpg;
   65 static driver_filter_t xen_invlrng;
   66 static driver_filter_t xen_invlcache;
   67 #ifdef __i386__
   68 static driver_filter_t xen_lazypmap;
   69 #endif
   70 static driver_filter_t xen_ipi_bitmap_handler;
   71 static driver_filter_t xen_cpustop_handler;
   72 static driver_filter_t xen_cpususpend_handler;
   73 static driver_filter_t xen_cpustophard_handler;
   74 static void xen_ipi_vectored(u_int vector, int dest);
   75 #endif
   76 static void xen_hvm_cpu_init(void);
   77 
   78 /*---------------------------- Extern Declarations ---------------------------*/
   79 #ifdef __i386__
   80 extern void pmap_lazyfix_action(void);
   81 #endif
   82 #ifdef __amd64__
   83 extern int pmap_pcid_enabled;
   84 #endif
   85 
   86 /*---------------------------------- Macros ----------------------------------*/
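       /*
        * IPI vectors are allocated starting at APIC_IPI_INTS, so this turns
        * an IPI vector number into an index into the xen_ipis[] table below.
        */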
   87 #define IPI_TO_IDX(ipi) ((ipi) - APIC_IPI_INTS)
   88 
   89 /*-------------------------------- Local Types -------------------------------*/
   90 enum xen_hvm_init_type {
   91         XEN_HVM_INIT_COLD,
   92         XEN_HVM_INIT_CANCELLED_SUSPEND,
   93         XEN_HVM_INIT_RESUME
   94 };
   95 
   96 struct xen_ipi_handler
   97 {
   98         driver_filter_t *filter;
   99         const char      *description;
  100 };
  101 
  102 /*-------------------------------- Global Data -------------------------------*/
  103 enum xen_domain_type xen_domain_type = XEN_NATIVE;
  104 
  105 #ifdef SMP
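       /*
        * IPIs initially go through the native local APIC; xen_setup_cpus()
        * switches ipi_vectored to xen_ipi_vectored once the per-CPU
        * event-channel IPIs have been bound.
        */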
  106 struct cpu_ops xen_hvm_cpu_ops = {
  107         .ipi_vectored   = lapic_ipi_vectored,
  108         .cpu_init       = xen_hvm_cpu_init,
  109         .cpu_resume     = xen_hvm_cpu_init
  110 };
  111 #endif
  112 
  113 static MALLOC_DEFINE(M_XENHVM, "xen_hvm", "Xen HVM PV Support");
  114 
  115 #ifdef SMP
  116 static struct xen_ipi_handler xen_ipis[] = 
  117 {
  118         [IPI_TO_IDX(IPI_RENDEZVOUS)]    = { xen_smp_rendezvous_action,  "r"   },
  119         [IPI_TO_IDX(IPI_INVLTLB)]       = { xen_invltlb,                "itlb"},
  120         [IPI_TO_IDX(IPI_INVLPG)]        = { xen_invlpg,                 "ipg" },
  121         [IPI_TO_IDX(IPI_INVLRNG)]       = { xen_invlrng,                "irg" },
  122         [IPI_TO_IDX(IPI_INVLCACHE)]     = { xen_invlcache,              "ic"  },
  123 #ifdef __i386__
  124         [IPI_TO_IDX(IPI_LAZYPMAP)]      = { xen_lazypmap,               "lp"  },
  125 #endif
  126         [IPI_TO_IDX(IPI_BITMAP_VECTOR)] = { xen_ipi_bitmap_handler,     "b"   },
  127         [IPI_TO_IDX(IPI_STOP)]          = { xen_cpustop_handler,        "st"  },
  128         [IPI_TO_IDX(IPI_SUSPEND)]       = { xen_cpususpend_handler,     "sp"  },
  129         [IPI_TO_IDX(IPI_STOP_HARD)]     = { xen_cpustophard_handler,    "sth" },
  130 };
  131 #endif
  132 
  133 /**
  134  * If non-zero, the hypervisor has been configured to use a direct
  135  * IDT event callback for interrupt injection.
  136  */
  137 int xen_vector_callback_enabled;
  138 
  139 /*------------------------------- Per-CPU Data -------------------------------*/
  140 DPCPU_DEFINE(struct vcpu_info, vcpu_local_info);
  141 DPCPU_DEFINE(struct vcpu_info *, vcpu_info);
  142 #ifdef SMP
  143 DPCPU_DEFINE(xen_intr_handle_t, ipi_handle[nitems(xen_ipis)]);
  144 #endif
  145 
  146 /*------------------ Hypervisor Access Shared Memory Regions -----------------*/
  147 /** Hypercall table accessed via HYPERVISOR_*_op() methods. */
  148 char *hypercall_stubs;
  149 shared_info_t *HYPERVISOR_shared_info;
  150 
  151 
  152 /*------------------------------ Sysctl tunables -----------------------------*/
  153 int xen_disable_pv_disks = 0;
  154 int xen_disable_pv_nics = 0;
  155 TUNABLE_INT("hw.xen.disable_pv_disks", &xen_disable_pv_disks);
  156 TUNABLE_INT("hw.xen.disable_pv_nics", &xen_disable_pv_nics);
  157 
  158 #ifdef SMP
  159 /*---------------------------- XEN PV IPI Handlers ---------------------------*/
  160 /*
   161  * These are C clones of the ASM functions found in apic_vector.s
  162  */
  163 static int
  164 xen_ipi_bitmap_handler(void *arg)
  165 {
  166         struct trapframe *frame;
  167 
  168         frame = arg;
  169         ipi_bitmap_handler(*frame);
  170         return (FILTER_HANDLED);
  171 }
  172 
  173 static int
  174 xen_smp_rendezvous_action(void *arg)
  175 {
  176 #ifdef COUNT_IPIS
  177         (*ipi_rendezvous_counts[PCPU_GET(cpuid)])++;
  178 #endif /* COUNT_IPIS */
  179 
  180         smp_rendezvous_action();
  181         return (FILTER_HANDLED);
  182 }
  183 
  184 static int
  185 xen_invltlb(void *arg)
  186 {
  187 
  188         invltlb_handler();
  189         return (FILTER_HANDLED);
  190 }
  191 
  192 #ifdef __amd64__
  193 static int
  194 xen_invltlb_pcid(void *arg)
  195 {
  196 
  197         invltlb_pcid_handler();
  198         return (FILTER_HANDLED);
  199 }
  200 #endif
  201 
  202 static int
  203 xen_invlpg(void *arg)
  204 {
  205 
  206         invlpg_handler();
  207         return (FILTER_HANDLED);
  208 }
  209 
  210 #ifdef __amd64__
  211 static int
  212 xen_invlpg_pcid(void *arg)
  213 {
  214 
  215         invlpg_pcid_handler();
  216         return (FILTER_HANDLED);
  217 }
  218 #endif
  219 
  220 static int
  221 xen_invlrng(void *arg)
  222 {
  223 
  224         invlrng_handler();
  225         return (FILTER_HANDLED);
  226 }
  227 
  228 static int
  229 xen_invlcache(void *arg)
  230 {
  231 
  232         invlcache_handler();
  233         return (FILTER_HANDLED);
  234 }
  235 
  236 #ifdef __i386__
  237 static int
  238 xen_lazypmap(void *arg)
  239 {
  240 
  241         pmap_lazyfix_action();
  242         return (FILTER_HANDLED);
  243 }
  244 #endif
  245 
  246 static int
  247 xen_cpustop_handler(void *arg)
  248 {
  249 
  250         cpustop_handler();
  251         return (FILTER_HANDLED);
  252 }
  253 
  254 static int
  255 xen_cpususpend_handler(void *arg)
  256 {
  257 
  258         cpususpend_handler();
  259         return (FILTER_HANDLED);
  260 }
  261 
  262 static int
  263 xen_cpustophard_handler(void *arg)
  264 {
  265 
  266         ipi_nmi_handler();
  267         return (FILTER_HANDLED);
  268 }
  269 
  270 /* Xen PV IPI sender */
  271 static void
  272 xen_ipi_vectored(u_int vector, int dest)
  273 {
  274         xen_intr_handle_t *ipi_handle;
  275         int ipi_idx, to_cpu, self;
  276 
  277         ipi_idx = IPI_TO_IDX(vector);
   278         if (ipi_idx >= nitems(xen_ipis))
  279                 panic("IPI out of range");
  280 
  281         switch(dest) {
  282         case APIC_IPI_DEST_SELF:
  283                 ipi_handle = DPCPU_GET(ipi_handle);
  284                 xen_intr_signal(ipi_handle[ipi_idx]);
  285                 break;
  286         case APIC_IPI_DEST_ALL:
  287                 CPU_FOREACH(to_cpu) {
  288                         ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
  289                         xen_intr_signal(ipi_handle[ipi_idx]);
  290                 }
  291                 break;
  292         case APIC_IPI_DEST_OTHERS:
  293                 self = PCPU_GET(cpuid);
  294                 CPU_FOREACH(to_cpu) {
  295                         if (to_cpu != self) {
  296                                 ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
  297                                 xen_intr_signal(ipi_handle[ipi_idx]);
  298                         }
  299                 }
  300                 break;
  301         default:
  302                 to_cpu = apic_cpuid(dest);
  303                 ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
  304                 xen_intr_signal(ipi_handle[ipi_idx]);
  305                 break;
  306         }
  307 }
  308 
  309 /*---------------------- XEN diverged cpu operations -------------------------*/
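       /*
        * Allocate and bind one event channel per IPI type in xen_ipis[] for
        * the given CPU; table slots without a filter are left unbound.
        */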
  310 static void
  311 xen_cpu_ipi_init(int cpu)
  312 {
  313         xen_intr_handle_t *ipi_handle;
  314         const struct xen_ipi_handler *ipi;
  315         device_t dev;
  316         int idx, rc;
  317 
  318         ipi_handle = DPCPU_ID_GET(cpu, ipi_handle);
  319         dev = pcpu_find(cpu)->pc_device;
  320         KASSERT((dev != NULL), ("NULL pcpu device_t"));
  321 
  322         for (ipi = xen_ipis, idx = 0; idx < nitems(xen_ipis); ipi++, idx++) {
  323 
  324                 if (ipi->filter == NULL) {
  325                         ipi_handle[idx] = NULL;
  326                         continue;
  327                 }
  328 
  329                 rc = xen_intr_alloc_and_bind_ipi(dev, cpu, ipi->filter,
  330                     INTR_TYPE_TTY, &ipi_handle[idx]);
  331                 if (rc != 0)
  332                         panic("Unable to allocate a XEN IPI port");
  333                 xen_intr_describe(ipi_handle[idx], "%s", ipi->description);
  334         }
  335 }
  336 
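       /*
        * On amd64 with PCID enabled the TLB shootdown IPIs must run the
        * PCID-aware handlers, so swap them into the table before the per-CPU
        * event channels are bound.
        */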
  337 static void
  338 xen_setup_cpus(void)
  339 {
  340         int i;
  341 
  342         if (!xen_hvm_domain() || !xen_vector_callback_enabled)
  343                 return;
  344 
  345 #ifdef __amd64__
  346         if (pmap_pcid_enabled) {
  347                 xen_ipis[IPI_TO_IDX(IPI_INVLTLB)].filter = xen_invltlb_pcid;
  348                 xen_ipis[IPI_TO_IDX(IPI_INVLPG)].filter = xen_invlpg_pcid;
  349         }
  350 #endif
  351         CPU_FOREACH(i)
  352                 xen_cpu_ipi_init(i);
  353 
  354         /* Set the xen pv ipi ops to replace the native ones */
  355         cpu_ops.ipi_vectored = xen_ipi_vectored;
  356 }
  357 #endif
  358 
  359 /*---------------------- XEN Hypervisor Probe and Setup ----------------------*/
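       /*
        * Scan the hypervisor CPUID range (0x40000000-0x4000ffff, in steps of
        * 0x100) for the "XenVMMXenVMM" signature returned in EBX/ECX/EDX.
        * At least two leaves beyond the base must be implemented, since they
        * carry the version and hypercall-page information used below.
        */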
  360 static uint32_t
  361 xen_hvm_cpuid_base(void)
  362 {
  363         uint32_t base, regs[4];
  364 
  365         for (base = 0x40000000; base < 0x40010000; base += 0x100) {
  366                 do_cpuid(base, regs);
  367                 if (!memcmp("XenVMMXenVMM", &regs[1], 12)
  368                     && (regs[0] - base) >= 2)
  369                         return (base);
  370         }
  371         return (0);
  372 }
  373 
  374 /*
   375  * Allocate and fill in the hypercall page.
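        *
        * CPUID leaf base + 2 reports the number of hypercall pages in EAX and
        * the MSR used to install them in EBX; writing each page's physical
        * address, with the page index in the low bits, to that MSR makes the
        * hypervisor fill the page with hypercall stubs.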
  376  */
  377 static int
  378 xen_hvm_init_hypercall_stubs(void)
  379 {
  380         uint32_t base, regs[4];
  381         int i;
  382 
  383         base = xen_hvm_cpuid_base();
  384         if (base == 0)
  385                 return (ENXIO);
  386 
  387         if (hypercall_stubs == NULL) {
  388                 int major, minor;
  389 
  390                 do_cpuid(base + 1, regs);
  391 
  392                 major = regs[0] >> 16;
  393                 minor = regs[0] & 0xffff;
  394                 printf("XEN: Hypervisor version %d.%d detected.\n", major,
  395                         minor);
  396 
  397 #ifdef SMP
  398                 if (((major < 4) || (major == 4 && minor <= 5)) &&
  399                     msix_disable_migration == -1) {
  400                         /*
  401                          * Xen hypervisors prior to 4.6.0 do not properly
  402                          * handle updates to enabled MSI-X table entries,
  403                          * so disable MSI-X interrupt migration in that
  404                          * case.
  405                          */
  406                         if (bootverbose)
  407                                 printf(
  408 "Disabling MSI-X interrupt migration due to Xen hypervisor bug.\n"
  409 "Set machdep.msix_disable_migration=0 to forcefully enable it.\n");
  410                         msix_disable_migration = 1;
  411                 }
  412 #endif
  413         }
  414 
  415         /*
  416          * Find the hypercall pages.
  417          */
  418         do_cpuid(base + 2, regs);
  419         
  420         if (hypercall_stubs == NULL) {
  421                 size_t call_region_size;
  422 
  423                 call_region_size = regs[0] * PAGE_SIZE;
  424                 hypercall_stubs = malloc(call_region_size, M_XENHVM, M_NOWAIT);
  425                 if (hypercall_stubs == NULL)
  426                         panic("Unable to allocate Xen hypercall region");
  427         }
  428 
  429         for (i = 0; i < regs[0]; i++)
  430                 wrmsr(regs[1], vtophys(hypercall_stubs + i * PAGE_SIZE) + i);
  431 
  432         return (0);
  433 }
  434 
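       /*
        * Map the hypervisor's shared info page into the guest.  The
        * XENMEM_add_to_physmap hypercall (XENMAPSPACE_shared_info) remaps the
        * guest frame backing the malloc()ed page so that it is backed by the
        * shared info page instead.
        */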
  435 static void
  436 xen_hvm_init_shared_info_page(void)
  437 {
  438         struct xen_add_to_physmap xatp;
  439 
  440         if (HYPERVISOR_shared_info == NULL) {
  441                 HYPERVISOR_shared_info = malloc(PAGE_SIZE, M_XENHVM, M_NOWAIT);
  442                 if (HYPERVISOR_shared_info == NULL)
  443                         panic("Unable to allocate Xen shared info page");
  444         }
  445 
  446         xatp.domid = DOMID_SELF;
  447         xatp.idx = 0;
  448         xatp.space = XENMAPSPACE_shared_info;
  449         xatp.gpfn = vtophys(HYPERVISOR_shared_info) >> PAGE_SHIFT;
  450         if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
  451                 panic("HYPERVISOR_memory_op failed");
  452 }
  453 
  454 /*
  455  * Tell the hypervisor how to contact us for event channel callbacks.
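        *
        * The preferred mechanism is a direct vector callback injected through
        * the IDT (HVM_CALLBACK_VECTOR).  If XENFEAT_hvm_callback_vector is
        * missing or registration fails, fall back to the interrupt of the
        * emulated xenpci device, delivered either as a legacy GSI or as a
        * PCI INTx pin.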
  456  */
  457 void
  458 xen_hvm_set_callback(device_t dev)
  459 {
  460         struct xen_hvm_param xhp;
  461         int irq;
  462 
  463         if (xen_vector_callback_enabled)
  464                 return;
  465 
  466         xhp.domid = DOMID_SELF;
  467         xhp.index = HVM_PARAM_CALLBACK_IRQ;
  468         if (xen_feature(XENFEAT_hvm_callback_vector) != 0) {
  469                 int error;
  470 
  471                 xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN);
  472                 error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);
  473                 if (error == 0) {
  474                         xen_vector_callback_enabled = 1;
  475                         return;
  476                 }
  477                 printf("Xen HVM callback vector registration failed (%d). "
  478                     "Falling back to emulated device interrupt\n", error);
  479         }
  480         xen_vector_callback_enabled = 0;
  481         if (dev == NULL) {
  482                 /*
  483                  * Called from early boot or resume.
  484                  * xenpci will invoke us again later.
  485                  */
  486                 return;
  487         }
  488 
  489         irq = pci_get_irq(dev);
  490         if (irq < 16) {
  491                 xhp.value = HVM_CALLBACK_GSI(irq);
  492         } else {
  493                 u_int slot;
  494                 u_int pin;
  495 
  496                 slot = pci_get_slot(dev);
  497                 pin = pci_get_intpin(dev) - 1;
  498                 xhp.value = HVM_CALLBACK_PCI_INTX(slot, pin);
  499         }
  500 
  501         if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) != 0)
  502                 panic("Can't set evtchn callback");
  503 }
  504 
  505 #define XEN_MAGIC_IOPORT 0x10
  506 enum {
  507         XMI_MAGIC                        = 0x49d2,
  508         XMI_UNPLUG_IDE_DISKS             = 0x01,
  509         XMI_UNPLUG_NICS                  = 0x02,
  510         XMI_UNPLUG_IDE_EXCEPT_PRI_MASTER = 0x04
  511 };
  512 
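       /*
        * The emulated Xen platform device (provided by the device model,
        * typically QEMU) implements an "unplug" protocol on I/O port 0x10:
        * a read returns XMI_MAGIC when the protocol is supported, and writing
        * a mask of XMI_UNPLUG_* bits disconnects the matching emulated
        * devices so the PV disk and network drivers can attach without
        * conflicting with their emulated counterparts.
        */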
  513 static void
  514 xen_hvm_disable_emulated_devices(void)
  515 {
  516         u_short disable_devs = 0;
  517 
  518         if (inw(XEN_MAGIC_IOPORT) != XMI_MAGIC)
  519                 return;
  520 
  521         if (xen_disable_pv_disks == 0) {
  522                 if (bootverbose)
  523                         printf("XEN: disabling emulated disks\n");
  524                 disable_devs |= XMI_UNPLUG_IDE_DISKS;
  525         }
  526         if (xen_disable_pv_nics == 0) {
  527                 if (bootverbose)
  528                         printf("XEN: disabling emulated nics\n");
  529                 disable_devs |= XMI_UNPLUG_NICS;
  530         }
  531 
  532         if (disable_devs != 0)
  533                 outw(XEN_MAGIC_IOPORT, disable_devs);
  534 }
  535 
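       /*
        * Common body for cold boot and resume.  A cancelled suspend keeps the
        * pre-suspend state, so nothing is redone; a real resume rebuilds the
        * hypercall page, shared info mapping and callback registration, and
        * clears the stale per-CPU vcpu_info pointers.
        */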
  536 static void
  537 xen_hvm_init(enum xen_hvm_init_type init_type)
  538 {
  539         int error;
  540         int i;
  541 
  542         if (init_type == XEN_HVM_INIT_CANCELLED_SUSPEND)
  543                 return;
  544 
  545         error = xen_hvm_init_hypercall_stubs();
  546 
  547         switch (init_type) {
  548         case XEN_HVM_INIT_COLD:
  549                 if (error != 0)
  550                         return;
  551 
  552                 setup_xen_features();
  553 #ifdef SMP
  554                 cpu_ops = xen_hvm_cpu_ops;
  555 #endif
  556                 vm_guest = VM_GUEST_XEN;
  557                 break;
  558         case XEN_HVM_INIT_RESUME:
  559                 if (error != 0)
  560                         panic("Unable to init Xen hypercall stubs on resume");
  561 
  562                 /* Clear stale vcpu_info. */
  563                 CPU_FOREACH(i)
  564                         DPCPU_ID_SET(i, vcpu_info, NULL);
  565                 break;
  566         default:
  567                 panic("Unsupported HVM initialization type");
  568         }
  569 
  570         xen_vector_callback_enabled = 0;
  571         xen_domain_type = XEN_HVM_DOMAIN;
  572         xen_hvm_init_shared_info_page();
  573         xen_hvm_set_callback(NULL);
  574         xen_hvm_disable_emulated_devices();
  575 } 
  576 
  577 void
  578 xen_hvm_suspend(void)
  579 {
  580 }
  581 
  582 void
  583 xen_hvm_resume(bool suspend_cancelled)
  584 {
  585 
  586         xen_hvm_init(suspend_cancelled ?
  587             XEN_HVM_INIT_CANCELLED_SUSPEND : XEN_HVM_INIT_RESUME);
  588 
  589         /* Register vcpu_info area for CPU#0. */
  590         xen_hvm_cpu_init();
  591 }
  592  
  593 static void
  594 xen_hvm_sysinit(void *arg __unused)
  595 {
  596         xen_hvm_init(XEN_HVM_INIT_COLD);
  597 }
  598 
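       /*
        * Xen identifies virtual CPUs by their ACPI processor ID, which need
        * not match FreeBSD's cpuid, so record the mapping for use in later
        * VCPUOP_* hypercalls.
        */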
  599 static void
  600 xen_set_vcpu_id(void)
  601 {
  602         struct pcpu *pc;
  603         int i;
  604 
  605         /* Set vcpu_id to acpi_id */
  606         CPU_FOREACH(i) {
  607                 pc = pcpu_find(i);
  608                 pc->pc_vcpu_id = pc->pc_acpi_id;
  609                 if (bootverbose)
  610                         printf("XEN: CPU %u has VCPU ID %u\n",
  611                                i, pc->pc_vcpu_id);
  612         }
  613 }
  614 
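       /*
        * Register a per-CPU vcpu_info area with the hypervisor.
        * VCPUOP_register_vcpu_info places it in guest memory of our choosing,
        * which is needed for vCPU IDs beyond the fixed array in shared_info;
        * if the hypercall is unavailable, fall back to the legacy
        * shared_info slot.
        */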
  615 static void
  616 xen_hvm_cpu_init(void)
  617 {
  618         struct vcpu_register_vcpu_info info;
  619         struct vcpu_info *vcpu_info;
  620         int cpu, rc;
  621 
  622         if (!xen_domain())
  623                 return;
  624 
  625         if (DPCPU_GET(vcpu_info) != NULL) {
  626                 /*
  627                  * vcpu_info is already set.  We're resuming
  628                  * from a failed migration and our pre-suspend
  629                  * configuration is still valid.
  630                  */
  631                 return;
  632         }
  633 
  634         vcpu_info = DPCPU_PTR(vcpu_local_info);
  635         cpu = PCPU_GET(vcpu_id);
  636         info.mfn = vtophys(vcpu_info) >> PAGE_SHIFT;
  637         info.offset = vtophys(vcpu_info) - trunc_page(vtophys(vcpu_info));
  638 
  639         rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
  640         if (rc != 0)
  641                 DPCPU_SET(vcpu_info, &HYPERVISOR_shared_info->vcpu_info[cpu]);
  642         else
  643                 DPCPU_SET(vcpu_info, vcpu_info);
  644 }
  645 
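       /*
        * Boot-time hook-up: probe the hypervisor first (SI_SUB_HYPERVISOR),
        * assign vcpu IDs once the CPUs are enumerated (SI_SUB_CPU), register
        * the boot CPU's vcpu_info when interrupts are set up (SI_SUB_INTR)
        * and bind the PV IPI channels after the APs are launched (SI_SUB_SMP).
        */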
  646 SYSINIT(xen_hvm_init, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, xen_hvm_sysinit, NULL);
  647 #ifdef SMP
  648 SYSINIT(xen_setup_cpus, SI_SUB_SMP, SI_ORDER_FIRST, xen_setup_cpus, NULL);
  649 #endif
  650 SYSINIT(xen_hvm_cpu_init, SI_SUB_INTR, SI_ORDER_FIRST, xen_hvm_cpu_init, NULL);
  651 SYSINIT(xen_set_vcpu_id, SI_SUB_CPU, SI_ORDER_ANY, xen_set_vcpu_id, NULL);
