FreeBSD/Linux Kernel Cross Reference
sys/dev/vmd/vmd.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause
    3  *
    4  * Copyright (c) 2021 Alexander Motin <mav@FreeBSD.org>
    5  * Copyright 2019 Cisco Systems, Inc.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 #include <sys/types.h>
   34 #include <sys/param.h>
   35 #include <sys/bus.h>
   36 #include <sys/conf.h>
   37 #include <sys/kernel.h>
   38 #include <sys/limits.h>
   39 #include <sys/module.h>
   40 #include <sys/sysctl.h>
   41 #include <sys/systm.h>
   42 #include <sys/malloc.h>
   43 
   44 #include <machine/bus.h>
   45 #include <machine/resource.h>
   46 #include <machine/intr_machdep.h>
   47 #include <sys/rman.h>
   48 #include <sys/lock.h>
   49 #include <sys/mutex.h>
   50 
   51 #include <sys/pciio.h>
   52 #include <dev/pci/pcivar.h>
   53 #include <dev/pci/pcireg.h>
   54 #include <dev/pci/pci_private.h>
   55 #include <dev/pci/pcib_private.h>
   56 
   57 #include <dev/vmd/vmd.h>
   58 
   59 #include "pcib_if.h"
   60 
   61 struct vmd_type {
   62         u_int16_t       vmd_vid;
   63         u_int16_t       vmd_did;
   64         char            *vmd_name;
   65         int             flags;
   66 #define BUS_RESTRICT    1
   67 #define VECTOR_OFFSET   2
   68 #define CAN_BYPASS_MSI  4
   69 };
   70 
   71 #define VMD_CAP         0x40
   72 #define VMD_BUS_RESTRICT        0x1
   73 
   74 #define VMD_CONFIG      0x44
   75 #define VMD_BYPASS_MSI          0x2
   76 #define VMD_BUS_START(x)        ((x >> 8) & 0x3)
   77 
   78 #define VMD_LOCK        0x70
   79 
   80 SYSCTL_NODE(_hw, OID_AUTO, vmd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
   81     "Intel Volume Management Device tuning parameters");
   82 
   83 /*
    84  * By default all VMD devices remap their children's MSI/MSI-X interrupts into
    85  * their own.  This creates additional isolation, but also complicates things
    86  * due to sharing, etc.  Fortunately, some VMD devices can bypass the remapping.
   87  */
   88 static int vmd_bypass_msi = 1;
   89 SYSCTL_INT(_hw_vmd, OID_AUTO, bypass_msi, CTLFLAG_RWTUN, &vmd_bypass_msi, 0,
   90     "Bypass MSI remapping on capable hardware");
   91 
   92 /*
    93  * All MSIs within a group share the same address, so VMD can't distinguish
    94  * them.  It makes no sense to use more than one per device unless required
    95  * by a specific device driver.
   96  */
   97 static int vmd_max_msi = 1;
   98 SYSCTL_INT(_hw_vmd, OID_AUTO, max_msi, CTLFLAG_RWTUN, &vmd_max_msi, 0,
   99     "Maximum number of MSI vectors per device");
  100 
  101 /*
   102  * MSI-X vectors can use different addresses, but we have only a limited number
   103  * of them to route to, so use a conservative default to try to avoid sharing.
  104  */
  105 static int vmd_max_msix = 3;
  106 SYSCTL_INT(_hw_vmd, OID_AUTO, max_msix, CTLFLAG_RWTUN, &vmd_max_msix, 0,
  107     "Maximum number of MSI-X vectors per device");
  108 
  109 static struct vmd_type vmd_devs[] = {
  110         { 0x8086, 0x201d, "Intel Volume Management Device", 0 },
  111         { 0x8086, 0x28c0, "Intel Volume Management Device", BUS_RESTRICT | CAN_BYPASS_MSI },
  112         { 0x8086, 0x467f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
  113         { 0x8086, 0x4c3d, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
  114         { 0x8086, 0x7d0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
  115         { 0x8086, 0x9a0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
  116         { 0x8086, 0xa77f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
  117         { 0x8086, 0xad0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
  118         { 0, 0, NULL, 0 }
  119 };
  120 
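       /*
        * Probe: match the PCI vendor/device ID against the vmd_devs table
        * and, on a hit, set the device description.
        */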
  121 static int
  122 vmd_probe(device_t dev)
  123 {
  124         struct vmd_type *t;
  125         uint16_t vid, did;
  126 
  127         vid = pci_get_vendor(dev);
  128         did = pci_get_device(dev);
  129         for (t = vmd_devs; t->vmd_name != NULL; t++) {
  130                 if (vid == t->vmd_vid && did == t->vmd_did) {
  131                         device_set_desc(dev, t->vmd_name);
  132                         return (BUS_PROBE_DEFAULT);
  133                 }
  134         }
  135         return (ENXIO);
  136 }
  137 
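       /*
        * Undo everything done by vmd_attach(): destroy the bus and memory
        * rmans, free per-child interrupt user records, tear down the MSI-X
        * interrupt handlers and resources, and release the BAR mappings.
        */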
  138 static void
  139 vmd_free(struct vmd_softc *sc)
  140 {
  141         struct vmd_irq *vi;
  142         struct vmd_irq_user *u;
  143         int i;
  144 
  145         if (sc->psc.bus.rman.rm_end != 0)
  146                 rman_fini(&sc->psc.bus.rman);
  147         if (sc->psc.mem.rman.rm_end != 0)
  148                 rman_fini(&sc->psc.mem.rman);
  149         while ((u = LIST_FIRST(&sc->vmd_users)) != NULL) {
  150                 LIST_REMOVE(u, viu_link);
  151                 free(u, M_DEVBUF);
  152         }
  153         if (sc->vmd_irq != NULL) {
  154                 for (i = 0; i < sc->vmd_msix_count; i++) {
  155                         vi = &sc->vmd_irq[i];
  156                         if (vi->vi_res == NULL)
  157                                 continue;
  158                         bus_teardown_intr(sc->psc.dev, vi->vi_res,
  159                             vi->vi_handle);
  160                         bus_release_resource(sc->psc.dev, SYS_RES_IRQ,
  161                             vi->vi_rid, vi->vi_res);
  162                 }
  163         }
  164         free(sc->vmd_irq, M_DEVBUF);
  165         sc->vmd_irq = NULL;
  166         pci_release_msi(sc->psc.dev);
  167         for (i = 0; i < VMD_MAX_BAR; i++) {
  168                 if (sc->vmd_regs_res[i] != NULL)
  169                         bus_release_resource(sc->psc.dev, SYS_RES_MEMORY,
  170                             sc->vmd_regs_rid[i], sc->vmd_regs_res[i]);
  171         }
  172 }
  173 
   174 /* Config space of the hidden PCI roots is accessed through BAR(0). */
  175 
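       /*
        * The window behind BAR(0) uses the standard ECAM layout: 4 KB of
        * config space per function, 32 KB per slot and 1 MB per bus,
        * starting at vmd_bus_start.
        */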
  176 static uint32_t
  177 vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
  178 {
  179         struct vmd_softc *sc;
  180         bus_addr_t offset;
  181 
  182         sc = device_get_softc(dev);
  183         if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
  184                 return (0xffffffff);
  185 
  186         offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;
  187 
  188         switch (width) {
  189         case 4:
  190                 return (bus_space_read_4(sc->vmd_btag, sc->vmd_bhandle,
  191                     offset));
  192         case 2:
  193                 return (bus_space_read_2(sc->vmd_btag, sc->vmd_bhandle,
  194                     offset));
  195         case 1:
  196                 return (bus_space_read_1(sc->vmd_btag, sc->vmd_bhandle,
  197                     offset));
  198         default:
  199                 __assert_unreachable();
  200                 return (0xffffffff);
  201         }
  202 }
  203 
  204 static void
  205 vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg,
  206     uint32_t val, int width)
  207 {
  208         struct vmd_softc *sc;
  209         bus_addr_t offset;
  210 
  211         sc = device_get_softc(dev);
  212         if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
  213                 return;
  214 
  215         offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;
  216 
  217         switch (width) {
  218         case 4:
  219                 return (bus_space_write_4(sc->vmd_btag, sc->vmd_bhandle,
  220                     offset, val));
  221         case 2:
  222                 return (bus_space_write_2(sc->vmd_btag, sc->vmd_bhandle,
  223                     offset, val));
  224         case 1:
  225                 return (bus_space_write_1(sc->vmd_btag, sc->vmd_bhandle,
  226                     offset, val));
  227         default:
  228                 __assert_unreachable();
  229         }
  230 }
  231 
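       /*
        * Toggle the MSI remapping bypass bit in the VMD_CONFIG register.
        * With the bypass enabled, children's MSI/MSI-X interrupts are
        * delivered directly instead of being remapped into the VMD's own
        * MSI-X vectors.
        */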
  232 static void
  233 vmd_set_msi_bypass(device_t dev, bool enable)
  234 {
  235         uint16_t val;
  236 
  237         val = pci_read_config(dev, VMD_CONFIG, 2);
  238         if (enable)
  239                 val |= VMD_BYPASS_MSI;
  240         else
  241                 val &= ~VMD_BYPASS_MSI;
  242         pci_write_config(dev, VMD_CONFIG, val, 2);
  243 }
  244 
  245 static int
  246 vmd_intr(void *arg)
  247 {
  248         /*
   249          * We have nothing to do here, but we have to register some interrupt
   250          * handler to make the PCI code set up and enable the MSI-X vector.
  251          */
  252         return (FILTER_STRAY);
  253 }
  254 
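       /*
        * Attach: map the VMD BARs, carve the hidden domain's bus numbers and
        * memory windows into rmans, and either enable MSI bypass or allocate
        * the MSI-X vectors that children's interrupts get remapped into,
        * then add the child "pci" bus.
        */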
  255 static int
  256 vmd_attach(device_t dev)
  257 {
  258         struct vmd_softc *sc;
  259         struct pcib_secbus *bus;
  260         struct pcib_window *w;
  261         struct vmd_type *t;
  262         struct vmd_irq *vi;
  263         uint16_t vid, did;
  264         uint32_t bar;
  265         int i, j, error;
  266         char buf[64];
  267 
  268         sc = device_get_softc(dev);
  269         bzero(sc, sizeof(*sc));
  270         sc->psc.dev = dev;
  271         sc->psc.domain = PCI_DOMAINMAX - device_get_unit(dev);
  272 
  273         pci_enable_busmaster(dev);
  274 
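               /*
                * Map the VMD BARs.  BAR(0) is the child config space window;
                * the other two provide the memory windows registered with the
                * rman below.  j is bumped an extra step to skip the upper
                * half of a 64-bit memory BAR.
                */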
  275         for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++) {
  276                 sc->vmd_regs_rid[i] = PCIR_BAR(j);
  277                 bar = pci_read_config(dev, PCIR_BAR(0), 4);
  278                 if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) ==
  279                     PCIM_BAR_MEM_64)
  280                         j++;
  281                 if ((sc->vmd_regs_res[i] = bus_alloc_resource_any(dev,
  282                     SYS_RES_MEMORY, &sc->vmd_regs_rid[i], RF_ACTIVE)) == NULL) {
  283                         device_printf(dev, "Cannot allocate resources\n");
  284                         goto fail;
  285                 }
  286         }
  287 
  288         sc->vmd_btag = rman_get_bustag(sc->vmd_regs_res[0]);
  289         sc->vmd_bhandle = rman_get_bushandle(sc->vmd_regs_res[0]);
  290 
  291         vid = pci_get_vendor(dev);
  292         did = pci_get_device(dev);
  293         for (t = vmd_devs; t->vmd_name != NULL; t++) {
  294                 if (vid == t->vmd_vid && did == t->vmd_did)
  295                         break;
  296         }
  297 
  298         sc->vmd_bus_start = 0;
  299         if ((t->flags & BUS_RESTRICT) &&
  300             (pci_read_config(dev, VMD_CAP, 2) & VMD_BUS_RESTRICT)) {
  301                 switch (VMD_BUS_START(pci_read_config(dev, VMD_CONFIG, 2))) {
  302                 case 0:
  303                         sc->vmd_bus_start = 0;
  304                         break;
  305                 case 1:
  306                         sc->vmd_bus_start = 128;
  307                         break;
  308                 case 2:
  309                         sc->vmd_bus_start = 224;
  310                         break;
  311                 default:
  312                         device_printf(dev, "Unknown bus offset\n");
  313                         goto fail;
  314                 }
  315         }
  316         sc->vmd_bus_end = MIN(PCI_BUSMAX, sc->vmd_bus_start +
  317             (rman_get_size(sc->vmd_regs_res[0]) >> 20) - 1);
  318 
  319         bus = &sc->psc.bus;
  320         bus->sec = sc->vmd_bus_start;
  321         bus->sub = sc->vmd_bus_end;
  322         bus->dev = dev;
  323         bus->rman.rm_start = 0;
  324         bus->rman.rm_end = PCI_BUSMAX;
  325         bus->rman.rm_type = RMAN_ARRAY;
  326         snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
  327         bus->rman.rm_descr = strdup(buf, M_DEVBUF);
  328         error = rman_init(&bus->rman);
  329         if (error) {
  330                 device_printf(dev, "Failed to initialize bus rman\n");
  331                 bus->rman.rm_end = 0;
  332                 goto fail;
  333         }
  334         error = rman_manage_region(&bus->rman, sc->vmd_bus_start,
  335             sc->vmd_bus_end);
  336         if (error) {
  337                 device_printf(dev, "Failed to add resource to bus rman\n");
  338                 goto fail;
  339         }
  340 
  341         w = &sc->psc.mem;
  342         w->rman.rm_type = RMAN_ARRAY;
  343         snprintf(buf, sizeof(buf), "%s memory window", device_get_nameunit(dev));
  344         w->rman.rm_descr = strdup(buf, M_DEVBUF);
  345         error = rman_init(&w->rman);
  346         if (error) {
  347                 device_printf(dev, "Failed to initialize memory rman\n");
  348                 w->rman.rm_end = 0;
  349                 goto fail;
  350         }
  351         error = rman_manage_region(&w->rman,
  352             rman_get_start(sc->vmd_regs_res[1]),
  353             rman_get_end(sc->vmd_regs_res[1]));
  354         if (error) {
  355                 device_printf(dev, "Failed to add resource to memory rman\n");
  356                 goto fail;
  357         }
  358         error = rman_manage_region(&w->rman,
  359             rman_get_start(sc->vmd_regs_res[2]) + 0x2000,
  360             rman_get_end(sc->vmd_regs_res[2]));
  361         if (error) {
  362                 device_printf(dev, "Failed to add resource to memory rman\n");
  363                 goto fail;
  364         }
  365 
  366         LIST_INIT(&sc->vmd_users);
  367         sc->vmd_fist_vector = (t->flags & VECTOR_OFFSET) ? 1 : 0;
  368         sc->vmd_msix_count = pci_msix_count(dev);
  369         if (vmd_bypass_msi && (t->flags & CAN_BYPASS_MSI)) {
  370                 sc->vmd_msix_count = 0;
  371                 vmd_set_msi_bypass(dev, true);
  372         } else if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) {
  373                 sc->vmd_irq = malloc(sizeof(struct vmd_irq) *
  374                     sc->vmd_msix_count, M_DEVBUF, M_WAITOK | M_ZERO);
  375                 for (i = 0; i < sc->vmd_msix_count; i++) {
  376                         vi = &sc->vmd_irq[i];
  377                         vi->vi_rid = i + 1;
  378                         vi->vi_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
  379                             &vi->vi_rid, RF_ACTIVE | RF_SHAREABLE);
  380                         if (vi->vi_res == NULL) {
  381                                 device_printf(dev, "Failed to allocate irq\n");
  382                                 goto fail;
  383                         }
  384                         vi->vi_irq = rman_get_start(vi->vi_res);
  385                         if (bus_setup_intr(dev, vi->vi_res, INTR_TYPE_MISC |
  386                             INTR_MPSAFE, vmd_intr, NULL, vi, &vi->vi_handle)) {
  387                                 device_printf(dev, "Can't set up interrupt\n");
  388                                 bus_release_resource(dev, SYS_RES_IRQ,
  389                                     vi->vi_rid, vi->vi_res);
  390                                 vi->vi_res = NULL;
  391                                 goto fail;
  392                         }
  393                 }
  394                 vmd_set_msi_bypass(dev, false);
  395         }
  396 
  397         sc->vmd_dma_tag = bus_get_dma_tag(dev);
  398 
  399         sc->psc.child = device_add_child(dev, "pci", -1);
  400         return (bus_generic_attach(dev));
  401 
  402 fail:
  403         vmd_free(sc);
  404         return (ENXIO);
  405 }
  406 
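       /*
        * Detach: tear down the child PCI bus, disable the MSI bypass if
        * interrupt remapping was not set up, and release all resources via
        * vmd_free().
        */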
  407 static int
  408 vmd_detach(device_t dev)
  409 {
  410         struct vmd_softc *sc = device_get_softc(dev);
  411         int error;
  412 
  413         error = bus_generic_detach(dev);
  414         if (error)
  415                 return (error);
  416         error = device_delete_children(dev);
  417         if (error)
  418                 return (error);
  419         if (sc->vmd_msix_count == 0)
  420                 vmd_set_msi_bypass(dev, false);
  421         vmd_free(sc);
  422         return (0);
  423 }
  424 
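       /*
        * Children use the VMD's own DMA tag: transactions from devices
        * behind the VMD are presented to the rest of the system as coming
        * from the VMD endpoint itself.
        */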
  425 static bus_dma_tag_t
  426 vmd_get_dma_tag(device_t dev, device_t child)
  427 {
  428         struct vmd_softc *sc = device_get_softc(dev);
  429 
  430         return (sc->vmd_dma_tag);
  431 }
  432 
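       /*
        * Resource allocation for children: IRQs are forwarded to the parent
        * bus with RF_SHAREABLE forced, while memory ranges and bus numbers
        * come from the rmans set up in vmd_attach().  I/O ports (and legacy
        * interrupts, rid 0) are not supported.
        */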
  433 static struct resource *
  434 vmd_alloc_resource(device_t dev, device_t child, int type, int *rid,
  435     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
  436 {
  437         struct vmd_softc *sc = device_get_softc(dev);
  438         struct resource *res;
  439 
  440         switch (type) {
  441         case SYS_RES_IRQ:
   442                 /* VMD hardware does not support legacy interrupts. */
  443                 if (*rid == 0)
  444                         return (NULL);
  445                 return (bus_generic_alloc_resource(dev, child, type, rid,
  446                     start, end, count, flags | RF_SHAREABLE));
  447         case SYS_RES_MEMORY:
  448                 res = rman_reserve_resource(&sc->psc.mem.rman, start, end,
  449                     count, flags, child);
  450                 if (res == NULL)
  451                         return (NULL);
  452                 if (bootverbose)
  453                         device_printf(dev,
  454                             "allocated memory range (%#jx-%#jx) for rid %d of %s\n",
  455                             rman_get_start(res), rman_get_end(res), *rid,
  456                             pcib_child_name(child));
  457                 break;
  458         case PCI_RES_BUS:
  459                 res = rman_reserve_resource(&sc->psc.bus.rman, start, end,
  460                     count, flags, child);
  461                 if (res == NULL)
  462                         return (NULL);
  463                 if (bootverbose)
  464                         device_printf(dev,
  465                             "allocated bus range (%ju-%ju) for rid %d of %s\n",
  466                             rman_get_start(res), rman_get_end(res), *rid,
  467                             pcib_child_name(child));
  468                 break;
  469         default:
   470                 /* VMD hardware does not support I/O ports. */
  471                 return (NULL);
  472         }
  473         rman_set_rid(res, *rid);
  474         return (res);
  475 }
  476 
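       /*
        * Adjust and release follow the same split as allocation: IRQ
        * resources are handled by the parent bus, memory and bus numbers by
        * the local rmans.
        */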
  477 static int
  478 vmd_adjust_resource(device_t dev, device_t child, int type,
  479     struct resource *r, rman_res_t start, rman_res_t end)
  480 {
  481 
  482         if (type == SYS_RES_IRQ) {
  483                 return (bus_generic_adjust_resource(dev, child, type, r,
  484                     start, end));
  485         }
  486         return (rman_adjust_resource(r, start, end));
  487 }
  488 
  489 static int
  490 vmd_release_resource(device_t dev, device_t child, int type, int rid,
  491     struct resource *r)
  492 {
  493 
  494         if (type == SYS_RES_IRQ) {
  495                 return (bus_generic_release_resource(dev, child, type, rid,
  496                     r));
  497         }
  498         return (rman_release_resource(r));
  499 }
  500 
  501 static int
  502 vmd_route_interrupt(device_t dev, device_t child, int pin)
  503 {
  504 
   505         /* VMD hardware does not support legacy interrupts. */
  506         return (PCI_INVALID_IRQ);
  507 }
  508 
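       /*
        * Allocate MSI vectors for a child.  In bypass mode the request is
        * simply passed up to the grandparent pcib.  Otherwise the child is
        * bound to the least loaded of the VMD's own MSI-X vectors, and every
        * requested IRQ maps to that single vector.
        */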
  509 static int
  510 vmd_alloc_msi(device_t dev, device_t child, int count, int maxcount,
  511     int *irqs)
  512 {
  513         struct vmd_softc *sc = device_get_softc(dev);
  514         struct vmd_irq_user *u;
  515         int i, ibest = 0, best = INT_MAX;
  516 
  517         if (sc->vmd_msix_count == 0) {
  518                 return (PCIB_ALLOC_MSI(device_get_parent(device_get_parent(dev)),
  519                     child, count, maxcount, irqs));
  520         }
  521 
  522         if (count > vmd_max_msi)
  523                 return (ENOSPC);
  524         LIST_FOREACH(u, &sc->vmd_users, viu_link) {
  525                 if (u->viu_child == child)
  526                         return (EBUSY);
  527         }
  528 
  529         for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
  530                 if (best > sc->vmd_irq[i].vi_nusers) {
  531                         best = sc->vmd_irq[i].vi_nusers;
  532                         ibest = i;
  533                 }
  534         }
  535 
  536         u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
  537         u->viu_child = child;
  538         u->viu_vector = ibest;
  539         LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
  540         sc->vmd_irq[ibest].vi_nusers += count;
  541 
  542         for (i = 0; i < count; i++)
  543                 irqs[i] = sc->vmd_irq[ibest].vi_irq;
  544         return (0);
  545 }
  546 
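       /*
        * Release a child's MSI vectors: remove its user record and drop the
        * load count of the vector it was bound to.
        */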
  547 static int
  548 vmd_release_msi(device_t dev, device_t child, int count, int *irqs)
  549 {
  550         struct vmd_softc *sc = device_get_softc(dev);
  551         struct vmd_irq_user *u;
  552 
  553         if (sc->vmd_msix_count == 0) {
  554                 return (PCIB_RELEASE_MSI(device_get_parent(device_get_parent(dev)),
  555                     child, count, irqs));
  556         }
  557 
  558         LIST_FOREACH(u, &sc->vmd_users, viu_link) {
  559                 if (u->viu_child == child) {
  560                         sc->vmd_irq[u->viu_vector].vi_nusers -= count;
  561                         LIST_REMOVE(u, viu_link);
  562                         free(u, M_DEVBUF);
  563                         return (0);
  564                 }
  565         }
  566         return (EINVAL);
  567 }
  568 
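       /*
        * Allocate one MSI-X vector for a child, up to vmd_max_msix per
        * child, again picking the least loaded of the VMD's own vectors.
        */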
  569 static int
  570 vmd_alloc_msix(device_t dev, device_t child, int *irq)
  571 {
  572         struct vmd_softc *sc = device_get_softc(dev);
  573         struct vmd_irq_user *u;
  574         int i, ibest = 0, best = INT_MAX;
  575 
  576         if (sc->vmd_msix_count == 0) {
  577                 return (PCIB_ALLOC_MSIX(device_get_parent(device_get_parent(dev)),
  578                     child, irq));
  579         }
  580 
  581         i = 0;
  582         LIST_FOREACH(u, &sc->vmd_users, viu_link) {
  583                 if (u->viu_child == child)
  584                         i++;
  585         }
  586         if (i >= vmd_max_msix)
  587                 return (ENOSPC);
  588 
  589         for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
  590                 if (best > sc->vmd_irq[i].vi_nusers) {
  591                         best = sc->vmd_irq[i].vi_nusers;
  592                         ibest = i;
  593                 }
  594         }
  595 
  596         u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
  597         u->viu_child = child;
  598         u->viu_vector = ibest;
  599         LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
  600         sc->vmd_irq[ibest].vi_nusers++;
  601 
  602         *irq = sc->vmd_irq[ibest].vi_irq;
  603         return (0);
  604 }
  605 
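       /*
        * Release a child's MSI-X vector, matched by child and IRQ number.
        */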
  606 static int
  607 vmd_release_msix(device_t dev, device_t child, int irq)
  608 {
  609         struct vmd_softc *sc = device_get_softc(dev);
  610         struct vmd_irq_user *u;
  611 
  612         if (sc->vmd_msix_count == 0) {
  613                 return (PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)),
  614                     child, irq));
  615         }
  616 
  617         LIST_FOREACH(u, &sc->vmd_users, viu_link) {
  618                 if (u->viu_child == child &&
  619                     sc->vmd_irq[u->viu_vector].vi_irq == irq) {
  620                         sc->vmd_irq[u->viu_vector].vi_nusers--;
  621                         LIST_REMOVE(u, viu_link);
  622                         free(u, M_DEVBUF);
  623                         return (0);
  624                 }
  625         }
  626         return (EINVAL);
  627 }
  628 
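       /*
        * Report the MSI address/data pair a child should be programmed with.
        * When remapping, the address encodes the index of the VMD MSI-X
        * vector the child is bound to and the data word is zero.
        */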
  629 static int
  630 vmd_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data)
  631 {
  632         struct vmd_softc *sc = device_get_softc(dev);
  633         int i;
  634 
  635         if (sc->vmd_msix_count == 0) {
  636                 return (PCIB_MAP_MSI(device_get_parent(device_get_parent(dev)),
  637                     child, irq, addr, data));
  638         }
  639 
  640         for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
  641                 if (sc->vmd_irq[i].vi_irq == irq)
  642                         break;
  643         }
  644         if (i >= sc->vmd_msix_count)
  645                 return (EINVAL);
  646         *addr = MSI_INTEL_ADDR_BASE | (i << 12);
  647         *data = 0;
  648         return (0);
  649 }
  650 
  651 static device_method_t vmd_pci_methods[] = {
  652         /* Device interface */
  653         DEVMETHOD(device_probe,                 vmd_probe),
  654         DEVMETHOD(device_attach,                vmd_attach),
  655         DEVMETHOD(device_detach,                vmd_detach),
  656         DEVMETHOD(device_suspend,               bus_generic_suspend),
  657         DEVMETHOD(device_resume,                bus_generic_resume),
  658         DEVMETHOD(device_shutdown,              bus_generic_shutdown),
  659 
  660         /* Bus interface */
  661         DEVMETHOD(bus_get_dma_tag,              vmd_get_dma_tag),
  662         DEVMETHOD(bus_read_ivar,                pcib_read_ivar),
  663         DEVMETHOD(bus_write_ivar,               pcib_write_ivar),
  664         DEVMETHOD(bus_alloc_resource,           vmd_alloc_resource),
  665         DEVMETHOD(bus_adjust_resource,          vmd_adjust_resource),
  666         DEVMETHOD(bus_release_resource,         vmd_release_resource),
  667         DEVMETHOD(bus_activate_resource,        bus_generic_activate_resource),
  668         DEVMETHOD(bus_deactivate_resource,      bus_generic_deactivate_resource),
  669         DEVMETHOD(bus_setup_intr,               bus_generic_setup_intr),
  670         DEVMETHOD(bus_teardown_intr,            bus_generic_teardown_intr),
  671 
  672         /* pcib interface */
  673         DEVMETHOD(pcib_maxslots,                pcib_maxslots),
  674         DEVMETHOD(pcib_read_config,             vmd_read_config),
  675         DEVMETHOD(pcib_write_config,            vmd_write_config),
  676         DEVMETHOD(pcib_route_interrupt,         vmd_route_interrupt),
  677         DEVMETHOD(pcib_alloc_msi,               vmd_alloc_msi),
  678         DEVMETHOD(pcib_release_msi,             vmd_release_msi),
  679         DEVMETHOD(pcib_alloc_msix,              vmd_alloc_msix),
  680         DEVMETHOD(pcib_release_msix,            vmd_release_msix),
  681         DEVMETHOD(pcib_map_msi,                 vmd_map_msi),
  682         DEVMETHOD(pcib_request_feature,         pcib_request_feature_allow),
  683 
  684         DEVMETHOD_END
  685 };
  686 
  687 DEFINE_CLASS_0(pcib, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc));
  688 DRIVER_MODULE(vmd, pci, vmd_pci_driver, NULL, NULL);
  689 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd,
  690     vmd_devs, nitems(vmd_devs) - 1);
