The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/pci/pci.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $OpenBSD: pci.c,v 1.126 2022/11/27 22:55:31 kn Exp $    */
    2 /*      $NetBSD: pci.c,v 1.31 1997/06/06 23:48:04 thorpej Exp $ */
    3 
    4 /*
    5  * Copyright (c) 1995, 1996 Christopher G. Demetriou.  All rights reserved.
    6  * Copyright (c) 1994 Charles Hannum.  All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by Charles Hannum.
   19  * 4. The name of the author may not be used to endorse or promote products
   20  *    derived from this software without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   32  */
   33 
   34 /*
   35  * PCI bus autoconfiguration.
   36  */
   37 
   38 #include <sys/param.h>
   39 #include <sys/systm.h>
   40 #include <sys/device.h>
   41 #include <sys/malloc.h>
   42 
   43 #include <dev/pci/pcireg.h>
   44 #include <dev/pci/pcivar.h>
   45 #include <dev/pci/pcidevs.h>
   46 #include <dev/pci/ppbreg.h>
   47 
   48 int pcimatch(struct device *, void *, void *);
   49 void pciattach(struct device *, struct device *, void *);
   50 int pcidetach(struct device *, int);
   51 int pciactivate(struct device *, int);
   52 void pci_suspend(struct pci_softc *);
   53 void pci_powerdown(struct pci_softc *);
   54 void pci_resume(struct pci_softc *);
   55 
   56 struct msix_vector {
   57         uint32_t mv_ma;
   58         uint32_t mv_mau32;
   59         uint32_t mv_md;
   60         uint32_t mv_vc;
   61 };
   62 
   63 #define NMAPREG                 ((PCI_MAPREG_END - PCI_MAPREG_START) / \
   64                                     sizeof(pcireg_t))
   65 struct pci_dev {
   66         struct device *pd_dev;
   67         LIST_ENTRY(pci_dev) pd_next;
   68         pcitag_t pd_tag;        /* pci register tag */
   69         pcireg_t pd_csr;
   70         pcireg_t pd_bhlc;
   71         pcireg_t pd_int;
   72         pcireg_t pd_map[NMAPREG];
   73         pcireg_t pd_mask[NMAPREG];
   74         pcireg_t pd_msi_mc;
   75         pcireg_t pd_msi_ma;
   76         pcireg_t pd_msi_mau32;
   77         pcireg_t pd_msi_md;
   78         pcireg_t pd_msix_mc;
   79         struct msix_vector *pd_msix_table;
   80         int pd_pmcsr_state;
   81         int pd_vga_decode;
   82 };
   83 
   84 #ifdef APERTURE
   85 extern int allowaperture;
   86 #endif
   87 
   88 const struct cfattach pci_ca = {
   89         sizeof(struct pci_softc), pcimatch, pciattach, pcidetach, pciactivate
   90 };
   91 
   92 struct cfdriver pci_cd = {
   93         NULL, "pci", DV_DULL
   94 };
   95 
   96 int     pci_ndomains;
   97 
   98 struct proc *pci_vga_proc;
   99 struct pci_softc *pci_vga_pci;
  100 pcitag_t pci_vga_tag;
  101 
  102 int     pci_dopm;
  103 
  104 int     pciprint(void *, const char *);
  105 int     pcisubmatch(struct device *, void *, void *);
  106 
  107 #ifdef PCI_MACHDEP_ENUMERATE_BUS
  108 #define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
  109 #else
  110 int pci_enumerate_bus(struct pci_softc *,
  111     int (*)(struct pci_attach_args *), struct pci_attach_args *);
  112 #endif
  113 int     pci_reserve_resources(struct pci_attach_args *);
  114 int     pci_primary_vga(struct pci_attach_args *);
  115 
  116 /*
  117  * Important note about PCI-ISA bridges:
  118  *
  119  * Callbacks are used to configure these devices so that ISA/EISA bridges
  120  * can attach their child busses after PCI configuration is done.
  121  *
  122  * This works because:
  123  *      (1) there can be at most one ISA/EISA bridge per PCI bus, and
  124  *      (2) any ISA/EISA bridges must be attached to primary PCI
  125  *          busses (i.e. bus zero).
  126  *
  127  * That boils down to: there can only be one of these outstanding
  128  * at a time, it is cleared when configuring PCI bus 0 before any
  129  * subdevices have been found, and it is run after all subdevices
  130  * of PCI bus 0 have been found.
  131  *
  132  * This is needed because there are some (legacy) PCI devices which
  133  * can show up as ISA/EISA devices as well (the prime example of which
  134  * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
  135  * and the bridge is seen before the video board is, the board can show
  136  * up as an ISA device, and that can (bogusly) complicate the PCI device's
  137  * attach code, or make the PCI device not be properly attached at all.
  138  *
  139  * We use the generic config_defer() facility to achieve this.
  140  */
  141 
  142 int
  143 pcimatch(struct device *parent, void *match, void *aux)
  144 {
  145         struct cfdata *cf = match;
  146         struct pcibus_attach_args *pba = aux;
  147 
  148         if (strcmp(pba->pba_busname, cf->cf_driver->cd_name))
  149                 return (0);
  150 
  151         /* Check the locators */
  152         if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
  153             cf->pcibuscf_bus != pba->pba_bus)
  154                 return (0);
  155 
  156         /* sanity */
  157         if (pba->pba_bus < 0 || pba->pba_bus > 255)
  158                 return (0);
  159 
  160         /*
  161          * XXX check other (hardware?) indicators
  162          */
  163 
  164         return (1);
  165 }
  166 
/*
 * Attach glue: called once per discovered PCI bus.  Copies the bus
 * properties handed down by the parent bridge into the softc,
 * reserves our bus number, then enumerates the bus three times:
 * once to reserve resources already assigned to BARs, once to find
 * the currently active VGA device, and once to attach child drivers.
 */
void
pciattach(struct device *parent, struct device *self, void *aux)
{
        struct pcibus_attach_args *pba = aux;
        struct pci_softc *sc = (struct pci_softc *)self;

        /* Give machine-dependent code a chance to set things up first. */
        pci_attach_hook(parent, self, pba);

        printf("\n");

        /* No devices recorded for suspend/resume bookkeeping yet. */
        LIST_INIT(&sc->sc_devs);

        sc->sc_iot = pba->pba_iot;
        sc->sc_memt = pba->pba_memt;
        sc->sc_dmat = pba->pba_dmat;
        sc->sc_pc = pba->pba_pc;
        sc->sc_flags = pba->pba_flags;
        sc->sc_ioex = pba->pba_ioex;
        sc->sc_memex = pba->pba_memex;
        sc->sc_pmemex = pba->pba_pmemex;
        sc->sc_busex = pba->pba_busex;
        sc->sc_domain = pba->pba_domain;
        sc->sc_bus = pba->pba_bus;
        sc->sc_bridgetag = pba->pba_bridgetag;
        sc->sc_bridgeih = pba->pba_bridgeih;
        sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
        sc->sc_intrswiz = pba->pba_intrswiz;
        sc->sc_intrtag = pba->pba_intrtag;

        /* Reserve our own bus number. */
        if (sc->sc_busex)
                extent_alloc_region(sc->sc_busex, sc->sc_bus, 1, EX_NOWAIT);

        /* First pass: reserve resources already assigned by firmware. */
        pci_enumerate_bus(sc, pci_reserve_resources, NULL);

        /* Find the VGA device that's currently active. */
        if (pci_enumerate_bus(sc, pci_primary_vga, NULL))
                pci_vga_pci = sc;

        /* Final pass: match and attach drivers to every device. */
        pci_enumerate_bus(sc, NULL, NULL);
}
  208 
  209 int
  210 pcidetach(struct device *self, int flags)
  211 {
  212         return pci_detach_devices((struct pci_softc *)self, flags);
  213 }
  214 
  215 int
  216 pciactivate(struct device *self, int act)
  217 {
  218         int rv = 0;
  219 
  220         switch (act) {
  221         case DVACT_SUSPEND:
  222                 rv = config_activate_children(self, act);
  223                 pci_suspend((struct pci_softc *)self);
  224                 break;
  225         case DVACT_RESUME:
  226                 pci_resume((struct pci_softc *)self);
  227                 rv = config_activate_children(self, act);
  228                 break;
  229         case DVACT_POWERDOWN:
  230                 rv = config_activate_children(self, act);
  231                 pci_powerdown((struct pci_softc *)self);
  232                 break;
  233         default:
  234                 rv = config_activate_children(self, act);
  235                 break;
  236         }
  237         return (rv);
  238 }
  239 
/*
 * Save the configuration-space state of every recorded type-0 device
 * on this bus so that pci_resume() can restore it after a suspend.
 */
void
pci_suspend(struct pci_softc *sc)
{
        struct pci_dev *pd;
        pcireg_t bhlc, reg;
        int off, i;

        LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
                /*
                 * Only handle header type 0 here; PCI-PCI bridges and
                 * CardBus bridges need special handling, which will
                 * be done in their specific drivers.
                 */
                bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
                if (PCI_HDRTYPE_TYPE(bhlc) != 0)
                        continue;

                /* Save registers that may get lost. */
                for (i = 0; i < NMAPREG; i++)
                        pd->pd_map[i] = pci_conf_read(sc->sc_pc, pd->pd_tag,
                            PCI_MAPREG_START + (i * 4));
                pd->pd_csr = pci_conf_read(sc->sc_pc, pd->pd_tag,
                    PCI_COMMAND_STATUS_REG);
                pd->pd_bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag,
                    PCI_BHLC_REG);
                pd->pd_int = pci_conf_read(sc->sc_pc, pd->pd_tag,
                    PCI_INTERRUPT_REG);

                /* Save the MSI control/address/data registers if present. */
                if (pci_get_capability(sc->sc_pc, pd->pd_tag,
                    PCI_CAP_MSI, &off, &reg)) {
                        pd->pd_msi_ma = pci_conf_read(sc->sc_pc, pd->pd_tag,
                            off + PCI_MSI_MA);
                        /* 64-bit MSI moves the data register up a word. */
                        if (reg & PCI_MSI_MC_C64) {
                                pd->pd_msi_mau32 = pci_conf_read(sc->sc_pc,
                                    pd->pd_tag, off + PCI_MSI_MAU32);
                                pd->pd_msi_md = pci_conf_read(sc->sc_pc,
                                    pd->pd_tag, off + PCI_MSI_MD64);
                        } else {
                                pd->pd_msi_md = pci_conf_read(sc->sc_pc,
                                    pd->pd_tag, off + PCI_MSI_MD32);
                        }
                        pd->pd_msi_mc = reg;
                }

                /* The MSI-X table lives in memory space; saved separately. */
                pci_suspend_msix(sc->sc_pc, pd->pd_tag, sc->sc_memt,
                    &pd->pd_msix_mc, pd->pd_msix_table);
        }
}
  288 
  289 void
  290 pci_powerdown(struct pci_softc *sc)
  291 {
  292         struct pci_dev *pd;
  293         pcireg_t bhlc;
  294 
  295         LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
  296                 /*
  297                  * Only handle header type 0 here; PCI-PCI bridges and
  298                  * CardBus bridges need special handling, which will
  299                  * be done in their specific drivers.
  300                  */
  301                 bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
  302                 if (PCI_HDRTYPE_TYPE(bhlc) != 0)
  303                         continue;
  304 
  305                 if (pci_dopm) {
  306                         /*
  307                          * Place the device into the lowest possible
  308                          * power state.
  309                          */
  310                         pd->pd_pmcsr_state = pci_get_powerstate(sc->sc_pc,
  311                             pd->pd_tag);
  312                         pci_set_powerstate(sc->sc_pc, pd->pd_tag,
  313                             pci_min_powerstate(sc->sc_pc, pd->pd_tag));
  314                 }
  315         }
  316 }
  317 
/*
 * Restore the configuration-space state saved by pci_suspend() for
 * every recorded type-0 device, after first restoring device power.
 */
void
pci_resume(struct pci_softc *sc)
{
        struct pci_dev *pd;
        pcireg_t bhlc, reg;
        int off, i;

        LIST_FOREACH(pd, &sc->sc_devs, pd_next) {
                /*
                 * Only handle header type 0 here; PCI-PCI bridges and
                 * CardBus bridges need special handling, which will
                 * be done in their specific drivers.
                 */
                bhlc = pci_conf_read(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG);
                if (PCI_HDRTYPE_TYPE(bhlc) != 0)
                        continue;

                /* Restore power. */
                if (pci_dopm)
                        pci_set_powerstate(sc->sc_pc, pd->pd_tag,
                            pd->pd_pmcsr_state);

                /* Restore the registers saved above. */
                for (i = 0; i < NMAPREG; i++)
                        pci_conf_write(sc->sc_pc, pd->pd_tag,
                            PCI_MAPREG_START + (i * 4), pd->pd_map[i]);
                /*
                 * Merge: keep the live status bits (high half) and
                 * restore only the saved command bits (low half).
                 */
                reg = pci_conf_read(sc->sc_pc, pd->pd_tag,
                    PCI_COMMAND_STATUS_REG);
                pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG,
                    (reg & 0xffff0000) | (pd->pd_csr & 0x0000ffff));
                pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_BHLC_REG,
                    pd->pd_bhlc);
                pci_conf_write(sc->sc_pc, pd->pd_tag, PCI_INTERRUPT_REG,
                    pd->pd_int);

                /* Restore MSI state; control register is written last. */
                if (pci_get_capability(sc->sc_pc, pd->pd_tag,
                    PCI_CAP_MSI, &off, &reg)) {
                        pci_conf_write(sc->sc_pc, pd->pd_tag,
                            off + PCI_MSI_MA, pd->pd_msi_ma);
                        /* 64-bit MSI moves the data register up a word. */
                        if (reg & PCI_MSI_MC_C64) {
                                pci_conf_write(sc->sc_pc, pd->pd_tag,
                                    off + PCI_MSI_MAU32, pd->pd_msi_mau32);
                                pci_conf_write(sc->sc_pc, pd->pd_tag,
                                    off + PCI_MSI_MD64, pd->pd_msi_md);
                        } else {
                                pci_conf_write(sc->sc_pc, pd->pd_tag,
                                    off + PCI_MSI_MD32, pd->pd_msi_md);
                        }
                        pci_conf_write(sc->sc_pc, pd->pd_tag,
                            off + PCI_MSI_MC, pd->pd_msi_mc);
                }

                /* Restore the MSI-X table saved in pci_suspend(). */
                pci_resume_msix(sc->sc_pc, pd->pd_tag, sc->sc_memt,
                    pd->pd_msix_mc, pd->pd_msix_table);
        }
}
  374 
  375 int
  376 pciprint(void *aux, const char *pnp)
  377 {
  378         struct pci_attach_args *pa = aux;
  379         char devinfo[256];
  380 
  381         if (pnp) {
  382                 pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
  383                     sizeof devinfo);
  384                 printf("%s at %s", devinfo, pnp);
  385         }
  386         printf(" dev %d function %d", pa->pa_device, pa->pa_function);
  387         if (!pnp) {
  388                 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo,
  389                     sizeof devinfo);
  390                 printf(" %s", devinfo);
  391         }
  392 
  393         return (UNCONF);
  394 }
  395 
  396 int
  397 pcisubmatch(struct device *parent, void *match,  void *aux)
  398 {
  399         struct cfdata *cf = match;
  400         struct pci_attach_args *pa = aux;
  401 
  402         if (cf->pcicf_dev != PCI_UNK_DEV &&
  403             cf->pcicf_dev != pa->pa_device)
  404                 return (0);
  405         if (cf->pcicf_function != PCI_UNK_FUNCTION &&
  406             cf->pcicf_function != pa->pa_function)
  407                 return (0);
  408 
  409         return ((*cf->cf_attach->ca_match)(parent, match, aux));
  410 }
  411 
/*
 * Probe the device at `tag'.  If `match' is non-NULL it is called with
 * the assembled attach arguments and its result returned (on a non-zero
 * result the arguments are also copied to *pap, if given).  If `match'
 * is NULL the device is recorded for suspend/resume bookkeeping, its
 * BAR size masks are probed, and a driver is attached.  Returns 0 for
 * absent or skipped devices.
 */
int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
        pci_chipset_tag_t pc = sc->sc_pc;
        struct pci_attach_args pa;
        struct pci_dev *pd;
        pcireg_t id, class, intr, bhlcr, cap;
        int pin, bus, device, function;
        int off, ret = 0;
        uint64_t addr;

        pci_decompose_tag(pc, tag, &bus, &device, &function);

        /* Unknown header types cannot be handled. */
        bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
        if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
                return (0);

        id = pci_conf_read(pc, tag, PCI_ID_REG);
        class = pci_conf_read(pc, tag, PCI_CLASS_REG);

        /* Invalid vendor ID value? */
        if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
                return (0);
        /* XXX Not invalid, but we've done this ~forever. */
        if (PCI_VENDOR(id) == 0)
                return (0);

        /* Assemble the attach arguments handed to match/attach routines. */
        pa.pa_iot = sc->sc_iot;
        pa.pa_memt = sc->sc_memt;
        pa.pa_dmat = sc->sc_dmat;
        pa.pa_pc = pc;
        pa.pa_ioex = sc->sc_ioex;
        pa.pa_memex = sc->sc_memex;
        pa.pa_pmemex = sc->sc_pmemex;
        pa.pa_busex = sc->sc_busex;
        pa.pa_domain = sc->sc_domain;
        pa.pa_bus = bus;
        pa.pa_device = device;
        pa.pa_function = function;
        pa.pa_tag = tag;
        pa.pa_id = id;
        pa.pa_class = class;
        pa.pa_bridgetag = sc->sc_bridgetag;
        pa.pa_bridgeih = sc->sc_bridgeih;

        /* This is a simplification of the NetBSD code.
           We don't support turning off I/O or memory
           on broken hardware. <csapuntz@stanford.edu> */
        pa.pa_flags = sc->sc_flags;
        pa.pa_flags |= PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;

        /* On the root bus interrupts need no swizzling. */
        if (sc->sc_bridgetag == NULL) {
                pa.pa_intrswiz = 0;
                pa.pa_intrtag = tag;
        } else {
                pa.pa_intrswiz = sc->sc_intrswiz + device;
                pa.pa_intrtag = sc->sc_intrtag;
        }

        intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

        pin = PCI_INTERRUPT_PIN(intr);
        pa.pa_rawintrpin = pin;
        if (pin == PCI_INTERRUPT_PIN_NONE) {
                /* no interrupt */
                pa.pa_intrpin = 0;
        } else {
                /*
                 * swizzle it based on the number of busses we're
                 * behind and our device number.
                 */
                pa.pa_intrpin =         /* XXX */
                    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
        }
        pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

        /* HyperTransport MSI mapping: advertise MSI if it can work. */
        if (pci_get_ht_capability(pc, tag, PCI_HT_CAP_MSI, &off, &cap)) {
                /*
                 * XXX Should we enable MSI mapping ourselves on
                 * systems that have it disabled?
                 */
                if (cap & PCI_HT_MSI_ENABLED) {
                        if ((cap & PCI_HT_MSI_FIXED) == 0) {
                                addr = pci_conf_read(pc, tag,
                                    off + PCI_HT_MSI_ADDR);
                                addr |= (uint64_t)pci_conf_read(pc, tag,
                                    off + PCI_HT_MSI_ADDR_HI32) << 32;
                        } else
                                addr = PCI_HT_MSI_FIXED_ADDR;

                        /* 
                         * XXX This will fail to enable MSI on systems
                         * that don't use the canonical address.
                         */
                        if (addr == PCI_HT_MSI_FIXED_ADDR)
                                pa.pa_flags |= PCI_FLAGS_MSI_ENABLED;
                }
        }

        /*
         * Give the MD code a chance to alter pci_attach_args and/or
         * skip devices.
         */
        if (pci_probe_device_hook(pc, &pa) != 0)
                return (0);

        if (match != NULL) {
                ret = (*match)(&pa);
                if (ret != 0 && pap != NULL)
                        *pap = pa;
        } else {
                pcireg_t address, csr;
                int i, reg, reg_start, reg_end;
                int s;

                /* Record the device for suspend/resume bookkeeping. */
                pd = malloc(sizeof *pd, M_DEVBUF, M_ZERO | M_WAITOK);
                pd->pd_tag = tag;
                LIST_INSERT_HEAD(&sc->sc_devs, pd, pd_next);

                /* The BAR range depends on the header type. */
                switch (PCI_HDRTYPE_TYPE(bhlcr)) {
                case 0:
                        reg_start = PCI_MAPREG_START;
                        reg_end = PCI_MAPREG_END;
                        break;
                case 1: /* PCI-PCI bridge */
                        reg_start = PCI_MAPREG_START;
                        reg_end = PCI_MAPREG_PPB_END;
                        break;
                case 2: /* PCI-CardBus bridge */
                        reg_start = PCI_MAPREG_START;
                        reg_end = PCI_MAPREG_PCB_END;
                        break;
                default:
                        /* NOTE(review): unreachable — types > 2 were
                         * rejected at the top of this function. */
                        return (0);
                }

                pd->pd_msix_table = pci_alloc_msix_table(sc->sc_pc, pd->pd_tag);

                /*
                 * Size each BAR by writing all-ones and reading back the
                 * mask, with decoding disabled and interrupts blocked so
                 * nothing observes the temporarily bogus addresses.
                 */
                s = splhigh();
                csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
                if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
                        pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr &
                            ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE));

                for (reg = reg_start, i = 0; reg < reg_end; reg += 4, i++) {
                        address = pci_conf_read(pc, tag, reg);
                        pci_conf_write(pc, tag, reg, 0xffffffff);
                        pd->pd_mask[i] = pci_conf_read(pc, tag, reg);
                        pci_conf_write(pc, tag, reg, address);
                }

                if (csr & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
                        pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
                splx(s);

                /* Note VGA-class devices for legacy VGA routing. */
                if ((PCI_CLASS(class) == PCI_CLASS_DISPLAY &&
                    PCI_SUBCLASS(class) == PCI_SUBCLASS_DISPLAY_VGA) ||
                    (PCI_CLASS(class) == PCI_CLASS_PREHISTORIC &&
                    PCI_SUBCLASS(class) == PCI_SUBCLASS_PREHISTORIC_VGA))
                        pd->pd_vga_decode = 1;

                pd->pd_dev = config_found_sm(&sc->sc_dev, &pa, pciprint,
                    pcisubmatch);
                if (pd->pd_dev)
                        pci_dev_postattach(pd->pd_dev, &pa);
        }

        return (ret);
}
  582 
  583 int
  584 pci_detach_devices(struct pci_softc *sc, int flags)
  585 {
  586         struct pci_dev *pd, *next;
  587         int ret;
  588 
  589         ret = config_detach_children(&sc->sc_dev, flags);
  590         if (ret != 0)
  591                 return (ret);
  592 
  593         for (pd = LIST_FIRST(&sc->sc_devs); pd != NULL; pd = next) {
  594                 pci_free_msix_table(sc->sc_pc, pd->pd_tag, pd->pd_msix_table);
  595                 next = LIST_NEXT(pd, pd_next);
  596                 free(pd, M_DEVBUF, sizeof *pd);
  597         }
  598         LIST_INIT(&sc->sc_devs);
  599 
  600         return (0);
  601 }
  602 
  603 int
  604 pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
  605     int *offset, pcireg_t *value)
  606 {
  607         pcireg_t reg;
  608         unsigned int ofs;
  609 
  610         reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
  611         if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
  612                 return (0);
  613 
  614         /* Determine the Capability List Pointer register to start with. */
  615         reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
  616         switch (PCI_HDRTYPE_TYPE(reg)) {
  617         case 0: /* standard device header */
  618         case 1: /* PCI-PCI bridge header */
  619                 ofs = PCI_CAPLISTPTR_REG;
  620                 break;
  621         case 2: /* PCI-CardBus bridge header */
  622                 ofs = PCI_CARDBUS_CAPLISTPTR_REG;
  623                 break;
  624         default:
  625                 return (0);
  626         }
  627 
  628         ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
  629         while (ofs != 0) {
  630                 /*
  631                  * Some devices, like parts of the NVIDIA C51 chipset,
  632                  * have a broken Capabilities List.  So we need to do
  633                  * a sanity check here.
  634                  */
  635                 if ((ofs & 3) || (ofs < 0x40))
  636                         return (0);
  637                 reg = pci_conf_read(pc, tag, ofs);
  638                 if (PCI_CAPLIST_CAP(reg) == capid) {
  639                         if (offset)
  640                                 *offset = ofs;
  641                         if (value)
  642                                 *value = reg;
  643                         return (1);
  644                 }
  645                 ofs = PCI_CAPLIST_NEXT(reg);
  646         }
  647 
  648         return (0);
  649 }
  650 
  651 int
  652 pci_get_ht_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
  653     int *offset, pcireg_t *value)
  654 {
  655         pcireg_t reg;
  656         unsigned int ofs;
  657 
  658         if (pci_get_capability(pc, tag, PCI_CAP_HT, &ofs, NULL) == 0)
  659                 return (0);
  660 
  661         while (ofs != 0) {
  662 #ifdef DIAGNOSTIC
  663                 if ((ofs & 3) || (ofs < 0x40))
  664                         panic("pci_get_ht_capability");
  665 #endif
  666                 reg = pci_conf_read(pc, tag, ofs);
  667                 if (PCI_HT_CAP(reg) == capid) {
  668                         if (offset)
  669                                 *offset = ofs;
  670                         if (value)
  671                                 *value = reg;
  672                         return (1);
  673                 }
  674                 ofs = PCI_CAPLIST_NEXT(reg);
  675         }
  676 
  677         return (0);
  678 }
  679 
  680 int
  681 pci_get_ext_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
  682     int *offset, pcireg_t *value)
  683 {
  684         pcireg_t reg;
  685         unsigned int ofs;
  686 
  687         /* Make sure this is a PCI Express device. */
  688         if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, NULL, NULL) == 0)
  689                 return (0);
  690 
  691         /* Scan PCI Express extended capabilities. */
  692         ofs = PCI_PCIE_ECAP;
  693         while (ofs != 0) {
  694 #ifdef DIAGNOSTIC
  695                 if ((ofs & 3) || (ofs < PCI_PCIE_ECAP))
  696                         panic("pci_get_ext_capability");
  697 #endif
  698                 reg = pci_conf_read(pc, tag, ofs);
  699                 if (PCI_PCIE_ECAP_ID(reg) == capid) {
  700                         if (offset)
  701                                 *offset = ofs;
  702                         if (value)
  703                                 *value = reg;
  704                         return (1);
  705                 }
  706                 ofs = PCI_PCIE_ECAP_NEXT(reg);
  707         }
  708 
  709         return (0);
  710 }
  711 
  712 uint16_t
  713 pci_requester_id(pci_chipset_tag_t pc, pcitag_t tag)
  714 {
  715         int bus, dev, func;
  716 
  717         pci_decompose_tag(pc, tag, &bus, &dev, &func);
  718         return ((bus << 8) | (dev << 3) | func);
  719 }
  720 
  721 int
  722 pci_find_device(struct pci_attach_args *pa,
  723     int (*match)(struct pci_attach_args *))
  724 {
  725         extern struct cfdriver pci_cd;
  726         struct device *pcidev;
  727         int i;
  728 
  729         for (i = 0; i < pci_cd.cd_ndevs; i++) {
  730                 pcidev = pci_cd.cd_devs[i];
  731                 if (pcidev != NULL &&
  732                     pci_enumerate_bus((struct pci_softc *)pcidev,
  733                                       match, pa) != 0)
  734                         return (1);
  735         }
  736         return (0);
  737 }
  738 
  739 int
  740 pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag)
  741 {
  742         pcireg_t reg;
  743         int offset;
  744 
  745         if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
  746                 reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
  747                 return (reg & PCI_PMCSR_STATE_MASK);
  748         }
  749         return (PCI_PMCSR_STATE_D0);
  750 }
  751 
  752 int
  753 pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int state)
  754 {
  755         pcireg_t reg;
  756         int offset, ostate = state;
  757 
  758         /*
  759          * Warn the firmware that we are going to put the device
  760          * into the given state.
  761          */
  762         pci_set_powerstate_md(pc, tag, state, 1);
  763 
  764         if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, 0)) {
  765                 if (state == PCI_PMCSR_STATE_D3) {
  766                         /*
  767                          * The PCI Power Management spec says we
  768                          * should disable I/O and memory space as well
  769                          * as bus mastering before we place the device
  770                          * into D3.
  771                          */
  772                         reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
  773                         reg &= ~PCI_COMMAND_IO_ENABLE;
  774                         reg &= ~PCI_COMMAND_MEM_ENABLE;
  775                         reg &= ~PCI_COMMAND_MASTER_ENABLE;
  776                         pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, reg);
  777                 }
  778                 reg = pci_conf_read(pc, tag, offset + PCI_PMCSR);
  779                 if ((reg & PCI_PMCSR_STATE_MASK) != state) {
  780                         ostate = reg & PCI_PMCSR_STATE_MASK;
  781 
  782                         pci_conf_write(pc, tag, offset + PCI_PMCSR,
  783                             (reg & ~PCI_PMCSR_STATE_MASK) | state);
  784                         if (state == PCI_PMCSR_STATE_D3 ||
  785                             ostate == PCI_PMCSR_STATE_D3)
  786                                 delay(10 * 1000);
  787                 }
  788         }
  789 
  790         /*
  791          * Warn the firmware that the device is now in the given
  792          * state.
  793          */
  794         pci_set_powerstate_md(pc, tag, state, 0);
  795 
  796         return (ostate);
  797 }
  798 
  799 #ifndef PCI_MACHDEP_ENUMERATE_BUS
  800 /*
  801  * Generic PCI bus enumeration routine.  Used unless machine-dependent
  802  * code needs to provide something else.
  803  */
  804 int
  805 pci_enumerate_bus(struct pci_softc *sc,
  806     int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
  807 {
  808         pci_chipset_tag_t pc = sc->sc_pc;
  809         int device, function, nfunctions, ret;
  810         int maxndevs = sc->sc_maxndevs;
  811         const struct pci_quirkdata *qd;
  812         pcireg_t id, bhlcr, cap;
  813         pcitag_t tag;
  814 
  815         /*
  816          * PCIe downstream ports and root ports should only forward
  817          * configuration requests for device number 0.  However, not
  818          * all hardware implements this correctly, and some devices
  819          * will respond to other device numbers making the device show
  820          * up 32 times.  Prevent this by only scanning a single
  821          * device.
  822          */
  823         if (sc->sc_bridgetag && pci_get_capability(pc, *sc->sc_bridgetag,
  824             PCI_CAP_PCIEXPRESS, NULL, &cap)) {
  825                 switch (PCI_PCIE_XCAP_TYPE(cap)) {
  826                 case PCI_PCIE_XCAP_TYPE_RP:
  827                 case PCI_PCIE_XCAP_TYPE_DOWN:
  828                 case PCI_PCIE_XCAP_TYPE_PCI2PCIE:
  829                         maxndevs = 1;
  830                         break;
  831                 }
  832         }
  833 
  834         for (device = 0; device < maxndevs; device++) {
  835                 tag = pci_make_tag(pc, sc->sc_bus, device, 0);
  836 
  837                 bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
  838                 if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
  839                         continue;
  840 
  841                 id = pci_conf_read(pc, tag, PCI_ID_REG);
  842 
  843                 /* Invalid vendor ID value? */
  844                 if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
  845                         continue;
  846                 /* XXX Not invalid, but we've done this ~forever. */
  847                 if (PCI_VENDOR(id) == 0)
  848                         continue;
  849 
  850                 qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));
  851 
  852                 if (qd != NULL &&
  853                       (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
  854                         nfunctions = 8;
  855                 else if (qd != NULL &&
  856                       (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
  857                         nfunctions = 1;
  858                 else
  859                         nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;
  860 
  861                 for (function = 0; function < nfunctions; function++) {
  862                         tag = pci_make_tag(pc, sc->sc_bus, device, function);
  863                         ret = pci_probe_device(sc, tag, match, pap);
  864                         if (match != NULL && ret != 0)
  865                                 return (ret);
  866                 }
  867         }
  868 
  869         return (0);
  870 }
  871 #endif /* PCI_MACHDEP_ENUMERATE_BUS */
  872 
/*
 * Record the address ranges a device already owns (BARs, expansion
 * ROM and, for PCI-PCI bridges, forwarding windows) in the parent
 * bus's extent maps (pa_ioex/pa_memex/pa_pmemex/pa_busex), so that
 * later allocations cannot collide with firmware-assigned resources.
 * BARs or windows that conflict with an existing reservation are
 * reported and disabled.  Always returns 0.
 */
int
pci_reserve_resources(struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t bhlc, blr, type, bir;
	pcireg_t addr, mask;
	bus_addr_t base, limit;
	bus_size_t size;
	int reg, reg_start, reg_end, reg_rom;
	int bus, dev, func;
	int sec, sub;
	int flags;
	int s;

	pci_decompose_tag(pc, tag, &bus, &dev, &func);

	/* The set of BARs (and the ROM register) depends on header type. */
	bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(bhlc)) {
	case 0:
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_END;
		reg_rom = PCI_ROM_REG;
		break;
	case 1: /* PCI-PCI bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PPB_END;
		reg_rom = 0;	/* 0x38 */
		break;
	case 2: /* PCI-CardBus bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PCB_END;
		reg_rom = 0;
		break;
	default:
		return (0);
	}

	/* Walk the BARs and reserve whatever the firmware programmed. */
	for (reg = reg_start; reg < reg_end; reg += 4) {
		if (!pci_mapreg_probe(pc, tag, reg, &type))
			continue;

		if (pci_mapreg_info(pc, tag, reg, type, &base, &size, &flags))
			continue;

		/* An unprogrammed BAR reads back 0; nothing to reserve. */
		if (base == 0)
			continue;

		switch (type) {
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
			/* Prefetchable BARs go into the prefetch map first. */
			if (ISSET(flags, BUS_SPACE_MAP_PREFETCHABLE) &&
			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
			    base, size, EX_NOWAIT) == 0) {
				break;
			}
#ifdef __sparc64__
			/*
			 * Certain SPARC T5 systems assign
			 * non-prefetchable 64-bit BARs of its onboard
			 * mpii(4) controllers addresses in the
			 * prefetchable memory range.  This is
			 * (probably) safe, as reads from the device
			 * registers mapped by these BARs are
			 * side-effect free.  So assume the firmware
			 * knows what it is doing.
			 */
			if (base >= 0x100000000 &&
			    pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
			    base, size, EX_NOWAIT) == 0) {
				break;
			}
#endif
			/* Conflict: clear the BAR (both halves if 64-bit). */
			if (pa->pa_memex && extent_alloc_region(pa->pa_memex,
			    base, size, EX_NOWAIT)) {
				printf("%d:%d:%d: mem address conflict 0x%lx/0x%lx\n",
				    bus, dev, func, base, size);
				pci_conf_write(pc, tag, reg, 0);
				if (type & PCI_MAPREG_MEM_TYPE_64BIT)
					pci_conf_write(pc, tag, reg + 4, 0);
			}
			break;
		case PCI_MAPREG_TYPE_IO:
			if (pa->pa_ioex && extent_alloc_region(pa->pa_ioex,
			    base, size, EX_NOWAIT)) {
				printf("%d:%d:%d: io address conflict 0x%lx/0x%lx\n",
				    bus, dev, func, base, size);
				pci_conf_write(pc, tag, reg, 0);
			}
			break;
		}

		/* 64-bit BARs occupy two consecutive registers. */
		if (type & PCI_MAPREG_MEM_TYPE_64BIT)
			reg += 4;
	}

	if (reg_rom != 0) {
		/* Size the expansion ROM BAR, restoring it afterwards. */
		s = splhigh();
		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
		splx(s);

		base = PCI_ROM_ADDR(addr);
		size = PCI_ROM_SIZE(mask);
		if (base != 0 && size != 0) {
			/* Try the prefetch map first, then regular memory. */
			if (pa->pa_pmemex && extent_alloc_region(pa->pa_pmemex,
			    base, size, EX_NOWAIT) &&
			    pa->pa_memex && extent_alloc_region(pa->pa_memex,
			    base, size, EX_NOWAIT)) {
				printf("%d:%d:%d: rom address conflict 0x%lx/0x%lx\n",
				    bus, dev, func, base, size);
				pci_conf_write(pc, tag, PCI_ROM_REG, 0);
			}
		}
	}

	/* Only PCI-PCI bridges have forwarding windows to reserve. */
	if (PCI_HDRTYPE_TYPE(bhlc) != 1)
		return (0);

	/* Figure out the I/O address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_IOSTATUS);
	base = (blr & 0x000000f0) << 8;
	/* 0x000f000 == 0x0000f000: the I/O limit field, bits 15:12. */
	limit = (blr & 0x000f000) | 0x00000fff;
	blr = pci_conf_read(pc, tag, PPB_REG_IO_HI);
	base |= (blr & 0x0000ffff) << 16;
	limit |= (blr & 0xffff0000);
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_ioex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_ioex, base, size, EX_NOWAIT)) {
			printf("%d:%d:%d: bridge io address conflict 0x%lx/0x%lx\n",
			    bus, dev, func, base, size);
			/* Close the window: program limit below base. */
			blr &= 0xffff0000;
			blr |= 0x000000f0;
			pci_conf_write(pc, tag, PPB_REG_IOSTATUS, blr);
		}
	}

	/* Figure out the memory mapped I/O address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_MEM);
	base = (blr & 0x0000fff0) << 16;
	limit = (blr & 0xfff00000) | 0x000fffff;
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_memex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
			    bus, dev, func, base, size);
			pci_conf_write(pc, tag, PPB_REG_MEM, 0x0000fff0);
		}
	}

	/* Figure out the prefetchable memory address range of the bridge. */
	blr = pci_conf_read(pc, tag, PPB_REG_PREFMEM);
	base = (blr & 0x0000fff0) << 16;
	limit = (blr & 0xfff00000) | 0x000fffff;
#ifdef __LP64__
	/* The prefetchable window may extend above 4GB on 64-bit systems. */
	blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_PREFBASE_HI32);
	base |= ((uint64_t)blr) << 32;
	blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_PREFLIM_HI32);
	limit |= ((uint64_t)blr) << 32;
#endif
	if (limit > base)
		size = (limit - base + 1);
	else
		size = 0;
	if (pa->pa_pmemex && base > 0 && size > 0) {
		if (extent_alloc_region(pa->pa_pmemex, base, size, EX_NOWAIT)) {
			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
			    bus, dev, func, base, size);
			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
		}
	} else if (pa->pa_memex && base > 0 && size > 0) {
		/* No prefetch map here: fall back to the regular memory map. */
		if (extent_alloc_region(pa->pa_memex, base, size, EX_NOWAIT)) {
			printf("%d:%d:%d: bridge mem address conflict 0x%lx/0x%lx\n",
			    bus, dev, func, base, size);
			pci_conf_write(pc, tag, PPB_REG_PREFMEM, 0x0000fff0);
		}
	}

	/* Figure out the bus range handled by the bridge. */
	bir = pci_conf_read(pc, tag, PPB_REG_BUSINFO);
	sec = PPB_BUSINFO_SECONDARY(bir);
	sub = PPB_BUSINFO_SUBORDINATE(bir);
	if (pa->pa_busex && sub >= sec && sub > 0) {
		if (extent_alloc_region(pa->pa_busex, sec, sub - sec + 1,
		    EX_NOWAIT)) {
			printf("%d:%d:%d: bridge bus conflict %d-%d\n",
			    bus, dev, func, sec, sub);
		}
	}

	return (0);
}
 1073 
 1074 /*
 1075  * Vital Product Data (PCI 2.2)
 1076  */
 1077 
 1078 int
 1079 pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
 1080     pcireg_t *data)
 1081 {
 1082         uint32_t reg;
 1083         int ofs, i, j;
 1084 
 1085         KASSERT(data != NULL);
 1086         if ((offset + count) >= PCI_VPD_ADDRESS_MASK)
 1087                 return (EINVAL);
 1088 
 1089         if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
 1090                 return (ENXIO);
 1091 
 1092         for (i = 0; i < count; offset += sizeof(*data), i++) {
 1093                 reg &= 0x0000ffff;
 1094                 reg &= ~PCI_VPD_OPFLAG;
 1095                 reg |= PCI_VPD_ADDRESS(offset);
 1096                 pci_conf_write(pc, tag, ofs, reg);
 1097 
 1098                 /*
 1099                  * PCI 2.2 does not specify how long we should poll
 1100                  * for completion nor whether the operation can fail.
 1101                  */
 1102                 j = 0;
 1103                 do {
 1104                         if (j++ == 20)
 1105                                 return (EIO);
 1106                         delay(4);
 1107                         reg = pci_conf_read(pc, tag, ofs);
 1108                 } while ((reg & PCI_VPD_OPFLAG) == 0);
 1109                 data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
 1110         }
 1111 
 1112         return (0);
 1113 }
 1114 
 1115 int
 1116 pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
 1117     pcireg_t *data)
 1118 {
 1119         pcireg_t reg;
 1120         int ofs, i, j;
 1121 
 1122         KASSERT(data != NULL);
 1123         KASSERT((offset + count) < 0x7fff);
 1124 
 1125         if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
 1126                 return (1);
 1127 
 1128         for (i = 0; i < count; offset += sizeof(*data), i++) {
 1129                 pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);
 1130 
 1131                 reg &= 0x0000ffff;
 1132                 reg |= PCI_VPD_OPFLAG;
 1133                 reg |= PCI_VPD_ADDRESS(offset);
 1134                 pci_conf_write(pc, tag, ofs, reg);
 1135 
 1136                 /*
 1137                  * PCI 2.2 does not specify how long we should poll
 1138                  * for completion nor whether the operation can fail.
 1139                  */
 1140                 j = 0;
 1141                 do {
 1142                         if (j++ == 20)
 1143                                 return (1);
 1144                         delay(1);
 1145                         reg = pci_conf_read(pc, tag, ofs);
 1146                 } while (reg & PCI_VPD_OPFLAG);
 1147         }
 1148 
 1149         return (0);
 1150 }
 1151 
 1152 int
 1153 pci_matchbyid(struct pci_attach_args *pa, const struct pci_matchid *ids,
 1154     int nent)
 1155 {
 1156         const struct pci_matchid *pm;
 1157         int i;
 1158 
 1159         for (i = 0, pm = ids; i < nent; i++, pm++)
 1160                 if (PCI_VENDOR(pa->pa_id) == pm->pm_vid &&
 1161                     PCI_PRODUCT(pa->pa_id) == pm->pm_pid)
 1162                         return (1);
 1163         return (0);
 1164 }
 1165 
 1166 void
 1167 pci_disable_legacy_vga(struct device *dev)
 1168 {
 1169         struct pci_softc *pci;
 1170         struct pci_dev *pd;
 1171 
 1172         /* XXX Until we attach the drm drivers directly to pci. */
 1173         while (dev->dv_parent->dv_cfdata->cf_driver != &pci_cd)
 1174                 dev = dev->dv_parent;
 1175 
 1176         pci = (struct pci_softc *)dev->dv_parent;
 1177         LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
 1178                 if (pd->pd_dev == dev) {
 1179                         pd->pd_vga_decode = 0;
 1180                         break;
 1181                 }
 1182         }
 1183 }
 1184 
 1185 #ifdef USER_PCICONF
 1186 /*
 1187  * This is the user interface to PCI configuration space.
 1188  */
 1189   
 1190 #include <sys/pciio.h>
 1191 #include <sys/fcntl.h>
 1192 
 1193 #ifdef DEBUG
 1194 #define PCIDEBUG(x) printf x
 1195 #else
 1196 #define PCIDEBUG(x)
 1197 #endif
 1198 
 1199 void pci_disable_vga(pci_chipset_tag_t, pcitag_t);
 1200 void pci_enable_vga(pci_chipset_tag_t, pcitag_t);
 1201 void pci_route_vga(struct pci_softc *);
 1202 void pci_unroute_vga(struct pci_softc *);
 1203 
 1204 int pciopen(dev_t dev, int oflags, int devtype, struct proc *p);
 1205 int pciclose(dev_t dev, int flag, int devtype, struct proc *p);
 1206 int pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p);
 1207 
 1208 int
 1209 pciopen(dev_t dev, int oflags, int devtype, struct proc *p) 
 1210 {
 1211         PCIDEBUG(("pciopen ndevs: %d\n" , pci_cd.cd_ndevs));
 1212 
 1213         if (minor(dev) >= pci_ndomains) {
 1214                 return ENXIO;
 1215         }
 1216 
 1217 #ifndef APERTURE
 1218         if ((oflags & FWRITE) && securelevel > 0) {
 1219                 return EPERM;
 1220         }
 1221 #else
 1222         if ((oflags & FWRITE) && securelevel > 0 && allowaperture == 0) {
 1223                 return EPERM;
 1224         }
 1225 #endif
 1226         return (0);
 1227 }
 1228 
/*
 * Close /dev/pci*.  Drops the VGA arbitration lock unconditionally
 * so a process that exits while holding it cannot wedge PCIOCSETVGA.
 */
int
pciclose(dev_t dev, int flag, int devtype, struct proc *p)
{
	PCIDEBUG(("pciclose\n"));

	pci_vga_proc = NULL;
	return (0);
}
 1237 
/*
 * ioctl handler for /dev/pci*: user-land access to PCI configuration
 * space (read/write/BAR masks), expansion ROM contents, Vital
 * Product Data, and legacy VGA routing/arbitration.
 */
int
pciioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct pcisel *sel = (struct pcisel *)data;
	struct pci_io *io;
	struct pci_dev *pd;
	struct pci_rom *rom;
	int i, error;
	pcitag_t tag;
	struct pci_softc *pci;
	pci_chipset_tag_t pc;

	/* Per-command permission and precondition checks. */
	switch (cmd) {
	case PCIOCREAD:
	case PCIOCREADMASK:
		break;
	case PCIOCWRITE:
		if (!(flag & FWRITE))
			return EPERM;
		break;
	case PCIOCGETROMLEN:
	case PCIOCGETROM:
	case PCIOCGETVPD:
		break;
	case PCIOCGETVGA:
	case PCIOCSETVGA:
		if (pci_vga_pci == NULL)
			return EINVAL;
		break;
	default:
		return ENOTTY;
	}

	/* Find the pci(4) instance for this domain (minor) and bus. */
	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pci = pci_cd.cd_devs[i];
		if (pci != NULL && pci->sc_domain == minor(dev) &&
		    pci->sc_bus == sel->pc_bus)
			break;
	}
	if (i >= pci_cd.cd_ndevs)
		return ENXIO;

	/* Check bounds */
	if (pci->sc_bus >= 256 || 
	    sel->pc_dev >= pci_bus_maxdevs(pci->sc_pc, pci->sc_bus) ||
	    sel->pc_func >= 8)
		return EINVAL;

	pc = pci->sc_pc;
	/* The selected device must have been found during autoconf. */
	LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
		int bus, dev, func;

		pci_decompose_tag(pc, pd->pd_tag, &bus, &dev, &func);

		if (bus == sel->pc_bus && dev == sel->pc_dev &&
		    func == sel->pc_func)
			break;
	}
	if (pd == NULL)
		return ENXIO;

	tag = pci_make_tag(pc, sel->pc_bus, sel->pc_dev, sel->pc_func);

	switch (cmd) {
	case PCIOCREAD:
		io = (struct pci_io *)data;
		switch (io->pi_width) {
		case 4:
			/* Configuration space bounds check */
			if (io->pi_reg < 0 ||
			    io->pi_reg >= pci_conf_size(pc, tag))
				return EINVAL;
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3) 
				return EINVAL;
			io->pi_data = pci_conf_read(pc, tag, io->pi_reg);
			error = 0;
			break;
		default:
			/* Only 32-bit accesses are supported. */
			error = EINVAL;
			break;
		}
		break;

	case PCIOCWRITE:
		io = (struct pci_io *)data;
		switch (io->pi_width) {
		case 4:
			/* Configuration space bounds check */
			if (io->pi_reg < 0 ||
			    io->pi_reg >= pci_conf_size(pc, tag))
				return EINVAL;
			/* Make sure the register is properly aligned */
			if (io->pi_reg & 0x3)
				return EINVAL;
			pci_conf_write(pc, tag, io->pi_reg, io->pi_data);
			error = 0;
			break;
		default:
			/* Only 32-bit accesses are supported. */
			error = EINVAL;
			break;
		}
		break;

	case PCIOCREADMASK:
		/* Return the BAR size mask recorded at attach time. */
		io = (struct pci_io *)data;

		if (io->pi_width != 4 || io->pi_reg & 0x3 ||
		    io->pi_reg < PCI_MAPREG_START ||
		    io->pi_reg >= PCI_MAPREG_END)
			return (EINVAL);

		i = (io->pi_reg - PCI_MAPREG_START) / 4;
		io->pi_data = pd->pd_mask[i];
		error = 0;
		break;

	case PCIOCGETROMLEN:
	case PCIOCGETROM:
	{
		pcireg_t addr, mask, bhlc;
		bus_space_handle_t h;
		bus_size_t len, off;
		char buf[256];
		int s;

		rom = (struct pci_rom *)data;

		/* Expansion ROMs only exist on type-0 headers. */
		bhlc = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlc) != 0)
			return (ENODEV);

		/* Size the ROM BAR, restoring its original value after. */
		s = splhigh();
		addr = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
		mask = pci_conf_read(pc, tag, PCI_ROM_REG);
		pci_conf_write(pc, tag, PCI_ROM_REG, addr);
		splx(s);

		/*
		 * Section 6.2.5.2 `Expansion ROM Base Address Register',
		 *
		 * tells us that only the upper 21 bits are writable.
		 * This means that the size of a ROM must be a
		 * multiple of 2 KB.  So reading the ROM in chunks of
		 * 256 bytes should work just fine.
		 */
		/*
		 * NOTE(review): the `!= 0' below binds to the whole
		 * parenthesized `||' expression; this is logically
		 * equivalent to the presumably intended
		 * `addr == 0 || size % sizeof(buf) != 0', but the
		 * parentheses look misplaced -- confirm intent.
		 */
		if ((PCI_ROM_ADDR(addr) == 0 ||
		     PCI_ROM_SIZE(mask) % sizeof(buf)) != 0)
			return (ENODEV);

		/* If we're just after the size, skip reading the ROM. */
		if (cmd == PCIOCGETROMLEN) {
			error = 0;
			goto fail;
		}

		if (rom->pr_romlen < PCI_ROM_SIZE(mask)) {
			error = ENOMEM;
			goto fail;
		}

		error = bus_space_map(pci->sc_memt, PCI_ROM_ADDR(addr),
		    PCI_ROM_SIZE(mask), 0, &h);
		if (error)
			goto fail;

		/*
		 * Copy the ROM out in 256-byte chunks, enabling ROM
		 * decode only while actually reading.
		 */
		off = 0;
		len = PCI_ROM_SIZE(mask);
		while (len > 0 && error == 0) {
			s = splhigh();
			pci_conf_write(pc, tag, PCI_ROM_REG,
			    addr | PCI_ROM_ENABLE);
			bus_space_read_region_1(pci->sc_memt, h, off,
			    buf, sizeof(buf));
			pci_conf_write(pc, tag, PCI_ROM_REG, addr);
			splx(s);

			error = copyout(buf, rom->pr_rom + off, sizeof(buf));
			off += sizeof(buf);
			len -= sizeof(buf);
		}

		bus_space_unmap(pci->sc_memt, h, PCI_ROM_SIZE(mask));

	fail:
		/* Always report the ROM size back to the caller. */
		rom->pr_romlen = PCI_ROM_SIZE(mask);
		break;
	}

	case PCIOCGETVPD: {
		struct pci_vpd_req *pv = (struct pci_vpd_req *)data;
		pcireg_t *data;	/* NOTE: shadows the ioctl `data' argument */
		size_t len;
		unsigned int i;
		int s;

		CTASSERT(sizeof(*data) == sizeof(*pv->pv_data));

		data = mallocarray(pv->pv_count, sizeof(*data), M_TEMP,
		    M_WAITOK|M_CANFAIL);
		if (data == NULL) {
			error = ENOMEM;
			break;
		}

		/* The VPD registers are polled; block interrupts meanwhile. */
		s = splhigh();
		error = pci_vpd_read(pc, tag, pv->pv_offset, pv->pv_count,
		    data);
		splx(s);

		len = pv->pv_count * sizeof(*pv->pv_data);

		if (error == 0) {
			/* VPD words are little-endian on the wire. */
			for (i = 0; i < pv->pv_count; i++)
				data[i] = letoh32(data[i]);

			error = copyout(data, pv->pv_data, len);
		}

		free(data, M_TEMP, len);
		break;
	}

	case PCIOCGETVGA:
	{
		struct pci_vga *vga = (struct pci_vga *)data;
		struct pci_dev *pd;
		int bus, dev, func;

		/* Does the selected device currently decode legacy VGA? */
		vga->pv_decode = 0;
		LIST_FOREACH(pd, &pci->sc_devs, pd_next) {
			pci_decompose_tag(pc, pd->pd_tag, NULL, &dev, &func);
			if (dev == sel->pc_dev && func == sel->pc_func) {
				if (pd->pd_vga_decode)
					vga->pv_decode = PCI_VGA_IO_ENABLE |
					    PCI_VGA_MEM_ENABLE;
				break;
			}
		}

		/* Report which device legacy VGA is currently routed to. */
		pci_decompose_tag(pci_vga_pci->sc_pc,
		    pci_vga_tag, &bus, &dev, &func);
		vga->pv_sel.pc_bus = bus;
		vga->pv_sel.pc_dev = dev;
		vga->pv_sel.pc_func = func;
		error = 0;
		break;
	}
	case PCIOCSETVGA:
	{
		struct pci_vga *vga = (struct pci_vga *)data;
		int bus, dev, func;

		switch (vga->pv_lock) {
		case PCI_VGA_UNLOCK:
		case PCI_VGA_LOCK:
		case PCI_VGA_TRYLOCK:
			break;
		default:
			return (EINVAL);
		}

		/* Releasing the lock wakes up any waiters. */
		if (vga->pv_lock == PCI_VGA_UNLOCK) {
			if (pci_vga_proc != p)
				return (EINVAL);
			pci_vga_proc = NULL;
			wakeup(&pci_vga_proc);
			return (0);
		}

		/* Wait for (or, for TRYLOCK, just try) the VGA lock. */
		while (pci_vga_proc != p && pci_vga_proc != NULL) {
			if (vga->pv_lock == PCI_VGA_TRYLOCK)
				return (EBUSY);
			error = tsleep_nsec(&pci_vga_proc, PLOCK | PCATCH,
			    "vgalk", INFSLP);
			if (error)
				return (error);
		}
		pci_vga_proc = p;

		/* Re-route legacy VGA if a different device was selected. */
		pci_decompose_tag(pci_vga_pci->sc_pc,
		    pci_vga_tag, &bus, &dev, &func);
		if (bus != vga->pv_sel.pc_bus || dev != vga->pv_sel.pc_dev ||
		    func != vga->pv_sel.pc_func) {
			pci_disable_vga(pci_vga_pci->sc_pc, pci_vga_tag);
			if (pci != pci_vga_pci) {
				pci_unroute_vga(pci_vga_pci);
				pci_route_vga(pci);
				pci_vga_pci = pci;
			}
			pci_enable_vga(pc, tag);
			pci_vga_tag = tag;
		}

		error = 0;
		break;
	}

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
 1544 
 1545 void
 1546 pci_disable_vga(pci_chipset_tag_t pc, pcitag_t tag)
 1547 {
 1548         pcireg_t csr;
 1549 
 1550         csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
 1551         csr &= ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE);
 1552         pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
 1553 }
 1554 
 1555 void
 1556 pci_enable_vga(pci_chipset_tag_t pc, pcitag_t tag)
 1557 {
 1558         pcireg_t csr;
 1559 
 1560         csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
 1561         csr |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE;
 1562         pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
 1563 }
 1564 
 1565 void
 1566 pci_route_vga(struct pci_softc *sc)
 1567 {
 1568         pci_chipset_tag_t pc = sc->sc_pc;
 1569         pcireg_t bc;
 1570 
 1571         if (sc->sc_bridgetag == NULL)
 1572                 return;
 1573 
 1574         bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
 1575         bc |= PPB_BC_VGA_ENABLE;
 1576         pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
 1577 
 1578         pci_route_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
 1579 }
 1580 
 1581 void
 1582 pci_unroute_vga(struct pci_softc *sc)
 1583 {
 1584         pci_chipset_tag_t pc = sc->sc_pc;
 1585         pcireg_t bc;
 1586 
 1587         if (sc->sc_bridgetag == NULL)
 1588                 return;
 1589 
 1590         bc = pci_conf_read(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL);
 1591         bc &= ~PPB_BC_VGA_ENABLE;
 1592         pci_conf_write(pc, *sc->sc_bridgetag, PPB_REG_BRIDGECONTROL, bc);
 1593 
 1594         pci_unroute_vga((struct pci_softc *)sc->sc_dev.dv_parent->dv_parent);
 1595 }
 1596 #endif /* USER_PCICONF */
 1597 
 1598 int
 1599 pci_primary_vga(struct pci_attach_args *pa)
 1600 {
 1601         /* XXX For now, only handle the first PCI domain. */
 1602         if (pa->pa_domain != 0)
 1603                 return (0);
 1604 
 1605         if ((PCI_CLASS(pa->pa_class) != PCI_CLASS_DISPLAY ||
 1606             PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_DISPLAY_VGA) &&
 1607             (PCI_CLASS(pa->pa_class) != PCI_CLASS_PREHISTORIC ||
 1608             PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_PREHISTORIC_VGA))
 1609                 return (0);
 1610 
 1611         if ((pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
 1612             & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
 1613             != (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
 1614                 return (0);
 1615 
 1616         pci_vga_tag = pa->pa_tag;
 1617 
 1618         return (1);
 1619 }
 1620 
 1621 #ifdef __HAVE_PCI_MSIX
 1622 
 1623 struct msix_vector *
 1624 pci_alloc_msix_table(pci_chipset_tag_t pc, pcitag_t tag)
 1625 {
 1626         struct msix_vector *table;
 1627         pcireg_t reg;
 1628         int tblsz;
 1629 
 1630         if (pci_get_capability(pc, tag, PCI_CAP_MSIX, NULL, &reg) == 0)
 1631                 return NULL;
 1632 
 1633         tblsz = PCI_MSIX_MC_TBLSZ(reg) + 1;
 1634         table = mallocarray(tblsz, sizeof(*table), M_DEVBUF, M_WAITOK);
 1635 
 1636         return table;
 1637 }
 1638 
 1639 void
 1640 pci_free_msix_table(pci_chipset_tag_t pc, pcitag_t tag,
 1641     struct msix_vector *table)
 1642 {
 1643         pcireg_t reg;
 1644         int tblsz;
 1645 
 1646         if (pci_get_capability(pc, tag, PCI_CAP_MSIX, NULL, &reg) == 0)
 1647                 return;
 1648 
 1649         tblsz = PCI_MSIX_MC_TBLSZ(reg) + 1;
 1650         free(table, M_DEVBUF, tblsz * sizeof(*table));
 1651 }
 1652 
 1653 void
 1654 pci_suspend_msix(pci_chipset_tag_t pc, pcitag_t tag,
 1655     bus_space_tag_t memt, pcireg_t *mc, struct msix_vector *table)
 1656 {
 1657         bus_space_handle_t memh;
 1658         pcireg_t reg;
 1659         int tblsz, i;
 1660 
 1661         if (pci_get_capability(pc, tag, PCI_CAP_MSIX, NULL, &reg) == 0)
 1662                 return;
 1663 
 1664         KASSERT(table != NULL);
 1665 
 1666         if (pci_msix_table_map(pc, tag, memt, &memh))
 1667                 return;
 1668 
 1669         tblsz = PCI_MSIX_MC_TBLSZ(reg) + 1;
 1670         for (i = 0; i < tblsz; i++) {
 1671                 table[i].mv_ma = bus_space_read_4(memt, memh, PCI_MSIX_MA(i));
 1672                 table[i].mv_mau32 = bus_space_read_4(memt, memh,
 1673                     PCI_MSIX_MAU32(i));
 1674                 table[i].mv_md = bus_space_read_4(memt, memh, PCI_MSIX_MD(i));
 1675                 table[i].mv_vc = bus_space_read_4(memt, memh, PCI_MSIX_VC(i));
 1676         }
 1677 
 1678         pci_msix_table_unmap(pc, tag, memt, memh);
 1679         
 1680         *mc = reg;
 1681 }
 1682 
/*
 * Restore the MSI-X state saved by pci_suspend_msix(): write every
 * saved vector table entry back into the device's table and then
 * restore the saved message control word `mc' to the capability
 * register, which re-enables MSI-X if it was enabled before suspend.
 *
 * The write ordering inside the loop is deliberate: address and data
 * words are flushed with a write barrier before the vector control
 * word is written, so the device never sees a valid (e.g. unmasked)
 * control word paired with a stale address/data.  Do not reorder.
 */
void
pci_resume_msix(pci_chipset_tag_t pc, pcitag_t tag,
    bus_space_tag_t memt, pcireg_t mc, struct msix_vector *table)
{
	bus_space_handle_t memh;
	pcireg_t reg;
	int tblsz, i;
	int off;		/* config-space offset of the MSI-X capability */

	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, &reg) == 0)
		return;

	KASSERT(table != NULL);

	if (pci_msix_table_map(pc, tag, memt, &memh))
		return;

	/* The table size field encodes the number of entries minus one. */
	tblsz = PCI_MSIX_MC_TBLSZ(reg) + 1;
	for (i = 0; i < tblsz; i++) {
		bus_space_write_4(memt, memh, PCI_MSIX_MA(i), table[i].mv_ma);
		bus_space_write_4(memt, memh, PCI_MSIX_MAU32(i),
		    table[i].mv_mau32);
		bus_space_write_4(memt, memh, PCI_MSIX_MD(i), table[i].mv_md);
		/* Flush address/data (16 bytes) before the control word. */
		bus_space_barrier(memt, memh, PCI_MSIX_MA(i), 16,
		    BUS_SPACE_BARRIER_WRITE);
		bus_space_write_4(memt, memh, PCI_MSIX_VC(i), table[i].mv_vc);
		bus_space_barrier(memt, memh, PCI_MSIX_VC(i), 4,
		    BUS_SPACE_BARRIER_WRITE);
	}

	pci_msix_table_unmap(pc, tag, memt, memh);

	pci_conf_write(pc, tag, off, mc);
}
 1717 
 1718 int
 1719 pci_intr_msix_count(struct pci_attach_args *pa)
 1720 {
 1721         pcireg_t reg;
 1722 
 1723         if ((pa->pa_flags & PCI_FLAGS_MSI_ENABLED) == 0)
 1724                 return (0);
 1725 
 1726         if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, NULL,
 1727             &reg) == 0)
 1728                 return (0);
 1729 
 1730         return (PCI_MSIX_MC_TBLSZ(reg) + 1);
 1731 }
 1732 
 1733 #else /* __HAVE_PCI_MSIX */
 1734 
/* Stub for kernels without MSI-X support: there is never a table. */
struct msix_vector *
pci_alloc_msix_table(pci_chipset_tag_t pc, pcitag_t tag)
{
	return NULL;
}
 1740 
/* Stub for kernels without MSI-X support: nothing was allocated. */
void
pci_free_msix_table(pci_chipset_tag_t pc, pcitag_t tag,
    struct msix_vector *table)
{
}
 1746 
/* Stub for kernels without MSI-X support: no state to save. */
void
pci_suspend_msix(pci_chipset_tag_t pc, pcitag_t tag,
    bus_space_tag_t memt, pcireg_t *mc, struct msix_vector *table)
{
}
 1752 
/* Stub for kernels without MSI-X support: no state to restore. */
void
pci_resume_msix(pci_chipset_tag_t pc, pcitag_t tag,
    bus_space_tag_t memt, pcireg_t mc, struct msix_vector *table)
{
}
 1758 
/* Stub for kernels without MSI-X support: no vectors are available. */
int
pci_intr_msix_count(struct pci_attach_args *pa)
{
	return (0);
}
 1764 
 1765 #endif /* __HAVE_PCI_MSIX */

Cache object: 2643ce971779c528c67b78c9e5b8c3d1


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.