FreeBSD/Linux Kernel Cross Reference
sys/compat/linuxkpi/common/src/linux_pci.c


/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the Linux function macro defined in linux/pci.h */
#undef pci_get_class

extern int linuxkpi_debug;

SYSCTL_DECL(_compat_linuxkpi);

static counter_u64_t lkpi_pci_nseg1_fail;
SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD,
    &lkpi_pci_nseg1_fail, "Count of single-segment busdma mapping failures");

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);

static device_method_t pci_methods[] = {
        DEVMETHOD(device_probe, linux_pci_probe),
        DEVMETHOD(device_attach, linux_pci_attach),
        DEVMETHOD(device_detach, linux_pci_detach),
        DEVMETHOD(device_suspend, linux_pci_suspend),
        DEVMETHOD(device_resume, linux_pci_resume),
        DEVMETHOD(device_shutdown, linux_pci_shutdown),
        DEVMETHOD(pci_iov_init, linux_pci_iov_init),
        DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
        DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

        /* backlight interface */
        DEVMETHOD(backlight_update_status, linux_backlight_update_status),
        DEVMETHOD(backlight_get_status, linux_backlight_get_status),
        DEVMETHOD(backlight_get_info, linux_backlight_get_info),
        DEVMETHOD_END
};

const char *pci_power_names[] = {
        "UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};

struct linux_dma_priv {
        uint64_t        dma_mask;
        bus_dma_tag_t   dmat;
        uint64_t        dma_coherent_mask;
        bus_dma_tag_t   dmat_coherent;
        struct mtx      lock;
        struct pctrie   ptree;
};
#define DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
        struct linux_dma_priv *priv;

        priv = pdev->dev.dma_priv;
        if (priv->dmat)
                bus_dma_tag_destroy(priv->dmat);
        if (priv->dmat_coherent)
                bus_dma_tag_destroy(priv->dmat_coherent);
        mtx_destroy(&priv->lock);
        pdev->dev.dma_priv = NULL;
        free(priv, M_DEVBUF);
        return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
        struct linux_dma_priv *priv;
        int error;

        priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

        mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
        pctrie_init(&priv->ptree);

        pdev->dev.dma_priv = priv;

        /* Create the default DMA tags. */
        error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
        if (error != 0)
                goto err;
        /* Coherent is lower 32-bit only by default in Linux. */
        error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (error != 0)
                goto err;

        return (error);

err:
        linux_pdev_dma_uninit(pdev);
        return (error);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
        struct linux_dma_priv *priv;
        int error;

        priv = dev->dma_priv;

        if (priv->dmat) {
                if (priv->dma_mask == dma_mask)
                        return (0);

                bus_dma_tag_destroy(priv->dmat);
        }

        priv->dma_mask = dma_mask;

        error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
            1, 0,                       /* alignment, boundary */
            dma_mask,                   /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filtfunc, filtfuncarg */
            BUS_SPACE_MAXSIZE,          /* maxsize */
            1,                          /* nsegments */
            BUS_SPACE_MAXSIZE,          /* maxsegsz */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockfuncarg */
            &priv->dmat);
        return (-error);
}

int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
        struct linux_dma_priv *priv;
        int error;

        priv = dev->dma_priv;

        if (priv->dmat_coherent) {
                if (priv->dma_coherent_mask == dma_mask)
                        return (0);

                bus_dma_tag_destroy(priv->dmat_coherent);
        }

        priv->dma_coherent_mask = dma_mask;

        error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
            1, 0,                       /* alignment, boundary */
            dma_mask,                   /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filtfunc, filtfuncarg */
            BUS_SPACE_MAXSIZE,          /* maxsize */
            1,                          /* nsegments */
            BUS_SPACE_MAXSIZE,          /* maxsegsz */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockfuncarg */
            &priv->dmat_coherent);
        return (-error);
}
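
/*
 * Usage sketch (hypothetical driver fragment, not part of this file):
 * drivers normally reach the two tag-init functions above through the
 * regular Linux DMA mask API rather than calling them directly, e.g.:
 *
 *	int rc;
 *
 *	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
 *	if (rc != 0)
 *		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (rc != 0)
 *		return (rc);
 *
 * Each successful call destroys the previous bus_dma tag and re-creates
 * it with the requested mask as lowaddr.
 */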

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
        const struct pci_device_id *id;
        struct pci_driver *pdrv;
        uint16_t vendor;
        uint16_t device;
        uint16_t subvendor;
        uint16_t subdevice;

        vendor = pci_get_vendor(dev);
        device = pci_get_device(dev);
        subvendor = pci_get_subvendor(dev);
        subdevice = pci_get_subdevice(dev);

        spin_lock(&pci_lock);
        list_for_each_entry(pdrv, &pci_drivers, node) {
                for (id = pdrv->id_table; id->vendor != 0; id++) {
                        if (vendor == id->vendor &&
                            (PCI_ANY_ID == id->device || device == id->device) &&
                            (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
                            (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
                                *idp = id;
                                spin_unlock(&pci_lock);
                                return (pdrv);
                        }
                }
        }
        spin_unlock(&pci_lock);
        return (NULL);
}

struct pci_dev *
lkpi_pci_get_device(uint16_t vendor, uint16_t device, struct pci_dev *odev)
{
        struct pci_dev *pdev;

        KASSERT(odev == NULL, ("%s: odev argument not yet supported\n", __func__));

        spin_lock(&pci_lock);
        list_for_each_entry(pdev, &pci_devices, links) {
                if (pdev->vendor == vendor && pdev->device == device)
                        break;
        }
        spin_unlock(&pci_lock);

        return (pdev);
}

static void
lkpi_pci_dev_release(struct device *dev)
{

        lkpi_devres_release_free_list(dev);
        spin_lock_destroy(&dev->devres_lock);
}

static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

        pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
        pdev->vendor = pci_get_vendor(dev);
        pdev->device = pci_get_device(dev);
        pdev->subsystem_vendor = pci_get_subvendor(dev);
        pdev->subsystem_device = pci_get_subdevice(dev);
        pdev->class = pci_get_class(dev);
        pdev->revision = pci_get_revid(dev);
        pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
        /*
         * This should be the upstream bridge; pci_upstream_bridge()
         * handles that case on demand as otherwise we'll shadow the
         * entire PCI hierarchy.
         */
        pdev->bus->self = pdev;
        pdev->bus->number = pci_get_bus(dev);
        pdev->bus->domain = pci_get_domain(dev);
        pdev->dev.bsddev = dev;
        pdev->dev.parent = &linux_root_device;
        pdev->dev.release = lkpi_pci_dev_release;
        INIT_LIST_HEAD(&pdev->dev.irqents);
        kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
        kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
        kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
            kobject_name(&pdev->dev.kobj));
        spin_lock_init(&pdev->dev.devres_lock);
        INIT_LIST_HEAD(&pdev->dev.devres_head);
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
        struct pci_dev *pdev;

        pdev = to_pci_dev(dev);
        if (pdev->root != NULL)
                pci_dev_put(pdev->root);
        if (pdev->bus->self != pdev)
                pci_dev_put(pdev->bus->self);
        free(pdev->bus, M_DEVBUF);
        if (pdev->msi_desc != NULL)
                free(pdev->msi_desc, M_DEVBUF);
        free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
        struct pci_dev *pdev;

        pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
        lkpifill_pci_dev(dev, pdev);
        pdev->dev.release = lkpinew_pci_dev_release;

        return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
        device_t dev;
        device_t devfrom = NULL;
        struct pci_dev *pdev;

        if (from != NULL)
                devfrom = from->dev.bsddev;

        dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
        if (dev == NULL)
                return (NULL);

        pdev = lkpinew_pci_dev(dev);
        return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
        device_t dev;
        struct pci_dev *pdev;

        dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        if (dev == NULL)
                return (NULL);

        pdev = lkpinew_pci_dev(dev);
        return (pdev);
}
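
/*
 * Usage sketch (hypothetical domain/bus/slot values): look up function 0
 * of device 2 on bus 0 in domain 0 and drop the reference when done:
 *
 *	struct pci_dev *bridge;
 *
 *	bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(2, 0));
 *	if (bridge != NULL) {
 *		... use bridge ...
 *		pci_dev_put(bridge);
 *	}
 *
 * Unlike Linux, the implementation above allocates a fresh pci_dev shadow
 * for the bsddev it finds; lkpinew_pci_dev_release() frees it on the last
 * pci_dev_put().
 */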

static int
linux_pci_probe(device_t dev)
{
        const struct pci_device_id *id;
        struct pci_driver *pdrv;

        if ((pdrv = linux_pci_find(dev, &id)) == NULL)
                return (ENXIO);
        if (device_get_driver(dev) != &pdrv->bsddriver)
                return (ENXIO);
        device_set_desc(dev, pdrv->name);

        /* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */
        if (pdrv->bsd_probe_return == 0)
                return (BUS_PROBE_DEFAULT);
        else
                return (pdrv->bsd_probe_return);
}

static int
linux_pci_attach(device_t dev)
{
        const struct pci_device_id *id;
        struct pci_driver *pdrv;
        struct pci_dev *pdev;

        pdrv = linux_pci_find(dev, &id);
        pdev = device_get_softc(dev);

        MPASS(pdrv != NULL);
        MPASS(pdev != NULL);

        return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
        struct resource_list_entry *rle;
        device_t parent;
        uintptr_t rid;
        int error;
        bool isdrm;

        linux_set_current(curthread);

        parent = device_get_parent(dev);
        isdrm = pdrv != NULL && pdrv->isdrm;

        if (isdrm) {
                struct pci_devinfo *dinfo;

                dinfo = device_get_ivars(parent);
                device_set_ivars(dev, dinfo);
        }

        lkpifill_pci_dev(dev, pdev);
        if (isdrm)
                PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
        else
                PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
        pdev->devfn = rid;
        pdev->pdrv = pdrv;
        rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
        if (rle != NULL)
                pdev->dev.irq = rle->start;
        else
                pdev->dev.irq = LINUX_IRQ_INVALID;
        pdev->irq = pdev->dev.irq;
        error = linux_pdev_dma_init(pdev);
        if (error)
                goto out_dma_init;

        TAILQ_INIT(&pdev->mmio);

        spin_lock(&pci_lock);
        list_add(&pdev->links, &pci_devices);
        spin_unlock(&pci_lock);

        if (pdrv != NULL) {
                error = pdrv->probe(pdev, id);
                if (error)
                        goto out_probe;
        }
        return (0);

out_probe:
        free(pdev->bus, M_DEVBUF);
        linux_pdev_dma_uninit(pdev);
out_dma_init:
        spin_lock(&pci_lock);
        list_del(&pdev->links);
        spin_unlock(&pci_lock);
        put_device(&pdev->dev);
        return (-error);
}

static int
linux_pci_detach(device_t dev)
{
        struct pci_dev *pdev;

        pdev = device_get_softc(dev);

        MPASS(pdev != NULL);

        device_set_desc(dev, NULL);

        return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

        linux_set_current(curthread);

        if (pdev->pdrv != NULL)
                pdev->pdrv->remove(pdev);

        if (pdev->root != NULL)
                pci_dev_put(pdev->root);
        free(pdev->bus, M_DEVBUF);
        linux_pdev_dma_uninit(pdev);

        spin_lock(&pci_lock);
        list_del(&pdev->links);
        spin_unlock(&pci_lock);
        put_device(&pdev->dev);

        return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

        (void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
        (void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
        return (0);
}

struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
        struct pci_devres *dr;

        dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
        if (dr == NULL) {
                dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
                    GFP_KERNEL | __GFP_ZERO);
                if (dr != NULL)
                        lkpi_devres_add(&pdev->dev, dr);
        }

        return (dr);
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
        struct pci_devres *dr;
        struct pci_dev *pdev;
        int bar;

        pdev = to_pci_dev(dev);
        dr = p;

        if (pdev->msix_enabled)
                lkpi_pci_disable_msix(pdev);
        if (pdev->msi_enabled)
                lkpi_pci_disable_msi(pdev);

        if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
                dr->enable_io = false;

        if (dr->region_mask == 0)
                return;
        for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
                if ((dr->region_mask & (1 << bar)) == 0)
                        continue;
                pci_release_region(pdev, bar);
        }
}

struct pcim_iomap_devres *
lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
{
        struct pcim_iomap_devres *dr;

        dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release,
            NULL, NULL);
        if (dr == NULL) {
                dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release,
                    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);
                if (dr != NULL)
                        lkpi_devres_add(&pdev->dev, dr);
        }

        if (dr == NULL)
                device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__);

        return (dr);
}

void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
        struct pcim_iomap_devres *dr;
        struct pci_dev *pdev;
        int bar;

        dr = p;
        pdev = to_pci_dev(dev);
        for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
                if (dr->mmio_table[bar] == NULL)
                        continue;

                pci_iounmap(pdev, dr->mmio_table[bar]);
        }
}

static int
linux_pci_suspend(device_t dev)
{
        const struct dev_pm_ops *pmops;
        struct pm_message pm = { };
        struct pci_dev *pdev;
        int error;

        error = 0;
        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        pmops = pdev->pdrv->driver.pm;

        if (pdev->pdrv->suspend != NULL)
                error = -pdev->pdrv->suspend(pdev, pm);
        else if (pmops != NULL && pmops->suspend != NULL) {
                error = -pmops->suspend(&pdev->dev);
                if (error == 0 && pmops->suspend_late != NULL)
                        error = -pmops->suspend_late(&pdev->dev);
        }
        return (error);
}

static int
linux_pci_resume(device_t dev)
{
        const struct dev_pm_ops *pmops;
        struct pci_dev *pdev;
        int error;

        error = 0;
        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        pmops = pdev->pdrv->driver.pm;

        if (pdev->pdrv->resume != NULL)
                error = -pdev->pdrv->resume(pdev);
        else if (pmops != NULL && pmops->resume != NULL) {
                if (pmops->resume_early != NULL)
                        error = -pmops->resume_early(&pdev->dev);
                if (error == 0 && pmops->resume != NULL)
                        error = -pmops->resume(&pdev->dev);
        }
        return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
        struct pci_dev *pdev;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        if (pdev->pdrv->shutdown != NULL)
                pdev->pdrv->shutdown(pdev);
        return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
        struct pci_dev *pdev;
        int error;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        if (pdev->pdrv->bsd_iov_init != NULL)
                error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
        else
                error = EINVAL;
        return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
        struct pci_dev *pdev;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        if (pdev->pdrv->bsd_iov_uninit != NULL)
                pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
        struct pci_dev *pdev;
        int error;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        if (pdev->pdrv->bsd_iov_add_vf != NULL)
                error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
        else
                error = EINVAL;
        return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
        int error;

        linux_set_current(curthread);
        spin_lock(&pci_lock);
        list_add(&pdrv->node, &pci_drivers);
        spin_unlock(&pci_lock);
        if (pdrv->bsddriver.name == NULL)
                pdrv->bsddriver.name = pdrv->name;
        pdrv->bsddriver.methods = pci_methods;
        pdrv->bsddriver.size = sizeof(struct pci_dev);

        bus_topo_lock();
        error = devclass_add_driver(dc, &pdrv->bsddriver,
            BUS_PASS_DEFAULT, &pdrv->bsdclass);
        bus_topo_unlock();
        return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
        devclass_t dc;

        dc = devclass_find("pci");
        if (dc == NULL)
                return (-ENXIO);
        pdrv->isdrm = false;
        return (_linux_pci_register_driver(pdrv, dc));
}
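
/*
 * Registration sketch (hypothetical "exdrv" driver, example IDs only): a
 * LinuxKPI consumer fills in a struct pci_driver exactly as on Linux and
 * calls pci_register_driver(), which maps to linux_pci_register_driver()
 * above and hooks the driver into the FreeBSD "pci" devclass through
 * pci_methods:
 *
 *	static const struct pci_device_id exdrv_ids[] = {
 *		{ PCI_DEVICE(0x15b3, 0x1013) },
 *		{ 0, }
 *	};
 *
 *	static struct pci_driver exdrv_driver = {
 *		.name		= "exdrv",
 *		.id_table	= exdrv_ids,
 *		.probe		= exdrv_probe,
 *		.remove		= exdrv_remove,
 *	};
 *
 *	error = pci_register_driver(&exdrv_driver);
 *
 * linux_pci_probe() and linux_pci_attach() above then match exdrv_ids
 * against devices enumerated by the FreeBSD PCI bus.
 */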

struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
        device_t dev;
        struct resource *res;

        KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
            ("trying to reserve non-BAR type %d", type));

        dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
            device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
        res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
            1, 1, 0);
        if (res == NULL)
                return (NULL);
        return (resource_list_find(rl, type, rid));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
        struct resource_list_entry *rle;
        rman_res_t newstart;
        device_t dev;
        int error;

        if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
                return (0);
        dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
            device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
        error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
        if (error != 0) {
                device_printf(pdev->dev.bsddev,
                    "translate of %#jx failed: %d\n",
                    (uintmax_t)rle->start, error);
                return (0);
        }
        return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
        struct resource_list_entry *rle;

        if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
                return (0);
        return (rle->count);
}

int
pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
        struct resource *res;
        struct pci_devres *dr;
        struct pci_mmio_region *mmio;
        int rid;
        int type;

        type = pci_resource_type(pdev, bar);
        if (type < 0)
                return (-ENODEV);
        rid = PCIR_BAR(bar);
        res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
            RF_ACTIVE|RF_SHAREABLE);
        if (res == NULL) {
                device_printf(pdev->dev.bsddev, "%s: failed to alloc "
                    "bar %d type %d rid %d\n",
                    __func__, bar, type, PCIR_BAR(bar));
                return (-ENODEV);
        }

        /*
         * It seems there is an implicit devres tracking on these if the device
         * is managed; otherwise the resources are not automatically freed on
         * FreeBSD/LinuxKPI, though they should be/are expected to be by Linux
         * drivers.
         */
        dr = lkpi_pci_devres_find(pdev);
        if (dr != NULL) {
                dr->region_mask |= (1 << bar);
                dr->region_table[bar] = res;
        }

        /* Even if the device is not managed we need to track it for iomap. */
        mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
        mmio->rid = PCIR_BAR(bar);
        mmio->type = type;
        mmio->res = res;
        TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

        return (0);
}

struct resource *
_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
{
        struct pci_mmio_region *mmio, *p;
        int type;

        type = pci_resource_type(pdev, bar);
        if (type < 0) {
                device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
                     __func__, bar, type);
                return (NULL);
        }

        /*
         * Check for duplicate mappings.
         * This can happen if a driver calls pci_request_region() first.
         */
        TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
                if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
                        return (mmio->res);
                }
        }

        mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
        mmio->rid = PCIR_BAR(bar);
        mmio->type = type;
        mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
            &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
        if (mmio->res == NULL) {
                device_printf(pdev->dev.bsddev, "%s: failed to alloc "
                    "bar %d type %d rid %d\n",
                    __func__, bar, type, PCIR_BAR(bar));
                free(mmio, M_DEVBUF);
                return (NULL);
        }
        TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

        return (mmio->res);
}
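
/*
 * Usage sketch (hypothetical BAR and register offset): the usual Linux
 * sequence of claiming and mapping a BAR sits on top of the two functions
 * above:
 *
 *	void __iomem *regs;
 *
 *	if (pci_request_region(pdev, 0, "exdrv-bar0") != 0)
 *		return (-ENODEV);
 *	regs = pci_iomap(pdev, 0, 0);		 map all of BAR 0
 *	if (regs == NULL)
 *		return (-ENOMEM);
 *	writel(0x1, regs + 0x10);		 example register offset
 *
 * pci_iomap() re-uses the resource recorded on pdev->mmio by
 * pci_request_region(), so the two calls never double-allocate the BAR.
 */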

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
        devclass_t dc;

        dc = devclass_create("vgapci");
        if (dc == NULL)
                return (-ENXIO);
        pdrv->isdrm = true;
        pdrv->name = "drmn";
        return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
        devclass_t bus;

        bus = devclass_find("pci");

        spin_lock(&pci_lock);
        list_del(&pdrv->node);
        spin_unlock(&pci_lock);
        bus_topo_lock();
        if (bus != NULL)
                devclass_delete_driver(bus, &pdrv->bsddriver);
        bus_topo_unlock();
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
        devclass_t bus;

        bus = devclass_find("vgapci");

        spin_lock(&pci_lock);
        list_del(&pdrv->node);
        spin_unlock(&pci_lock);
        bus_topo_lock();
        if (bus != NULL)
                devclass_delete_driver(bus, &pdrv->bsddriver);
        bus_topo_unlock();
}

int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
        int error;

        if (flags & PCI_IRQ_MSIX) {
                struct msix_entry *entries;
                int i;

                entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
                if (entries == NULL) {
                        error = -ENOMEM;
                        goto out;
                }
                for (i = 0; i < maxv; ++i)
                        entries[i].entry = i;
                error = pci_enable_msix(pdev, entries, maxv);
out:
                kfree(entries);
                if (error == 0 && pdev->msix_enabled)
                        return (pdev->dev.irq_end - pdev->dev.irq_start);
        }
        if (flags & PCI_IRQ_MSI) {
                if (pci_msi_count(pdev->dev.bsddev) < minv)
                        return (-ENOSPC);
                /* We only support 1 vector in pci_enable_msi() */
                if (minv != 1)
                        return (-ENOSPC);
                error = pci_enable_msi(pdev);
                if (error == 0 && pdev->msi_enabled)
                        return (pdev->dev.irq_end - pdev->dev.irq_start);
        }
        if (flags & PCI_IRQ_LEGACY) {
                if (pdev->irq)
                        return (1);
        }

        return (-EINVAL);
}
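
/*
 * Usage sketch (hypothetical vector counts): request up to 8 MSI-X
 * vectors, falling back to a single MSI message or the legacy INTx line,
 * then fetch the Linux IRQ number of vector 0:
 *
 *	int nvec, irq;
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
 *	    PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
 *	if (nvec < 0)
 *		return (nvec);
 *	irq = pci_irq_vector(pdev, 0);
 *
 * As the implementation above shows, the MSI path currently supports a
 * single vector only (minv must be 1).
 */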

struct msi_desc *
lkpi_pci_msi_desc_alloc(int irq)
{
        struct device *dev;
        struct pci_dev *pdev;
        struct msi_desc *desc;
        struct pci_devinfo *dinfo;
        struct pcicfg_msi *msi;

        dev = linux_pci_find_irq_dev(irq);
        if (dev == NULL)
                return (NULL);

        pdev = to_pci_dev(dev);
        if (pdev->msi_desc != NULL)
                return (pdev->msi_desc);

        dinfo = device_get_ivars(dev->bsddev);
        msi = &dinfo->cfg.msi;

        desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);

        desc->msi_attrib.is_64 =
           (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ? true : false;
        desc->msg.data = msi->msi_data;

        return (desc);
}

bool
pci_device_is_present(struct pci_dev *pdev)
{
        device_t dev;

        dev = pdev->dev.bsddev;

        return (bus_child_present(dev));
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
        void            *vaddr;
        uint64_t        dma_addr;
        bus_dmamap_t    dmamap;
        bus_dma_tag_t   dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

        linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
            pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
            UMA_ALIGN_PTR, 0);
        linux_dma_obj_zone = uma_zcreate("linux_dma_object",
            sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, 0);
        lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

        counter_u64_free(lkpi_pci_nseg1_fail);
        uma_zdestroy(linux_dma_obj_zone);
        uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

        return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

        uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;
        int error, nseg;
        bus_dma_segment_t seg;

        priv = dev->dma_priv;

        /*
         * If the resultant mapping will be entirely 1:1 with the
         * physical address, short-circuit the remainder of the
         * bus_dma API.  This avoids tracking collisions in the pctrie
         * with the additional benefit of reducing overhead.
         */
        if (bus_dma_id_mapped(dmat, phys, len))
                return (phys);

        obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
        if (obj == NULL) {
                return (0);
        }
        obj->dmat = dmat;

        DMA_PRIV_LOCK(priv);
        if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
                DMA_PRIV_UNLOCK(priv);
                uma_zfree(linux_dma_obj_zone, obj);
                return (0);
        }

        nseg = -1;
        if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
            BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
                bus_dmamap_destroy(obj->dmat, obj->dmamap);
                DMA_PRIV_UNLOCK(priv);
                uma_zfree(linux_dma_obj_zone, obj);
                counter_u64_add(lkpi_pci_nseg1_fail, 1);
                if (linuxkpi_debug)
                        dump_stack();
                return (0);
        }

        KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
        obj->dma_addr = seg.ds_addr;

        error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
        if (error != 0) {
                bus_dmamap_unload(obj->dmat, obj->dmamap);
                bus_dmamap_destroy(obj->dmat, obj->dmamap);
                DMA_PRIV_UNLOCK(priv);
                uma_zfree(linux_dma_obj_zone, obj);
                return (0);
        }
        DMA_PRIV_UNLOCK(priv);
        return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{
        return (phys);
}
#endif

dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
        struct linux_dma_priv *priv;

        priv = dev->dma_priv;
        return (linux_dma_map_phys_common(dev, phys, len, priv->dmat));
}
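
/*
 * Usage sketch (hypothetical buffer): linux_dma_map_phys() is the backend
 * of the streaming DMA API; a driver typically gets here through
 * dma_map_single() or dma_map_page(), e.g.:
 *
 *	dma_addr_t busaddr;
 *
 *	busaddr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, busaddr))
 *		return (-ENOMEM);
 *	... hand busaddr to the device ...
 *	dma_unmap_single(&pdev->dev, busaddr, len, DMA_TO_DEVICE);
 *
 * On a 1:1 mapped architecture the fast path above returns the physical
 * address directly and no pctrie entry is created.
 */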

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;

        priv = dev->dma_priv;

        if (pctrie_is_empty(&priv->ptree))
                return;

        DMA_PRIV_LOCK(priv);
        obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
        if (obj == NULL) {
                DMA_PRIV_UNLOCK(priv);
                return;
        }
        LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
        bus_dmamap_unload(obj->dmat, obj->dmamap);
        bus_dmamap_destroy(obj->dmat, obj->dmamap);
        DMA_PRIV_UNLOCK(priv);

        uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
        struct linux_dma_priv *priv;
        vm_paddr_t high;
        size_t align;
        void *mem;

        if (dev == NULL || dev->dma_priv == NULL) {
                *dma_handle = 0;
                return (NULL);
        }
        priv = dev->dma_priv;
        if (priv->dma_coherent_mask)
                high = priv->dma_coherent_mask;
        else
                /* Coherent is lower 32-bit only by default in Linux. */
                high = BUS_SPACE_MAXADDR_32BIT;
        align = PAGE_SIZE << get_order(size);
        /* Always zero the allocation. */
        flag |= M_ZERO;
        mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
            align, 0, VM_MEMATTR_DEFAULT);
        if (mem != NULL) {
                *dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
                    priv->dmat_coherent);
                if (*dma_handle == 0) {
                        kmem_free(mem, size);
                        mem = NULL;
                }
        } else {
                *dma_handle = 0;
        }
        return (mem);
}
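
/*
 * Usage sketch (hypothetical descriptor-ring allocation; struct ring_desc
 * and ring_size are example names): coherent memory comes back zeroed and
 * already mapped through the coherent tag:
 *
 *	struct ring_desc *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, ring_size, &ring_dma,
 *	    GFP_KERNEL);
 *	if (ring == NULL)
 *		return (-ENOMEM);
 *	...
 *	dma_free_coherent(&pdev->dev, ring_size, ring, ring_dma);
 */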

struct lkpi_devres_dmam_coherent {
        size_t size;
        dma_addr_t *handle;
        void *mem;
};

static void
lkpi_dmam_free_coherent(struct device *dev, void *p)
{
        struct lkpi_devres_dmam_coherent *dr;

        dr = p;
        dma_free_coherent(dev, dr->size, dr->mem, *dr->handle);
}

void *
linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{
        struct lkpi_devres_dmam_coherent *dr;

        dr = lkpi_devres_alloc(lkpi_dmam_free_coherent,
            sizeof(*dr), GFP_KERNEL | __GFP_ZERO);

        if (dr == NULL)
                return (NULL);

        dr->size = size;
        dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag);
        dr->handle = dma_handle;
        if (dr->mem == NULL) {
                lkpi_devres_free(dr);
                return (NULL);
        }

        lkpi_devres_add(dev, dr);
        return (dr->mem);
}

void
linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,
    bus_dmasync_op_t op)
{
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;

        priv = dev->dma_priv;

        if (pctrie_is_empty(&priv->ptree))
                return;

        DMA_PRIV_LOCK(priv);
        obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
        if (obj == NULL) {
                DMA_PRIV_UNLOCK(priv);
                return;
        }

        bus_dmamap_sync(obj->dmat, obj->dmamap, op);
        DMA_PRIV_UNLOCK(priv);
}

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction direction, unsigned long attrs __unused)
{
        struct linux_dma_priv *priv;
        struct scatterlist *sg;
        int i, nseg;
        bus_dma_segment_t seg;

        priv = dev->dma_priv;

        DMA_PRIV_LOCK(priv);

        /* create common DMA map in the first S/G entry */
        if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
                DMA_PRIV_UNLOCK(priv);
                return (0);
        }

        /* load all S/G list entries */
        for_each_sg(sgl, sg, nents, i) {
                nseg = -1;
                if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
                    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
                    &seg, &nseg) != 0) {
                        bus_dmamap_unload(priv->dmat, sgl->dma_map);
                        bus_dmamap_destroy(priv->dmat, sgl->dma_map);
                        DMA_PRIV_UNLOCK(priv);
                        return (0);
                }
                KASSERT(nseg == 0,
                    ("More than one segment (nseg=%d)", nseg + 1));

                sg_dma_address(sg) = seg.ds_addr;
        }

        switch (direction) {
        case DMA_BIDIRECTIONAL:
                bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
                break;
        case DMA_TO_DEVICE:
                bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
                break;
        case DMA_FROM_DEVICE:
                bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
                break;
        default:
                break;
        }

        DMA_PRIV_UNLOCK(priv);

        return (nents);
}
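
/*
 * Usage sketch (hypothetical page array; NPAGES and pages[] are example
 * names): all entries of the scatterlist are loaded into one map created
 * on the first entry, so the whole list must be unmapped through that
 * same first entry:
 *
 *	struct scatterlist sg[NPAGES];
 *	int i, nmapped;
 *
 *	sg_init_table(sg, NPAGES);
 *	for (i = 0; i < NPAGES; i++)
 *		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
 *	nmapped = dma_map_sg(&pdev->dev, sg, NPAGES, DMA_TO_DEVICE);
 *	if (nmapped == 0)
 *		return (-ENOMEM);
 *	...
 *	dma_unmap_sg(&pdev->dev, sg, NPAGES, DMA_TO_DEVICE);
 */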

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction direction,
    unsigned long attrs __unused)
{
        struct linux_dma_priv *priv;

        priv = dev->dma_priv;

        DMA_PRIV_LOCK(priv);

        switch (direction) {
        case DMA_BIDIRECTIONAL:
                bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
                bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
                break;
        case DMA_TO_DEVICE:
                bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);
                break;
        case DMA_FROM_DEVICE:
                bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
                break;
        default:
                break;
        }

        bus_dmamap_unload(priv->dmat, sgl->dma_map);
        bus_dmamap_destroy(priv->dmat, sgl->dma_map);
        DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
        struct device  *pool_device;
        uma_zone_t      pool_zone;
        struct mtx      pool_lock;
        bus_dma_tag_t   pool_dmat;
        size_t          pool_entry_size;
        struct pctrie   pool_ptree;
};

#define DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)
#define DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
        struct linux_dma_obj *obj = mem;
        struct dma_pool *pool = arg;
        int error, nseg;
        bus_dma_segment_t seg;

        nseg = -1;
        DMA_POOL_LOCK(pool);
        error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
            vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
            &seg, &nseg);
        DMA_POOL_UNLOCK(pool);
        if (error != 0) {
                return (error);
        }
        KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
        obj->dma_addr = seg.ds_addr;

        return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
        struct linux_dma_obj *obj = mem;
        struct dma_pool *pool = arg;

        DMA_POOL_LOCK(pool);
        bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
        DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
        struct dma_pool *pool = arg;
        struct linux_dma_obj *obj;
        int error, i;

        for (i = 0; i < count; i++) {
                obj = uma_zalloc(linux_dma_obj_zone, flags);
                if (obj == NULL)
                        break;

                error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
                    BUS_DMA_NOWAIT, &obj->dmamap);
                if (error != 0) {
                        uma_zfree(linux_dma_obj_zone, obj);
                        break;
                }

                store[i] = obj;
        }

        return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
        struct dma_pool *pool = arg;
        struct linux_dma_obj *obj;
        int i;

        for (i = 0; i < count; i++) {
                obj = store[i];
                bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
                uma_zfree(linux_dma_obj_zone, obj);
        }
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
        struct linux_dma_priv *priv;
        struct dma_pool *pool;

        priv = dev->dma_priv;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        pool->pool_device = dev;
        pool->pool_entry_size = size;

        if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
            align, boundary,            /* alignment, boundary */
            priv->dma_mask,             /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filtfunc, filtfuncarg */
            size,                       /* maxsize */
            1,                          /* nsegments */
            size,                       /* maxsegsz */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockfuncarg */
            &pool->pool_dmat)) {
                kfree(pool);
                return (NULL);
        }

        pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
            dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
            dma_pool_obj_release, pool, 0);

        mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
        pctrie_init(&pool->pool_ptree);

        return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

        uma_zdestroy(pool->pool_zone);
        bus_dma_tag_destroy(pool->pool_dmat);
        mtx_destroy(&pool->pool_lock);
        kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
        struct dma_pool *pool;

        pool = *(struct dma_pool **)p;
        LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
        linux_dma_pool_destroy(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
        struct linux_dma_obj *obj;

        obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
        if (obj == NULL)
                return (NULL);

        DMA_POOL_LOCK(pool);
        if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
                DMA_POOL_UNLOCK(pool);
                uma_zfree_arg(pool->pool_zone, obj, pool);
                return (NULL);
        }
        DMA_POOL_UNLOCK(pool);

        *handle = obj->dma_addr;
        return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
        struct linux_dma_obj *obj;

        DMA_POOL_LOCK(pool);
        obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
        if (obj == NULL) {
                DMA_POOL_UNLOCK(pool);
                return;
        }
        LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
        DMA_POOL_UNLOCK(pool);

        uma_zfree_arg(pool->pool_zone, obj, pool);
}
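
/*
 * Usage sketch (hypothetical command-descriptor pool; sizes are example
 * values): the pool front-ends a UMA cache whose import/release callbacks
 * above do the bus_dmamem work:
 *
 *	struct dma_pool *cmd_pool;
 *	void *cmd;
 *	dma_addr_t cmd_dma;
 *
 *	cmd_pool = dma_pool_create("exdrv-cmd", &pdev->dev, 64, 64, 0);
 *	if (cmd_pool == NULL)
 *		return (-ENOMEM);
 *	cmd = dma_pool_alloc(cmd_pool, GFP_KERNEL, &cmd_dma);
 *	...
 *	dma_pool_free(cmd_pool, cmd, cmd_dma);
 *	dma_pool_destroy(cmd_pool);
 */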

static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
        struct pci_dev *pdev;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);

        props->brightness = pdev->dev.bd->props.brightness;
        props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
        props->nlevels = 0;

        return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
        struct pci_dev *pdev;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);

        info->type = BACKLIGHT_TYPE_PANEL;
        strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
        return (0);
}

static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
        struct pci_dev *pdev;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);

        pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
                props->brightness / 100;
        pdev->dev.bd->props.power = props->brightness == 0 ?
                4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
        return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

        dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
        dev->bd->ops = ops;
        dev->bd->props.type = props->type;
        dev->bd->props.max_brightness = props->max_brightness;
        dev->bd->props.brightness = props->brightness;
        dev->bd->props.power = props->power;
        dev->bd->data = data;
        dev->bd->dev = dev;
        dev->bd->name = strdup(name, M_DEVBUF);

        dev->backlight_dev = backlight_register(name, dev->bsddev);

        return (dev->bd);
}
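
/*
 * Registration sketch (hypothetical "exbl" panel driver; callback names
 * and property values are example assumptions): the Linux-side contract
 * mirrors backlight_device_register(), and the registered callbacks feed
 * the FreeBSD backlight(9) methods above:
 *
 *	static const struct backlight_ops exbl_ops = {
 *		.update_status	= exbl_update_status,
 *		.get_brightness	= exbl_get_brightness,
 *	};
 *	struct backlight_properties props = {
 *		.type = BACKLIGHT_RAW,
 *		.max_brightness = 255,
 *		.brightness = 255,
 *	};
 *
 *	bd = backlight_device_register("exbl", &pdev->dev, sc, &exbl_ops,
 *	    &props);
 *
 * Note the implementation stores the result in dev->bd, so only one
 * backlight per struct device is supported.
 */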

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

        backlight_destroy(bd->dev->backlight_dev);
        free(bd->name, M_DEVBUF);
        free(bd, M_DEVBUF);
}
