FreeBSD/Linux Kernel Cross Reference
sys/lib/devres.c

    1 #include <linux/pci.h>
    2 #include <linux/io.h>
    3 #include <linux/gfp.h>
    4 #include <linux/export.h>
    5 
    6 void devm_ioremap_release(struct device *dev, void *res)
    7 {
    8         iounmap(*(void __iomem **)res);
    9 }
   10 
   11 static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
   12 {
   13         return *(void **)res == match_data;
   14 }
   15 
   16 /**
   17  * devm_ioremap - Managed ioremap()
   18  * @dev: Generic device to remap IO address for
   19  * @offset: BUS offset to map
   20  * @size: Size of map
   21  *
   22  * Managed ioremap().  Map is automatically unmapped on driver detach.
   23  */
   24 void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
   25                            unsigned long size)
   26 {
   27         void __iomem **ptr, *addr;
   28 
   29         ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
   30         if (!ptr)
   31                 return NULL;
   32 
   33         addr = ioremap(offset, size);
   34         if (addr) {
   35                 *ptr = addr;
   36                 devres_add(dev, ptr);
   37         } else
   38                 devres_free(ptr);
   39 
   40         return addr;
   41 }
   42 EXPORT_SYMBOL(devm_ioremap);
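
Not part of the original file: a minimal usage sketch, assuming a hypothetical platform driver ("foo") with an invented base address and register layout. The same pattern applies to devm_ioremap_nocache() below.

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/errno.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *regs;

	/* hypothetical fixed MMIO window: base 0x10000000, length 0x100 */
	regs = devm_ioremap(&pdev->dev, 0x10000000, 0x100);
	if (!regs)
		return -ENOMEM;

	/* read a hypothetical ID register at offset 0 */
	dev_info(&pdev->dev, "device id: 0x%08x\n", readl(regs));

	/* no iounmap() in error or remove paths: devres unmaps on detach */
	return 0;
}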
   43 
   44 /**
   45  * devm_ioremap_nocache - Managed ioremap_nocache()
   46  * @dev: Generic device to remap IO address for
   47  * @offset: BUS offset to map
   48  * @size: Size of map
   49  *
   50  * Managed ioremap_nocache().  Map is automatically unmapped on driver
   51  * detach.
   52  */
   53 void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
   54                                    unsigned long size)
   55 {
   56         void __iomem **ptr, *addr;
   57 
   58         ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
   59         if (!ptr)
   60                 return NULL;
   61 
   62         addr = ioremap_nocache(offset, size);
   63         if (addr) {
   64                 *ptr = addr;
   65                 devres_add(dev, ptr);
   66         } else
   67                 devres_free(ptr);
   68 
   69         return addr;
   70 }
   71 EXPORT_SYMBOL(devm_ioremap_nocache);
   72 
   73 /**
   74  * devm_iounmap - Managed iounmap()
   75  * @dev: Generic device to unmap for
   76  * @addr: Address to unmap
   77  *
   78  * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
   79  */
   80 void devm_iounmap(struct device *dev, void __iomem *addr)
   81 {
   82         WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
   83                                (void *)addr));
   84         iounmap(addr);
   85 }
   86 EXPORT_SYMBOL(devm_iounmap);
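
Not part of the original file: devm_iounmap() is only needed when a managed mapping must be torn down before driver detach. A brief hypothetical sketch:

#include <linux/device.h>
#include <linux/io.h>

/* Release a mapping that was only needed during early setup, rather
 * than keeping it around until the driver detaches. */
static void foo_discard_boot_regs(struct device *dev, void __iomem *boot_regs)
{
	devm_iounmap(dev, boot_regs);
}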
   87 
   88 /**
   89  * devm_request_and_ioremap() - Check, request region, and ioremap resource
   90  * @dev: Generic device to handle the resource for
   91  * @res: resource to be handled
   92  *
   93  * Takes all necessary steps to ioremap a mem resource. Uses managed device
   94  * resources, so everything is undone on driver detach. Checks arguments, so you can feed
   95  * it the result from e.g. platform_get_resource() directly. Returns the
   96  * remapped pointer or NULL on error. Usage example:
   97  *
   98  *      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   99  *      base = devm_request_and_ioremap(&pdev->dev, res);
  100  *      if (!base)
  101  *              return -EADDRNOTAVAIL;
  102  */
  103 void __iomem *devm_request_and_ioremap(struct device *dev,
  104                         struct resource *res)
  105 {
  106         resource_size_t size;
  107         const char *name;
  108         void __iomem *dest_ptr;
  109 
  110         BUG_ON(!dev);
  111 
  112         if (!res || resource_type(res) != IORESOURCE_MEM) {
  113                 dev_err(dev, "invalid resource\n");
  114                 return NULL;
  115         }
  116 
  117         size = resource_size(res);
  118         name = res->name ?: dev_name(dev);
  119 
  120         if (!devm_request_mem_region(dev, res->start, size, name)) {
  121                 dev_err(dev, "can't request region for resource %pR\n", res);
  122                 return NULL;
  123         }
  124 
  125         if (res->flags & IORESOURCE_CACHEABLE)
  126                 dest_ptr = devm_ioremap(dev, res->start, size);
  127         else
  128                 dest_ptr = devm_ioremap_nocache(dev, res->start, size);
  129 
  130         if (!dest_ptr) {
  131                 dev_err(dev, "ioremap failed for resource %pR\n", res);
  132                 devm_release_mem_region(dev, res->start, size);
  133         }
  134 
  135         return dest_ptr;
  136 }
  137 EXPORT_SYMBOL(devm_request_and_ioremap);
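
Not part of the original file: the usage example above expanded into a complete, hypothetical platform probe routine; the driver name and register offset are invented.

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/errno.h>

static int bar_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* the result may be passed in unchecked; NULL is handled above */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base)
		return -EADDRNOTAVAIL;

	/* both the region request and the mapping are undone on detach */
	writel(0x1, base + 0x4);	/* hypothetical enable register */
	return 0;
}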
  138 
  139 #ifdef CONFIG_HAS_IOPORT
  140 /*
  141  * Generic iomap devres
  142  */
  143 static void devm_ioport_map_release(struct device *dev, void *res)
  144 {
  145         ioport_unmap(*(void __iomem **)res);
  146 }
  147 
  148 static int devm_ioport_map_match(struct device *dev, void *res,
  149                                  void *match_data)
  150 {
  151         return *(void **)res == match_data;
  152 }
  153 
  154 /**
  155  * devm_ioport_map - Managed ioport_map()
  156  * @dev: Generic device to map ioport for
  157  * @port: Port to map
  158  * @nr: Number of ports to map
  159  *
  160  * Managed ioport_map().  Map is automatically unmapped on driver
  161  * detach.
  162  */
  163 void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
  164                                unsigned int nr)
  165 {
  166         void __iomem **ptr, *addr;
  167 
  168         ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
  169         if (!ptr)
  170                 return NULL;
  171 
  172         addr = ioport_map(port, nr);
  173         if (addr) {
  174                 *ptr = addr;
  175                 devres_add(dev, ptr);
  176         } else
  177                 devres_free(ptr);
  178 
  179         return addr;
  180 }
  181 EXPORT_SYMBOL(devm_ioport_map);
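
Not part of the original file: a hypothetical sketch mapping a small legacy I/O-port range; the port base and register meaning are invented.

#include <linux/device.h>
#include <linux/io.h>
#include <linux/errno.h>

static int legacy_attach(struct device *dev)
{
	void __iomem *base;

	/* map 8 ports starting at a hypothetical base of 0x3f8 */
	base = devm_ioport_map(dev, 0x3f8, 8);
	if (!base)
		return -ENOMEM;

	/* ioread8()/iowrite8() accept ioport_map() cookies */
	iowrite8(0x00, base + 1);	/* hypothetical: mask interrupts */
	return 0;
}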
  182 
  183 /**
  184  * devm_ioport_unmap - Managed ioport_unmap()
  185  * @dev: Generic device to unmap for
  186  * @addr: Address to unmap
  187  *
  188  * Managed ioport_unmap().  @addr must have been mapped using
  189  * devm_ioport_map().
  190  */
  191 void devm_ioport_unmap(struct device *dev, void __iomem *addr)
  192 {
  193         ioport_unmap(addr);
  194         WARN_ON(devres_destroy(dev, devm_ioport_map_release,
  195                                devm_ioport_map_match, (void *)addr));
  196 }
  197 EXPORT_SYMBOL(devm_ioport_unmap);
  198 
  199 #ifdef CONFIG_PCI
  200 /*
  201  * PCI iomap devres
  202  */
  203 #define PCIM_IOMAP_MAX  PCI_ROM_RESOURCE
  204 
  205 struct pcim_iomap_devres {
  206         void __iomem *table[PCIM_IOMAP_MAX];
  207 };
  208 
  209 static void pcim_iomap_release(struct device *gendev, void *res)
  210 {
  211         struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
  212         struct pcim_iomap_devres *this = res;
  213         int i;
  214 
  215         for (i = 0; i < PCIM_IOMAP_MAX; i++)
  216                 if (this->table[i])
  217                         pci_iounmap(dev, this->table[i]);
  218 }
  219 
  220 /**
  221  * pcim_iomap_table - access iomap allocation table
  222  * @pdev: PCI device to access iomap table for
  223  *
  224  * Access the iomap allocation table for @pdev.  If the iomap table doesn't
  225  * exist and @pdev is managed, it will be allocated.  All iomaps
  226  * recorded in the iomap table are automatically unmapped on driver
  227  * detach.
  228  *
  229  * This function might sleep when the table is first allocated but can
  230  * be safely called without context and is guaranteed to succeed once
  231  * allocated.
  232  */
  233 void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
  234 {
  235         struct pcim_iomap_devres *dr, *new_dr;
  236 
  237         dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
  238         if (dr)
  239                 return dr->table;
  240 
  241         new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
  242         if (!new_dr)
  243                 return NULL;
  244         dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
  245         return dr->table;
  246 }
  247 EXPORT_SYMBOL(pcim_iomap_table);
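
Not part of the original file: a small sketch relying on the guarantee above that, once allocated, the table lookup always succeeds; slots for BARs that were never mapped stay NULL. The helper name is hypothetical.

#include <linux/pci.h>

/* Return the mapping for @bar if pcim_iomap*() created one, else NULL. */
static void __iomem *baz_bar_addr(struct pci_dev *pdev, int bar)
{
	void __iomem * const *tbl = pcim_iomap_table(pdev);

	return tbl ? tbl[bar] : NULL;
}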
  248 
  249 /**
  250  * pcim_iomap - Managed pci_iomap()
  251  * @pdev: PCI device to iomap for
  252  * @bar: BAR to iomap
  253  * @maxlen: Maximum length of iomap
  254  *
  255  * Managed pci_iomap().  Map is automatically unmapped on driver
  256  * detach.
  257  */
  258 void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
  259 {
  260         void __iomem **tbl;
  261 
  262         BUG_ON(bar >= PCIM_IOMAP_MAX);
  263 
  264         tbl = (void __iomem **)pcim_iomap_table(pdev);
  265         if (!tbl || tbl[bar])   /* duplicate mappings not allowed */
  266                 return NULL;
  267 
  268         tbl[bar] = pci_iomap(pdev, bar, maxlen);
  269         return tbl[bar];
  270 }
  271 EXPORT_SYMBOL(pcim_iomap);
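
Not part of the original file: a hypothetical PCI probe that maps only BAR 0. pcim_iomap() only maps; the region is requested separately here (managed, because the device was enabled with pcim_enable_device()). pcim_iomap_regions() below combines both steps.

#include <linux/pci.h>
#include <linux/errno.h>

static int qux_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *bar0;
	int rc;

	rc = pcim_enable_device(pdev);		/* managed pci_enable_device() */
	if (rc)
		return rc;

	rc = pci_request_region(pdev, 0, "qux");
	if (rc)
		return rc;

	bar0 = pcim_iomap(pdev, 0, 0);		/* maxlen 0: map the whole BAR */
	if (!bar0)
		return -ENOMEM;

	/* bar0 is unmapped by pcim_iomap_release() on driver detach */
	return 0;
}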
  272 
  273 /**
  274  * pcim_iounmap - Managed pci_iounmap()
  275  * @pdev: PCI device to iounmap for
  276  * @addr: Address to unmap
  277  *
  278  * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
  279  */
  280 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
  281 {
  282         void __iomem **tbl;
  283         int i;
  284 
  285         pci_iounmap(pdev, addr);
  286 
  287         tbl = (void __iomem **)pcim_iomap_table(pdev);
  288         BUG_ON(!tbl);
  289 
  290         for (i = 0; i < PCIM_IOMAP_MAX; i++)
  291                 if (tbl[i] == addr) {
  292                         tbl[i] = NULL;
  293                         return;
  294                 }
  295         WARN_ON(1);
  296 }
  297 EXPORT_SYMBOL(pcim_iounmap);
  298 
  299 /**
  300  * pcim_iomap_regions - Request and iomap PCI BARs
  301  * @pdev: PCI device to map IO resources for
  302  * @mask: Mask of BARs to request and iomap
  303  * @name: Name used when requesting regions
  304  *
  305  * Request and iomap regions specified by @mask.
  306  */
  307 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
  308 {
  309         void __iomem * const *iomap;
  310         int i, rc;
  311 
  312         iomap = pcim_iomap_table(pdev);
  313         if (!iomap)
  314                 return -ENOMEM;
  315 
  316         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
  317                 unsigned long len;
  318 
  319                 if (!(mask & (1 << i)))
  320                         continue;
  321 
  322                 rc = -EINVAL;
  323                 len = pci_resource_len(pdev, i);
  324                 if (!len)
  325                         goto err_inval;
  326 
  327                 rc = pci_request_region(pdev, i, name);
  328                 if (rc)
  329                         goto err_inval;
  330 
  331                 rc = -ENOMEM;
  332                 if (!pcim_iomap(pdev, i, 0))
  333                         goto err_region;
  334         }
  335 
  336         return 0;
  337 
  338  err_region:
  339         pci_release_region(pdev, i);
  340  err_inval:
  341         while (--i >= 0) {
  342                 if (!(mask & (1 << i)))
  343                         continue;
  344                 pcim_iounmap(pdev, iomap[i]);
  345                 pci_release_region(pdev, i);
  346         }
  347 
  348         return rc;
  349 }
  350 EXPORT_SYMBOL(pcim_iomap_regions);
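
Not part of the original file: a hypothetical probe using pcim_iomap_regions() to request and map two BARs in one call, then fetching the pointers through pcim_iomap_table(). The driver name and BAR assignments are invented.

#include <linux/pci.h>

#define QUUX_REG_BAR	0	/* hypothetical register BAR */
#define QUUX_MSG_BAR	2	/* hypothetical message BAR */

static int quux_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, (1 << QUUX_REG_BAR) | (1 << QUUX_MSG_BAR),
				"quux");
	if (rc)
		return rc;

	iomap = pcim_iomap_table(pdev);
	/* iomap[QUUX_REG_BAR] and iomap[QUUX_MSG_BAR] are now valid;
	 * everything is released and unmapped automatically on detach. */
	return 0;
}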
  351 
  352 /**
  353  * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
  354  * @pdev: PCI device to map IO resources for
  355  * @mask: Mask of BARs to iomap
  356  * @name: Name used when requesting regions
  357  *
  358  * Request all PCI BARs and iomap regions specified by @mask.
  359  */
  360 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
  361                                    const char *name)
  362 {
  363         int request_mask = ((1 << 6) - 1) & ~mask;
  364         int rc;
  365 
  366         rc = pci_request_selected_regions(pdev, request_mask, name);
  367         if (rc)
  368                 return rc;
  369 
  370         rc = pcim_iomap_regions(pdev, mask, name);
  371         if (rc)
  372                 pci_release_selected_regions(pdev, request_mask);
  373         return rc;
  374 }
  375 EXPORT_SYMBOL(pcim_iomap_regions_request_all);
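
Not part of the original file: this variant requests all six standard BARs (the ((1 << 6) - 1) mask above) so nothing else can claim them, while iomapping only those in @mask. A hypothetical one-liner for a controller that only needs BAR 5 mapped:

#include <linux/pci.h>

static int corge_claim_bars(struct pci_dev *pdev)
{
	/* reserve BARs 0-5, but create a mapping only for BAR 5 */
	return pcim_iomap_regions_request_all(pdev, 1 << 5, "corge");
}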
  376 
  377 /**
  378  * pcim_iounmap_regions - Unmap and release PCI BARs
  379  * @pdev: PCI device to map IO resources for
  380  * @mask: Mask of BARs to unmap and release
  381  *
  382  * Unmap and release regions specified by @mask.
  383  */
  384 void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
  385 {
  386         void __iomem * const *iomap;
  387         int i;
  388 
  389         iomap = pcim_iomap_table(pdev);
  390         if (!iomap)
  391                 return;
  392 
  393         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
  394                 if (!(mask & (1 << i)))
  395                         continue;
  396 
  397                 pcim_iounmap(pdev, iomap[i]);
  398                 pci_release_region(pdev, i);
  399         }
  400 }
  401 EXPORT_SYMBOL(pcim_iounmap_regions);
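
Not part of the original file: explicit unwinding is only needed when BARs must be given up before driver detach, for example when a later probe step fails but other managed resources should remain. A hypothetical sketch:

#include <linux/pci.h>

static void quux_drop_bars(struct pci_dev *pdev)
{
	/* unmap and release BARs 0 and 2 mapped via pcim_iomap_regions() */
	pcim_iounmap_regions(pdev, (1 << 0) | (1 << 2));
}
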
  402 #endif /* CONFIG_PCI */
  403 #endif /* CONFIG_HAS_IOPORT */

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.