FreeBSD/Linux Kernel Cross Reference
sys/powerpc/pseries/plpar_iommu.c


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013, Nathan Whitehorn <nwhitehorn@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/module.h>
#include <sys/vmem.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>

#include <machine/bus.h>

#include <powerpc/pseries/phyp-hvcall.h>
#include <powerpc/pseries/plpar_iommu.h>

MALLOC_DEFINE(M_PHYPIOMMU, "iommu", "IOMMU data for PAPR LPARs");

struct papr_iommu_map {
        uint32_t iobn;
        vmem_t *vmem;
        struct papr_iommu_map *next;
};

static SLIST_HEAD(iommu_maps, iommu_map) iommu_map_head =
    SLIST_HEAD_INITIALIZER(iommu_map_head);
static int papr_supports_stuff_tce = -1;

struct iommu_map {
        uint32_t iobn;
        vmem_t *vmem;

        SLIST_ENTRY(iommu_map) entries;
};

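/*
 * Per-DMA-tag cookie installed by phyp_iommu_set_dma_tag(): the shared
 * iommu_map for this window's IOBN plus the I/O bus address range
 * [start, end) covered by the DMA window.
 */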
struct dma_window {
        struct iommu_map *map;
        bus_addr_t start;
        bus_addr_t end;
};

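/*
 * Walk up the device hierarchy from dev looking for an "ibm,my-dma-window"
 * or "ibm,dma-window" property, decode the window's IOBN, base address, and
 * size, find or create the vmem arena shared by all tags on that IOBN, and
 * hang the resulting dma_window cookie off the tag with
 * bus_dma_tag_set_iommu().
 */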
int
phyp_iommu_set_dma_tag(device_t bus, device_t dev, bus_dma_tag_t tag)
{
        device_t p;
        phandle_t node;
        cell_t dma_acells, dma_scells, dmawindow[6];
        struct iommu_map *i;
        int cell;

        for (p = dev; device_get_parent(p) != NULL; p = device_get_parent(p)) {
                if (ofw_bus_has_prop(p, "ibm,my-dma-window"))
                        break;
                if (ofw_bus_has_prop(p, "ibm,dma-window"))
                        break;
        }

        if (p == NULL)
                return (ENXIO);

        node = ofw_bus_get_node(p);
        if (OF_getencprop(node, "ibm,#dma-size-cells", &dma_scells,
            sizeof(cell_t)) <= 0)
                OF_searchencprop(node, "#size-cells", &dma_scells,
                    sizeof(cell_t));
        if (OF_getencprop(node, "ibm,#dma-address-cells", &dma_acells,
            sizeof(cell_t)) <= 0)
                OF_searchencprop(node, "#address-cells", &dma_acells,
                    sizeof(cell_t));

        if (ofw_bus_has_prop(p, "ibm,my-dma-window"))
                OF_getencprop(node, "ibm,my-dma-window", dmawindow,
                    sizeof(cell_t)*(dma_scells + dma_acells + 1));
        else
                OF_getencprop(node, "ibm,dma-window", dmawindow,
                    sizeof(cell_t)*(dma_scells + dma_acells + 1));

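        /*
         * The property is laid out as { iobn, address cells..., size
         * cells... }: e.g., with dma_acells = 2 and dma_scells = 2 the five
         * cells { iobn, addr_hi, addr_lo, size_hi, size_lo } decode below to
         * start = (addr_hi << 32) | addr_lo and
         * end = start + ((size_hi << 32) | size_lo).
         */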
        struct dma_window *window = malloc(sizeof(struct dma_window),
            M_PHYPIOMMU, M_WAITOK);
        window->start = 0;
        for (cell = 1; cell < 1 + dma_acells; cell++) {
                window->start <<= 32;
                window->start |= dmawindow[cell];
        }
        window->end = 0;
        for (; cell < 1 + dma_acells + dma_scells; cell++) {
                window->end <<= 32;
                window->end |= dmawindow[cell];
        }
        window->end += window->start;

        if (bootverbose)
                device_printf(dev, "Mapping IOMMU domain %#x\n", dmawindow[0]);
        window->map = NULL;
        SLIST_FOREACH(i, &iommu_map_head, entries) {
                if (i->iobn == dmawindow[0]) {
                        window->map = i;
                        break;
                }
        }

        if (window->map == NULL) {
                window->map = malloc(sizeof(struct iommu_map), M_PHYPIOMMU,
                    M_WAITOK);
                window->map->iobn = dmawindow[0];
                /*
                 * Allocate IOMMU range beginning at PAGE_SIZE. Some drivers
                 * (em(4), for example) do not like getting mappings at 0.
                 */
                window->map->vmem = vmem_create("IOMMU mappings", PAGE_SIZE,
                    trunc_page(VMEM_ADDR_MAX) - PAGE_SIZE, PAGE_SIZE, 0,
                    M_BESTFIT | M_NOWAIT);
                SLIST_INSERT_HEAD(&iommu_map_head, window->map, entries);
        }

        /*
         * Check experimentally whether we can use H_STUFF_TCE. It is required
         * by the spec but some firmware (e.g. QEMU) does not actually support
         * it
         */
        if (papr_supports_stuff_tce == -1)
                papr_supports_stuff_tce = !(phyp_hcall(H_STUFF_TCE,
                    window->map->iobn, 0, 0, 0) == H_FUNCTION);

        bus_dma_tag_set_iommu(tag, bus, window);

        return (0);
}

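/*
 * For each DMA segment, allocate a page-aligned range of I/O bus addresses
 * from the window's vmem arena, program one read/write TCE per page through
 * the H_PUT_TCE hypervisor call, and rewrite the segment address to point
 * into the newly mapped range.
 */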
int
phyp_iommu_map(device_t dev, bus_dma_segment_t *segs, int *nsegs,
    bus_addr_t min, bus_addr_t max, bus_size_t alignment, bus_addr_t boundary,
    void *cookie)
{
        struct dma_window *window = cookie;
        bus_addr_t minaddr, maxaddr;
        bus_addr_t alloced;
        bus_size_t allocsize;
        int error, i, j;
        uint64_t tce;
        minaddr = window->start;
        maxaddr = window->end;

        /* XXX: handle exclusion range in a more useful way */
        if (min < maxaddr)
                maxaddr = min;

        /* XXX: consolidate segs? */
        for (i = 0; i < *nsegs; i++) {
                allocsize = round_page(segs[i].ds_len +
                    (segs[i].ds_addr & PAGE_MASK));
                error = vmem_xalloc(window->map->vmem, allocsize,
                    (alignment < PAGE_SIZE) ? PAGE_SIZE : alignment, 0,
                    boundary, minaddr, maxaddr, M_BESTFIT | M_NOWAIT, &alloced);
                if (error != 0) {
                        panic("VMEM failure: %d\n", error);
                        return (error);
                }
                KASSERT(alloced % PAGE_SIZE == 0, ("Alloc not page aligned"));
                KASSERT((alloced + (segs[i].ds_addr & PAGE_MASK)) %
                    alignment == 0,
                    ("Allocated segment does not match alignment constraint"));

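                /*
                 * A TCE entry is the real page address of the mapping with
                 * the two low-order bits used as access permissions; OR-ing
                 * in 0x3 makes the page both readable and writable by the
                 * device.
                 */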
                tce = trunc_page(segs[i].ds_addr);
                tce |= 0x3; /* read/write */
                for (j = 0; j < allocsize; j += PAGE_SIZE) {
                        error = phyp_hcall(H_PUT_TCE, window->map->iobn,
                            alloced + j, tce + j);
                        if (error < 0) {
                                panic("IOMMU mapping error: %d\n", error);
                                return (ENOMEM);
                        }
                }

                segs[i].ds_addr = alloced + (segs[i].ds_addr & PAGE_MASK);
                KASSERT(segs[i].ds_addr > 0, ("Address needs to be positive"));
                KASSERT(segs[i].ds_addr + segs[i].ds_len < maxaddr,
                    ("Address not in range"));
                if (error < 0) {
                        panic("IOMMU mapping error: %d\n", error);
                        return (ENOMEM);
                }
        }

        return (0);
}

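/*
 * Clear the TCEs covering each segment, using a single H_STUFF_TCE call when
 * the firmware supports it and falling back to one H_PUT_TCE per page
 * otherwise, then return the I/O bus address range to the vmem arena.
 */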
int
phyp_iommu_unmap(device_t dev, bus_dma_segment_t *segs, int nsegs, void *cookie)
{
        struct dma_window *window = cookie;
        bus_addr_t pageround;
        bus_size_t roundedsize;
        int i;
        bus_addr_t j;

        for (i = 0; i < nsegs; i++) {
                pageround = trunc_page(segs[i].ds_addr);
                roundedsize = round_page(segs[i].ds_len +
                    (segs[i].ds_addr & PAGE_MASK));

                if (papr_supports_stuff_tce) {
                        phyp_hcall(H_STUFF_TCE, window->map->iobn, pageround, 0,
                            roundedsize/PAGE_SIZE);
                } else {
                        for (j = 0; j < roundedsize; j += PAGE_SIZE)
                                phyp_hcall(H_PUT_TCE, window->map->iobn,
                                    pageround + j, 0);
                }

                vmem_xfree(window->map->vmem, pageround, roundedsize);
        }

        return (0);
}
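
A rough sketch of how these entry points are meant to be consumed (not part of
this file): a pseries bus driver attaches the IOMMU cookie to its DMA tag with
phyp_iommu_set_dma_tag() and exposes phyp_iommu_map()/phyp_iommu_unmap() to the
busdma code, which can dispatch to them through the IOMMU device recorded on
the tag. Everything below other than those three functions is illustrative:
the softc layout, the example_* identifiers, and the iommu_if-style method
names are assumptions, not code from this driver.

#include <sys/param.h>
#include <sys/bus.h>

#include <powerpc/pseries/plpar_iommu.h>

#include "iommu_if.h"                   /* assumed powerpc IOMMU interface */

struct example_softc {
        bus_dma_tag_t   sc_dmat;        /* bus-wide parent DMA tag */
};

/* Hand children a DMA tag whose mappings go through this bus's DMA window. */
static bus_dma_tag_t
example_get_dma_tag(device_t bus, device_t child)
{
        struct example_softc *sc = device_get_softc(bus);

        if (phyp_iommu_set_dma_tag(bus, child, sc->sc_dmat) != 0)
                return (NULL);
        return (sc->sc_dmat);
}

static device_method_t example_methods[] = {
        /* Bus interface */
        DEVMETHOD(bus_get_dma_tag,      example_get_dma_tag),

        /* IOMMU interface: busdma calls these when loading/unloading maps */
        DEVMETHOD(iommu_map,            phyp_iommu_map),
        DEVMETHOD(iommu_unmap,          phyp_iommu_unmap),

        DEVMETHOD_END
};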
