FreeBSD/Linux Kernel Cross Reference
sys/mips/sibyte/sb_zbpci.c

/*-
 * Copyright (c) 2009 Neelkanth Natu
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcib_private.h>

#include <machine/resource.h>
#include <machine/bus.h>

#include "pcib_if.h"

#include "sb_bus_space.h"
#include "sb_scd.h"

__FBSDID("$FreeBSD: releng/11.0/sys/mips/sibyte/sb_zbpci.c 298433 2016-04-21 19:57:40Z pfg $");

static struct {
        vm_offset_t vaddr;
        vm_paddr_t  paddr;
} zbpci_config_space[MAXCPU];

static const vm_paddr_t CFG_PADDR_BASE = 0xFE000000;
static const u_long PCI_IOSPACE_ADDR = 0xFC000000;
static const u_long PCI_IOSPACE_SIZE = 0x02000000;

#define PCI_MATCH_BYTE_LANES_START      0x40000000
#define PCI_MATCH_BYTE_LANES_END        0x5FFFFFFF
#define PCI_MATCH_BYTE_LANES_SIZE       0x20000000

#define PCI_MATCH_BIT_LANES_MASK        (1 << 29)
#define PCI_MATCH_BIT_LANES_START       0x60000000
#define PCI_MATCH_BIT_LANES_END         0x7FFFFFFF
#define PCI_MATCH_BIT_LANES_SIZE        0x20000000

static struct rman port_rman;

static int
zbpci_probe(device_t dev)
{

        device_set_desc(dev, "Broadcom/Sibyte PCI I/O Bridge");
        return (0);
}

static int
zbpci_attach(device_t dev)
{
        int n, rid, size;
        vm_offset_t va;
        struct resource *res;

        /*
         * Reserve the physical memory window used to map PCI I/O space.
         */
        rid = 0;
        res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
                                 PCI_IOSPACE_ADDR,
                                 PCI_IOSPACE_ADDR + PCI_IOSPACE_SIZE - 1,
                                 PCI_IOSPACE_SIZE, 0);
        if (res == NULL)
                panic("Cannot allocate resource for PCI I/O space mapping.");

        port_rman.rm_start = 0;
        port_rman.rm_end = PCI_IOSPACE_SIZE - 1;
        port_rman.rm_type = RMAN_ARRAY;
        port_rman.rm_descr = "PCI I/O ports";
        if (rman_init(&port_rman) != 0 ||
            rman_manage_region(&port_rman, 0, PCI_IOSPACE_SIZE - 1) != 0)
                panic("%s: port_rman", __func__);

        /*
         * Reserve the physical memory that is used to read/write to the
         * pci config space but don't activate it. We are using a page worth
         * of KVA as a window over this region.
         */
        rid = 1;
        size = (PCI_BUSMAX + 1) * (PCI_SLOTMAX + 1) * (PCI_FUNCMAX + 1) * 256;
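        /*
         * Illustrative note (not in the original source): with the limits
         * defined in <dev/pci/pcireg.h> (PCI_BUSMAX 255, PCI_SLOTMAX 31,
         * PCI_FUNCMAX 7) and 256 bytes of config space per function, this
         * is 256 * 32 * 8 * 256 bytes, i.e. a 16MB window starting at
         * CFG_PADDR_BASE.
         */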
        res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, CFG_PADDR_BASE,
                                 CFG_PADDR_BASE + size - 1, size, 0);
        if (res == NULL)
                panic("Cannot allocate resource for config space accesses.");

        /*
         * Allocate the entire "match bit lanes" address space.
         */
#if _BYTE_ORDER == _BIG_ENDIAN
        rid = 2;
        res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
                                 PCI_MATCH_BIT_LANES_START,
                                 PCI_MATCH_BIT_LANES_END,
                                 PCI_MATCH_BIT_LANES_SIZE, 0);
        if (res == NULL)
                panic("Cannot allocate resource for pci match bit lanes.");
#endif  /* _BYTE_ORDER == _BIG_ENDIAN */

        /*
         * Allocate KVA for accessing PCI config space.
         */
        va = kva_alloc(PAGE_SIZE * mp_ncpus);
        if (va == 0) {
                device_printf(dev, "Cannot allocate virtual addresses for "
                                   "config space access.\n");
                return (ENOMEM);
        }

        for (n = 0; n < mp_ncpus; ++n)
                zbpci_config_space[n].vaddr = va + n * PAGE_SIZE;

        /*
         * Sibyte has the PCI bus hierarchy rooted at bus 0 and HT-PCI
         * hierarchy rooted at bus 1.
         */
        if (device_add_child(dev, "pci", 0) == NULL)
                panic("zbpci_attach: could not add pci bus 0.\n");

        if (device_add_child(dev, "pci", 1) == NULL)
                panic("zbpci_attach: could not add pci bus 1.\n");

        if (bootverbose)
                device_printf(dev, "attached.\n");

        return (bus_generic_attach(dev));
}

static struct resource *
zbpci_alloc_resource(device_t bus, device_t child, int type, int *rid,
                     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
        struct resource *res;

        /*
         * Handle PCI I/O port resources here and pass everything else to nexus.
         */
        if (type != SYS_RES_IOPORT) {
                res = bus_generic_alloc_resource(bus, child, type, rid,
                                                 start, end, count, flags);
                return (res);
        }

        res = rman_reserve_resource(&port_rman, start, end, count,
                                    flags, child);
        if (res == NULL)
                return (NULL);

        rman_set_rid(res, *rid);

        /* Activate the resource if requested. */
        if (flags & RF_ACTIVE) {
                if (bus_activate_resource(child, type, *rid, res) != 0) {
                        rman_release_resource(res);
                        return (NULL);
                }
        }

        return (res);
}

static int
zbpci_activate_resource(device_t bus, device_t child, int type, int rid,
                        struct resource *res)
{
        int error;
        void *vaddr;
        u_long orig_paddr, paddr, psize;

        paddr = rman_get_start(res);
        psize = rman_get_size(res);
        orig_paddr = paddr;

#if _BYTE_ORDER == _BIG_ENDIAN
        /*
         * The CFE allocates PCI memory resources that map to the
         * "match byte lanes" address space. This address space works
         * best for DMA transfers because it does not do any automatic
         * byte swaps when data crosses the pci-cpu interface.
         *
         * This also makes it sub-optimal for accesses to PCI device
         * registers because it exposes the little-endian nature of
         * the PCI bus to the big-endian CPU. The Sibyte has another
         * address window called the "match bit lanes" window which
         * automatically swaps bytes when data crosses the pci-cpu
         * interface.
         *
         * We "assume" that any bus_space memory accesses done by the
         * CPU to a PCI device are register/configuration accesses and
         * are done through the "match bit lanes" window. Any DMA
         * transfers will continue to be through the "match byte lanes"
         * window because the PCI BAR registers will not be changed.
         */
        if (type == SYS_RES_MEMORY) {
                if (paddr >= PCI_MATCH_BYTE_LANES_START &&
                    paddr + psize - 1 <= PCI_MATCH_BYTE_LANES_END) {
                        paddr |= PCI_MATCH_BIT_LANES_MASK;
                        rman_set_start(res, paddr);
                        rman_set_end(res, paddr + psize - 1);
                }
        }
#endif
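        /*
         * Illustrative example (not in the original source): a BAR that the
         * CFE placed at 0x40000000 in the "match byte lanes" window is
         * rewritten above to 0x40000000 | PCI_MATCH_BIT_LANES_MASK ==
         * 0x60000000, so the generic activation below maps the "match bit
         * lanes" alias; the original start/end are then restored.
         */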

        if (type != SYS_RES_IOPORT) {
                error = bus_generic_activate_resource(bus, child, type,
                                                      rid, res);
#if _BYTE_ORDER == _BIG_ENDIAN
                if (type == SYS_RES_MEMORY) {
                        rman_set_start(res, orig_paddr);
                        rman_set_end(res, orig_paddr + psize - 1);
                }
#endif
                return (error);
        }

        /*
         * Map the I/O space resource through the memory window starting
         * at PCI_IOSPACE_ADDR.
         */
        vaddr = pmap_mapdev(paddr + PCI_IOSPACE_ADDR, psize);

        rman_set_virtual(res, vaddr);
        rman_set_bustag(res, mips_bus_space_generic);
        rman_set_bushandle(res, (bus_space_handle_t)vaddr);

        return (rman_activate_resource(res));
}

static int
zbpci_release_resource(device_t bus, device_t child, int type, int rid,
                       struct resource *r)
{
        int error;

        if (type != SYS_RES_IOPORT)
                return (bus_generic_release_resource(bus, child, type, rid, r));

        if (rman_get_flags(r) & RF_ACTIVE) {
                error = bus_deactivate_resource(child, type, rid, r);
                if (error)
                        return (error);
        }

        return (rman_release_resource(r));
}

static int
zbpci_deactivate_resource(device_t bus, device_t child, int type, int rid,
                          struct resource *r)
{
        vm_offset_t va;

        if (type != SYS_RES_IOPORT) {
                return (bus_generic_deactivate_resource(bus, child, type,
                                                        rid, r));
        }

        va = (vm_offset_t)rman_get_virtual(r);
        pmap_unmapdev(va, rman_get_size(r));

        return (rman_deactivate_resource(r));
}

static int
zbpci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{

        switch (which) {
        case PCIB_IVAR_DOMAIN:
                *result = 0;                            /* single PCI domain */
                return (0);
        case PCIB_IVAR_BUS:
                *result = device_get_unit(child);       /* PCI bus 0 or 1 */
                return (0);
        default:
                return (ENOENT);
        }
}

/*
 * We rely on the CFE to have configured the intline correctly to point to
 * one of PCI-A/PCI-B/PCI-C/PCI-D in the interrupt mapper.
 */
static int
zbpci_route_interrupt(device_t pcib, device_t dev, int pin)
{

        return (PCI_INVALID_IRQ);
}

/*
 * This function is expected to be called in a critical section since it
 * changes the per-cpu pci config space va-to-pa mappings.
 */
static vm_offset_t
zbpci_config_space_va(int bus, int slot, int func, int reg, int bytes)
{
        int cpu;
        vm_offset_t va_page;
        vm_paddr_t pa, pa_page;

        if (bus <= PCI_BUSMAX && slot <= PCI_SLOTMAX && func <= PCI_FUNCMAX &&
            reg <= PCI_REGMAX && (bytes == 1 || bytes == 2 || bytes == 4) &&
            ((reg & (bytes - 1)) == 0)) {
                cpu = PCPU_GET(cpuid);
                va_page = zbpci_config_space[cpu].vaddr;
                pa = CFG_PADDR_BASE |
                     (bus << 16) | (slot << 11) | (func << 8) | reg;
#if _BYTE_ORDER == _BIG_ENDIAN
                pa = pa ^ (4 - bytes);
#endif
                pa_page = rounddown2(pa, PAGE_SIZE);
                if (zbpci_config_space[cpu].paddr != pa_page) {
                        pmap_kremove(va_page);
                        pmap_kenter_attr(va_page, pa_page, PTE_C_UNCACHED);
                        zbpci_config_space[cpu].paddr = pa_page;
                }
                return (va_page + (pa - pa_page));
        } else {
                return (0);
        }
}
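
/*
 * Worked example for zbpci_config_space_va() above (illustrative only):
 * a 2-byte access to register 0x06 of bus 0, slot 5, function 0 computes
 *
 *   pa = 0xFE000000 | (0 << 16) | (5 << 11) | (0 << 8) | 0x06 = 0xFE002806
 *
 * and on a big-endian kernel the XOR with (4 - bytes) adjusts this to
 * 0xFE002804 so that the access drives the correct byte lanes on the
 * little-endian PCI bus.
 */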

static uint32_t
zbpci_read_config(device_t dev, u_int b, u_int s, u_int f, u_int r, int w)
{
        uint32_t data;
        vm_offset_t va;

        critical_enter();

        va = zbpci_config_space_va(b, s, f, r, w);
        if (va == 0) {
                panic("zbpci_read_config: invalid %d/%d/%d[%d] %d\n",
                      b, s, f, r, w);
        }

        switch (w) {
        case 4:
                data = *(uint32_t *)va;
                break;
        case 2:
                data = *(uint16_t *)va;
                break;
        case 1:
                data = *(uint8_t *)va;
                break;
        default:
                panic("zbpci_read_config: invalid width %d\n", w);
        }

        critical_exit();

        return (data);
}

static void
zbpci_write_config(device_t d, u_int b, u_int s, u_int f, u_int r,
                   uint32_t data, int w)
{
        vm_offset_t va;

        critical_enter();

        va = zbpci_config_space_va(b, s, f, r, w);
        if (va == 0) {
                panic("zbpci_write_config: invalid %d/%d/%d[%d] %d/%d\n",
                      b, s, f, r, data, w);
        }

        switch (w) {
        case 4:
                *(uint32_t *)va = data;
                break;
        case 2:
                *(uint16_t *)va = data;
                break;
        case 1:
                *(uint8_t *)va = data;
                break;
        default:
                panic("zbpci_write_config: invalid width %d\n", w);
        }

        critical_exit();
}

static device_method_t zbpci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         zbpci_probe),
        DEVMETHOD(device_attach,        zbpci_attach),
        DEVMETHOD(device_detach,        bus_generic_detach),
        DEVMETHOD(device_shutdown,      bus_generic_shutdown),
        DEVMETHOD(device_suspend,       bus_generic_suspend),
        DEVMETHOD(device_resume,        bus_generic_resume),

        /* Bus interface */
        DEVMETHOD(bus_read_ivar,        zbpci_read_ivar),
        DEVMETHOD(bus_write_ivar,       bus_generic_write_ivar),
        DEVMETHOD(bus_alloc_resource,   zbpci_alloc_resource),
        DEVMETHOD(bus_activate_resource, zbpci_activate_resource),
        DEVMETHOD(bus_deactivate_resource, zbpci_deactivate_resource),
        DEVMETHOD(bus_release_resource, zbpci_release_resource),
        DEVMETHOD(bus_setup_intr,       bus_generic_setup_intr),
        DEVMETHOD(bus_teardown_intr,    bus_generic_teardown_intr),
        DEVMETHOD(bus_add_child,        bus_generic_add_child),

        /* pcib interface */
        DEVMETHOD(pcib_maxslots,        pcib_maxslots),
        DEVMETHOD(pcib_read_config,     zbpci_read_config),
        DEVMETHOD(pcib_write_config,    zbpci_write_config),
        DEVMETHOD(pcib_route_interrupt, zbpci_route_interrupt),

        { 0, 0 }
};

/*
 * The "zbpci" class inherits from the "pcib" base class. Therefore in
 * addition to drivers that belong to the "zbpci" class we will also
 * consider drivers belonging to the "pcib" class when probing children
 * of "zbpci".
 */
DEFINE_CLASS_1(zbpci, zbpci_driver, zbpci_methods, 0, pcib_driver);

static devclass_t zbpci_devclass;

DRIVER_MODULE(zbpci, zbbus, zbpci_driver, zbpci_devclass, 0, 0);

/*
 * Big endian bus space routines
 */
#if _BYTE_ORDER == _BIG_ENDIAN

/*
 * The CPU correctly deals with the big-endian to little-endian swap if
 * we are accessing 4 bytes at a time. However if we want to read 1 or 2
 * bytes then we need to fudge the address generated by the CPU such that
 * it generates the right byte enables on the PCI bus.
 */
static bus_addr_t
sb_match_bit_lane_addr(bus_addr_t addr, int bytes)
{
        vm_offset_t pa;

        pa = vtophys(addr);

        if (pa >= PCI_MATCH_BIT_LANES_START && pa <= PCI_MATCH_BIT_LANES_END)
                return (addr ^ (4 - bytes));
        else
                return (addr);
}

uint8_t
sb_big_endian_read8(bus_addr_t addr)
{
        bus_addr_t addr2;

        addr2 = sb_match_bit_lane_addr(addr, 1);
        return (readb(addr2));
}

uint16_t
sb_big_endian_read16(bus_addr_t addr)
{
        bus_addr_t addr2;

        addr2 = sb_match_bit_lane_addr(addr, 2);
        return (readw(addr2));
}

uint32_t
sb_big_endian_read32(bus_addr_t addr)
{
        bus_addr_t addr2;

        addr2 = sb_match_bit_lane_addr(addr, 4);
        return (readl(addr2));
}

void
sb_big_endian_write8(bus_addr_t addr, uint8_t val)
{
        bus_addr_t addr2;

        addr2 = sb_match_bit_lane_addr(addr, 1);
        writeb(addr2, val);
}

void
sb_big_endian_write16(bus_addr_t addr, uint16_t val)
{
        bus_addr_t addr2;

        addr2 = sb_match_bit_lane_addr(addr, 2);
        writew(addr2, val);
}

void
sb_big_endian_write32(bus_addr_t addr, uint32_t val)
{
        bus_addr_t addr2;

        addr2 = sb_match_bit_lane_addr(addr, 4);
        writel(addr2, val);
}
#endif  /* _BIG_ENDIAN */
