FreeBSD/Linux Kernel Cross Reference
sys/mips/sibyte/sb_zbpci.c


    1 /*-
    2  * Copyright (c) 2009 Neelkanth Natu
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/param.h>
   28 #include <sys/types.h>
   29 #include <sys/kernel.h>
   30 #include <sys/systm.h>
   31 #include <sys/module.h>
   32 #include <sys/bus.h>
   33 #include <sys/rman.h>
   34 #include <sys/pcpu.h>
   35 #include <sys/smp.h>
   36 
   37 #include <vm/vm.h>
   38 #include <vm/vm_param.h>
   39 #include <vm/vm_kern.h>
   40 #include <vm/vm_extern.h>
   41 #include <vm/pmap.h>
   42 
   43 #include <dev/pci/pcireg.h>
   44 #include <dev/pci/pcivar.h>
   45 #include <dev/pci/pcib_private.h>
   46 
   47 #include <machine/pmap.h>
   48 #include <machine/resource.h>
   49 #include <machine/bus.h>
   50 
   51 #include "pcib_if.h"
   52 
   53 #include "sb_bus_space.h"
   54 #include "sb_scd.h"
   55 
   56 __FBSDID("$FreeBSD$");
   57 
   58 static struct {
   59         vm_offset_t vaddr;
   60         vm_paddr_t  paddr;
   61 } zbpci_config_space[MAXCPU];
   62 
   63 static const vm_paddr_t CFG_PADDR_BASE = 0xFE000000;
   64 static const u_long PCI_IOSPACE_ADDR = 0xFC000000;
   65 static const u_long PCI_IOSPACE_SIZE = 0x02000000;
   66 
   67 #define PCI_MATCH_BYTE_LANES_START      0x40000000
   68 #define PCI_MATCH_BYTE_LANES_END        0x5FFFFFFF
   69 #define PCI_MATCH_BYTE_LANES_SIZE       0x20000000
   70 
   71 #define PCI_MATCH_BIT_LANES_MASK        (1 << 29)
   72 #define PCI_MATCH_BIT_LANES_START       0x60000000
   73 #define PCI_MATCH_BIT_LANES_END         0x7FFFFFFF
   74 #define PCI_MATCH_BIT_LANES_SIZE        0x20000000
   75 
   76 static struct rman port_rman;
   77 
   78 static int
   79 zbpci_probe(device_t dev)
   80 {
   81         
   82         device_set_desc(dev, "Broadcom/Sibyte PCI I/O Bridge");
   83         return (0);
   84 }
   85 
   86 static int
   87 zbpci_attach(device_t dev)
   88 {
   89         int n, rid, size;
   90         vm_offset_t va;
   91         struct resource *res;
   92         
   93         /*
   94          * Reserve the physical memory window used to map PCI I/O space.
   95          */
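              /* This is a 32MB window at PCI_IOSPACE_ADDR (0xFC000000). */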
   96         rid = 0;
   97         res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
   98                                  PCI_IOSPACE_ADDR,
   99                                  PCI_IOSPACE_ADDR + PCI_IOSPACE_SIZE - 1,
  100                                  PCI_IOSPACE_SIZE, 0);
  101         if (res == NULL)
  102                 panic("Cannot allocate resource for PCI I/O space mapping.");
  103 
  104         port_rman.rm_start = 0;
  105         port_rman.rm_end = PCI_IOSPACE_SIZE - 1;
  106         port_rman.rm_type = RMAN_ARRAY;
  107         port_rman.rm_descr = "PCI I/O ports";
  108         if (rman_init(&port_rman) != 0 ||
  109             rman_manage_region(&port_rman, 0, PCI_IOSPACE_SIZE - 1) != 0)
  110                 panic("%s: port_rman", __func__);
  111 
  112         /*
  113          * Reserve the physical memory that is used to read/write to the
  114          * PCI config space but don't activate it. We use one page of KVA
  115          * per CPU as a window over this region.
  116          */
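              /*
               * The reserved window is 16MB: 256 buses x 32 slots x 8 functions
               * x 256 bytes of config space each.
               */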
  117         rid = 1;
  118         size = (PCI_BUSMAX + 1) * (PCI_SLOTMAX + 1) * (PCI_FUNCMAX + 1) * 256;
  119         res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, CFG_PADDR_BASE,
  120                                  CFG_PADDR_BASE + size - 1, size, 0);
  121         if (res == NULL)
  122                 panic("Cannot allocate resource for config space accesses.");
  123 
  124         /*
  125          * Allocate the entire "match bit lanes" address space.
  126          */
  127 #if _BYTE_ORDER == _BIG_ENDIAN
  128         rid = 2;
  129         res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 
  130                                  PCI_MATCH_BIT_LANES_START,
  131                                  PCI_MATCH_BIT_LANES_END,
  132                                  PCI_MATCH_BIT_LANES_SIZE, 0);
  133         if (res == NULL)
  134                 panic("Cannot allocate resource for pci match bit lanes.");
  135 #endif  /* _BYTE_ORDER == _BIG_ENDIAN */
  136 
  137         /*
  138          * Allocate KVA for accessing PCI config space.
  139          */
  140         va = kmem_alloc_nofault(kernel_map, PAGE_SIZE * mp_ncpus);
  141         if (va == 0) {
  142                 device_printf(dev, "Cannot allocate virtual addresses for "
  143                                    "config space access.\n");
  144                 return (ENOMEM);
  145         }
  146 
  147         for (n = 0; n < mp_ncpus; ++n)
  148                 zbpci_config_space[n].vaddr = va + n * PAGE_SIZE;
  149 
  150         /*
  151          * The Sibyte has the PCI bus hierarchy rooted at bus 0 and the
  152          * HT-PCI hierarchy rooted at bus 1.
  153          */
  154         if (device_add_child(dev, "pci", 0) == NULL)
  155                 panic("zbpci_attach: could not add pci bus 0.\n");
  156 
  157         if (device_add_child(dev, "pci", 1) == NULL)
  158                 panic("zbpci_attach: could not add pci bus 1.\n");
  159 
  160         if (bootverbose)
  161                 device_printf(dev, "attached.\n");
  162 
  163         return (bus_generic_attach(dev));
  164 }
  165 
  166 static struct resource *
  167 zbpci_alloc_resource(device_t bus, device_t child, int type, int *rid,
  168                      u_long start, u_long end, u_long count, u_int flags)
  169 {
  170         struct resource *res;
  171 
  172         /*
  173          * Handle PCI I/O port resources here and pass everything else to nexus.
  174          */
  175         if (type != SYS_RES_IOPORT) {
  176                 res = bus_generic_alloc_resource(bus, child, type, rid,
  177                                                  start, end, count, flags);
  178                 return (res);
  179         }
  180 
  181         res = rman_reserve_resource(&port_rman, start, end, count,
  182                                     flags, child);
  183         if (res == NULL)
  184                 return (NULL);
  185 
  186         rman_set_rid(res, *rid);
  187 
  188         /* Activate the resource if requested. */
  189         if (flags & RF_ACTIVE) {
  190                 if (bus_activate_resource(child, type, *rid, res) != 0) {
  191                         rman_release_resource(res);
  192                         return (NULL);
  193                 }
  194         }
  195 
  196         return (res);
  197 }
  198 
  199 static int
  200 zbpci_activate_resource(device_t bus, device_t child, int type, int rid,
  201                         struct resource *res)
  202 {
  203         int error;
  204         void *vaddr;
  205         u_long orig_paddr, paddr, psize;
  206 
  207         paddr = rman_get_start(res);
  208         psize = rman_get_size(res);
  209         orig_paddr = paddr;
  210 
  211 #if _BYTE_ORDER == _BIG_ENDIAN
  212         /*
  213          * The CFE allocates PCI memory resources that map to the
  214          * "match byte lanes" address space. This address space works
  215          * best for DMA transfers because it does not do any automatic
  216          * byte swaps when data crosses the pci-cpu interface.
  217          *
  218          * This also makes it sub-optimal for accesses to PCI device
  219          * registers because it exposes the little-endian nature of
  220          * the PCI bus to the big-endian CPU. The Sibyte has another
  221          * address window called the "match bit lanes" window which
  222          * automatically swaps bytes when data crosses the pci-cpu
  223          * interface.
  224          *
  225          * We "assume" that any bus_space memory accesses done by the
  226          * CPU to a PCI device are register/configuration accesses and
  227          * are done through the "match bit lanes" window. Any DMA
  228          * transfers will continue to be through the "match byte lanes"
  229          * window because the PCI BAR registers will not be changed.
  230          */
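              /*
               * ORing in PCI_MATCH_BIT_LANES_MASK (1 << 29) relocates the
               * mapping into the bit-lanes window; e.g. a BAR at 0x40000000
               * in the byte-lanes window is accessed at 0x60000000 instead.
               */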
  231         if (type == SYS_RES_MEMORY) {
  232                 if (paddr >= PCI_MATCH_BYTE_LANES_START &&
  233                     paddr + psize - 1 <= PCI_MATCH_BYTE_LANES_END) {
  234                         paddr |= PCI_MATCH_BIT_LANES_MASK;
  235                         rman_set_start(res, paddr);
  236                         rman_set_end(res, paddr + psize - 1);
  237                 }
  238         }
  239 #endif
  240 
  241         if (type != SYS_RES_IOPORT) {
  242                 error = bus_generic_activate_resource(bus, child, type,
  243                                                       rid, res);
  244 #if _BYTE_ORDER == _BIG_ENDIAN
  245                 if (type == SYS_RES_MEMORY) {
  246                         rman_set_start(res, orig_paddr);
  247                         rman_set_end(res, orig_paddr + psize - 1);
  248                 }
  249 #endif
  250                 return (error);
  251         }
  252 
  253         /*
  254          * Map the I/O space resource through the memory window starting
  255          * at PCI_IOSPACE_ADDR.
  256          */
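              /*
               * The port number is simply an offset into this window; e.g.
               * I/O port 0x3F8 is accessed at physical address 0xFC0003F8.
               */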
  257         vaddr = pmap_mapdev(paddr + PCI_IOSPACE_ADDR, psize);
  258 
  259         rman_set_virtual(res, vaddr);
  260         rman_set_bustag(res, mips_bus_space_generic);
  261         rman_set_bushandle(res, (bus_space_handle_t)vaddr);
  262 
  263         return (rman_activate_resource(res));
  264 }
  265 
  266 static int
  267 zbpci_release_resource(device_t bus, device_t child, int type, int rid,
  268                        struct resource *r)
  269 {
  270         int error;
  271 
  272         if (type != SYS_RES_IOPORT)
  273                 return (bus_generic_release_resource(bus, child, type, rid, r));
  274 
  275         if (rman_get_flags(r) & RF_ACTIVE) {
  276                 error = bus_deactivate_resource(child, type, rid, r);
  277                 if (error)
  278                         return (error);
  279         }
  280 
  281         return (rman_release_resource(r));
  282 }
  283 
  284 static int
  285 zbpci_deactivate_resource(device_t bus, device_t child, int type, int rid,
  286                           struct resource *r)
  287 {
  288         vm_offset_t va;
  289 
  290         if (type != SYS_RES_IOPORT) {
  291                 return (bus_generic_deactivate_resource(bus, child, type,
  292                                                         rid, r));
  293         }
  294         
  295         va = (vm_offset_t)rman_get_virtual(r);
  296         pmap_unmapdev(va, rman_get_size(r));
  297 
  298         return (rman_deactivate_resource(r));
  299 }
  300 
  301 static int
  302 zbpci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
  303 {
  304         
  305         switch (which) {
  306         case PCIB_IVAR_DOMAIN:
  307                 *result = 0;                            /* single PCI domain */
  308                 return (0);
  309         case PCIB_IVAR_BUS:
  310                 *result = device_get_unit(child);       /* PCI bus 0 or 1 */
  311                 return (0);
  312         default:
  313                 return (ENOENT);
  314         }
  315 }
  316 
  317 /*
  318  * We rely on the CFE to have configured the intline correctly to point to
  319  * one of PCI-A/PCI-B/PCI-C/PCI-D in the interrupt mapper.
  320  */
  321 static int
  322 zbpci_route_interrupt(device_t pcib, device_t dev, int pin)
  323 {
  324 
  325         return (PCI_INVALID_IRQ);
  326 }
  327 
  328 /*
  329  * This function is expected to be called in a critical section since it
  330  * changes the per-cpu pci config space va-to-pa mappings.
  331  */
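      /*
       * The config space address is CFG_PADDR_BASE with the bus, slot,
       * function and register packed in below it; e.g. a 32-bit read of
       * bus 0, slot 1, func 0, reg 0x10 goes to physical address 0xFE000810.
       */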
  332 static vm_offset_t
  333 zbpci_config_space_va(int bus, int slot, int func, int reg, int bytes)
  334 {
  335         int cpu;
  336         vm_offset_t va_page;
  337         vm_paddr_t pa, pa_page;
  338 
  339         if (bus <= PCI_BUSMAX && slot <= PCI_SLOTMAX && func <= PCI_FUNCMAX &&
  340             reg <= PCI_REGMAX && (bytes == 1 || bytes == 2 || bytes == 4) &&
  341             ((reg & (bytes - 1)) == 0)) {
  342                 cpu = PCPU_GET(cpuid);
  343                 va_page = zbpci_config_space[cpu].vaddr;
  344                 pa = CFG_PADDR_BASE |
  345                      (bus << 16) | (slot << 11) | (func << 8) | reg;
  346 #if _BYTE_ORDER == _BIG_ENDIAN
  347                 pa = pa ^ (4 - bytes);
  348 #endif
  349                 pa_page = pa & ~(PAGE_SIZE - 1);
  350                 if (zbpci_config_space[cpu].paddr != pa_page) {
  351                         pmap_kremove(va_page);
  352                         pmap_kenter_attr(va_page, pa_page, PTE_C_UNCACHED);
  353                         zbpci_config_space[cpu].paddr = pa_page;
  354                 }
  355                 return (va_page + (pa - pa_page));
  356         } else {
  357                 return (0);
  358         }
  359 }
  360 
  361 static uint32_t
  362 zbpci_read_config(device_t dev, u_int b, u_int s, u_int f, u_int r, int w)
  363 {
  364         uint32_t data;
  365         vm_offset_t va;
  366 
  367         critical_enter();
  368 
  369         va = zbpci_config_space_va(b, s, f, r, w);
  370         if (va == 0) {
  371                 panic("zbpci_read_config: invalid %d/%d/%d[%d] %d\n",
  372                       b, s, f, r, w);
  373         }
  374 
  375         switch (w) {
  376         case 4:
  377                 data = *(uint32_t *)va;
  378                 break;
  379         case 2:
  380                 data = *(uint16_t *)va;
  381                 break;
  382         case 1:
  383                 data = *(uint8_t *)va;
  384                 break;
  385         default:
  386                 panic("zbpci_read_config: invalid width %d\n", w);
  387         }
  388 
  389         critical_exit();
  390 
  391         return (data);
  392 }
  393 
  394 static void
  395 zbpci_write_config(device_t d, u_int b, u_int s, u_int f, u_int r,
  396                    uint32_t data, int w)
  397 {
  398         vm_offset_t va;
  399 
  400         critical_enter();
  401 
  402         va = zbpci_config_space_va(b, s, f, r, w);
  403         if (va == 0) {
  404                 panic("zbpci_write_config: invalid %d/%d/%d[%d] %d/%d\n",
  405                       b, s, f, r, data, w);
  406         }
  407 
  408         switch (w) {
  409         case 4:
  410                 *(uint32_t *)va = data;
  411                 break;
  412         case 2:
  413                 *(uint16_t *)va = data;
  414                 break;
  415         case 1:
  416                 *(uint8_t *)va = data;
  417                 break;
  418         default:
  419                 panic("zbpci_write_config: invalid width %d\n", w);
  420         }
  421 
  422         critical_exit();
  423 }
  424 
  425 static device_method_t zbpci_methods[] = {
  426         /* Device interface */
  427         DEVMETHOD(device_probe,         zbpci_probe),
  428         DEVMETHOD(device_attach,        zbpci_attach),
  429         DEVMETHOD(device_detach,        bus_generic_detach),
  430         DEVMETHOD(device_shutdown,      bus_generic_shutdown),
  431         DEVMETHOD(device_suspend,       bus_generic_suspend),
  432         DEVMETHOD(device_resume,        bus_generic_resume),
  433 
  434         /* Bus interface */
  435         DEVMETHOD(bus_read_ivar,        zbpci_read_ivar),
  436         DEVMETHOD(bus_write_ivar,       bus_generic_write_ivar),
  437         DEVMETHOD(bus_alloc_resource,   zbpci_alloc_resource),
  438         DEVMETHOD(bus_activate_resource, zbpci_activate_resource),
  439         DEVMETHOD(bus_deactivate_resource, zbpci_deactivate_resource),
  440         DEVMETHOD(bus_release_resource, zbpci_release_resource),
  441         DEVMETHOD(bus_setup_intr,       bus_generic_setup_intr),
  442         DEVMETHOD(bus_teardown_intr,    bus_generic_teardown_intr),
  443         DEVMETHOD(bus_add_child,        bus_generic_add_child),
  444 
  445         /* pcib interface */
  446         DEVMETHOD(pcib_maxslots,        pcib_maxslots),
  447         DEVMETHOD(pcib_read_config,     zbpci_read_config),
  448         DEVMETHOD(pcib_write_config,    zbpci_write_config),
  449         DEVMETHOD(pcib_route_interrupt, zbpci_route_interrupt),
  450         
  451         { 0, 0 }
  452 };
  453 
  454 /*
  455  * The "zbpci" class inherits from the "pcib" base class. Therefore in
  456  * addition to drivers that belong to the "zbpci" class we will also
  457  * consider drivers belonging to the "pcib" when probing children of
  458  * "zbpci".
  459  */
  460 DEFINE_CLASS_1(zbpci, zbpci_driver, zbpci_methods, 0, pcib_driver);
  461 
  462 static devclass_t zbpci_devclass;
  463 
  464 DRIVER_MODULE(zbpci, zbbus, zbpci_driver, zbpci_devclass, 0, 0);
  465 
  466 /*
  467  * Big endian bus space routines
  468  */
  469 #if _BYTE_ORDER == _BIG_ENDIAN
  470 
  471 /*
  472  * The CPU correctly deals with the big-endian to little-endian swap if
  473  * we are accessing 4 bytes at a time. However, if we want to read 1 or 2
  474  * bytes, we need to fudge the address generated by the CPU so that it
  475  * produces the right byte enables on the PCI bus.
  476  */
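      /*
       * For example, a 1-byte access at offset 0 is issued at offset 3
       * (addr ^ 3) and a 2-byte access at offset 0 at offset 2, so the byte
       * enables line up with what the little-endian device expects.
       */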
  477 static bus_addr_t
  478 sb_match_bit_lane_addr(bus_addr_t addr, int bytes)
  479 {
  480         vm_offset_t pa;
  481 
  482         pa = vtophys(addr);
  483         
  484         if (pa >= PCI_MATCH_BIT_LANES_START && pa <= PCI_MATCH_BIT_LANES_END)
  485                 return (addr ^ (4 - bytes));
  486         else
  487                 return (addr);
  488 }
  489 
  490 uint8_t
  491 sb_big_endian_read8(bus_addr_t addr)
  492 {
  493         bus_addr_t addr2;
  494 
  495         addr2 = sb_match_bit_lane_addr(addr, 1);
  496         return (readb(addr2));
  497 }
  498 
  499 uint16_t
  500 sb_big_endian_read16(bus_addr_t addr)
  501 {
  502         bus_addr_t addr2;
  503 
  504         addr2 = sb_match_bit_lane_addr(addr, 2);
  505         return (readw(addr2));
  506 }
  507 
  508 uint32_t
  509 sb_big_endian_read32(bus_addr_t addr)
  510 {
  511         bus_addr_t addr2;
  512 
  513         addr2 = sb_match_bit_lane_addr(addr, 4);
  514         return (readl(addr2));
  515 }
  516 
  517 void
  518 sb_big_endian_write8(bus_addr_t addr, uint8_t val)
  519 {
  520         bus_addr_t addr2;
  521 
  522         addr2 = sb_match_bit_lane_addr(addr, 1);
  523         writeb(addr2, val);
  524 }
  525 
  526 void
  527 sb_big_endian_write16(bus_addr_t addr, uint16_t val)
  528 {
  529         bus_addr_t addr2;
  530 
  531         addr2 = sb_match_bit_lane_addr(addr, 2);
  532         writew(addr2, val);
  533 }
  534 
  535 void
  536 sb_big_endian_write32(bus_addr_t addr, uint32_t val)
  537 {
  538         bus_addr_t addr2;
  539 
  540         addr2 = sb_match_bit_lane_addr(addr, 4);
  541         writel(addr2, val);
  542 }
  543 #endif  /* _BIG_ENDIAN */
