FreeBSD/Linux Kernel Cross Reference
sys/dev/twe/twe_freebsd.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2000 Michael Smith
    5  * Copyright (c) 2003 Paul Saab
    6  * Copyright (c) 2003 Vinod Kashyap
    7  * Copyright (c) 2000 BSDi
    8  * All rights reserved.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD$");
   34 
   35 /*
   36  * FreeBSD-specific code.
   37  */
   38 
   39 #include <dev/twe/twe_compat.h>
   40 #include <dev/twe/twereg.h>
   41 #include <dev/twe/tweio.h>
   42 #include <dev/twe/twevar.h>
   43 #include <dev/twe/twe_tables.h>
   44 
   45 #include <vm/vm.h>
   46 
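       /*
        * Optional bio accounting: with TWE_DEBUG defined, these counters track
        * bios entering and leaving the disk strategy/completion paths; the DDB
        * helper twe_report() at the end of this file prints the totals.
        */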
   47 #ifdef TWE_DEBUG
   48 static u_int32_t        twed_bio_in;
   49 #define TWED_BIO_IN     twed_bio_in++
   50 static u_int32_t        twed_bio_out;
   51 #define TWED_BIO_OUT    twed_bio_out++
   52 #else
   53 #define TWED_BIO_IN
   54 #define TWED_BIO_OUT
   55 #endif
   56 
   57 static void     twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
   58 static void     twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
   59 
   60 /********************************************************************************
   61  ********************************************************************************
   62                                                          Control device interface
   63  ********************************************************************************
   64  ********************************************************************************/
   65 
   66 static  d_open_t                twe_open;
   67 static  d_close_t               twe_close;
   68 static  d_ioctl_t               twe_ioctl_wrapper;
   69 
   70 static struct cdevsw twe_cdevsw = {
   71         .d_version =    D_VERSION,
   72         .d_open =       twe_open,
   73         .d_close =      twe_close,
   74         .d_ioctl =      twe_ioctl_wrapper,
   75         .d_name =       "twe",
   76 };
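       /*
        * Illustrative sketch (not part of the driver): management tools reach
        * this interface through the /dev/tweX control node created in
        * twe_attach(), roughly as follows.  The request layout and the TWEIO_*
        * command codes live in tweio.h; the specific ioctl named below is an
        * assumption made for the sketch.
        *
        *      int fd = open("/dev/twe0", O_RDWR);
        *      if (fd >= 0)
        *              ioctl(fd, TWEIO_COMMAND, &usercmd);   -> dispatched to twe_ioctl()
        */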
   77 
   78 /********************************************************************************
   79  * Accept an open operation on the control device.
   80  */
   81 static int
   82 twe_open(struct cdev *dev, int flags, int fmt, struct thread *td)
   83 {
   84     struct twe_softc            *sc = (struct twe_softc *)dev->si_drv1;
   85 
   86     TWE_IO_LOCK(sc);
   87     if (sc->twe_state & TWE_STATE_DETACHING) {
   88         TWE_IO_UNLOCK(sc);
   89         return (ENXIO);
   90     }
   91     sc->twe_state |= TWE_STATE_OPEN;
   92     TWE_IO_UNLOCK(sc);
   93     return(0);
   94 }
   95 
   96 /********************************************************************************
   97  * Accept the last close on the control device.
   98  */
   99 static int
  100 twe_close(struct cdev *dev, int flags, int fmt, struct thread *td)
  101 {
  102     struct twe_softc            *sc = (struct twe_softc *)dev->si_drv1;
  103 
  104     TWE_IO_LOCK(sc);
  105     sc->twe_state &= ~TWE_STATE_OPEN;
  106     TWE_IO_UNLOCK(sc);
  107     return (0);
  108 }
  109 
  110 /********************************************************************************
  111  * Handle controller-specific control operations.
  112  */
  113 static int
  114 twe_ioctl_wrapper(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td)
  115 {
  116     struct twe_softc            *sc = (struct twe_softc *)dev->si_drv1;
  117     
  118     return(twe_ioctl(sc, cmd, addr));
  119 }
  120 
  121 /********************************************************************************
  122  ********************************************************************************
  123                                                              PCI device interface
  124  ********************************************************************************
  125  ********************************************************************************/
  126 
  127 static int      twe_probe(device_t dev);
  128 static int      twe_attach(device_t dev);
  129 static void     twe_free(struct twe_softc *sc);
  130 static int      twe_detach(device_t dev);
  131 static int      twe_shutdown(device_t dev);
  132 static int      twe_suspend(device_t dev);
  133 static int      twe_resume(device_t dev);
  134 static void     twe_pci_intr(void *arg);
  135 static void     twe_intrhook(void *arg);
  136 
  137 static device_method_t twe_methods[] = {
  138     /* Device interface */
  139     DEVMETHOD(device_probe,     twe_probe),
  140     DEVMETHOD(device_attach,    twe_attach),
  141     DEVMETHOD(device_detach,    twe_detach),
  142     DEVMETHOD(device_shutdown,  twe_shutdown),
  143     DEVMETHOD(device_suspend,   twe_suspend),
  144     DEVMETHOD(device_resume,    twe_resume),
  145 
  146     DEVMETHOD_END
  147 };
  148 
  149 static driver_t twe_pci_driver = {
  150         "twe",
  151         twe_methods,
  152         sizeof(struct twe_softc)
  153 };
  154 
  155 DRIVER_MODULE(twe, pci, twe_pci_driver, 0, 0);
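       /*
        * DRIVER_MODULE() registers twe_pci_driver with new-bus on the pci bus,
        * so twe_probe()/twe_attach() below are called for each matching device.
        */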
  156 
  157 /********************************************************************************
  158  * Match a 3ware Escalade ATA RAID controller.
  159  */
  160 static int
  161 twe_probe(device_t dev)
  162 {
  163 
  164     debug_called(4);
  165 
  166     if ((pci_get_vendor(dev) == TWE_VENDOR_ID) &&
  167         ((pci_get_device(dev) == TWE_DEVICE_ID) || 
  168          (pci_get_device(dev) == TWE_DEVICE_ID_ASIC))) {
  169         device_set_desc_copy(dev, TWE_DEVICE_NAME ". Driver version " TWE_DRIVER_VERSION_STRING);
  170         return(BUS_PROBE_DEFAULT);
  171     }
  172     return(ENXIO);
  173 }
  174 
  175 /********************************************************************************
  176  * Allocate resources, initialise the controller.
  177  */
  178 static int
  179 twe_attach(device_t dev)
  180 {
  181     struct twe_softc    *sc;
  182     struct sysctl_oid   *sysctl_tree;
  183     int                 rid, error;
  184 
  185     debug_called(4);
  186 
  187     /*
  188      * Initialise the softc structure.
  189      */
  190     sc = device_get_softc(dev);
  191     sc->twe_dev = dev;
  192     mtx_init(&sc->twe_io_lock, "twe I/O", NULL, MTX_DEF);
  193     sx_init(&sc->twe_config_lock, "twe config");
  194 
  195     /*
  196      * XXX: This sysctl tree must stay at hw.tweX rather than using
  197      * the device_get_sysctl_tree() created by new-bus because
  198      * existing 3rd party binary tools such as tw_cli and 3dm2 use the
  199      * existence of this sysctl node to discover controllers.
  200      */
  201     sysctl_tree = SYSCTL_ADD_NODE(device_get_sysctl_ctx(dev),
  202         SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
  203         device_get_nameunit(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
  204     if (sysctl_tree == NULL) {
  205         twe_printf(sc, "cannot add sysctl tree node\n");
  206         return (ENXIO);
  207     }
  208     SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(sysctl_tree),
  209         OID_AUTO, "driver_version", CTLFLAG_RD, TWE_DRIVER_VERSION_STRING, 0,
  210         "TWE driver version");
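           /*
            * The node added above appears as hw.tweX; on the first controller,
            * for example, "sysctl hw.twe0.driver_version" reports the string
            * registered here.
            */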
  211 
  212     /*
  213      * Force the busmaster enable bit on, in case the BIOS forgot.
  214      */
  215     pci_enable_busmaster(dev);
  216 
  217     /*
  218      * Allocate the PCI register window.
  219      */
  220     rid = TWE_IO_CONFIG_REG;
  221     if ((sc->twe_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 
  222         RF_ACTIVE)) == NULL) {
  223         twe_printf(sc, "can't allocate register window\n");
  224         twe_free(sc);
  225         return(ENXIO);
  226     }
  227 
  228     /*
  229      * Allocate the parent bus DMA tag appropriate for PCI.
  230      */
  231     if (bus_dma_tag_create(bus_get_dma_tag(dev),                /* PCI parent */
  232                            1, 0,                                /* alignment, boundary */
  233                            BUS_SPACE_MAXADDR_32BIT,             /* lowaddr */
  234                            BUS_SPACE_MAXADDR,                   /* highaddr */
  235                            NULL, NULL,                          /* filter, filterarg */
  236                            BUS_SPACE_MAXSIZE_32BIT,             /* maxsize */
  237                            BUS_SPACE_UNRESTRICTED,              /* nsegments */
  238                            BUS_SPACE_MAXSIZE_32BIT,             /* maxsegsize */
  239                            0,                                   /* flags */
  240                            NULL,                                /* lockfunc */
  241                            NULL,                                /* lockarg */
  242                            &sc->twe_parent_dmat)) {
  243         twe_printf(sc, "can't allocate parent DMA tag\n");
  244         twe_free(sc);
  245         return(ENOMEM);
  246     }
  247 
  248     /* 
  249      * Allocate and connect our interrupt.
  250      */
  251     rid = 0;
  252     if ((sc->twe_irq = bus_alloc_resource_any(sc->twe_dev, SYS_RES_IRQ,
  253         &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
  254         twe_printf(sc, "can't allocate interrupt\n");
  255         twe_free(sc);
  256         return(ENXIO);
  257     }
  258     if (bus_setup_intr(sc->twe_dev, sc->twe_irq, INTR_TYPE_BIO | INTR_ENTROPY | INTR_MPSAFE,  
  259                        NULL, twe_pci_intr, sc, &sc->twe_intr)) {
  260         twe_printf(sc, "can't set up interrupt\n");
  261         twe_free(sc);
  262         return(ENXIO);
  263     }
  264 
  265     /*
   266      * Create DMA tag for mapping commands into controller-addressable space.
  267      */
  268     if (bus_dma_tag_create(sc->twe_parent_dmat,         /* parent */
  269                            1, 0,                        /* alignment, boundary */
  270                            BUS_SPACE_MAXADDR_32BIT,     /* lowaddr */
  271                            BUS_SPACE_MAXADDR,           /* highaddr */
  272                            NULL, NULL,                  /* filter, filterarg */
  273                            sizeof(TWE_Command) *
  274                            TWE_Q_LENGTH, 1,             /* maxsize, nsegments */
  275                            BUS_SPACE_MAXSIZE_32BIT,     /* maxsegsize */
  276                            0,                           /* flags */
  277                            NULL,                        /* lockfunc */
  278                            NULL,                        /* lockarg */
  279                            &sc->twe_cmd_dmat)) {
   280         twe_printf(sc, "can't allocate command DMA tag\n");
  281         twe_free(sc);
  282         return(ENOMEM);
  283     }
  284     /*
  285      * Allocate memory and make it available for DMA.
  286      */
  287     if (bus_dmamem_alloc(sc->twe_cmd_dmat, (void **)&sc->twe_cmd,
  288                          BUS_DMA_NOWAIT, &sc->twe_cmdmap)) {
  289         twe_printf(sc, "can't allocate command memory\n");
  290         return(ENOMEM);
  291     }
  292     bus_dmamap_load(sc->twe_cmd_dmat, sc->twe_cmdmap, sc->twe_cmd,
  293                     sizeof(TWE_Command) * TWE_Q_LENGTH,
  294                     twe_setup_request_dmamap, sc, 0);
  295     bzero(sc->twe_cmd, sizeof(TWE_Command) * TWE_Q_LENGTH);
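           /*
            * The command area comes from bus_dmamem_alloc() and the tag allows a
            * single segment, so the load above completes with one contiguous
            * segment; twe_setup_request_dmamap() simply records its bus address
            * in sc->twe_cmdphys for the controller to use.
            */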
  296 
  297     /*
  298      * Create DMA tag for mapping objects into controller-addressable space.
  299      */
  300     if (bus_dma_tag_create(sc->twe_parent_dmat,         /* parent */
  301                            1, 0,                        /* alignment, boundary */
  302                            BUS_SPACE_MAXADDR_32BIT,     /* lowaddr */
  303                            BUS_SPACE_MAXADDR,           /* highaddr */
  304                            NULL, NULL,                  /* filter, filterarg */
  305                            (TWE_MAX_SGL_LENGTH - 1) * PAGE_SIZE,/* maxsize */
  306                            TWE_MAX_SGL_LENGTH,          /* nsegments */
  307                            BUS_SPACE_MAXSIZE_32BIT,     /* maxsegsize */
  308                            BUS_DMA_ALLOCNOW,            /* flags */
  309                            busdma_lock_mutex,           /* lockfunc */
  310                            &sc->twe_io_lock,            /* lockarg */
  311                            &sc->twe_buffer_dmat)) {
  312         twe_printf(sc, "can't allocate data buffer DMA tag\n");
  313         twe_free(sc);
  314         return(ENOMEM);
  315     }
  316 
   317     /*
   318      * Create DMA tag for the buffer used by immediate (non-sleeping) requests.
   319      */
  320     if (bus_dma_tag_create(sc->twe_parent_dmat,         /* parent */
  321                            1, 0,                        /* alignment, boundary */
  322                            BUS_SPACE_MAXADDR_32BIT,     /* lowaddr */
  323                            BUS_SPACE_MAXADDR,           /* highaddr */
  324                            NULL, NULL,                  /* filter, filterarg */
  325                            DFLTPHYS, 1,                 /* maxsize, nsegments */
  326                            BUS_SPACE_MAXSIZE_32BIT,     /* maxsegsize */
  327                            0,                           /* flags */
  328                            NULL,                        /* lockfunc */
  329                            NULL,                        /* lockarg */
  330                            &sc->twe_immediate_dmat)) {
   331         twe_printf(sc, "can't allocate immediate request DMA tag\n");
  332         twe_free(sc);
  333         return(ENOMEM);
  334     }
  335     /*
  336      * Allocate memory for requests which cannot sleep or support continuation.
  337      */
  338      if (bus_dmamem_alloc(sc->twe_immediate_dmat, (void **)&sc->twe_immediate,
  339                           BUS_DMA_NOWAIT, &sc->twe_immediate_map)) {
  340         twe_printf(sc, "can't allocate memory for immediate requests\n");
  341         return(ENOMEM);
  342      }
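           /*
            * twe_immediate is a DFLTPHYS-sized staging buffer for requests flagged
            * TWE_CMD_IMMEDIATE; twe_map_request() below loads it with BUS_DMA_NOWAIT
            * so such requests map at once (or fail) instead of deferring.
            */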
  343 
  344     /*
  345      * Initialise the controller and driver core.
  346      */
  347     if ((error = twe_setup(sc))) {
  348         twe_free(sc);
  349         return(error);
  350     }
  351 
  352     /*
  353      * Print some information about the controller and configuration.
  354      */
  355     twe_describe_controller(sc);
  356 
  357     /*
  358      * Create the control device.
  359      */
  360     sc->twe_dev_t = make_dev(&twe_cdevsw, device_get_unit(sc->twe_dev), UID_ROOT, GID_OPERATOR,
  361                              S_IRUSR | S_IWUSR, "twe%d", device_get_unit(sc->twe_dev));
  362     sc->twe_dev_t->si_drv1 = sc;
  363     /*
  364      * Schedule ourselves to bring the controller up once interrupts are available.
  365      * This isn't strictly necessary, since we disable interrupts while probing the
  366      * controller, but it is more in keeping with common practice for other disk 
  367      * devices.
  368      */
  369     sc->twe_ich.ich_func = twe_intrhook;
  370     sc->twe_ich.ich_arg = sc;
  371     if (config_intrhook_establish(&sc->twe_ich) != 0) {
  372         twe_printf(sc, "can't establish configuration hook\n");
  373         twe_free(sc);
  374         return(ENXIO);
  375     }
  376 
  377     return(0);
  378 }
  379 
  380 /********************************************************************************
  381  * Free all of the resources associated with (sc).
  382  *
  383  * Should not be called if the controller is active.
  384  */
  385 static void
  386 twe_free(struct twe_softc *sc)
  387 {
  388     struct twe_request  *tr;
  389 
  390     debug_called(4);
  391 
  392     /* throw away any command buffers */
  393     while ((tr = twe_dequeue_free(sc)) != NULL)
  394         twe_free_request(tr);
  395 
  396     if (sc->twe_cmd != NULL) {
  397         bus_dmamap_unload(sc->twe_cmd_dmat, sc->twe_cmdmap);
  398         bus_dmamem_free(sc->twe_cmd_dmat, sc->twe_cmd, sc->twe_cmdmap);
  399     }
  400 
  401     if (sc->twe_immediate != NULL) {
  402         bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map);
  403         bus_dmamem_free(sc->twe_immediate_dmat, sc->twe_immediate,
  404                         sc->twe_immediate_map);
  405     }
  406 
  407     if (sc->twe_immediate_dmat)
  408         bus_dma_tag_destroy(sc->twe_immediate_dmat);
  409 
  410     /* destroy the data-transfer DMA tag */
  411     if (sc->twe_buffer_dmat)
  412         bus_dma_tag_destroy(sc->twe_buffer_dmat);
  413 
  414     /* disconnect the interrupt handler */
  415     if (sc->twe_intr)
  416         bus_teardown_intr(sc->twe_dev, sc->twe_irq, sc->twe_intr);
  417     if (sc->twe_irq != NULL)
  418         bus_release_resource(sc->twe_dev, SYS_RES_IRQ, 0, sc->twe_irq);
  419 
  420     /* destroy the parent DMA tag */
  421     if (sc->twe_parent_dmat)
  422         bus_dma_tag_destroy(sc->twe_parent_dmat);
  423 
  424     /* release the register window mapping */
  425     if (sc->twe_io != NULL)
  426         bus_release_resource(sc->twe_dev, SYS_RES_IOPORT, TWE_IO_CONFIG_REG, sc->twe_io);
  427 
  428     /* destroy control device */
  429     if (sc->twe_dev_t != (struct cdev *)NULL)
  430         destroy_dev(sc->twe_dev_t);
  431 
  432     sx_destroy(&sc->twe_config_lock);
  433     mtx_destroy(&sc->twe_io_lock);
  434 }
  435 
  436 /********************************************************************************
  437  * Disconnect from the controller completely, in preparation for unload.
  438  */
  439 static int
  440 twe_detach(device_t dev)
  441 {
  442     struct twe_softc    *sc = device_get_softc(dev);
  443 
  444     debug_called(4);
  445 
  446     TWE_IO_LOCK(sc);
  447     if (sc->twe_state & TWE_STATE_OPEN) {
  448         TWE_IO_UNLOCK(sc);
  449         return (EBUSY);
  450     }
  451     sc->twe_state |= TWE_STATE_DETACHING;
  452     TWE_IO_UNLOCK(sc);
  453 
  454     /*  
  455      * Shut the controller down.
  456      */
  457     if (twe_shutdown(dev)) {
  458         TWE_IO_LOCK(sc);
  459         sc->twe_state &= ~TWE_STATE_DETACHING;
  460         TWE_IO_UNLOCK(sc);
  461         return (EBUSY);
  462     }
  463 
  464     twe_free(sc);
  465 
  466     return(0);
  467 }
  468 
  469 /********************************************************************************
  470  * Bring the controller down to a dormant state and detach all child devices.
  471  *
  472  * Note that we can assume that the bioq on the controller is empty, as we won't
  473  * allow shutdown if any device is open.
  474  */
  475 static int
  476 twe_shutdown(device_t dev)
  477 {
  478     struct twe_softc    *sc = device_get_softc(dev);
  479     int                 i, error = 0;
  480 
  481     debug_called(4);
  482 
  483     /* 
  484      * Delete all our child devices.
  485      */
  486     TWE_CONFIG_LOCK(sc);
  487     for (i = 0; i < TWE_MAX_UNITS; i++) {
  488         if (sc->twe_drive[i].td_disk != 0) {
  489             if ((error = twe_detach_drive(sc, i)) != 0) {
  490                 TWE_CONFIG_UNLOCK(sc);
  491                 return (error);
  492             }
  493         }
  494     }
  495     TWE_CONFIG_UNLOCK(sc);
  496 
  497     /*
  498      * Bring the controller down.
  499      */
  500     TWE_IO_LOCK(sc);
  501     twe_deinit(sc);
  502     TWE_IO_UNLOCK(sc);
  503 
  504     return(0);
  505 }
  506 
  507 /********************************************************************************
  508  * Bring the controller to a quiescent state, ready for system suspend.
  509  */
  510 static int
  511 twe_suspend(device_t dev)
  512 {
  513     struct twe_softc    *sc = device_get_softc(dev);
  514 
  515     debug_called(4);
  516 
  517     TWE_IO_LOCK(sc);
  518     sc->twe_state |= TWE_STATE_SUSPEND;
  519     
  520     twe_disable_interrupts(sc);
  521     TWE_IO_UNLOCK(sc);
  522 
  523     return(0);
  524 }
  525 
  526 /********************************************************************************
  527  * Bring the controller back to a state ready for operation.
  528  */
  529 static int
  530 twe_resume(device_t dev)
  531 {
  532     struct twe_softc    *sc = device_get_softc(dev);
  533 
  534     debug_called(4);
  535 
  536     TWE_IO_LOCK(sc);
  537     sc->twe_state &= ~TWE_STATE_SUSPEND;
  538     twe_enable_interrupts(sc);
  539     TWE_IO_UNLOCK(sc);
  540 
  541     return(0);
  542 }
  543 
  544 /*******************************************************************************
  545  * Take an interrupt, or be poked by other code to look for interrupt-worthy
  546  * status.
  547  */
  548 static void
  549 twe_pci_intr(void *arg)
  550 {
  551     struct twe_softc *sc = arg;
  552 
  553     TWE_IO_LOCK(sc);
  554     twe_intr(sc);
  555     TWE_IO_UNLOCK(sc);
  556 }
  557 
  558 /********************************************************************************
  559  * Delayed-startup hook
  560  */
  561 static void
  562 twe_intrhook(void *arg)
  563 {
  564     struct twe_softc            *sc = (struct twe_softc *)arg;
  565 
  566     /* pull ourselves off the intrhook chain */
  567     config_intrhook_disestablish(&sc->twe_ich);
  568 
  569     /* call core startup routine */
  570     twe_init(sc);
  571 }
  572 
  573 /********************************************************************************
  574  * Given a detected drive, attach it to the bio interface.
  575  *
  576  * This is called from twe_add_unit.
  577  */
  578 int
  579 twe_attach_drive(struct twe_softc *sc, struct twe_drive *dr)
  580 {
  581     char        buf[80];
  582     int         error;
  583 
  584     bus_topo_lock();
  585     dr->td_disk =  device_add_child(sc->twe_dev, NULL, -1);
  586     if (dr->td_disk == NULL) {
  587             bus_topo_unlock();
  588         twe_printf(sc, "Cannot add unit\n");
  589         return (EIO);
  590     }
  591     device_set_ivars(dr->td_disk, dr);
  592 
  593     /* 
  594      * XXX It would make sense to test the online/initialising bits, but they seem to be
  595      * always set...
  596      */
  597     sprintf(buf, "Unit %d, %s, %s",
  598             dr->td_twe_unit,
  599             twe_describe_code(twe_table_unittype, dr->td_type),
  600             twe_describe_code(twe_table_unitstate, dr->td_state & TWE_PARAM_UNITSTATUS_MASK));
  601     device_set_desc_copy(dr->td_disk, buf);
  602 
  603     error = device_probe_and_attach(dr->td_disk);
  604     bus_topo_unlock();
  605     if (error != 0) {
  606         twe_printf(sc, "Cannot attach unit to controller. error = %d\n", error);
  607         return (EIO);
  608     }
  609     return (0);
  610 }
  611 
  612 /********************************************************************************
   613  * Detach the specified unit if it exists.
  614  *
  615  * This is called from twe_del_unit.
  616  */
  617 int
  618 twe_detach_drive(struct twe_softc *sc, int unit)
  619 {
  620     int error = 0;
  621 
  622     TWE_CONFIG_ASSERT_LOCKED(sc);
  623     bus_topo_lock();
  624     error = device_delete_child(sc->twe_dev, sc->twe_drive[unit].td_disk);
  625     bus_topo_unlock();
  626     if (error != 0) {
  627         twe_printf(sc, "failed to delete unit %d\n", unit);
  628         return(error);
  629     }
  630     bzero(&sc->twe_drive[unit], sizeof(sc->twe_drive[unit]));
  631     return(error);
  632 }
  633 
  634 /********************************************************************************
  635  * Clear a PCI parity error.
  636  */
  637 void
  638 twe_clear_pci_parity_error(struct twe_softc *sc)
  639 {
  640     TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PARITY_ERROR);
  641     pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PARITY_ERROR, 2);
  642 }
  643 
  644 /********************************************************************************
  645  * Clear a PCI abort.
  646  */
  647 void
  648 twe_clear_pci_abort(struct twe_softc *sc)
  649 {
  650     TWE_CONTROL(sc, TWE_CONTROL_CLEAR_PCI_ABORT);
  651     pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PCI_ABORT, 2);
  652 }
  653 
  654 /********************************************************************************
  655  ********************************************************************************
  656                                                                       Disk device
  657  ********************************************************************************
  658  ********************************************************************************/
  659 
  660 /*
  661  * Disk device softc
  662  */
  663 struct twed_softc
  664 {
  665     device_t            twed_dev;
  666     struct twe_softc    *twed_controller;       /* parent device softc */
  667     struct twe_drive    *twed_drive;            /* drive data in parent softc */
  668     struct disk         *twed_disk;             /* generic disk handle */
  669 };
  670 
  671 /*
  672  * Disk device bus interface
  673  */
  674 static int twed_probe(device_t dev);
  675 static int twed_attach(device_t dev);
  676 static int twed_detach(device_t dev);
  677 
  678 static device_method_t twed_methods[] = {
  679     DEVMETHOD(device_probe,     twed_probe),
  680     DEVMETHOD(device_attach,    twed_attach),
  681     DEVMETHOD(device_detach,    twed_detach),
  682     { 0, 0 }
  683 };
  684 
  685 static driver_t twed_driver = {
  686     "twed",
  687     twed_methods,
  688     sizeof(struct twed_softc)
  689 };
  690 
  691 DRIVER_MODULE(twed, twe, twed_driver, 0, 0);
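       /*
        * twed instances are the per-unit children created by twe_attach_drive()
        * above; registering twed_driver on the "twe" bus lets new-bus match
        * those children to the disk driver below.
        */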
  692 
  693 /*
  694  * Disk device control interface.
  695  */
  696 
  697 /********************************************************************************
  698  * Handle open from generic layer.
  699  *
  700  * Note that this is typically only called by the diskslice code, and not
   701  * for opens on subdevices (e.g. slices, partitions).
  702  */
  703 static int
  704 twed_open(struct disk *dp)
  705 {
  706     struct twed_softc   *sc = (struct twed_softc *)dp->d_drv1;
  707 
  708     debug_called(4);
  709 
  710     if (sc == NULL)
  711         return (ENXIO);
  712 
  713     /* check that the controller is up and running */
  714     if (sc->twed_controller->twe_state & TWE_STATE_SHUTDOWN)
  715         return(ENXIO);
  716 
  717     return (0);
  718 }
  719 
  720 /********************************************************************************
  721  * Handle an I/O request.
  722  */
  723 static void
  724 twed_strategy(struct bio *bp)
  725 {
  726     struct twed_softc   *sc = bp->bio_disk->d_drv1;
  727 
  728     debug_called(4);
  729 
  730     bp->bio_driver1 = &sc->twed_drive->td_twe_unit;
  731     TWED_BIO_IN;
  732 
  733     /* bogus disk? */
  734     if (sc == NULL || sc->twed_drive->td_disk == NULL) {
  735         bp->bio_error = EINVAL;
  736         bp->bio_flags |= BIO_ERROR;
  737         printf("twe: bio for invalid disk!\n");
  738         biodone(bp);
  739         TWED_BIO_OUT;
  740         return;
  741     }
  742 
  743     /* queue the bio on the controller */
  744     TWE_IO_LOCK(sc->twed_controller);
  745     twe_enqueue_bio(sc->twed_controller, bp);
  746 
  747     /* poke the controller to start I/O */
  748     twe_startio(sc->twed_controller);
  749     TWE_IO_UNLOCK(sc->twed_controller);
  750     return;
  751 }
  752 
  753 /********************************************************************************
  754  * System crashdump support
  755  */
  756 static int
  757 twed_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
  758 {
  759     struct twed_softc   *twed_sc;
  760     struct twe_softc    *twe_sc;
  761     int                 error;
  762     struct disk         *dp;
  763 
  764     dp = arg;
  765     twed_sc = (struct twed_softc *)dp->d_drv1;
  766     if (twed_sc == NULL)
  767         return(ENXIO);
  768     twe_sc  = (struct twe_softc *)twed_sc->twed_controller;
  769 
  770     if (length > 0) {
  771         if ((error = twe_dump_blocks(twe_sc, twed_sc->twed_drive->td_twe_unit, offset / TWE_BLOCK_SIZE, virtual, length / TWE_BLOCK_SIZE)) != 0)
  772             return(error);
  773     }
  774     return(0);
  775 }
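       /*
        * twed_dump() is installed as the disk's d_dump method in twed_attach()
        * and runs in the kernel crashdump path; the "dumping" checks in
        * twe_map_request()/twe_unmap_request() skip the normal lock assertions
        * in that context.
        */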
  776 
  777 /********************************************************************************
  778  * Handle completion of an I/O request.
  779  */
  780 void
  781 twed_intr(struct bio *bp)
  782 {
  783     debug_called(4);
  784 
  785     /* if no error, transfer completed */
  786     if (!(bp->bio_flags & BIO_ERROR))
  787         bp->bio_resid = 0;
  788 
  789     biodone(bp);
  790     TWED_BIO_OUT;
  791 }
  792 
  793 /********************************************************************************
  794  * Default probe stub.
  795  */
  796 static int
  797 twed_probe(device_t dev)
  798 {
  799     return (0);
  800 }
  801 
  802 /********************************************************************************
  803  * Attach a unit to the controller.
  804  */
  805 static int
  806 twed_attach(device_t dev)
  807 {
  808     struct twed_softc   *sc;
  809     device_t            parent;
  810     
  811     debug_called(4);
  812 
  813     /* initialise our softc */
  814     sc = device_get_softc(dev);
  815     parent = device_get_parent(dev);
  816     sc->twed_controller = (struct twe_softc *)device_get_softc(parent);
  817     sc->twed_drive = device_get_ivars(dev);
  818     sc->twed_dev = dev;
  819 
  820     /* report the drive */
  821     twed_printf(sc, "%uMB (%u sectors)\n",
  822                 sc->twed_drive->td_size / ((1024 * 1024) / TWE_BLOCK_SIZE),
  823                 sc->twed_drive->td_size);
  824     
  825     /* attach a generic disk device to ourselves */
  826 
  827     sc->twed_drive->td_sys_unit = device_get_unit(dev);
  828 
  829     sc->twed_disk = disk_alloc();
  830     sc->twed_disk->d_open = twed_open;
  831     sc->twed_disk->d_strategy = twed_strategy;
  832     sc->twed_disk->d_dump = (dumper_t *)twed_dump;
  833     sc->twed_disk->d_name = "twed";
  834     sc->twed_disk->d_drv1 = sc;
  835     sc->twed_disk->d_maxsize = (TWE_MAX_SGL_LENGTH - 1) * PAGE_SIZE;
  836     sc->twed_disk->d_sectorsize = TWE_BLOCK_SIZE;
  837     sc->twed_disk->d_mediasize = TWE_BLOCK_SIZE * (off_t)sc->twed_drive->td_size;
  838     if (sc->twed_drive->td_type == TWE_UD_CONFIG_RAID0 ||
  839         sc->twed_drive->td_type == TWE_UD_CONFIG_RAID5 ||
  840         sc->twed_drive->td_type == TWE_UD_CONFIG_RAID10) {
  841             sc->twed_disk->d_stripesize =
  842                 TWE_BLOCK_SIZE << sc->twed_drive->td_stripe;
  843             sc->twed_disk->d_stripeoffset = 0;
  844     }
  845     sc->twed_disk->d_fwsectors = sc->twed_drive->td_sectors;
  846     sc->twed_disk->d_fwheads = sc->twed_drive->td_heads;
  847     sc->twed_disk->d_unit = sc->twed_drive->td_sys_unit;
  848 
  849     disk_create(sc->twed_disk, DISK_VERSION);
  850 
   851     /* d_maxsize above already caps I/O at the theoretical maximum allowed by the S/G list size */
  852 
  853     return (0);
  854 }
  855 
  856 /********************************************************************************
  857  * Disconnect ourselves from the system.
  858  */
  859 static int
  860 twed_detach(device_t dev)
  861 {
  862     struct twed_softc *sc = (struct twed_softc *)device_get_softc(dev);
  863 
  864     debug_called(4);
  865 
  866     if (sc->twed_disk->d_flags & DISKFLAG_OPEN)
  867         return(EBUSY);
  868 
  869     disk_destroy(sc->twed_disk);
  870 
  871     return(0);
  872 }
  873 
  874 /********************************************************************************
  875  ********************************************************************************
  876                                                                              Misc
  877  ********************************************************************************
  878  ********************************************************************************/
  879 
  880 /********************************************************************************
  881  * Allocate a command buffer
  882  */
  883 static MALLOC_DEFINE(TWE_MALLOC_CLASS, "twe_commands", "twe commands");
  884 
  885 struct twe_request *
  886 twe_allocate_request(struct twe_softc *sc, int tag)
  887 {
  888     struct twe_request  *tr;
  889 
  890     tr = malloc(sizeof(struct twe_request), TWE_MALLOC_CLASS, M_WAITOK | M_ZERO);
  891     tr->tr_sc = sc;
  892     tr->tr_tag = tag;
  893     if (bus_dmamap_create(sc->twe_buffer_dmat, 0, &tr->tr_dmamap)) {
  894         twe_free_request(tr);
  895         twe_printf(sc, "unable to allocate dmamap for tag %d\n", tag);
  896         return(NULL);
  897     }    
  898     return(tr);
  899 }
  900 
  901 /********************************************************************************
  902  * Permanently discard a command buffer.
  903  */
  904 void
  905 twe_free_request(struct twe_request *tr) 
  906 {
  907     struct twe_softc    *sc = tr->tr_sc;
  908     
  909     debug_called(4);
  910 
  911     bus_dmamap_destroy(sc->twe_buffer_dmat, tr->tr_dmamap);
  912     free(tr, TWE_MALLOC_CLASS);
  913 }
  914 
  915 /********************************************************************************
  916  * Map/unmap (tr)'s command and data in the controller's addressable space.
  917  *
  918  * These routines ensure that the data which the controller is going to try to
  919  * access is actually visible to the controller, in a machine-independent
  920  * fashion.  Due to a hardware limitation, I/O buffers must be 512-byte aligned
  921  * and we take care of that here as well.
  922  */
  923 static void
  924 twe_fillin_sgl(TWE_SG_Entry *sgl, bus_dma_segment_t *segs, int nsegments, int max_sgl)
  925 {
  926     int i;
  927 
  928     for (i = 0; i < nsegments; i++) {
  929         sgl[i].address = segs[i].ds_addr;
  930         sgl[i].length = segs[i].ds_len;
  931     }
  932     for (; i < max_sgl; i++) {                          /* XXX necessary? */
  933         sgl[i].address = 0;
  934         sgl[i].length = 0;
  935     }
  936 }
  937                 
  938 static void
  939 twe_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
  940 {
  941     struct twe_request  *tr = (struct twe_request *)arg;
  942     struct twe_softc    *sc = tr->tr_sc;
  943     TWE_Command         *cmd = TWE_FIND_COMMAND(tr);
  944 
  945     debug_called(4);
  946 
  947     if (tr->tr_flags & TWE_CMD_MAPPED)
  948         panic("already mapped command");
  949 
  950     tr->tr_flags |= TWE_CMD_MAPPED;
  951 
  952     if (tr->tr_flags & TWE_CMD_IN_PROGRESS)
  953         sc->twe_state &= ~TWE_STATE_FRZN;
   954     /* save base of first segment in command (applicable if there is only one segment) */
  955     tr->tr_dataphys = segs[0].ds_addr;
  956 
  957     /* correct command size for s/g list size */
  958     cmd->generic.size += 2 * nsegments;
  959 
   960     /*
   961      * Parameter and I/O commands keep the scatter/gather list at different
   962      * offsets, so we need to determine which sort of command this actually is
   963      * before we can populate it correctly.
   964      */
  965     switch(cmd->generic.opcode) {
  966     case TWE_OP_GET_PARAM:
  967     case TWE_OP_SET_PARAM:
  968         cmd->generic.sgl_offset = 2;
  969         twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
  970         break;
  971     case TWE_OP_READ:
  972     case TWE_OP_WRITE:
  973         cmd->generic.sgl_offset = 3;
  974         twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
  975         break;
  976     case TWE_OP_ATA_PASSTHROUGH:
  977         cmd->generic.sgl_offset = 5;
  978         twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH);
  979         break;
  980     default:
  981         /*
   982          * Fall back to what the Linux driver does.
  983          * Do this because the API may send an opcode
  984          * the driver knows nothing about and this will
  985          * at least stop PCIABRT's from hosing us.
  986          */
  987         switch (cmd->generic.sgl_offset) {
  988         case 2:
  989             twe_fillin_sgl(&cmd->param.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
  990             break;
  991         case 3:
  992             twe_fillin_sgl(&cmd->io.sgl[0], segs, nsegments, TWE_MAX_SGL_LENGTH);
  993             break;
  994         case 5:
  995             twe_fillin_sgl(&cmd->ata.sgl[0], segs, nsegments, TWE_MAX_ATA_SGL_LENGTH);
  996             break;
  997         }
  998     }
  999 
 1000     if (tr->tr_flags & TWE_CMD_DATAIN) {
 1001         if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
 1002             bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
 1003                             BUS_DMASYNC_PREREAD);
 1004         } else {
 1005             bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
 1006                             BUS_DMASYNC_PREREAD);
 1007         }
 1008     }
 1009 
 1010     if (tr->tr_flags & TWE_CMD_DATAOUT) {
 1011         /*
 1012          * if we're using an alignment buffer, and we're writing data
 1013          * copy the real data out
 1014          */
 1015         if (tr->tr_flags & TWE_CMD_ALIGNBUF)
 1016             bcopy(tr->tr_realdata, tr->tr_data, tr->tr_length);
 1017 
 1018         if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
 1019             bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
 1020                             BUS_DMASYNC_PREWRITE);
 1021         } else {
 1022             bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
 1023                             BUS_DMASYNC_PREWRITE);
 1024         }
 1025     }
 1026 
 1027     if (twe_start(tr) == EBUSY) {
 1028         tr->tr_sc->twe_state |= TWE_STATE_CTLR_BUSY;
 1029         twe_requeue_ready(tr);
 1030     }
 1031 }
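       /*
        * twe_setup_data_dmamap() is the busdma load callback: it runs either
        * synchronously from bus_dmamap_load() or later, when resources become
        * available, if the load returned EINPROGRESS.  twe_map_request() below
        * freezes the controller (TWE_STATE_FRZN) in the deferred case, and the
        * callback clears that freeze before submitting the command.
        */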
 1032 
 1033 static void
 1034 twe_setup_request_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
 1035 {
 1036     struct twe_softc    *sc = (struct twe_softc *)arg;
 1037 
 1038     debug_called(4);
 1039 
 1040     /* command can't cross a page boundary */
 1041     sc->twe_cmdphys = segs[0].ds_addr;
 1042 }
 1043 
 1044 int
 1045 twe_map_request(struct twe_request *tr)
 1046 {
 1047     struct twe_softc    *sc = tr->tr_sc;
 1048     int                 error = 0;
 1049 
 1050     debug_called(4);
 1051 
 1052     if (!dumping)
 1053         TWE_IO_ASSERT_LOCKED(sc);
 1054     if (sc->twe_state & (TWE_STATE_CTLR_BUSY | TWE_STATE_FRZN)) {
 1055         twe_requeue_ready(tr);
 1056         return (EBUSY);
 1057     }
 1058 
 1059     bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_PREWRITE);
 1060 
 1061     /*
 1062      * If the command involves data, map that too.
 1063      */
 1064     if (tr->tr_data != NULL && ((tr->tr_flags & TWE_CMD_MAPPED) == 0)) {
  1065         /*
  1066          * Data must be aligned to TWE_ALIGNMENT; allocate a fixup buffer if it's not.
  1067          */
 1068         if (((vm_offset_t)tr->tr_data % TWE_ALIGNMENT) != 0) {
 1069             tr->tr_realdata = tr->tr_data;                              /* save pointer to 'real' data */
 1070             tr->tr_flags |= TWE_CMD_ALIGNBUF;
 1071             tr->tr_data = malloc(tr->tr_length, TWE_MALLOC_CLASS, M_NOWAIT);
 1072             if (tr->tr_data == NULL) {
 1073                 twe_printf(sc, "%s: malloc failed\n", __func__);
 1074                 tr->tr_data = tr->tr_realdata; /* restore original data pointer */
 1075                 return(ENOMEM);
 1076             }
 1077         }
 1078 
 1079         /*
 1080          * Map the data buffer into bus space and build the s/g list.
 1081          */
 1082         if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
 1083             error = bus_dmamap_load(sc->twe_immediate_dmat, sc->twe_immediate_map, sc->twe_immediate,
 1084                             tr->tr_length, twe_setup_data_dmamap, tr, BUS_DMA_NOWAIT);
 1085         } else {
 1086             error = bus_dmamap_load(sc->twe_buffer_dmat, tr->tr_dmamap, tr->tr_data, tr->tr_length, 
 1087                                     twe_setup_data_dmamap, tr, 0);
 1088         }
 1089         if (error == EINPROGRESS) {
 1090             tr->tr_flags |= TWE_CMD_IN_PROGRESS;
 1091             sc->twe_state |= TWE_STATE_FRZN;
 1092             error = 0;
 1093         }
 1094     } else
 1095         if ((error = twe_start(tr)) == EBUSY) {
 1096             sc->twe_state |= TWE_STATE_CTLR_BUSY;
 1097             twe_requeue_ready(tr);
 1098         }
 1099 
 1100     return(error);
 1101 }
 1102 
 1103 void
 1104 twe_unmap_request(struct twe_request *tr)
 1105 {
 1106     struct twe_softc    *sc = tr->tr_sc;
 1107 
 1108     debug_called(4);
 1109 
 1110     if (!dumping)
 1111         TWE_IO_ASSERT_LOCKED(sc);
 1112     bus_dmamap_sync(sc->twe_cmd_dmat, sc->twe_cmdmap, BUS_DMASYNC_POSTWRITE);
 1113 
 1114     /*
 1115      * If the command involved data, unmap that too.
 1116      */
 1117     if (tr->tr_data != NULL) {
 1118         if (tr->tr_flags & TWE_CMD_DATAIN) {
 1119             if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
 1120                 bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
 1121                                 BUS_DMASYNC_POSTREAD);
 1122             } else {
 1123                 bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
 1124                                 BUS_DMASYNC_POSTREAD);
 1125             }
 1126 
 1127             /* if we're using an alignment buffer, and we're reading data, copy the real data in */
 1128             if (tr->tr_flags & TWE_CMD_ALIGNBUF)
 1129                 bcopy(tr->tr_data, tr->tr_realdata, tr->tr_length);
 1130         }
 1131         if (tr->tr_flags & TWE_CMD_DATAOUT) {
 1132             if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
 1133                 bus_dmamap_sync(sc->twe_immediate_dmat, sc->twe_immediate_map,
 1134                                 BUS_DMASYNC_POSTWRITE);
 1135             } else {
 1136                 bus_dmamap_sync(sc->twe_buffer_dmat, tr->tr_dmamap,
 1137                                 BUS_DMASYNC_POSTWRITE);
 1138             }
 1139         }
 1140 
 1141         if (tr->tr_flags & TWE_CMD_IMMEDIATE) {
 1142             bus_dmamap_unload(sc->twe_immediate_dmat, sc->twe_immediate_map);
 1143         } else {
 1144             bus_dmamap_unload(sc->twe_buffer_dmat, tr->tr_dmamap); 
 1145         }
 1146     }
 1147 
 1148     /* free alignment buffer if it was used */
 1149     if (tr->tr_flags & TWE_CMD_ALIGNBUF) {
 1150         free(tr->tr_data, TWE_MALLOC_CLASS);
 1151         tr->tr_data = tr->tr_realdata;          /* restore 'real' data pointer */
 1152     }
 1153 }
 1154 
 1155 #ifdef TWE_DEBUG
 1156 void twe_report(void);
 1157 /********************************************************************************
 1158  * Print current controller status, call from DDB.
 1159  */
 1160 void
 1161 twe_report(void)
 1162 {
 1163     struct twe_softc    *sc;
 1164     int                 i;
 1165 
 1166     for (i = 0; (sc = devclass_get_softc(devclass_find("twe"), i)) != NULL; i++)
 1167         twe_print_controller(sc);
 1168     printf("twed: total bio count in %u  out %u\n", twed_bio_in, twed_bio_out);
 1169 }
 1170 #endif
