
FreeBSD/Linux Kernel Cross Reference
sys/dev/ioat/ioat.c


    1 /*-
    2  * Copyright (C) 2012 Intel Corporation
    3  * All rights reserved.
    4  * Copyright (C) 2018 Alexander Motin <mav@FreeBSD.org>
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD$");
   30 
   31 #include "opt_ddb.h"
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/bus.h>
   36 #include <sys/conf.h>
   37 #include <sys/domainset.h>
   38 #include <sys/fail.h>
   39 #include <sys/ioccom.h>
   40 #include <sys/kernel.h>
   41 #include <sys/ktr.h>
   42 #include <sys/lock.h>
   43 #include <sys/malloc.h>
   44 #include <sys/module.h>
   45 #include <sys/mutex.h>
   46 #include <sys/rman.h>
   47 #include <sys/sbuf.h>
   48 #include <sys/smp.h>
   49 #include <sys/sysctl.h>
   50 #include <sys/taskqueue.h>
   51 #include <sys/time.h>
   52 #include <dev/pci/pcireg.h>
   53 #include <dev/pci/pcivar.h>
   54 #include <machine/bus.h>
   55 #include <machine/resource.h>
   56 #include <machine/stdarg.h>
   57 
   58 #ifdef DDB
   59 #include <ddb/ddb.h>
   60 #endif
   61 
   62 #include "ioat.h"
   63 #include "ioat_hw.h"
   64 #include "ioat_internal.h"
   65 
   66 #ifndef BUS_SPACE_MAXADDR_40BIT
   67 #define BUS_SPACE_MAXADDR_40BIT MIN(BUS_SPACE_MAXADDR, 0xFFFFFFFFFFULL)
   68 #endif
   69 #ifndef BUS_SPACE_MAXADDR_46BIT
   70 #define BUS_SPACE_MAXADDR_46BIT MIN(BUS_SPACE_MAXADDR, 0x3FFFFFFFFFFFULL)
   71 #endif
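/*
 * Note: 0xFFFFFFFFFFULL is (1ULL << 40) - 1 and 0x3FFFFFFFFFFFULL is
 * (1ULL << 46) - 1, i.e. the highest addresses reachable with 40-bit and
 * 46-bit DMA addressing.  MIN() clamps them to the platform's
 * BUS_SPACE_MAXADDR where that is smaller.
 */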
   72 
   73 static int ioat_modevent(module_t mod, int type, void *data);
   74 static int ioat_probe(device_t device);
   75 static int ioat_attach(device_t device);
   76 static int ioat_detach(device_t device);
   77 static int ioat_setup_intr(struct ioat_softc *ioat);
   78 static int ioat_teardown_intr(struct ioat_softc *ioat);
   79 static int ioat3_attach(device_t device);
   80 static int ioat_start_channel(struct ioat_softc *ioat);
   81 static int ioat_map_pci_bar(struct ioat_softc *ioat);
   82 static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
   83     int error);
   84 static void ioat_interrupt_handler(void *arg);
   85 static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
   86 static int chanerr_to_errno(uint32_t);
   87 static void ioat_process_events(struct ioat_softc *ioat, boolean_t intr);
   88 static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
   89 static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
   90 static void ioat_free_ring(struct ioat_softc *, uint32_t size,
   91     struct ioat_descriptor *);
   92 static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags);
   93 static union ioat_hw_descriptor *ioat_get_descriptor(struct ioat_softc *,
   94     uint32_t index);
   95 static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *,
   96     uint32_t index);
   97 static void ioat_halted_debug(struct ioat_softc *, uint32_t);
   98 static void ioat_poll_timer_callback(void *arg);
   99 static void dump_descriptor(void *hw_desc);
  100 static void ioat_submit_single(struct ioat_softc *ioat);
  101 static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
  102     int error);
  103 static int ioat_reset_hw(struct ioat_softc *ioat);
  104 static void ioat_reset_hw_task(void *, int);
  105 static void ioat_setup_sysctl(device_t device);
  106 static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
  107 static void ioat_get(struct ioat_softc *);
  108 static void ioat_put(struct ioat_softc *);
  109 static void ioat_drain_locked(struct ioat_softc *);
  110 
  111 #define ioat_log_message(v, ...) do {                                   \
  112         if ((v) <= g_ioat_debug_level) {                                \
  113                 device_printf(ioat->device, __VA_ARGS__);               \
  114         }                                                               \
  115 } while (0)
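/*
 * The macro expands in place and relies on a 'struct ioat_softc *ioat'
 * being in scope at the call site.  A typical level-0 call (which always
 * prints with the default hw.ioat.debug_level of 0) looks like:
 *
 *	ioat_log_message(0, "bus_alloc_resource failed\n");
 */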
  116 
  117 MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
  118 SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
  119     "ioat node");
  120 
  121 static int g_force_legacy_interrupts;
  122 SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
  123     &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");
  124 
  125 int g_ioat_debug_level = 0;
  126 SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
  127     0, "Set log level (0-3) for ioat(4). Higher is more verbose.");
  128 
  129 unsigned g_ioat_ring_order = 13;
  130 SYSCTL_UINT(_hw_ioat, OID_AUTO, ring_order, CTLFLAG_RDTUN, &g_ioat_ring_order,
  131     0, "Set IOAT ring order.  (1 << this) == ring size.");
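/* With the default order of 13, each channel ring holds 1 << 13 = 8192 descriptors. */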
  132 
  133 /*
  134  * OS <-> Driver interface structures
  135  */
  136 static device_method_t ioat_pci_methods[] = {
  137         /* Device interface */
  138         DEVMETHOD(device_probe,     ioat_probe),
  139         DEVMETHOD(device_attach,    ioat_attach),
  140         DEVMETHOD(device_detach,    ioat_detach),
  141         DEVMETHOD_END
  142 };
  143 
  144 static driver_t ioat_pci_driver = {
  145         "ioat",
  146         ioat_pci_methods,
  147         sizeof(struct ioat_softc),
  148 };
  149 
  150 DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_modevent, NULL);
  151 MODULE_VERSION(ioat, 1);
  152 
  153 /*
  154  * Private data structures
  155  */
  156 static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
  157 static unsigned ioat_channel_index = 0;
  158 SYSCTL_UINT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
  159     "Number of IOAT channels attached");
  160 static struct mtx ioat_list_mtx;
  161 MTX_SYSINIT(ioat_list_mtx, &ioat_list_mtx, "ioat list mtx", MTX_DEF);
  162 
  163 static struct _pcsid
  164 {
  165         u_int32_t   type;
  166         const char  *desc;
  167 } pci_ids[] = {
  168         { 0x34308086, "TBG IOAT Ch0" },
  169         { 0x34318086, "TBG IOAT Ch1" },
  170         { 0x34328086, "TBG IOAT Ch2" },
  171         { 0x34338086, "TBG IOAT Ch3" },
  172         { 0x34298086, "TBG IOAT Ch4" },
  173         { 0x342a8086, "TBG IOAT Ch5" },
  174         { 0x342b8086, "TBG IOAT Ch6" },
  175         { 0x342c8086, "TBG IOAT Ch7" },
  176 
  177         { 0x37108086, "JSF IOAT Ch0" },
  178         { 0x37118086, "JSF IOAT Ch1" },
  179         { 0x37128086, "JSF IOAT Ch2" },
  180         { 0x37138086, "JSF IOAT Ch3" },
  181         { 0x37148086, "JSF IOAT Ch4" },
  182         { 0x37158086, "JSF IOAT Ch5" },
  183         { 0x37168086, "JSF IOAT Ch6" },
  184         { 0x37178086, "JSF IOAT Ch7" },
  185         { 0x37188086, "JSF IOAT Ch0 (RAID)" },
  186         { 0x37198086, "JSF IOAT Ch1 (RAID)" },
  187 
  188         { 0x3c208086, "SNB IOAT Ch0" },
  189         { 0x3c218086, "SNB IOAT Ch1" },
  190         { 0x3c228086, "SNB IOAT Ch2" },
  191         { 0x3c238086, "SNB IOAT Ch3" },
  192         { 0x3c248086, "SNB IOAT Ch4" },
  193         { 0x3c258086, "SNB IOAT Ch5" },
  194         { 0x3c268086, "SNB IOAT Ch6" },
  195         { 0x3c278086, "SNB IOAT Ch7" },
  196         { 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
  197         { 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },
  198 
  199         { 0x0e208086, "IVB IOAT Ch0" },
  200         { 0x0e218086, "IVB IOAT Ch1" },
  201         { 0x0e228086, "IVB IOAT Ch2" },
  202         { 0x0e238086, "IVB IOAT Ch3" },
  203         { 0x0e248086, "IVB IOAT Ch4" },
  204         { 0x0e258086, "IVB IOAT Ch5" },
  205         { 0x0e268086, "IVB IOAT Ch6" },
  206         { 0x0e278086, "IVB IOAT Ch7" },
  207         { 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
  208         { 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },
  209 
  210         { 0x2f208086, "HSW IOAT Ch0" },
  211         { 0x2f218086, "HSW IOAT Ch1" },
  212         { 0x2f228086, "HSW IOAT Ch2" },
  213         { 0x2f238086, "HSW IOAT Ch3" },
  214         { 0x2f248086, "HSW IOAT Ch4" },
  215         { 0x2f258086, "HSW IOAT Ch5" },
  216         { 0x2f268086, "HSW IOAT Ch6" },
  217         { 0x2f278086, "HSW IOAT Ch7" },
  218         { 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
  219         { 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },
  220 
  221         { 0x0c508086, "BWD IOAT Ch0" },
  222         { 0x0c518086, "BWD IOAT Ch1" },
  223         { 0x0c528086, "BWD IOAT Ch2" },
  224         { 0x0c538086, "BWD IOAT Ch3" },
  225 
  226         { 0x6f508086, "BDXDE IOAT Ch0" },
  227         { 0x6f518086, "BDXDE IOAT Ch1" },
  228         { 0x6f528086, "BDXDE IOAT Ch2" },
  229         { 0x6f538086, "BDXDE IOAT Ch3" },
  230 
  231         { 0x6f208086, "BDX IOAT Ch0" },
  232         { 0x6f218086, "BDX IOAT Ch1" },
  233         { 0x6f228086, "BDX IOAT Ch2" },
  234         { 0x6f238086, "BDX IOAT Ch3" },
  235         { 0x6f248086, "BDX IOAT Ch4" },
  236         { 0x6f258086, "BDX IOAT Ch5" },
  237         { 0x6f268086, "BDX IOAT Ch6" },
  238         { 0x6f278086, "BDX IOAT Ch7" },
  239         { 0x6f2e8086, "BDX IOAT Ch0 (RAID)" },
  240         { 0x6f2f8086, "BDX IOAT Ch1 (RAID)" },
  241 
  242         { 0x20218086, "SKX IOAT" },
  243 
  244         { 0x0b008086, "ICX IOAT" },
  245 };
  246 
  247 MODULE_PNP_INFO("W32:vendor/device;D:#", pci, ioat, pci_ids,
  248     nitems(pci_ids));
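/*
 * Each 32-bit ID above packs the PCI device ID into the upper 16 bits and
 * the vendor ID (0x8086, Intel) into the lower 16 bits, matching the value
 * pci_get_devid() returns; 0x34308086, for example, is device 0x3430 from
 * vendor 0x8086.
 */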
  249 
  250 /*
  251  * OS <-> Driver linkage functions
  252  */
  253 static int
  254 ioat_modevent(module_t mod __unused, int type, void *data __unused)
  255 {
  256         switch(type) {
  257         case MOD_LOAD:
  258                 break;
  259 
  260         case MOD_UNLOAD:
  261                 ioat_test_detach();
  262                 break;
  263 
  264         case MOD_SHUTDOWN:
  265                 break;
  266 
  267         default:
  268                 return (EOPNOTSUPP);
  269         }
  270 
  271         return (0);
  272 }
  273 
  274 static int
  275 ioat_probe(device_t device)
  276 {
  277         struct _pcsid *ep;
  278         u_int32_t type;
  279 
  280         type = pci_get_devid(device);
  281         for (ep = pci_ids; ep < &pci_ids[nitems(pci_ids)]; ep++) {
  282                 if (ep->type == type) {
  283                         device_set_desc(device, ep->desc);
  284                         return (0);
  285                 }
  286         }
  287         return (ENXIO);
  288 }
  289 
  290 static int
  291 ioat_attach(device_t device)
  292 {
  293         struct ioat_softc *ioat;
  294         int error, i;
  295 
  296         ioat = DEVICE2SOFTC(device);
  297         ioat->device = device;
  298         if (bus_get_domain(device, &ioat->domain) != 0)
  299                 ioat->domain = 0;
  300         ioat->cpu = CPU_FFS(&cpuset_domain[ioat->domain]) - 1;
  301         if (ioat->cpu < 0)
  302                 ioat->cpu = CPU_FIRST();
  303 
  304         error = ioat_map_pci_bar(ioat);
  305         if (error != 0)
  306                 goto err;
  307 
  308         ioat->version = ioat_read_cbver(ioat);
  309         if (ioat->version < IOAT_VER_3_0) {
  310                 error = ENODEV;
  311                 goto err;
  312         }
  313 
  314         error = ioat3_attach(device);
  315         if (error != 0)
  316                 goto err;
  317 
  318         error = pci_enable_busmaster(device);
  319         if (error != 0)
  320                 goto err;
  321 
  322         error = ioat_setup_intr(ioat);
  323         if (error != 0)
  324                 goto err;
  325 
  326         error = ioat_reset_hw(ioat);
  327         if (error != 0)
  328                 goto err;
  329 
  330         ioat_process_events(ioat, FALSE);
  331         ioat_setup_sysctl(device);
  332 
  333         mtx_lock(&ioat_list_mtx);
  334         for (i = 0; i < IOAT_MAX_CHANNELS; i++) {
  335                 if (ioat_channel[i] == NULL)
  336                         break;
  337         }
  338         if (i >= IOAT_MAX_CHANNELS) {
  339                 mtx_unlock(&ioat_list_mtx);
  340                 device_printf(device, "Too many I/OAT devices in system\n");
  341                 error = ENXIO;
  342                 goto err;
  343         }
  344         ioat->chan_idx = i;
  345         ioat_channel[i] = ioat;
  346         if (i >= ioat_channel_index)
  347                 ioat_channel_index = i + 1;
  348         mtx_unlock(&ioat_list_mtx);
  349 
  350         ioat_test_attach();
  351 
  352 err:
  353         if (error != 0)
  354                 ioat_detach(device);
  355         return (error);
  356 }
  357 
  358 static inline int
  359 ioat_bus_dmamap_destroy(struct ioat_softc *ioat, const char *func,
  360     bus_dma_tag_t dmat, bus_dmamap_t map)
  361 {
  362         int error;
  363 
  364         error = bus_dmamap_destroy(dmat, map);
  365         if (error != 0) {
  366                 ioat_log_message(0,
  367                     "%s: bus_dmamap_destroy failed %d\n", func, error);
  368         }
  369 
  370         return (error);
  371 }
  372 
  373 static int
  374 ioat_detach(device_t device)
  375 {
  376         struct ioat_softc *ioat;
  377         int i, error;
  378 
  379         ioat = DEVICE2SOFTC(device);
  380 
  381         mtx_lock(&ioat_list_mtx);
  382         ioat_channel[ioat->chan_idx] = NULL;
  383         while (ioat_channel_index > 0 &&
  384             ioat_channel[ioat_channel_index - 1] == NULL)
  385                 ioat_channel_index--;
  386         mtx_unlock(&ioat_list_mtx);
  387 
  388         taskqueue_drain(taskqueue_thread, &ioat->reset_task);
  389 
  390         mtx_lock(&ioat->submit_lock);
  391         ioat->quiescing = TRUE;
  392         ioat->destroying = TRUE;
  393         wakeup(&ioat->quiescing);
  394         wakeup(&ioat->resetting);
  395 
  396         ioat_drain_locked(ioat);
  397         mtx_unlock(&ioat->submit_lock);
  398         mtx_lock(&ioat->cleanup_lock);
  399         while (ioat_get_active(ioat) > 0)
  400                 msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1);
  401         mtx_unlock(&ioat->cleanup_lock);
  402 
  403         ioat_teardown_intr(ioat);
  404         callout_drain(&ioat->poll_timer);
  405 
  406         pci_disable_busmaster(device);
  407 
  408         if (ioat->pci_resource != NULL)
  409                 bus_release_resource(device, SYS_RES_MEMORY,
  410                     ioat->pci_resource_id, ioat->pci_resource);
  411 
  412         if (ioat->data_tag != NULL) {
  413                 for (i = 0; i < 1 << ioat->ring_size_order; i++) {
  414                         error = ioat_bus_dmamap_destroy(ioat, __func__,
  415                             ioat->data_tag, ioat->ring[i].src_dmamap);
  416                         if (error != 0)
  417                                 return (error);
  418                 }
  419                 for (i = 0; i < 1 << ioat->ring_size_order; i++) {
  420                         error = ioat_bus_dmamap_destroy(ioat, __func__,
  421                             ioat->data_tag, ioat->ring[i].dst_dmamap);
  422                         if (error != 0)
  423                                 return (error);
  424                 }
  425 
  426                 for (i = 0; i < 1 << ioat->ring_size_order; i++) {
  427                         error = ioat_bus_dmamap_destroy(ioat, __func__,
  428                             ioat->data_tag, ioat->ring[i].src2_dmamap);
  429                         if (error != 0)
  430                                 return (error);
  431                 }
  432                 for (i = 0; i < 1 << ioat->ring_size_order; i++) {
  433                         error = ioat_bus_dmamap_destroy(ioat, __func__,
  434                             ioat->data_tag, ioat->ring[i].dst2_dmamap);
  435                         if (error != 0)
  436                                 return (error);
  437                 }
  438 
  439                 bus_dma_tag_destroy(ioat->data_tag);
  440         }
  441 
  442         if (ioat->ring != NULL)
  443                 ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);
  444 
  445         if (ioat->comp_update != NULL) {
  446                 bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
  447                 bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
  448                     ioat->comp_update_map);
  449                 bus_dma_tag_destroy(ioat->comp_update_tag);
  450         }
  451 
  452         if (ioat->hw_desc_ring != NULL) {
  453                 bus_dmamap_unload(ioat->hw_desc_tag, ioat->hw_desc_map);
  454                 bus_dmamem_free(ioat->hw_desc_tag, ioat->hw_desc_ring,
  455                     ioat->hw_desc_map);
  456                 bus_dma_tag_destroy(ioat->hw_desc_tag);
  457         }
  458 
  459         return (0);
  460 }
  461 
  462 static int
  463 ioat_teardown_intr(struct ioat_softc *ioat)
  464 {
  465 
  466         if (ioat->tag != NULL)
  467                 bus_teardown_intr(ioat->device, ioat->res, ioat->tag);
  468 
  469         if (ioat->res != NULL)
  470                 bus_release_resource(ioat->device, SYS_RES_IRQ,
  471                     rman_get_rid(ioat->res), ioat->res);
  472 
  473         pci_release_msi(ioat->device);
  474         return (0);
  475 }
  476 
  477 static int
  478 ioat_start_channel(struct ioat_softc *ioat)
  479 {
  480         struct ioat_dma_hw_descriptor *hw_desc;
  481         struct ioat_descriptor *desc;
  482         struct bus_dmadesc *dmadesc;
  483         uint64_t status;
  484         uint32_t chanerr;
  485         int i;
  486 
  487         ioat_acquire(&ioat->dmaengine);
  488 
  489         /* Submit 'NULL' operation manually to avoid quiescing flag */
  490         desc = ioat_get_ring_entry(ioat, ioat->head);
  491         hw_desc = &ioat_get_descriptor(ioat, ioat->head)->dma;
  492         dmadesc = &desc->bus_dmadesc;
  493 
  494         dmadesc->callback_fn = NULL;
  495         dmadesc->callback_arg = NULL;
  496 
  497         hw_desc->u.control_raw = 0;
  498         hw_desc->u.control_generic.op = IOAT_OP_COPY;
  499         hw_desc->u.control_generic.completion_update = 1;
  500         hw_desc->size = 8;
  501         hw_desc->src_addr = 0;
  502         hw_desc->dest_addr = 0;
  503         hw_desc->u.control.null = 1;
  504 
  505         ioat_submit_single(ioat);
  506         ioat_release(&ioat->dmaengine);
  507 
  508         for (i = 0; i < 100; i++) {
  509                 DELAY(1);
  510                 status = ioat_get_chansts(ioat);
  511                 if (is_ioat_idle(status))
  512                         return (0);
  513         }
  514 
  515         chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
  516         ioat_log_message(0, "could not start channel: "
  517             "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr,
  518             IOAT_CHANERR_STR);
  519         return (ENXIO);
  520 }
  521 
  522 /*
  523  * Initialize Hardware
  524  */
  525 static int
  526 ioat3_attach(device_t device)
  527 {
  528         struct ioat_softc *ioat;
  529         struct ioat_descriptor *ring;
  530         struct ioat_dma_hw_descriptor *dma_hw_desc;
  531         void *hw_desc;
  532         bus_addr_t lowaddr;
  533         size_t ringsz;
  534         int i, num_descriptors;
  535         int error;
  536         uint8_t xfercap;
  537 
  538         error = 0;
  539         ioat = DEVICE2SOFTC(device);
  540         ioat->capabilities = ioat_read_dmacapability(ioat);
  541 
  542         ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities,
  543             IOAT_DMACAP_STR);
  544 
  545         xfercap = ioat_read_xfercap(ioat);
  546         ioat->max_xfer_size = 1 << xfercap;
  547 
  548         ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
  549             IOAT_INTRDELAY_SUPPORTED) != 0;
  550         if (ioat->intrdelay_supported)
  551                 ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;
  552 
  553         /* TODO: need to check DCA here if we ever do XOR/PQ */
  554 
  555         mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
  556         mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
  557         callout_init(&ioat->poll_timer, 1);
  558         TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat);
  559 
  560         /* Establish lock order for Witness */
  561         mtx_lock(&ioat->cleanup_lock);
  562         mtx_lock(&ioat->submit_lock);
  563         mtx_unlock(&ioat->submit_lock);
  564         mtx_unlock(&ioat->cleanup_lock);
  565 
  566         ioat->is_submitter_processing = FALSE;
  567 
  568         if (ioat->version >= IOAT_VER_3_3)
  569                 lowaddr = BUS_SPACE_MAXADDR_48BIT;
  570         else if (ioat->version >= IOAT_VER_3_2)
  571                 lowaddr = BUS_SPACE_MAXADDR_46BIT;
  572         else
  573                 lowaddr = BUS_SPACE_MAXADDR_40BIT;
  574 
  575         error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
  576             sizeof(uint64_t), 0x0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
  577             sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
  578             &ioat->comp_update_tag);
  579         if (error != 0)
  580                 return (error);
  581 
  582         error = bus_dmamem_alloc(ioat->comp_update_tag,
  583             (void **)&ioat->comp_update, BUS_DMA_ZERO | BUS_DMA_WAITOK,
  584             &ioat->comp_update_map);
  585         if (error != 0)
  586                 return (error);
  587 
  588         error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
  589             ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
  590             BUS_DMA_NOWAIT);
  591         if (error != 0)
  592                 return (error);
  593 
  594         ioat->ring_size_order = g_ioat_ring_order;
  595         num_descriptors = 1 << ioat->ring_size_order;
  596         ringsz = sizeof(struct ioat_dma_hw_descriptor) * num_descriptors;
  597 
  598         error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
  599             2 * 1024 * 1024, 0x0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
  600             ringsz, 1, ringsz, 0, NULL, NULL, &ioat->hw_desc_tag);
  601         if (error != 0)
  602                 return (error);
  603 
  604         error = bus_dmamem_alloc(ioat->hw_desc_tag, &hw_desc,
  605             BUS_DMA_ZERO | BUS_DMA_WAITOK, &ioat->hw_desc_map);
  606         if (error != 0)
  607                 return (error);
  608 
  609         error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
  610             ringsz, ioat_dmamap_cb, &ioat->hw_desc_bus_addr, BUS_DMA_NOWAIT);
  611         if (error)
  612                 return (error);
  613 
  614         ioat->hw_desc_ring = hw_desc;
  615 
  616         error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
  617             1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
  618             ioat->max_xfer_size, 1, ioat->max_xfer_size, 0, NULL, NULL,
  619             &ioat->data_tag);
  620         if (error != 0)
  621                 return (error);
  622         ioat->ring = malloc_domainset(num_descriptors * sizeof(*ring), M_IOAT,
  623             DOMAINSET_PREF(ioat->domain), M_ZERO | M_WAITOK);
  624 
  625         ring = ioat->ring;
  626         for (i = 0; i < num_descriptors; i++) {
  627                 memset(&ring[i].bus_dmadesc, 0, sizeof(ring[i].bus_dmadesc));
  628                 ring[i].id = i;
  629                 error = bus_dmamap_create(ioat->data_tag, 0,
  630                     &ring[i].src_dmamap);
  631                 if (error != 0) {
  632                         ioat_log_message(0,
  633                             "%s: bus_dmamap_create failed %d\n", __func__,
  634                             error);
  635                         return (error);
  636                 }
  637                 error = bus_dmamap_create(ioat->data_tag, 0,
  638                     &ring[i].dst_dmamap);
  639                 if (error != 0) {
  640                         ioat_log_message(0,
  641                             "%s: bus_dmamap_create failed %d\n", __func__,
  642                             error);
  643                         return (error);
  644                 }
  645                 error = bus_dmamap_create(ioat->data_tag, 0,
  646                     &ring[i].src2_dmamap);
  647                 if (error != 0) {
  648                         ioat_log_message(0,
  649                             "%s: bus_dmamap_create failed %d\n", __func__,
  650                             error);
  651                         return (error);
  652                 }
  653                 error = bus_dmamap_create(ioat->data_tag, 0,
  654                     &ring[i].dst2_dmamap);
  655                 if (error != 0) {
  656                         ioat_log_message(0,
  657                             "%s: bus_dmamap_create failed %d\n", __func__,
  658                             error);
  659                         return (error);
  660                 }
  661         }
  662 
  663         for (i = 0; i < num_descriptors; i++) {
  664                 dma_hw_desc = &ioat->hw_desc_ring[i].dma;
  665                 dma_hw_desc->next = RING_PHYS_ADDR(ioat, i + 1);
  666         }
  667 
  668         ioat->tail = ioat->head = 0;
  669         *ioat->comp_update = ioat->last_seen =
  670             RING_PHYS_ADDR(ioat, ioat->tail - 1);
  671         return (0);
  672 }
  673 
  674 static int
  675 ioat_map_pci_bar(struct ioat_softc *ioat)
  676 {
  677 
  678         ioat->pci_resource_id = PCIR_BAR(0);
  679         ioat->pci_resource = bus_alloc_resource_any(ioat->device,
  680             SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);
  681 
  682         if (ioat->pci_resource == NULL) {
  683                 ioat_log_message(0, "unable to allocate pci resource\n");
  684                 return (ENODEV);
  685         }
  686 
  687         ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
  688         ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
  689         return (0);
  690 }
  691 
  692 static void
  693 ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
  694 {
  695         struct ioat_softc *ioat = arg;
  696 
  697         KASSERT(error == 0, ("%s: error:%d", __func__, error));
  698         ioat->comp_update_bus_addr = seg[0].ds_addr;
  699 }
  700 
  701 static void
  702 ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  703 {
  704         bus_addr_t *baddr;
  705 
  706         KASSERT(error == 0, ("%s: error:%d", __func__, error));
  707         baddr = arg;
  708         *baddr = segs->ds_addr;
  709 }
  710 
  711 /*
  712  * Interrupt setup and handlers
  713  */
  714 static int
  715 ioat_setup_intr(struct ioat_softc *ioat)
  716 {
  717         uint32_t num_vectors;
  718         int error;
  719         boolean_t use_msix;
  720 
  721         use_msix = FALSE;
  722 
  723         if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
  724                 num_vectors = 1;
  725                 pci_alloc_msix(ioat->device, &num_vectors);
  726                 if (num_vectors == 1)
  727                         use_msix = TRUE;
  728         }
  729 
  730         if (use_msix) {
  731                 ioat->rid = 1;
  732                 ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
  733                     &ioat->rid, RF_ACTIVE);
  734         } else {
  735                 ioat->rid = 0;
  736                 ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
  737                     &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
  738         }
  739         if (ioat->res == NULL) {
  740                 ioat_log_message(0, "bus_alloc_resource failed\n");
  741                 return (ENOMEM);
  742         }
  743 
  744         ioat->tag = NULL;
  745         error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
  746             INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
  747         if (error != 0) {
  748                 ioat_log_message(0, "bus_setup_intr failed\n");
  749                 return (error);
  750         }
  751 
  752         ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
  753         return (0);
  754 }
  755 
  756 static boolean_t
  757 ioat_model_resets_msix(struct ioat_softc *ioat)
  758 {
  759         u_int32_t pciid;
  760 
  761         pciid = pci_get_devid(ioat->device);
  762         switch (pciid) {
  763                 /* BWD: */
  764         case 0x0c508086:
  765         case 0x0c518086:
  766         case 0x0c528086:
  767         case 0x0c538086:
  768                 /* BDXDE: */
  769         case 0x6f508086:
  770         case 0x6f518086:
  771         case 0x6f528086:
  772         case 0x6f538086:
  773                 return (TRUE);
  774         }
  775 
  776         return (FALSE);
  777 }
  778 
  779 static void
  780 ioat_interrupt_handler(void *arg)
  781 {
  782         struct ioat_softc *ioat = arg;
  783 
  784         ioat->stats.interrupts++;
  785         ioat_process_events(ioat, TRUE);
  786 }
  787 
  788 static int
  789 chanerr_to_errno(uint32_t chanerr)
  790 {
  791 
  792         if (chanerr == 0)
  793                 return (0);
  794         if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0)
  795                 return (EFAULT);
  796         if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0)
  797                 return (EIO);
  798         /* This one is probably our fault: */
  799         if ((chanerr & IOAT_CHANERR_NDADDERR) != 0)
  800                 return (EIO);
  801         return (EIO);
  802 }
  803 
  804 static void
  805 ioat_process_events(struct ioat_softc *ioat, boolean_t intr)
  806 {
  807         struct ioat_descriptor *desc;
  808         struct bus_dmadesc *dmadesc;
  809         uint64_t comp_update, status;
  810         uint32_t completed, chanerr;
  811         int error __diagused;
  812 
  813         if (intr) {
  814                 mtx_lock(&ioat->cleanup_lock);
  815         } else {
  816                 if (!mtx_trylock(&ioat->cleanup_lock))
  817                         return;
  818         }
  819 
  820         /*
  821          * Don't run while the hardware is being reset.  Reset is responsible
  822          * for blocking new work and draining & completing existing work, so
  823          * there is nothing to do until new work is queued after reset anyway.
  824          */
  825         if (ioat->resetting_cleanup) {
  826                 mtx_unlock(&ioat->cleanup_lock);
  827                 return;
  828         }
  829 
  830         completed = 0;
  831         comp_update = *ioat->comp_update;
  832         status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;
  833 
  834         if (status < ioat->hw_desc_bus_addr ||
  835             status >= ioat->hw_desc_bus_addr + (1 << ioat->ring_size_order) *
  836             sizeof(struct ioat_generic_hw_descriptor))
  837                 panic("Bogus completion address %jx (channel %u)",
  838                     (uintmax_t)status, ioat->chan_idx);
  839 
  840         if (status == ioat->last_seen) {
  841                 /*
  842                  * If we landed in process_events and nothing has been
  843                  * completed, check for a timeout due to channel halt.
  844                  */
  845                 goto out;
  846         }
  847         CTR4(KTR_IOAT, "%s channel=%u hw_status=0x%lx last_seen=0x%lx",
  848             __func__, ioat->chan_idx, comp_update, ioat->last_seen);
  849 
  850         while (RING_PHYS_ADDR(ioat, ioat->tail - 1) != status) {
  851                 desc = ioat_get_ring_entry(ioat, ioat->tail);
  852                 dmadesc = &desc->bus_dmadesc;
  853                 CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) ok  cb %p(%p)",
  854                     ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
  855                     dmadesc->callback_arg);
  856 
  857                 bus_dmamap_unload(ioat->data_tag, desc->src_dmamap);
  858                 bus_dmamap_unload(ioat->data_tag, desc->dst_dmamap);
  859                 bus_dmamap_unload(ioat->data_tag, desc->src2_dmamap);
  860                 bus_dmamap_unload(ioat->data_tag, desc->dst2_dmamap);
  861 
  862                 if (dmadesc->callback_fn != NULL)
  863                         dmadesc->callback_fn(dmadesc->callback_arg, 0);
  864 
  865                 completed++;
  866                 ioat->tail++;
  867         }
  868         CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__,
  869             ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));
  870 
  871         if (completed != 0) {
  872                 ioat->last_seen = RING_PHYS_ADDR(ioat, ioat->tail - 1);
  873                 ioat->stats.descriptors_processed += completed;
  874                 wakeup(&ioat->tail);
  875         }
  876 
  877 out:
  878         ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
  879         mtx_unlock(&ioat->cleanup_lock);
  880 
  881         /*
  882          * The device doesn't seem to reliably push suspend/halt statuses to
  883          * the channel completion memory address, so poll the device register
   884          * here.  For performance reasons, skip this on interrupts and do
   885          * it only on the much rarer polling events.
  886          */
  887         if (!intr)
  888                 comp_update = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;
  889         if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update))
  890                 return;
  891 
  892         ioat->stats.channel_halts++;
  893 
  894         /*
  895          * Fatal programming error on this DMA channel.  Flush any outstanding
  896          * work with error status and restart the engine.
  897          */
  898         mtx_lock(&ioat->submit_lock);
  899         ioat->quiescing = TRUE;
  900         mtx_unlock(&ioat->submit_lock);
  901 
  902         /*
  903          * This is safe to do here because the submit queue is quiesced.  We
  904          * know that we will drain all outstanding events, so ioat_reset_hw
   905          * can't deadlock. It is necessary to protect other ioat_process_events
  906          * threads from racing ioat_reset_hw, reading an indeterminate hw
  907          * state, and attempting to continue issuing completions.
  908          */
  909         mtx_lock(&ioat->cleanup_lock);
  910         ioat->resetting_cleanup = TRUE;
  911 
  912         chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
  913         if (1 <= g_ioat_debug_level)
  914                 ioat_halted_debug(ioat, chanerr);
  915         ioat->stats.last_halt_chanerr = chanerr;
  916 
  917         while (ioat_get_active(ioat) > 0) {
  918                 desc = ioat_get_ring_entry(ioat, ioat->tail);
  919                 dmadesc = &desc->bus_dmadesc;
  920                 CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) err cb %p(%p)",
  921                     ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
  922                     dmadesc->callback_arg);
  923 
  924                 if (dmadesc->callback_fn != NULL)
  925                         dmadesc->callback_fn(dmadesc->callback_arg,
  926                             chanerr_to_errno(chanerr));
  927 
  928                 ioat->tail++;
  929                 ioat->stats.descriptors_processed++;
  930                 ioat->stats.descriptors_error++;
  931         }
  932         CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__,
  933             ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));
  934 
  935         /* Clear error status */
  936         ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);
  937 
  938         mtx_unlock(&ioat->cleanup_lock);
  939 
  940         ioat_log_message(0, "Resetting channel to recover from error\n");
  941         error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task);
  942         KASSERT(error == 0,
  943             ("%s: taskqueue_enqueue failed: %d", __func__, error));
  944 }
  945 
  946 static void
  947 ioat_reset_hw_task(void *ctx, int pending __unused)
  948 {
  949         struct ioat_softc *ioat;
  950         int error __diagused;
  951 
  952         ioat = ctx;
  953         ioat_log_message(1, "%s: Resetting channel\n", __func__);
  954 
  955         error = ioat_reset_hw(ioat);
  956         KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
  957 }
  958 
  959 /*
  960  * User API functions
  961  */
  962 unsigned
  963 ioat_get_nchannels(void)
  964 {
  965 
  966         return (ioat_channel_index);
  967 }
  968 
  969 bus_dmaengine_t
  970 ioat_get_dmaengine(uint32_t index, int flags)
  971 {
  972         struct ioat_softc *ioat;
  973 
  974         KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0,
  975             ("invalid flags: 0x%08x", flags));
  976         KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK),
  977             ("invalid wait | nowait"));
  978 
  979         mtx_lock(&ioat_list_mtx);
  980         if (index >= ioat_channel_index ||
  981             (ioat = ioat_channel[index]) == NULL) {
  982                 mtx_unlock(&ioat_list_mtx);
  983                 return (NULL);
  984         }
  985         mtx_lock(&ioat->submit_lock);
  986         mtx_unlock(&ioat_list_mtx);
  987 
  988         if (ioat->destroying) {
  989                 mtx_unlock(&ioat->submit_lock);
  990                 return (NULL);
  991         }
  992 
  993         ioat_get(ioat);
  994         if (ioat->quiescing) {
  995                 if ((flags & M_NOWAIT) != 0) {
  996                         ioat_put(ioat);
  997                         mtx_unlock(&ioat->submit_lock);
  998                         return (NULL);
  999                 }
 1000 
 1001                 while (ioat->quiescing && !ioat->destroying)
 1002                         msleep(&ioat->quiescing, &ioat->submit_lock, 0, "getdma", 0);
 1003 
 1004                 if (ioat->destroying) {
 1005                         ioat_put(ioat);
 1006                         mtx_unlock(&ioat->submit_lock);
 1007                         return (NULL);
 1008                 }
 1009         }
 1010         mtx_unlock(&ioat->submit_lock);
 1011         return (&ioat->dmaengine);
 1012 }
 1013 
 1014 void
 1015 ioat_put_dmaengine(bus_dmaengine_t dmaengine)
 1016 {
 1017         struct ioat_softc *ioat;
 1018 
 1019         ioat = to_ioat_softc(dmaengine);
 1020         mtx_lock(&ioat->submit_lock);
 1021         ioat_put(ioat);
 1022         mtx_unlock(&ioat->submit_lock);
 1023 }
 1024 
 1025 int
 1026 ioat_get_hwversion(bus_dmaengine_t dmaengine)
 1027 {
 1028         struct ioat_softc *ioat;
 1029 
 1030         ioat = to_ioat_softc(dmaengine);
 1031         return (ioat->version);
 1032 }
 1033 
 1034 size_t
 1035 ioat_get_max_io_size(bus_dmaengine_t dmaengine)
 1036 {
 1037         struct ioat_softc *ioat;
 1038 
 1039         ioat = to_ioat_softc(dmaengine);
 1040         return (ioat->max_xfer_size);
 1041 }
 1042 
 1043 uint32_t
 1044 ioat_get_capabilities(bus_dmaengine_t dmaengine)
 1045 {
 1046         struct ioat_softc *ioat;
 1047 
 1048         ioat = to_ioat_softc(dmaengine);
 1049         return (ioat->capabilities);
 1050 }
 1051 
 1052 int
 1053 ioat_get_domain(bus_dmaengine_t dmaengine, int *domain)
 1054 {
 1055         struct ioat_softc *ioat;
 1056 
 1057         ioat = to_ioat_softc(dmaengine);
 1058         return (bus_get_domain(ioat->device, domain));
 1059 }
 1060 
 1061 int
 1062 ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay)
 1063 {
 1064         struct ioat_softc *ioat;
 1065 
 1066         ioat = to_ioat_softc(dmaengine);
 1067         if (!ioat->intrdelay_supported)
 1068                 return (ENODEV);
 1069         if (delay > ioat->intrdelay_max)
 1070                 return (ERANGE);
 1071 
 1072         ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay);
 1073         ioat->cached_intrdelay =
 1074             ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK;
 1075         return (0);
 1076 }
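/*
 * Example (the delay value appears to be in microseconds, per
 * IOAT_INTRDELAY_US_MASK): ioat_set_interrupt_coalesce(eng, 100) asks the
 * channel to hold completion interrupts for up to ~100 us.  Values above
 * ioat_get_max_coalesce_period(eng) return ERANGE, and ENODEV is returned
 * if the hardware does not advertise IOAT_INTRDELAY_SUPPORTED.
 */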
 1077 
 1078 uint16_t
 1079 ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine)
 1080 {
 1081         struct ioat_softc *ioat;
 1082 
 1083         ioat = to_ioat_softc(dmaengine);
 1084         return (ioat->intrdelay_max);
 1085 }
 1086 
 1087 void
 1088 ioat_acquire(bus_dmaengine_t dmaengine)
 1089 {
 1090         struct ioat_softc *ioat;
 1091 
 1092         ioat = to_ioat_softc(dmaengine);
 1093         mtx_lock(&ioat->submit_lock);
 1094         CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
 1095         ioat->acq_head = ioat->head;
 1096 }
 1097 
 1098 int
 1099 ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags)
 1100 {
 1101         struct ioat_softc *ioat;
 1102         int error;
 1103 
 1104         ioat = to_ioat_softc(dmaengine);
 1105         ioat_acquire(dmaengine);
 1106 
 1107         error = ioat_reserve_space(ioat, n, mflags);
 1108         if (error != 0)
 1109                 ioat_release(dmaengine);
 1110         return (error);
 1111 }
 1112 
 1113 void
 1114 ioat_release(bus_dmaengine_t dmaengine)
 1115 {
 1116         struct ioat_softc *ioat;
 1117 
 1118         ioat = to_ioat_softc(dmaengine);
 1119         CTR3(KTR_IOAT, "%s channel=%u dispatch1 head=%u", __func__,
 1120             ioat->chan_idx, ioat->head);
 1121         KFAIL_POINT_CODE(DEBUG_FP, ioat_release, /* do nothing */);
 1122         CTR3(KTR_IOAT, "%s channel=%u dispatch2 head=%u", __func__,
 1123             ioat->chan_idx, ioat->head);
 1124 
 1125         if (ioat->acq_head != ioat->head) {
 1126                 ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET,
 1127                     (uint16_t)ioat->head);
 1128 
 1129                 if (!callout_pending(&ioat->poll_timer)) {
 1130                         callout_reset_on(&ioat->poll_timer, 1,
 1131                             ioat_poll_timer_callback, ioat, ioat->cpu);
 1132                 }
 1133         }
 1134         mtx_unlock(&ioat->submit_lock);
 1135 }
 1136 
 1137 static struct ioat_descriptor *
 1138 ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
 1139     uint32_t size, uint64_t src, uint64_t dst,
 1140     bus_dmaengine_callback_t callback_fn, void *callback_arg,
 1141     uint32_t flags)
 1142 {
 1143         struct ioat_generic_hw_descriptor *hw_desc;
 1144         struct ioat_descriptor *desc;
 1145         bus_dma_segment_t seg;
 1146         int mflags, nseg, error;
 1147 
 1148         mtx_assert(&ioat->submit_lock, MA_OWNED);
 1149 
 1150         KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0,
 1151             ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS));
 1152         KASSERT(size <= ioat->max_xfer_size, ("%s: size too big (%u > %u)",
 1153             __func__, (unsigned)size, ioat->max_xfer_size));
 1154 
 1155         if ((flags & DMA_NO_WAIT) != 0)
 1156                 mflags = M_NOWAIT;
 1157         else
 1158                 mflags = M_WAITOK;
 1159 
 1160         if (ioat_reserve_space(ioat, 1, mflags) != 0)
 1161                 return (NULL);
 1162 
 1163         desc = ioat_get_ring_entry(ioat, ioat->head);
 1164         hw_desc = &ioat_get_descriptor(ioat, ioat->head)->generic;
 1165 
 1166         hw_desc->u.control_raw = 0;
 1167         hw_desc->u.control_generic.op = op;
 1168         hw_desc->u.control_generic.completion_update = 1;
 1169 
 1170         if ((flags & DMA_INT_EN) != 0)
 1171                 hw_desc->u.control_generic.int_enable = 1;
 1172         if ((flags & DMA_FENCE) != 0)
 1173                 hw_desc->u.control_generic.fence = 1;
 1174 
 1175         hw_desc->size = size;
 1176 
 1177         if (src != 0) {
 1178                 nseg = -1;
 1179                 error = _bus_dmamap_load_phys(ioat->data_tag, desc->src_dmamap,
 1180                     src, size, 0, &seg, &nseg);
 1181                 if (error != 0) {
 1182                         ioat_log_message(0, "%s: _bus_dmamap_load_phys"
 1183                             " failed %d\n", __func__, error);
 1184                         return (NULL);
 1185                 }
 1186                 hw_desc->src_addr = seg.ds_addr;
 1187         }
 1188 
 1189         if (dst != 0) {
 1190                 nseg = -1;
 1191                 error = _bus_dmamap_load_phys(ioat->data_tag, desc->dst_dmamap,
 1192                     dst, size, 0, &seg, &nseg);
 1193                 if (error != 0) {
 1194                         ioat_log_message(0, "%s: _bus_dmamap_load_phys"
 1195                             " failed %d\n", __func__, error);
 1196                         return (NULL);
 1197                 }
 1198                 hw_desc->dest_addr = seg.ds_addr;
 1199         }
 1200 
 1201         desc->bus_dmadesc.callback_fn = callback_fn;
 1202         desc->bus_dmadesc.callback_arg = callback_arg;
 1203         return (desc);
 1204 }
 1205 
 1206 struct bus_dmadesc *
 1207 ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
 1208     void *callback_arg, uint32_t flags)
 1209 {
 1210         struct ioat_dma_hw_descriptor *hw_desc;
 1211         struct ioat_descriptor *desc;
 1212         struct ioat_softc *ioat;
 1213 
 1214         ioat = to_ioat_softc(dmaengine);
 1215         CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
 1216 
 1217         desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
 1218             callback_arg, flags);
 1219         if (desc == NULL)
 1220                 return (NULL);
 1221 
 1222         hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
 1223         hw_desc->u.control.null = 1;
 1224         ioat_submit_single(ioat);
 1225         return (&desc->bus_dmadesc);
 1226 }
 1227 
 1228 struct bus_dmadesc *
 1229 ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
 1230     bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
 1231     void *callback_arg, uint32_t flags)
 1232 {
 1233         struct ioat_dma_hw_descriptor *hw_desc;
 1234         struct ioat_descriptor *desc;
 1235         struct ioat_softc *ioat;
 1236 
 1237         ioat = to_ioat_softc(dmaengine);
 1238         desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
 1239             callback_arg, flags);
 1240         if (desc == NULL)
 1241                 return (NULL);
 1242 
 1243         hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
 1244         if (g_ioat_debug_level >= 3)
 1245                 dump_descriptor(hw_desc);
 1246 
 1247         ioat_submit_single(ioat);
 1248         CTR6(KTR_IOAT, "%s channel=%u desc=%p dest=%lx src=%lx len=%lx",
 1249             __func__, ioat->chan_idx, &desc->bus_dmadesc, dst, src, len);
 1250         return (&desc->bus_dmadesc);
 1251 }
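/*
 * A minimal consumer sketch (hypothetical caller code; error handling and
 * the completion callback are omitted), assuming 'src', 'dst', and 'len'
 * describe DMA-able physical memory:
 *
 *	bus_dmaengine_t eng = ioat_get_dmaengine(0, M_WAITOK);
 *	if (eng != NULL) {
 *		ioat_acquire(eng);
 *		(void)ioat_copy(eng, dst, src, len, NULL, NULL, 0);
 *		ioat_release(eng);
 *	}
 *
 * ioat_put_dmaengine() drops the reference taken by ioat_get_dmaengine()
 * once the consumer is finished with the channel.
 */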
 1252 
 1253 struct bus_dmadesc *
 1254 ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
 1255     bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
 1256     bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
 1257 {
 1258         struct ioat_dma_hw_descriptor *hw_desc;
 1259         struct ioat_descriptor *desc;
 1260         struct ioat_softc *ioat;
 1261         bus_size_t src1_len, dst1_len;
 1262         bus_dma_segment_t seg;
 1263         int nseg, error;
 1264 
 1265         ioat = to_ioat_softc(dmaengine);
 1266         CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
 1267 
 1268         KASSERT(((src1 | src2 | dst1 | dst2) & PAGE_MASK) == 0,
 1269             ("%s: addresses are not page-aligned", __func__));
 1270 
 1271         desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, 0, 0,
 1272             callback_fn, callback_arg, flags);
 1273         if (desc == NULL)
 1274                 return (NULL);
 1275 
 1276         hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
 1277 
 1278         src1_len = (src2 != src1 + PAGE_SIZE) ? PAGE_SIZE : 2 * PAGE_SIZE;
 1279         nseg = -1;
 1280         error = _bus_dmamap_load_phys(ioat->data_tag,
 1281             desc->src_dmamap, src1, src1_len, 0, &seg, &nseg);
 1282         if (error != 0) {
 1283                 ioat_log_message(0, "%s: _bus_dmamap_load_phys"
 1284                     " failed %d\n", __func__, error);
 1285                 return (NULL);
 1286         }
 1287         hw_desc->src_addr = seg.ds_addr;
 1288         if (src1_len != 2 * PAGE_SIZE) {
 1289                 hw_desc->u.control.src_page_break = 1;
 1290                 nseg = -1;
 1291                 error = _bus_dmamap_load_phys(ioat->data_tag,
 1292                     desc->src2_dmamap, src2, PAGE_SIZE, 0, &seg, &nseg);
 1293                 if (error != 0) {
 1294                         ioat_log_message(0, "%s: _bus_dmamap_load_phys"
 1295                             " failed %d\n", __func__, error);
 1296                         return (NULL);
 1297                 }
 1298                 hw_desc->next_src_addr = seg.ds_addr;
 1299         }
 1300 
 1301         dst1_len = (dst2 != dst1 + PAGE_SIZE) ? PAGE_SIZE : 2 * PAGE_SIZE;
 1302         nseg = -1;
 1303         error = _bus_dmamap_load_phys(ioat->data_tag,
 1304             desc->dst_dmamap, dst1, dst1_len, 0, &seg, &nseg);
 1305         if (error != 0) {
 1306                 ioat_log_message(0, "%s: _bus_dmamap_load_phys"
 1307                     " failed %d\n", __func__, error);
 1308                 return (NULL);
 1309         }
 1310         hw_desc->dest_addr = seg.ds_addr;
 1311         if (dst1_len != 2 * PAGE_SIZE) {
 1312                 hw_desc->u.control.dest_page_break = 1;
 1313                 nseg = -1;
 1314                 error = _bus_dmamap_load_phys(ioat->data_tag,
 1315                     desc->dst2_dmamap, dst2, PAGE_SIZE, 0, &seg, &nseg);
 1316                 if (error != 0) {
 1317                         ioat_log_message(0, "%s: _bus_dmamap_load_phys"
 1318                             " failed %d\n", __func__, error);
 1319                         return (NULL);
 1320                 }
 1321                 hw_desc->next_dest_addr = seg.ds_addr;
 1322         }
 1323 
 1324         if (g_ioat_debug_level >= 3)
 1325                 dump_descriptor(hw_desc);
 1326 
 1327         ioat_submit_single(ioat);
 1328         return (&desc->bus_dmadesc);
 1329 }
 1330 
 1331 struct bus_dmadesc *
 1332 ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
 1333     bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
 1334     bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
 1335 {
 1336         struct ioat_crc32_hw_descriptor *hw_desc;
 1337         struct ioat_descriptor *desc;
 1338         struct ioat_softc *ioat;
 1339         uint32_t teststore;
 1340         uint8_t op;
 1341         bus_dma_segment_t seg;
 1342         int nseg, error;
 1343 
 1344         ioat = to_ioat_softc(dmaengine);
 1345         CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
 1346 
 1347         KASSERT((ioat->capabilities & IOAT_DMACAP_MOVECRC) != 0,
 1348             ("%s: device lacks MOVECRC capability", __func__));
 1349         teststore = (flags & _DMA_CRC_TESTSTORE);
 1350         KASSERT(teststore != _DMA_CRC_TESTSTORE,
 1351             ("%s: TEST and STORE invalid", __func__));
 1352         KASSERT(teststore != 0 || (flags & DMA_CRC_INLINE) == 0,
 1353             ("%s: INLINE invalid without TEST or STORE", __func__));
 1354 
 1355         switch (teststore) {
 1356         case DMA_CRC_STORE:
 1357                 op = IOAT_OP_MOVECRC_STORE;
 1358                 break;
 1359         case DMA_CRC_TEST:
 1360                 op = IOAT_OP_MOVECRC_TEST;
 1361                 break;
 1362         default:
 1363                 KASSERT(teststore == 0, ("bogus"));
 1364                 op = IOAT_OP_MOVECRC;
 1365                 break;
 1366         }
 1367 
 1368         desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
 1369             callback_arg, flags & ~_DMA_CRC_FLAGS);
 1370         if (desc == NULL)
 1371                 return (NULL);
 1372 
 1373         hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;
 1374 
 1375         if ((flags & DMA_CRC_INLINE) == 0) {
 1376                 nseg = -1;
 1377                 error = _bus_dmamap_load_phys(ioat->data_tag,
 1378                     desc->dst2_dmamap, crcptr, sizeof(uint32_t), 0,
 1379                     &seg, &nseg);
 1380                 if (error != 0) {
 1381                         ioat_log_message(0, "%s: _bus_dmamap_load_phys"
 1382                             " failed %d\n", __func__, error);
 1383                         return (NULL);
 1384                 }
 1385                 hw_desc->crc_address = seg.ds_addr;
 1386         } else
 1387                 hw_desc->u.control.crc_location = 1;
 1388 
 1389         if (initialseed != NULL) {
 1390                 hw_desc->u.control.use_seed = 1;
 1391                 hw_desc->seed = *initialseed;
 1392         }
 1393 
 1394         if (g_ioat_debug_level >= 3)
 1395                 dump_descriptor(hw_desc);
 1396 
 1397         ioat_submit_single(ioat);
 1398         return (&desc->bus_dmadesc);
 1399 }
 1400 
 1401 struct bus_dmadesc *
 1402 ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
 1403     uint32_t *initialseed, bus_addr_t crcptr,
 1404     bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
 1405 {
 1406         struct ioat_crc32_hw_descriptor *hw_desc;
 1407         struct ioat_descriptor *desc;
 1408         struct ioat_softc *ioat;
 1409         uint32_t teststore;
 1410         uint8_t op;
 1411         bus_dma_segment_t seg;
 1412         int nseg, error;
 1413 
 1414         ioat = to_ioat_softc(dmaengine);
 1415         CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
 1416 
 1417         KASSERT((ioat->capabilities & IOAT_DMACAP_CRC) != 0,
 1418             ("%s: device lacks CRC capability", __func__));
 1419         teststore = (flags & _DMA_CRC_TESTSTORE);
 1420         KASSERT(teststore != _DMA_CRC_TESTSTORE,
 1421             ("%s: TEST and STORE invalid", __func__));
 1422         KASSERT(teststore != 0 || (flags & DMA_CRC_INLINE) == 0,
 1423             ("%s: INLINE invalid without TEST or STORE", __func__));
 1424 
 1425         switch (teststore) {
 1426         case DMA_CRC_STORE:
 1427                 op = IOAT_OP_CRC_STORE;
 1428                 break;
 1429         case DMA_CRC_TEST:
 1430                 op = IOAT_OP_CRC_TEST;
 1431                 break;
 1432         default:
 1433                 KASSERT(teststore == 0, ("bogus"));
 1434                 op = IOAT_OP_CRC;
 1435                 break;
 1436         }
 1437 
 1438         desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
 1439             callback_arg, flags & ~_DMA_CRC_FLAGS);
 1440         if (desc == NULL)
 1441                 return (NULL);
 1442 
 1443         hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;
 1444 
 1445         if ((flags & DMA_CRC_INLINE) == 0) {
 1446                 nseg = -1;
 1447                 error = _bus_dmamap_load_phys(ioat->data_tag,
 1448                     desc->dst2_dmamap, crcptr, sizeof(uint32_t), 0,
 1449                     &seg, &nseg);
 1450                 if (error != 0) {
 1451                         ioat_log_message(0, "%s: _bus_dmamap_load_phys"
 1452                             " failed %d\n", __func__, error);
 1453                         return (NULL);
 1454                 }
 1455                 hw_desc->crc_address = seg.ds_addr;
 1456         } else
 1457                 hw_desc->u.control.crc_location = 1;
 1458 
 1459         if (initialseed != NULL) {
 1460                 hw_desc->u.control.use_seed = 1;
 1461                 hw_desc->seed = *initialseed;
 1462         }
 1463 
 1464         if (g_ioat_debug_level >= 3)
 1465                 dump_descriptor(hw_desc);
 1466 
 1467         ioat_submit_single(ioat);
 1468         return (&desc->bus_dmadesc);
 1469 }
 1470 
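/*
 * Editor's note (not in the original source): ioat_blockfill queues a
 * block-fill operation that replicates the 64-bit 'fillpattern' across 'len'
 * bytes at 'dst'; it requires the IOAT_DMACAP_BFILL capability.
 */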
 1471 struct bus_dmadesc *
 1472 ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
 1473     bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
 1474     uint32_t flags)
 1475 {
 1476         struct ioat_fill_hw_descriptor *hw_desc;
 1477         struct ioat_descriptor *desc;
 1478         struct ioat_softc *ioat;
 1479 
 1480         ioat = to_ioat_softc(dmaengine);
 1481         CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
 1482 
 1483         KASSERT((ioat->capabilities & IOAT_DMACAP_BFILL) != 0,
 1484             ("%s: device lacks BFILL capability", __func__));
 1485 
 1486         desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, 0, dst,
 1487             callback_fn, callback_arg, flags);
 1488         if (desc == NULL)
 1489                 return (NULL);
 1490 
 1491         hw_desc = &ioat_get_descriptor(ioat, desc->id)->fill;
 1492         hw_desc->src_data = fillpattern;
 1493         if (g_ioat_debug_level >= 3)
 1494                 dump_descriptor(hw_desc);
 1495 
 1496         ioat_submit_single(ioat);
 1497         return (&desc->bus_dmadesc);
 1498 }
 1499 
 1500 /*
 1501  * Ring Management
 1502  */
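/*
 * Editor's note (not in the original source): 'head' and 'tail' are
 * free-running counters; masking with (ring size - 1) turns them into ring
 * indices.  For example, with ring_size_order = 7 (128 slots), head = 130 and
 * tail = 125 give 5 active descriptors and 128 - 5 - 1 = 122 free slots (the
 * code always leaves at least one slot unused).
 */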
 1503 static inline uint32_t
 1504 ioat_get_active(struct ioat_softc *ioat)
 1505 {
 1506 
 1507         return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
 1508 }
 1509 
 1510 static inline uint32_t
 1511 ioat_get_ring_space(struct ioat_softc *ioat)
 1512 {
 1513 
 1514         return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
 1515 }
 1516 
 1517 /*
 1518  * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
 1519  * for 'num_descs'.
 1520  *
 1521  * If mflags contains M_WAITOK, blocks until enough space is available.
 1522  *
 1523  * Returns zero on success, or an errno on error.  If num_descs is less than
 1524  * one or beyond the maximum ring size, returns EINVAL; if allocation would
 1525  * block and mflags contains M_NOWAIT, returns EAGAIN.
 1526  *
 1527  * Must be called with the submit_lock held; returns with the lock held.  The
 1528  * lock may be dropped to allocate the ring.
 1529  *
 1530  * (The submit_lock is needed to add any entries to the ring, so callers are
 1531  * assured enough room is available.)
 1532  */
 1533 static int
 1534 ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
 1535 {
 1536         boolean_t dug;
 1537         int error;
 1538 
 1539         mtx_assert(&ioat->submit_lock, MA_OWNED);
 1540         error = 0;
 1541         dug = FALSE;
 1542 
 1543         if (num_descs < 1 || num_descs >= (1 << ioat->ring_size_order)) {
 1544                 error = EINVAL;
 1545                 goto out;
 1546         }
 1547 
 1548         for (;;) {
 1549                 if (ioat->quiescing) {
 1550                         error = ENXIO;
 1551                         goto out;
 1552                 }
 1553 
 1554                 if (ioat_get_ring_space(ioat) >= num_descs)
 1555                         goto out;
 1556 
 1557                 CTR3(KTR_IOAT, "%s channel=%u starved (%u)", __func__,
 1558                     ioat->chan_idx, num_descs);
 1559 
 1560                 if (!dug && !ioat->is_submitter_processing) {
 1561                         ioat->is_submitter_processing = TRUE;
 1562                         mtx_unlock(&ioat->submit_lock);
 1563 
 1564                         CTR2(KTR_IOAT, "%s channel=%u attempting to process events",
 1565                             __func__, ioat->chan_idx);
 1566                         ioat_process_events(ioat, FALSE);
 1567 
 1568                         mtx_lock(&ioat->submit_lock);
 1569                         dug = TRUE;
 1570                         KASSERT(ioat->is_submitter_processing == TRUE,
 1571                             ("is_submitter_processing"));
 1572                         ioat->is_submitter_processing = FALSE;
 1573                         wakeup(&ioat->tail);
 1574                         continue;
 1575                 }
 1576 
 1577                 if ((mflags & M_WAITOK) == 0) {
 1578                         error = EAGAIN;
 1579                         break;
 1580                 }
 1581                 CTR2(KTR_IOAT, "%s channel=%u blocking on completions",
 1582                     __func__, ioat->chan_idx);
 1583                 msleep(&ioat->tail, &ioat->submit_lock, 0,
 1584                     "ioat_full", 0);
 1585                 continue;
 1586         }
 1587 
 1588 out:
 1589         mtx_assert(&ioat->submit_lock, MA_OWNED);
 1590         KASSERT(!ioat->quiescing || error == ENXIO,
 1591             ("reserved during quiesce"));
 1592         return (error);
 1593 }
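/*
 * Editor's note: a minimal, hypothetical caller sketch (not part of the
 * original source), assuming the submit_lock is already held as the comment
 * above requires:
 *
 *	mtx_assert(&ioat->submit_lock, MA_OWNED);
 *	if (ioat_reserve_space(ioat, 1, M_NOWAIT) != 0)
 *		return (NULL);
 *	desc = ioat_get_ring_entry(ioat, ioat->head);
 *	hw_desc = ioat_get_descriptor(ioat, ioat->head);
 *	... fill in *hw_desc ...
 *	ioat_submit_single(ioat);
 */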
 1594 
 1595 static void
 1596 ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
 1597     struct ioat_descriptor *ring)
 1598 {
 1599 
 1600         free(ring, M_IOAT);
 1601 }
 1602 
 1603 static struct ioat_descriptor *
 1604 ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
 1605 {
 1606 
 1607         return (&ioat->ring[index % (1 << ioat->ring_size_order)]);
 1608 }
 1609 
 1610 static union ioat_hw_descriptor *
 1611 ioat_get_descriptor(struct ioat_softc *ioat, uint32_t index)
 1612 {
 1613 
 1614         return (&ioat->hw_desc_ring[index % (1 << ioat->ring_size_order)]);
 1615 }
 1616 
 1617 static void
 1618 ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
 1619 {
 1620         union ioat_hw_descriptor *desc;
 1621 
 1622         ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,
 1623             IOAT_CHANERR_STR);
 1624         if (chanerr == 0)
 1625                 return;
 1626 
 1627         mtx_assert(&ioat->cleanup_lock, MA_OWNED);
 1628 
 1629         desc = ioat_get_descriptor(ioat, ioat->tail + 0);
 1630         dump_descriptor(desc);
 1631 
 1632         desc = ioat_get_descriptor(ioat, ioat->tail + 1);
 1633         dump_descriptor(desc);
 1634 }
 1635 
 1636 static void
 1637 ioat_poll_timer_callback(void *arg)
 1638 {
 1639         struct ioat_softc *ioat;
 1640 
 1641         ioat = arg;
 1642         CTR1(KTR_IOAT, "%s", __func__);
 1643 
 1644         ioat_process_events(ioat, FALSE);
 1645 
 1646         mtx_lock(&ioat->submit_lock);
 1647         if (ioat_get_active(ioat) > 0)
 1648                 callout_schedule(&ioat->poll_timer, 1);
 1649         mtx_unlock(&ioat->submit_lock);
 1650 }
 1651 
 1652 /*
 1653  * Support Functions
 1654  */
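/*
 * Editor's note (not in the original source): ioat_submit_single only advances
 * the software head and bumps a statistic; space must already have been
 * reserved with ioat_reserve_space(), and the hardware is presumably notified
 * of the new head elsewhere, when the submitter releases the engine.
 */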
 1655 static void
 1656 ioat_submit_single(struct ioat_softc *ioat)
 1657 {
 1658 
 1659         mtx_assert(&ioat->submit_lock, MA_OWNED);
 1660 
 1661         ioat->head++;
 1662         CTR4(KTR_IOAT, "%s channel=%u head=%u tail=%u", __func__,
 1663             ioat->chan_idx, ioat->head, ioat->tail);
 1664 
 1665         ioat->stats.descriptors_submitted++;
 1666 }
 1667 
 1668 static int
 1669 ioat_reset_hw(struct ioat_softc *ioat)
 1670 {
 1671         uint64_t status;
 1672         uint32_t chanerr;
 1673         unsigned timeout;
 1674         int error;
 1675 
 1676         CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
 1677 
 1678         mtx_lock(&ioat->submit_lock);
 1679         while (ioat->resetting && !ioat->destroying)
 1680                 msleep(&ioat->resetting, &ioat->submit_lock, 0, "IRH_drain", 0);
 1681         if (ioat->destroying) {
 1682                 mtx_unlock(&ioat->submit_lock);
 1683                 return (ENXIO);
 1684         }
 1685         ioat->resetting = TRUE;
 1686         ioat->quiescing = TRUE;
 1687         mtx_unlock(&ioat->submit_lock);
 1688         mtx_lock(&ioat->cleanup_lock);
 1689         while (ioat_get_active(ioat) > 0)
 1690                 msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1);
 1691 
 1692         /*
 1693          * Suspend ioat_process_events while the hardware and softc are in an
 1694          * indeterminate state.
 1695          */
 1696         ioat->resetting_cleanup = TRUE;
 1697         mtx_unlock(&ioat->cleanup_lock);
 1698 
 1699         CTR2(KTR_IOAT, "%s channel=%u quiesced and drained", __func__,
 1700             ioat->chan_idx);
 1701 
 1702         status = ioat_get_chansts(ioat);
 1703         if (is_ioat_active(status) || is_ioat_idle(status))
 1704                 ioat_suspend(ioat);
 1705 
 1706         /* Wait at most 20 ms */
 1707         for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
 1708             timeout < 20; timeout++) {
 1709                 DELAY(1000);
 1710                 status = ioat_get_chansts(ioat);
 1711         }
 1712         if (timeout == 20) {
 1713                 error = ETIMEDOUT;
 1714                 goto out;
 1715         }
 1716 
 1717         KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));
 1718 
 1719         chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
 1720         ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);
 1721 
 1722         CTR2(KTR_IOAT, "%s channel=%u hardware suspended", __func__,
 1723             ioat->chan_idx);
 1724 
 1725         /*
 1726          * IOAT v3 workaround: write CHANERRMSK_INT with 3E07h to mask out
 1727          * errors that can cause stability issues on IOAT v3.
 1728          */
 1729         pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
 1730             4);
 1731         chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
 1732         pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);
 1733 
 1734         /*
 1735          * BDXDE and BWD models reset MSI-X registers on device reset.
 1736          * Save/restore their contents manually.
 1737          */
 1738         if (ioat_model_resets_msix(ioat)) {
 1739                 ioat_log_message(1, "device resets MSI-X registers; saving\n");
 1740                 pci_save_state(ioat->device);
 1741         }
 1742 
 1743         ioat_reset(ioat);
 1744         CTR2(KTR_IOAT, "%s channel=%u hardware reset", __func__,
 1745             ioat->chan_idx);
 1746 
 1747         /* Wait at most 20 ms */
 1748         for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
 1749                 DELAY(1000);
 1750         if (timeout == 20) {
 1751                 error = ETIMEDOUT;
 1752                 goto out;
 1753         }
 1754 
 1755         if (ioat_model_resets_msix(ioat)) {
 1756                 ioat_log_message(1, "device resets MSI-X registers; restoring\n");
 1757                 pci_restore_state(ioat->device);
 1758         }
 1759 
 1760         /* Reset attempts to return the hardware to "halted." */
 1761         status = ioat_get_chansts(ioat);
 1762         if (is_ioat_active(status) || is_ioat_idle(status)) {
 1763                 /* So this really shouldn't happen... */
 1764                 ioat_log_message(0, "Device is active after a reset?\n");
 1765                 ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
 1766                 error = 0;
 1767                 goto out;
 1768         }
 1769 
 1770         chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
 1771         if (chanerr != 0) {
 1772                 mtx_lock(&ioat->cleanup_lock);
 1773                 ioat_halted_debug(ioat, chanerr);
 1774                 mtx_unlock(&ioat->cleanup_lock);
 1775                 error = EIO;
 1776                 goto out;
 1777         }
 1778 
 1779         /*
 1780          * Bring device back online after reset.  Writing CHAINADDR brings the
 1781          * device back to active.
 1782          *
 1783          * The internal ring counter resets to zero, so we have to start over
 1784          * at zero as well.
 1785          */
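        /*
         * Editor's note (not in the original source): seeding both the
         * completion area and last_seen with the address of the slot just
         * before index 0 presumably marks "nothing completed yet" for the
         * fresh ring, so the cleanup path will not mistake stale completion
         * data for real progress.
         */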
 1786         ioat->tail = ioat->head = 0;
 1787         *ioat->comp_update = ioat->last_seen =
 1788             RING_PHYS_ADDR(ioat, ioat->tail - 1);
 1789 
 1790         ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
 1791         ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
 1792         ioat_write_chainaddr(ioat, RING_PHYS_ADDR(ioat, 0));
 1793         error = 0;
 1794         CTR2(KTR_IOAT, "%s channel=%u configured channel", __func__,
 1795             ioat->chan_idx);
 1796 
 1797 out:
 1798         /* Enqueues a null operation and ensures it completes. */
 1799         if (error == 0) {
 1800                 error = ioat_start_channel(ioat);
 1801                 CTR2(KTR_IOAT, "%s channel=%u started channel", __func__,
 1802                     ioat->chan_idx);
 1803         }
 1804 
 1805         /*
 1806          * Resume completions now that ring state is consistent.
 1807          */
 1808         mtx_lock(&ioat->cleanup_lock);
 1809         ioat->resetting_cleanup = FALSE;
 1810         mtx_unlock(&ioat->cleanup_lock);
 1811 
 1812         /* Unblock submission of new work */
 1813         mtx_lock(&ioat->submit_lock);
 1814         ioat->quiescing = FALSE;
 1815         wakeup(&ioat->quiescing);
 1816 
 1817         ioat->resetting = FALSE;
 1818         wakeup(&ioat->resetting);
 1819 
 1820         CTR2(KTR_IOAT, "%s channel=%u reset done", __func__, ioat->chan_idx);
 1821         mtx_unlock(&ioat->submit_lock);
 1822 
 1823         return (error);
 1824 }
 1825 
 1826 static int
 1827 sysctl_handle_chansts(SYSCTL_HANDLER_ARGS)
 1828 {
 1829         struct ioat_softc *ioat;
 1830         struct sbuf sb;
 1831         uint64_t status;
 1832         int error;
 1833 
 1834         ioat = arg1;
 1835 
 1836         status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;
 1837 
 1838         sbuf_new_for_sysctl(&sb, NULL, 256, req);
 1839         switch (status) {
 1840         case IOAT_CHANSTS_ACTIVE:
 1841                 sbuf_printf(&sb, "ACTIVE");
 1842                 break;
 1843         case IOAT_CHANSTS_IDLE:
 1844                 sbuf_printf(&sb, "IDLE");
 1845                 break;
 1846         case IOAT_CHANSTS_SUSPENDED:
 1847                 sbuf_printf(&sb, "SUSPENDED");
 1848                 break;
 1849         case IOAT_CHANSTS_HALTED:
 1850                 sbuf_printf(&sb, "HALTED");
 1851                 break;
 1852         case IOAT_CHANSTS_ARMED:
 1853                 sbuf_printf(&sb, "ARMED");
 1854                 break;
 1855         default:
 1856                 sbuf_printf(&sb, "UNKNOWN");
 1857                 break;
 1858         }
 1859         error = sbuf_finish(&sb);
 1860         sbuf_delete(&sb);
 1861 
 1862         if (error != 0 || req->newptr == NULL)
 1863                 return (error);
 1864         return (EINVAL);
 1865 }
 1866 
 1867 static int
 1868 sysctl_handle_dpi(SYSCTL_HANDLER_ARGS)
 1869 {
 1870         struct ioat_softc *ioat;
 1871         struct sbuf sb;
 1872 #define PRECISION       "1"
 1873         const uintmax_t factor = 10;
 1874         uintmax_t rate;
 1875         int error;
 1876 
 1877         ioat = arg1;
 1878         sbuf_new_for_sysctl(&sb, NULL, 16, req);
 1879 
 1880         if (ioat->stats.interrupts == 0) {
 1881                 sbuf_printf(&sb, "NaN");
 1882                 goto out;
 1883         }
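        /*
         * Editor's note (not in the original source): fixed-point average with
         * one decimal digit, e.g. 1234 descriptors over 100 interrupts gives
         * rate = 12340 / 100 = 123, printed as "12.3".
         */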
 1884         rate = ioat->stats.descriptors_processed * factor /
 1885             ioat->stats.interrupts;
 1886         sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor,
 1887             rate % factor);
 1888 #undef  PRECISION
 1889 out:
 1890         error = sbuf_finish(&sb);
 1891         sbuf_delete(&sb);
 1892         if (error != 0 || req->newptr == NULL)
 1893                 return (error);
 1894         return (EINVAL);
 1895 }
 1896 
 1897 static int
 1898 sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
 1899 {
 1900         struct ioat_softc *ioat;
 1901         int error, arg;
 1902 
 1903         ioat = arg1;
 1904 
 1905         arg = 0;
 1906         error = SYSCTL_OUT(req, &arg, sizeof(arg));
 1907         if (error != 0 || req->newptr == NULL)
 1908                 return (error);
 1909 
 1910         error = SYSCTL_IN(req, &arg, sizeof(arg));
 1911         if (error != 0)
 1912                 return (error);
 1913 
 1914         if (arg != 0)
 1915                 error = ioat_reset_hw(ioat);
 1916 
 1917         return (error);
 1918 }
 1919 
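/*
 * Editor's note (not in the original source): dump the first 64 bytes of a
 * hardware descriptor as two rows of eight 32-bit words.
 */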
 1920 static void
 1921 dump_descriptor(void *hw_desc)
 1922 {
 1923         int i, j;
 1924 
 1925         for (i = 0; i < 2; i++) {
 1926                 for (j = 0; j < 8; j++)
 1927                         printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
 1928                 printf("\n");
 1929         }
 1930 }
 1931 
 1932 static void
 1933 ioat_setup_sysctl(device_t device)
 1934 {
 1935         struct sysctl_oid_list *par, *statpar, *state, *hammer;
 1936         struct sysctl_ctx_list *ctx;
 1937         struct sysctl_oid *tree, *tmp;
 1938         struct ioat_softc *ioat;
 1939 
 1940         ioat = DEVICE2SOFTC(device);
 1941         ctx = device_get_sysctl_ctx(device);
 1942         tree = device_get_sysctl_tree(device);
 1943         par = SYSCTL_CHILDREN(tree);
 1944 
 1945         SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD,
 1946             &ioat->version, 0, "HW version (0xMM form)");
 1947         SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD,
 1948             &ioat->max_xfer_size, 0, "HW maximum transfer size");
 1949         SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD,
 1950             &ioat->intrdelay_supported, 0, "Is INTRDELAY supported");
 1951         SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD,
 1952             &ioat->intrdelay_max, 0,
 1953             "Maximum configurable INTRDELAY on this channel (microseconds)");
 1954 
 1955         tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state",
 1956             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "IOAT channel internal state");
 1957         state = SYSCTL_CHILDREN(tmp);
 1958 
 1959         SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD,
 1960             &ioat->ring_size_order, 0, "SW descriptor ring size order");
 1961         SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head,
 1962             0, "SW descriptor head pointer index");
 1963         SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail,
 1964             0, "SW descriptor tail pointer index");
 1965 
 1966         SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD,
 1967             ioat->comp_update, "HW addr of last completion");
 1968 
 1969         SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_submitter_processing",
 1970             CTLFLAG_RD, &ioat->is_submitter_processing, 0,
 1971             "submitter processing");
 1972 
 1973         SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts",
 1974             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ioat, 0,
 1975             sysctl_handle_chansts, "A", "String of the channel status");
 1976 
 1977         SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD,
 1978             &ioat->cached_intrdelay, 0,
 1979             "Current INTRDELAY on this channel (cached, microseconds)");
 1980 
 1981         tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer",
 1982             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
 1983             "Big hammers (mostly for testing)");
 1984         hammer = SYSCTL_CHILDREN(tmp);
 1985 
 1986         SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset",
 1987             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, ioat, 0,
 1988             sysctl_handle_reset, "I", "Set to non-zero to reset the hardware");
 1989 
 1990         tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats",
 1991             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "IOAT channel statistics");
 1992         statpar = SYSCTL_CHILDREN(tmp);
 1993 
 1994         SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts",
 1995             CTLFLAG_RW | CTLFLAG_STATS, &ioat->stats.interrupts,
 1996             "Number of interrupts processed on this channel");
 1997         SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors",
 1998             CTLFLAG_RW | CTLFLAG_STATS, &ioat->stats.descriptors_processed,
 1999             "Number of descriptors processed on this channel");
 2000         SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted",
 2001             CTLFLAG_RW | CTLFLAG_STATS, &ioat->stats.descriptors_submitted,
 2002             "Number of descriptors submitted to this channel");
 2003         SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored",
 2004             CTLFLAG_RW | CTLFLAG_STATS, &ioat->stats.descriptors_error,
 2005             "Number of descriptors failed by channel errors");
 2006         SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts",
 2007             CTLFLAG_RW | CTLFLAG_STATS, &ioat->stats.channel_halts, 0,
 2008             "Number of times the channel has halted");
 2009         SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr",
 2010             CTLFLAG_RW | CTLFLAG_STATS, &ioat->stats.last_halt_chanerr, 0,
 2011             "The raw CHANERR when the channel was last halted");
 2012 
 2013         SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt",
 2014             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ioat, 0,
 2015             sysctl_handle_dpi, "A", "Descriptors per interrupt");
 2016 }
 2017 
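/*
 * Editor's note (not in the original source): ioat_get/ioat_put maintain a
 * reference count under the submit_lock; ioat_drain_locked() sleeps until the
 * count returns to zero, so teardown can wait out outstanding references.
 */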
 2018 static void
 2019 ioat_get(struct ioat_softc *ioat)
 2020 {
 2021 
 2022         mtx_assert(&ioat->submit_lock, MA_OWNED);
 2023         KASSERT(ioat->refcnt < UINT32_MAX, ("refcnt overflow"));
 2024 
 2025         ioat->refcnt++;
 2026 }
 2027 
 2028 static void
 2029 ioat_put(struct ioat_softc *ioat)
 2030 {
 2031 
 2032         mtx_assert(&ioat->submit_lock, MA_OWNED);
 2033         KASSERT(ioat->refcnt >= 1, ("refcnt error"));
 2034 
 2035         if (--ioat->refcnt == 0)
 2036                 wakeup(&ioat->refcnt);
 2037 }
 2038 
 2039 static void
 2040 ioat_drain_locked(struct ioat_softc *ioat)
 2041 {
 2042 
 2043         mtx_assert(&ioat->submit_lock, MA_OWNED);
 2044 
 2045         while (ioat->refcnt > 0)
 2046                 msleep(&ioat->refcnt, &ioat->submit_lock, 0, "ioat_drain", 0);
 2047 }
 2048 
 2049 #ifdef DDB
 2050 #define _db_show_lock(lo)       LOCK_CLASS(lo)->lc_ddb_show(lo)
 2051 #define db_show_lock(lk)        _db_show_lock(&(lk)->lock_object)
 2052 DB_SHOW_COMMAND(ioat, db_show_ioat)
 2053 {
 2054         struct ioat_softc *sc;
 2055         unsigned idx;
 2056 
 2057         if (!have_addr)
 2058                 goto usage;
 2059         idx = (unsigned)addr;
 2060         if (idx >= ioat_channel_index)
 2061                 goto usage;
 2062 
 2063         sc = ioat_channel[idx];
 2064         db_printf("ioat softc at %p\n", sc);
 2065         if (sc == NULL)
 2066                 return;
 2067 
 2068         db_printf(" version: %d\n", sc->version);
 2069         db_printf(" chan_idx: %u\n", sc->chan_idx);
 2070         db_printf(" submit_lock: ");
 2071         db_show_lock(&sc->submit_lock);
 2072 
 2073         db_printf(" capabilities: %b\n", (int)sc->capabilities,
 2074             IOAT_DMACAP_STR);
 2075         db_printf(" cached_intrdelay: %u\n", sc->cached_intrdelay);
 2076         db_printf(" *comp_update: 0x%jx\n", (uintmax_t)*sc->comp_update);
 2077 
 2078         db_printf(" poll_timer:\n");
 2079         db_printf("  c_time: %ju\n", (uintmax_t)sc->poll_timer.c_time);
 2080         db_printf("  c_arg: %p\n", sc->poll_timer.c_arg);
 2081         db_printf("  c_func: %p\n", sc->poll_timer.c_func);
 2082         db_printf("  c_lock: %p\n", sc->poll_timer.c_lock);
 2083         db_printf("  c_flags: 0x%x\n", (unsigned)sc->poll_timer.c_flags);
 2084 
 2085         db_printf(" quiescing: %d\n", (int)sc->quiescing);
 2086         db_printf(" destroying: %d\n", (int)sc->destroying);
 2087         db_printf(" is_submitter_processing: %d\n",
 2088             (int)sc->is_submitter_processing);
 2089         db_printf(" intrdelay_supported: %d\n", (int)sc->intrdelay_supported);
 2090         db_printf(" resetting: %d\n", (int)sc->resetting);
 2091 
 2092         db_printf(" head: %u\n", sc->head);
 2093         db_printf(" tail: %u\n", sc->tail);
 2094         db_printf(" ring_size_order: %u\n", sc->ring_size_order);
 2095         db_printf(" last_seen: 0x%lx\n", sc->last_seen);
 2096         db_printf(" ring: %p\n", sc->ring);
 2097         db_printf(" descriptors: %p\n", sc->hw_desc_ring);
 2098         db_printf(" descriptors (phys): 0x%jx\n",
 2099             (uintmax_t)sc->hw_desc_bus_addr);
 2100 
 2101         db_printf("  ring[%u] (tail):\n", sc->tail %
 2102             (1 << sc->ring_size_order));
 2103         db_printf("   id: %u\n", ioat_get_ring_entry(sc, sc->tail)->id);
 2104         db_printf("   addr: 0x%lx\n",
 2105             RING_PHYS_ADDR(sc, sc->tail));
 2106         db_printf("   next: 0x%lx\n",
 2107              ioat_get_descriptor(sc, sc->tail)->generic.next);
 2108 
 2109         db_printf("  ring[%u] (head - 1):\n", (sc->head - 1) %
 2110             (1 << sc->ring_size_order));
 2111         db_printf("   id: %u\n", ioat_get_ring_entry(sc, sc->head - 1)->id);
 2112         db_printf("   addr: 0x%lx\n",
 2113             RING_PHYS_ADDR(sc, sc->head - 1));
 2114         db_printf("   next: 0x%lx\n",
 2115              ioat_get_descriptor(sc, sc->head - 1)->generic.next);
 2116 
 2117         db_printf("  ring[%u] (head):\n", (sc->head) %
 2118             (1 << sc->ring_size_order));
 2119         db_printf("   id: %u\n", ioat_get_ring_entry(sc, sc->head)->id);
 2120         db_printf("   addr: 0x%lx\n",
 2121             RING_PHYS_ADDR(sc, sc->head));
 2122         db_printf("   next: 0x%lx\n",
 2123              ioat_get_descriptor(sc, sc->head)->generic.next);
 2124 
 2125         for (idx = 0; idx < (1 << sc->ring_size_order); idx++)
 2126                 if ((*sc->comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK)
 2127                     == RING_PHYS_ADDR(sc, idx))
 2128                         db_printf("  ring[%u] == hardware tail\n", idx);
 2129 
 2130         db_printf(" cleanup_lock: ");
 2131         db_show_lock(&sc->cleanup_lock);
 2132 
 2133         db_printf(" refcnt: %u\n", sc->refcnt);
 2134         db_printf(" stats:\n");
 2135         db_printf("  interrupts: %lu\n", sc->stats.interrupts);
 2136         db_printf("  descriptors_processed: %lu\n", sc->stats.descriptors_processed);
 2137         db_printf("  descriptors_error: %lu\n", sc->stats.descriptors_error);
 2138         db_printf("  descriptors_submitted: %lu\n", sc->stats.descriptors_submitted);
 2139 
 2140         db_printf("  channel_halts: %u\n", sc->stats.channel_halts);
 2141         db_printf("  last_halt_chanerr: %u\n", sc->stats.last_halt_chanerr);
 2142 
 2143         if (db_pager_quit)
 2144                 return;
 2145 
 2146         db_printf(" hw status:\n");
 2147         db_printf("  status: 0x%lx\n", ioat_get_chansts(sc));
 2148         db_printf("  chanctrl: 0x%x\n",
 2149             (unsigned)ioat_read_2(sc, IOAT_CHANCTRL_OFFSET));
 2150         db_printf("  chancmd: 0x%x\n",
 2151             (unsigned)ioat_read_1(sc, IOAT_CHANCMD_OFFSET));
 2152         db_printf("  dmacount: 0x%x\n",
 2153             (unsigned)ioat_read_2(sc, IOAT_DMACOUNT_OFFSET));
 2154         db_printf("  chainaddr: 0x%lx\n",
 2155             ioat_read_double_4(sc, IOAT_CHAINADDR_OFFSET_LOW));
 2156         db_printf("  chancmp: 0x%lx\n",
 2157             ioat_read_double_4(sc, IOAT_CHANCMP_OFFSET_LOW));
 2158         db_printf("  chanerr: %b\n",
 2159             (int)ioat_read_4(sc, IOAT_CHANERR_OFFSET), IOAT_CHANERR_STR);
 2160         return;
 2161 usage:
 2162         db_printf("usage: show ioat <0-%u>\n", ioat_channel_index);
 2163         return;
 2164 }
 2165 #endif /* DDB */
