FreeBSD/Linux Kernel Cross Reference
sys/dev/sec/sec.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
   19  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   20  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
   21  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   22  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   23  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   24  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   26  */
   27 
   28 /*
   29  * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0,
   30  * 3.0 and 3.1 controllers are supported.
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD$");
   35 
   36 #include <sys/param.h>
   37 #include <sys/systm.h>
   38 #include <sys/bus.h>
   39 #include <sys/endian.h>
   40 #include <sys/kernel.h>
   41 #include <sys/lock.h>
   42 #include <sys/malloc.h>
   43 #include <sys/mbuf.h>
   44 #include <sys/module.h>
   45 #include <sys/mutex.h>
   46 #include <sys/random.h>
   47 #include <sys/rman.h>
   48 
   49 #include <machine/_inttypes.h>
   50 #include <machine/bus.h>
   51 #include <machine/resource.h>
   52 
   53 #include <opencrypto/cryptodev.h>
   54 #include <opencrypto/xform_auth.h>
   55 #include "cryptodev_if.h"
   56 
   57 #include <dev/ofw/ofw_bus_subr.h>
   58 #include <dev/sec/sec.h>
   59 
   60 static int      sec_probe(device_t dev);
   61 static int      sec_attach(device_t dev);
   62 static int      sec_detach(device_t dev);
   63 static int      sec_suspend(device_t dev);
   64 static int      sec_resume(device_t dev);
   65 static int      sec_shutdown(device_t dev);
   66 static void     sec_primary_intr(void *arg);
   67 static void     sec_secondary_intr(void *arg);
   68 static int      sec_setup_intr(struct sec_softc *sc, struct resource **ires,
   69     void **ihand, int *irid, driver_intr_t handler, const char *iname);
   70 static void     sec_release_intr(struct sec_softc *sc, struct resource *ires,
   71     void *ihand, int irid, const char *iname);
   72 static int      sec_controller_reset(struct sec_softc *sc);
   73 static int      sec_channel_reset(struct sec_softc *sc, int channel, int full);
   74 static int      sec_init(struct sec_softc *sc);
   75 static int      sec_alloc_dma_mem(struct sec_softc *sc,
   76     struct sec_dma_mem *dma_mem, bus_size_t size);
   77 static int      sec_desc_map_dma(struct sec_softc *sc,
   78     struct sec_dma_mem *dma_mem, struct cryptop *crp, bus_size_t size,
   79     struct sec_desc_map_info *sdmi);
   80 static void     sec_free_dma_mem(struct sec_dma_mem *dma_mem);
   81 static void     sec_enqueue(struct sec_softc *sc);
   82 static int      sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc,
   83     int channel);
   84 static int      sec_eu_channel(struct sec_softc *sc, int eu);
   85 static int      sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
   86     u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize);
   87 static int      sec_make_pointer_direct(struct sec_softc *sc,
   88     struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
   89 static int      sec_probesession(device_t dev,
   90     const struct crypto_session_params *csp);
   91 static int      sec_newsession(device_t dev, crypto_session_t cses,
   92     const struct crypto_session_params *csp);
   93 static int      sec_process(device_t dev, struct cryptop *crp, int hint);
   94 static int      sec_build_common_ns_desc(struct sec_softc *sc,
   95     struct sec_desc *desc, const struct crypto_session_params *csp,
   96     struct cryptop *crp);
   97 static int      sec_build_common_s_desc(struct sec_softc *sc,
   98     struct sec_desc *desc, const struct crypto_session_params *csp,
   99     struct cryptop *crp);
  100 
  101 static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
  102 
  103 /* AESU */
  104 static bool     sec_aesu_newsession(const struct crypto_session_params *csp);
  105 static int      sec_aesu_make_desc(struct sec_softc *sc,
  106     const struct crypto_session_params *csp, struct sec_desc *desc,
  107     struct cryptop *crp);
  108 
  109 /* MDEU */
  110 static bool     sec_mdeu_can_handle(u_int alg);
  111 static int      sec_mdeu_config(const struct crypto_session_params *csp,
  112     u_int *eu, u_int *mode, u_int *hashlen);
  113 static bool     sec_mdeu_newsession(const struct crypto_session_params *csp);
  114 static int      sec_mdeu_make_desc(struct sec_softc *sc,
  115     const struct crypto_session_params *csp, struct sec_desc *desc,
  116     struct cryptop *crp);
  117 
  118 static device_method_t sec_methods[] = {
  119         /* Device interface */
  120         DEVMETHOD(device_probe,         sec_probe),
  121         DEVMETHOD(device_attach,        sec_attach),
  122         DEVMETHOD(device_detach,        sec_detach),
  123 
  124         DEVMETHOD(device_suspend,       sec_suspend),
  125         DEVMETHOD(device_resume,        sec_resume),
  126         DEVMETHOD(device_shutdown,      sec_shutdown),
  127 
  128         /* Crypto methods */
  129         DEVMETHOD(cryptodev_probesession, sec_probesession),
  130         DEVMETHOD(cryptodev_newsession, sec_newsession),
  131         DEVMETHOD(cryptodev_process,    sec_process),
  132 
  133         DEVMETHOD_END
  134 };
  135 static driver_t sec_driver = {
  136         "sec",
  137         sec_methods,
  138         sizeof(struct sec_softc),
  139 };
  140 
  141 DRIVER_MODULE(sec, simplebus, sec_driver, 0, 0);
  142 MODULE_DEPEND(sec, crypto, 1, 1, 1);
  143 
  144 static struct sec_eu_methods sec_eus[] = {
  145         {
  146                 sec_aesu_newsession,
  147                 sec_aesu_make_desc,
  148         },
  149         {
  150                 sec_mdeu_newsession,
  151                 sec_mdeu_make_desc,
  152         },
  153         { NULL, NULL }
  154 };
  155 
  156 static inline void
  157 sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
  158 {
  159 
  160         /* Sync only if dma memory is valid */
  161         if (dma_mem->dma_vaddr != NULL)
  162                 bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
  163 }
  164 
  165 static inline void *
  166 sec_get_pointer_data(struct sec_desc *desc, u_int n)
  167 {
  168 
  169         return (desc->sd_ptr_dmem[n].dma_vaddr);
  170 }
  171 
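      /*
       * Probe: match the "fsl,sec2.0" OFW compatible string, map the register
       * window just long enough to read SEC_ID, and record whether this is a
       * SEC 2.x or 3.x controller before releasing the resource again.
       */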
  172 static int
  173 sec_probe(device_t dev)
  174 {
  175         struct sec_softc *sc;
  176         uint64_t id;
  177 
  178         if (!ofw_bus_status_okay(dev))
  179                 return (ENXIO);
  180 
  181         if (!ofw_bus_is_compatible(dev, "fsl,sec2.0"))
  182                 return (ENXIO);
  183 
  184         sc = device_get_softc(dev);
  185 
  186         sc->sc_rrid = 0;
  187         sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
  188             RF_ACTIVE);
  189 
  190         if (sc->sc_rres == NULL)
  191                 return (ENXIO);
  192 
  193         sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
  194         sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
  195 
  196         id = SEC_READ(sc, SEC_ID);
  197 
  198         bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
  199 
  200         switch (id) {
  201         case SEC_20_ID:
  202                 device_set_desc(dev, "Freescale Security Engine 2.0");
  203                 sc->sc_version = 2;
  204                 break;
  205         case SEC_30_ID:
  206                 device_set_desc(dev, "Freescale Security Engine 3.0");
  207                 sc->sc_version = 3;
  208                 break;
  209         case SEC_31_ID:
  210                 device_set_desc(dev, "Freescale Security Engine 3.1");
  211                 sc->sc_version = 3;
  212                 break;
  213         default:
  214                 device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id);
  215                 return (ENXIO);
  216         }
  217 
  218         return (0);
  219 }
  220 
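      /*
       * Attach: register with the crypto framework, map the register window,
       * set up the interrupt handler(s), allocate DMA memory for the hardware
       * descriptors and link tables, and reset and initialize the engine.
       */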
  221 static int
  222 sec_attach(device_t dev)
  223 {
  224         struct sec_softc *sc;
  225         struct sec_hw_lt *lt;
  226         int error = 0;
  227         int i;
  228 
  229         sc = device_get_softc(dev);
  230         sc->sc_dev = dev;
  231         sc->sc_blocked = 0;
  232         sc->sc_shutdown = 0;
  233 
  234         sc->sc_cid = crypto_get_driverid(dev, sizeof(struct sec_session),
  235             CRYPTOCAP_F_HARDWARE);
  236         if (sc->sc_cid < 0) {
  237                 device_printf(dev, "could not get crypto driver ID!\n");
  238                 return (ENXIO);
  239         }
  240 
  241         /* Init locks */
  242         mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
  243             "SEC Controller lock", MTX_DEF);
  244         mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
  245             "SEC Descriptors lock", MTX_DEF);
  246 
  247         /* Allocate I/O memory for SEC registers */
  248         sc->sc_rrid = 0;
  249         sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
  250             RF_ACTIVE);
  251 
  252         if (sc->sc_rres == NULL) {
  253                 device_printf(dev, "could not allocate I/O memory!\n");
  254                 goto fail1;
  255         }
  256 
  257         sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
  258         sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
  259 
  260         /* Setup interrupts */
  261         sc->sc_pri_irid = 0;
  262         error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
  263             &sc->sc_pri_irid, sec_primary_intr, "primary");
  264 
  265         if (error)
  266                 goto fail2;
  267 
  268         if (sc->sc_version == 3) {
  269                 sc->sc_sec_irid = 1;
  270                 error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
  271                     &sc->sc_sec_irid, sec_secondary_intr, "secondary");
  272 
  273                 if (error)
  274                         goto fail3;
  275         }
  276 
  277         /* Alloc DMA memory for descriptors and link tables */
  278         error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
  279             SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));
  280 
  281         if (error)
  282                 goto fail4;
  283 
  284         error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
  285             (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));
  286 
  287         if (error)
  288                 goto fail5;
  289 
  290         /* Fill in descriptors and link tables */
  291         for (i = 0; i < SEC_DESCRIPTORS; i++) {
  292                 sc->sc_desc[i].sd_desc =
  293                     (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
  294                 sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
  295                     (i * sizeof(struct sec_hw_desc));
  296         }
  297 
  298         for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
  299                 sc->sc_lt[i].sl_lt =
  300                     (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
  301                 sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
  302                     (i * sizeof(struct sec_hw_lt));
  303         }
  304 
  305         /* The last link table entry points back to the first (ring) */
  306         lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
  307         lt->shl_length = 0;
  308         lt->shl_r = 0;
  309         lt->shl_n = 1;
  310         lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;
  311 
  312         /* Initialize descriptor and link table queue counters */
  313         SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
  314         SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
  315         SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
  316         SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
  317         SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
  318         SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
  319         SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
  320         SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);
  321 
  322         /* Create masks for fast checks */
  323         sc->sc_int_error_mask = 0;
  324         for (i = 0; i < SEC_CHANNELS; i++)
  325                 sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));
  326 
  327         switch (sc->sc_version) {
  328         case 2:
  329                 sc->sc_channel_idle_mask =
  330                     (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
  331                     (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
  332                     (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
  333                     (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
  334                 break;
  335         case 3:
  336                 sc->sc_channel_idle_mask =
  337                     (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
  338                     (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
  339                     (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
  340                     (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
  341                 break;
  342         }
  343 
  344         /* Init hardware */
  345         error = sec_init(sc);
  346 
  347         if (error)
  348                 goto fail6;
  349 
  350         return (0);
  351 
  352 fail6:
  353         sec_free_dma_mem(&(sc->sc_lt_dmem));
  354 fail5:
  355         sec_free_dma_mem(&(sc->sc_desc_dmem));
  356 fail4:
  357         sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
  358             sc->sc_sec_irid, "secondary");
  359 fail3:
  360         sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
  361             sc->sc_pri_irid, "primary");
  362 fail2:
  363         bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
  364 fail1:
  365         mtx_destroy(&sc->sc_controller_lock);
  366         mtx_destroy(&sc->sc_descriptors_lock);
  367 
  368         return (ENXIO);
  369 }
  370 
  371 static int
  372 sec_detach(device_t dev)
  373 {
  374         struct sec_softc *sc = device_get_softc(dev);
  375         int i, error, timeout = SEC_TIMEOUT;
  376 
  377         /* Prepare the driver to shut down */
  378         SEC_LOCK(sc, descriptors);
  379         sc->sc_shutdown = 1;
  380         SEC_UNLOCK(sc, descriptors);
  381 
  382         /* Wait until all queued processing finishes */
  383         while (1) {
  384                 SEC_LOCK(sc, descriptors);
  385                 i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
  386                 SEC_UNLOCK(sc, descriptors);
  387 
  388                 if (i == 0)
  389                         break;
  390 
  391                 if (timeout < 0) {
  392                         device_printf(dev, "queue flush timeout!\n");
  393 
  394                         /* DMA may still be active - stop it */
  395                         for (i = 0; i < SEC_CHANNELS; i++)
  396                                 sec_channel_reset(sc, i, 1);
  397 
  398                         break;
  399                 }
  400 
  401                 timeout -= 1000;
  402                 DELAY(1000);
  403         }
  404 
  405         /* Disable interrupts */
  406         SEC_WRITE(sc, SEC_IER, 0);
  407 
  408         /* Unregister from OCF */
  409         crypto_unregister_all(sc->sc_cid);
  410 
  411         /* Free DMA memory */
  412         for (i = 0; i < SEC_DESCRIPTORS; i++)
  413                 SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));
  414 
  415         sec_free_dma_mem(&(sc->sc_lt_dmem));
  416         sec_free_dma_mem(&(sc->sc_desc_dmem));
  417 
  418         /* Release interrupts */
  419         sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
  420             sc->sc_pri_irid, "primary");
  421         sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
  422             sc->sc_sec_irid, "secondary");
  423 
  424         /* Release memory */
  425         if (sc->sc_rres) {
  426                 error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
  427                     sc->sc_rres);
  428                 if (error)
  429                         device_printf(dev, "bus_release_resource() failed for"
  430                             " I/O memory, error %d\n", error);
  431 
  432                 sc->sc_rres = NULL;
  433         }
  434 
  435         mtx_destroy(&sc->sc_controller_lock);
  436         mtx_destroy(&sc->sc_descriptors_lock);
  437 
  438         return (0);
  439 }
  440 
  441 static int
  442 sec_suspend(device_t dev)
  443 {
  444 
  445         return (0);
  446 }
  447 
  448 static int
  449 sec_resume(device_t dev)
  450 {
  451 
  452         return (0);
  453 }
  454 
  455 static int
  456 sec_shutdown(device_t dev)
  457 {
  458 
  459         return (0);
  460 }
  461 
  462 static int
  463 sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
  464     int *irid, driver_intr_t handler, const char *iname)
  465 {
  466         int error;
  467 
  468         (*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
  469             RF_ACTIVE);
  470 
  471         if ((*ires) == NULL) {
  472                 device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
  473                 return (ENXIO);
  474         }
  475 
  476         error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
  477             NULL, handler, sc, ihand);
  478 
  479         if (error) {
  480                 device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
  481                 if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
  482                         device_printf(sc->sc_dev, "could not release %s IRQ\n",
  483                             iname);
  484 
  485                 (*ires) = NULL;
  486                 return (error);
  487         }
  488 
  489         return (0);
  490 }
  491 
  492 static void
  493 sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
  494     int irid, const char *iname)
  495 {
  496         int error;
  497 
  498         if (ires == NULL)
  499                 return;
  500 
  501         error = bus_teardown_intr(sc->sc_dev, ires, ihand);
  502         if (error)
  503                 device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
  504                     " IRQ, error %d\n", iname, error);
  505 
  506         error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
  507         if (error)
  508                 device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
  509                     " IRQ, error %d\n", iname, error);
  510 }
  511 
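      /*
       * Primary interrupt handler: mark descriptors on channels that report
       * errors, acknowledge the interrupt, complete all finished descriptors
       * (verifying or copying back the digest as requested), and feed more
       * ready descriptors to the hardware.
       */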
  512 static void
  513 sec_primary_intr(void *arg)
  514 {
  515         struct sec_session *ses;
  516         struct sec_softc *sc = arg;
  517         struct sec_desc *desc;
  518         struct cryptop *crp;
  519         uint64_t isr;
  520         uint8_t hash[HASH_MAX_LEN];
  521         int i, wakeup = 0;
  522 
  523         SEC_LOCK(sc, controller);
  524 
  525         /* Check for errors */
  526         isr = SEC_READ(sc, SEC_ISR);
  527         if (isr & sc->sc_int_error_mask) {
  528                 /* Check each channel for error */
  529                 for (i = 0; i < SEC_CHANNELS; i++) {
  530                         if ((isr & SEC_INT_CH_ERR(i)) == 0)
  531                                 continue;
  532 
  533                         device_printf(sc->sc_dev,
  534                             "I/O error on channel %i!\n", i);
  535 
  536                         /* Find and mark problematic descriptor */
  537                         desc = sec_find_desc(sc, SEC_READ(sc,
  538                             SEC_CHAN_CDPR(i)));
  539 
  540                         if (desc != NULL)
  541                                 desc->sd_error = EIO;
  542 
  543                         /* Do partial channel reset */
  544                         sec_channel_reset(sc, i, 0);
  545                 }
  546         }
  547 
  548         /* ACK interrupt */
  549         SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);
  550 
  551         SEC_UNLOCK(sc, controller);
  552         SEC_LOCK(sc, descriptors);
  553 
  554         /* Handle processed descriptors */
  555         SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  556 
  557         while (SEC_QUEUED_DESC_CNT(sc) > 0) {
  558                 desc = SEC_GET_QUEUED_DESC(sc);
  559 
  560                 if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
  561                         SEC_PUT_BACK_QUEUED_DESC(sc);
  562                         break;
  563                 }
  564 
  565                 SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
  566                     BUS_DMASYNC_PREWRITE);
  567 
  568                 crp = desc->sd_crp;
  569                 crp->crp_etype = desc->sd_error;
  570                 if (crp->crp_etype == 0) {
  571                         ses = crypto_get_driver_session(crp->crp_session);
  572                         if (ses->ss_mlen != 0) {
  573                                 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
  574                                         crypto_copydata(crp,
  575                                             crp->crp_digest_start,
  576                                             ses->ss_mlen, hash);
  577                                         if (timingsafe_bcmp(
  578                                             desc->sd_desc->shd_digest,
  579                                             hash, ses->ss_mlen) != 0)
  580                                                 crp->crp_etype = EBADMSG;
  581                                 } else
  582                                         crypto_copyback(crp,
  583                                             crp->crp_digest_start,
  584                                             ses->ss_mlen,
  585                                             desc->sd_desc->shd_digest);
  586                         }
  587                 }
  588                 crypto_done(desc->sd_crp);
  589 
  590                 SEC_DESC_FREE_POINTERS(desc);
  591                 SEC_DESC_FREE_LT(sc, desc);
  592                 SEC_DESC_QUEUED2FREE(sc);
  593         }
  594 
  595         SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  596 
  597         if (!sc->sc_shutdown) {
  598                 wakeup = sc->sc_blocked;
  599                 sc->sc_blocked = 0;
  600         }
  601 
  602         SEC_UNLOCK(sc, descriptors);
  603 
  604         /* Enqueue ready descriptors in hardware */
  605         sec_enqueue(sc);
  606 
  607         if (wakeup)
  608                 crypto_unblock(sc->sc_cid, wakeup);
  609 }
  610 
  611 static void
  612 sec_secondary_intr(void *arg)
  613 {
  614         struct sec_softc *sc = arg;
  615 
  616         device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
  617         sec_primary_intr(arg);
  618 }
  619 
  620 static int
  621 sec_controller_reset(struct sec_softc *sc)
  622 {
  623         int timeout = SEC_TIMEOUT;
  624 
  625         /* Reset Controller */
  626         SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);
  627 
  628         while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
  629                 DELAY(1000);
  630                 timeout -= 1000;
  631 
  632                 if (timeout < 0) {
  633                         device_printf(sc->sc_dev, "timeout while waiting for "
  634                             "device reset!\n");
  635                         return (ETIMEDOUT);
  636                 }
  637         }
  638 
  639         return (0);
  640 }
  641 
  642 static int
  643 sec_channel_reset(struct sec_softc *sc, int channel, int full)
  644 {
  645         int timeout = SEC_TIMEOUT;
  646         uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
  647         uint64_t reg;
  648 
  649         /* Reset Channel */
  650         reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
  651         SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);
  652 
  653         while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
  654                 DELAY(1000);
  655                 timeout -= 1000;
  656 
  657                 if (timeout < 0) {
  658                         device_printf(sc->sc_dev, "timeout while waiting for "
  659                             "channel reset!\n");
  660                         return (ETIMEDOUT);
  661                 }
  662         }
  663 
  664         if (full) {
  665                 reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;
  666 
  667                 switch(sc->sc_version) {
  668                 case 2:
  669                         reg |= SEC_CHAN_CCR_CDWE;
  670                         break;
  671                 case 3:
  672                         reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
  673                         break;
  674                 }
  675 
  676                 SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
  677         }
  678 
  679         return (0);
  680 }
  681 
  682 static int
  683 sec_init(struct sec_softc *sc)
  684 {
  685         uint64_t reg;
  686         int error, i;
  687 
  688         /* Reset controller twice to clear all pending interrupts */
  689         error = sec_controller_reset(sc);
  690         if (error)
  691                 return (error);
  692 
  693         error = sec_controller_reset(sc);
  694         if (error)
  695                 return (error);
  696 
  697         /* Reset channels */
  698         for (i = 0; i < SEC_CHANNELS; i++) {
  699                 error = sec_channel_reset(sc, i, 1);
  700                 if (error)
  701                         return (error);
  702         }
  703 
  704         /* Enable Interrupts */
  705         reg = SEC_INT_ITO;
  706         for (i = 0; i < SEC_CHANNELS; i++)
  707                 reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);
  708 
  709         SEC_WRITE(sc, SEC_IER, reg);
  710 
  711         return (error);
  712 }
  713 
  714 static void
  715 sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  716 {
  717         struct sec_dma_mem *dma_mem = arg;
  718 
  719         if (error)
  720                 return;
  721 
  722         KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
  723         dma_mem->dma_paddr = segs->ds_addr;
  724 }
  725 
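      /*
       * busdma callback: convert the DMA segments of a mapped request into
       * SEC link table entries, honoring the offset and size recorded in the
       * sec_desc_map_info cookie.
       */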
  726 static void
  727 sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
  728     int error)
  729 {
  730         struct sec_desc_map_info *sdmi = arg;
  731         struct sec_softc *sc = sdmi->sdmi_sc;
  732         struct sec_lt *lt = NULL;
  733         bus_addr_t addr;
  734         bus_size_t size;
  735         int i;
  736 
  737         SEC_LOCK_ASSERT(sc, descriptors);
  738 
  739         if (error)
  740                 return;
  741 
  742         for (i = 0; i < nseg; i++) {
  743                 addr = segs[i].ds_addr;
  744                 size = segs[i].ds_len;
  745 
  746                 /* Skip requested offset */
  747                 if (sdmi->sdmi_offset >= size) {
  748                         sdmi->sdmi_offset -= size;
  749                         continue;
  750                 }
  751 
  752                 addr += sdmi->sdmi_offset;
  753                 size -= sdmi->sdmi_offset;
  754                 sdmi->sdmi_offset = 0;
  755 
  756                 /* Do not link more than requested */
  757                 if (sdmi->sdmi_size < size)
  758                         size = sdmi->sdmi_size;
  759 
  760                 lt = SEC_ALLOC_LT_ENTRY(sc);
  761                 lt->sl_lt->shl_length = size;
  762                 lt->sl_lt->shl_r = 0;
  763                 lt->sl_lt->shl_n = 0;
  764                 lt->sl_lt->shl_ptr = addr;
  765 
  766                 if (sdmi->sdmi_lt_first == NULL)
  767                         sdmi->sdmi_lt_first = lt;
  768 
  769                 sdmi->sdmi_lt_used += 1;
  770 
  771                 if ((sdmi->sdmi_size -= size) == 0)
  772                         break;
  773         }
  774 
  775         sdmi->sdmi_lt_last = lt;
  776 }
  777 
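      /*
       * Allocate a single, 32-bit addressable DMA buffer with its own busdma
       * tag and map; the physical address is saved by sec_alloc_dma_mem_cb().
       */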
  778 static int
  779 sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
  780     bus_size_t size)
  781 {
  782         int error;
  783 
  784         if (dma_mem->dma_vaddr != NULL)
  785                 return (EBUSY);
  786 
  787         error = bus_dma_tag_create(NULL,        /* parent */
  788                 SEC_DMA_ALIGNMENT, 0,           /* alignment, boundary */
  789                 BUS_SPACE_MAXADDR_32BIT,        /* lowaddr */
  790                 BUS_SPACE_MAXADDR,              /* highaddr */
  791                 NULL, NULL,                     /* filtfunc, filtfuncarg */
  792                 size, 1,                        /* maxsize, nsegments */
  793                 size, 0,                        /* maxsegsz, flags */
  794                 NULL, NULL,                     /* lockfunc, lockfuncarg */
  795                 &(dma_mem->dma_tag));           /* dmat */
  796 
  797         if (error) {
  798                 device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
  799                     " %i!\n", error);
  800                 goto err1;
  801         }
  802 
  803         error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
  804             BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));
  805 
  806         if (error) {
  807                 device_printf(sc->sc_dev, "failed to allocate DMA safe"
  808                     " memory, error %i!\n", error);
  809                 goto err2;
  810         }
  811 
  812         error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
  813                     dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
  814                     BUS_DMA_NOWAIT);
  815 
  816         if (error) {
  817                 device_printf(sc->sc_dev, "cannot get address of the DMA"
  818                     " memory, error %i\n", error);
  819                 goto err3;
  820         }
  821 
  822         dma_mem->dma_is_map = 0;
  823         return (0);
  824 
  825 err3:
  826         bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
  827 err2:
  828         bus_dma_tag_destroy(dma_mem->dma_tag);
  829 err1:
  830         dma_mem->dma_vaddr = NULL;
  831         return (error);
  832 }
  833 
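      /*
       * Map a crypto request buffer for DMA and describe it with link table
       * entries built by sec_dma_map_desc_cb(); the caller references the
       * resulting chain through a descriptor pointer with the jump bit set.
       */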
  834 static int
  835 sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
  836     struct cryptop *crp, bus_size_t size, struct sec_desc_map_info *sdmi)
  837 {
  838         int error;
  839 
  840         if (dma_mem->dma_vaddr != NULL)
  841                 return (EBUSY);
  842 
  843         switch (crp->crp_buf.cb_type) {
  844         case CRYPTO_BUF_CONTIG:
  845                 break;
  846         case CRYPTO_BUF_UIO:
  847                 size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
  848                 break;
  849         case CRYPTO_BUF_MBUF:
  850                 size = m_length(crp->crp_buf.cb_mbuf, NULL);
  851                 break;
  852         case CRYPTO_BUF_SINGLE_MBUF:
  853                 size = crp->crp_buf.cb_mbuf->m_len;
  854                 break;
  855         case CRYPTO_BUF_VMPAGE:
  856                 size = PAGE_SIZE - crp->crp_buf.cb_vm_page_offset;
  857                 break;
  858         default:
  859                 return (EINVAL);
  860         }
  861 
  862         error = bus_dma_tag_create(NULL,        /* parent */
  863                 SEC_DMA_ALIGNMENT, 0,           /* alignment, boundary */
  864                 BUS_SPACE_MAXADDR_32BIT,        /* lowaddr */
  865                 BUS_SPACE_MAXADDR,              /* highaddr */
  866                 NULL, NULL,                     /* filtfunc, filtfuncarg */
  867                 size,                           /* maxsize */
  868                 SEC_FREE_LT_CNT(sc),            /* nsegments */
  869                 SEC_MAX_DMA_BLOCK_SIZE, 0,      /* maxsegsz, flags */
  870                 NULL, NULL,                     /* lockfunc, lockfuncarg */
  871                 &(dma_mem->dma_tag));           /* dmat */
  872 
  873         if (error) {
  874                 device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
  875                     " %i!\n", error);
  876                 dma_mem->dma_vaddr = NULL;
  877                 return (error);
  878         }
  879 
  880         error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));
  881 
  882         if (error) {
  883                 device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
  884                     "\n", error);
  885                 bus_dma_tag_destroy(dma_mem->dma_tag);
  886                 return (error);
  887         }
  888 
  889         error = bus_dmamap_load_crp(dma_mem->dma_tag, dma_mem->dma_map, crp,
  890             sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
  891 
  892         if (error) {
  893                 device_printf(sc->sc_dev, "cannot get address of the DMA"
  894                     " memory, error %i!\n", error);
  895                 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
  896                 bus_dma_tag_destroy(dma_mem->dma_tag);
  897                 return (error);
  898         }
  899 
  900         dma_mem->dma_is_map = 1;
  901         dma_mem->dma_vaddr = crp;
  902 
  903         return (0);
  904 }
  905 
  906 static void
  907 sec_free_dma_mem(struct sec_dma_mem *dma_mem)
  908 {
  909 
  910         /* Check for double free */
  911         if (dma_mem->dma_vaddr == NULL)
  912                 return;
  913 
  914         bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);
  915 
  916         if (dma_mem->dma_is_map)
  917                 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
  918         else
  919                 bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
  920                     dma_mem->dma_map);
  921 
  922         bus_dma_tag_destroy(dma_mem->dma_tag);
  923         dma_mem->dma_vaddr = NULL;
  924 }
  925 
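      /*
       * Return the channel currently assigned to the given execution unit,
       * or -1 if the EU is not assigned to any channel.
       */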
  926 static int
  927 sec_eu_channel(struct sec_softc *sc, int eu)
  928 {
  929         uint64_t reg;
  930         int channel = 0;
  931 
  932         SEC_LOCK_ASSERT(sc, controller);
  933 
  934         reg = SEC_READ(sc, SEC_EUASR);
  935 
  936         switch (eu) {
  937         case SEC_EU_AFEU:
  938                 channel = SEC_EUASR_AFEU(reg);
  939                 break;
  940         case SEC_EU_DEU:
  941                 channel = SEC_EUASR_DEU(reg);
  942                 break;
  943         case SEC_EU_MDEU_A:
  944         case SEC_EU_MDEU_B:
  945                 channel = SEC_EUASR_MDEU(reg);
  946                 break;
  947         case SEC_EU_RNGU:
  948                 channel = SEC_EUASR_RNGU(reg);
  949                 break;
  950         case SEC_EU_PKEU:
  951                 channel = SEC_EUASR_PKEU(reg);
  952                 break;
  953         case SEC_EU_AESU:
  954                 channel = SEC_EUASR_AESU(reg);
  955                 break;
  956         case SEC_EU_KEU:
  957                 channel = SEC_EUASR_KEU(reg);
  958                 break;
  959         case SEC_EU_CRCU:
  960                 channel = SEC_EUASR_CRCU(reg);
  961                 break;
  962         }
  963 
  964         return (channel - 1);
  965 }
  966 
  967 static int
  968 sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
  969 {
  970         u_int fflvl = SEC_MAX_FIFO_LEVEL;
  971         uint64_t reg;
  972         int i;
  973 
  974         SEC_LOCK_ASSERT(sc, controller);
  975 
  976         /* Find a free channel if the caller has not specified one */
  977         if (channel < 0) {
  978                 for (i = 0; i < SEC_CHANNELS; i++) {
  979                         reg = SEC_READ(sc, SEC_CHAN_CSR(i));
  980 
  981                         if ((reg & sc->sc_channel_idle_mask) == 0) {
  982                                 channel = i;
  983                                 break;
  984                         }
  985                 }
  986         }
  987 
  988         /* There is no free channel */
  989         if (channel < 0)
  990                 return (-1);
  991 
  992         /* Check FIFO level on selected channel */
  993         reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
  994 
  995         switch(sc->sc_version) {
  996         case 2:
  997                 fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
  998                 break;
  999         case 3:
 1000                 fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
 1001                 break;
 1002         }
 1003 
 1004         if (fflvl >= SEC_MAX_FIFO_LEVEL)
 1005                 return (-1);
 1006 
 1007         /* Enqueue descriptor in channel */
 1008         SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
 1009 
 1010         return (channel);
 1011 }
 1012 
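      /*
       * Push ready descriptors into the hardware FIFOs, preferring the
       * channel that already owns the descriptor's execution unit(s).
       */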
 1013 static void
 1014 sec_enqueue(struct sec_softc *sc)
 1015 {
 1016         struct sec_desc *desc;
 1017         int ch0, ch1;
 1018 
 1019         SEC_LOCK(sc, descriptors);
 1020         SEC_LOCK(sc, controller);
 1021 
 1022         while (SEC_READY_DESC_CNT(sc) > 0) {
 1023                 desc = SEC_GET_READY_DESC(sc);
 1024 
 1025                 ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
 1026                 ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);
 1027 
 1028                 /*
 1029                  * Both EUs are used by the same channel.
 1030                  * Enqueue the descriptor in that channel.
 1031                  */
 1032                 if (ch0 >= 0 && ch0 == ch1) {
 1033                         if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
 1034                                 SEC_DESC_READY2QUEUED(sc);
 1035                                 continue;
 1036                         }
 1037                 }
 1038 
 1039                 /*
 1040                  * Only one EU is free.
 1041                  * Enqueue the descriptor in the busy EU's channel.
 1042                  */
 1043                 if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
 1044                         if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
 1045                             >= 0) {
 1046                                 SEC_DESC_READY2QUEUED(sc);
 1047                                 continue;
 1048                         }
 1049                 }
 1050 
 1051                 /*
 1052                  * Both EUs are free.
 1053                  * Enqueue the descriptor in the first free channel.
 1054                  */
 1055                 if (ch0 < 0 && ch1 < 0) {
 1056                         if (sec_enqueue_desc(sc, desc, -1) >= 0) {
 1057                                 SEC_DESC_READY2QUEUED(sc);
 1058                                 continue;
 1059                         }
 1060                 }
 1061 
 1062                 /* The current descriptor cannot be queued right now */
 1063                 SEC_PUT_BACK_READY_DESC(sc);
 1064                 break;
 1065         }
 1066 
 1067         SEC_UNLOCK(sc, controller);
 1068         SEC_UNLOCK(sc, descriptors);
 1069 }
 1070 
 1071 static struct sec_desc *
 1072 sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
 1073 {
 1074         struct sec_desc *desc = NULL;
 1075         int i;
 1076 
 1077         SEC_LOCK_ASSERT(sc, descriptors);
 1078 
 1079         for (i = 0; i < SEC_DESCRIPTORS; i++) {
 1080                 if (sc->sc_desc[i].sd_desc_paddr == paddr) {
 1081                         desc = &(sc->sc_desc[i]);
 1082                         break;
 1083                 }
 1084         }
 1085 
 1086         return (desc);
 1087 }
 1088 
 1089 static int
 1090 sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
 1091     bus_addr_t data, bus_size_t dsize)
 1092 {
 1093         struct sec_hw_desc_ptr *ptr;
 1094 
 1095         SEC_LOCK_ASSERT(sc, descriptors);
 1096 
 1097         ptr = &(desc->sd_desc->shd_pointer[n]);
 1098         ptr->shdp_length = dsize;
 1099         ptr->shdp_extent = 0;
 1100         ptr->shdp_j = 0;
 1101         ptr->shdp_ptr = data;
 1102 
 1103         return (0);
 1104 }
 1105 
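      /*
       * Build descriptor pointer n from a crypto request buffer: map the
       * buffer into link table entries, terminate the chain with the return
       * bit, and reference it with the jump bit set.
       */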
 1106 static int
 1107 sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
 1108     u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize)
 1109 {
 1110         struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
 1111         struct sec_hw_desc_ptr *ptr;
 1112         int error;
 1113 
 1114         SEC_LOCK_ASSERT(sc, descriptors);
 1115 
 1116         error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), crp, dsize,
 1117             &sdmi);
 1118 
 1119         if (error)
 1120                 return (error);
 1121 
 1122         sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
 1123         desc->sd_lt_used += sdmi.sdmi_lt_used;
 1124 
 1125         ptr = &(desc->sd_desc->shd_pointer[n]);
 1126         ptr->shdp_length = dsize;
 1127         ptr->shdp_extent = 0;
 1128         ptr->shdp_j = 1;
 1129         ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;
 1130 
 1131         return (0);
 1132 }
 1133 
 1134 static bool
 1135 sec_cipher_supported(const struct crypto_session_params *csp)
 1136 {
 1137 
 1138         switch (csp->csp_cipher_alg) {
 1139         case CRYPTO_AES_CBC:
 1140                 /* AESU */
 1141                 if (csp->csp_ivlen != AES_BLOCK_LEN)
 1142                         return (false);
 1143                 break;
 1144         default:
 1145                 return (false);
 1146         }
 1147 
 1148         if (csp->csp_cipher_klen == 0 || csp->csp_cipher_klen > SEC_MAX_KEY_LEN)
 1149                 return (false);
 1150 
 1151         return (true);
 1152 }
 1153 
 1154 static bool
 1155 sec_auth_supported(struct sec_softc *sc,
 1156     const struct crypto_session_params *csp)
 1157 {
 1158 
 1159         switch (csp->csp_auth_alg) {
 1160         case CRYPTO_SHA2_384_HMAC:
 1161         case CRYPTO_SHA2_512_HMAC:
 1162                 if (sc->sc_version < 3)
 1163                         return (false);
 1164                 /* FALLTHROUGH */
 1165         case CRYPTO_SHA1_HMAC:
 1166         case CRYPTO_SHA2_256_HMAC:
 1167                 if (csp->csp_auth_klen > SEC_MAX_KEY_LEN)
 1168                         return (false);
 1169                 break;
 1170         case CRYPTO_SHA1:
 1171                 break;
 1172         default:
 1173                 return (false);
 1174         }
 1175         return (true);
 1176 }
 1177 
 1178 static int
 1179 sec_probesession(device_t dev, const struct crypto_session_params *csp)
 1180 {
 1181         struct sec_softc *sc = device_get_softc(dev);
 1182 
 1183         if (csp->csp_flags != 0)
 1184                 return (EINVAL);
 1185         switch (csp->csp_mode) {
 1186         case CSP_MODE_DIGEST:
 1187                 if (!sec_auth_supported(sc, csp))
 1188                         return (EINVAL);
 1189                 break;
 1190         case CSP_MODE_CIPHER:
 1191                 if (!sec_cipher_supported(csp))
 1192                         return (EINVAL);
 1193                 break;
 1194         case CSP_MODE_ETA:
 1195                 if (!sec_auth_supported(sc, csp) || !sec_cipher_supported(csp))
 1196                         return (EINVAL);
 1197                 break;
 1198         default:
 1199                 return (EINVAL);
 1200         }
 1201         return (CRYPTODEV_PROBE_HARDWARE);
 1202 }
 1203 
 1204 static int
 1205 sec_newsession(device_t dev, crypto_session_t cses,
 1206     const struct crypto_session_params *csp)
 1207 {
 1208         struct sec_eu_methods *eu = sec_eus;
 1209         struct sec_session *ses;
 1210 
 1211         ses = crypto_get_driver_session(cses);
 1212 
 1213         /* Find EU for this session */
 1214         while (eu->sem_make_desc != NULL) {
 1215                 if (eu->sem_newsession(csp))
 1216                         break;
 1217                 eu++;
 1218         }
 1219         KASSERT(eu->sem_make_desc != NULL, ("failed to find eu for session"));
 1220 
 1221         /* Save cipher key */
 1222         if (csp->csp_cipher_key != NULL)
 1223                 memcpy(ses->ss_key, csp->csp_cipher_key, csp->csp_cipher_klen);
 1224 
 1225         /* Save digest key */
 1226         if (csp->csp_auth_key != NULL)
 1227                 memcpy(ses->ss_mkey, csp->csp_auth_key, csp->csp_auth_klen);
 1228 
 1229         if (csp->csp_auth_alg != 0) {
 1230                 if (csp->csp_auth_mlen == 0)
 1231                         ses->ss_mlen = crypto_auth_hash(csp)->hashsize;
 1232                 else
 1233                         ses->ss_mlen = csp->csp_auth_mlen;
 1234         }
 1235 
 1236         return (0);
 1237 }
 1238 
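      /*
       * Process a crypto request: take a free descriptor, copy in the IV and
       * keys, have the execution unit's make_desc routine fill in the
       * descriptor, and hand it to the hardware via sec_enqueue().
       */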
 1239 static int
 1240 sec_process(device_t dev, struct cryptop *crp, int hint)
 1241 {
 1242         struct sec_softc *sc = device_get_softc(dev);
 1243         struct sec_desc *desc = NULL;
 1244         const struct crypto_session_params *csp;
 1245         struct sec_session *ses;
 1246         int error = 0;
 1247 
 1248         ses = crypto_get_driver_session(crp->crp_session);
 1249         csp = crypto_get_params(crp->crp_session);
 1250 
 1251         /* Check for input length */
 1252         if (crypto_buffer_len(&crp->crp_buf) > SEC_MAX_DMA_BLOCK_SIZE) {
 1253                 crp->crp_etype = E2BIG;
 1254                 crypto_done(crp);
 1255                 return (0);
 1256         }
 1257 
 1258         SEC_LOCK(sc, descriptors);
 1259         SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1260 
 1261         /* Block the driver if no descriptors are free or we are going down */
 1262         if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
 1263                 sc->sc_blocked |= CRYPTO_SYMQ;
 1264                 SEC_UNLOCK(sc, descriptors);
 1265                 return (ERESTART);
 1266         }
 1267 
 1268         /* Prepare descriptor */
 1269         desc = SEC_GET_FREE_DESC(sc);
 1270         desc->sd_lt_used = 0;
 1271         desc->sd_error = 0;
 1272         desc->sd_crp = crp;
 1273 
 1274         if (csp->csp_cipher_alg != 0)
 1275                 crypto_read_iv(crp, desc->sd_desc->shd_iv);
 1276 
 1277         if (crp->crp_cipher_key != NULL)
 1278                 memcpy(ses->ss_key, crp->crp_cipher_key, csp->csp_cipher_klen);
 1279 
 1280         if (crp->crp_auth_key != NULL)
 1281                 memcpy(ses->ss_mkey, crp->crp_auth_key, csp->csp_auth_klen);
 1282 
 1283         memcpy(desc->sd_desc->shd_key, ses->ss_key, csp->csp_cipher_klen);
 1284         memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, csp->csp_auth_klen);
 1285 
 1286         error = ses->ss_eu->sem_make_desc(sc, csp, desc, crp);
 1287 
 1288         if (error) {
 1289                 SEC_DESC_FREE_POINTERS(desc);
 1290                 SEC_DESC_PUT_BACK_LT(sc, desc);
 1291                 SEC_PUT_BACK_FREE_DESC(sc);
 1292                 SEC_UNLOCK(sc, descriptors);
 1293                 crp->crp_etype = error;
 1294                 crypto_done(crp);
 1295                 return (0);
 1296         }
 1297 
 1298         /*
 1299          * Skip the DONE interrupt if this is not the last request in a
 1300          * burst, but only on SEC 3.X. On SEC 2.X we have to enable DONE
 1301          * signaling on each descriptor.
 1302          */
 1303         if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
 1304                 desc->sd_desc->shd_dn = 0;
 1305         else
 1306                 desc->sd_desc->shd_dn = 1;
 1307 
 1308         SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1309         SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
 1310             BUS_DMASYNC_POSTWRITE);
 1311         SEC_DESC_FREE2READY(sc);
 1312         SEC_UNLOCK(sc, descriptors);
 1313 
 1314         /* Enqueue ready descriptors in hardware */
 1315         sec_enqueue(sc);
 1316 
 1317         return (0);
 1318 }
 1319 
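      /*
       * Fill in the seven pointers of a common non-snooping (cipher-only)
       * descriptor: IV in, cipher key, and payload in/out.
       */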
 1320 static int
 1321 sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
 1322     const struct crypto_session_params *csp, struct cryptop *crp)
 1323 {
 1324         struct sec_hw_desc *hd = desc->sd_desc;
 1325         int error;
 1326 
 1327         hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
 1328         hd->shd_eu_sel1 = SEC_EU_NONE;
 1329         hd->shd_mode1 = 0;
 1330 
 1331         /* Pointer 0: NULL */
 1332         error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
 1333         if (error)
 1334                 return (error);
 1335 
 1336         /* Pointer 1: IV IN */
 1337         error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
 1338             offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen);
 1339         if (error)
 1340                 return (error);
 1341 
 1342         /* Pointer 2: Cipher Key */
 1343         error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
 1344             offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen);
 1345         if (error)
 1346                 return (error);
 1347 
 1348         /* Pointer 3: Data IN */
 1349         error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start,
 1350             crp->crp_payload_length);
 1351         if (error)
 1352                 return (error);
 1353 
 1354         /* Pointer 4: Data OUT */
 1355         error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start,
 1356             crp->crp_payload_length);
 1357         if (error)
 1358                 return (error);
 1359 
 1360         /* Pointer 5: IV OUT (Not used: NULL) */
 1361         error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
 1362         if (error)
 1363                 return (error);
 1364 
 1365         /* Pointer 6: NULL */
 1366         error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
 1367 
 1368         return (error);
 1369 }
 1370 
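      /*
       * Fill in an HMAC-snooping descriptor used for ETA sessions: HMAC key,
       * AAD, cipher key, IV, payload in/out, and the digest output.
       */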
 1371 static int
 1372 sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
 1373     const struct crypto_session_params *csp, struct cryptop *crp)
 1374 {
 1375         struct sec_hw_desc *hd = desc->sd_desc;
 1376         u_int eu, mode, hashlen;
 1377         int error;
 1378 
 1379         error = sec_mdeu_config(csp, &eu, &mode, &hashlen);
 1380         if (error)
 1381                 return (error);
 1382 
 1383         hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
 1384         hd->shd_eu_sel1 = eu;
 1385         hd->shd_mode1 = mode;
 1386 
 1387         /* Pointer 0: HMAC Key */
 1388         error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
 1389             offsetof(struct sec_hw_desc, shd_mkey), csp->csp_auth_klen);
 1390         if (error)
 1391                 return (error);
 1392 
 1393         /* Pointer 1: HMAC-Only Data IN */
 1394         error = sec_make_pointer(sc, desc, 1, crp, crp->crp_aad_start,
 1395             crp->crp_aad_length);
 1396         if (error)
 1397                 return (error);
 1398 
 1399         /* Pointer 2: Cipher Key */
 1400         error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
 1401             offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen);
 1402         if (error)
 1403                 return (error);
 1404 
 1405         /* Pointer 3: IV IN */
 1406         error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
 1407             offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen);
 1408         if (error)
 1409                 return (error);
 1410 
 1411         /* Pointer 4: Data IN */
 1412         error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start,
 1413             crp->crp_payload_length);
 1414         if (error)
 1415                 return (error);
 1416 
 1417         /* Pointer 5: Data OUT */
 1418         error = sec_make_pointer(sc, desc, 5, crp, crp->crp_payload_start,
 1419             crp->crp_payload_length);
 1420         if (error)
 1421                 return (error);
 1422 
 1423         /* Pointer 6: HMAC OUT */
 1424         error = sec_make_pointer_direct(sc, desc, 6, desc->sd_desc_paddr +
 1425             offsetof(struct sec_hw_desc, shd_digest), hashlen);
 1426 
 1427         return (error);
 1428 }
 1429 
 1430 /* AESU */
 1431 
 1432 static bool
 1433 sec_aesu_newsession(const struct crypto_session_params *csp)
 1434 {
 1435 
 1436         return (csp->csp_cipher_alg == CRYPTO_AES_CBC);
 1437 }
 1438 
 1439 static int
 1440 sec_aesu_make_desc(struct sec_softc *sc,
 1441     const struct crypto_session_params *csp, struct sec_desc *desc,
 1442     struct cryptop *crp)
 1443 {
 1444         struct sec_hw_desc *hd = desc->sd_desc;
 1445         int error;
 1446 
 1447         hd->shd_eu_sel0 = SEC_EU_AESU;
 1448         hd->shd_mode0 = SEC_AESU_MODE_CBC;
 1449 
 1450         if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
 1451                 hd->shd_mode0 |= SEC_AESU_MODE_ED;
 1452                 hd->shd_dir = 0;
 1453         } else
 1454                 hd->shd_dir = 1;
 1455 
 1456         if (csp->csp_mode == CSP_MODE_ETA)
 1457                 error = sec_build_common_s_desc(sc, desc, csp, crp);
 1458         else
 1459                 error = sec_build_common_ns_desc(sc, desc, csp, crp);
 1460 
 1461         return (error);
 1462 }
 1463 
 1464 /* MDEU */
 1465 
 1466 static bool
 1467 sec_mdeu_can_handle(u_int alg)
 1468 {
 1469         switch (alg) {
 1470         case CRYPTO_SHA1:
 1471         case CRYPTO_SHA1_HMAC:
 1472         case CRYPTO_SHA2_256_HMAC:
 1473         case CRYPTO_SHA2_384_HMAC:
 1474         case CRYPTO_SHA2_512_HMAC:
 1475                 return (true);
 1476         default:
 1477                 return (false);
 1478         }
 1479 }
 1480 
 1481 static int
 1482 sec_mdeu_config(const struct crypto_session_params *csp, u_int *eu, u_int *mode,
 1483     u_int *hashlen)
 1484 {
 1485 
 1486         *mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
 1487         *eu = SEC_EU_NONE;
 1488 
 1489         switch (csp->csp_auth_alg) {
 1490         case CRYPTO_SHA1_HMAC:
 1491                 *mode |= SEC_MDEU_MODE_HMAC;
 1492                 /* FALLTHROUGH */
 1493         case CRYPTO_SHA1:
 1494                 *eu = SEC_EU_MDEU_A;
 1495                 *mode |= SEC_MDEU_MODE_SHA1;
 1496                 *hashlen = SHA1_HASH_LEN;
 1497                 break;
 1498         case CRYPTO_SHA2_256_HMAC:
 1499                 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
 1500                 *eu = SEC_EU_MDEU_A;
 1501                 break;
 1502         case CRYPTO_SHA2_384_HMAC:
 1503                 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
 1504                 *eu = SEC_EU_MDEU_B;
 1505                 break;
 1506         case CRYPTO_SHA2_512_HMAC:
 1507                 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
 1508                 *eu = SEC_EU_MDEU_B;
 1509                 break;
 1510         default:
 1511                 return (EINVAL);
 1512         }
 1513 
 1514         if (*mode & SEC_MDEU_MODE_HMAC)
 1515                 *hashlen = SEC_HMAC_HASH_LEN;
 1516 
 1517         return (0);
 1518 }
 1519 
 1520 static bool
 1521 sec_mdeu_newsession(const struct crypto_session_params *csp)
 1522 {
 1523 
 1524         return (sec_mdeu_can_handle(csp->csp_auth_alg));
 1525 }
 1526 
 1527 static int
 1528 sec_mdeu_make_desc(struct sec_softc *sc,
 1529     const struct crypto_session_params *csp,
 1530     struct sec_desc *desc, struct cryptop *crp)
 1531 {
 1532         struct sec_hw_desc *hd = desc->sd_desc;
 1533         u_int eu, mode, hashlen;
 1534         int error;
 1535 
 1536         error = sec_mdeu_config(csp, &eu, &mode, &hashlen);
 1537         if (error)
 1538                 return (error);
 1539 
 1540         hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
 1541         hd->shd_eu_sel0 = eu;
 1542         hd->shd_mode0 = mode;
 1543         hd->shd_eu_sel1 = SEC_EU_NONE;
 1544         hd->shd_mode1 = 0;
 1545 
 1546         /* Pointer 0: NULL */
 1547         error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
 1548         if (error)
 1549                 return (error);
 1550 
 1551         /* Pointer 1: Context In (Not used: NULL) */
 1552         error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
 1553         if (error)
 1554                 return (error);
 1555 
 1556         /* Pointer 2: HMAC Key (or NULL, depending on digest type) */
 1557         if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
 1558                 error = sec_make_pointer_direct(sc, desc, 2,
 1559                     desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
 1560                     shd_mkey), csp->csp_auth_klen);
 1561         else
 1562                 error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
 1563 
 1564         if (error)
 1565                 return (error);
 1566 
 1567         /* Pointer 3: Input Data */
 1568         error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start,
 1569             crp->crp_payload_length);
 1570         if (error)
 1571                 return (error);
 1572 
 1573         /* Pointer 4: NULL */
 1574         error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
 1575         if (error)
 1576                 return (error);
 1577 
 1578         /* Pointer 5: Hash out */
 1579         error = sec_make_pointer_direct(sc, desc, 5, desc->sd_desc_paddr +
 1580             offsetof(struct sec_hw_desc, shd_digest), hashlen);
 1581         if (error)
 1582                 return (error);
 1583 
 1584         /* Pointer 6: NULL */
 1585         error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
 1586 
 1587         return (0);
 1588 }
