FreeBSD/Linux Kernel Cross Reference
sys/opencrypto/crypto.c


    1 /*-
    2  * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice, this list of conditions and the following disclaimer.
    9  * 2. Redistributions in binary form must reproduce the above copyright
   10  *    notice, this list of conditions and the following disclaimer in the
   11  *    documentation and/or other materials provided with the distribution.
   12  *
   13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   14  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   15  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   16  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   17  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   18  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   19  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   20  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   22  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   23  */
   24 
   25 #include <sys/cdefs.h>
   26 __FBSDID("$FreeBSD$");
   27 
   28 /*
   29  * Cryptographic Subsystem.
   30  *
    31  * This code is derived from the OpenBSD Cryptographic Framework (OCF)
   32  * that has the copyright shown below.  Very little of the original
   33  * code remains.
   34  */
   35 
   36 /*-
   37  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
   38  *
   39  * This code was written by Angelos D. Keromytis in Athens, Greece, in
   40  * February 2000. Network Security Technologies Inc. (NSTI) kindly
   41  * supported the development of this code.
   42  *
   43  * Copyright (c) 2000, 2001 Angelos D. Keromytis
   44  *
   45  * Permission to use, copy, and modify this software with or without fee
   46  * is hereby granted, provided that this entire notice is included in
   47  * all source code copies of any software which is or includes a copy or
   48  * modification of this software.
   49  *
   50  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
   51  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
   52  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
   53  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
   54  * PURPOSE.
   55  */
   56 
   57 #define CRYPTO_TIMING                           /* enable timing support */
   58 
   59 #include "opt_ddb.h"
   60 
   61 #include <sys/param.h>
   62 #include <sys/systm.h>
   63 #include <sys/eventhandler.h>
   64 #include <sys/kernel.h>
   65 #include <sys/kthread.h>
   66 #include <sys/linker.h>
   67 #include <sys/lock.h>
   68 #include <sys/module.h>
   69 #include <sys/mutex.h>
   70 #include <sys/malloc.h>
   71 #include <sys/proc.h>
   72 #include <sys/sdt.h>
   73 #include <sys/sysctl.h>
   74 
   75 #include <ddb/ddb.h>
   76 
   77 #include <vm/uma.h>
   78 #include <crypto/intake.h>
   79 #include <opencrypto/cryptodev.h>
   80 #include <opencrypto/xform.h>                   /* XXX for M_XDATA */
   81 
   82 #include <sys/kobj.h>
   83 #include <sys/bus.h>
   84 #include "cryptodev_if.h"
   85 
   86 #if defined(__i386__) || defined(__amd64__)
   87 #include <machine/pcb.h>
   88 #endif
   89 #include <machine/metadata.h>
   90 
   91 SDT_PROVIDER_DEFINE(opencrypto);
   92 
   93 /*
   94  * Crypto drivers register themselves by allocating a slot in the
   95  * crypto_drivers table with crypto_get_driverid() and then registering
   96  * each algorithm they support with crypto_register() and crypto_kregister().
   97  */
   98 static  struct mtx crypto_drivers_mtx;          /* lock on driver table */
   99 #define CRYPTO_DRIVER_LOCK()    mtx_lock(&crypto_drivers_mtx)
  100 #define CRYPTO_DRIVER_UNLOCK()  mtx_unlock(&crypto_drivers_mtx)
  101 #define CRYPTO_DRIVER_ASSERT()  mtx_assert(&crypto_drivers_mtx, MA_OWNED)
  102 
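/*
 * [Illustrative sketch, not part of crypto.c]  A minimal example of the
 * registration sequence described above, for a hypothetical "mydrv"
 * driver.  The device methods, softc and algorithm choices are assumptions
 * made purely for illustration.
 */
#if 0	/* example only */
static int32_t mydrv_cid;

static int
mydrv_attach(device_t dev)
{
	/* Reserve a slot in the driver table; -1 indicates failure. */
	mydrv_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (mydrv_cid < 0)
		return (ENXIO);

	/* Advertise each supported transform (maxoplen is unused, pass 0). */
	crypto_register(mydrv_cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(mydrv_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
	return (0);
}

static int
mydrv_detach(device_t dev)
{
	/* Tear down all algorithm registrations in one call. */
	crypto_unregister_all(mydrv_cid);
	return (0);
}
#endif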
  103 /*
  104  * Crypto device/driver capabilities structure.
  105  *
  106  * Synchronization:
  107  * (d) - protected by CRYPTO_DRIVER_LOCK()
  108  * (q) - protected by CRYPTO_Q_LOCK()
   109  * Fields not tagged are read-only.
  110  */
  111 struct cryptocap {
  112         device_t        cc_dev;                 /* (d) device/driver */
  113         u_int32_t       cc_sessions;            /* (d) # of sessions */
   114         u_int32_t       cc_koperations;         /* (d) # of asym operations */
  115         /*
  116          * Largest possible operator length (in bits) for each type of
  117          * encryption algorithm. XXX not used
  118          */
  119         u_int16_t       cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
  120         u_int8_t        cc_alg[CRYPTO_ALGORITHM_MAX + 1];
  121         u_int8_t        cc_kalg[CRK_ALGORITHM_MAX + 1];
  122 
  123         int             cc_flags;               /* (d) flags */
  124 #define CRYPTOCAP_F_CLEANUP     0x80000000      /* needs resource cleanup */
  125         int             cc_qblocked;            /* (q) symmetric q blocked */
  126         int             cc_kqblocked;           /* (q) asymmetric q blocked */
  127 };
  128 static  struct cryptocap *crypto_drivers = NULL;
  129 static  int crypto_drivers_num = 0;
  130 
  131 /*
  132  * There are two queues for crypto requests; one for symmetric (e.g.
   133  * cipher) operations and one for asymmetric (e.g. MOD) operations.
   134  * A single mutex is used to lock access to both queues.  We could
   135  * have one per queue, but having one simplifies handling of block/unblock
  136  * operations.
  137  */
  138 static  int crp_sleep = 0;
  139 static  TAILQ_HEAD(,cryptop) crp_q;             /* request queues */
  140 static  TAILQ_HEAD(,cryptkop) crp_kq;
  141 static  struct mtx crypto_q_mtx;
  142 #define CRYPTO_Q_LOCK()         mtx_lock(&crypto_q_mtx)
  143 #define CRYPTO_Q_UNLOCK()       mtx_unlock(&crypto_q_mtx)
  144 
  145 /*
  146  * There are two queues for processing completed crypto requests; one
  147  * for the symmetric and one for the asymmetric ops.  We only need one
  148  * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
  149  * mutex is used to lock access to both queues.  Note that this lock
   150  * must be separate from the lock on request queues to ensure driver
  151  * callbacks don't generate lock order reversals.
  152  */
  153 static  TAILQ_HEAD(,cryptop) crp_ret_q;         /* callback queues */
  154 static  TAILQ_HEAD(,cryptkop) crp_ret_kq;
  155 static  struct mtx crypto_ret_q_mtx;
  156 #define CRYPTO_RETQ_LOCK()      mtx_lock(&crypto_ret_q_mtx)
  157 #define CRYPTO_RETQ_UNLOCK()    mtx_unlock(&crypto_ret_q_mtx)
  158 #define CRYPTO_RETQ_EMPTY()     (TAILQ_EMPTY(&crp_ret_q) && TAILQ_EMPTY(&crp_ret_kq))
  159 
  160 static  uma_zone_t cryptop_zone;
  161 static  uma_zone_t cryptodesc_zone;
  162 
  163 int     crypto_userasymcrypto = 1;      /* userland may do asym crypto reqs */
  164 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
  165            &crypto_userasymcrypto, 0,
  166            "Enable/disable user-mode access to asymmetric crypto support");
  167 int     crypto_devallowsoft = 0;        /* only use hardware crypto */
  168 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
  169            &crypto_devallowsoft, 0,
  170            "Enable/disable use of software crypto by /dev/crypto");
  171 
  172 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
  173 
  174 static  void crypto_proc(void);
  175 static  struct proc *cryptoproc;
  176 static  void crypto_ret_proc(void);
  177 static  struct proc *cryptoretproc;
  178 static  void crypto_destroy(void);
  179 static  int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
  180 static  int crypto_kinvoke(struct cryptkop *krp, int flags);
  181 
  182 static  struct cryptostats cryptostats;
  183 SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
  184             cryptostats, "Crypto system statistics");
  185 
  186 #ifdef CRYPTO_TIMING
  187 static  int crypto_timing = 0;
  188 SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
  189            &crypto_timing, 0, "Enable/disable crypto timing support");
  190 #endif
  191 
  192 /* Try to avoid directly exposing the key buffer as a symbol */
  193 static struct keybuf *keybuf;
  194 
  195 static struct keybuf empty_keybuf = {
  196         .kb_nents = 0
  197 };
  198 
  199 /* Obtain the key buffer from boot metadata */
  200 static void
  201 keybuf_init(void)
  202 {
  203         caddr_t kmdp;
  204 
  205         kmdp = preload_search_by_type("elf kernel");
  206 
  207         if (kmdp == NULL)
  208                 kmdp = preload_search_by_type("elf64 kernel");
  209 
  210         keybuf = (struct keybuf *)preload_search_info(kmdp,
  211             MODINFO_METADATA | MODINFOMD_KEYBUF);
  212 
  213         if (keybuf == NULL)
  214                 keybuf = &empty_keybuf;
  215 }
  216 
  217 /* It'd be nice if we could store these in some kind of secure memory... */
  218 struct keybuf * get_keybuf(void) {
  219 
  220         return (keybuf);
  221 }
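/*
 * [Illustrative sketch, not part of crypto.c]  A consumer would fetch the
 * boot-loader-provided key material through get_keybuf(); only kb_nents is
 * used here since the entry layout is defined in <crypto/intake.h> and not
 * shown in this file.
 */
#if 0	/* example only */
static void
example_count_keys(void)
{
	struct keybuf *kb = get_keybuf();

	printf("boot keybuf holds %u entries\n", kb->kb_nents);
}
#endif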
  222 
  223 static int
  224 crypto_init(void)
  225 {
  226         int error;
  227 
  228         mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
  229                 MTX_DEF|MTX_QUIET);
  230 
  231         TAILQ_INIT(&crp_q);
  232         TAILQ_INIT(&crp_kq);
  233         mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);
  234 
  235         TAILQ_INIT(&crp_ret_q);
  236         TAILQ_INIT(&crp_ret_kq);
  237         mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF);
  238 
  239         cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
  240                                     0, 0, 0, 0,
  241                                     UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
  242         cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
  243                                     0, 0, 0, 0,
  244                                     UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
  245         if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
  246                 printf("crypto_init: cannot setup crypto zones\n");
  247                 error = ENOMEM;
  248                 goto bad;
  249         }
  250 
  251         crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
  252         crypto_drivers = malloc(crypto_drivers_num *
  253             sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
  254         if (crypto_drivers == NULL) {
  255                 printf("crypto_init: cannot setup crypto drivers\n");
  256                 error = ENOMEM;
  257                 goto bad;
  258         }
  259 
  260         error = kproc_create((void (*)(void *)) crypto_proc, NULL,
  261                     &cryptoproc, 0, 0, "crypto");
  262         if (error) {
  263                 printf("crypto_init: cannot start crypto thread; error %d",
  264                         error);
  265                 goto bad;
  266         }
  267 
  268         error = kproc_create((void (*)(void *)) crypto_ret_proc, NULL,
  269                     &cryptoretproc, 0, 0, "crypto returns");
  270         if (error) {
  271                 printf("crypto_init: cannot start cryptoret thread; error %d",
  272                         error);
  273                 goto bad;
  274         }
  275 
  276         keybuf_init();
  277 
  278         return 0;
  279 bad:
  280         crypto_destroy();
  281         return error;
  282 }
  283 
  284 /*
  285  * Signal a crypto thread to terminate.  We use the driver
  286  * table lock to synchronize the sleep/wakeups so that we
  287  * are sure the threads have terminated before we release
  288  * the data structures they use.  See crypto_finis below
  289  * for the other half of this song-and-dance.
  290  */
  291 static void
  292 crypto_terminate(struct proc **pp, void *q)
  293 {
  294         struct proc *p;
  295 
  296         mtx_assert(&crypto_drivers_mtx, MA_OWNED);
  297         p = *pp;
  298         *pp = NULL;
  299         if (p) {
  300                 wakeup_one(q);
   301                 PROC_LOCK(p);           /* NB: ensure we don't miss wakeup */
  302                 CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */
  303                 msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
  304                 PROC_UNLOCK(p);
  305                 CRYPTO_DRIVER_LOCK();
  306         }
  307 }
  308 
  309 static void
  310 crypto_destroy(void)
  311 {
  312         /*
  313          * Terminate any crypto threads.
  314          */
  315         CRYPTO_DRIVER_LOCK();
  316         crypto_terminate(&cryptoproc, &crp_q);
  317         crypto_terminate(&cryptoretproc, &crp_ret_q);
  318         CRYPTO_DRIVER_UNLOCK();
  319 
  320         /* XXX flush queues??? */
  321 
  322         /*
  323          * Reclaim dynamically allocated resources.
  324          */
  325         if (crypto_drivers != NULL)
  326                 free(crypto_drivers, M_CRYPTO_DATA);
  327 
  328         if (cryptodesc_zone != NULL)
  329                 uma_zdestroy(cryptodesc_zone);
  330         if (cryptop_zone != NULL)
  331                 uma_zdestroy(cryptop_zone);
  332         mtx_destroy(&crypto_q_mtx);
  333         mtx_destroy(&crypto_ret_q_mtx);
  334         mtx_destroy(&crypto_drivers_mtx);
  335 }
  336 
  337 static struct cryptocap *
  338 crypto_checkdriver(u_int32_t hid)
  339 {
  340         if (crypto_drivers == NULL)
  341                 return NULL;
  342         return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
  343 }
  344 
  345 /*
  346  * Compare a driver's list of supported algorithms against another
  347  * list; return non-zero if all algorithms are supported.
  348  */
  349 static int
  350 driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
  351 {
  352         const struct cryptoini *cr;
  353 
  354         /* See if all the algorithms are supported. */
  355         for (cr = cri; cr; cr = cr->cri_next)
  356                 if (cap->cc_alg[cr->cri_alg] == 0)
  357                         return 0;
  358         return 1;
  359 }
  360 
  361 /*
  362  * Select a driver for a new session that supports the specified
  363  * algorithms and, optionally, is constrained according to the flags.
  364  * The algorithm we use here is pretty stupid; just use the
  365  * first driver that supports all the algorithms we need. If there
  366  * are multiple drivers we choose the driver with the fewest active
  367  * sessions.  We prefer hardware-backed drivers to software ones.
  368  *
  369  * XXX We need more smarts here (in real life too, but that's
  370  * XXX another story altogether).
  371  */
  372 static struct cryptocap *
  373 crypto_select_driver(const struct cryptoini *cri, int flags)
  374 {
  375         struct cryptocap *cap, *best;
  376         int match, hid;
  377 
  378         CRYPTO_DRIVER_ASSERT();
  379 
  380         /*
  381          * Look first for hardware crypto devices if permitted.
  382          */
  383         if (flags & CRYPTOCAP_F_HARDWARE)
  384                 match = CRYPTOCAP_F_HARDWARE;
  385         else
  386                 match = CRYPTOCAP_F_SOFTWARE;
  387         best = NULL;
  388 again:
  389         for (hid = 0; hid < crypto_drivers_num; hid++) {
  390                 cap = &crypto_drivers[hid];
  391                 /*
  392                  * If it's not initialized, is in the process of
  393                  * going away, or is not appropriate (hardware
  394                  * or software based on match), then skip.
  395                  */
  396                 if (cap->cc_dev == NULL ||
  397                     (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
  398                     (cap->cc_flags & match) == 0)
  399                         continue;
  400 
  401                 /* verify all the algorithms are supported. */
  402                 if (driver_suitable(cap, cri)) {
  403                         if (best == NULL ||
  404                             cap->cc_sessions < best->cc_sessions)
  405                                 best = cap;
  406                 }
  407         }
  408         if (best == NULL && match == CRYPTOCAP_F_HARDWARE &&
  409             (flags & CRYPTOCAP_F_SOFTWARE)) {
  410                 /* sort of an Algol 68-style for loop */
  411                 match = CRYPTOCAP_F_SOFTWARE;
  412                 goto again;
  413         }
  414         return best;
  415 }
  416 
  417 /*
  418  * Create a new session.  The crid argument specifies a crypto
  419  * driver to use or constraints on a driver to select (hardware
  420  * only, software only, either).  Whatever driver is selected
  421  * must be capable of the requested crypto algorithms.
  422  */
  423 int
  424 crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
  425 {
  426         struct cryptocap *cap;
  427         u_int32_t hid, lid;
  428         int err;
  429 
  430         CRYPTO_DRIVER_LOCK();
  431         if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
  432                 /*
  433                  * Use specified driver; verify it is capable.
  434                  */
  435                 cap = crypto_checkdriver(crid);
  436                 if (cap != NULL && !driver_suitable(cap, cri))
  437                         cap = NULL;
  438         } else {
  439                 /*
  440                  * No requested driver; select based on crid flags.
  441                  */
  442                 cap = crypto_select_driver(cri, crid);
  443                 /*
   444                  * If NULL, no single driver can handle all the algorithms in one session.
  445                  * XXX Fix this. We need to inject a "virtual" session
  446                  * XXX layer right about here.
  447                  */
  448         }
  449         if (cap != NULL) {
  450                 /* Call the driver initialization routine. */
  451                 hid = cap - crypto_drivers;
  452                 lid = hid;              /* Pass the driver ID. */
  453                 err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
  454                 if (err == 0) {
  455                         (*sid) = (cap->cc_flags & 0xff000000)
  456                                | (hid & 0x00ffffff);
  457                         (*sid) <<= 32;
  458                         (*sid) |= (lid & 0xffffffff);
  459                         cap->cc_sessions++;
  460                 } else
  461                         CRYPTDEB("dev newsession failed");
  462         } else {
  463                 CRYPTDEB("no driver");
  464                 err = EINVAL;
  465         }
  466         CRYPTO_DRIVER_UNLOCK();
  467         return err;
  468 }
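/*
 * [Illustrative sketch, not part of crypto.c]  Creating a session for a
 * single cipher and decoding the session id packed above.  The algorithm
 * and key length are assumptions chosen for illustration.
 */
#if 0	/* example only */
static int
example_newsession(u_int64_t *sidp, void *key)
{
	struct cryptoini cri;
	int error;

	bzero(&cri, sizeof(cri));
	cri.cri_alg = CRYPTO_AES_CBC;
	cri.cri_klen = 128;		/* key length in bits */
	cri.cri_key = key;

	/* Accept either a hardware or a software driver. */
	error = crypto_newsession(sidp, &cri,
	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
	if (error)
		return (error);

	/*
	 * The upper 32 bits hold the capability flags and driver index
	 * (hid), the lower 32 bits the driver-local id (lid); hence
	 * CRYPTO_SESID2HID() extracts bits 32..55.
	 */
	printf("session on driver %u\n", (unsigned)CRYPTO_SESID2HID(*sidp));
	return (0);
}
#endif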
  469 
  470 static void
  471 crypto_remove(struct cryptocap *cap)
  472 {
  473 
  474         mtx_assert(&crypto_drivers_mtx, MA_OWNED);
  475         if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
  476                 bzero(cap, sizeof(*cap));
  477 }
  478 
  479 /*
  480  * Delete an existing session (or a reserved session on an unregistered
  481  * driver).
  482  */
  483 int
  484 crypto_freesession(u_int64_t sid)
  485 {
  486         struct cryptocap *cap;
  487         u_int32_t hid;
  488         int err;
  489 
  490         CRYPTO_DRIVER_LOCK();
  491 
  492         if (crypto_drivers == NULL) {
  493                 err = EINVAL;
  494                 goto done;
  495         }
  496 
   497         /* Determine the driver index from the session id. */
  498         hid = CRYPTO_SESID2HID(sid);
  499 
  500         if (hid >= crypto_drivers_num) {
  501                 err = ENOENT;
  502                 goto done;
  503         }
  504         cap = &crypto_drivers[hid];
  505 
  506         if (cap->cc_sessions)
  507                 cap->cc_sessions--;
  508 
  509         /* Call the driver cleanup routine, if available. */
  510         err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
  511 
  512         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
  513                 crypto_remove(cap);
  514 
  515 done:
  516         CRYPTO_DRIVER_UNLOCK();
  517         return err;
  518 }
  519 
  520 /*
  521  * Return an unused driver id.  Used by drivers prior to registering
  522  * support for the algorithms they handle.
  523  */
  524 int32_t
  525 crypto_get_driverid(device_t dev, int flags)
  526 {
  527         struct cryptocap *newdrv;
  528         int i;
  529 
  530         if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
  531                 printf("%s: no flags specified when registering driver\n",
  532                     device_get_nameunit(dev));
  533                 return -1;
  534         }
  535 
  536         CRYPTO_DRIVER_LOCK();
  537 
  538         for (i = 0; i < crypto_drivers_num; i++) {
  539                 if (crypto_drivers[i].cc_dev == NULL &&
  540                     (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
  541                         break;
  542                 }
  543         }
  544 
  545         /* Out of entries, allocate some more. */
  546         if (i == crypto_drivers_num) {
  547                 /* Be careful about wrap-around. */
  548                 if (2 * crypto_drivers_num <= crypto_drivers_num) {
  549                         CRYPTO_DRIVER_UNLOCK();
  550                         printf("crypto: driver count wraparound!\n");
  551                         return -1;
  552                 }
  553 
  554                 newdrv = malloc(2 * crypto_drivers_num *
  555                     sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
  556                 if (newdrv == NULL) {
  557                         CRYPTO_DRIVER_UNLOCK();
  558                         printf("crypto: no space to expand driver table!\n");
  559                         return -1;
  560                 }
  561 
  562                 bcopy(crypto_drivers, newdrv,
  563                     crypto_drivers_num * sizeof(struct cryptocap));
  564 
  565                 crypto_drivers_num *= 2;
  566 
  567                 free(crypto_drivers, M_CRYPTO_DATA);
  568                 crypto_drivers = newdrv;
  569         }
  570 
  571         /* NB: state is zero'd on free */
  572         crypto_drivers[i].cc_sessions = 1;      /* Mark */
  573         crypto_drivers[i].cc_dev = dev;
  574         crypto_drivers[i].cc_flags = flags;
  575         if (bootverbose)
  576                 printf("crypto: assign %s driver id %u, flags %u\n",
  577                     device_get_nameunit(dev), i, flags);
  578 
  579         CRYPTO_DRIVER_UNLOCK();
  580 
  581         return i;
  582 }
  583 
  584 /*
  585  * Lookup a driver by name.  We match against the full device
  586  * name and unit, and against just the name.  The latter gives
   587  * us a simple wildcarding by device name.  On success return the
  588  * driver/hardware identifier; otherwise return -1.
  589  */
  590 int
  591 crypto_find_driver(const char *match)
  592 {
  593         int i, len = strlen(match);
  594 
  595         CRYPTO_DRIVER_LOCK();
  596         for (i = 0; i < crypto_drivers_num; i++) {
  597                 device_t dev = crypto_drivers[i].cc_dev;
  598                 if (dev == NULL ||
  599                     (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
  600                         continue;
  601                 if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
  602                     strncmp(match, device_get_name(dev), len) == 0)
  603                         break;
  604         }
  605         CRYPTO_DRIVER_UNLOCK();
  606         return i < crypto_drivers_num ? i : -1;
  607 }
  608 
  609 /*
  610  * Return the device_t for the specified driver or NULL
  611  * if the driver identifier is invalid.
  612  */
  613 device_t
  614 crypto_find_device_byhid(int hid)
  615 {
  616         struct cryptocap *cap = crypto_checkdriver(hid);
  617         return cap != NULL ? cap->cc_dev : NULL;
  618 }
  619 
  620 /*
  621  * Return the device/driver capabilities.
  622  */
  623 int
  624 crypto_getcaps(int hid)
  625 {
  626         struct cryptocap *cap = crypto_checkdriver(hid);
  627         return cap != NULL ? cap->cc_flags : 0;
  628 }
  629 
  630 /*
  631  * Register support for a key-related algorithm.  This routine
   632  * is called once for each algorithm supported by a driver.
  633  */
  634 int
  635 crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
  636 {
  637         struct cryptocap *cap;
  638         int err;
  639 
  640         CRYPTO_DRIVER_LOCK();
  641 
  642         cap = crypto_checkdriver(driverid);
  643         if (cap != NULL &&
  644             (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
  645                 /*
  646                  * XXX Do some performance testing to determine placing.
  647                  * XXX We probably need an auxiliary data structure that
  648                  * XXX describes relative performances.
  649                  */
  650 
  651                 cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
  652                 if (bootverbose)
  653                         printf("crypto: %s registers key alg %u flags %u\n"
  654                                 , device_get_nameunit(cap->cc_dev)
  655                                 , kalg
  656                                 , flags
  657                         );
  658                 err = 0;
  659         } else
  660                 err = EINVAL;
  661 
  662         CRYPTO_DRIVER_UNLOCK();
  663         return err;
  664 }
  665 
  666 /*
  667  * Register support for a non-key-related algorithm.  This routine
  668  * is called once for each such algorithm supported by a driver.
  669  */
  670 int
  671 crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
  672     u_int32_t flags)
  673 {
  674         struct cryptocap *cap;
  675         int err;
  676 
  677         CRYPTO_DRIVER_LOCK();
  678 
  679         cap = crypto_checkdriver(driverid);
  680         /* NB: algorithms are in the range [1..max] */
  681         if (cap != NULL &&
  682             (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
  683                 /*
  684                  * XXX Do some performance testing to determine placing.
  685                  * XXX We probably need an auxiliary data structure that
  686                  * XXX describes relative performances.
  687                  */
  688 
  689                 cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
  690                 cap->cc_max_op_len[alg] = maxoplen;
  691                 if (bootverbose)
  692                         printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
  693                                 , device_get_nameunit(cap->cc_dev)
  694                                 , alg
  695                                 , flags
  696                                 , maxoplen
  697                         );
  698                 cap->cc_sessions = 0;           /* Unmark */
  699                 err = 0;
  700         } else
  701                 err = EINVAL;
  702 
  703         CRYPTO_DRIVER_UNLOCK();
  704         return err;
  705 }
  706 
  707 static void
  708 driver_finis(struct cryptocap *cap)
  709 {
  710         u_int32_t ses, kops;
  711 
  712         CRYPTO_DRIVER_ASSERT();
  713 
  714         ses = cap->cc_sessions;
  715         kops = cap->cc_koperations;
  716         bzero(cap, sizeof(*cap));
  717         if (ses != 0 || kops != 0) {
  718                 /*
  719                  * If there are pending sessions,
  720                  * just mark as invalid.
  721                  */
  722                 cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
  723                 cap->cc_sessions = ses;
  724                 cap->cc_koperations = kops;
  725         }
  726 }
  727 
  728 /*
  729  * Unregister a crypto driver. If there are pending sessions using it,
  730  * leave enough information around so that subsequent calls using those
  731  * sessions will correctly detect the driver has been unregistered and
  732  * reroute requests.
  733  */
  734 int
  735 crypto_unregister(u_int32_t driverid, int alg)
  736 {
  737         struct cryptocap *cap;
  738         int i, err;
  739 
  740         CRYPTO_DRIVER_LOCK();
  741         cap = crypto_checkdriver(driverid);
  742         if (cap != NULL &&
  743             (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
  744             cap->cc_alg[alg] != 0) {
  745                 cap->cc_alg[alg] = 0;
  746                 cap->cc_max_op_len[alg] = 0;
  747 
   748                 /* Was this the last algorithm? */
  749                 for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
  750                         if (cap->cc_alg[i] != 0)
  751                                 break;
  752 
  753                 if (i == CRYPTO_ALGORITHM_MAX + 1)
  754                         driver_finis(cap);
  755                 err = 0;
  756         } else
  757                 err = EINVAL;
  758         CRYPTO_DRIVER_UNLOCK();
  759 
  760         return err;
  761 }
  762 
  763 /*
  764  * Unregister all algorithms associated with a crypto driver.
  765  * If there are pending sessions using it, leave enough information
  766  * around so that subsequent calls using those sessions will
  767  * correctly detect the driver has been unregistered and reroute
  768  * requests.
  769  */
  770 int
  771 crypto_unregister_all(u_int32_t driverid)
  772 {
  773         struct cryptocap *cap;
  774         int err;
  775 
  776         CRYPTO_DRIVER_LOCK();
  777         cap = crypto_checkdriver(driverid);
  778         if (cap != NULL) {
  779                 driver_finis(cap);
  780                 err = 0;
  781         } else
  782                 err = EINVAL;
  783         CRYPTO_DRIVER_UNLOCK();
  784 
  785         return err;
  786 }
  787 
  788 /*
  789  * Clear blockage on a driver.  The what parameter indicates whether
  790  * the driver is now ready for cryptop's and/or cryptokop's.
  791  */
  792 int
  793 crypto_unblock(u_int32_t driverid, int what)
  794 {
  795         struct cryptocap *cap;
  796         int err;
  797 
  798         CRYPTO_Q_LOCK();
  799         cap = crypto_checkdriver(driverid);
  800         if (cap != NULL) {
  801                 if (what & CRYPTO_SYMQ)
  802                         cap->cc_qblocked = 0;
  803                 if (what & CRYPTO_ASYMQ)
  804                         cap->cc_kqblocked = 0;
  805                 if (crp_sleep)
  806                         wakeup_one(&crp_q);
  807                 err = 0;
  808         } else
  809                 err = EINVAL;
  810         CRYPTO_Q_UNLOCK();
  811 
  812         return err;
  813 }
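/*
 * [Illustrative sketch, not part of crypto.c]  How a driver cooperates
 * with the blocking machinery: its process method returns ERESTART when it
 * is out of resources, and once resources free up (e.g. in its interrupt
 * handler) it calls crypto_unblock() so queued requests are re-dispatched.
 * The "mydrv" names and softc fields are assumptions for illustration.
 */
#if 0	/* example only */
static int
mydrv_process(device_t dev, struct cryptop *crp, int hint)
{
	struct mydrv_softc *sc = device_get_softc(dev);

	if (mydrv_hw_full(sc))
		return (ERESTART);	/* crypto_proc marks us blocked */
	return (mydrv_start(sc, crp));	/* crypto_done() on completion */
}

static void
mydrv_intr(void *arg)
{
	struct mydrv_softc *sc = arg;

	mydrv_harvest_completions(sc);
	/* Room again: let the symmetric queue drain to us. */
	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ);
}
#endif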
  814 
  815 /*
  816  * Add a crypto request to a queue, to be processed by the kernel thread.
  817  */
  818 int
  819 crypto_dispatch(struct cryptop *crp)
  820 {
  821         struct cryptocap *cap;
  822         u_int32_t hid;
  823         int result;
  824 
  825         cryptostats.cs_ops++;
  826 
  827 #ifdef CRYPTO_TIMING
  828         if (crypto_timing)
  829                 binuptime(&crp->crp_tstamp);
  830 #endif
  831 
  832         hid = CRYPTO_SESID2HID(crp->crp_sid);
  833 
  834         if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
  835                 /*
  836                  * Caller marked the request to be processed
  837                  * immediately; dispatch it directly to the
  838                  * driver unless the driver is currently blocked.
  839                  */
  840                 cap = crypto_checkdriver(hid);
   841         /* Driver cannot disappear while there is an active session. */
  842                 KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
  843                 if (!cap->cc_qblocked) {
  844                         result = crypto_invoke(cap, crp, 0);
  845                         if (result != ERESTART)
  846                                 return (result);
  847                         /*
  848                          * The driver ran out of resources, put the request on
  849                          * the queue.
  850                          */
  851                 }
  852         }
  853         CRYPTO_Q_LOCK();
  854         TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
  855         if (crp_sleep)
  856                 wakeup_one(&crp_q);
  857         CRYPTO_Q_UNLOCK();
  858         return 0;
  859 }
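/*
 * [Illustrative sketch, not part of crypto.c]  A typical consumer builds a
 * request with crypto_getreq() (defined later in this file), fills in the
 * descriptor, and hands it to crypto_dispatch(); completion is signalled
 * through the callback.  Buffer and IV handling are simplified and the
 * field values are assumptions for illustration.
 */
#if 0	/* example only */
static int
example_encrypt(u_int64_t sid, char *buf, int len,
    int (*cb)(struct cryptop *))
{
	struct cryptop *crp;
	struct cryptodesc *crd;

	crp = crypto_getreq(1);			/* one descriptor */
	if (crp == NULL)
		return (ENOMEM);

	crp->crp_sid = sid;
	crp->crp_ilen = len;
	crp->crp_buf = buf;			/* contiguous buffer */
	crp->crp_flags = CRYPTO_F_CBIFSYNC;	/* immediate cb if sync driver */
	crp->crp_callback = cb;

	crd = crp->crp_desc;
	crd->crd_alg = CRYPTO_AES_CBC;
	crd->crd_skip = 0;
	crd->crd_len = len;
	crd->crd_flags = CRD_F_ENCRYPT;		/* IV handling elided */

	return (crypto_dispatch(crp));
}
#endif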
  860 
  861 /*
   862  * Add an asymmetric crypto request to a queue,
  863  * to be processed by the kernel thread.
  864  */
  865 int
  866 crypto_kdispatch(struct cryptkop *krp)
  867 {
  868         int error;
  869 
  870         cryptostats.cs_kops++;
  871 
  872         error = crypto_kinvoke(krp, krp->krp_crid);
  873         if (error == ERESTART) {
  874                 CRYPTO_Q_LOCK();
  875                 TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
  876                 if (crp_sleep)
  877                         wakeup_one(&crp_q);
  878                 CRYPTO_Q_UNLOCK();
  879                 error = 0;
  880         }
  881         return error;
  882 }
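/*
 * [Illustrative sketch, not part of crypto.c]  An asymmetric request
 * carries its operands in krp_param[]; here a modular exponentiation is
 * queued via crypto_kdispatch().  The allocation strategy, parameter
 * packing and base/exp/mod/res names are assumptions for illustration.
 */
#if 0	/* example only */
static int
example_mod_exp(struct crparam *base, struct crparam *exp,
    struct crparam *mod, struct crparam *res, int (*cb)(struct cryptkop *))
{
	struct cryptkop *krp;

	krp = malloc(sizeof(*krp), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (krp == NULL)
		return (ENOMEM);

	krp->krp_op = CRK_MOD_EXP;
	krp->krp_iparams = 3;			/* base, exponent, modulus */
	krp->krp_oparams = 1;			/* result */
	krp->krp_param[0] = *base;
	krp->krp_param[1] = *exp;
	krp->krp_param[2] = *mod;
	krp->krp_param[3] = *res;
	krp->krp_crid = CRYPTOCAP_F_HARDWARE;	/* prefer h/w */
	krp->krp_callback = cb;

	return (crypto_kdispatch(krp));
}
#endif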
  883 
  884 /*
  885  * Verify a driver is suitable for the specified operation.
  886  */
  887 static __inline int
  888 kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
  889 {
  890         return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
  891 }
  892 
  893 /*
  894  * Select a driver for an asym operation.  The driver must
  895  * support the necessary algorithm.  The caller can constrain
  896  * which device is selected with the flags parameter.  The
  897  * algorithm we use here is pretty stupid; just use the first
  898  * driver that supports the algorithms we need. If there are
  899  * multiple suitable drivers we choose the driver with the
  900  * fewest active operations.  We prefer hardware-backed
  901  * drivers to software ones when either may be used.
  902  */
  903 static struct cryptocap *
  904 crypto_select_kdriver(const struct cryptkop *krp, int flags)
  905 {
  906         struct cryptocap *cap, *best, *blocked;
  907         int match, hid;
  908 
  909         CRYPTO_DRIVER_ASSERT();
  910 
  911         /*
  912          * Look first for hardware crypto devices if permitted.
  913          */
  914         if (flags & CRYPTOCAP_F_HARDWARE)
  915                 match = CRYPTOCAP_F_HARDWARE;
  916         else
  917                 match = CRYPTOCAP_F_SOFTWARE;
  918         best = NULL;
  919         blocked = NULL;
  920 again:
  921         for (hid = 0; hid < crypto_drivers_num; hid++) {
  922                 cap = &crypto_drivers[hid];
  923                 /*
  924                  * If it's not initialized, is in the process of
  925                  * going away, or is not appropriate (hardware
  926                  * or software based on match), then skip.
  927                  */
  928                 if (cap->cc_dev == NULL ||
  929                     (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
  930                     (cap->cc_flags & match) == 0)
  931                         continue;
  932 
  933                 /* verify all the algorithms are supported. */
  934                 if (kdriver_suitable(cap, krp)) {
  935                         if (best == NULL ||
  936                             cap->cc_koperations < best->cc_koperations)
  937                                 best = cap;
  938                 }
  939         }
  940         if (best != NULL)
  941                 return best;
  942         if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
  943                 /* sort of an Algol 68-style for loop */
  944                 match = CRYPTOCAP_F_SOFTWARE;
  945                 goto again;
  946         }
  947         return best;
  948 }
  949 
  950 /*
  951  * Dispatch an asymmetric crypto request.
  952  */
  953 static int
  954 crypto_kinvoke(struct cryptkop *krp, int crid)
  955 {
  956         struct cryptocap *cap = NULL;
  957         int error;
  958 
  959         KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
  960         KASSERT(krp->krp_callback != NULL,
  961             ("%s: krp->crp_callback == NULL", __func__));
  962 
  963         CRYPTO_DRIVER_LOCK();
  964         if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
  965                 cap = crypto_checkdriver(crid);
  966                 if (cap != NULL) {
  967                         /*
  968                          * Driver present, it must support the necessary
  969                          * algorithm and, if s/w drivers are excluded,
  970                          * it must be registered as hardware-backed.
  971                          */
  972                         if (!kdriver_suitable(cap, krp) ||
  973                             (!crypto_devallowsoft &&
  974                              (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
  975                                 cap = NULL;
  976                 }
  977         } else {
  978                 /*
  979                  * No requested driver; select based on crid flags.
  980                  */
  981                 if (!crypto_devallowsoft)       /* NB: disallow s/w drivers */
  982                         crid &= ~CRYPTOCAP_F_SOFTWARE;
  983                 cap = crypto_select_kdriver(krp, crid);
  984         }
  985         if (cap != NULL && !cap->cc_kqblocked) {
  986                 krp->krp_hid = cap - crypto_drivers;
  987                 cap->cc_koperations++;
  988                 CRYPTO_DRIVER_UNLOCK();
  989                 error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
  990                 CRYPTO_DRIVER_LOCK();
  991                 if (error == ERESTART) {
  992                         cap->cc_koperations--;
  993                         CRYPTO_DRIVER_UNLOCK();
  994                         return (error);
  995                 }
  996         } else {
  997                 /*
  998                  * NB: cap is !NULL if device is blocked; in
  999                  *     that case return ERESTART so the operation
 1000                  *     is resubmitted if possible.
 1001                  */
 1002                 error = (cap == NULL) ? ENODEV : ERESTART;
 1003         }
 1004         CRYPTO_DRIVER_UNLOCK();
 1005 
 1006         if (error) {
 1007                 krp->krp_status = error;
 1008                 crypto_kdone(krp);
 1009         }
 1010         return 0;
 1011 }
 1012 
 1013 #ifdef CRYPTO_TIMING
 1014 static void
 1015 crypto_tstat(struct cryptotstat *ts, struct bintime *bt)
 1016 {
 1017         struct bintime now, delta;
 1018         struct timespec t;
 1019         uint64_t u;
 1020 
 1021         binuptime(&now);
 1022         u = now.frac;
 1023         delta.frac = now.frac - bt->frac;
 1024         delta.sec = now.sec - bt->sec;
 1025         if (u < delta.frac)
 1026                 delta.sec--;
 1027         bintime2timespec(&delta, &t);
 1028         timespecadd(&ts->acc, &t);
 1029         if (timespeccmp(&t, &ts->min, <))
 1030                 ts->min = t;
 1031         if (timespeccmp(&t, &ts->max, >))
 1032                 ts->max = t;
 1033         ts->count++;
 1034 
 1035         *bt = now;
 1036 }
 1037 #endif
 1038 
 1039 /*
 1040  * Dispatch a crypto request to the appropriate crypto devices.
 1041  */
 1042 static int
 1043 crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
 1044 {
 1045 
 1046         KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
 1047         KASSERT(crp->crp_callback != NULL,
 1048             ("%s: crp->crp_callback == NULL", __func__));
 1049         KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
 1050 
 1051 #ifdef CRYPTO_TIMING
 1052         if (crypto_timing)
 1053                 crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
 1054 #endif
 1055         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
 1056                 struct cryptodesc *crd;
 1057                 u_int64_t nid;
 1058 
 1059                 /*
 1060                  * Driver has unregistered; migrate the session and return
 1061                  * an error to the caller so they'll resubmit the op.
 1062                  *
 1063                  * XXX: What if there are more already queued requests for this
 1064                  *      session?
 1065                  */
 1066                 crypto_freesession(crp->crp_sid);
 1067 
 1068                 for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
 1069                         crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
 1070 
 1071                 /* XXX propagate flags from initial session? */
 1072                 if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
 1073                     CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
 1074                         crp->crp_sid = nid;
 1075 
 1076                 crp->crp_etype = EAGAIN;
 1077                 crypto_done(crp);
 1078                 return 0;
 1079         } else {
 1080                 /*
 1081                  * Invoke the driver to process the request.
 1082                  */
 1083                 return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
 1084         }
 1085 }
 1086 
 1087 /*
 1088  * Release a set of crypto descriptors.
 1089  */
 1090 void
 1091 crypto_freereq(struct cryptop *crp)
 1092 {
 1093         struct cryptodesc *crd;
 1094 
 1095         if (crp == NULL)
 1096                 return;
 1097 
 1098 #ifdef DIAGNOSTIC
 1099         {
 1100                 struct cryptop *crp2;
 1101 
 1102                 CRYPTO_Q_LOCK();
 1103                 TAILQ_FOREACH(crp2, &crp_q, crp_next) {
 1104                         KASSERT(crp2 != crp,
 1105                             ("Freeing cryptop from the crypto queue (%p).",
 1106                             crp));
 1107                 }
 1108                 CRYPTO_Q_UNLOCK();
 1109                 CRYPTO_RETQ_LOCK();
 1110                 TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
 1111                         KASSERT(crp2 != crp,
 1112                             ("Freeing cryptop from the return queue (%p).",
 1113                             crp));
 1114                 }
 1115                 CRYPTO_RETQ_UNLOCK();
 1116         }
 1117 #endif
 1118 
 1119         while ((crd = crp->crp_desc) != NULL) {
 1120                 crp->crp_desc = crd->crd_next;
 1121                 uma_zfree(cryptodesc_zone, crd);
 1122         }
 1123         uma_zfree(cryptop_zone, crp);
 1124 }
 1125 
 1126 /*
 1127  * Acquire a set of crypto descriptors.
 1128  */
 1129 struct cryptop *
 1130 crypto_getreq(int num)
 1131 {
 1132         struct cryptodesc *crd;
 1133         struct cryptop *crp;
 1134 
 1135         crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
 1136         if (crp != NULL) {
 1137                 while (num--) {
 1138                         crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
 1139                         if (crd == NULL) {
 1140                                 crypto_freereq(crp);
 1141                                 return NULL;
 1142                         }
 1143 
 1144                         crd->crd_next = crp->crp_desc;
 1145                         crp->crp_desc = crd;
 1146                 }
 1147         }
 1148         return crp;
 1149 }
 1150 
 1151 /*
 1152  * Invoke the callback on behalf of the driver.
 1153  */
 1154 void
 1155 crypto_done(struct cryptop *crp)
 1156 {
 1157         KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
 1158                 ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
 1159         crp->crp_flags |= CRYPTO_F_DONE;
 1160         if (crp->crp_etype != 0)
 1161                 cryptostats.cs_errs++;
 1162 #ifdef CRYPTO_TIMING
 1163         if (crypto_timing)
 1164                 crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
 1165 #endif
 1166         /*
 1167          * CBIMM means unconditionally do the callback immediately;
 1168          * CBIFSYNC means do the callback immediately only if the
 1169          * operation was done synchronously.  Both are used to avoid
 1170          * doing extraneous context switches; the latter is mostly
 1171          * used with the software crypto driver.
 1172          */
 1173         if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
 1174             ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
 1175              (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
 1176                 /*
 1177                  * Do the callback directly.  This is ok when the
 1178                  * callback routine does very little (e.g. the
 1179                  * /dev/crypto callback method just does a wakeup).
 1180                  */
 1181 #ifdef CRYPTO_TIMING
 1182                 if (crypto_timing) {
 1183                         /*
 1184                          * NB: We must copy the timestamp before
 1185                          * doing the callback as the cryptop is
 1186                          * likely to be reclaimed.
 1187                          */
 1188                         struct bintime t = crp->crp_tstamp;
 1189                         crypto_tstat(&cryptostats.cs_cb, &t);
 1190                         crp->crp_callback(crp);
 1191                         crypto_tstat(&cryptostats.cs_finis, &t);
 1192                 } else
 1193 #endif
 1194                         crp->crp_callback(crp);
 1195         } else {
 1196                 /*
 1197                  * Normal case; queue the callback for the thread.
 1198                  */
 1199                 CRYPTO_RETQ_LOCK();
 1200                 if (CRYPTO_RETQ_EMPTY())
 1201                         wakeup_one(&crp_ret_q); /* shared wait channel */
 1202                 TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
 1203                 CRYPTO_RETQ_UNLOCK();
 1204         }
 1205 }
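/*
 * [Illustrative sketch, not part of crypto.c]  A typical crp_callback: on
 * EAGAIN the session has been migrated by crypto_invoke() and the request
 * should simply be resubmitted; completion and other errors are handed
 * back to the caller.  The wakeup-based completion is an assumption for
 * illustration.
 */
#if 0	/* example only */
static int
example_cb(struct cryptop *crp)
{
	if (crp->crp_etype == EAGAIN) {
		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		return (crypto_dispatch(crp));	/* retry on the new session */
	}
	wakeup(crp);		/* caller sleeps on the request */
	return (0);
}
#endif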
 1206 
 1207 /*
 1208  * Invoke the callback on behalf of the driver.
 1209  */
 1210 void
 1211 crypto_kdone(struct cryptkop *krp)
 1212 {
 1213         struct cryptocap *cap;
 1214 
 1215         if (krp->krp_status != 0)
 1216                 cryptostats.cs_kerrs++;
 1217         CRYPTO_DRIVER_LOCK();
 1218         /* XXX: What if driver is loaded in the meantime? */
 1219         if (krp->krp_hid < crypto_drivers_num) {
 1220                 cap = &crypto_drivers[krp->krp_hid];
 1221                 KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0"));
 1222                 cap->cc_koperations--;
 1223                 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
 1224                         crypto_remove(cap);
 1225         }
 1226         CRYPTO_DRIVER_UNLOCK();
 1227         CRYPTO_RETQ_LOCK();
 1228         if (CRYPTO_RETQ_EMPTY())
 1229                 wakeup_one(&crp_ret_q);         /* shared wait channel */
 1230         TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
 1231         CRYPTO_RETQ_UNLOCK();
 1232 }
 1233 
 1234 int
 1235 crypto_getfeat(int *featp)
 1236 {
 1237         int hid, kalg, feat = 0;
 1238 
 1239         CRYPTO_DRIVER_LOCK();
 1240         for (hid = 0; hid < crypto_drivers_num; hid++) {
 1241                 const struct cryptocap *cap = &crypto_drivers[hid];
 1242 
 1243                 if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
 1244                     !crypto_devallowsoft) {
 1245                         continue;
 1246                 }
 1247                 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
 1248                         if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
 1249                                 feat |=  1 << kalg;
 1250         }
 1251         CRYPTO_DRIVER_UNLOCK();
 1252         *featp = feat;
 1253         return (0);
 1254 }
 1255 
 1256 /*
 1257  * Terminate a thread at module unload.  The process that
 1258  * initiated this is waiting for us to signal that we're gone;
  1259  * wake it up and exit.  We use the driver table lock to ensure
 1260  * we don't do the wakeup before they're waiting.  There is no
 1261  * race here because the waiter sleeps on the proc lock for the
 1262  * thread so it gets notified at the right time because of an
 1263  * extra wakeup that's done in exit1().
 1264  */
 1265 static void
 1266 crypto_finis(void *chan)
 1267 {
 1268         CRYPTO_DRIVER_LOCK();
 1269         wakeup_one(chan);
 1270         CRYPTO_DRIVER_UNLOCK();
 1271         kproc_exit(0);
 1272 }
 1273 
 1274 /*
 1275  * Crypto thread, dispatches crypto requests.
 1276  */
 1277 static void
 1278 crypto_proc(void)
 1279 {
 1280         struct cryptop *crp, *submit;
 1281         struct cryptkop *krp;
 1282         struct cryptocap *cap;
 1283         u_int32_t hid;
 1284         int result, hint;
 1285 
 1286 #if defined(__i386__) || defined(__amd64__)
 1287         fpu_kern_thread(FPU_KERN_NORMAL);
 1288 #endif
 1289 
 1290         CRYPTO_Q_LOCK();
 1291         for (;;) {
 1292                 /*
 1293                  * Find the first element in the queue that can be
 1294                  * processed and look-ahead to see if multiple ops
 1295                  * are ready for the same driver.
 1296                  */
 1297                 submit = NULL;
 1298                 hint = 0;
 1299                 TAILQ_FOREACH(crp, &crp_q, crp_next) {
 1300                         hid = CRYPTO_SESID2HID(crp->crp_sid);
 1301                         cap = crypto_checkdriver(hid);
 1302                         /*
  1303                          * Driver cannot disappear while there is an active
 1304                          * session.
 1305                          */
 1306                         KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
 1307                             __func__, __LINE__));
 1308                         if (cap == NULL || cap->cc_dev == NULL) {
 1309                                 /* Op needs to be migrated, process it. */
 1310                                 if (submit == NULL)
 1311                                         submit = crp;
 1312                                 break;
 1313                         }
 1314                         if (!cap->cc_qblocked) {
 1315                                 if (submit != NULL) {
 1316                                         /*
 1317                                          * We stop on finding another op,
  1318                                          * regardless of whether it's for the same
 1319                                          * driver or not.  We could keep
 1320                                          * searching the queue but it might be
 1321                                          * better to just use a per-driver
 1322                                          * queue instead.
 1323                                          */
 1324                                         if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
 1325                                                 hint = CRYPTO_HINT_MORE;
 1326                                         break;
 1327                                 } else {
 1328                                         submit = crp;
 1329                                         if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
 1330                                                 break;
  1331                                         /* keep scanning in case more are queued */
 1332                                 }
 1333                         }
 1334                 }
 1335                 if (submit != NULL) {
 1336                         TAILQ_REMOVE(&crp_q, submit, crp_next);
 1337                         hid = CRYPTO_SESID2HID(submit->crp_sid);
 1338                         cap = crypto_checkdriver(hid);
 1339                         KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
 1340                             __func__, __LINE__));
 1341                         result = crypto_invoke(cap, submit, hint);
 1342                         if (result == ERESTART) {
 1343                                 /*
 1344                                  * The driver ran out of resources, mark the
 1345                                  * driver ``blocked'' for cryptop's and put
 1346                                  * the request back in the queue.  It would
  1347                                  * be best to put the request back where we got
 1348                                  * it but that's hard so for now we put it
 1349                                  * at the front.  This should be ok; putting
 1350                                  * it at the end does not work.
 1351                                  */
 1352                                 /* XXX validate sid again? */
 1353                                 crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
 1354                                 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
 1355                                 cryptostats.cs_blocks++;
 1356                         }
 1357                 }
 1358 
 1359                 /* As above, but for key ops */
 1360                 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
 1361                         cap = crypto_checkdriver(krp->krp_hid);
 1362                         if (cap == NULL || cap->cc_dev == NULL) {
 1363                                 /*
 1364                                  * Operation needs to be migrated, invalidate
 1365                                  * the assigned device so it will reselect a
 1366                                  * new one below.  Propagate the original
 1367                                  * crid selection flags if supplied.
 1368                                  */
 1369                                 krp->krp_hid = krp->krp_crid &
 1370                                     (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
 1371                                 if (krp->krp_hid == 0)
 1372                                         krp->krp_hid =
 1373                                     CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
 1374                                 break;
 1375                         }
 1376                         if (!cap->cc_kqblocked)
 1377                                 break;
 1378                 }
 1379                 if (krp != NULL) {
 1380                         TAILQ_REMOVE(&crp_kq, krp, krp_next);
 1381                         result = crypto_kinvoke(krp, krp->krp_hid);
 1382                         if (result == ERESTART) {
 1383                                 /*
 1384                                  * The driver ran out of resources, mark the
 1385                                  * driver ``blocked'' for cryptkop's and put
 1386                                  * the request back in the queue.  It would
  1387                                  * be best to put the request back where we got
 1388                                  * it but that's hard so for now we put it
 1389                                  * at the front.  This should be ok; putting
 1390                                  * it at the end does not work.
 1391                                  */
 1392                                 /* XXX validate sid again? */
 1393                                 crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
 1394                                 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
 1395                                 cryptostats.cs_kblocks++;
 1396                         }
 1397                 }
 1398 
 1399                 if (submit == NULL && krp == NULL) {
 1400                         /*
 1401                          * Nothing more to be processed.  Sleep until we're
 1402                          * woken because there are more ops to process.
 1403                          * This happens either by submission or by a driver
 1404                          * becoming unblocked and notifying us through
 1405                          * crypto_unblock.  Note that when we wakeup we
 1406                          * start processing each queue again from the
 1407                          * front. It's not clear that it's important to
 1408                          * preserve this ordering since ops may finish
 1409                          * out of order if dispatched to different devices
 1410                          * and some become blocked while others do not.
 1411                          */
 1412                         crp_sleep = 1;
 1413                         msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
 1414                         crp_sleep = 0;
 1415                         if (cryptoproc == NULL)
 1416                                 break;
 1417                         cryptostats.cs_intrs++;
 1418                 }
 1419         }
 1420         CRYPTO_Q_UNLOCK();
 1421 
 1422         crypto_finis(&crp_q);
 1423 }
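
Illustrative sketch (not part of crypto.c): the ERESTART handling above relies
on the driver later calling crypto_unblock() once it has freed the resources
that made it return ERESTART; until then the dispatch loop skips that driver
via cc_qblocked/cc_kqblocked.  The fragment below shows what such a driver-side
call might look like.  crypto_unblock(), CRYPTO_SYMQ, and CRYPTO_ASYMQ are part
of the opencrypto kernel interface; the mydrv_* names and the softc layout are
hypothetical.

#include <sys/param.h>
#include <opencrypto/cryptodev.h>

struct mydrv_softc {
        int32_t sc_cid;                 /* id obtained from crypto_get_driverid() */
};

static void
mydrv_intr(void *arg)
{
        struct mydrv_softc *sc = arg;

        /* ... reap completed descriptors, report each one via crypto_done() ... */

        /*
         * Descriptor slots are free again: ask the framework to clear the
         * blocked state for both the symmetric and the key-op queues and
         * wake the dispatch thread so it retries the queued requests.
         */
        crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
}
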
 1424 
 1425 /*
 1426  * Crypto return thread: runs callbacks for processed crypto requests.
 1427  * Callbacks are done here, rather than in the crypto drivers, because
 1428  * callbacks typically are expensive and would slow interrupt handling.
 1429  */
 1430 static void
 1431 crypto_ret_proc(void)
 1432 {
 1433         struct cryptop *crpt;
 1434         struct cryptkop *krpt;
 1435 
 1436         CRYPTO_RETQ_LOCK();
 1437         for (;;) {
 1438                 /* Harvest return q's for completed ops */
 1439                 crpt = TAILQ_FIRST(&crp_ret_q);
 1440                 if (crpt != NULL)
 1441                         TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);
 1442 
 1443                 krpt = TAILQ_FIRST(&crp_ret_kq);
 1444                 if (krpt != NULL)
 1445                         TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);
 1446 
 1447                 if (crpt != NULL || krpt != NULL) {
 1448                         CRYPTO_RETQ_UNLOCK();
 1449                         /*
 1450                          * Run callbacks unlocked.
 1451                          */
 1452                         if (crpt != NULL) {
 1453 #ifdef CRYPTO_TIMING
 1454                                 if (crypto_timing) {
 1455                                         /*
 1456                                          * NB: We must copy the timestamp before
 1457                                          * doing the callback as the cryptop is
 1458                                          * likely to be reclaimed.
 1459                                          */
 1460                                         struct bintime t = crpt->crp_tstamp;
 1461                                         crypto_tstat(&cryptostats.cs_cb, &t);
 1462                                         crpt->crp_callback(crpt);
 1463                                         crypto_tstat(&cryptostats.cs_finis, &t);
 1464                                 } else
 1465 #endif
 1466                                         crpt->crp_callback(crpt);
 1467                         }
 1468                         if (krpt != NULL)
 1469                                 krpt->krp_callback(krpt);
 1470                         CRYPTO_RETQ_LOCK();
 1471                 } else {
 1472                         /*
 1473                          * Nothing more to be processed.  Sleep until we're
 1474                          * woken because there are more returns to process.
 1475                          */
 1476                         msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT,
 1477                                 "crypto_ret_wait", 0);
 1478                         if (cryptoretproc == NULL)
 1479                                 break;
 1480                         cryptostats.cs_rets++;
 1481                 }
 1482         }
 1483         CRYPTO_RETQ_UNLOCK();
 1484 
 1485         crypto_finis(&crp_ret_q);
 1486 }
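
Illustrative sketch (not part of crypto.c, same headers as the sketch above):
completed requests reach crp_ret_q and crp_ret_kq because drivers hand them
back with crypto_done() and crypto_kdone(); the return thread above then runs
the callbacks outside of driver/interrupt context.  crypto_done() and crp_etype
are the real interface; the mydrv_* completion handler is hypothetical.

static void
mydrv_op_complete(struct cryptop *crp, int error)
{
        crp->crp_etype = error;         /* 0 on success, errno on failure */
        crypto_done(crp);               /* normally queued on crp_ret_q for the return thread */
}
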
 1487 
 1488 #ifdef DDB
 1489 static void
 1490 db_show_drivers(void)
 1491 {
 1492         int hid;
 1493 
 1494         db_printf("%12s %4s %4s %8s %2s %2s\n"
 1495                 , "Device"
 1496                 , "Ses"
 1497                 , "Kops"
 1498                 , "Flags"
 1499                 , "QB"
 1500                 , "KB"
 1501         );
 1502         for (hid = 0; hid < crypto_drivers_num; hid++) {
 1503                 const struct cryptocap *cap = &crypto_drivers[hid];
 1504                 if (cap->cc_dev == NULL)
 1505                         continue;
 1506                 db_printf("%-12s %4u %4u %08x %2u %2u\n"
 1507                     , device_get_nameunit(cap->cc_dev)
 1508                     , cap->cc_sessions
 1509                     , cap->cc_koperations
 1510                     , cap->cc_flags
 1511                     , cap->cc_qblocked
 1512                     , cap->cc_kqblocked
 1513                 );
 1514         }
 1515 }
 1516 
 1517 DB_SHOW_COMMAND(crypto, db_show_crypto)
 1518 {
 1519         struct cryptop *crp;
 1520 
 1521         db_show_drivers();
 1522         db_printf("\n");
 1523 
 1524         db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
 1525             "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
 1526             "Desc", "Callback");
 1527         TAILQ_FOREACH(crp, &crp_q, crp_next) {
 1528                 db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
 1529                     , (int) CRYPTO_SESID2HID(crp->crp_sid)
 1530                     , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
 1531                     , crp->crp_ilen, crp->crp_olen
 1532                     , crp->crp_etype
 1533                     , crp->crp_flags
 1534                     , crp->crp_desc
 1535                     , crp->crp_callback
 1536                 );
 1537         }
 1538         if (!TAILQ_EMPTY(&crp_ret_q)) {
 1539                 db_printf("\n%4s %4s %4s %8s\n",
 1540                     "HID", "Etype", "Flags", "Callback");
 1541                 TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
 1542                         db_printf("%4u %4u %04x %8p\n"
 1543                             , (int) CRYPTO_SESID2HID(crp->crp_sid)
 1544                             , crp->crp_etype
 1545                             , crp->crp_flags
 1546                             , crp->crp_callback
 1547                         );
 1548                 }
 1549         }
 1550 }
 1551 
 1552 DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
 1553 {
 1554         struct cryptkop *krp;
 1555 
 1556         db_show_drivers();
 1557         db_printf("\n");
 1558 
 1559         db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
 1560             "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
 1561         TAILQ_FOREACH(krp, &crp_kq, krp_next) {
 1562                 db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
 1563                     , krp->krp_op
 1564                     , krp->krp_status
 1565                     , krp->krp_iparams, krp->krp_oparams
 1566                     , krp->krp_crid, krp->krp_hid
 1567                     , krp->krp_callback
 1568                 );
 1569         }
 1570         if (!TAILQ_EMPTY(&crp_ret_kq)) {
 1571                 db_printf("%4s %5s %8s %4s %8s\n",
 1572                     "Op", "Status", "CRID", "HID", "Callback");
 1573                 TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
 1574                         db_printf("%4u %5u %08x %4u %8p\n"
 1575                             , krp->krp_op
 1576                             , krp->krp_status
 1577                             , krp->krp_crid, krp->krp_hid
 1578                             , krp->krp_callback
 1579                         );
 1580                 }
 1581         }
 1582 }
 1583 #endif
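
A brief usage note: DB_SHOW_COMMAND() registers these handlers under DDB's
"show" prefix, so on a kernel built with "options DDB" the driver table and
the pending/return queues above can be dumped from the debugger prompt with:

        db> show crypto
        db> show kcrypto
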
 1584 
 1585 int crypto_modevent(module_t mod, int type, void *unused);
 1586 
 1587 /*
 1588  * Initialization code, both for static and dynamic loading.
 1589  * Note this is not invoked with the usual MODULE_DECLARE
 1590  * mechanism but instead is listed as a dependency by the
 1591  * cryptosoft driver.  This guarantees proper ordering of
 1592  * calls on module load/unload.
 1593  */
 1594 int
 1595 crypto_modevent(module_t mod, int type, void *unused)
 1596 {
 1597         int error = EINVAL;
 1598 
 1599         switch (type) {
 1600         case MOD_LOAD:
 1601                 error = crypto_init();
 1602                 if (error == 0 && bootverbose)
 1603                         printf("crypto: <crypto core>\n");
 1604                 break;
 1605         case MOD_UNLOAD:
 1606                 /* XXX disallow unload if there are active sessions */
 1607                 error = 0;
 1608                 crypto_destroy();
 1609                 return 0;
 1610         }
 1611         return error;
 1612 }
 1613 MODULE_VERSION(crypto, 1);
 1614 MODULE_DEPEND(crypto, zlib, 1, 1, 1);
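
Illustrative sketch (not part of crypto.c): as the comment above
crypto_modevent() explains, load ordering is enforced by having consumers
declare a module dependency on "crypto" rather than by a DECLARE_MODULE()
entry here.  A dependent driver would carry a line like the one below;
MODULE_DEPEND() is the real macro, while "mydrv" is a hypothetical module name
(the cryptosoft driver declares the equivalent dependency on "crypto").

MODULE_DEPEND(mydrv, crypto, 1, 1, 1);
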
