FreeBSD/Linux Kernel Cross Reference
sys/opencrypto/crypto.c


    1 /*-
    2  * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice, this list of conditions and the following disclaimer.
    9  * 2. Redistributions in binary form must reproduce the above copyright
   10  *    notice, this list of conditions and the following disclaimer in the
   11  *    documentation and/or other materials provided with the distribution.
   12  *
   13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   14  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   15  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   16  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   17  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   18  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   19  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   20  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   22  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   23  */
   24 
   25 #include <sys/cdefs.h>
   26 __FBSDID("$FreeBSD$");
   27 
   28 /*
   29  * Cryptographic Subsystem.
   30  *
   31  * This code is derived from the OpenBSD Cryptographic Framework (OCF)
   32  * that has the copyright shown below.  Very little of the original
   33  * code remains.
   34  */
   35 
   36 /*-
   37  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
   38  *
   39  * This code was written by Angelos D. Keromytis in Athens, Greece, in
   40  * February 2000. Network Security Technologies Inc. (NSTI) kindly
   41  * supported the development of this code.
   42  *
   43  * Copyright (c) 2000, 2001 Angelos D. Keromytis
   44  *
   45  * Permission to use, copy, and modify this software with or without fee
   46  * is hereby granted, provided that this entire notice is included in
   47  * all source code copies of any software which is or includes a copy or
   48  * modification of this software.
   49  *
   50  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
   51  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
   52  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
   53  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
   54  * PURPOSE.
   55  */
   56 
   57 #define CRYPTO_TIMING                           /* enable timing support */
   58 
   59 #include "opt_ddb.h"
   60 #include "opt_kdtrace.h"
   61 
   62 #include <sys/param.h>
   63 #include <sys/systm.h>
   64 #include <sys/eventhandler.h>
   65 #include <sys/kernel.h>
   66 #include <sys/kthread.h>
   67 #include <sys/lock.h>
   68 #include <sys/module.h>
   69 #include <sys/mutex.h>
   70 #include <sys/malloc.h>
   71 #include <sys/proc.h>
   72 #include <sys/sdt.h>
   73 #include <sys/sysctl.h>
   74 
   75 #include <ddb/ddb.h>
   76 
   77 #include <vm/uma.h>
   78 #include <opencrypto/cryptodev.h>
   79 #include <opencrypto/xform.h>                   /* XXX for M_XDATA */
   80 
   81 #include <sys/kobj.h>
   82 #include <sys/bus.h>
   83 #include "cryptodev_if.h"
   84 
   85 SDT_PROVIDER_DEFINE(opencrypto);
   86 
   87 /*
   88  * Crypto drivers register themselves by allocating a slot in the
   89  * crypto_drivers table with crypto_get_driverid() and then registering
   90  * each algorithm they support with crypto_register() and crypto_kregister().
   91  */
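A minimal sketch of how a hypothetical foo(4) driver's attach routine might use
these entry points; the softc layout, driver name, and algorithm choices are
illustrative and not part of this file:

	/* Hypothetical driver attach path (sketch only). */
	static int
	foo_attach(device_t dev)
	{
		struct foo_softc *sc = device_get_softc(dev);

		/* Reserve a slot in crypto_drivers[]. */
		sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
		if (sc->sc_cid < 0)
			return (ENXIO);

		/* Advertise the symmetric algorithms the hardware offloads. */
		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
		/* And any key (asymmetric) operations it supports. */
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
		return (0);
	}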
   92 static  struct mtx crypto_drivers_mtx;          /* lock on driver table */
   93 #define CRYPTO_DRIVER_LOCK()    mtx_lock(&crypto_drivers_mtx)
   94 #define CRYPTO_DRIVER_UNLOCK()  mtx_unlock(&crypto_drivers_mtx)
   95 #define CRYPTO_DRIVER_ASSERT()  mtx_assert(&crypto_drivers_mtx, MA_OWNED)
   96 
   97 /*
   98  * Crypto device/driver capabilities structure.
   99  *
  100  * Synchronization:
  101  * (d) - protected by CRYPTO_DRIVER_LOCK()
  102  * (q) - protected by CRYPTO_Q_LOCK()
  103  * Fields not otherwise tagged are read-only.
  104  */
  105 struct cryptocap {
  106         device_t        cc_dev;                 /* (d) device/driver */
  107         u_int32_t       cc_sessions;            /* (d) # of sessions */
  108         u_int32_t       cc_koperations;         /* (d) # of asym operations */
  109         /*
  110          * Largest possible operator length (in bits) for each type of
  111          * encryption algorithm. XXX not used
  112          */
  113         u_int16_t       cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
  114         u_int8_t        cc_alg[CRYPTO_ALGORITHM_MAX + 1];
  115         u_int8_t        cc_kalg[CRK_ALGORITHM_MAX + 1];
  116 
  117         int             cc_flags;               /* (d) flags */
  118 #define CRYPTOCAP_F_CLEANUP     0x80000000      /* needs resource cleanup */
  119         int             cc_qblocked;            /* (q) symmetric q blocked */
  120         int             cc_kqblocked;           /* (q) asymmetric q blocked */
  121 };
  122 static  struct cryptocap *crypto_drivers = NULL;
  123 static  int crypto_drivers_num = 0;
  124 
  125 /*
  126  * There are two queues for crypto requests; one for symmetric (e.g.
  127  * cipher) operations and one for asymmetric (e.g. MOD) operations.
  128  * A single mutex is used to lock access to both queues.  We could
  129  * have one per-queue but having one simplifies handling of block/unblock
  130  * operations.
  131  */
  132 static  int crp_sleep = 0;
  133 static  TAILQ_HEAD(,cryptop) crp_q;             /* request queues */
  134 static  TAILQ_HEAD(,cryptkop) crp_kq;
  135 static  struct mtx crypto_q_mtx;
  136 #define CRYPTO_Q_LOCK()         mtx_lock(&crypto_q_mtx)
  137 #define CRYPTO_Q_UNLOCK()       mtx_unlock(&crypto_q_mtx)
  138 
  139 /*
  140  * There are two queues for processing completed crypto requests; one
  141  * for the symmetric and one for the asymmetric ops.  We only need one
  142  * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
  143  * mutex is used to lock access to both queues.  Note that this lock
  145  * must be separate from the lock on request queues to ensure driver
  145  * callbacks don't generate lock order reversals.
  146  */
  147 static  TAILQ_HEAD(,cryptop) crp_ret_q;         /* callback queues */
  148 static  TAILQ_HEAD(,cryptkop) crp_ret_kq;
  149 static  struct mtx crypto_ret_q_mtx;
  150 #define CRYPTO_RETQ_LOCK()      mtx_lock(&crypto_ret_q_mtx)
  151 #define CRYPTO_RETQ_UNLOCK()    mtx_unlock(&crypto_ret_q_mtx)
  152 #define CRYPTO_RETQ_EMPTY()     (TAILQ_EMPTY(&crp_ret_q) && TAILQ_EMPTY(&crp_ret_kq))
  153 
  154 static  uma_zone_t cryptop_zone;
  155 static  uma_zone_t cryptodesc_zone;
  156 
  157 int     crypto_userasymcrypto = 1;      /* userland may do asym crypto reqs */
  158 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
  159            &crypto_userasymcrypto, 0,
  160            "Enable/disable user-mode access to asymmetric crypto support");
  161 int     crypto_devallowsoft = 0;        /* only use hardware crypto for asym */
  162 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
  163            &crypto_devallowsoft, 0,
  164            "Enable/disable use of software asym crypto support");
  165 
  166 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
  167 
  168 static  void crypto_proc(void);
  169 static  struct proc *cryptoproc;
  170 static  void crypto_ret_proc(void);
  171 static  struct proc *cryptoretproc;
  172 static  void crypto_destroy(void);
  173 static  int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
  174 static  int crypto_kinvoke(struct cryptkop *krp, int flags);
  175 
  176 static  struct cryptostats cryptostats;
  177 SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
  178             cryptostats, "Crypto system statistics");
  179 
  180 #ifdef CRYPTO_TIMING
  181 static  int crypto_timing = 0;
  182 SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
  183            &crypto_timing, 0, "Enable/disable crypto timing support");
  184 #endif
  185 
  186 static int
  187 crypto_init(void)
  188 {
  189         int error;
  190 
  191         mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
  192                 MTX_DEF|MTX_QUIET);
  193 
  194         TAILQ_INIT(&crp_q);
  195         TAILQ_INIT(&crp_kq);
  196         mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);
  197 
  198         TAILQ_INIT(&crp_ret_q);
  199         TAILQ_INIT(&crp_ret_kq);
  200         mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF);
  201 
  202         cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
  203                                     0, 0, 0, 0,
  204                                     UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
  205         cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
  206                                     0, 0, 0, 0,
  207                                     UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
  208         if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
  209                 printf("crypto_init: cannot setup crypto zones\n");
  210                 error = ENOMEM;
  211                 goto bad;
  212         }
  213 
  214         crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
  215         crypto_drivers = malloc(crypto_drivers_num *
  216             sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
  217         if (crypto_drivers == NULL) {
  218                 printf("crypto_init: cannot setup crypto drivers\n");
  219                 error = ENOMEM;
  220                 goto bad;
  221         }
  222 
  223         error = kthread_create((void (*)(void *)) crypto_proc, NULL,
  224                     &cryptoproc, 0, 0, "crypto");
  225         if (error) {
  226                 printf("crypto_init: cannot start crypto thread; error %d\n",
  227                         error);
  228                 goto bad;
  229         }
  230 
  231         error = kthread_create((void (*)(void *)) crypto_ret_proc, NULL,
  232                     &cryptoretproc, 0, 0, "crypto returns");
  233         if (error) {
  234                 printf("crypto_init: cannot start cryptoret thread; error %d\n",
  235                         error);
  236                 goto bad;
  237         }
  238         return 0;
  239 bad:
  240         crypto_destroy();
  241         return error;
  242 }
  243 
  244 /*
  245  * Signal a crypto thread to terminate.  We use the driver
  246  * table lock to synchronize the sleep/wakeups so that we
  247  * are sure the threads have terminated before we release
  248  * the data structures they use.  See crypto_finis below
  249  * for the other half of this song-and-dance.
  250  */
  251 static void
  252 crypto_terminate(struct proc **pp, void *q)
  253 {
  254         struct proc *p;
  255 
  256         mtx_assert(&crypto_drivers_mtx, MA_OWNED);
  257         p = *pp;
  258         *pp = NULL;
  259         if (p) {
  260                 wakeup_one(q);
  261                 PROC_LOCK(p);           /* NB: insure we don't miss wakeup */
  262                 CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */
  263                 msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
  264                 PROC_UNLOCK(p);
  265                 CRYPTO_DRIVER_LOCK();
  266         }
  267 }
  268 
  269 static void
  270 crypto_destroy(void)
  271 {
  272         /*
  273          * Terminate any crypto threads.
  274          */
  275         CRYPTO_DRIVER_LOCK();
  276         crypto_terminate(&cryptoproc, &crp_q);
  277         crypto_terminate(&cryptoretproc, &crp_ret_q);
  278         CRYPTO_DRIVER_UNLOCK();
  279 
  280         /* XXX flush queues??? */
  281 
  282         /* 
  283          * Reclaim dynamically allocated resources.
  284          */
  285         if (crypto_drivers != NULL)
  286                 free(crypto_drivers, M_CRYPTO_DATA);
  287 
  288         if (cryptodesc_zone != NULL)
  289                 uma_zdestroy(cryptodesc_zone);
  290         if (cryptop_zone != NULL)
  291                 uma_zdestroy(cryptop_zone);
  292         mtx_destroy(&crypto_q_mtx);
  293         mtx_destroy(&crypto_ret_q_mtx);
  294         mtx_destroy(&crypto_drivers_mtx);
  295 }
  296 
  297 static struct cryptocap *
  298 crypto_checkdriver(u_int32_t hid)
  299 {
  300         if (crypto_drivers == NULL)
  301                 return NULL;
  302         return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
  303 }
  304 
  305 /*
  306  * Compare a driver's list of supported algorithms against another
  307  * list; return non-zero if all algorithms are supported.
  308  */
  309 static int
  310 driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
  311 {
  312         const struct cryptoini *cr;
  313 
  314         /* See if all the algorithms are supported. */
  315         for (cr = cri; cr; cr = cr->cri_next)
  316                 if (cap->cc_alg[cr->cri_alg] == 0)
  317                         return 0;
  318         return 1;
  319 }
  320 
  321 /*
  322  * Select a driver for a new session that supports the specified
  323  * algorithms and, optionally, is constrained according to the flags.
  324  * The algorithm we use here is pretty stupid; just use the
  325  * first driver that supports all the algorithms we need. If there
  326  * are multiple drivers we choose the driver with the fewest active
  327  * sessions.  We prefer hardware-backed drivers to software ones.
  328  *
  329  * XXX We need more smarts here (in real life too, but that's
  330  * XXX another story altogether).
  331  */
  332 static struct cryptocap *
  333 crypto_select_driver(const struct cryptoini *cri, int flags)
  334 {
  335         struct cryptocap *cap, *best;
  336         int match, hid;
  337 
  338         CRYPTO_DRIVER_ASSERT();
  339 
  340         /*
  341          * Look first for hardware crypto devices if permitted.
  342          */
  343         if (flags & CRYPTOCAP_F_HARDWARE)
  344                 match = CRYPTOCAP_F_HARDWARE;
  345         else
  346                 match = CRYPTOCAP_F_SOFTWARE;
  347         best = NULL;
  348 again:
  349         for (hid = 0; hid < crypto_drivers_num; hid++) {
  350                 cap = &crypto_drivers[hid];
  351                 /*
  352                  * If it's not initialized, is in the process of
  353                  * going away, or is not appropriate (hardware
  354                  * or software based on match), then skip.
  355                  */
  356                 if (cap->cc_dev == NULL ||
  357                     (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
  358                     (cap->cc_flags & match) == 0)
  359                         continue;
  360 
  361                 /* verify all the algorithms are supported. */
  362                 if (driver_suitable(cap, cri)) {
  363                         if (best == NULL ||
  364                             cap->cc_sessions < best->cc_sessions)
  365                                 best = cap;
  366                 }
  367         }
  368         if (best != NULL)
  369                 return best;
  370         if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
  371                 /* sort of an Algol 68-style for loop */
  372                 match = CRYPTOCAP_F_SOFTWARE;
  373                 goto again;
  374         }
  375         return best;
  376 }
  377 
  378 /*
  379  * Create a new session.  The crid argument specifies a crypto
  380  * driver to use or constraints on a driver to select (hardware
  381  * only, software only, either).  Whatever driver is selected
  382  * must be capable of the requested crypto algorithms.
  383  */
  384 int
  385 crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
  386 {
  387         struct cryptocap *cap;
  388         u_int32_t hid, lid;
  389         int err;
  390 
  391         CRYPTO_DRIVER_LOCK();
  392         if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
  393                 /*
  394                  * Use specified driver; verify it is capable.
  395                  */
  396                 cap = crypto_checkdriver(crid);
  397                 if (cap != NULL && !driver_suitable(cap, cri))
  398                         cap = NULL;
  399         } else {
  400                 /*
  401                  * No requested driver; select based on crid flags.
  402                  */
  403                 cap = crypto_select_driver(cri, crid);
  404                 /*
  405                  * If NULL, no single driver can handle everything in one session.
  406                  * XXX Fix this. We need to inject a "virtual" session
  407                  * XXX layer right about here.
  408                  */
  409         }
  410         if (cap != NULL) {
  411                 /* Call the driver initialization routine. */
  412                 hid = cap - crypto_drivers;
  413                 lid = hid;              /* Pass the driver ID. */
  414                 err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
  415                 if (err == 0) {
  416                         (*sid) = (cap->cc_flags & 0xff000000)
  417                                | (hid & 0x00ffffff);
  418                         (*sid) <<= 32;
  419                         (*sid) |= (lid & 0xffffffff);
  420                         cap->cc_sessions++;
  421                 }
  422         } else
  423                 err = EINVAL;
  424         CRYPTO_DRIVER_UNLOCK();
  425         return err;
  426 }
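The 64-bit session id built above packs the driver's capability flags and its
index in crypto_drivers[] (the "hid") into the upper 32 bits and the
driver-private id (the "lid") into the lower 32 bits; the CRYPTO_SESID2HID()
macro used elsewhere in this file recovers the driver index.  A hedged
consumer-side sketch, where the key handling and algorithm choice are
illustrative:

	/* Sketch: open an AES-CBC session on any capable driver. */
	static int
	example_newsession(caddr_t key, int klen_bits, u_int64_t *sidp)
	{
		struct cryptoini cri;

		bzero(&cri, sizeof(cri));
		cri.cri_alg = CRYPTO_AES_CBC;
		cri.cri_klen = klen_bits;	/* key length in bits */
		cri.cri_key = key;		/* caller-supplied key material */
		return (crypto_newsession(sidp, &cri,
		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE));
	}

The session is released with crypto_freesession(sid) once all requests against
it have completed.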
  427 
  428 static void
  429 crypto_remove(struct cryptocap *cap)
  430 {
  431 
  432         mtx_assert(&crypto_drivers_mtx, MA_OWNED);
  433         if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
  434                 bzero(cap, sizeof(*cap));
  435 }
  436 
  437 /*
  438  * Delete an existing session (or a reserved session on an unregistered
  439  * driver).
  440  */
  441 int
  442 crypto_freesession(u_int64_t sid)
  443 {
  444         struct cryptocap *cap;
  445         u_int32_t hid;
  446         int err;
  447 
  448         CRYPTO_DRIVER_LOCK();
  449 
  450         if (crypto_drivers == NULL) {
  451                 err = EINVAL;
  452                 goto done;
  453         }
  454 
  455         /* Determine the driver (hid) that owns the session. */
  456         hid = CRYPTO_SESID2HID(sid);
  457 
  458         if (hid >= crypto_drivers_num) {
  459                 err = ENOENT;
  460                 goto done;
  461         }
  462         cap = &crypto_drivers[hid];
  463 
  464         if (cap->cc_sessions)
  465                 cap->cc_sessions--;
  466 
  467         /* Call the driver cleanup routine, if available. */
  468         err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
  469 
  470         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
  471                 crypto_remove(cap);
  472 
  473 done:
  474         CRYPTO_DRIVER_UNLOCK();
  475         return err;
  476 }
  477 
  478 /*
  479  * Return an unused driver id.  Used by drivers prior to registering
  480  * support for the algorithms they handle.
  481  */
  482 int32_t
  483 crypto_get_driverid(device_t dev, int flags)
  484 {
  485         struct cryptocap *newdrv;
  486         int i;
  487 
  488         if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
  489                 printf("%s: no flags specified when registering driver\n",
  490                     device_get_nameunit(dev));
  491                 return -1;
  492         }
  493 
  494         CRYPTO_DRIVER_LOCK();
  495 
  496         for (i = 0; i < crypto_drivers_num; i++) {
  497                 if (crypto_drivers[i].cc_dev == NULL &&
  498                     (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
  499                         break;
  500                 }
  501         }
  502 
  503         /* Out of entries, allocate some more. */
  504         if (i == crypto_drivers_num) {
  505                 /* Be careful about wrap-around. */
  506                 if (2 * crypto_drivers_num <= crypto_drivers_num) {
  507                         CRYPTO_DRIVER_UNLOCK();
  508                         printf("crypto: driver count wraparound!\n");
  509                         return -1;
  510                 }
  511 
  512                 newdrv = malloc(2 * crypto_drivers_num *
  513                     sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
  514                 if (newdrv == NULL) {
  515                         CRYPTO_DRIVER_UNLOCK();
  516                         printf("crypto: no space to expand driver table!\n");
  517                         return -1;
  518                 }
  519 
  520                 bcopy(crypto_drivers, newdrv,
  521                     crypto_drivers_num * sizeof(struct cryptocap));
  522 
  523                 crypto_drivers_num *= 2;
  524 
  525                 free(crypto_drivers, M_CRYPTO_DATA);
  526                 crypto_drivers = newdrv;
  527         }
  528 
  529         /* NB: state is zero'd on free */
  530         crypto_drivers[i].cc_sessions = 1;      /* Mark */
  531         crypto_drivers[i].cc_dev = dev;
  532         crypto_drivers[i].cc_flags = flags;
  533         if (bootverbose)
  534                 printf("crypto: assign %s driver id %u, flags %u\n",
  535                     device_get_nameunit(dev), i, flags);
  536 
  537         CRYPTO_DRIVER_UNLOCK();
  538 
  539         return i;
  540 }
  541 
  542 /*
  543  * Lookup a driver by name.  We match against the full device
  544  * name and unit, and against just the name.  The latter gives
  545  * us a simple wildcarding by device name.  On success return the
  546  * driver/hardware identifier; otherwise return -1.
  547  */
  548 int
  549 crypto_find_driver(const char *match)
  550 {
  551         int i, len = strlen(match);
  552 
  553         CRYPTO_DRIVER_LOCK();
  554         for (i = 0; i < crypto_drivers_num; i++) {
  555                 device_t dev = crypto_drivers[i].cc_dev;
  556                 if (dev == NULL ||
  557                     (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
  558                         continue;
  559                 if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
  560                     strncmp(match, device_get_name(dev), len) == 0)
  561                         break;
  562         }
  563         CRYPTO_DRIVER_UNLOCK();
  564         return i < crypto_drivers_num ? i : -1;
  565 }
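For example, a consumer that wants a specific provider might do something like
the following; the driver name is illustrative:

	/* Sketch: locate a named driver and check that it is hardware-backed. */
	int hid = crypto_find_driver("aesni0");	/* "aesni" alone wildcards the unit */

	if (hid >= 0 && (crypto_getcaps(hid) & CRYPTOCAP_F_HARDWARE) != 0) {
		/* hid can now be passed as the crid argument to crypto_newsession(). */
	}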
  566 
  567 /*
  568  * Return the device_t for the specified driver or NULL
  569  * if the driver identifier is invalid.
  570  */
  571 device_t
  572 crypto_find_device_byhid(int hid)
  573 {
  574         struct cryptocap *cap = crypto_checkdriver(hid);
  575         return cap != NULL ? cap->cc_dev : NULL;
  576 }
  577 
  578 /*
  579  * Return the device/driver capabilities.
  580  */
  581 int
  582 crypto_getcaps(int hid)
  583 {
  584         struct cryptocap *cap = crypto_checkdriver(hid);
  585         return cap != NULL ? cap->cc_flags : 0;
  586 }
  587 
  588 /*
  589  * Register support for a key-related algorithm.  This routine
  590  * is called once for each algorithm supported by a driver.
  591  */
  592 int
  593 crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
  594 {
  595         struct cryptocap *cap;
  596         int err;
  597 
  598         CRYPTO_DRIVER_LOCK();
  599 
  600         cap = crypto_checkdriver(driverid);
  601         if (cap != NULL &&
  602             (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
  603                 /*
  604                  * XXX Do some performance testing to determine placing.
  605                  * XXX We probably need an auxiliary data structure that
  606                  * XXX describes relative performances.
  607                  */
  608 
  609                 cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
  610                 if (bootverbose)
  611                         printf("crypto: %s registers key alg %u flags %u\n"
  612                                 , device_get_nameunit(cap->cc_dev)
  613                                 , kalg
  614                                 , flags
  615                         );
  616                 err = 0;
  617         } else
  618                 err = EINVAL;
  619 
  620         CRYPTO_DRIVER_UNLOCK();
  621         return err;
  622 }
  623 
  624 /*
  625  * Register support for a non-key-related algorithm.  This routine
  626  * is called once for each such algorithm supported by a driver.
  627  */
  628 int
  629 crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
  630     u_int32_t flags)
  631 {
  632         struct cryptocap *cap;
  633         int err;
  634 
  635         CRYPTO_DRIVER_LOCK();
  636 
  637         cap = crypto_checkdriver(driverid);
  638         /* NB: algorithms are in the range [1..max] */
  639         if (cap != NULL &&
  640             (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
  641                 /*
  642                  * XXX Do some performance testing to determine placing.
  643                  * XXX We probably need an auxiliary data structure that
  644                  * XXX describes relative performances.
  645                  */
  646 
  647                 cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
  648                 cap->cc_max_op_len[alg] = maxoplen;
  649                 if (bootverbose)
  650                         printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
  651                                 , device_get_nameunit(cap->cc_dev)
  652                                 , alg
  653                                 , flags
  654                                 , maxoplen
  655                         );
  656                 cap->cc_sessions = 0;           /* Unmark */
  657                 err = 0;
  658         } else
  659                 err = EINVAL;
  660 
  661         CRYPTO_DRIVER_UNLOCK();
  662         return err;
  663 }
  664 
  665 static void
  666 driver_finis(struct cryptocap *cap)
  667 {
  668         u_int32_t ses, kops;
  669 
  670         CRYPTO_DRIVER_ASSERT();
  671 
  672         ses = cap->cc_sessions;
  673         kops = cap->cc_koperations;
  674         bzero(cap, sizeof(*cap));
  675         if (ses != 0 || kops != 0) {
  676                 /*
  677                  * If there are pending sessions,
  678                  * just mark as invalid.
  679                  */
  680                 cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
  681                 cap->cc_sessions = ses;
  682                 cap->cc_koperations = kops;
  683         }
  684 }
  685 
  686 /*
  687  * Unregister a crypto driver. If there are pending sessions using it,
  688  * leave enough information around so that subsequent calls using those
  689  * sessions will correctly detect the driver has been unregistered and
  690  * reroute requests.
  691  */
  692 int
  693 crypto_unregister(u_int32_t driverid, int alg)
  694 {
  695         struct cryptocap *cap;
  696         int i, err;
  697 
  698         CRYPTO_DRIVER_LOCK();
  699         cap = crypto_checkdriver(driverid);
  700         if (cap != NULL &&
  701             (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
  702             cap->cc_alg[alg] != 0) {
  703                 cap->cc_alg[alg] = 0;
  704                 cap->cc_max_op_len[alg] = 0;
  705 
  706                 /* Was this the last algorithm ? */
  707                 for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
  708                         if (cap->cc_alg[i] != 0)
  709                                 break;
  710 
  711                 if (i == CRYPTO_ALGORITHM_MAX + 1)
  712                         driver_finis(cap);
  713                 err = 0;
  714         } else
  715                 err = EINVAL;
  716         CRYPTO_DRIVER_UNLOCK();
  717 
  718         return err;
  719 }
  720 
  721 /*
  722  * Unregister all algorithms associated with a crypto driver.
  723  * If there are pending sessions using it, leave enough information
  724  * around so that subsequent calls using those sessions will
  725  * correctly detect the driver has been unregistered and reroute
  726  * requests.
  727  */
  728 int
  729 crypto_unregister_all(u_int32_t driverid)
  730 {
  731         struct cryptocap *cap;
  732         int err;
  733 
  734         CRYPTO_DRIVER_LOCK();
  735         cap = crypto_checkdriver(driverid);
  736         if (cap != NULL) {
  737                 driver_finis(cap);
  738                 err = 0;
  739         } else
  740                 err = EINVAL;
  741         CRYPTO_DRIVER_UNLOCK();
  742 
  743         return err;
  744 }
  745 
  746 /*
  747  * Clear blockage on a driver.  The what parameter indicates whether
  748  * the driver is now ready for cryptop's and/or cryptokop's.
  749  */
  750 int
  751 crypto_unblock(u_int32_t driverid, int what)
  752 {
  753         struct cryptocap *cap;
  754         int err;
  755 
  756         CRYPTO_Q_LOCK();
  757         cap = crypto_checkdriver(driverid);
  758         if (cap != NULL) {
  759                 if (what & CRYPTO_SYMQ)
  760                         cap->cc_qblocked = 0;
  761                 if (what & CRYPTO_ASYMQ)
  762                         cap->cc_kqblocked = 0;
  763                 if (crp_sleep)
  764                         wakeup_one(&crp_q);
  765                 err = 0;
  766         } else
  767                 err = EINVAL;
  768         CRYPTO_Q_UNLOCK();
  769 
  770         return err;
  771 }
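The intended flow-control pattern, sketched below, is for a driver's process
method to return ERESTART when its hardware queue fills (the framework then
marks the driver blocked and re-queues the request) and to call crypto_unblock()
from its completion path once it can accept work again.  The foo(4) driver and
its ring helpers are hypothetical:

	/* Sketch: driver-side back-pressure. */
	static int
	foo_process(device_t dev, struct cryptop *crp, int hint)
	{
		struct foo_softc *sc = device_get_softc(dev);

		if (foo_ring_full(sc))
			return (ERESTART);	/* framework sets cc_qblocked */
		/* ... hand crp to the hardware; call crypto_done(crp) on completion ... */
		return (0);
	}

	static void
	foo_intr(void *arg)
	{
		struct foo_softc *sc = arg;

		/* ... reap completed requests ... */
		if (foo_ring_has_room(sc))
			crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
	}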
  772 
  773 /*
  774  * Add a crypto request to a queue, to be processed by the kernel thread.
  775  */
  776 int
  777 crypto_dispatch(struct cryptop *crp)
  778 {
  779         struct cryptocap *cap;
  780         u_int32_t hid;
  781         int result;
  782 
  783         cryptostats.cs_ops++;
  784 
  785 #ifdef CRYPTO_TIMING
  786         if (crypto_timing)
  787                 binuptime(&crp->crp_tstamp);
  788 #endif
  789 
  790         hid = CRYPTO_SESID2HID(crp->crp_sid);
  791 
  792         if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
  793                 /*
  794                  * Caller marked the request to be processed
  795                  * immediately; dispatch it directly to the
  796                  * driver unless the driver is currently blocked.
  797                  */
  798                 cap = crypto_checkdriver(hid);
  799                 /* The driver cannot disappear while there is an active session. */
  800                 KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
  801                 if (!cap->cc_qblocked) {
  802                         result = crypto_invoke(cap, crp, 0);
  803                         if (result != ERESTART)
  804                                 return (result);
  805                         /*
  806                          * The driver ran out of resources, put the request on
  807                          * the queue.
  808                          */
  809                 }
  810         }
  811         CRYPTO_Q_LOCK();
  812         TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
  813         if (crp_sleep)
  814                 wakeup_one(&crp_q);
  815         CRYPTO_Q_UNLOCK();
  816         return 0;
  817 }
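From the consumer side a request is typically allocated with crypto_getreq(),
filled in, and handed to crypto_dispatch(); the callback later runs from the
return thread, or immediately when CRYPTO_F_CBIMM is set.  A hedged sketch with
the callback body elided (example_done() is hypothetical and would inspect
crp_etype and call crypto_freereq()):

	/* Sketch: submit one encryption request against an open session. */
	static int example_done(struct cryptop *crp);	/* hypothetical completion callback */

	static int
	example_encrypt(u_int64_t sid, caddr_t buf, int len)
	{
		struct cryptop *crp;
		struct cryptodesc *crd;

		crp = crypto_getreq(1);			/* one descriptor */
		if (crp == NULL)
			return (ENOMEM);

		crp->crp_sid = sid;
		crp->crp_ilen = len;
		crp->crp_buf = buf;
		crp->crp_flags = 0;			/* plain contiguous buffer */
		crp->crp_callback = example_done;

		crd = crp->crp_desc;
		crd->crd_alg = CRYPTO_AES_CBC;
		crd->crd_skip = 0;
		crd->crd_len = len;
		crd->crd_flags = CRD_F_ENCRYPT;

		return (crypto_dispatch(crp));
	}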
  818 
  819 /*
  820  * Add an asymmetric crypto request to a queue,
  821  * to be processed by the kernel thread.
  822  */
  823 int
  824 crypto_kdispatch(struct cryptkop *krp)
  825 {
  826         int error;
  827 
  828         cryptostats.cs_kops++;
  829 
  830         error = crypto_kinvoke(krp, krp->krp_crid);
  831         if (error == ERESTART) {
  832                 CRYPTO_Q_LOCK();
  833                 TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
  834                 if (crp_sleep)
  835                         wakeup_one(&crp_q);
  836                 CRYPTO_Q_UNLOCK();
  837                 error = 0;
  838         }
  839         return error;
  840 }
  841 
  842 /*
  843  * Verify a driver is suitable for the specified operation.
  844  */
  845 static __inline int
  846 kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
  847 {
  848         return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
  849 }
  850 
  851 /*
  852  * Select a driver for an asym operation.  The driver must
  853  * support the necessary algorithm.  The caller can constrain
  854  * which device is selected with the flags parameter.  The
  855  * algorithm we use here is pretty stupid; just use the first
  856  * driver that supports the algorithms we need. If there are
  857  * multiple suitable drivers we choose the driver with the
  858  * fewest active operations.  We prefer hardware-backed
  859  * drivers to software ones when either may be used.
  860  */
  861 static struct cryptocap *
  862 crypto_select_kdriver(const struct cryptkop *krp, int flags)
  863 {
  864         struct cryptocap *cap, *best, *blocked;
  865         int match, hid;
  866 
  867         CRYPTO_DRIVER_ASSERT();
  868 
  869         /*
  870          * Look first for hardware crypto devices if permitted.
  871          */
  872         if (flags & CRYPTOCAP_F_HARDWARE)
  873                 match = CRYPTOCAP_F_HARDWARE;
  874         else
  875                 match = CRYPTOCAP_F_SOFTWARE;
  876         best = NULL;
  877         blocked = NULL;
  878 again:
  879         for (hid = 0; hid < crypto_drivers_num; hid++) {
  880                 cap = &crypto_drivers[hid];
  881                 /*
  882                  * If it's not initialized, is in the process of
  883                  * going away, or is not appropriate (hardware
  884                  * or software based on match), then skip.
  885                  */
  886                 if (cap->cc_dev == NULL ||
  887                     (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
  888                     (cap->cc_flags & match) == 0)
  889                         continue;
  890 
  891                 /* verify all the algorithms are supported. */
  892                 if (kdriver_suitable(cap, krp)) {
  893                         if (best == NULL ||
  894                             cap->cc_koperations < best->cc_koperations)
  895                                 best = cap;
  896                 }
  897         }
  898         if (best != NULL)
  899                 return best;
  900         if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
  901                 /* sort of an Algol 68-style for loop */
  902                 match = CRYPTOCAP_F_SOFTWARE;
  903                 goto again;
  904         }
  905         return best;
  906 }
  907 
  908 /*
  909  * Dispatch an asymmetric crypto request.
  910  */
  911 static int
  912 crypto_kinvoke(struct cryptkop *krp, int crid)
  913 {
  914         struct cryptocap *cap = NULL;
  915         int error;
  916 
  917         KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
  918         KASSERT(krp->krp_callback != NULL,
  919             ("%s: krp->crp_callback == NULL", __func__));
  920 
  921         CRYPTO_DRIVER_LOCK();
  922         if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
  923                 cap = crypto_checkdriver(crid);
  924                 if (cap != NULL) {
  925                         /*
  926                          * Driver present, it must support the necessary
  927                          * algorithm and, if s/w drivers are excluded,
  928                          * it must be registered as hardware-backed.
  929                          */
  930                         if (!kdriver_suitable(cap, krp) ||
  931                             (!crypto_devallowsoft &&
  932                              (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
  933                                 cap = NULL;
  934                 }
  935         } else {
  936                 /*
  937                  * No requested driver; select based on crid flags.
  938                  */
  939                 if (!crypto_devallowsoft)       /* NB: disallow s/w drivers */
  940                         crid &= ~CRYPTOCAP_F_SOFTWARE;
  941                 cap = crypto_select_kdriver(krp, crid);
  942         }
  943         if (cap != NULL && !cap->cc_kqblocked) {
  944                 krp->krp_hid = cap - crypto_drivers;
  945                 cap->cc_koperations++;
  946                 CRYPTO_DRIVER_UNLOCK();
  947                 error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
  948                 CRYPTO_DRIVER_LOCK();
  949                 if (error == ERESTART) {
  950                         cap->cc_koperations--;
  951                         CRYPTO_DRIVER_UNLOCK();
  952                         return (error);
  953                 }
  954         } else {
  955                 /*
  956                  * NB: cap is !NULL if device is blocked; in
  957                  *     that case return ERESTART so the operation
  958                  *     is resubmitted if possible.
  959                  */
  960                 error = (cap == NULL) ? ENODEV : ERESTART;
  961         }
  962         CRYPTO_DRIVER_UNLOCK();
  963 
  964         if (error) {
  965                 krp->krp_status = error;
  966                 crypto_kdone(krp);
  967         }
  968         return 0;
  969 }
  970 
  971 #ifdef CRYPTO_TIMING
  972 static void
  973 crypto_tstat(struct cryptotstat *ts, struct bintime *bt)
  974 {
  975         struct bintime now, delta;
  976         struct timespec t;
  977         uint64_t u;
  978 
  979         binuptime(&now);
  980         u = now.frac;
  981         delta.frac = now.frac - bt->frac;
  982         delta.sec = now.sec - bt->sec;
  983         if (u < delta.frac)
  984                 delta.sec--;
  985         bintime2timespec(&delta, &t);
  986         timespecadd(&ts->acc, &t);
  987         if (timespeccmp(&t, &ts->min, <))
  988                 ts->min = t;
  989         if (timespeccmp(&t, &ts->max, >))
  990                 ts->max = t;
  991         ts->count++;
  992 
  993         *bt = now;
  994 }
  995 #endif
  996 
  997 /*
  998  * Dispatch a crypto request to the appropriate crypto devices.
  999  */
 1000 static int
 1001 crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
 1002 {
 1003 
 1004         KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
 1005         KASSERT(crp->crp_callback != NULL,
 1006             ("%s: crp->crp_callback == NULL", __func__));
 1007         KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
 1008 
 1009 #ifdef CRYPTO_TIMING
 1010         if (crypto_timing)
 1011                 crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
 1012 #endif
 1013         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
 1014                 struct cryptodesc *crd;
 1015                 u_int64_t nid;
 1016 
 1017                 /*
 1018                  * Driver has unregistered; migrate the session and return
 1019                  * an error to the caller so they'll resubmit the op.
 1020                  *
 1021                  * XXX: What if there are more already queued requests for this
 1022                  *      session?
 1023                  */
 1024                 crypto_freesession(crp->crp_sid);
 1025 
 1026                 for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
 1027                         crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
 1028 
 1029                 /* XXX propagate flags from initial session? */
 1030                 if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
 1031                     CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
 1032                         crp->crp_sid = nid;
 1033 
 1034                 crp->crp_etype = EAGAIN;
 1035                 crypto_done(crp);
 1036                 return 0;
 1037         } else {
 1038                 /*
 1039                  * Invoke the driver to process the request.
 1040                  */
 1041                 return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
 1042         }
 1043 }
 1044 
 1045 /*
 1046  * Release a set of crypto descriptors.
 1047  */
 1048 void
 1049 crypto_freereq(struct cryptop *crp)
 1050 {
 1051         struct cryptodesc *crd;
 1052 
 1053         if (crp == NULL)
 1054                 return;
 1055 
 1056 #ifdef DIAGNOSTIC
 1057         {
 1058                 struct cryptop *crp2;
 1059 
 1060                 CRYPTO_Q_LOCK();
 1061                 TAILQ_FOREACH(crp2, &crp_q, crp_next) {
 1062                         KASSERT(crp2 != crp,
 1063                             ("Freeing cryptop from the crypto queue (%p).",
 1064                             crp));
 1065                 }
 1066                 CRYPTO_Q_UNLOCK();
 1067                 CRYPTO_RETQ_LOCK();
 1068                 TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
 1069                         KASSERT(crp2 != crp,
 1070                             ("Freeing cryptop from the return queue (%p).",
 1071                             crp));
 1072                 }
 1073                 CRYPTO_RETQ_UNLOCK();
 1074         }
 1075 #endif
 1076 
 1077         while ((crd = crp->crp_desc) != NULL) {
 1078                 crp->crp_desc = crd->crd_next;
 1079                 uma_zfree(cryptodesc_zone, crd);
 1080         }
 1081         uma_zfree(cryptop_zone, crp);
 1082 }
 1083 
 1084 /*
 1085  * Acquire a set of crypto descriptors.
 1086  */
 1087 struct cryptop *
 1088 crypto_getreq(int num)
 1089 {
 1090         struct cryptodesc *crd;
 1091         struct cryptop *crp;
 1092 
 1093         crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
 1094         if (crp != NULL) {
 1095                 while (num--) {
 1096                         crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
 1097                         if (crd == NULL) {
 1098                                 crypto_freereq(crp);
 1099                                 return NULL;
 1100                         }
 1101 
 1102                         crd->crd_next = crp->crp_desc;
 1103                         crp->crp_desc = crd;
 1104                 }
 1105         }
 1106         return crp;
 1107 }
 1108 
 1109 /*
 1110  * Invoke the callback on behalf of the driver.
 1111  */
 1112 void
 1113 crypto_done(struct cryptop *crp)
 1114 {
 1115         KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
 1116                 ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
 1117         crp->crp_flags |= CRYPTO_F_DONE;
 1118         if (crp->crp_etype != 0)
 1119                 cryptostats.cs_errs++;
 1120 #ifdef CRYPTO_TIMING
 1121         if (crypto_timing)
 1122                 crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
 1123 #endif
 1124         /*
 1125          * CBIMM means unconditionally do the callback immediately;
 1126          * CBIFSYNC means do the callback immediately only if the
 1127          * operation was done synchronously.  Both are used to avoid
 1128          * doing extraneous context switches; the latter is mostly
 1129          * used with the software crypto driver.
 1130          */
 1131         if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
 1132             ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
 1133              (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
 1134                 /*
 1135                  * Do the callback directly.  This is ok when the
 1136                  * callback routine does very little (e.g. the
 1137                  * /dev/crypto callback method just does a wakeup).
 1138                  */
 1139 #ifdef CRYPTO_TIMING
 1140                 if (crypto_timing) {
 1141                         /*
 1142                          * NB: We must copy the timestamp before
 1143                          * doing the callback as the cryptop is
 1144                          * likely to be reclaimed.
 1145                          */
 1146                         struct bintime t = crp->crp_tstamp;
 1147                         crypto_tstat(&cryptostats.cs_cb, &t);
 1148                         crp->crp_callback(crp);
 1149                         crypto_tstat(&cryptostats.cs_finis, &t);
 1150                 } else
 1151 #endif
 1152                         crp->crp_callback(crp);
 1153         } else {
 1154                 /*
 1155                  * Normal case; queue the callback for the thread.
 1156                  */
 1157                 CRYPTO_RETQ_LOCK();
 1158                 if (CRYPTO_RETQ_EMPTY())
 1159                         wakeup_one(&crp_ret_q); /* shared wait channel */
 1160                 TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
 1161                 CRYPTO_RETQ_UNLOCK();
 1162         }
 1163 }
 1164 
 1165 /*
 1166  * Invoke the callback on behalf of the driver.
 1167  */
 1168 void
 1169 crypto_kdone(struct cryptkop *krp)
 1170 {
 1171         struct cryptocap *cap;
 1172 
 1173         if (krp->krp_status != 0)
 1174                 cryptostats.cs_kerrs++;
 1175         CRYPTO_DRIVER_LOCK();
 1176         /* XXX: What if driver is loaded in the meantime? */
 1177         if (krp->krp_hid < crypto_drivers_num) {
 1178                 cap = &crypto_drivers[krp->krp_hid];
 1179                 cap->cc_koperations--;
 1180                 KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
 1181                 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
 1182                         crypto_remove(cap);
 1183         }
 1184         CRYPTO_DRIVER_UNLOCK();
 1185         CRYPTO_RETQ_LOCK();
 1186         if (CRYPTO_RETQ_EMPTY())
 1187                 wakeup_one(&crp_ret_q);         /* shared wait channel */
 1188         TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
 1189         CRYPTO_RETQ_UNLOCK();
 1190 }
 1191 
 1192 int
 1193 crypto_getfeat(int *featp)
 1194 {
 1195         int hid, kalg, feat = 0;
 1196 
 1197         CRYPTO_DRIVER_LOCK();
 1198         for (hid = 0; hid < crypto_drivers_num; hid++) {
 1199                 const struct cryptocap *cap = &crypto_drivers[hid];
 1200 
 1201                 if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
 1202                     !crypto_devallowsoft) {
 1203                         continue;
 1204                 }
 1205                 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
 1206                         if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
 1207                                 feat |=  1 << kalg;
 1208         }
 1209         CRYPTO_DRIVER_UNLOCK();
 1210         *featp = feat;
 1211         return (0);
 1212 }
 1213 
 1214 /*
 1215  * Terminate a thread at module unload.  The process that
 1216  * initiated this is waiting for us to signal that we're gone;
 1217  * wake it up and exit.  We use the driver table lock to ensure
 1218  * we don't do the wakeup before they're waiting.  There is no
 1219  * race here because the waiter sleeps on the proc lock for the
 1220  * thread so it gets notified at the right time because of an
 1221  * extra wakeup that's done in exit1().
 1222  */
 1223 static void
 1224 crypto_finis(void *chan)
 1225 {
 1226         CRYPTO_DRIVER_LOCK();
 1227         wakeup_one(chan);
 1228         CRYPTO_DRIVER_UNLOCK();
 1229         kthread_exit(0);
 1230 }
 1231 
 1232 /*
 1233  * Crypto thread, dispatches crypto requests.
 1234  */
 1235 static void
 1236 crypto_proc(void)
 1237 {
 1238         struct cryptop *crp, *submit;
 1239         struct cryptkop *krp;
 1240         struct cryptocap *cap;
 1241         u_int32_t hid;
 1242         int result, hint;
 1243 
 1244         CRYPTO_Q_LOCK();
 1245         for (;;) {
 1246                 /*
 1247                  * Find the first element in the queue that can be
 1248                  * processed and look-ahead to see if multiple ops
 1249                  * are ready for the same driver.
 1250                  */
 1251                 submit = NULL;
 1252                 hint = 0;
 1253                 TAILQ_FOREACH(crp, &crp_q, crp_next) {
 1254                         hid = CRYPTO_SESID2HID(crp->crp_sid);
 1255                         cap = crypto_checkdriver(hid);
 1256                         /*
 1257                          * The driver cannot disappear while there is an active
 1258                          * session.
 1259                          */
 1260                         KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
 1261                             __func__, __LINE__));
 1262                         if (cap == NULL || cap->cc_dev == NULL) {
 1263                                 /* Op needs to be migrated, process it. */
 1264                                 if (submit == NULL)
 1265                                         submit = crp;
 1266                                 break;
 1267                         }
 1268                         if (!cap->cc_qblocked) {
 1269                                 if (submit != NULL) {
 1270                                         /*
 1271                                          * We stop on finding another op,
 1272                                          * regardless of whether it's for the same
 1273                                          * driver or not.  We could keep
 1274                                          * searching the queue but it might be
 1275                                          * better to just use a per-driver
 1276                                          * queue instead.
 1277                                          */
 1278                                         if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
 1279                                                 hint = CRYPTO_HINT_MORE;
 1280                                         break;
 1281                                 } else {
 1282                                         submit = crp;
 1283                                         if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
 1284                                                 break;
 1285                                         /* keep scanning in case more are queued */
 1286                                 }
 1287                         }
 1288                 }
 1289                 if (submit != NULL) {
 1290                         TAILQ_REMOVE(&crp_q, submit, crp_next);
 1291                         hid = CRYPTO_SESID2HID(submit->crp_sid);
 1292                         cap = crypto_checkdriver(hid);
 1293                         KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
 1294                             __func__, __LINE__));
 1295                         result = crypto_invoke(cap, submit, hint);
 1296                         if (result == ERESTART) {
 1297                                 /*
 1298                                  * The driver ran out of resources, mark the
 1299                                  * driver ``blocked'' for cryptop's and put
 1300                                  * the request back in the queue.  It would
 1301                                  * be best to put the request back where we got
 1302                                  * it but that's hard so for now we put it
 1303                                  * at the front.  This should be ok; putting
 1304                                  * it at the end does not work.
 1305                                  */
 1306                                 /* XXX validate sid again? */
 1307                                 crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
 1308                                 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
 1309                                 cryptostats.cs_blocks++;
 1310                         }
 1311                 }
 1312 
 1313                 /* As above, but for key ops */
 1314                 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
 1315                         cap = crypto_checkdriver(krp->krp_hid);
 1316                         if (cap == NULL || cap->cc_dev == NULL) {
 1317                                 /*
 1318                                  * Operation needs to be migrated, invalidate
 1319                                  * the assigned device so it will reselect a
 1320                                  * new one below.  Propagate the original
 1321                                  * crid selection flags if supplied.
 1322                                  */
 1323                                 krp->krp_hid = krp->krp_crid &
 1324                                     (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
 1325                                 if (krp->krp_hid == 0)
 1326                                         krp->krp_hid =
 1327                                     CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
 1328                                 break;
 1329                         }
 1330                         if (!cap->cc_kqblocked)
 1331                                 break;
 1332                 }
 1333                 if (krp != NULL) {
 1334                         TAILQ_REMOVE(&crp_kq, krp, krp_next);
 1335                         result = crypto_kinvoke(krp, krp->krp_hid);
 1336                         if (result == ERESTART) {
 1337                                 /*
 1338                                  * The driver ran out of resources, mark the
 1339                                  * driver ``blocked'' for cryptkop's and put
 1340                                  * the request back in the queue.  It would
 1341                                  * be best to put the request back where we got
 1342                                  * it but that's hard so for now we put it
 1343                                  * at the front.  This should be ok; putting
 1344                                  * it at the end does not work.
 1345                                  */
 1346                                 /* XXX validate sid again? */
 1347                                 crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
 1348                                 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
 1349                                 cryptostats.cs_kblocks++;
 1350                         }
 1351                 }
 1352 
 1353                 if (submit == NULL && krp == NULL) {
 1354                         /*
 1355                          * Nothing more to be processed.  Sleep until we're
 1356                          * woken because there are more ops to process.
 1357                          * This happens either by submission or by a driver
 1358                          * becoming unblocked and notifying us through
 1359                          * crypto_unblock.  Note that when we wake up we
 1360                          * start processing each queue again from the
 1361                          * front. It's not clear that it's important to
 1362                          * preserve this ordering since ops may finish
 1363                          * out of order if dispatched to different devices
 1364                          * and some become blocked while others do not.
 1365                          */
 1366                         crp_sleep = 1;
 1367                         msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
 1368                         crp_sleep = 0;
 1369                         if (cryptoproc == NULL)
 1370                                 break;
 1371                         cryptostats.cs_intrs++;
 1372                 }
 1373         }
 1374         CRYPTO_Q_UNLOCK();
 1375 
 1376         crypto_finis(&crp_q);
 1377 }
 1378 
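/*
 * Editor's sketch (not part of crypto.c): the ERESTART handling above
 * assumes a driver that rejects work when its resources are exhausted and
 * later calls crypto_unblock() to let the dispatch thread resume.  The
 * device name, softc fields, and helpers below are hypothetical; only
 * crypto_unblock(), crypto_get_driverid(), ERESTART, and the
 * CRYPTO_SYMQ/CRYPTO_ASYMQ flags come from the opencrypto API.
 */
static int
xxx_kprocess(device_t dev, struct cryptkop *krp, int hint)
{
        struct xxx_softc *sc = device_get_softc(dev);

        if (sc->sc_ring_full) {
                /* No free command slots; crypto_proc() above will requeue
                 * the request and mark this driver kqblocked. */
                return (ERESTART);
        }
        /* ... hand krp to the hardware here ... */
        return (0);
}

static void
xxx_intr(void *arg)
{
        struct xxx_softc *sc = arg;

        /* ... reap completed commands, freeing ring slots ... */
        if (sc->sc_was_blocked) {
                sc->sc_was_blocked = 0;
                /* sc->sc_cid is the id crypto_get_driverid() returned at
                 * attach time; crypto_unblock() clears the blocked flags and
                 * wakes the crypto_proc() thread. */
                crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
        }
}
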
 1379 /*
 1380  * Crypto return thread; does callbacks for processed crypto requests.
 1381  * Callbacks are done here, rather than in the crypto drivers, because
 1382  * callbacks typically are expensive and would slow interrupt handling.
 1383  */
 1384 static void
 1385 crypto_ret_proc(void)
 1386 {
 1387         struct cryptop *crpt;
 1388         struct cryptkop *krpt;
 1389 
 1390         CRYPTO_RETQ_LOCK();
 1391         for (;;) {
 1392                 /* Harvest return q's for completed ops */
 1393                 crpt = TAILQ_FIRST(&crp_ret_q);
 1394                 if (crpt != NULL)
 1395                         TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);
 1396 
 1397                 krpt = TAILQ_FIRST(&crp_ret_kq);
 1398                 if (krpt != NULL)
 1399                         TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);
 1400 
 1401                 if (crpt != NULL || krpt != NULL) {
 1402                         CRYPTO_RETQ_UNLOCK();
 1403                         /*
 1404                          * Run callbacks unlocked.
 1405                          */
 1406                         if (crpt != NULL) {
 1407 #ifdef CRYPTO_TIMING
 1408                                 if (crypto_timing) {
 1409                                         /*
 1410                                          * NB: We must copy the timestamp before
 1411                                          * doing the callback as the cryptop is
 1412                                          * likely to be reclaimed.
 1413                                          */
 1414                                         struct bintime t = crpt->crp_tstamp;
 1415                                         crypto_tstat(&cryptostats.cs_cb, &t);
 1416                                         crpt->crp_callback(crpt);
 1417                                         crypto_tstat(&cryptostats.cs_finis, &t);
 1418                                 } else
 1419 #endif
 1420                                         crpt->crp_callback(crpt);
 1421                         }
 1422                         if (krpt != NULL)
 1423                                 krpt->krp_callback(krpt);
 1424                         CRYPTO_RETQ_LOCK();
 1425                 } else {
 1426                         /*
 1427                          * Nothing more to be processed.  Sleep until we're
 1428                          * woken because there are more returns to process.
 1429                          */
 1430                         msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT,
 1431                                 "crypto_ret_wait", 0);
 1432                         if (cryptoretproc == NULL)
 1433                                 break;
 1434                         cryptostats.cs_rets++;
 1435                 }
 1436         }
 1437         CRYPTO_RETQ_UNLOCK();
 1438 
 1439         crypto_finis(&crp_ret_q);
 1440 }
 1441 
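/*
 * Editor's sketch (not part of crypto.c): the return queues drained above
 * are filled by drivers calling crypto_done() (or crypto_kdone() for
 * asymmetric requests) when an operation completes.  Unless the submitter
 * set CRYPTO_F_CBIMM, crypto_done() queues the op on crp_ret_q and wakes
 * this thread, so the potentially expensive callback runs here instead of
 * in the driver's interrupt handler.  The helper and softc below are
 * hypothetical.
 */
static void
xxx_done_one(struct xxx_softc *sc, struct cryptop *crp, int error)
{
        /* Record the completion status the callback will see. */
        crp->crp_etype = error;
        /* Hand the op back to the framework; its callback will run in
         * crypto_ret_proc(), not in this (interrupt) context. */
        crypto_done(crp);
}
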
 1442 #ifdef DDB
 1443 static void
 1444 db_show_drivers(void)
 1445 {
 1446         int hid;
 1447 
 1448         db_printf("%12s %4s %4s %8s %2s %2s\n"
 1449                 , "Device"
 1450                 , "Ses"
 1451                 , "Kops"
 1452                 , "Flags"
 1453                 , "QB"
 1454                 , "KB"
 1455         );
 1456         for (hid = 0; hid < crypto_drivers_num; hid++) {
 1457                 const struct cryptocap *cap = &crypto_drivers[hid];
 1458                 if (cap->cc_dev == NULL)
 1459                         continue;
 1460                 db_printf("%-12s %4u %4u %08x %2u %2u\n"
 1461                     , device_get_nameunit(cap->cc_dev)
 1462                     , cap->cc_sessions
 1463                     , cap->cc_koperations
 1464                     , cap->cc_flags
 1465                     , cap->cc_qblocked
 1466                     , cap->cc_kqblocked
 1467                 );
 1468         }
 1469 }
 1470 
 1471 DB_SHOW_COMMAND(crypto, db_show_crypto)
 1472 {
 1473         struct cryptop *crp;
 1474 
 1475         db_show_drivers();
 1476         db_printf("\n");
 1477 
 1478         db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
 1479             "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
 1480             "Desc", "Callback");
 1481         TAILQ_FOREACH(crp, &crp_q, crp_next) {
 1482                 db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
 1483                     , (int) CRYPTO_SESID2HID(crp->crp_sid)
 1484                     , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
 1485                     , crp->crp_ilen, crp->crp_olen
 1486                     , crp->crp_etype
 1487                     , crp->crp_flags
 1488                     , crp->crp_desc
 1489                     , crp->crp_callback
 1490                 );
 1491         }
 1492         if (!TAILQ_EMPTY(&crp_ret_q)) {
 1493                 db_printf("\n%4s %4s %4s %8s\n",
 1494                     "HID", "Etype", "Flags", "Callback");
 1495                 TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
 1496                         db_printf("%4u %4u %04x %8p\n"
 1497                             , (int) CRYPTO_SESID2HID(crp->crp_sid)
 1498                             , crp->crp_etype
 1499                             , crp->crp_flags
 1500                             , crp->crp_callback
 1501                         );
 1502                 }
 1503         }
 1504 }
 1505 
 1506 DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
 1507 {
 1508         struct cryptkop *krp;
 1509 
 1510         db_show_drivers();
 1511         db_printf("\n");
 1512 
 1513         db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
 1514             "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
 1515         TAILQ_FOREACH(krp, &crp_kq, krp_next) {
 1516                 db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
 1517                     , krp->krp_op
 1518                     , krp->krp_status
 1519                     , krp->krp_iparams, krp->krp_oparams
 1520                     , krp->krp_crid, krp->krp_hid
 1521                     , krp->krp_callback
 1522                 );
 1523         }
 1524         if (!TAILQ_EMPTY(&crp_ret_kq)) {
 1525                 db_printf("%4s %5s %8s %4s %8s\n",
 1526                     "Op", "Status", "CRID", "HID", "Callback");
 1527                 TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
 1528                         db_printf("%4u %5u %08x %4u %8p\n"
 1529                             , krp->krp_op
 1530                             , krp->krp_status
 1531                             , krp->krp_crid, krp->krp_hid
 1532                             , krp->krp_callback
 1533                         );
 1534                 }
 1535         }
 1536 }
 1537 #endif
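/*
 * Editor's note: with DDB compiled into the kernel, the commands defined
 * above can be run from the debugger prompt to inspect framework state,
 * for example:
 *
 *      db> show crypto          (drivers plus the symmetric op queues)
 *      db> show kcrypto         (drivers plus the asymmetric op queues)
 *
 * The columns follow the db_printf() formats above.
 */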
 1538 
 1539 int crypto_modevent(module_t mod, int type, void *unused);
 1540 
 1541 /*
 1542  * Initialization code, both for static and dynamic loading.
 1543  * Note this is not invoked with the usual DECLARE_MODULE
 1544  * mechanism but instead is listed as a dependency by the
 1545  * cryptosoft driver.  This guarantees proper ordering of
 1546  * calls on module load/unload.
 1547  */
 1548 int
 1549 crypto_modevent(module_t mod, int type, void *unused)
 1550 {
 1551         int error = EINVAL;
 1552 
 1553         switch (type) {
 1554         case MOD_LOAD:
 1555                 error = crypto_init();
 1556                 if (error == 0 && bootverbose)
 1557                         printf("crypto: <crypto core>\n");
 1558                 break;
 1559         case MOD_UNLOAD:
 1560                 /* XXX disallow if active sessions */
 1561                 error = 0;
 1562                 crypto_destroy();
 1563                 return 0;
 1564         }
 1565         return error;
 1566 }
 1567 MODULE_VERSION(crypto, 1);
 1568 MODULE_DEPEND(crypto, zlib, 1, 1, 1);
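/*
 * Editor's sketch (not part of crypto.c): a driver that needs the framework
 * expresses the load/unload ordering described above by depending on the
 * "crypto" module, so crypto_modevent(MOD_LOAD) runs before the driver
 * attaches.  The module name below is hypothetical; cryptosoft declares the
 * equivalent dependency.
 */
MODULE_VERSION(xxxcrypto, 1);
MODULE_DEPEND(xxxcrypto, crypto, 1, 1, 1);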
