FreeBSD/Linux Kernel Cross Reference
sys/opencrypto/crypto.c

    1 /*-
    2  * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice, this list of conditions and the following disclaimer.
    9  * 2. Redistributions in binary form must reproduce the above copyright
   10  *    notice, this list of conditions and the following disclaimer in the
   11  *    documentation and/or other materials provided with the distribution.
   12  *
   13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   14  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   15  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   16  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   17  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   18  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   19  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   20  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   22  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   23  */
   24 
   25 #include <sys/cdefs.h>
   26 __FBSDID("$FreeBSD: releng/11.0/sys/opencrypto/crypto.c 299202 2016-05-06 23:37:19Z pfg $");
   27 
   28 /*
   29  * Cryptographic Subsystem.
   30  *
    31  * This code is derived from the OpenBSD Cryptographic Framework (OCF)
   32  * that has the copyright shown below.  Very little of the original
   33  * code remains.
   34  */
   35 
   36 /*-
   37  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
   38  *
   39  * This code was written by Angelos D. Keromytis in Athens, Greece, in
   40  * February 2000. Network Security Technologies Inc. (NSTI) kindly
   41  * supported the development of this code.
   42  *
   43  * Copyright (c) 2000, 2001 Angelos D. Keromytis
   44  *
   45  * Permission to use, copy, and modify this software with or without fee
   46  * is hereby granted, provided that this entire notice is included in
   47  * all source code copies of any software which is or includes a copy or
   48  * modification of this software.
   49  *
   50  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
   51  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
   52  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
   53  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
   54  * PURPOSE.
   55  */
   56 
   57 #define CRYPTO_TIMING                           /* enable timing support */
   58 
   59 #include "opt_ddb.h"
   60 
   61 #include <sys/param.h>
   62 #include <sys/systm.h>
   63 #include <sys/eventhandler.h>
   64 #include <sys/kernel.h>
   65 #include <sys/kthread.h>
   66 #include <sys/lock.h>
   67 #include <sys/module.h>
   68 #include <sys/mutex.h>
   69 #include <sys/malloc.h>
   70 #include <sys/proc.h>
   71 #include <sys/sdt.h>
   72 #include <sys/sysctl.h>
   73 
   74 #include <ddb/ddb.h>
   75 
   76 #include <vm/uma.h>
   77 #include <opencrypto/cryptodev.h>
   78 #include <opencrypto/xform.h>                   /* XXX for M_XDATA */
   79 
   80 #include <sys/kobj.h>
   81 #include <sys/bus.h>
   82 #include "cryptodev_if.h"
   83 
   84 #if defined(__i386__) || defined(__amd64__)
   85 #include <machine/pcb.h>
   86 #endif
   87 
   88 SDT_PROVIDER_DEFINE(opencrypto);
   89 
   90 /*
   91  * Crypto drivers register themselves by allocating a slot in the
   92  * crypto_drivers table with crypto_get_driverid() and then registering
   93  * each algorithm they support with crypto_register() and crypto_kregister().
   94  */
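       /*
        * As a rough sketch (not taken from this file), a hypothetical
        * hardware driver would typically obtain its slot and announce its
        * algorithms from its attach routine, e.g.:
        *
        *	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
        *	if (sc->sc_cid < 0)
        *		return (ENXIO);
        *	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
        *	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
        *
        * and tear everything down from its detach routine with
        * crypto_unregister_all(sc->sc_cid).  The algorithm list and the
        * softc layout shown here are, of course, driver-specific.
        */
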
   95 static  struct mtx crypto_drivers_mtx;          /* lock on driver table */
   96 #define CRYPTO_DRIVER_LOCK()    mtx_lock(&crypto_drivers_mtx)
   97 #define CRYPTO_DRIVER_UNLOCK()  mtx_unlock(&crypto_drivers_mtx)
   98 #define CRYPTO_DRIVER_ASSERT()  mtx_assert(&crypto_drivers_mtx, MA_OWNED)
   99 
  100 /*
  101  * Crypto device/driver capabilities structure.
  102  *
  103  * Synchronization:
  104  * (d) - protected by CRYPTO_DRIVER_LOCK()
  105  * (q) - protected by CRYPTO_Q_LOCK()
  106  * Not tagged fields are read-only.
  107  */
  108 struct cryptocap {
  109         device_t        cc_dev;                 /* (d) device/driver */
  110         u_int32_t       cc_sessions;            /* (d) # of sessions */
   111         u_int32_t       cc_koperations;         /* (d) # of asym operations */
  112         /*
  113          * Largest possible operator length (in bits) for each type of
  114          * encryption algorithm. XXX not used
  115          */
  116         u_int16_t       cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
  117         u_int8_t        cc_alg[CRYPTO_ALGORITHM_MAX + 1];
  118         u_int8_t        cc_kalg[CRK_ALGORITHM_MAX + 1];
  119 
  120         int             cc_flags;               /* (d) flags */
  121 #define CRYPTOCAP_F_CLEANUP     0x80000000      /* needs resource cleanup */
  122         int             cc_qblocked;            /* (q) symmetric q blocked */
  123         int             cc_kqblocked;           /* (q) asymmetric q blocked */
  124 };
  125 static  struct cryptocap *crypto_drivers = NULL;
  126 static  int crypto_drivers_num = 0;
  127 
  128 /*
  129  * There are two queues for crypto requests; one for symmetric (e.g.
   130  * cipher) operations and one for asymmetric (e.g. MOD) operations.
   131  * A single mutex is used to lock access to both queues.  We could
   132  * have one lock per queue but a single lock simplifies handling of
   133  * block/unblock operations.
  134  */
  135 static  int crp_sleep = 0;
  136 static  TAILQ_HEAD(,cryptop) crp_q;             /* request queues */
  137 static  TAILQ_HEAD(,cryptkop) crp_kq;
  138 static  struct mtx crypto_q_mtx;
  139 #define CRYPTO_Q_LOCK()         mtx_lock(&crypto_q_mtx)
  140 #define CRYPTO_Q_UNLOCK()       mtx_unlock(&crypto_q_mtx)
  141 
  142 /*
  143  * There are two queues for processing completed crypto requests; one
  144  * for the symmetric and one for the asymmetric ops.  We only need one
  145  * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
  146  * mutex is used to lock access to both queues.  Note that this lock
   147  * must be separate from the lock on request queues to ensure driver
  148  * callbacks don't generate lock order reversals.
  149  */
  150 static  TAILQ_HEAD(,cryptop) crp_ret_q;         /* callback queues */
  151 static  TAILQ_HEAD(,cryptkop) crp_ret_kq;
  152 static  struct mtx crypto_ret_q_mtx;
  153 #define CRYPTO_RETQ_LOCK()      mtx_lock(&crypto_ret_q_mtx)
  154 #define CRYPTO_RETQ_UNLOCK()    mtx_unlock(&crypto_ret_q_mtx)
  155 #define CRYPTO_RETQ_EMPTY()     (TAILQ_EMPTY(&crp_ret_q) && TAILQ_EMPTY(&crp_ret_kq))
  156 
  157 static  uma_zone_t cryptop_zone;
  158 static  uma_zone_t cryptodesc_zone;
  159 
  160 int     crypto_userasymcrypto = 1;      /* userland may do asym crypto reqs */
  161 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
  162            &crypto_userasymcrypto, 0,
  163            "Enable/disable user-mode access to asymmetric crypto support");
  164 int     crypto_devallowsoft = 0;        /* only use hardware crypto */
  165 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
  166            &crypto_devallowsoft, 0,
  167            "Enable/disable use of software crypto by /dev/crypto");
  168 
  169 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
  170 
  171 static  void crypto_proc(void);
  172 static  struct proc *cryptoproc;
  173 static  void crypto_ret_proc(void);
  174 static  struct proc *cryptoretproc;
  175 static  void crypto_destroy(void);
  176 static  int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
  177 static  int crypto_kinvoke(struct cryptkop *krp, int flags);
  178 
  179 static  struct cryptostats cryptostats;
  180 SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
  181             cryptostats, "Crypto system statistics");
  182 
  183 #ifdef CRYPTO_TIMING
  184 static  int crypto_timing = 0;
  185 SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
  186            &crypto_timing, 0, "Enable/disable crypto timing support");
  187 #endif
  188 
  189 static int
  190 crypto_init(void)
  191 {
  192         int error;
  193 
  194         mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
  195                 MTX_DEF|MTX_QUIET);
  196 
  197         TAILQ_INIT(&crp_q);
  198         TAILQ_INIT(&crp_kq);
  199         mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);
  200 
  201         TAILQ_INIT(&crp_ret_q);
  202         TAILQ_INIT(&crp_ret_kq);
  203         mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF);
  204 
  205         cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
  206                                     0, 0, 0, 0,
  207                                     UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
  208         cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
  209                                     0, 0, 0, 0,
  210                                     UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
  211         if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
  212                 printf("crypto_init: cannot setup crypto zones\n");
  213                 error = ENOMEM;
  214                 goto bad;
  215         }
  216 
  217         crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
  218         crypto_drivers = malloc(crypto_drivers_num *
  219             sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
  220         if (crypto_drivers == NULL) {
  221                 printf("crypto_init: cannot setup crypto drivers\n");
  222                 error = ENOMEM;
  223                 goto bad;
  224         }
  225 
  226         error = kproc_create((void (*)(void *)) crypto_proc, NULL,
  227                     &cryptoproc, 0, 0, "crypto");
  228         if (error) {
  229                 printf("crypto_init: cannot start crypto thread; error %d",
  230                         error);
  231                 goto bad;
  232         }
  233 
  234         error = kproc_create((void (*)(void *)) crypto_ret_proc, NULL,
  235                     &cryptoretproc, 0, 0, "crypto returns");
  236         if (error) {
  237                 printf("crypto_init: cannot start cryptoret thread; error %d",
  238                         error);
  239                 goto bad;
  240         }
  241         return 0;
  242 bad:
  243         crypto_destroy();
  244         return error;
  245 }
  246 
  247 /*
  248  * Signal a crypto thread to terminate.  We use the driver
  249  * table lock to synchronize the sleep/wakeups so that we
  250  * are sure the threads have terminated before we release
  251  * the data structures they use.  See crypto_finis below
  252  * for the other half of this song-and-dance.
  253  */
  254 static void
  255 crypto_terminate(struct proc **pp, void *q)
  256 {
  257         struct proc *p;
  258 
  259         mtx_assert(&crypto_drivers_mtx, MA_OWNED);
  260         p = *pp;
  261         *pp = NULL;
  262         if (p) {
  263                 wakeup_one(q);
   264                 PROC_LOCK(p);           /* NB: ensure we don't miss wakeup */
  265                 CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */
  266                 msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
  267                 PROC_UNLOCK(p);
  268                 CRYPTO_DRIVER_LOCK();
  269         }
  270 }
  271 
  272 static void
  273 crypto_destroy(void)
  274 {
  275         /*
  276          * Terminate any crypto threads.
  277          */
  278         CRYPTO_DRIVER_LOCK();
  279         crypto_terminate(&cryptoproc, &crp_q);
  280         crypto_terminate(&cryptoretproc, &crp_ret_q);
  281         CRYPTO_DRIVER_UNLOCK();
  282 
  283         /* XXX flush queues??? */
  284 
  285         /* 
  286          * Reclaim dynamically allocated resources.
  287          */
  288         if (crypto_drivers != NULL)
  289                 free(crypto_drivers, M_CRYPTO_DATA);
  290 
  291         if (cryptodesc_zone != NULL)
  292                 uma_zdestroy(cryptodesc_zone);
  293         if (cryptop_zone != NULL)
  294                 uma_zdestroy(cryptop_zone);
  295         mtx_destroy(&crypto_q_mtx);
  296         mtx_destroy(&crypto_ret_q_mtx);
  297         mtx_destroy(&crypto_drivers_mtx);
  298 }
  299 
  300 static struct cryptocap *
  301 crypto_checkdriver(u_int32_t hid)
  302 {
  303         if (crypto_drivers == NULL)
  304                 return NULL;
  305         return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
  306 }
  307 
  308 /*
  309  * Compare a driver's list of supported algorithms against another
  310  * list; return non-zero if all algorithms are supported.
  311  */
  312 static int
  313 driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
  314 {
  315         const struct cryptoini *cr;
  316 
  317         /* See if all the algorithms are supported. */
  318         for (cr = cri; cr; cr = cr->cri_next)
  319                 if (cap->cc_alg[cr->cri_alg] == 0)
  320                         return 0;
  321         return 1;
  322 }
  323 
  324 /*
  325  * Select a driver for a new session that supports the specified
  326  * algorithms and, optionally, is constrained according to the flags.
  327  * The algorithm we use here is pretty stupid; just use the
  328  * first driver that supports all the algorithms we need. If there
  329  * are multiple drivers we choose the driver with the fewest active
  330  * sessions.  We prefer hardware-backed drivers to software ones.
  331  *
  332  * XXX We need more smarts here (in real life too, but that's
  333  * XXX another story altogether).
  334  */
  335 static struct cryptocap *
  336 crypto_select_driver(const struct cryptoini *cri, int flags)
  337 {
  338         struct cryptocap *cap, *best;
  339         int match, hid;
  340 
  341         CRYPTO_DRIVER_ASSERT();
  342 
  343         /*
  344          * Look first for hardware crypto devices if permitted.
  345          */
  346         if (flags & CRYPTOCAP_F_HARDWARE)
  347                 match = CRYPTOCAP_F_HARDWARE;
  348         else
  349                 match = CRYPTOCAP_F_SOFTWARE;
  350         best = NULL;
  351 again:
  352         for (hid = 0; hid < crypto_drivers_num; hid++) {
  353                 cap = &crypto_drivers[hid];
  354                 /*
  355                  * If it's not initialized, is in the process of
  356                  * going away, or is not appropriate (hardware
  357                  * or software based on match), then skip.
  358                  */
  359                 if (cap->cc_dev == NULL ||
  360                     (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
  361                     (cap->cc_flags & match) == 0)
  362                         continue;
  363 
  364                 /* verify all the algorithms are supported. */
  365                 if (driver_suitable(cap, cri)) {
  366                         if (best == NULL ||
  367                             cap->cc_sessions < best->cc_sessions)
  368                                 best = cap;
  369                 }
  370         }
  371         if (best == NULL && match == CRYPTOCAP_F_HARDWARE &&
  372             (flags & CRYPTOCAP_F_SOFTWARE)) {
  373                 /* sort of an Algol 68-style for loop */
  374                 match = CRYPTOCAP_F_SOFTWARE;
  375                 goto again;
  376         }
  377         return best;
  378 }
  379 
  380 /*
  381  * Create a new session.  The crid argument specifies a crypto
  382  * driver to use or constraints on a driver to select (hardware
  383  * only, software only, either).  Whatever driver is selected
  384  * must be capable of the requested crypto algorithms.
  385  */
  386 int
  387 crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
  388 {
  389         struct cryptocap *cap;
  390         u_int32_t hid, lid;
  391         int err;
  392 
  393         CRYPTO_DRIVER_LOCK();
  394         if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
  395                 /*
  396                  * Use specified driver; verify it is capable.
  397                  */
  398                 cap = crypto_checkdriver(crid);
  399                 if (cap != NULL && !driver_suitable(cap, cri))
  400                         cap = NULL;
  401         } else {
  402                 /*
  403                  * No requested driver; select based on crid flags.
  404                  */
  405                 cap = crypto_select_driver(cri, crid);
  406                 /*
   407                  * If NULL, no single driver can handle everything in one session.
  408                  * XXX Fix this. We need to inject a "virtual" session
  409                  * XXX layer right about here.
  410                  */
  411         }
  412         if (cap != NULL) {
  413                 /* Call the driver initialization routine. */
  414                 hid = cap - crypto_drivers;
  415                 lid = hid;              /* Pass the driver ID. */
  416                 err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
  417                 if (err == 0) {
  418                         (*sid) = (cap->cc_flags & 0xff000000)
  419                                | (hid & 0x00ffffff);
  420                         (*sid) <<= 32;
  421                         (*sid) |= (lid & 0xffffffff);
  422                         cap->cc_sessions++;
  423                 } else
  424                         CRYPTDEB("dev newsession failed");
  425         } else {
  426                 CRYPTDEB("no driver");
  427                 err = EINVAL;
  428         }
  429         CRYPTO_DRIVER_UNLOCK();
  430         return err;
  431 }
  432 
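       /*
        * For reference, the 64-bit session id constructed above packs the
        * driver's capability byte and table index into the upper word and
        * the driver-local session id into the lower word:
        *
        *	bits 63-56: high byte of cc_flags (CRYPTOCAP_F_* capabilities)
        *	bits 55-32: hid, the index into crypto_drivers[]
        *	bits 31-0:  lid, the driver-local session id
        *
        * The CRYPTO_SESID2HID(), CRYPTO_SESID2CAPS() and CRYPTO_SESID2LID()
        * macros in cryptodev.h decode these fields and are used throughout
        * this file.
        */
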
  433 static void
  434 crypto_remove(struct cryptocap *cap)
  435 {
  436 
  437         mtx_assert(&crypto_drivers_mtx, MA_OWNED);
  438         if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
  439                 bzero(cap, sizeof(*cap));
  440 }
  441 
  442 /*
  443  * Delete an existing session (or a reserved session on an unregistered
  444  * driver).
  445  */
  446 int
  447 crypto_freesession(u_int64_t sid)
  448 {
  449         struct cryptocap *cap;
  450         u_int32_t hid;
  451         int err;
  452 
  453         CRYPTO_DRIVER_LOCK();
  454 
  455         if (crypto_drivers == NULL) {
  456                 err = EINVAL;
  457                 goto done;
  458         }
  459 
   460         /* Determine the driver ID from the session ID. */
  461         hid = CRYPTO_SESID2HID(sid);
  462 
  463         if (hid >= crypto_drivers_num) {
  464                 err = ENOENT;
  465                 goto done;
  466         }
  467         cap = &crypto_drivers[hid];
  468 
  469         if (cap->cc_sessions)
  470                 cap->cc_sessions--;
  471 
  472         /* Call the driver cleanup routine, if available. */
  473         err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
  474 
  475         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
  476                 crypto_remove(cap);
  477 
  478 done:
  479         CRYPTO_DRIVER_UNLOCK();
  480         return err;
  481 }
  482 
  483 /*
  484  * Return an unused driver id.  Used by drivers prior to registering
  485  * support for the algorithms they handle.
  486  */
  487 int32_t
  488 crypto_get_driverid(device_t dev, int flags)
  489 {
  490         struct cryptocap *newdrv;
  491         int i;
  492 
  493         if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
  494                 printf("%s: no flags specified when registering driver\n",
  495                     device_get_nameunit(dev));
  496                 return -1;
  497         }
  498 
  499         CRYPTO_DRIVER_LOCK();
  500 
  501         for (i = 0; i < crypto_drivers_num; i++) {
  502                 if (crypto_drivers[i].cc_dev == NULL &&
  503                     (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
  504                         break;
  505                 }
  506         }
  507 
  508         /* Out of entries, allocate some more. */
  509         if (i == crypto_drivers_num) {
  510                 /* Be careful about wrap-around. */
  511                 if (2 * crypto_drivers_num <= crypto_drivers_num) {
  512                         CRYPTO_DRIVER_UNLOCK();
  513                         printf("crypto: driver count wraparound!\n");
  514                         return -1;
  515                 }
  516 
  517                 newdrv = malloc(2 * crypto_drivers_num *
  518                     sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
  519                 if (newdrv == NULL) {
  520                         CRYPTO_DRIVER_UNLOCK();
  521                         printf("crypto: no space to expand driver table!\n");
  522                         return -1;
  523                 }
  524 
  525                 bcopy(crypto_drivers, newdrv,
  526                     crypto_drivers_num * sizeof(struct cryptocap));
  527 
  528                 crypto_drivers_num *= 2;
  529 
  530                 free(crypto_drivers, M_CRYPTO_DATA);
  531                 crypto_drivers = newdrv;
  532         }
  533 
  534         /* NB: state is zero'd on free */
  535         crypto_drivers[i].cc_sessions = 1;      /* Mark */
  536         crypto_drivers[i].cc_dev = dev;
  537         crypto_drivers[i].cc_flags = flags;
  538         if (bootverbose)
  539                 printf("crypto: assign %s driver id %u, flags %u\n",
  540                     device_get_nameunit(dev), i, flags);
  541 
  542         CRYPTO_DRIVER_UNLOCK();
  543 
  544         return i;
  545 }
  546 
  547 /*
  548  * Lookup a driver by name.  We match against the full device
  549  * name and unit, and against just the name.  The latter gives
   550  * us a simple wildcarding by device name.  On success return the
  551  * driver/hardware identifier; otherwise return -1.
  552  */
  553 int
  554 crypto_find_driver(const char *match)
  555 {
  556         int i, len = strlen(match);
  557 
  558         CRYPTO_DRIVER_LOCK();
  559         for (i = 0; i < crypto_drivers_num; i++) {
  560                 device_t dev = crypto_drivers[i].cc_dev;
  561                 if (dev == NULL ||
  562                     (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
  563                         continue;
  564                 if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
  565                     strncmp(match, device_get_name(dev), len) == 0)
  566                         break;
  567         }
  568         CRYPTO_DRIVER_UNLOCK();
  569         return i < crypto_drivers_num ? i : -1;
  570 }
  571 
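       /*
        * For example (assuming an "aesni" driver happens to be present), a
        * hypothetical caller could use crypto_find_driver("aesni0") to select
        * one specific unit, while crypto_find_driver("aesni") matches the
        * first unit of that driver found in the table.
        */
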
  572 /*
  573  * Return the device_t for the specified driver or NULL
  574  * if the driver identifier is invalid.
  575  */
  576 device_t
  577 crypto_find_device_byhid(int hid)
  578 {
  579         struct cryptocap *cap = crypto_checkdriver(hid);
  580         return cap != NULL ? cap->cc_dev : NULL;
  581 }
  582 
  583 /*
  584  * Return the device/driver capabilities.
  585  */
  586 int
  587 crypto_getcaps(int hid)
  588 {
  589         struct cryptocap *cap = crypto_checkdriver(hid);
  590         return cap != NULL ? cap->cc_flags : 0;
  591 }
  592 
  593 /*
  594  * Register support for a key-related algorithm.  This routine
   595  * is called once for each algorithm supported by a driver.
  596  */
  597 int
  598 crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
  599 {
  600         struct cryptocap *cap;
  601         int err;
  602 
  603         CRYPTO_DRIVER_LOCK();
  604 
  605         cap = crypto_checkdriver(driverid);
  606         if (cap != NULL &&
  607             (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
  608                 /*
  609                  * XXX Do some performance testing to determine placing.
  610                  * XXX We probably need an auxiliary data structure that
  611                  * XXX describes relative performances.
  612                  */
  613 
  614                 cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
  615                 if (bootverbose)
  616                         printf("crypto: %s registers key alg %u flags %u\n"
  617                                 , device_get_nameunit(cap->cc_dev)
  618                                 , kalg
  619                                 , flags
  620                         );
  621                 err = 0;
  622         } else
  623                 err = EINVAL;
  624 
  625         CRYPTO_DRIVER_UNLOCK();
  626         return err;
  627 }
  628 
  629 /*
  630  * Register support for a non-key-related algorithm.  This routine
  631  * is called once for each such algorithm supported by a driver.
  632  */
  633 int
  634 crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
  635     u_int32_t flags)
  636 {
  637         struct cryptocap *cap;
  638         int err;
  639 
  640         CRYPTO_DRIVER_LOCK();
  641 
  642         cap = crypto_checkdriver(driverid);
  643         /* NB: algorithms are in the range [1..max] */
  644         if (cap != NULL &&
  645             (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
  646                 /*
  647                  * XXX Do some performance testing to determine placing.
  648                  * XXX We probably need an auxiliary data structure that
  649                  * XXX describes relative performances.
  650                  */
  651 
  652                 cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
  653                 cap->cc_max_op_len[alg] = maxoplen;
  654                 if (bootverbose)
  655                         printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
  656                                 , device_get_nameunit(cap->cc_dev)
  657                                 , alg
  658                                 , flags
  659                                 , maxoplen
  660                         );
  661                 cap->cc_sessions = 0;           /* Unmark */
  662                 err = 0;
  663         } else
  664                 err = EINVAL;
  665 
  666         CRYPTO_DRIVER_UNLOCK();
  667         return err;
  668 }
  669 
  670 static void
  671 driver_finis(struct cryptocap *cap)
  672 {
  673         u_int32_t ses, kops;
  674 
  675         CRYPTO_DRIVER_ASSERT();
  676 
  677         ses = cap->cc_sessions;
  678         kops = cap->cc_koperations;
  679         bzero(cap, sizeof(*cap));
  680         if (ses != 0 || kops != 0) {
  681                 /*
  682                  * If there are pending sessions,
  683                  * just mark as invalid.
  684                  */
  685                 cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
  686                 cap->cc_sessions = ses;
  687                 cap->cc_koperations = kops;
  688         }
  689 }
  690 
  691 /*
  692  * Unregister a crypto driver. If there are pending sessions using it,
  693  * leave enough information around so that subsequent calls using those
  694  * sessions will correctly detect the driver has been unregistered and
  695  * reroute requests.
  696  */
  697 int
  698 crypto_unregister(u_int32_t driverid, int alg)
  699 {
  700         struct cryptocap *cap;
  701         int i, err;
  702 
  703         CRYPTO_DRIVER_LOCK();
  704         cap = crypto_checkdriver(driverid);
  705         if (cap != NULL &&
  706             (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
  707             cap->cc_alg[alg] != 0) {
  708                 cap->cc_alg[alg] = 0;
  709                 cap->cc_max_op_len[alg] = 0;
  710 
  711                 /* Was this the last algorithm ? */
  712                 for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
  713                         if (cap->cc_alg[i] != 0)
  714                                 break;
  715 
  716                 if (i == CRYPTO_ALGORITHM_MAX + 1)
  717                         driver_finis(cap);
  718                 err = 0;
  719         } else
  720                 err = EINVAL;
  721         CRYPTO_DRIVER_UNLOCK();
  722 
  723         return err;
  724 }
  725 
  726 /*
  727  * Unregister all algorithms associated with a crypto driver.
  728  * If there are pending sessions using it, leave enough information
  729  * around so that subsequent calls using those sessions will
  730  * correctly detect the driver has been unregistered and reroute
  731  * requests.
  732  */
  733 int
  734 crypto_unregister_all(u_int32_t driverid)
  735 {
  736         struct cryptocap *cap;
  737         int err;
  738 
  739         CRYPTO_DRIVER_LOCK();
  740         cap = crypto_checkdriver(driverid);
  741         if (cap != NULL) {
  742                 driver_finis(cap);
  743                 err = 0;
  744         } else
  745                 err = EINVAL;
  746         CRYPTO_DRIVER_UNLOCK();
  747 
  748         return err;
  749 }
  750 
  751 /*
  752  * Clear blockage on a driver.  The what parameter indicates whether
  753  * the driver is now ready for cryptop's and/or cryptokop's.
  754  */
  755 int
  756 crypto_unblock(u_int32_t driverid, int what)
  757 {
  758         struct cryptocap *cap;
  759         int err;
  760 
  761         CRYPTO_Q_LOCK();
  762         cap = crypto_checkdriver(driverid);
  763         if (cap != NULL) {
  764                 if (what & CRYPTO_SYMQ)
  765                         cap->cc_qblocked = 0;
  766                 if (what & CRYPTO_ASYMQ)
  767                         cap->cc_kqblocked = 0;
  768                 if (crp_sleep)
  769                         wakeup_one(&crp_q);
  770                 err = 0;
  771         } else
  772                 err = EINVAL;
  773         CRYPTO_Q_UNLOCK();
  774 
  775         return err;
  776 }
  777 
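       /*
        * The expected flow, roughly, is: a driver's process method returns
        * ERESTART when it is temporarily out of resources, which causes the
        * dispatch code below to set cc_qblocked/cc_kqblocked and park the
        * request on the queue; once the driver has room again it calls,
        * for example,
        *
        *	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
        *
        * to clear the blockage and kick the crypto thread (sc_cid being the
        * id obtained earlier from crypto_get_driverid()).
        */
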
  778 /*
  779  * Add a crypto request to a queue, to be processed by the kernel thread.
  780  */
  781 int
  782 crypto_dispatch(struct cryptop *crp)
  783 {
  784         struct cryptocap *cap;
  785         u_int32_t hid;
  786         int result;
  787 
  788         cryptostats.cs_ops++;
  789 
  790 #ifdef CRYPTO_TIMING
  791         if (crypto_timing)
  792                 binuptime(&crp->crp_tstamp);
  793 #endif
  794 
  795         hid = CRYPTO_SESID2HID(crp->crp_sid);
  796 
  797         if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
  798                 /*
  799                  * Caller marked the request to be processed
  800                  * immediately; dispatch it directly to the
  801                  * driver unless the driver is currently blocked.
  802                  */
  803                 cap = crypto_checkdriver(hid);
   804                 /* Driver cannot disappear while there is an active session. */
  805                 KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
  806                 if (!cap->cc_qblocked) {
  807                         result = crypto_invoke(cap, crp, 0);
  808                         if (result != ERESTART)
  809                                 return (result);
  810                         /*
  811                          * The driver ran out of resources, put the request on
  812                          * the queue.
  813                          */
  814                 }
  815         }
  816         CRYPTO_Q_LOCK();
  817         TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
  818         if (crp_sleep)
  819                 wakeup_one(&crp_q);
  820         CRYPTO_Q_UNLOCK();
  821         return 0;
  822 }
  823 
  824 /*
   825  * Add an asymmetric crypto request to a queue,
  826  * to be processed by the kernel thread.
  827  */
  828 int
  829 crypto_kdispatch(struct cryptkop *krp)
  830 {
  831         int error;
  832 
  833         cryptostats.cs_kops++;
  834 
  835         error = crypto_kinvoke(krp, krp->krp_crid);
  836         if (error == ERESTART) {
  837                 CRYPTO_Q_LOCK();
  838                 TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
  839                 if (crp_sleep)
  840                         wakeup_one(&crp_q);
  841                 CRYPTO_Q_UNLOCK();
  842                 error = 0;
  843         }
  844         return error;
  845 }
  846 
  847 /*
  848  * Verify a driver is suitable for the specified operation.
  849  */
  850 static __inline int
  851 kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
  852 {
  853         return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
  854 }
  855 
  856 /*
  857  * Select a driver for an asym operation.  The driver must
  858  * support the necessary algorithm.  The caller can constrain
  859  * which device is selected with the flags parameter.  The
  860  * algorithm we use here is pretty stupid; just use the first
  861  * driver that supports the algorithms we need. If there are
  862  * multiple suitable drivers we choose the driver with the
  863  * fewest active operations.  We prefer hardware-backed
  864  * drivers to software ones when either may be used.
  865  */
  866 static struct cryptocap *
  867 crypto_select_kdriver(const struct cryptkop *krp, int flags)
  868 {
  869         struct cryptocap *cap, *best, *blocked;
  870         int match, hid;
  871 
  872         CRYPTO_DRIVER_ASSERT();
  873 
  874         /*
  875          * Look first for hardware crypto devices if permitted.
  876          */
  877         if (flags & CRYPTOCAP_F_HARDWARE)
  878                 match = CRYPTOCAP_F_HARDWARE;
  879         else
  880                 match = CRYPTOCAP_F_SOFTWARE;
  881         best = NULL;
  882         blocked = NULL;
  883 again:
  884         for (hid = 0; hid < crypto_drivers_num; hid++) {
  885                 cap = &crypto_drivers[hid];
  886                 /*
  887                  * If it's not initialized, is in the process of
  888                  * going away, or is not appropriate (hardware
  889                  * or software based on match), then skip.
  890                  */
  891                 if (cap->cc_dev == NULL ||
  892                     (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
  893                     (cap->cc_flags & match) == 0)
  894                         continue;
  895 
   896                 /* verify the algorithm is supported. */
  897                 if (kdriver_suitable(cap, krp)) {
  898                         if (best == NULL ||
  899                             cap->cc_koperations < best->cc_koperations)
  900                                 best = cap;
  901                 }
  902         }
  903         if (best != NULL)
  904                 return best;
  905         if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
  906                 /* sort of an Algol 68-style for loop */
  907                 match = CRYPTOCAP_F_SOFTWARE;
  908                 goto again;
  909         }
  910         return best;
  911 }
  912 
  913 /*
  914  * Dispatch an asymmetric crypto request.
  915  */
  916 static int
  917 crypto_kinvoke(struct cryptkop *krp, int crid)
  918 {
  919         struct cryptocap *cap = NULL;
  920         int error;
  921 
  922         KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
  923         KASSERT(krp->krp_callback != NULL,
   924             ("%s: krp->krp_callback == NULL", __func__));
  925 
  926         CRYPTO_DRIVER_LOCK();
  927         if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
  928                 cap = crypto_checkdriver(crid);
  929                 if (cap != NULL) {
  930                         /*
  931                          * Driver present, it must support the necessary
  932                          * algorithm and, if s/w drivers are excluded,
  933                          * it must be registered as hardware-backed.
  934                          */
  935                         if (!kdriver_suitable(cap, krp) ||
  936                             (!crypto_devallowsoft &&
  937                              (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
  938                                 cap = NULL;
  939                 }
  940         } else {
  941                 /*
  942                  * No requested driver; select based on crid flags.
  943                  */
  944                 if (!crypto_devallowsoft)       /* NB: disallow s/w drivers */
  945                         crid &= ~CRYPTOCAP_F_SOFTWARE;
  946                 cap = crypto_select_kdriver(krp, crid);
  947         }
  948         if (cap != NULL && !cap->cc_kqblocked) {
  949                 krp->krp_hid = cap - crypto_drivers;
  950                 cap->cc_koperations++;
  951                 CRYPTO_DRIVER_UNLOCK();
  952                 error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
  953                 CRYPTO_DRIVER_LOCK();
  954                 if (error == ERESTART) {
  955                         cap->cc_koperations--;
  956                         CRYPTO_DRIVER_UNLOCK();
  957                         return (error);
  958                 }
  959         } else {
  960                 /*
  961                  * NB: cap is !NULL if device is blocked; in
  962                  *     that case return ERESTART so the operation
  963                  *     is resubmitted if possible.
  964                  */
  965                 error = (cap == NULL) ? ENODEV : ERESTART;
  966         }
  967         CRYPTO_DRIVER_UNLOCK();
  968 
  969         if (error) {
  970                 krp->krp_status = error;
  971                 crypto_kdone(krp);
  972         }
  973         return 0;
  974 }
  975 
  976 #ifdef CRYPTO_TIMING
  977 static void
  978 crypto_tstat(struct cryptotstat *ts, struct bintime *bt)
  979 {
  980         struct bintime now, delta;
  981         struct timespec t;
  982         uint64_t u;
  983 
  984         binuptime(&now);
  985         u = now.frac;
  986         delta.frac = now.frac - bt->frac;
  987         delta.sec = now.sec - bt->sec;
  988         if (u < delta.frac)
  989                 delta.sec--;
  990         bintime2timespec(&delta, &t);
  991         timespecadd(&ts->acc, &t);
  992         if (timespeccmp(&t, &ts->min, <))
  993                 ts->min = t;
  994         if (timespeccmp(&t, &ts->max, >))
  995                 ts->max = t;
  996         ts->count++;
  997 
  998         *bt = now;
  999 }
 1000 #endif
 1001 
 1002 /*
 1003  * Dispatch a crypto request to the appropriate crypto devices.
 1004  */
 1005 static int
 1006 crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
 1007 {
 1008 
 1009         KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
 1010         KASSERT(crp->crp_callback != NULL,
 1011             ("%s: crp->crp_callback == NULL", __func__));
 1012         KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
 1013 
 1014 #ifdef CRYPTO_TIMING
 1015         if (crypto_timing)
 1016                 crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
 1017 #endif
 1018         if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
 1019                 struct cryptodesc *crd;
 1020                 u_int64_t nid;
 1021 
 1022                 /*
 1023                  * Driver has unregistered; migrate the session and return
 1024                  * an error to the caller so they'll resubmit the op.
 1025                  *
 1026                  * XXX: What if there are more already queued requests for this
 1027                  *      session?
 1028                  */
 1029                 crypto_freesession(crp->crp_sid);
 1030 
 1031                 for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
 1032                         crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
 1033 
 1034                 /* XXX propagate flags from initial session? */
 1035                 if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
 1036                     CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
 1037                         crp->crp_sid = nid;
 1038 
 1039                 crp->crp_etype = EAGAIN;
 1040                 crypto_done(crp);
 1041                 return 0;
 1042         } else {
 1043                 /*
 1044                  * Invoke the driver to process the request.
 1045                  */
 1046                 return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
 1047         }
 1048 }
 1049 
 1050 /*
 1051  * Release a set of crypto descriptors.
 1052  */
 1053 void
 1054 crypto_freereq(struct cryptop *crp)
 1055 {
 1056         struct cryptodesc *crd;
 1057 
 1058         if (crp == NULL)
 1059                 return;
 1060 
 1061 #ifdef DIAGNOSTIC
 1062         {
 1063                 struct cryptop *crp2;
 1064 
 1065                 CRYPTO_Q_LOCK();
 1066                 TAILQ_FOREACH(crp2, &crp_q, crp_next) {
 1067                         KASSERT(crp2 != crp,
 1068                             ("Freeing cryptop from the crypto queue (%p).",
 1069                             crp));
 1070                 }
 1071                 CRYPTO_Q_UNLOCK();
 1072                 CRYPTO_RETQ_LOCK();
 1073                 TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
 1074                         KASSERT(crp2 != crp,
 1075                             ("Freeing cryptop from the return queue (%p).",
 1076                             crp));
 1077                 }
 1078                 CRYPTO_RETQ_UNLOCK();
 1079         }
 1080 #endif
 1081 
 1082         while ((crd = crp->crp_desc) != NULL) {
 1083                 crp->crp_desc = crd->crd_next;
 1084                 uma_zfree(cryptodesc_zone, crd);
 1085         }
 1086         uma_zfree(cryptop_zone, crp);
 1087 }
 1088 
 1089 /*
 1090  * Acquire a set of crypto descriptors.
 1091  */
 1092 struct cryptop *
 1093 crypto_getreq(int num)
 1094 {
 1095         struct cryptodesc *crd;
 1096         struct cryptop *crp;
 1097 
 1098         crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
 1099         if (crp != NULL) {
 1100                 while (num--) {
 1101                         crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
 1102                         if (crd == NULL) {
 1103                                 crypto_freereq(crp);
 1104                                 return NULL;
 1105                         }
 1106 
 1107                         crd->crd_next = crp->crp_desc;
 1108                         crp->crp_desc = crd;
 1109                 }
 1110         }
 1111         return crp;
 1112 }
 1113 
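       /*
        * A minimal sketch of how a consumer builds and submits a request
        * (field names from cryptodev.h; error handling and buffer setup
        * omitted, and the session "sid" assumed to come from a prior
        * crypto_newsession() call):
        *
        *	struct cryptop *crp;
        *	struct cryptodesc *crd;
        *
        *	crp = crypto_getreq(1);			// one descriptor
        *	if (crp == NULL)
        *		return (ENOMEM);
        *	crd = crp->crp_desc;
        *	crd->crd_alg = CRYPTO_AES_CBC;		// must match the session
        *	crd->crd_skip = 0;
        *	crd->crd_len = len;
        *	crd->crd_flags = CRD_F_ENCRYPT;
        *	crp->crp_sid = sid;
        *	crp->crp_ilen = len;
        *	crp->crp_flags = CRYPTO_F_CBIFSYNC;
        *	crp->crp_buf = buf;
        *	crp->crp_callback = example_done;	// hypothetical callback
        *	error = crypto_dispatch(crp);
        *
        * The callback eventually releases the request with crypto_freereq().
        */
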
 1114 /*
 1115  * Invoke the callback on behalf of the driver.
 1116  */
 1117 void
 1118 crypto_done(struct cryptop *crp)
 1119 {
 1120         KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
 1121                 ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
 1122         crp->crp_flags |= CRYPTO_F_DONE;
 1123         if (crp->crp_etype != 0)
 1124                 cryptostats.cs_errs++;
 1125 #ifdef CRYPTO_TIMING
 1126         if (crypto_timing)
 1127                 crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
 1128 #endif
 1129         /*
 1130          * CBIMM means unconditionally do the callback immediately;
 1131          * CBIFSYNC means do the callback immediately only if the
 1132          * operation was done synchronously.  Both are used to avoid
 1133          * doing extraneous context switches; the latter is mostly
 1134          * used with the software crypto driver.
 1135          */
 1136         if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
 1137             ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
 1138              (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
 1139                 /*
 1140                  * Do the callback directly.  This is ok when the
 1141                  * callback routine does very little (e.g. the
 1142                  * /dev/crypto callback method just does a wakeup).
 1143                  */
 1144 #ifdef CRYPTO_TIMING
 1145                 if (crypto_timing) {
 1146                         /*
 1147                          * NB: We must copy the timestamp before
 1148                          * doing the callback as the cryptop is
 1149                          * likely to be reclaimed.
 1150                          */
 1151                         struct bintime t = crp->crp_tstamp;
 1152                         crypto_tstat(&cryptostats.cs_cb, &t);
 1153                         crp->crp_callback(crp);
 1154                         crypto_tstat(&cryptostats.cs_finis, &t);
 1155                 } else
 1156 #endif
 1157                         crp->crp_callback(crp);
 1158         } else {
 1159                 /*
 1160                  * Normal case; queue the callback for the thread.
 1161                  */
 1162                 CRYPTO_RETQ_LOCK();
 1163                 if (CRYPTO_RETQ_EMPTY())
 1164                         wakeup_one(&crp_ret_q); /* shared wait channel */
 1165                 TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
 1166                 CRYPTO_RETQ_UNLOCK();
 1167         }
 1168 }
 1169 
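       /*
        * A sketch of the callback side (hypothetical consumer code): because
        * crypto_invoke() above completes requests whose driver vanished with
        * crp_etype set to EAGAIN after migrating the session, a callback can
        * detect this, clear the error and the DONE flag, and resubmit:
        *
        *	static int
        *	example_done(struct cryptop *crp)
        *	{
        *		if (crp->crp_etype == EAGAIN) {
        *			crp->crp_etype = 0;
        *			crp->crp_flags &= ~CRYPTO_F_DONE;
        *			return (crypto_dispatch(crp));
        *		}
        *		// ... consume results, then crypto_freereq(crp) ...
        *		return (0);
        *	}
        */
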
 1170 /*
 1171  * Invoke the callback on behalf of the driver.
 1172  */
 1173 void
 1174 crypto_kdone(struct cryptkop *krp)
 1175 {
 1176         struct cryptocap *cap;
 1177 
 1178         if (krp->krp_status != 0)
 1179                 cryptostats.cs_kerrs++;
 1180         CRYPTO_DRIVER_LOCK();
 1181         /* XXX: What if driver is loaded in the meantime? */
 1182         if (krp->krp_hid < crypto_drivers_num) {
 1183                 cap = &crypto_drivers[krp->krp_hid];
 1184                 KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0"));
 1185                 cap->cc_koperations--;
 1186                 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
 1187                         crypto_remove(cap);
 1188         }
 1189         CRYPTO_DRIVER_UNLOCK();
 1190         CRYPTO_RETQ_LOCK();
 1191         if (CRYPTO_RETQ_EMPTY())
 1192                 wakeup_one(&crp_ret_q);         /* shared wait channel */
 1193         TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
 1194         CRYPTO_RETQ_UNLOCK();
 1195 }
 1196 
 1197 int
 1198 crypto_getfeat(int *featp)
 1199 {
 1200         int hid, kalg, feat = 0;
 1201 
 1202         CRYPTO_DRIVER_LOCK();
 1203         for (hid = 0; hid < crypto_drivers_num; hid++) {
 1204                 const struct cryptocap *cap = &crypto_drivers[hid];
 1205 
 1206                 if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
 1207                     !crypto_devallowsoft) {
 1208                         continue;
 1209                 }
 1210                 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
 1211                         if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
 1212                                 feat |=  1 << kalg;
 1213         }
 1214         CRYPTO_DRIVER_UNLOCK();
 1215         *featp = feat;
 1216         return (0);
 1217 }
 1218 
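       /*
        * The feature word computed here is a bitmask indexed by the CRK_*
        * asymmetric algorithm numbers; cryptodev.h provides matching CRF_*
        * masks, so a caller interested in, say, modular exponentiation can
        * test for it roughly as follows:
        *
        *	int feat;
        *
        *	if (crypto_getfeat(&feat) == 0 && (feat & CRF_MOD_EXP) != 0)
        *		;	// at least one usable driver does CRK_MOD_EXP
        */
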
 1219 /*
 1220  * Terminate a thread at module unload.  The process that
 1221  * initiated this is waiting for us to signal that we're gone;
  1222  * wake it up and exit.  We use the driver table lock to ensure
 1223  * we don't do the wakeup before they're waiting.  There is no
 1224  * race here because the waiter sleeps on the proc lock for the
 1225  * thread so it gets notified at the right time because of an
 1226  * extra wakeup that's done in exit1().
 1227  */
 1228 static void
 1229 crypto_finis(void *chan)
 1230 {
 1231         CRYPTO_DRIVER_LOCK();
 1232         wakeup_one(chan);
 1233         CRYPTO_DRIVER_UNLOCK();
 1234         kproc_exit(0);
 1235 }
 1236 
 1237 /*
 1238  * Crypto thread, dispatches crypto requests.
 1239  */
 1240 static void
 1241 crypto_proc(void)
 1242 {
 1243         struct cryptop *crp, *submit;
 1244         struct cryptkop *krp;
 1245         struct cryptocap *cap;
 1246         u_int32_t hid;
 1247         int result, hint;
 1248 
 1249 #if defined(__i386__) || defined(__amd64__)
 1250         fpu_kern_thread(FPU_KERN_NORMAL);
 1251 #endif
 1252 
 1253         CRYPTO_Q_LOCK();
 1254         for (;;) {
 1255                 /*
 1256                  * Find the first element in the queue that can be
 1257                  * processed and look-ahead to see if multiple ops
 1258                  * are ready for the same driver.
 1259                  */
 1260                 submit = NULL;
 1261                 hint = 0;
 1262                 TAILQ_FOREACH(crp, &crp_q, crp_next) {
 1263                         hid = CRYPTO_SESID2HID(crp->crp_sid);
 1264                         cap = crypto_checkdriver(hid);
 1265                         /*
  1266                          * Driver cannot disappear while there is an active
 1267                          * session.
 1268                          */
 1269                         KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
 1270                             __func__, __LINE__));
 1271                         if (cap == NULL || cap->cc_dev == NULL) {
 1272                                 /* Op needs to be migrated, process it. */
 1273                                 if (submit == NULL)
 1274                                         submit = crp;
 1275                                 break;
 1276                         }
 1277                         if (!cap->cc_qblocked) {
 1278                                 if (submit != NULL) {
 1279                                         /*
 1280                                          * We stop on finding another op,
  1281                                          * regardless of whether it's for the same
 1282                                          * driver or not.  We could keep
 1283                                          * searching the queue but it might be
 1284                                          * better to just use a per-driver
 1285                                          * queue instead.
 1286                                          */
 1287                                         if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
 1288                                                 hint = CRYPTO_HINT_MORE;
 1289                                         break;
 1290                                 } else {
 1291                                         submit = crp;
 1292                                         if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
 1293                                                 break;
  1294                                         /* keep scanning in case more are queued */
 1295                                 }
 1296                         }
 1297                 }
 1298                 if (submit != NULL) {
 1299                         TAILQ_REMOVE(&crp_q, submit, crp_next);
 1300                         hid = CRYPTO_SESID2HID(submit->crp_sid);
 1301                         cap = crypto_checkdriver(hid);
 1302                         KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
 1303                             __func__, __LINE__));
 1304                         result = crypto_invoke(cap, submit, hint);
 1305                         if (result == ERESTART) {
 1306                                 /*
 1307                                  * The driver ran out of resources, mark the
 1308                                  * driver ``blocked'' for cryptop's and put
  1309                                  * the request back in the queue.  It would be
  1310                                  * best to put the request back where we got
 1311                                  * it but that's hard so for now we put it
 1312                                  * at the front.  This should be ok; putting
 1313                                  * it at the end does not work.
 1314                                  */
 1315                                 /* XXX validate sid again? */
 1316                                 crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
 1317                                 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
 1318                                 cryptostats.cs_blocks++;
 1319                         }
 1320                 }
 1321 
 1322                 /* As above, but for key ops */
 1323                 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
 1324                         cap = crypto_checkdriver(krp->krp_hid);
 1325                         if (cap == NULL || cap->cc_dev == NULL) {
 1326                                 /*
 1327                                  * Operation needs to be migrated, invalidate
 1328                                  * the assigned device so it will reselect a
 1329                                  * new one below.  Propagate the original
 1330                                  * crid selection flags if supplied.
 1331                                  */
 1332                                 krp->krp_hid = krp->krp_crid &
 1333                                     (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
 1334                                 if (krp->krp_hid == 0)
 1335                                         krp->krp_hid =
 1336                                     CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
 1337                                 break;
 1338                         }
 1339                         if (!cap->cc_kqblocked)
 1340                                 break;
 1341                 }
 1342                 if (krp != NULL) {
 1343                         TAILQ_REMOVE(&crp_kq, krp, krp_next);
 1344                         result = crypto_kinvoke(krp, krp->krp_hid);
 1345                         if (result == ERESTART) {
 1346                                 /*
 1347                                  * The driver ran out of resources, mark the
 1348                                  * driver ``blocked'' for cryptkop's and put
  1349                                  * the request back in the queue.  It would be
  1350                                  * best to put the request back where we got
 1351                                  * it but that's hard so for now we put it
 1352                                  * at the front.  This should be ok; putting
 1353                                  * it at the end does not work.
 1354                                  */
 1355                                 /* XXX validate sid again? */
 1356                                 crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
 1357                                 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
 1358                                 cryptostats.cs_kblocks++;
 1359                         }
 1360                 }
 1361 
 1362                 if (submit == NULL && krp == NULL) {
 1363                         /*
 1364                          * Nothing more to be processed.  Sleep until we're
 1365                          * woken because there are more ops to process.
 1366                          * This happens either by submission or by a driver
 1367                          * becoming unblocked and notifying us through
 1368                          * crypto_unblock.  Note that when we wakeup we
 1369                          * start processing each queue again from the
 1370                          * front. It's not clear that it's important to
 1371                          * preserve this ordering since ops may finish
 1372                          * out of order if dispatched to different devices
 1373                          * and some become blocked while others do not.
 1374                          */
 1375                         crp_sleep = 1;
 1376                         msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
 1377                         crp_sleep = 0;
 1378                         if (cryptoproc == NULL)
 1379                                 break;
 1380                         cryptostats.cs_intrs++;
 1381                 }
 1382         }
 1383         CRYPTO_Q_UNLOCK();
 1384 
 1385         crypto_finis(&crp_q);
 1386 }
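
/*
 * Illustrative sketch (not part of the original file): the dispatch loop
 * above marks a driver ``blocked'' when an invoke returns ERESTART and then
 * sleeps on &crp_q.  The driver side of that contract is to call
 * crypto_unblock() once its resources free up, which clears the
 * cc_qblocked/cc_kqblocked flags and wakes this thread.  The function name
 * and "cid" parameter below are hypothetical; crypto_unblock() and the
 * CRYPTO_SYMQ/CRYPTO_ASYMQ flags are the existing opencrypto interface.
 */
static void
mydrv_resources_available(u_int32_t cid)
{

        /* Let both symmetric and key ops be dispatched to us again. */
        crypto_unblock(cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
}
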
 1387 
 1388 /*
 1389  * Crypto return thread; runs the callbacks for completed crypto requests.
 1390  * Callbacks are done here, rather than in the crypto drivers, because
 1391  * callbacks typically are expensive and would slow interrupt handling.
 1392  */
 1393 static void
 1394 crypto_ret_proc(void)
 1395 {
 1396         struct cryptop *crpt;
 1397         struct cryptkop *krpt;
 1398 
 1399         CRYPTO_RETQ_LOCK();
 1400         for (;;) {
 1401                 /* Harvest return queues for completed ops */
 1402                 crpt = TAILQ_FIRST(&crp_ret_q);
 1403                 if (crpt != NULL)
 1404                         TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);
 1405 
 1406                 krpt = TAILQ_FIRST(&crp_ret_kq);
 1407                 if (krpt != NULL)
 1408                         TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);
 1409 
 1410                 if (crpt != NULL || krpt != NULL) {
 1411                         CRYPTO_RETQ_UNLOCK();
 1412                         /*
 1413                          * Run callbacks unlocked.
 1414                          */
 1415                         if (crpt != NULL) {
 1416 #ifdef CRYPTO_TIMING
 1417                                 if (crypto_timing) {
 1418                                         /*
 1419                                          * NB: We must copy the timestamp before
 1420                                          * doing the callback as the cryptop is
 1421                                          * likely to be reclaimed.
 1422                                          */
 1423                                         struct bintime t = crpt->crp_tstamp;
 1424                                         crypto_tstat(&cryptostats.cs_cb, &t);
 1425                                         crpt->crp_callback(crpt);
 1426                                         crypto_tstat(&cryptostats.cs_finis, &t);
 1427                                 } else
 1428 #endif
 1429                                         crpt->crp_callback(crpt);
 1430                         }
 1431                         if (krpt != NULL)
 1432                                 krpt->krp_callback(krpt);
 1433                         CRYPTO_RETQ_LOCK();
 1434                 } else {
 1435                         /*
 1436                          * Nothing more to be processed.  Sleep until we're
 1437                          * woken because there are more returns to process.
 1438                          */
 1439                         msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT,
 1440                                 "crypto_ret_wait", 0);
 1441                         if (cryptoretproc == NULL)
 1442                                 break;
 1443                         cryptostats.cs_rets++;
 1444                 }
 1445         }
 1446         CRYPTO_RETQ_UNLOCK();
 1447 
 1448         crypto_finis(&crp_ret_q);
 1449 }
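
/*
 * Illustrative sketch (not part of the original file): crp_callback is what
 * the return thread above runs.  A typical consumer callback checks
 * crp_etype, resubmits on EAGAIN (e.g. after a session migration) and
 * otherwise wakes whoever is sleeping on the request.  The function name is
 * hypothetical; the cryptop fields, crypto_dispatch() and wakeup() are the
 * existing interfaces (see crypto(9)).
 */
static int
mydrv_crypto_cb(struct cryptop *crp)
{

        if (crp->crp_etype == EAGAIN) {
                /* Transient failure; clear the error and resubmit. */
                crp->crp_etype = 0;
                return (crypto_dispatch(crp));
        }
        /* Done (possibly with an error left in crp_etype); wake submitter. */
        wakeup(crp);
        return (0);
}
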
 1450 
 1451 #ifdef DDB
 1452 static void
 1453 db_show_drivers(void)
 1454 {
 1455         int hid;
 1456 
 1457         db_printf("%12s %4s %4s %8s %2s %2s\n"
 1458                 , "Device"
 1459                 , "Ses"
 1460                 , "Kops"
 1461                 , "Flags"
 1462                 , "QB"
 1463                 , "KB"
 1464         );
 1465         for (hid = 0; hid < crypto_drivers_num; hid++) {
 1466                 const struct cryptocap *cap = &crypto_drivers[hid];
 1467                 if (cap->cc_dev == NULL)
 1468                         continue;
 1469                 db_printf("%-12s %4u %4u %08x %2u %2u\n"
 1470                     , device_get_nameunit(cap->cc_dev)
 1471                     , cap->cc_sessions
 1472                     , cap->cc_koperations
 1473                     , cap->cc_flags
 1474                     , cap->cc_qblocked
 1475                     , cap->cc_kqblocked
 1476                 );
 1477         }
 1478 }
 1479 
 1480 DB_SHOW_COMMAND(crypto, db_show_crypto)
 1481 {
 1482         struct cryptop *crp;
 1483 
 1484         db_show_drivers();
 1485         db_printf("\n");
 1486 
 1487         db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
 1488             "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
 1489             "Desc", "Callback");
 1490         TAILQ_FOREACH(crp, &crp_q, crp_next) {
 1491                 db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
 1492                     , (int) CRYPTO_SESID2HID(crp->crp_sid)
 1493                     , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
 1494                     , crp->crp_ilen, crp->crp_olen
 1495                     , crp->crp_etype
 1496                     , crp->crp_flags
 1497                     , crp->crp_desc
 1498                     , crp->crp_callback
 1499                 );
 1500         }
 1501         if (!TAILQ_EMPTY(&crp_ret_q)) {
 1502                 db_printf("\n%4s %4s %4s %8s\n",
 1503                     "HID", "Etype", "Flags", "Callback");
 1504                 TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
 1505                         db_printf("%4u %4u %04x %8p\n"
 1506                             , (int) CRYPTO_SESID2HID(crp->crp_sid)
 1507                             , crp->crp_etype
 1508                             , crp->crp_flags
 1509                             , crp->crp_callback
 1510                         );
 1511                 }
 1512         }
 1513 }
 1514 
 1515 DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
 1516 {
 1517         struct cryptkop *krp;
 1518 
 1519         db_show_drivers();
 1520         db_printf("\n");
 1521 
 1522         db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
 1523             "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
 1524         TAILQ_FOREACH(krp, &crp_kq, krp_next) {
 1525                 db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
 1526                     , krp->krp_op
 1527                     , krp->krp_status
 1528                     , krp->krp_iparams, krp->krp_oparams
 1529                     , krp->krp_crid, krp->krp_hid
 1530                     , krp->krp_callback
 1531                 );
 1532         }
 1533         if (!TAILQ_EMPTY(&crp_ret_kq)) {
 1534                 db_printf("%4s %5s %8s %4s %8s\n",
 1535                     "Op", "Status", "CRID", "HID", "Callback");
 1536                 TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
 1537                         db_printf("%4u %5u %08x %4u %8p\n"
 1538                             , krp->krp_op
 1539                             , krp->krp_status
 1540                             , krp->krp_crid, krp->krp_hid
 1541                             , krp->krp_callback
 1542                         );
 1543                 }
 1544         }
 1545 }
 1546 #endif
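
/*
 * Usage note (not part of the original file): the DB_SHOW_COMMAND()
 * definitions above register "show crypto" and "show kcrypto" with the
 * in-kernel debugger, so the driver table and the pending/return queues
 * can be inspected from the ddb prompt, e.g. "db> show crypto".
 */
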
 1547 
 1548 int crypto_modevent(module_t mod, int type, void *unused);
 1549 
 1550 /*
 1551  * Initialization code, both for static and dynamic loading.
 1552  * Note this is not invoked with the usual DECLARE_MODULE
 1553  * mechanism but instead is listed as a dependency by the
 1554  * cryptosoft driver.  This guarantees proper ordering of
 1555  * calls on module load/unload.
 1556  */
 1557 int
 1558 crypto_modevent(module_t mod, int type, void *unused)
 1559 {
 1560         int error = EINVAL;
 1561 
 1562         switch (type) {
 1563         case MOD_LOAD:
 1564                 error = crypto_init();
 1565                 if (error == 0 && bootverbose)
 1566                         printf("crypto: <crypto core>\n");
 1567                 break;
 1568         case MOD_UNLOAD:
 1569                 /* XXX disallow unload while there are active sessions */
 1570                 error = 0;
 1571                 crypto_destroy();
 1572                 return 0;
 1573         }
 1574         return error;
 1575 }
 1576 MODULE_VERSION(crypto, 1);
 1577 MODULE_DEPEND(crypto, zlib, 1, 1, 1);
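
/*
 * Illustrative sketch (not part of the original file): as the comment above
 * crypto_modevent() notes, the core is not registered through the usual
 * DECLARE_MODULE() path; instead a driver (cryptosoft in the base system)
 * hooks crypto_modevent() in and records a module dependency, which is what
 * gives MOD_LOAD/MOD_UNLOAD their ordering.  A hypothetical external driver
 * would typically just declare the dependency:
 */
MODULE_VERSION(mydrv, 1);
MODULE_DEPEND(mydrv, crypto, 1, 1, 1);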
