FreeBSD/Linux Kernel Cross Reference
sys/dev/qat_c2xxx/qat_hw15.c

/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/*      $NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $      */

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *   Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(0, "$NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <opencrypto/xform.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "qatreg.h"
#include "qat_hw15reg.h"
#include "qatvar.h"
#include "qat_hw15var.h"

static int      qat_adm_ring_init_ring_table(struct qat_softc *);
static void     qat_adm_ring_build_slice_mask(uint16_t *, uint32_t, uint32_t);
static void     qat_adm_ring_build_shram_mask(uint64_t *, uint32_t, uint32_t);
static int      qat_adm_ring_build_ring_table(struct qat_softc *, uint32_t);
static int      qat_adm_ring_build_init_msg(struct qat_softc *,
                    struct fw_init_req *, enum fw_init_cmd_id, uint32_t,
                    struct qat_accel_init_cb *);
static int      qat_adm_ring_send_init_msg_sync(struct qat_softc *,
                    enum fw_init_cmd_id, uint32_t);
static int      qat_adm_ring_send_init_msg(struct qat_softc *,
                    enum fw_init_cmd_id);
static int      qat_adm_ring_intr(struct qat_softc *, void *, void *);

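/*
 * Helpers to populate individual regions of a firmware lookaside (LA)
 * request descriptor.  qat_msg_params_populate() combines them to build
 * a complete bulk request from a session's crypto descriptor.
 */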
void
qat_msg_req_type_populate(struct arch_if_req_hdr *msg, enum arch_if_req type,
    uint32_t rxring)
{

        memset(msg, 0, sizeof(struct arch_if_req_hdr));
        msg->flags = ARCH_IF_FLAGS_VALID_FLAG |
            ARCH_IF_FLAGS_RESP_RING_TYPE_ET | ARCH_IF_FLAGS_RESP_TYPE_S;
        msg->req_type = type;
        msg->resp_pipe_id = rxring;
}

void
qat_msg_cmn_hdr_populate(struct fw_la_bulk_req *msg, bus_addr_t desc_paddr,
    uint8_t hdrsz, uint8_t hwblksz, uint16_t comn_req_flags, uint32_t flow_id)
{
        struct fw_comn_req_hdr *hdr = &msg->comn_hdr;

        hdr->comn_req_flags = comn_req_flags;
        hdr->content_desc_params_sz = hwblksz;
        hdr->content_desc_hdr_sz = hdrsz;
        hdr->content_desc_addr = desc_paddr;
        msg->flow_id = flow_id;
}

void
qat_msg_service_cmd_populate(struct fw_la_bulk_req *msg, enum fw_la_cmd_id cmdid,
    uint16_t cmd_flags)
{
        msg->comn_la_req.la_cmd_id = cmdid;
        msg->comn_la_req.u.la_flags = cmd_flags;
}

void
qat_msg_cmn_mid_populate(struct fw_comn_req_mid *msg, void *cookie,
    uint64_t src, uint64_t dst)
{

        msg->opaque_data = (uint64_t)(uintptr_t)cookie;
        msg->src_data_addr = src;
        if (dst == 0)
                msg->dest_data_addr = src;
        else
                msg->dest_data_addr = dst;
}

void
qat_msg_req_params_populate(struct fw_la_bulk_req *msg,
    bus_addr_t req_params_paddr, uint8_t req_params_sz)
{
        msg->req_params_addr = req_params_paddr;
        msg->comn_la_req.u1.req_params_blk_sz = req_params_sz / 8;
}

void
qat_msg_cmn_footer_populate(union fw_comn_req_ftr *msg, uint64_t next_addr)
{
        msg->next_request_addr = next_addr;
}

void
qat_msg_params_populate(struct fw_la_bulk_req *msg,
    struct qat_crypto_desc *desc, uint8_t req_params_sz,
    uint16_t service_cmd_flags, uint16_t comn_req_flags)
{
        qat_msg_cmn_hdr_populate(msg, desc->qcd_desc_paddr,
            desc->qcd_hdr_sz, desc->qcd_hw_blk_sz, comn_req_flags, 0);
        qat_msg_service_cmd_populate(msg, desc->qcd_cmd_id, service_cmd_flags);
        qat_msg_cmn_mid_populate(&msg->comn_mid, NULL, 0, 0);
        qat_msg_req_params_populate(msg, 0, req_params_sz);
        qat_msg_cmn_footer_populate(&msg->comn_ftr, 0);
}

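/*
 * Assign per-service views of the master ring table: with a single AE
 * only crypto service A is used; with two or four AEs, services A and B
 * each get their own table entry and service mask slot.
 */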
static int
qat_adm_ring_init_ring_table(struct qat_softc *sc)
{
        struct qat_admin_rings *qadr = &sc->sc_admin_rings;

        if (sc->sc_ae_num == 1) {
                qadr->qadr_cya_ring_tbl =
                    &qadr->qadr_master_ring_tbl[0];
                qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A;
        } else if (sc->sc_ae_num == 2 || sc->sc_ae_num == 4) {
                qadr->qadr_cya_ring_tbl =
                    &qadr->qadr_master_ring_tbl[0];
                qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A;
                qadr->qadr_cyb_ring_tbl =
                    &qadr->qadr_master_ring_tbl[1];
                qadr->qadr_srv_mask[1] = QAT_SERVICE_CRYPTO_B;
        }

        return 0;
}

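/*
 * Allocate the DMA-able master ring table shared with the firmware,
 * give every bulk ring a default weight, set up the admin TX/RX rings,
 * and count the active AEs for shared-RAM partitioning.
 */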
int
qat_adm_ring_init(struct qat_softc *sc)
{
        struct qat_admin_rings *qadr = &sc->sc_admin_rings;
        int error, i, j;

        error = qat_alloc_dmamem(sc, &qadr->qadr_dma, 1, PAGE_SIZE, PAGE_SIZE);
        if (error)
                return error;

        qadr->qadr_master_ring_tbl = qadr->qadr_dma.qdm_dma_vaddr;

        MPASS(sc->sc_ae_num *
            sizeof(struct fw_init_ring_table) <= PAGE_SIZE);

        /* Initialize the Master Ring Table */
        for (i = 0; i < sc->sc_ae_num; i++) {
                struct fw_init_ring_table *firt =
                    &qadr->qadr_master_ring_tbl[i];

                for (j = 0; j < INIT_RING_TABLE_SZ; j++) {
                        struct fw_init_ring_params *firp =
                            &firt->firt_bulk_rings[j];

                        firp->firp_reserved = 0;
                        firp->firp_curr_weight = QAT_DEFAULT_RING_WEIGHT;
                        firp->firp_init_weight = QAT_DEFAULT_RING_WEIGHT;
                        firp->firp_ring_pvl = QAT_DEFAULT_PVL;
                }
                memset(firt->firt_ring_mask, 0, sizeof(firt->firt_ring_mask));
        }

        error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_TX,
            ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_req_size,
            NULL, NULL, "admin_tx", &qadr->qadr_admin_tx);
        if (error)
                return error;

        error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_RX,
            ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_resp_size,
            qat_adm_ring_intr, qadr, "admin_rx", &qadr->qadr_admin_rx);
        if (error)
                return error;

        /*
         * Finally set up the service indices into the Master Ring Table
         * and convenient ring table pointers for each service enabled.
         * Only the Admin rings are initialized.
         */
        error = qat_adm_ring_init_ring_table(sc);
        if (error)
                return error;

        /*
         * Calculate the number of active AEs per QAT
         * needed for Shram partitioning.
         */
        for (i = 0; i < sc->sc_ae_num; i++) {
                if (qadr->qadr_srv_mask[i])
                        qadr->qadr_active_aes_per_accel++;
        }

        return 0;
}

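/*
 * Build the slice mask for a SET_AE_INFO message: select the crypto
 * slices named by srv_mask and, when init_shram is set, ask the
 * firmware to initialize the shared RAM as well.
 */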
static void
qat_adm_ring_build_slice_mask(uint16_t *slice_mask, uint32_t srv_mask,
    uint32_t init_shram)
{
        uint16_t shram = 0, comn_req = 0;

        if (init_shram)
                shram = COMN_REQ_SHRAM_INIT_REQUIRED;

        if (srv_mask & QAT_SERVICE_CRYPTO_A)
                comn_req |= COMN_REQ_CY0_ONLY(shram);
        if (srv_mask & QAT_SERVICE_CRYPTO_B)
                comn_req |= COMN_REQ_CY1_ONLY(shram);

        *slice_mask = comn_req;
}

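/*
 * Partition the 64 shared-RAM regions among the active AEs: a single
 * AE owns all of them, two AEs split them 32/32, and three AEs split
 * them 23/23/18.
 */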
static void
qat_adm_ring_build_shram_mask(uint64_t *shram_mask, uint32_t active_aes,
    uint32_t ae)
{
        *shram_mask = 0;

        if (active_aes == 1) {
                *shram_mask = ~(*shram_mask);
        } else if (active_aes == 2) {
                if (ae == 1)
                        *shram_mask = ((~(*shram_mask)) & 0xffffffff);
                else
                        *shram_mask = ((~(*shram_mask)) & 0xffffffff00000000ull);
        } else if (active_aes == 3) {
                if (ae == 0)
                        *shram_mask = ((~(*shram_mask)) & 0x7fffff);
                else if (ae == 1)
                        *shram_mask = ((~(*shram_mask)) & 0x3fffff800000ull);
                else
                        *shram_mask = ((~(*shram_mask)) & 0xffffc00000000000ull);
        } else {
                panic("%s: only up to three active AEs are supported", __func__);
        }
}

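/*
 * Give the symmetric-crypto TX ring of this AE a high-priority weight
 * in its service's ring table and mark the ring as in use.
 */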
static int
qat_adm_ring_build_ring_table(struct qat_softc *sc, uint32_t ae)
{
        struct qat_admin_rings *qadr = &sc->sc_admin_rings;
        struct fw_init_ring_table *tbl;
        struct fw_init_ring_params *param;
        uint8_t srv_mask = sc->sc_admin_rings.qadr_srv_mask[ae];

        if ((srv_mask & QAT_SERVICE_CRYPTO_A)) {
                tbl = qadr->qadr_cya_ring_tbl;
        } else if ((srv_mask & QAT_SERVICE_CRYPTO_B)) {
                tbl = qadr->qadr_cyb_ring_tbl;
        } else {
                device_printf(sc->sc_dev,
                    "Invalid execution engine %d\n", ae);
                return EINVAL;
        }

        param = &tbl->firt_bulk_rings[sc->sc_hw.qhw_ring_sym_tx];
        param->firp_curr_weight = QAT_HI_PRIO_RING_WEIGHT;
        param->firp_init_weight = QAT_HI_PRIO_RING_WEIGHT;
        FW_INIT_RING_MASK_SET(tbl, sc->sc_hw.qhw_ring_sym_tx);

        return 0;
}

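/*
 * Construct one firmware init request.  SET_AE_INFO carries the slice
 * and shared-RAM masks for an AE; SET_RING_INFO points the AE at its
 * entry in the DMA-able master ring table.
 */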
static int
qat_adm_ring_build_init_msg(struct qat_softc *sc,
    struct fw_init_req *initmsg, enum fw_init_cmd_id cmd, uint32_t ae,
    struct qat_accel_init_cb *cb)
{
        struct fw_init_set_ae_info_hdr *aehdr;
        struct fw_init_set_ae_info *aeinfo;
        struct fw_init_set_ring_info_hdr *ringhdr;
        struct fw_init_set_ring_info *ringinfo;
        int init_shram = 0, tgt_id, cluster_id;
        uint32_t srv_mask;

        srv_mask = sc->sc_admin_rings.qadr_srv_mask[
            ae % sc->sc_ae_num];

        memset(initmsg, 0, sizeof(struct fw_init_req));

        qat_msg_req_type_populate(&initmsg->comn_hdr.arch_if,
            ARCH_IF_REQ_QAT_FW_INIT,
            sc->sc_admin_rings.qadr_admin_rx->qr_ring_id);

        qat_msg_cmn_mid_populate(&initmsg->comn_mid, cb, 0, 0);

        switch (cmd) {
        case FW_INIT_CMD_SET_AE_INFO:
                if (ae % sc->sc_ae_num == 0)
                        init_shram = 1;
                if (ae >= sc->sc_ae_num) {
                        tgt_id = 1;
                        cluster_id = 1;
                } else {
                        cluster_id = 0;
                        if (sc->sc_ae_mask)
                                tgt_id = 0;
                        else
                                tgt_id = 1;
                }
                aehdr = &initmsg->u.set_ae_info;
                aeinfo = &initmsg->u1.set_ae_info;

                aehdr->init_cmd_id = cmd;
                /* XXX does not support a sparse ae_mask */
                aehdr->init_trgt_id = ae;
                aehdr->init_ring_cluster_id = cluster_id;
                aehdr->init_qat_id = tgt_id;

                qat_adm_ring_build_slice_mask(&aehdr->init_slice_mask, srv_mask,
                    init_shram);

                qat_adm_ring_build_shram_mask(&aeinfo->init_shram_mask,
                    sc->sc_admin_rings.qadr_active_aes_per_accel,
                    ae % sc->sc_ae_num);

                break;
        case FW_INIT_CMD_SET_RING_INFO:
                ringhdr = &initmsg->u.set_ring_info;
                ringinfo = &initmsg->u1.set_ring_info;

                ringhdr->init_cmd_id = cmd;
                /* XXX does not support a sparse ae_mask */
                ringhdr->init_trgt_id = ae;

                /* XXX the error return is ignored */
                qat_adm_ring_build_ring_table(sc,
                    ae % sc->sc_ae_num);

                ringhdr->init_ring_tbl_sz = sizeof(struct fw_init_ring_table);

                ringinfo->init_ring_table_ptr =
                    sc->sc_admin_rings.qadr_dma.qdm_dma_seg.ds_addr +
                    ((ae % sc->sc_ae_num) *
                    sizeof(struct fw_init_ring_table));

                break;
        default:
                return ENOTSUP;
        }

        return 0;
}

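/*
 * Send a single init message on the admin TX ring and sleep until the
 * admin RX handler wakes us, or fail after a 1.5 second timeout.
 */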
static int
qat_adm_ring_send_init_msg_sync(struct qat_softc *sc,
    enum fw_init_cmd_id cmd, uint32_t ae)
{
        struct fw_init_req initmsg;
        struct qat_accel_init_cb cb;
        int error;

        error = qat_adm_ring_build_init_msg(sc, &initmsg, cmd, ae, &cb);
        if (error)
                return error;

        error = qat_etr_put_msg(sc, sc->sc_admin_rings.qadr_admin_tx,
            (uint32_t *)&initmsg);
        if (error)
                return error;

        error = tsleep(&cb, PZERO, "qat_init", hz * 3 / 2);
        if (error) {
                device_printf(sc->sc_dev,
                    "Timed out initializing firmware: %d\n", error);
                return error;
        }
        if (cb.qaic_status) {
                device_printf(sc->sc_dev, "Failed to initialize firmware\n");
                return EIO;
        }

        return error;
}

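/*
 * Broadcast an init command to every AE hosting a relevant service;
 * TRNG commands are only sent to AEs providing crypto service A.
 */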
static int
qat_adm_ring_send_init_msg(struct qat_softc *sc,
    enum fw_init_cmd_id cmd)
{
        struct qat_admin_rings *qadr = &sc->sc_admin_rings;
        uint32_t ae;
        int error;

        for (ae = 0; ae < sc->sc_ae_num; ae++) {
                uint8_t srv_mask = qadr->qadr_srv_mask[ae];
                switch (cmd) {
                case FW_INIT_CMD_SET_AE_INFO:
                case FW_INIT_CMD_SET_RING_INFO:
                        if (!srv_mask)
                                continue;
                        break;
                case FW_INIT_CMD_TRNG_ENABLE:
                case FW_INIT_CMD_TRNG_DISABLE:
                        if (!(srv_mask & QAT_SERVICE_CRYPTO_A))
                                continue;
                        break;
                default:
                        return ENOTSUP;
                }

                error = qat_adm_ring_send_init_msg_sync(sc, cmd, ae);
                if (error)
                        return error;
        }

        return 0;
}

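/* Run the two-step firmware handshake: AE info first, then ring info. */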
int
qat_adm_ring_send_init(struct qat_softc *sc)
{
        int error;

        error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_AE_INFO);
        if (error)
                return error;

        error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_RING_INFO);
        if (error)
                return error;

        return 0;
}

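/*
 * Admin RX ring handler: recover the waiting callback structure from
 * the opaque data, record the init status, and wake the sender.
 */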
static int
qat_adm_ring_intr(struct qat_softc *sc, void *arg, void *msg)
{
        struct arch_if_resp_hdr *resp;
        struct fw_init_resp *init_resp;
        struct qat_accel_init_cb *init_cb;
        int handled = 0;

        resp = (struct arch_if_resp_hdr *)msg;

        switch (resp->resp_type) {
        case ARCH_IF_REQ_QAT_FW_INIT:
                init_resp = (struct fw_init_resp *)msg;
                init_cb = (struct qat_accel_init_cb *)
                    (uintptr_t)init_resp->comn_resp.opaque_data;
                init_cb->qaic_status =
                    __SHIFTOUT(init_resp->comn_resp.comn_status,
                    COMN_RESP_INIT_ADMIN_STATUS);
                wakeup(init_cb);
                break;
        default:
                device_printf(sc->sc_dev,
                    "unknown resp type %d\n", resp->resp_type);
                break;
        }

        return handled;
}

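/*
 * Requests from even-numbered banks are steered to auth/cipher slice
 * pair 0, odd-numbered banks to pair 1.
 */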
static inline uint16_t
qat_hw15_get_comn_req_flags(uint8_t ae)
{
        if (ae == 0) {
                return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL |
                    COMN_REQ_AUTH0_SLICE_REQUIRED |
                    COMN_REQ_CIPHER0_SLICE_REQUIRED;
        } else {
                return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL |
                    COMN_REQ_AUTH1_SLICE_REQUIRED |
                    COMN_REQ_CIPHER1_SLICE_REQUIRED;
        }
}

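/*
 * Fill out the cipher control-block header.  Sizes and offsets are in
 * 8-byte quad-words, as the firmware expects.
 */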
static uint32_t
qat_hw15_crypto_setup_cipher_desc(struct qat_crypto_desc *desc,
    struct qat_session *qs, struct fw_cipher_hdr *cipher_hdr,
    uint32_t hw_blk_offset, enum fw_slice next_slice)
{
        desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;

        cipher_hdr->state_padding_sz = 0;
        cipher_hdr->key_sz = qs->qs_cipher_klen / 8;

        cipher_hdr->state_sz = desc->qcd_cipher_blk_sz / 8;

        cipher_hdr->next_id = next_slice;
        cipher_hdr->curr_id = FW_SLICE_CIPHER;
        cipher_hdr->offset = hw_blk_offset / 8;
        cipher_hdr->resrvd = 0;

        return sizeof(struct hw_cipher_config) + qs->qs_cipher_klen;
}

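/*
 * Write the cipher configuration word and key into the content
 * descriptor.  A per-request key in the cryptop overrides the session
 * key.
 */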
static void
qat_hw15_crypto_setup_cipher_config(const struct qat_crypto_desc *desc,
    const struct qat_session *qs, const struct cryptop *crp,
    struct hw_cipher_config *cipher_config)
{
        const uint8_t *key;
        uint8_t *cipher_key;

        cipher_config->val = qat_crypto_load_cipher_session(desc, qs);
        cipher_config->reserved = 0;

        cipher_key = (uint8_t *)(cipher_config + 1);
        if (crp != NULL && crp->crp_cipher_key != NULL)
                key = crp->crp_cipher_key;
        else
                key = qs->qs_cipher_key;
        memcpy(cipher_key, key, qs->qs_cipher_klen);
}

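/*
 * Fill out the auth control-block header and record where the GCM AAD
 * size fields live in the content descriptor so they can be patched
 * per request.
 */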
static uint32_t
qat_hw15_crypto_setup_auth_desc(struct qat_crypto_desc *desc,
    struct qat_session *qs, struct fw_auth_hdr *auth_hdr,
    uint32_t ctrl_blk_offset, uint32_t hw_blk_offset,
    enum fw_slice next_slice)
{
        const struct qat_sym_hash_def *hash_def;

        (void)qat_crypto_load_auth_session(desc, qs, &hash_def);

        auth_hdr->next_id = next_slice;
        auth_hdr->curr_id = FW_SLICE_AUTH;
        auth_hdr->offset = hw_blk_offset / 8;
        auth_hdr->resrvd = 0;

        auth_hdr->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED;
        auth_hdr->u.inner_prefix_sz = 0;
        auth_hdr->outer_prefix_sz = 0;
        auth_hdr->final_sz = hash_def->qshd_alg->qshai_digest_len;
        auth_hdr->inner_state1_sz =
            roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
        auth_hdr->inner_res_sz = hash_def->qshd_alg->qshai_digest_len;
        auth_hdr->inner_state2_sz =
            roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
        auth_hdr->inner_state2_off = auth_hdr->offset +
            ((sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz) / 8);

        auth_hdr->outer_config_off = 0;
        auth_hdr->outer_state1_sz = 0;
        auth_hdr->outer_res_sz = 0;
        auth_hdr->outer_prefix_off = 0;

        desc->qcd_auth_sz = hash_def->qshd_alg->qshai_sah->hashsize;
        desc->qcd_state_storage_sz = (sizeof(struct hw_auth_counter) +
            roundup(hash_def->qshd_alg->qshai_state_size, 8)) / 8;
        desc->qcd_gcm_aad_sz_offset1 = desc->qcd_auth_offset +
            sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz +
            AES_BLOCK_LEN;
        desc->qcd_gcm_aad_sz_offset2 = ctrl_blk_offset +
            offsetof(struct fw_auth_hdr, u.aad_sz);

        return sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz +
            auth_hdr->inner_state2_sz;
}

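/*
 * Initialize the hash state blocks: a precomputed GHASH key for GCM,
 * precomputed inner/outer digests for mode 1 (HMAC), or the plain
 * initial state for mode 0 hashes.
 */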
static void
qat_hw15_crypto_setup_auth_setup(const struct qat_crypto_desc *desc,
    const struct qat_session *qs, const struct cryptop *crp,
    struct hw_auth_setup *auth_setup)
{
        const struct qat_sym_hash_def *hash_def;
        const uint8_t *key;
        uint8_t *state1, *state2;
        uint32_t state_sz, state1_sz, state2_sz, state1_pad_len, state2_pad_len;

        auth_setup->auth_config.config = qat_crypto_load_auth_session(desc, qs,
            &hash_def);
        auth_setup->auth_config.reserved = 0;

        auth_setup->auth_counter.counter =
            htobe32(hash_def->qshd_qat->qshqi_auth_counter);
        auth_setup->auth_counter.reserved = 0;

        /*
         * Compute the state sizes up front; the mode 0 path below needs
         * them for every SHA variant, not just SHA1.
         */
        state_sz = hash_def->qshd_alg->qshai_state_size;
        state1_sz = roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
        state2_sz = roundup(hash_def->qshd_qat->qshqi_state2_len, 8);

        state1 = (uint8_t *)(auth_setup + 1);
        state2 = state1 + state1_sz;
        switch (qs->qs_auth_algo) {
        case HW_AUTH_ALGO_GALOIS_128:
                qat_crypto_gmac_precompute(desc, qs->qs_cipher_key,
                    qs->qs_cipher_klen, hash_def, state2);
                break;
        case HW_AUTH_ALGO_SHA1:
                if (qs->qs_auth_mode == HW_AUTH_MODE1) {
                        state1_pad_len = state1_sz - state_sz;
                        state2_pad_len = state2_sz - state_sz;
                        if (state1_pad_len > 0)
                                memset(state1 + state_sz, 0, state1_pad_len);
                        if (state2_pad_len > 0)
                                memset(state2 + state_sz, 0, state2_pad_len);
                }
                /* FALLTHROUGH */
        case HW_AUTH_ALGO_SHA256:
        case HW_AUTH_ALGO_SHA384:
        case HW_AUTH_ALGO_SHA512:
                switch (qs->qs_auth_mode) {
                case HW_AUTH_MODE0:
                        memcpy(state1, hash_def->qshd_alg->qshai_init_state,
                            state1_sz);
                        /* Override for mode 0 hashes. */
                        auth_setup->auth_counter.counter = 0;
                        break;
                case HW_AUTH_MODE1:
                        if (crp != NULL && crp->crp_auth_key != NULL)
                                key = crp->crp_auth_key;
                        else
                                key = qs->qs_auth_key;
                        if (key != NULL) {
                                qat_crypto_hmac_precompute(desc, key,
                                    qs->qs_auth_klen, hash_def, state1, state2);
                        }
                        break;
                default:
                        panic("%s: unhandled auth mode %d", __func__,
                            qs->qs_auth_mode);
                }
                break;
        default:
                panic("%s: unhandled auth algorithm %d", __func__,
                    qs->qs_auth_algo);
        }
}

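/*
 * Build the per-session content descriptor and cached bulk request:
 * cipher/auth control-block headers first, then the hardware blocks
 * (cipher config, auth setup), in the order given by the slice chain.
 */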
void
qat_hw15_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs,
    struct qat_crypto_desc *desc)
{
        struct fw_cipher_hdr *cipher_hdr;
        struct fw_auth_hdr *auth_hdr;
        struct fw_la_bulk_req *req_cache;
        struct hw_auth_setup *auth_setup;
        struct hw_cipher_config *cipher_config;
        uint32_t ctrl_blk_sz, ctrl_blk_offset, hw_blk_offset;
        int i;
        uint16_t la_cmd_flags;
        uint8_t req_params_sz;
        uint8_t *ctrl_blk_ptr, *hw_blk_ptr;

        ctrl_blk_sz = 0;
        if (qs->qs_cipher_algo != HW_CIPHER_ALGO_NULL)
                ctrl_blk_sz += sizeof(struct fw_cipher_hdr);
        if (qs->qs_auth_algo != HW_AUTH_ALGO_NULL)
                ctrl_blk_sz += sizeof(struct fw_auth_hdr);

        ctrl_blk_ptr = desc->qcd_content_desc;
        ctrl_blk_offset = 0;
        hw_blk_ptr = ctrl_blk_ptr + ctrl_blk_sz;
        hw_blk_offset = 0;

        la_cmd_flags = 0;
        req_params_sz = 0;
        for (i = 0; i < MAX_FW_SLICE; i++) {
                switch (desc->qcd_slices[i]) {
                case FW_SLICE_CIPHER:
                        cipher_hdr = (struct fw_cipher_hdr *)(ctrl_blk_ptr +
                            ctrl_blk_offset);
                        cipher_config = (struct hw_cipher_config *)(hw_blk_ptr +
                            hw_blk_offset);
                        desc->qcd_cipher_offset = ctrl_blk_sz + hw_blk_offset;
                        hw_blk_offset += qat_hw15_crypto_setup_cipher_desc(desc,
                            qs, cipher_hdr, hw_blk_offset,
                            desc->qcd_slices[i + 1]);
                        qat_hw15_crypto_setup_cipher_config(desc, qs, NULL,
                            cipher_config);
                        ctrl_blk_offset += sizeof(struct fw_cipher_hdr);
                        req_params_sz += sizeof(struct fw_la_cipher_req_params);
                        break;
                case FW_SLICE_AUTH:
                        auth_hdr = (struct fw_auth_hdr *)(ctrl_blk_ptr +
                            ctrl_blk_offset);
                        auth_setup = (struct hw_auth_setup *)(hw_blk_ptr +
                            hw_blk_offset);
                        desc->qcd_auth_offset = ctrl_blk_sz + hw_blk_offset;
                        hw_blk_offset += qat_hw15_crypto_setup_auth_desc(desc,
                            qs, auth_hdr, ctrl_blk_offset, hw_blk_offset,
                            desc->qcd_slices[i + 1]);
                        qat_hw15_crypto_setup_auth_setup(desc, qs, NULL,
                            auth_setup);
                        ctrl_blk_offset += sizeof(struct fw_auth_hdr);
                        req_params_sz += sizeof(struct fw_la_auth_req_params);
                        la_cmd_flags |= LA_FLAGS_RET_AUTH_RES;
                        /* no digest verify */
                        break;
                case FW_SLICE_DRAM_WR:
                        i = MAX_FW_SLICE; /* end of chain */
                        break;
                default:
                        MPASS(0);
                        break;
                }
        }

        desc->qcd_hdr_sz = ctrl_blk_offset / 8;
        desc->qcd_hw_blk_sz = hw_blk_offset / 8;

        req_cache = (struct fw_la_bulk_req *)desc->qcd_req_cache;
        qat_msg_req_type_populate(
            &req_cache->comn_hdr.arch_if,
            ARCH_IF_REQ_QAT_FW_LA, 0);

        if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128)
                la_cmd_flags |= LA_FLAGS_PROTO_GCM | LA_FLAGS_GCM_IV_LEN_FLAG;
        else
                la_cmd_flags |= LA_FLAGS_PROTO_NO;

        qat_msg_params_populate(req_cache, desc, req_params_sz,
            la_cmd_flags, 0);

        bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
            qs->qs_desc_mem.qdm_dma_map, BUS_DMASYNC_PREWRITE);
}

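/*
 * When a request carries its own cipher or auth key, rebuild the
 * content descriptor in the per-request cookie and point the request
 * at that copy instead of the session descriptor.
 */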
static void
qat_hw15_crypto_req_setkey(const struct qat_crypto_desc *desc,
    const struct qat_session *qs, struct qat_sym_cookie *qsc,
    struct fw_la_bulk_req *bulk_req, struct cryptop *crp)
{
        struct hw_auth_setup *auth_setup;
        struct hw_cipher_config *cipher_config;
        uint8_t *cdesc;
        int i;

        cdesc = qsc->qsc_content_desc;
        memcpy(cdesc, desc->qcd_content_desc, CONTENT_DESC_MAX_SIZE);
        for (i = 0; i < MAX_FW_SLICE; i++) {
                switch (desc->qcd_slices[i]) {
                case FW_SLICE_CIPHER:
                        cipher_config = (struct hw_cipher_config *)
                            (cdesc + desc->qcd_cipher_offset);
                        qat_hw15_crypto_setup_cipher_config(desc, qs, crp,
                            cipher_config);
                        break;
                case FW_SLICE_AUTH:
                        auth_setup = (struct hw_auth_setup *)
                            (cdesc + desc->qcd_auth_offset);
                        qat_hw15_crypto_setup_auth_setup(desc, qs, crp,
                            auth_setup);
                        break;
                case FW_SLICE_DRAM_WR:
                        i = MAX_FW_SLICE; /* end of chain */
                        break;
                default:
                        MPASS(0);
                }
        }

        bulk_req->comn_hdr.content_desc_addr = qsc->qsc_content_desc_paddr;
}

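/*
 * Fill in the per-request parameter blocks and finish the bulk
 * request.  GCM/GMAC uses a fixed cipher+auth layout with an
 * out-of-line AAD buffer; other commands chain cipher and/or auth
 * blocks as the command requires.
 */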
void
qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *qcb,
    struct qat_session *qs, struct qat_crypto_desc const *desc,
    struct qat_sym_cookie *qsc, struct cryptop *crp)
{
        struct qat_sym_bulk_cookie *qsbc;
        struct fw_la_bulk_req *bulk_req;
        struct fw_la_cipher_req_params *cipher_req;
        struct fw_la_auth_req_params *auth_req;
        bus_addr_t digest_paddr;
        uint8_t *aad_szp2, *req_params_ptr;
        uint32_t aad_sz, *aad_szp1;
        enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;
        enum fw_slice next_slice;

        qsbc = &qsc->qsc_bulk_cookie;

        bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;
        memcpy(bulk_req, &desc->qcd_req_cache, QAT_HW15_SESSION_REQ_CACHE_SIZE);
        bulk_req->comn_hdr.arch_if.resp_pipe_id = qcb->qcb_sym_rx->qr_ring_id;
        bulk_req->comn_hdr.comn_req_flags =
            qat_hw15_get_comn_req_flags(qcb->qcb_bank % 2);
        bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
        if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
                bulk_req->comn_mid.dest_data_addr =
                    qsc->qsc_obuffer_list_desc_paddr;
        } else {
                bulk_req->comn_mid.dest_data_addr =
                    qsc->qsc_buffer_list_desc_paddr;
        }
        bulk_req->req_params_addr = qsc->qsc_bulk_req_params_buf_paddr;
        bulk_req->comn_ftr.next_request_addr = 0;
        bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
        if (__predict_false(crp->crp_cipher_key != NULL ||
            crp->crp_auth_key != NULL)) {
                qat_hw15_crypto_req_setkey(desc, qs, qsc, bulk_req, crp);
        }

        digest_paddr = 0;
        if (desc->qcd_auth_sz != 0)
                digest_paddr = qsc->qsc_auth_res_paddr;

        req_params_ptr = qsbc->qsbc_req_params_buf;
        memset(req_params_ptr, 0, sizeof(qsbc->qsbc_req_params_buf));

        /*
         * The SG list layout is a bit different for GCM and GMAC, so it
         * is simpler to handle those cases separately.
         */
        if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
                cipher_req = (struct fw_la_cipher_req_params *)req_params_ptr;
                auth_req = (struct fw_la_auth_req_params *)
                    (req_params_ptr + sizeof(struct fw_la_cipher_req_params));

                cipher_req->cipher_state_sz = desc->qcd_cipher_blk_sz / 8;
                cipher_req->curr_id = FW_SLICE_CIPHER;
                if (cmd_id == FW_LA_CMD_HASH_CIPHER || cmd_id == FW_LA_CMD_AUTH)
                        cipher_req->next_id = FW_SLICE_DRAM_WR;
                else
                        cipher_req->next_id = FW_SLICE_AUTH;
                cipher_req->state_address = qsc->qsc_iv_buf_paddr;

                if (cmd_id != FW_LA_CMD_AUTH) {
                        /*
                         * Don't fill out the cipher block if we're doing GMAC
                         * only.
                         */
                        cipher_req->cipher_off = 0;
                        cipher_req->cipher_len = crp->crp_payload_length;
                }
                auth_req->curr_id = FW_SLICE_AUTH;
                if (cmd_id == FW_LA_CMD_HASH_CIPHER || cmd_id == FW_LA_CMD_AUTH)
                        auth_req->next_id = FW_SLICE_CIPHER;
                else
                        auth_req->next_id = FW_SLICE_DRAM_WR;

                auth_req->auth_res_address = digest_paddr;
                auth_req->auth_res_sz = desc->qcd_auth_sz;

                auth_req->auth_off = 0;
                auth_req->auth_len = crp->crp_payload_length;

                auth_req->hash_state_sz =
                    roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN) >> 3;
                auth_req->u1.aad_addr = crp->crp_aad_length > 0 ?
                    qsc->qsc_gcm_aad_paddr : 0;

                /*
                 * Update the hash state block if necessary.  This only occurs
                 * when the AAD length changes between requests in a session and
                 * is synchronized by qat_process().
                 */
                aad_sz = htobe32(crp->crp_aad_length);
                aad_szp1 = (uint32_t *)(
                    __DECONST(uint8_t *, desc->qcd_content_desc) +
                    desc->qcd_gcm_aad_sz_offset1);
                aad_szp2 = __DECONST(uint8_t *, desc->qcd_content_desc) +
                    desc->qcd_gcm_aad_sz_offset2;
                if (__predict_false(*aad_szp1 != aad_sz)) {
                        *aad_szp1 = aad_sz;
                        *aad_szp2 = (uint8_t)roundup2(crp->crp_aad_length,
                            QAT_AES_GCM_AAD_ALIGN);
                        bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
                            qs->qs_desc_mem.qdm_dma_map,
                            BUS_DMASYNC_PREWRITE);
                }
        } else {
                cipher_req = (struct fw_la_cipher_req_params *)req_params_ptr;
                if (cmd_id != FW_LA_CMD_AUTH) {
                        if (cmd_id == FW_LA_CMD_CIPHER ||
                            cmd_id == FW_LA_CMD_HASH_CIPHER)
                                next_slice = FW_SLICE_DRAM_WR;
                        else
                                next_slice = FW_SLICE_AUTH;

                        cipher_req->cipher_state_sz =
                            desc->qcd_cipher_blk_sz / 8;

                        cipher_req->curr_id = FW_SLICE_CIPHER;
                        cipher_req->next_id = next_slice;

                        if (crp->crp_aad_length == 0) {
                                cipher_req->cipher_off = 0;
                        } else if (crp->crp_aad == NULL) {
                                cipher_req->cipher_off =
                                    crp->crp_payload_start - crp->crp_aad_start;
                        } else {
                                cipher_req->cipher_off = crp->crp_aad_length;
                        }
                        cipher_req->cipher_len = crp->crp_payload_length;
                        cipher_req->state_address = qsc->qsc_iv_buf_paddr;
                }
                if (cmd_id != FW_LA_CMD_CIPHER) {
                        if (cmd_id == FW_LA_CMD_AUTH)
                                auth_req = (struct fw_la_auth_req_params *)
                                    req_params_ptr;
                        else
                                auth_req = (struct fw_la_auth_req_params *)
                                    (cipher_req + 1);
                        if (cmd_id == FW_LA_CMD_HASH_CIPHER)
                                next_slice = FW_SLICE_CIPHER;
                        else
                                next_slice = FW_SLICE_DRAM_WR;

                        auth_req->curr_id = FW_SLICE_AUTH;
                        auth_req->next_id = next_slice;

                        auth_req->auth_res_address = digest_paddr;
                        auth_req->auth_res_sz = desc->qcd_auth_sz;

                        auth_req->auth_len =
                            crp->crp_payload_length + crp->crp_aad_length;
                        auth_req->auth_off = 0;

                        auth_req->hash_state_sz = 0;
                        auth_req->u1.prefix_addr = desc->qcd_hash_state_paddr +
                            desc->qcd_state_storage_sz;
                }
        }
}
