FreeBSD/Linux Kernel Cross Reference
sys/dev/qat_c2xxx/qat_ae.c


    1 /* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
    2 /*      $NetBSD: qat_ae.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $        */
    3 
    4 /*
    5  * Copyright (c) 2019 Internet Initiative Japan, Inc.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   19  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   20  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   27  * POSSIBILITY OF SUCH DAMAGE.
   28  */
   29 
   30 /*
   31  *   Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
   32  *
   33  *   Redistribution and use in source and binary forms, with or without
   34  *   modification, are permitted provided that the following conditions
   35  *   are met:
   36  *
   37  *     * Redistributions of source code must retain the above copyright
   38  *       notice, this list of conditions and the following disclaimer.
   39  *     * Redistributions in binary form must reproduce the above copyright
   40  *       notice, this list of conditions and the following disclaimer in
   41  *       the documentation and/or other materials provided with the
   42  *       distribution.
   43  *     * Neither the name of Intel Corporation nor the names of its
   44  *       contributors may be used to endorse or promote products derived
   45  *       from this software without specific prior written permission.
   46  *
   47  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   48  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   49  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   50  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   51  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   52  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   53  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   54  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   55  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   56  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   57  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   58  */
   59 
   60 #include <sys/cdefs.h>
   61 __FBSDID("$FreeBSD$");
   62 #if 0
   63 __KERNEL_RCSID(0, "$NetBSD: qat_ae.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
   64 #endif
   65 
   66 #include <sys/param.h>
   67 #include <sys/bus.h>
   68 #include <sys/firmware.h>
   69 #include <sys/limits.h>
   70 #include <sys/systm.h>
   71 
   72 #include <machine/bus.h>
   73 
   74 #include <dev/pci/pcireg.h>
   75 #include <dev/pci/pcivar.h>
   76 
   77 #include "qatreg.h"
   78 #include "qatvar.h"
   79 #include "qat_aevar.h"
   80 
   81 static int      qat_ae_write_4(struct qat_softc *, u_char, bus_size_t,
   82                     uint32_t);
   83 static int      qat_ae_read_4(struct qat_softc *, u_char, bus_size_t,
   84                     uint32_t *);
   85 static void     qat_ae_ctx_indr_write(struct qat_softc *, u_char, uint32_t,
   86                     bus_size_t, uint32_t);
   87 static int      qat_ae_ctx_indr_read(struct qat_softc *, u_char, uint32_t,
   88                     bus_size_t, uint32_t *);
   89 
   90 static u_short  qat_aereg_get_10bit_addr(enum aereg_type, u_short);
   91 static int      qat_aereg_rel_data_write(struct qat_softc *, u_char, u_char,
   92                     enum aereg_type, u_short, uint32_t);
   93 static int      qat_aereg_rel_data_read(struct qat_softc *, u_char, u_char,
   94                     enum aereg_type, u_short, uint32_t *);
   95 static int      qat_aereg_rel_rdxfer_write(struct qat_softc *, u_char, u_char,
   96                     enum aereg_type, u_short, uint32_t);
   97 static int      qat_aereg_rel_wrxfer_write(struct qat_softc *, u_char, u_char,
   98                     enum aereg_type, u_short, uint32_t);
   99 static int      qat_aereg_rel_nn_write(struct qat_softc *, u_char, u_char,
  100                     enum aereg_type, u_short, uint32_t);
  101 static int      qat_aereg_abs_to_rel(struct qat_softc *, u_char, u_short,
  102                     u_short *, u_char *);
  103 static int      qat_aereg_abs_data_write(struct qat_softc *, u_char,
  104                     enum aereg_type, u_short, uint32_t);
  105 
  106 static void     qat_ae_enable_ctx(struct qat_softc *, u_char, u_int);
  107 static void     qat_ae_disable_ctx(struct qat_softc *, u_char, u_int);
  108 static void     qat_ae_write_ctx_mode(struct qat_softc *, u_char, u_char);
  109 static void     qat_ae_write_nn_mode(struct qat_softc *, u_char, u_char);
  110 static void     qat_ae_write_lm_mode(struct qat_softc *, u_char,
  111                     enum aereg_type, u_char);
  112 static void     qat_ae_write_shared_cs_mode0(struct qat_softc *, u_char,
  113                     u_char);
  114 static void     qat_ae_write_shared_cs_mode(struct qat_softc *, u_char, u_char);
  115 static int      qat_ae_set_reload_ustore(struct qat_softc *, u_char, u_int, int,
  116                     u_int);
  117 
  118 static enum qat_ae_status qat_ae_get_status(struct qat_softc *, u_char);
  119 static int      qat_ae_is_active(struct qat_softc *, u_char);
  120 static int      qat_ae_wait_num_cycles(struct qat_softc *, u_char, int, int);
  121 
  122 static int      qat_ae_clear_reset(struct qat_softc *);
  123 static int      qat_ae_check(struct qat_softc *);
  124 static int      qat_ae_reset_timestamp(struct qat_softc *);
  125 static void     qat_ae_clear_xfer(struct qat_softc *);
  126 static int      qat_ae_clear_gprs(struct qat_softc *);
  127 
  128 static void     qat_ae_get_shared_ustore_ae(u_char, u_char *);
  129 static u_int    qat_ae_ucode_parity64(uint64_t);
  130 static uint64_t qat_ae_ucode_set_ecc(uint64_t);
  131 static int      qat_ae_ucode_write(struct qat_softc *, u_char, u_int, u_int,
  132                     const uint64_t *);
  133 static int      qat_ae_ucode_read(struct qat_softc *, u_char, u_int, u_int,
  134                     uint64_t *);
  135 static u_int    qat_ae_concat_ucode(uint64_t *, u_int, u_int, u_int, u_int *);
  136 static int      qat_ae_exec_ucode(struct qat_softc *, u_char, u_char,
  137                     uint64_t *, u_int, int, u_int, u_int *);
  138 static int      qat_ae_exec_ucode_init_lm(struct qat_softc *, u_char, u_char,
  139                     int *, uint64_t *, u_int,
  140                     u_int *, u_int *, u_int *, u_int *, u_int *);
  141 static int      qat_ae_restore_init_lm_gprs(struct qat_softc *, u_char, u_char,
  142                     u_int, u_int, u_int, u_int, u_int);
  143 static int      qat_ae_get_inst_num(int);
  144 static int      qat_ae_batch_put_lm(struct qat_softc *, u_char,
  145                     struct qat_ae_batch_init_list *, size_t);
  146 static int      qat_ae_write_pc(struct qat_softc *, u_char, u_int, u_int);
  147 
  148 static u_int    qat_aefw_csum(char *, int);
  149 static const char *qat_aefw_uof_string(struct qat_softc *, size_t);
  150 static struct uof_chunk_hdr *qat_aefw_uof_find_chunk(struct qat_softc *,
  151                     const char *, struct uof_chunk_hdr *);
  152 
  153 static int      qat_aefw_load_mof(struct qat_softc *);
  154 static void     qat_aefw_unload_mof(struct qat_softc *);
  155 static int      qat_aefw_load_mmp(struct qat_softc *);
  156 static void     qat_aefw_unload_mmp(struct qat_softc *);
  157 
  158 static int      qat_aefw_mof_find_uof0(struct qat_softc *,
  159                     struct mof_uof_hdr *, struct mof_uof_chunk_hdr *,
  160                     u_int, size_t, const char *,
  161                     size_t *, void **);
  162 static int      qat_aefw_mof_find_uof(struct qat_softc *);
  163 static int      qat_aefw_mof_parse(struct qat_softc *);
  164 
  165 static int      qat_aefw_uof_parse_image(struct qat_softc *,
  166                     struct qat_uof_image *, struct uof_chunk_hdr *uch);
  167 static int      qat_aefw_uof_parse_images(struct qat_softc *);
  168 static int      qat_aefw_uof_parse(struct qat_softc *);
  169 
  170 static int      qat_aefw_alloc_auth_dmamem(struct qat_softc *, char *, size_t,
  171                     struct qat_dmamem *);
  172 static int      qat_aefw_auth(struct qat_softc *, struct qat_dmamem *);
  173 static int      qat_aefw_suof_load(struct qat_softc *sc,
  174                     struct qat_dmamem *dma);
  175 static int      qat_aefw_suof_parse_image(struct qat_softc *,
  176                     struct qat_suof_image *, struct suof_chunk_hdr *);
  177 static int      qat_aefw_suof_parse(struct qat_softc *);
  178 static int      qat_aefw_suof_write(struct qat_softc *);
  179 
  180 static int      qat_aefw_uof_assign_image(struct qat_softc *, struct qat_ae *,
  181                     struct qat_uof_image *);
  182 static int      qat_aefw_uof_init_ae(struct qat_softc *, u_char);
  183 static int      qat_aefw_uof_init(struct qat_softc *);
  184 
  185 static int      qat_aefw_init_memory_one(struct qat_softc *,
  186                     struct uof_init_mem *);
  187 static void     qat_aefw_free_lm_init(struct qat_softc *, u_char);
  188 static int      qat_aefw_init_ustore(struct qat_softc *);
  189 static int      qat_aefw_init_reg(struct qat_softc *, u_char, u_char,
  190                     enum aereg_type, u_short, u_int);
  191 static int      qat_aefw_init_reg_sym_expr(struct qat_softc *, u_char,
  192                     struct qat_uof_image *);
  193 static int      qat_aefw_init_memory(struct qat_softc *);
  194 static int      qat_aefw_init_globals(struct qat_softc *);
  195 static uint64_t qat_aefw_get_uof_inst(struct qat_softc *,
  196                     struct qat_uof_page *, u_int);
  197 static int      qat_aefw_do_pagein(struct qat_softc *, u_char,
  198                     struct qat_uof_page *);
  199 static int      qat_aefw_uof_write_one(struct qat_softc *,
  200                     struct qat_uof_image *);
  201 static int      qat_aefw_uof_write(struct qat_softc *);
  202 
  203 static int
  204 qat_ae_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
  205         uint32_t value)
  206 {
  207         int times = TIMEOUT_AE_CSR;
  208 
  209         do {
  210                 qat_ae_local_write_4(sc, ae, offset, value);
  211                 if ((qat_ae_local_read_4(sc, ae, LOCAL_CSR_STATUS) &
  212                     LOCAL_CSR_STATUS_STATUS) == 0)
  213                         return 0;
  214 
  215         } while (times--);
  216 
  217         device_printf(sc->sc_dev,
  218             "couldn't write AE CSR: ae 0x%hhx offset 0x%lx\n", ae, (long)offset);
  219         return EFAULT;
  220 }
  221 
  222 static int
  223 qat_ae_read_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
  224         uint32_t *value)
  225 {
  226         int times = TIMEOUT_AE_CSR;
  227         uint32_t v;
  228 
  229         do {
  230                 v = qat_ae_local_read_4(sc, ae, offset);
  231                 if ((qat_ae_local_read_4(sc, ae, LOCAL_CSR_STATUS) &
  232                     LOCAL_CSR_STATUS_STATUS) == 0) {
  233                         *value = v;
  234                         return 0;
  235                 }
  236         } while (times--);
  237 
  238         device_printf(sc->sc_dev,
  239             "couldn't read AE CSR: ae 0x%hhx offset 0x%lx\n", ae, (long)offset);
  240         return EFAULT;
  241 }
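       /*
        * Both accessors above poll LOCAL_CSR_STATUS after each local CSR
        * access and retry until the status bit clears, giving up after
        * TIMEOUT_AE_CSR attempts.  A typical caller pattern, as a sketch
        * only (it mirrors uses later in this file):
        *
        *      uint32_t en;
        *
        *      if (qat_ae_read_4(sc, ae, CTX_ENABLES, &en) == 0) {
        *              en &= CTX_ENABLES_IGNORE_W1C_MASK;
        *              qat_ae_write_4(sc, ae, CTX_ENABLES, en);
        *      }
        */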
  242 
  243 static void
  244 qat_ae_ctx_indr_write(struct qat_softc *sc, u_char ae, uint32_t ctx_mask,
  245     bus_size_t offset, uint32_t value)
  246 {
  247         int ctx;
  248         uint32_t ctxptr;
  249 
  250         MPASS(offset == CTX_FUTURE_COUNT_INDIRECT ||
  251             offset == FUTURE_COUNT_SIGNAL_INDIRECT ||
  252             offset == CTX_STS_INDIRECT ||
  253             offset == CTX_WAKEUP_EVENTS_INDIRECT ||
  254             offset == CTX_SIG_EVENTS_INDIRECT ||
  255             offset == LM_ADDR_0_INDIRECT ||
  256             offset == LM_ADDR_1_INDIRECT ||
  257             offset == INDIRECT_LM_ADDR_0_BYTE_INDEX ||
  258             offset == INDIRECT_LM_ADDR_1_BYTE_INDEX);
  259 
  260         qat_ae_read_4(sc, ae, CSR_CTX_POINTER, &ctxptr);
  261         for (ctx = 0; ctx < MAX_AE_CTX; ctx++) {
  262                 if ((ctx_mask & (1 << ctx)) == 0)
  263                         continue;
  264                 qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctx);
  265                 qat_ae_write_4(sc, ae, offset, value);
  266         }
  267         qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctxptr);
  268 }
  269 
  270 static int
  271 qat_ae_ctx_indr_read(struct qat_softc *sc, u_char ae, uint32_t ctx,
  272     bus_size_t offset, uint32_t *value)
  273 {
  274         int error;
  275         uint32_t ctxptr;
  276 
  277         MPASS(offset == CTX_FUTURE_COUNT_INDIRECT ||
  278             offset == FUTURE_COUNT_SIGNAL_INDIRECT ||
  279             offset == CTX_STS_INDIRECT ||
  280             offset == CTX_WAKEUP_EVENTS_INDIRECT ||
  281             offset == CTX_SIG_EVENTS_INDIRECT ||
  282             offset == LM_ADDR_0_INDIRECT ||
  283             offset == LM_ADDR_1_INDIRECT ||
  284             offset == INDIRECT_LM_ADDR_0_BYTE_INDEX ||
  285             offset == INDIRECT_LM_ADDR_1_BYTE_INDEX);
  286 
  287         /* save the ctx ptr */
  288         qat_ae_read_4(sc, ae, CSR_CTX_POINTER, &ctxptr);
  289         if ((ctxptr & CSR_CTX_POINTER_CONTEXT) !=
  290             (ctx & CSR_CTX_POINTER_CONTEXT))
  291                 qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctx);
  292 
  293         error = qat_ae_read_4(sc, ae, offset, value);
  294 
  295         /* restore ctx ptr */
  296         if ((ctxptr & CSR_CTX_POINTER_CONTEXT) !=
  297             (ctx & CSR_CTX_POINTER_CONTEXT))
  298                 qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctxptr);
  299 
  300         return error;
  301 }
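       /*
        * The two indirect accessors above steer CSR_CTX_POINTER at the
        * target context(s), touch the per-context CSR, then restore the
        * saved pointer.  For instance, reading context 2's wakeup events
        * (illustrative only):
        *
        *      uint32_t ev;
        *
        *      error = qat_ae_ctx_indr_read(sc, ae, 2,
        *          CTX_WAKEUP_EVENTS_INDIRECT, &ev);
        */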
  302 
  303 static u_short
  304 qat_aereg_get_10bit_addr(enum aereg_type regtype, u_short reg)
  305 {
  306         u_short addr;
  307 
  308         switch (regtype) {
  309         case AEREG_GPA_ABS:
  310         case AEREG_GPB_ABS:
  311                 addr = (reg & 0x7f) | 0x80;
  312                 break;
  313         case AEREG_GPA_REL:
  314         case AEREG_GPB_REL:
  315                 addr = reg & 0x1f;
  316                 break;
  317         case AEREG_SR_RD_REL:
  318         case AEREG_SR_WR_REL:
  319         case AEREG_SR_REL:
  320                 addr = 0x180 | (reg & 0x1f);
  321                 break;
  322         case AEREG_SR_INDX:
  323                 addr = 0x140 | ((reg & 0x3) << 1);
  324                 break;
  325         case AEREG_DR_RD_REL:
  326         case AEREG_DR_WR_REL:
  327         case AEREG_DR_REL:
  328                 addr = 0x1c0 | (reg & 0x1f);
  329                 break;
  330         case AEREG_DR_INDX:
  331                 addr = 0x100 | ((reg & 0x3) << 1);
  332                 break;
  333         case AEREG_NEIGH_INDX:
  334                 addr = 0x241 | ((reg & 0x3) << 1);
  335                 break;
  336         case AEREG_NEIGH_REL:
  337                 addr = 0x280 | (reg & 0x1f);
  338                 break;
  339         case AEREG_LMEM0:
  340                 addr = 0x200;
  341                 break;
  342         case AEREG_LMEM1:
  343                 addr = 0x220;
  344                 break;
  345         case AEREG_NO_DEST:
  346                 addr = 0x300 | (reg & 0xff);
  347                 break;
  348         default:
  349                 addr = AEREG_BAD_REGADDR;
  350                 break;
  351         }
  352         return (addr);
  353 }
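       /*
        * For reference, the 10-bit encodings produced above:
        *
        *      GPA/GPB absolute        0x080 | (reg & 0x7f)
        *      GPA/GPB relative        reg & 0x1f
        *      SR xfer rd/wr/rel       0x180 | (reg & 0x1f)
        *      SR index                0x140 | ((reg & 0x3) << 1)
        *      DR xfer rd/wr/rel       0x1c0 | (reg & 0x1f)
        *      DR index                0x100 | ((reg & 0x3) << 1)
        *      neighbour index         0x241 | ((reg & 0x3) << 1)
        *      neighbour relative      0x280 | (reg & 0x1f)
        *      local memory 0/1        0x200 / 0x220
        *      no destination          0x300 | (reg & 0xff)
        *
        * For example, AEREG_SR_REL register 5 encodes as 0x185.
        */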
  354 
  355 static int
  356 qat_aereg_rel_data_write(struct qat_softc *sc, u_char ae, u_char ctx,
  357     enum aereg_type regtype, u_short relreg, uint32_t value)
  358 {
  359         uint16_t srchi, srclo, destaddr, data16hi, data16lo;
  360         uint64_t inst[] = {
  361                 0x0F440000000ull,       /* immed_w1[reg, val_hi16] */
  362                 0x0F040000000ull,       /* immed_w0[reg, val_lo16] */
  363                 0x0F0000C0300ull,       /* nop */
  364                 0x0E000010000ull        /* ctx_arb[kill] */
  365         };
  366         const int ninst = nitems(inst);
  367         const int imm_w1 = 0, imm_w0 = 1;
  368         unsigned int ctxen;
  369         uint16_t mask;
  370 
   371         /* This logic only works for GPRs and LM index registers,
   372          * not NN or XFER registers! */
  373         MPASS(regtype == AEREG_GPA_REL || regtype == AEREG_GPB_REL ||
  374             regtype == AEREG_LMEM0 || regtype == AEREG_LMEM1);
  375 
  376         if ((regtype == AEREG_GPA_REL) || (regtype == AEREG_GPB_REL)) {
  377                 /* determine the context mode */
  378                 qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
  379                 if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
  380                         /* 4-ctx mode */
  381                         if (ctx & 0x1)
  382                                 return EINVAL;
  383                         mask = 0x1f;
  384                 } else {
  385                         /* 8-ctx mode */
  386                         mask = 0x0f;
  387                 }
  388                 if (relreg & ~mask)
  389                         return EINVAL;
  390         }
  391         if ((destaddr = qat_aereg_get_10bit_addr(regtype, relreg)) ==
  392             AEREG_BAD_REGADDR) {
  393                 return EINVAL;
  394         }
  395 
  396         data16lo = 0xffff & value;
  397         data16hi = 0xffff & (value >> 16);
  398         srchi = qat_aereg_get_10bit_addr(AEREG_NO_DEST,
  399                 (uint16_t)(0xff & data16hi));
  400         srclo = qat_aereg_get_10bit_addr(AEREG_NO_DEST,
  401                 (uint16_t)(0xff & data16lo));
  402 
  403         switch (regtype) {
  404         case AEREG_GPA_REL:     /* A rel source */
  405                 inst[imm_w1] = inst[imm_w1] | ((data16hi >> 8) << 20) |
  406                     ((srchi & 0x3ff) << 10) | (destaddr & 0x3ff);
  407                 inst[imm_w0] = inst[imm_w0] | ((data16lo >> 8) << 20) |
  408                     ((srclo & 0x3ff) << 10) | (destaddr & 0x3ff);
  409                 break;
  410         default:
  411                 inst[imm_w1] = inst[imm_w1] | ((data16hi >> 8) << 20) |
  412                     ((destaddr & 0x3ff) << 10) | (srchi & 0x3ff);
  413                 inst[imm_w0] = inst[imm_w0] | ((data16lo >> 8) << 20) |
  414                     ((destaddr & 0x3ff) << 10) | (srclo & 0x3ff);
  415                 break;
  416         }
  417 
  418         return qat_ae_exec_ucode(sc, ae, ctx, inst, ninst, 1, ninst * 5, NULL);
  419 }
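       /*
        * The routine above patches the two immed_w[01] microwords in
        * place.  Each 16-bit immediate half is split: the low byte rides
        * in the 10-bit "no destination" source-address field (0x300 |
        * byte; bits 19:10 for an A-relative destination, bits 9:0
        * otherwise) and the high byte lands in bits 27:20.  E.g., for
        * value 0x12345678 written to a B-relative GPR, the immed_w1 word
        * gets (0x12 << 20), destaddr in bits 19:10, and srchi 0x334 in
        * bits 9:0 (a sketch of the encoding, derived from the code
        * above).
        */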
  420 
  421 static int
  422 qat_aereg_rel_data_read(struct qat_softc *sc, u_char ae, u_char ctx,
  423     enum aereg_type regtype, u_short relreg, uint32_t *value)
  424 {
  425         uint64_t inst, savucode;
  426         uint32_t ctxen, misc, nmisc, savctx, ctxarbctl, ulo, uhi;
  427         u_int uaddr, ustore_addr;
  428         int error;
  429         u_short mask, regaddr;
  430         u_char nae;
  431 
  432         MPASS(regtype == AEREG_GPA_REL || regtype == AEREG_GPB_REL ||
  433             regtype == AEREG_SR_REL || regtype == AEREG_SR_RD_REL ||
  434             regtype == AEREG_DR_REL || regtype == AEREG_DR_RD_REL ||
  435             regtype == AEREG_LMEM0 || regtype == AEREG_LMEM1);
  436 
  437         if ((regtype == AEREG_GPA_REL) || (regtype == AEREG_GPB_REL) ||
  438             (regtype == AEREG_SR_REL) || (regtype == AEREG_SR_RD_REL) ||
  439             (regtype == AEREG_DR_REL) || (regtype == AEREG_DR_RD_REL))
  440         {
  441                 /* determine the context mode */
  442                 qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
  443                 if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
  444                         /* 4-ctx mode */
  445                         if (ctx & 0x1)
  446                                 return EINVAL;
  447                         mask = 0x1f;
  448                 } else {
  449                         /* 8-ctx mode */
  450                         mask = 0x0f;
  451                 }
  452                 if (relreg & ~mask)
  453                         return EINVAL;
  454         }
  455         if ((regaddr = qat_aereg_get_10bit_addr(regtype, relreg)) ==
  456             AEREG_BAD_REGADDR) {
  457                 return EINVAL;
  458         }
  459 
  460         /* instruction -- alu[--, --, B, reg] */
  461         switch (regtype) {
  462         case AEREG_GPA_REL:
  463                 /* A rel source */
  464                 inst = 0xA070000000ull | (regaddr & 0x3ff);
  465                 break;
  466         default:
  467                 inst = (0xA030000000ull | ((regaddr & 0x3ff) << 10));
  468                 break;
  469         }
  470 
   471         /* back up the shared control store bit, and force the AE into
   472          * non-shared mode before executing the ucode snippet */
  473         qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
  474         if (misc & AE_MISC_CONTROL_SHARE_CS) {
  475                 qat_ae_get_shared_ustore_ae(ae, &nae);
   476                 if (((1 << nae) & sc->sc_ae_mask) && qat_ae_is_active(sc, nae))
  477                         return EBUSY;
  478         }
  479 
  480         nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
  481         qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);
  482 
  483         /* read current context */
  484         qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &savctx);
  485         qat_ae_read_4(sc, ae, CTX_ARB_CNTL, &ctxarbctl);
  486 
  487         qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
   488         /* prevent clearing the W1C bits: the breakpoint bit,
   489          * ECC error bit, and parity error bit */
  490         ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
  491 
  492         /* change the context */
  493         if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO))
  494                 qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
  495                     ctx & ACTIVE_CTX_STATUS_ACNO);
  496         /* save a ustore location */
  497         if ((error = qat_ae_ucode_read(sc, ae, 0, 1, &savucode)) != 0) {
  498                 /* restore AE_MISC_CONTROL csr */
  499                 qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
  500 
  501                 /* restore the context */
  502                 if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) {
  503                         qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
  504                             savctx & ACTIVE_CTX_STATUS_ACNO);
  505                 }
  506                 qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl);
  507 
  508                 return (error);
  509         }
  510 
  511         /* turn off ustore parity */
  512         qat_ae_write_4(sc, ae, CTX_ENABLES,
  513             ctxen & (~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE));
  514 
  515         /* save ustore-addr csr */
  516         qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr);
  517 
  518         /* write the ALU instruction to ustore, enable ecs bit */
  519         uaddr = 0 | USTORE_ADDRESS_ECS;
  520 
  521         /* set the uaddress */
  522         qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
  523         inst = qat_ae_ucode_set_ecc(inst);
  524 
  525         ulo = (uint32_t)(inst & 0xffffffff);
  526         uhi = (uint32_t)(inst >> 32);
  527 
  528         qat_ae_write_4(sc, ae, USTORE_DATA_LOWER, ulo);
  529 
  530         /* this will auto increment the address */
  531         qat_ae_write_4(sc, ae, USTORE_DATA_UPPER, uhi);
  532 
  533         /* set the uaddress */
  534         qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
  535 
  536         /* delay for at least 8 cycles */
  537         qat_ae_wait_num_cycles(sc, ae, 0x8, 0);
  538 
   539         /* read ALU output -- the instruction should have been executed
   540          * prior to clearing the ECS in putUwords */
  541         qat_ae_read_4(sc, ae, ALU_OUT, value);
  542 
  543         /* restore ustore-addr csr */
  544         qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);
  545 
  546         /* restore the ustore */
  547         error = qat_ae_ucode_write(sc, ae, 0, 1, &savucode);
  548 
  549         /* restore the context */
  550         if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) {
  551                 qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
  552                     savctx & ACTIVE_CTX_STATUS_ACNO);
  553         }
  554 
  555         qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl);
  556 
  557         /* restore AE_MISC_CONTROL csr */
  558         qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
  559 
  560         qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
  561 
  562         return error;
  563 }
  564 
  565 static int
  566 qat_aereg_rel_rdxfer_write(struct qat_softc *sc, u_char ae, u_char ctx,
  567     enum aereg_type regtype, u_short relreg, uint32_t value)
  568 {
  569         bus_size_t addr;
  570         int error;
  571         uint32_t ctxen;
  572         u_short mask;
  573         u_short dr_offset;
  574 
  575         MPASS(regtype == AEREG_SR_REL || regtype == AEREG_DR_REL ||
  576             regtype == AEREG_SR_RD_REL || regtype == AEREG_DR_RD_REL);
  577 
  578         error = qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
  579         if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
  580                 if (ctx & 0x1) {
  581                         device_printf(sc->sc_dev,
   582                             "bad ctx argument in 4-ctx mode, ctx=0x%x\n", ctx);
  583                         return EINVAL;
  584                 }
  585                 mask = 0x1f;
  586                 dr_offset = 0x20;
  587 
  588         } else {
  589                 mask = 0x0f;
  590                 dr_offset = 0x10;
  591         }
  592 
  593         if (relreg & ~mask)
  594                 return EINVAL;
  595 
  596         addr = relreg + (ctx << 0x5);
  597 
  598         switch (regtype) {
  599         case AEREG_SR_REL:
  600         case AEREG_SR_RD_REL:
  601                 qat_ae_xfer_write_4(sc, ae, addr, value);
  602                 break;
  603         case AEREG_DR_REL:
  604         case AEREG_DR_RD_REL:
  605                 qat_ae_xfer_write_4(sc, ae, addr + dr_offset, value);
  606                 break;
  607         default:
  608                 error = EINVAL;
  609         }
  610 
  611         return error;
  612 }
  613 
  614 static int
  615 qat_aereg_rel_wrxfer_write(struct qat_softc *sc, u_char ae, u_char ctx,
  616     enum aereg_type regtype, u_short relreg, uint32_t value)
  617 {
  618 
  619         panic("notyet");
  620 
  621         return 0;
  622 }
  623 
  624 static int
  625 qat_aereg_rel_nn_write(struct qat_softc *sc, u_char ae, u_char ctx,
  626     enum aereg_type regtype, u_short relreg, uint32_t value)
  627 {
  628 
  629         panic("notyet");
  630 
  631         return 0;
  632 }
  633 
  634 static int
  635 qat_aereg_abs_to_rel(struct qat_softc *sc, u_char ae,
  636         u_short absreg, u_short *relreg, u_char *ctx)
  637 {
  638         uint32_t ctxen;
  639 
  640         qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
  641         if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
  642                 /* 4-ctx mode */
  643                 *relreg = absreg & 0x1f;
  644                 *ctx = (absreg >> 0x4) & 0x6;
  645         } else {
  646                 /* 8-ctx mode */
  647                 *relreg = absreg & 0x0f;
  648                 *ctx = (absreg >> 0x4) & 0x7;
  649         }
  650 
  651         return 0;
  652 }
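       /*
        * Worked example: in 8-ctx mode, absolute register 0x23 maps to
        * relative register 3 of context 2 (0x23 & 0x0f = 3,
        * (0x23 >> 4) & 0x7 = 2).  In 4-ctx mode the low five bits select
        * the register and only the even contexts 0/2/4/6 are addressable
        * (the 0x6 mask).
        */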
  653 
  654 static int
  655 qat_aereg_abs_data_write(struct qat_softc *sc, u_char ae,
  656         enum aereg_type regtype, u_short absreg, uint32_t value)
  657 {
  658         int error;
  659         u_short relreg;
  660         u_char ctx;
  661 
  662         qat_aereg_abs_to_rel(sc, ae, absreg, &relreg, &ctx);
  663 
  664         switch (regtype) {
  665         case AEREG_GPA_ABS:
  666                 MPASS(absreg < MAX_GPR_REG);
  667                 error = qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL,
  668                     relreg, value);
  669                 break;
  670         case AEREG_GPB_ABS:
  671                 MPASS(absreg < MAX_GPR_REG);
  672                 error = qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL,
  673                     relreg, value);
  674                 break;
  675         case AEREG_DR_RD_ABS:
  676                 MPASS(absreg < MAX_XFER_REG);
  677                 error = qat_aereg_rel_rdxfer_write(sc, ae, ctx, AEREG_DR_RD_REL,
  678                     relreg, value);
  679                 break;
  680         case AEREG_SR_RD_ABS:
  681                 MPASS(absreg < MAX_XFER_REG);
  682                 error = qat_aereg_rel_rdxfer_write(sc, ae, ctx, AEREG_SR_RD_REL,
  683                     relreg, value);
  684                 break;
  685         case AEREG_DR_WR_ABS:
  686                 MPASS(absreg < MAX_XFER_REG);
  687                 error = qat_aereg_rel_wrxfer_write(sc, ae, ctx, AEREG_DR_WR_REL,
  688                     relreg, value);
  689                 break;
  690         case AEREG_SR_WR_ABS:
  691                 MPASS(absreg < MAX_XFER_REG);
  692                 error = qat_aereg_rel_wrxfer_write(sc, ae, ctx, AEREG_SR_WR_REL,
  693                     relreg, value);
  694                 break;
  695         case AEREG_NEIGH_ABS:
  696                 MPASS(absreg < MAX_NN_REG);
  697                 if (absreg >= MAX_NN_REG)
  698                         return EINVAL;
  699                 error = qat_aereg_rel_nn_write(sc, ae, ctx, AEREG_NEIGH_REL,
  700                     relreg, value);
  701                 break;
  702         default:
  703                 panic("Invalid Register Type");
  704         }
  705 
  706         return error;
  707 }
  708 
  709 static void
  710 qat_ae_enable_ctx(struct qat_softc *sc, u_char ae, u_int ctx_mask)
  711 {
  712         uint32_t ctxen;
  713 
  714         qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
  715         ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
  716 
  717         if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
  718                 ctx_mask &= 0x55;
  719         } else {
  720                 ctx_mask &= 0xff;
  721         }
  722 
  723         ctxen |= __SHIFTIN(ctx_mask, CTX_ENABLES_ENABLE);
  724         qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
  725 }
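       /*
        * In 4-ctx mode only the even-numbered contexts 0, 2, 4 and 6
        * exist, hence the 0x55 enable mask above; 8-ctx mode may use all
        * eight enable bits (0xff).
        */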
  726 
  727 static void
  728 qat_ae_disable_ctx(struct qat_softc *sc, u_char ae, u_int ctx_mask)
  729 {
  730         uint32_t ctxen;
  731 
  732         qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
  733         ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
  734         ctxen &= ~(__SHIFTIN(ctx_mask & AE_ALL_CTX, CTX_ENABLES_ENABLE));
  735         qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
  736 }
  737 
  738 static void
  739 qat_ae_write_ctx_mode(struct qat_softc *sc, u_char ae, u_char mode)
  740 {
  741         uint32_t val, nval;
  742 
  743         qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
  744         val &= CTX_ENABLES_IGNORE_W1C_MASK;
  745 
  746         if (mode == 4)
  747                 nval = val | CTX_ENABLES_INUSE_CONTEXTS;
  748         else
  749                 nval = val & ~CTX_ENABLES_INUSE_CONTEXTS;
  750 
  751         if (val != nval)
  752                 qat_ae_write_4(sc, ae, CTX_ENABLES, nval);
  753 }
  754 
  755 static void
  756 qat_ae_write_nn_mode(struct qat_softc *sc, u_char ae, u_char mode)
  757 {
  758         uint32_t val, nval;
  759 
  760         qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
  761         val &= CTX_ENABLES_IGNORE_W1C_MASK;
  762 
  763         if (mode)
  764                 nval = val | CTX_ENABLES_NN_MODE;
  765         else
  766                 nval = val & ~CTX_ENABLES_NN_MODE;
  767 
  768         if (val != nval)
  769                 qat_ae_write_4(sc, ae, CTX_ENABLES, nval);
  770 }
  771 
  772 static void
  773 qat_ae_write_lm_mode(struct qat_softc *sc, u_char ae,
  774         enum aereg_type lm, u_char mode)
  775 {
  776         uint32_t val, nval;
  777         uint32_t bit;
  778 
  779         qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
  780         val &= CTX_ENABLES_IGNORE_W1C_MASK;
  781 
  782         switch (lm) {
  783         case AEREG_LMEM0:
  784                 bit = CTX_ENABLES_LMADDR_0_GLOBAL;
  785                 break;
  786         case AEREG_LMEM1:
  787                 bit = CTX_ENABLES_LMADDR_1_GLOBAL;
  788                 break;
  789         default:
  790                 panic("invalid lmem reg type");
  791                 break;
  792         }
  793 
  794         if (mode)
  795                 nval = val | bit;
  796         else
  797                 nval = val & ~bit;
  798 
  799         if (val != nval)
  800                 qat_ae_write_4(sc, ae, CTX_ENABLES, nval);
  801 }
  802 
  803 static void
  804 qat_ae_write_shared_cs_mode0(struct qat_softc *sc, u_char ae, u_char mode)
  805 {
  806         uint32_t val, nval;
  807 
  808         qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val);
  809 
  810         if (mode == 1)
  811                 nval = val | AE_MISC_CONTROL_SHARE_CS;
  812         else
  813                 nval = val & ~AE_MISC_CONTROL_SHARE_CS;
  814 
  815         if (val != nval)
  816                 qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nval);
  817 }
  818 
  819 static void
  820 qat_ae_write_shared_cs_mode(struct qat_softc *sc, u_char ae, u_char mode)
  821 {
  822         u_char nae;
  823 
  824         qat_ae_get_shared_ustore_ae(ae, &nae);
  825 
  826         qat_ae_write_shared_cs_mode0(sc, ae, mode);
  827 
  828         if ((sc->sc_ae_mask & (1 << nae))) {
  829                 qat_ae_write_shared_cs_mode0(sc, nae, mode);
  830         }
  831 }
  832 
  833 static int
  834 qat_ae_set_reload_ustore(struct qat_softc *sc, u_char ae,
  835         u_int reload_size, int shared_mode, u_int ustore_dram_addr)
  836 {
  837         uint32_t val, cs_reload;
  838 
  839         switch (reload_size) {
  840         case 0:
  841                 cs_reload = 0x0;
  842                 break;
  843         case QAT_2K:
  844                 cs_reload = 0x1;
  845                 break;
  846         case QAT_4K:
  847                 cs_reload = 0x2;
  848                 break;
  849         case QAT_8K:
  850                 cs_reload = 0x3;
  851                 break;
  852         default:
  853                 return EINVAL;
  854         }
  855 
  856         if (cs_reload)
  857                 QAT_AE(sc, ae).qae_ustore_dram_addr = ustore_dram_addr;
  858 
  859         QAT_AE(sc, ae).qae_reload_size = reload_size;
  860 
  861         qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val);
  862         val &= ~(AE_MISC_CONTROL_ONE_CTX_RELOAD |
  863             AE_MISC_CONTROL_CS_RELOAD | AE_MISC_CONTROL_SHARE_CS);
  864         val |= __SHIFTIN(cs_reload, AE_MISC_CONTROL_CS_RELOAD) |
  865             __SHIFTIN(shared_mode, AE_MISC_CONTROL_ONE_CTX_RELOAD);
  866         qat_ae_write_4(sc, ae, AE_MISC_CONTROL, val);
  867 
  868         return 0;
  869 }
  870 
  871 static enum qat_ae_status
  872 qat_ae_get_status(struct qat_softc *sc, u_char ae)
  873 {
  874         int error;
  875         uint32_t val = 0;
  876 
  877         error = qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
  878         if (error || val & CTX_ENABLES_ENABLE)
  879                 return QAT_AE_ENABLED;
  880 
  881         qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &val);
  882         if (val & ACTIVE_CTX_STATUS_ABO)
  883                 return QAT_AE_ACTIVE;
  884 
  885         return QAT_AE_DISABLED;
  886 }
  887 
  888 
  889 static int
  890 qat_ae_is_active(struct qat_softc *sc, u_char ae)
  891 {
  892         uint32_t val;
  893 
  894         if (qat_ae_get_status(sc, ae) != QAT_AE_DISABLED)
  895                 return 1;
  896 
  897         qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &val);
  898         if (val & ACTIVE_CTX_STATUS_ABO)
  899                 return 1;
  900         else
  901                 return 0;
  902 }
  903 
   904 /* returns 1 if we actually waited for the specified number of cycles */
  905 static int
  906 qat_ae_wait_num_cycles(struct qat_softc *sc, u_char ae, int cycles, int check)
  907 {
  908         uint32_t cnt, actx;
  909         int pcnt, ccnt, elapsed, times;
  910 
  911         qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt);
  912         pcnt = cnt & 0xffff;
  913 
  914         times = TIMEOUT_AE_CHECK;
  915         do {
  916                 qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt);
  917                 ccnt = cnt & 0xffff;
  918 
  919                 elapsed = ccnt - pcnt;
  920                 if (elapsed == 0) {
  921                         times--;
  922                 }
  923                 if (times <= 0) {
  924                         device_printf(sc->sc_dev,
  925                             "qat_ae_wait_num_cycles timeout\n");
  926                         return -1;
  927                 }
  928 
  929                 if (elapsed < 0)
  930                         elapsed += 0x10000;
  931 
  932                 if (elapsed >= CYCLES_FROM_READY2EXE && check) {
  933                         if (qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS,
  934                             &actx) == 0) {
  935                                 if ((actx & ACTIVE_CTX_STATUS_ABO) == 0)
  936                                         return 0;
  937                         }
  938                 }
  939         } while (cycles > elapsed);
  940 
  941         if (check && qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &actx) == 0) {
  942                 if ((actx & ACTIVE_CTX_STATUS_ABO) == 0)
  943                         return 0;
  944         }
  945 
  946         return 1;
  947 }
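       /*
        * PROFILE_COUNT is treated as a 16-bit counter here, so the
        * elapsed computation above compensates for wraparound by adding
        * back 0x10000.  E.g., pcnt = 0xfff0 and ccnt = 0x0010 give
        * elapsed = 0x20 cycles.
        */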
  948 
  949 int
  950 qat_ae_init(struct qat_softc *sc)
  951 {
  952         int error;
  953         uint32_t mask, val = 0;
  954         u_char ae;
  955 
  956         /* XXX adf_initSysMemInfo */
  957 
  958         /* XXX Disable clock gating for some chip if debug mode */
  959 
  960         for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
  961                 struct qat_ae *qae = &sc->sc_ae[ae];
  962                 if (!(mask & 1))
  963                         continue;
  964 
  965                 qae->qae_ustore_size = USTORE_SIZE;
  966 
  967                 qae->qae_free_addr = 0;
  968                 qae->qae_free_size = USTORE_SIZE;
  969                 qae->qae_live_ctx_mask = AE_ALL_CTX;
  970                 qae->qae_ustore_dram_addr = 0;
  971                 qae->qae_reload_size = 0;
  972         }
  973 
  974         /* XXX Enable attention interrupt */
  975 
  976         error = qat_ae_clear_reset(sc);
  977         if (error)
  978                 return error;
  979 
  980         qat_ae_clear_xfer(sc);
  981 
  982         if (!sc->sc_hw.qhw_fw_auth) {
  983                 error = qat_ae_clear_gprs(sc);
  984                 if (error)
  985                         return error;
  986         }
  987 
  988         /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
  989         for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
  990                 if (!(mask & 1))
  991                         continue;
  992                 qat_ae_read_4(sc, ae, SIGNATURE_ENABLE, &val);
  993                 val |= 0x1;
  994                 qat_ae_write_4(sc, ae, SIGNATURE_ENABLE, val);
  995         }
  996 
  997         error = qat_ae_clear_reset(sc);
  998         if (error)
  999                 return error;
 1000 
 1001         /* XXX XXX XXX Clean MMP memory if mem scrub is supported */
 1002         /* halMem_ScrubMMPMemory */
 1003 
 1004         return 0;
 1005 }
 1006 
 1007 int
 1008 qat_ae_start(struct qat_softc *sc)
 1009 {
 1010         int error;
 1011         u_char ae;
 1012 
 1013         for (ae = 0; ae < sc->sc_ae_num; ae++) {
 1014                 if ((sc->sc_ae_mask & (1 << ae)) == 0)
 1015                         continue;
 1016 
 1017                 error = qat_aefw_start(sc, ae, 0xff);
 1018                 if (error)
 1019                         return error;
 1020         }
 1021 
 1022         return 0;
 1023 }
 1024 
 1025 void
 1026 qat_ae_cluster_intr(void *arg)
 1027 {
 1028         /* Nothing to implement until we support SRIOV. */
 1029         printf("qat_ae_cluster_intr\n");
 1030 }
 1031 
 1032 static int
 1033 qat_ae_clear_reset(struct qat_softc *sc)
 1034 {
 1035         int error;
 1036         uint32_t times, reset, clock, reg, mask;
 1037         u_char ae;
 1038 
 1039         reset = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_RESET);
 1040         reset &= ~(__SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_RESET_AE_MASK));
 1041         reset &= ~(__SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_RESET_ACCEL_MASK));
 1042         times = TIMEOUT_AE_RESET;
 1043         do {
 1044                 qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_RESET, reset);
 1045                 if ((times--) == 0) {
 1046                         device_printf(sc->sc_dev, "couldn't reset AEs\n");
 1047                         return EBUSY;
 1048                 }
 1049                 reg = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_RESET);
 1050         } while ((__SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_RESET_AE_MASK) |
 1051             __SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_RESET_ACCEL_MASK))
 1052             & reg);
 1053 
 1054         /* Enable clock for AE and QAT */
 1055         clock = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_CLK_EN);
 1056         clock |= __SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_CLK_EN_AE_MASK);
 1057         clock |= __SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_CLK_EN_ACCEL_MASK);
 1058         qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_CLK_EN, clock);
 1059 
 1060         error = qat_ae_check(sc);
 1061         if (error)
 1062                 return error;
 1063 
 1064         /*
 1065          * Set undefined power-up/reset states to reasonable default values...
 1066          * just to make sure we're starting from a known point
 1067          */
 1068         for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
 1069                 if (!(mask & 1))
 1070                         continue;
 1071 
 1072                 /* init the ctx_enable */
 1073                 qat_ae_write_4(sc, ae, CTX_ENABLES,
 1074                     CTX_ENABLES_INIT);
 1075 
 1076                 /* initialize the PCs */
 1077                 qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
 1078                     CTX_STS_INDIRECT,
 1079                     UPC_MASK & CTX_STS_INDIRECT_UPC_INIT);
 1080 
 1081                 /* init the ctx_arb */
 1082                 qat_ae_write_4(sc, ae, CTX_ARB_CNTL,
 1083                     CTX_ARB_CNTL_INIT);
 1084 
 1085                 /* enable cc */
 1086                 qat_ae_write_4(sc, ae, CC_ENABLE,
 1087                     CC_ENABLE_INIT);
 1088                 qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
 1089                     CTX_WAKEUP_EVENTS_INDIRECT,
 1090                     CTX_WAKEUP_EVENTS_INDIRECT_INIT);
 1091                 qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
 1092                     CTX_SIG_EVENTS_INDIRECT,
 1093                     CTX_SIG_EVENTS_INDIRECT_INIT);
 1094         }
 1095 
 1096         if ((sc->sc_ae_mask != 0) &&
 1097             sc->sc_flags & QAT_FLAG_ESRAM_ENABLE_AUTO_INIT) {
 1098                 /* XXX XXX XXX init eSram only when this is boot time */
 1099         }
 1100 
 1101         if ((sc->sc_ae_mask != 0) &&
 1102             sc->sc_flags & QAT_FLAG_SHRAM_WAIT_READY) {
 1103                 /* XXX XXX XXX wait shram to complete initialization */
 1104         }
 1105 
 1106         qat_ae_reset_timestamp(sc);
 1107 
 1108         return 0;
 1109 }
 1110 
 1111 static int
 1112 qat_ae_check(struct qat_softc *sc)
 1113 {
 1114         int error, times, ae;
 1115         uint32_t cnt, pcnt, mask;
 1116 
 1117         for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
 1118                 if (!(mask & 1))
 1119                         continue;
 1120 
 1121                 times = TIMEOUT_AE_CHECK;
 1122                 error = qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt);
 1123                 if (error) {
 1124                         device_printf(sc->sc_dev,
 1125                             "couldn't access AE %d CSR\n", ae);
 1126                         return error;
 1127                 }
 1128                 pcnt = cnt & 0xffff;
 1129 
 1130                 while (1) {
 1131                         error = qat_ae_read_4(sc, ae,
 1132                             PROFILE_COUNT, &cnt);
 1133                         if (error) {
 1134                                 device_printf(sc->sc_dev,
 1135                                     "couldn't access AE %d CSR\n", ae);
 1136                                 return error;
 1137                         }
 1138                         cnt &= 0xffff;
 1139                         if (cnt == pcnt)
 1140                                 times--;
 1141                         else
 1142                                 break;
 1143                         if (times <= 0) {
 1144                                 device_printf(sc->sc_dev,
 1145                                     "AE %d CSR is useless\n", ae);
 1146                                 return EFAULT;
 1147                         }
 1148                 }
 1149         }
 1150 
 1151         return 0;
 1152 }
 1153 
 1154 static int
 1155 qat_ae_reset_timestamp(struct qat_softc *sc)
 1156 {
 1157         uint32_t misc, mask;
 1158         u_char ae;
 1159 
 1160         /* stop the timestamp timers */
 1161         misc = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_MISC);
 1162         if (misc & CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN) {
 1163                 qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_MISC,
 1164                     misc & (~CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN));
 1165         }
 1166 
 1167         for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
 1168                 if (!(mask & 1))
 1169                         continue;
 1170                 qat_ae_write_4(sc, ae, TIMESTAMP_LOW, 0);
 1171                 qat_ae_write_4(sc, ae, TIMESTAMP_HIGH, 0);
 1172         }
 1173 
 1174         /* start timestamp timers */
 1175         qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_MISC,
 1176             misc | CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN);
 1177 
 1178         return 0;
 1179 }
 1180 
 1181 static void
 1182 qat_ae_clear_xfer(struct qat_softc *sc)
 1183 {
 1184         u_int mask, reg;
 1185         u_char ae;
 1186 
 1187         for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
 1188                 if (!(mask & 1))
 1189                         continue;
 1190 
 1191                 for (reg = 0; reg < MAX_GPR_REG; reg++) {
 1192                         qat_aereg_abs_data_write(sc, ae, AEREG_SR_RD_ABS,
 1193                             reg, 0);
 1194                         qat_aereg_abs_data_write(sc, ae, AEREG_DR_RD_ABS,
 1195                             reg, 0);
 1196                 }
 1197         }
 1198 }
 1199 
 1200 static int
 1201 qat_ae_clear_gprs(struct qat_softc *sc)
 1202 {
 1203         uint32_t val;
 1204         uint32_t saved_ctx = 0;
 1205         int times = TIMEOUT_AE_CHECK, rv;
 1206         u_char ae;
 1207         u_int mask;
 1208 
 1209         for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
 1210                 if (!(mask & 1))
 1211                         continue;
 1212 
  1213                 /* turn off the shared control store bit */
  1214                 qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val);
 1215                 val &= ~AE_MISC_CONTROL_SHARE_CS;
 1216                 qat_ae_write_4(sc, ae, AE_MISC_CONTROL, val);
 1217 
 1218                 /* turn off ucode parity */
 1219                 /* make sure nn_mode is set to self */
 1220                 qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
 1221                 val &= CTX_ENABLES_IGNORE_W1C_MASK;
 1222                 val |= CTX_ENABLES_NN_MODE;
 1223                 val &= ~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE;
 1224                 qat_ae_write_4(sc, ae, CTX_ENABLES, val);
 1225 
 1226                 /* copy instructions to ustore */
 1227                 qat_ae_ucode_write(sc, ae, 0, nitems(ae_clear_gprs_inst),
 1228                     ae_clear_gprs_inst);
 1229 
 1230                 /* set PC */
 1231                 qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_STS_INDIRECT,
 1232                     UPC_MASK & CTX_STS_INDIRECT_UPC_INIT);
 1233 
 1234                 /* save current context */
 1235                 qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &saved_ctx);
 1236                 /* change the active context */
 1237                 /* start the context from ctx 0 */
 1238                 qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, 0);
 1239 
 1240                 /* wakeup-event voluntary */
 1241                 qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
 1242                     CTX_WAKEUP_EVENTS_INDIRECT,
 1243                     CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY);
 1244                 /* clean signals */
 1245                 qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
 1246                     CTX_SIG_EVENTS_INDIRECT, 0);
 1247                 qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, 0);
 1248 
 1249                 qat_ae_enable_ctx(sc, ae, AE_ALL_CTX);
 1250         }
 1251 
 1252         for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
 1253                 if (!(mask & 1))
 1254                         continue;
 1255                 /* wait for AE to finish */
 1256                 do {
 1257                         rv = qat_ae_wait_num_cycles(sc, ae, AE_EXEC_CYCLE, 1);
 1258                 } while (rv && times--);
 1259                 if (times <= 0) {
 1260                         device_printf(sc->sc_dev,
  1261                             "qat_ae_clear_gprs timeout\n");
 1262                         return ETIMEDOUT;
 1263                 }
 1264                 qat_ae_disable_ctx(sc, ae, AE_ALL_CTX);
 1265                 /* change the active context */
 1266                 qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
 1267                     saved_ctx & ACTIVE_CTX_STATUS_ACNO);
 1268                 /* init the ctx_enable */
 1269                 qat_ae_write_4(sc, ae, CTX_ENABLES, CTX_ENABLES_INIT);
 1270                 /* initialize the PCs */
 1271                 qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
 1272                     CTX_STS_INDIRECT, UPC_MASK & CTX_STS_INDIRECT_UPC_INIT);
 1273                 /* init the ctx_arb */
 1274                 qat_ae_write_4(sc, ae, CTX_ARB_CNTL, CTX_ARB_CNTL_INIT);
 1275                 /* enable cc */
 1276                 qat_ae_write_4(sc, ae, CC_ENABLE, CC_ENABLE_INIT);
 1277                 qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
 1278                     CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_INIT);
 1279                 qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_SIG_EVENTS_INDIRECT,
 1280                     CTX_SIG_EVENTS_INDIRECT_INIT);
 1281         }
 1282 
 1283         return 0;
 1284 }
 1285 
 1286 static void
 1287 qat_ae_get_shared_ustore_ae(u_char ae, u_char *nae)
 1288 {
 1289         if (ae & 0x1)
 1290                 *nae = ae - 1;
 1291         else
 1292                 *nae = ae + 1;
 1293 }
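       /*
        * Equivalent to *nae = ae ^ 1: control-store sharing pairs each
        * even-numbered AE with the next odd one, e.g. AE 2 with AE 3.
        */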
 1294 
 1295 static u_int
 1296 qat_ae_ucode_parity64(uint64_t ucode)
 1297 {
 1298 
 1299         ucode ^= ucode >> 1;
 1300         ucode ^= ucode >> 2;
 1301         ucode ^= ucode >> 4;
 1302         ucode ^= ucode >> 8;
 1303         ucode ^= ucode >> 16;
 1304         ucode ^= ucode >> 32;
 1305 
 1306         return ((u_int)(ucode & 1));
 1307 }
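       /*
        * The xor-fold above computes the overall parity of the 64-bit
        * microword: after the six halving steps, bit 0 holds the xor of
        * all 64 input bits, so an input with an even number of set bits
        * yields 0 and an odd number yields 1.
        */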
 1308 
 1309 static uint64_t
 1310 qat_ae_ucode_set_ecc(uint64_t ucode)
 1311 {
 1312         static const uint64_t
 1313                 bit0mask=0xff800007fffULL, bit1mask=0x1f801ff801fULL,
 1314                 bit2mask=0xe387e0781e1ULL, bit3mask=0x7cb8e388e22ULL,
 1315                 bit4mask=0xaf5b2c93244ULL, bit5mask=0xf56d5525488ULL,
 1316                 bit6mask=0xdaf69a46910ULL;
 1317 
 1318         /* clear the ecc bits */
 1319         ucode &= ~(0x7fULL << USTORE_ECC_BIT_0);
 1320 
 1321         ucode |= (uint64_t)qat_ae_ucode_parity64(bit0mask & ucode) <<
 1322             USTORE_ECC_BIT_0;
 1323         ucode |= (uint64_t)qat_ae_ucode_parity64(bit1mask & ucode) <<
 1324             USTORE_ECC_BIT_1;
 1325         ucode |= (uint64_t)qat_ae_ucode_parity64(bit2mask & ucode) <<
 1326             USTORE_ECC_BIT_2;
 1327         ucode |= (uint64_t)qat_ae_ucode_parity64(bit3mask & ucode) <<
 1328             USTORE_ECC_BIT_3;
 1329         ucode |= (uint64_t)qat_ae_ucode_parity64(bit4mask & ucode) <<
 1330             USTORE_ECC_BIT_4;
 1331         ucode |= (uint64_t)qat_ae_ucode_parity64(bit5mask & ucode) <<
 1332             USTORE_ECC_BIT_5;
 1333         ucode |= (uint64_t)qat_ae_ucode_parity64(bit6mask & ucode) <<
 1334             USTORE_ECC_BIT_6;
 1335 
 1336         return (ucode);
 1337 }
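       /*
        * The seven masks above each cover data bits 0-43 of the
        * microword, so every microword carries seven check bits starting
        * at USTORE_ECC_BIT_0 (a Hamming-style code; the exact masks
        * presumably come from Intel's firmware tooling).  The function
        * clears the check-bit field and recomputes all seven parities
        * before the word is written to ustore.
        */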
 1338 
 1339 static int
 1340 qat_ae_ucode_write(struct qat_softc *sc, u_char ae, u_int uaddr, u_int ninst,
 1341         const uint64_t *ucode)
 1342 {
 1343         uint64_t tmp;
 1344         uint32_t ustore_addr, ulo, uhi;
 1345         int i;
 1346 
 1347         qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr);
 1348         uaddr |= USTORE_ADDRESS_ECS;
 1349 
 1350         qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
 1351         for (i = 0; i < ninst; i++) {
 1352                 tmp = qat_ae_ucode_set_ecc(ucode[i]);
 1353                 ulo = (uint32_t)(tmp & 0xffffffff);
 1354                 uhi = (uint32_t)(tmp >> 32);
 1355 
 1356                 qat_ae_write_4(sc, ae, USTORE_DATA_LOWER, ulo);
 1357                 /* this will auto increment the address */
 1358                 qat_ae_write_4(sc, ae, USTORE_DATA_UPPER, uhi);
 1359         }
 1360         qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);
 1361 
 1362         return 0;
 1363 }
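       /*
        * Write sequence above: set USTORE_ADDRESS with the ECS bit, then
        * stream each microword as a lower/upper CSR pair; the write to
        * USTORE_DATA_UPPER completes the access and auto-increments the
        * address, so consecutive words need no further address writes.
        * The saved USTORE_ADDRESS is restored afterwards.
        */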
 1364 
 1365 static int
 1366 qat_ae_ucode_read(struct qat_softc *sc, u_char ae, u_int uaddr, u_int ninst,
 1367     uint64_t *ucode)
 1368 {
 1369         uint32_t misc, ustore_addr, ulo, uhi;
 1370         u_int ii;
 1371         u_char nae;
 1372 
 1373         if (qat_ae_get_status(sc, ae) != QAT_AE_DISABLED)
 1374                 return EBUSY;
 1375 
  1376         /* determine whether the neighbour AE runs with a shared
  1377          * control store */
 1378         qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
 1379         if (misc & AE_MISC_CONTROL_SHARE_CS) {
 1380                 qat_ae_get_shared_ustore_ae(ae, &nae);
 1381                 if ((sc->sc_ae_mask & (1 << nae)) && qat_ae_is_active(sc, nae))
 1382                         return EBUSY;
 1383         }
 1384 
 1385         /* if reloadable, then get it all from dram-ustore */
 1386         if (__SHIFTOUT(misc, AE_MISC_CONTROL_CS_RELOAD))
 1387                 panic("notyet"); /* XXX getReloadUwords */
 1388 
  1389         /* disable SHARE_CS bit to work around a silicon bug */
 1390         qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc & 0xfffffffb);
 1391 
 1392         MPASS(uaddr + ninst <= USTORE_SIZE);
 1393 
 1394         /* save ustore-addr csr */
 1395         qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr);
 1396 
 1397         uaddr |= USTORE_ADDRESS_ECS;    /* enable ecs bit */
 1398         for (ii = 0; ii < ninst; ii++) {
 1399                 qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
 1400 
 1401                 uaddr++;
 1402                 qat_ae_read_4(sc, ae, USTORE_DATA_LOWER, &ulo);
 1403                 qat_ae_read_4(sc, ae, USTORE_DATA_UPPER, &uhi);
 1404                 ucode[ii] = uhi;
 1405                 ucode[ii] = (ucode[ii] << 32) | ulo;
 1406         }
 1407 
  1408         /* restore SHARE_CS bit to work around a silicon bug */
 1409         qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
 1410         qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);
 1411 
 1412         return 0;
 1413 }
 1414 
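       /*
        * Append to ucode[] a canned instruction sequence that stores
        * "size" bytes of "value" at local-memory offset "addr".  One of
        * the 1/2/3/4-byte templates is copied in and its immediate
        * operands are patched; buffers longer than a word append one
        * three-instruction store per word, and the sub-word tail is
        * handled by recursion.  Returns the number of microwords added.
        */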
 1415 static u_int
 1416 qat_ae_concat_ucode(uint64_t *ucode, u_int ninst, u_int size, u_int addr,
 1417     u_int *value)
 1418 {
 1419         const uint64_t *inst_arr;
 1420         u_int ninst0, curvalue;
 1421         int ii, vali, fixup, usize = 0;
 1422 
 1423         if (size == 0)
 1424                 return 0;
 1425 
 1426         ninst0 = ninst;
 1427         vali = 0;
 1428         curvalue = value[vali++];
 1429 
 1430         switch (size) {
 1431         case 0x1:
 1432                 inst_arr = ae_inst_1b;
 1433                 usize = nitems(ae_inst_1b);
 1434                 break;
 1435         case 0x2:
 1436                 inst_arr = ae_inst_2b;
 1437                 usize = nitems(ae_inst_2b);
 1438                 break;
 1439         case 0x3:
 1440                 inst_arr = ae_inst_3b;
 1441                 usize = nitems(ae_inst_3b);
 1442                 break;
 1443         default:
 1444                 inst_arr = ae_inst_4b;
 1445                 usize = nitems(ae_inst_4b);
 1446                 break;
 1447         }
 1448 
 1449         fixup = ninst;
 1450         for (ii = 0; ii < usize; ii++)
 1451                 ucode[ninst++] = inst_arr[ii];
 1452 
 1453         INSERT_IMMED_GPRA_CONST(ucode[fixup], (addr));
 1454         fixup++;
 1455         INSERT_IMMED_GPRA_CONST(ucode[fixup], 0);
 1456         fixup++;
 1457         INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 0));
 1458         fixup++;
 1459         INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 16));
 1460         /* XXX fixup++ ? */
 1461 
 1462         if (size <= 0x4)
 1463                 return (ninst - ninst0);
 1464 
 1465         size -= sizeof(u_int);
 1466         while (size >= sizeof(u_int)) {
 1467                 curvalue = value[vali++];
 1468                 fixup = ninst;
 1469                 ucode[ninst++] = ae_inst_4b[0x2];
 1470                 ucode[ninst++] = ae_inst_4b[0x3];
 1471                 ucode[ninst++] = ae_inst_4b[0x8];
 1472                 INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 16));
 1473                 fixup++;
 1474                 INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 0));
 1475                 /* XXX fixup++ ? */
 1476 
 1477                 addr += sizeof(u_int);
 1478                 size -= sizeof(u_int);
 1479         }
  1480         /* recurse to handle the remaining bytes when fewer than 4 are left */
 1481         ninst +=
 1482             qat_ae_concat_ucode(ucode, ninst, size, addr, value + vali);
 1483 
 1484         return (ninst - ninst0);
 1485 }
 1486 
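       /*
        * Execute a microcode snippet on one context of an idle AE: save
        * the affected CSRs (and, when it fits, the overwritten ustore
        * contents), load the snippet at ustore address 0, run context
        * "ctx" for at most max_cycles, then restore the saved state.
        * The final PC is optionally returned through endpc.
        */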
 1487 static int
 1488 qat_ae_exec_ucode(struct qat_softc *sc, u_char ae, u_char ctx,
 1489     uint64_t *ucode, u_int ninst, int cond_code_off, u_int max_cycles,
 1490     u_int *endpc)
 1491 {
 1492         int error = 0, share_cs = 0;
 1493         uint64_t savucode[MAX_EXEC_INST];
 1494         uint32_t indr_lm_addr_0, indr_lm_addr_1;
 1495         uint32_t indr_lm_addr_byte_0, indr_lm_addr_byte_1;
 1496         uint32_t indr_future_cnt_sig;
 1497         uint32_t indr_sig, active_sig;
 1498         uint32_t wakeup_ev, savpc, savcc, savctx, ctxarbctl;
 1499         uint32_t misc, nmisc, ctxen;
 1500         u_char nae;
 1501 
 1502         MPASS(ninst <= USTORE_SIZE);
 1503 
 1504         if (qat_ae_is_active(sc, ae))
 1505                 return EBUSY;
 1506 
 1507         /* save current LM addr */
 1508         qat_ae_ctx_indr_read(sc, ae, ctx, LM_ADDR_0_INDIRECT, &indr_lm_addr_0);
 1509         qat_ae_ctx_indr_read(sc, ae, ctx, LM_ADDR_1_INDIRECT, &indr_lm_addr_1);
 1510         qat_ae_ctx_indr_read(sc, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
 1511             &indr_lm_addr_byte_0);
 1512         qat_ae_ctx_indr_read(sc, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
 1513             &indr_lm_addr_byte_1);
 1514 
  1515         /* back up the shared control store bit, and force the AE into
  1516            non-shared mode before executing the ucode snippet */
 1517         qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
 1518         if (misc & AE_MISC_CONTROL_SHARE_CS) {
 1519                 share_cs = 1;
 1520                 qat_ae_get_shared_ustore_ae(ae, &nae);
 1521                 if ((sc->sc_ae_mask & (1 << nae)) && qat_ae_is_active(sc, nae))
 1522                         return EBUSY;
 1523         }
 1524         nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
 1525         qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);
 1526 
 1527         /* save current states: */
 1528         if (ninst <= MAX_EXEC_INST) {
 1529                 error = qat_ae_ucode_read(sc, ae, 0, ninst, savucode);
 1530                 if (error) {
 1531                         qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
 1532                         return error;
 1533                 }
 1534         }
 1535 
 1536         /* save wakeup-events */
 1537         qat_ae_ctx_indr_read(sc, ae, ctx, CTX_WAKEUP_EVENTS_INDIRECT,
 1538             &wakeup_ev);
 1539         /* save PC */
 1540         qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT, &savpc);
 1541         savpc &= UPC_MASK;
 1542 
 1543         /* save ctx enables */
 1544         qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
 1545         ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
 1546         /* save conditional-code */
 1547         qat_ae_read_4(sc, ae, CC_ENABLE, &savcc);
 1548         /* save current context */
 1549         qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &savctx);
 1550         qat_ae_read_4(sc, ae, CTX_ARB_CNTL, &ctxarbctl);
 1551 
 1552         /* save indirect csrs */
 1553         qat_ae_ctx_indr_read(sc, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
 1554             &indr_future_cnt_sig);
 1555         qat_ae_ctx_indr_read(sc, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &indr_sig);
 1556         qat_ae_read_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, &active_sig);
 1557 
 1558         /* turn off ucode parity */
 1559         qat_ae_write_4(sc, ae, CTX_ENABLES,
 1560             ctxen & ~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE);
 1561 
 1562         /* copy instructions to ustore */
 1563         qat_ae_ucode_write(sc, ae, 0, ninst, ucode);
 1564         /* set PC */
 1565         qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_STS_INDIRECT, 0);
 1566         /* change the active context */
 1567         qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
 1568             ctx & ACTIVE_CTX_STATUS_ACNO);
 1569 
 1570         if (cond_code_off) {
  1571                 /* disable conditional-code */
 1572                 qat_ae_write_4(sc, ae, CC_ENABLE, savcc & 0xffffdfff);
 1573         }
 1574 
 1575         /* wakeup-event voluntary */
 1576         qat_ae_ctx_indr_write(sc, ae, 1 << ctx,
 1577             CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY);
 1578 
 1579         /* clean signals */
 1580         qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_SIG_EVENTS_INDIRECT, 0);
 1581         qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, 0);
 1582 
 1583         /* enable context */
 1584         qat_ae_enable_ctx(sc, ae, 1 << ctx);
 1585 
 1586         /* wait for it to finish */
 1587         if (qat_ae_wait_num_cycles(sc, ae, max_cycles, 1) != 0)
 1588                 error = ETIMEDOUT;
 1589 
 1590         /* see if we need to get the current PC */
 1591         if (endpc != NULL) {
 1592                 uint32_t ctx_status;
 1593 
 1594                 qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT,
 1595                     &ctx_status);
 1596                 *endpc = ctx_status & UPC_MASK;
 1597         }
 1598 #if 0
 1599         {
 1600                 uint32_t ctx_status;
 1601 
 1602                 qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT,
 1603                     &ctx_status);
 1604                 printf("%s: endpc 0x%08x\n", __func__,
 1605                     ctx_status & UPC_MASK);
 1606         }
 1607 #endif
 1608 
  1609         /* restore previous states: */
 1610         /* disable context */
 1611         qat_ae_disable_ctx(sc, ae, 1 << ctx);
 1612         if (ninst <= MAX_EXEC_INST) {
 1613                 /* instructions */
 1614                 qat_ae_ucode_write(sc, ae, 0, ninst, savucode);
 1615         }
 1616         /* wakeup-events */
 1617         qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_WAKEUP_EVENTS_INDIRECT,
 1618             wakeup_ev);
 1619         qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_STS_INDIRECT, savpc);
 1620 
  1621         /* only restore the shared control store bit; other bits
  1622            might have been changed by the AE code snippet */
 1623         qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
 1624         if (share_cs)
 1625                 nmisc = misc | AE_MISC_CONTROL_SHARE_CS;
 1626         else
 1627                 nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
 1628         qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);
 1629         /* conditional-code */
 1630         qat_ae_write_4(sc, ae, CC_ENABLE, savcc);
 1631         /* change the active context */
 1632         qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
 1633             savctx & ACTIVE_CTX_STATUS_ACNO);
  1634         /* restore the next ctx to run */
 1635         qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl);
 1636         /* restore current LM addr */
 1637         qat_ae_ctx_indr_write(sc, ae, 1 << ctx, LM_ADDR_0_INDIRECT,
 1638             indr_lm_addr_0);
 1639         qat_ae_ctx_indr_write(sc, ae, 1 << ctx, LM_ADDR_1_INDIRECT,
 1640             indr_lm_addr_1);
 1641         qat_ae_ctx_indr_write(sc, ae, 1 << ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
 1642             indr_lm_addr_byte_0);
 1643         qat_ae_ctx_indr_write(sc, ae, 1 << ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
 1644             indr_lm_addr_byte_1);
 1645 
 1646         /* restore indirect csrs */
 1647         qat_ae_ctx_indr_write(sc, ae, 1 << ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
 1648             indr_future_cnt_sig);
 1649         qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_SIG_EVENTS_INDIRECT,
 1650             indr_sig);
 1651         qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, active_sig);
 1652 
 1653         /* ctx-enables */
 1654         qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
 1655 
 1656         return error;
 1657 }
 1658 
 1659 static int
 1660 qat_ae_exec_ucode_init_lm(struct qat_softc *sc, u_char ae, u_char ctx,
 1661     int *first_exec, uint64_t *ucode, u_int ninst,
 1662     u_int *gpr_a0, u_int *gpr_a1, u_int *gpr_a2, u_int *gpr_b0, u_int *gpr_b1)
 1663 {
 1664 
 1665         if (*first_exec) {
 1666                 qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 0, gpr_a0);
 1667                 qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 1, gpr_a1);
 1668                 qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 2, gpr_a2);
 1669                 qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPB_REL, 0, gpr_b0);
 1670                 qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPB_REL, 1, gpr_b1);
 1671                 *first_exec = 0;
 1672         }
 1673 
 1674         return qat_ae_exec_ucode(sc, ae, ctx, ucode, ninst, 1, ninst * 5, NULL);
 1675 }
 1676 
 1677 static int
 1678 qat_ae_restore_init_lm_gprs(struct qat_softc *sc, u_char ae, u_char ctx,
 1679     u_int gpr_a0, u_int gpr_a1, u_int gpr_a2, u_int gpr_b0, u_int gpr_b1)
 1680 {
 1681         qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 0, gpr_a0);
 1682         qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 1, gpr_a1);
 1683         qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 2, gpr_a2);
 1684         qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, 0, gpr_b0);
 1685         qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, 1, gpr_b1);
 1686 
 1687         return 0;
 1688 }
 1689 
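       /*
        * Number of microwords qat_ae_concat_ucode() will emit for a
        * local-memory init of lmsize bytes.
        */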
 1690 static int
 1691 qat_ae_get_inst_num(int lmsize)
 1692 {
 1693         int ninst, left;
 1694 
 1695         if (lmsize == 0)
 1696                 return 0;
 1697 
 1698         left = lmsize % sizeof(u_int);
 1699 
 1700         if (left) {
 1701                 ninst = nitems(ae_inst_1b) +
 1702                     qat_ae_get_inst_num(lmsize - left);
 1703         } else {
  1704                 /* 3 instructions are needed for each further word */
 1705                 ninst = (lmsize - sizeof(u_int)) * 3 / 4 + nitems(ae_inst_4b);
 1706         }
 1707 
 1708         return (ninst);
 1709 }
 1710 
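       /*
        * Flush a list of batched local-memory initializations: store
        * snippets are concatenated into a ucode buffer, which is
        * terminated with ctx_arb[kill] and executed on context 0
        * whenever the next snippet would overflow it.  The GPRs the
        * snippets clobber are saved on first execution and restored
        * once everything has run.
        */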
 1711 static int
 1712 qat_ae_batch_put_lm(struct qat_softc *sc, u_char ae,
 1713     struct qat_ae_batch_init_list *qabi_list, size_t nqabi)
 1714 {
 1715         struct qat_ae_batch_init *qabi;
 1716         size_t alloc_ninst, ninst;
 1717         uint64_t *ucode;
 1718         u_int gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1;
 1719         int insnsz, error = 0, execed = 0, first_exec = 1;
 1720 
 1721         if (STAILQ_FIRST(qabi_list) == NULL)
 1722                 return 0;
 1723 
 1724         alloc_ninst = min(USTORE_SIZE, nqabi);
 1725         ucode = qat_alloc_mem(sizeof(uint64_t) * alloc_ninst);
 1726 
 1727         ninst = 0;
 1728         STAILQ_FOREACH(qabi, qabi_list, qabi_next) {
 1729                 insnsz = qat_ae_get_inst_num(qabi->qabi_size);
 1730                 if (insnsz + ninst > alloc_ninst) {
 1731                         /* add ctx_arb[kill] */
 1732                         ucode[ninst++] = 0x0E000010000ull;
 1733                         execed = 1;
 1734 
 1735                         error = qat_ae_exec_ucode_init_lm(sc, ae, 0,
 1736                             &first_exec, ucode, ninst,
 1737                             &gpr_a0, &gpr_a1, &gpr_a2, &gpr_b0, &gpr_b1);
 1738                         if (error) {
 1739                                 qat_ae_restore_init_lm_gprs(sc, ae, 0,
 1740                                     gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1);
 1741                                 qat_free_mem(ucode);
 1742                                 return error;
 1743                         }
  1744                         /* the buffered microcode has run; refill from the start */
 1745                         ninst = 0;
 1746                 }
 1747                 ninst += qat_ae_concat_ucode(ucode, ninst,
 1748                     qabi->qabi_size, qabi->qabi_addr, qabi->qabi_value);
 1749         }
 1750 
 1751         if (ninst > 0) {
 1752                 ucode[ninst++] = 0x0E000010000ull;
 1753                 execed = 1;
 1754 
 1755                 error = qat_ae_exec_ucode_init_lm(sc, ae, 0,
 1756                     &first_exec, ucode, ninst,
 1757                     &gpr_a0, &gpr_a1, &gpr_a2, &gpr_b0, &gpr_b1);
 1758         }
 1759         if (execed) {
 1760                 qat_ae_restore_init_lm_gprs(sc, ae, 0,
 1761                     gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1);
 1762         }
 1763 
 1764         qat_free_mem(ucode);
 1765 
 1766         return error;
 1767 }
 1768 
 1769 static int
 1770 qat_ae_write_pc(struct qat_softc *sc, u_char ae, u_int ctx_mask, u_int upc)
 1771 {
 1772 
 1773         if (qat_ae_is_active(sc, ae))
 1774                 return EBUSY;
 1775 
 1776         qat_ae_ctx_indr_write(sc, ae, ctx_mask, CTX_STS_INDIRECT,
 1777             UPC_MASK & upc);
 1778         return 0;
 1779 }
 1780 
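       /*
        * Fold one byte into a running CRC of CRC_WIDTH bits, MSB first,
        * using the polynomial CRC_POLY; qat_aefw_csum() below applies
        * this over a whole firmware image.
        */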
 1781 static inline u_int
 1782 qat_aefw_csum_calc(u_int reg, int ch)
 1783 {
 1784         int i;
 1785         u_int topbit = CRC_BITMASK(CRC_WIDTH - 1);
 1786         u_int inbyte = (u_int)((reg >> 0x18) ^ ch);
 1787 
 1788         reg ^= inbyte << (CRC_WIDTH - 0x8);
 1789         for (i = 0; i < 0x8; i++) {
 1790                 if (reg & topbit)
 1791                         reg = (reg << 1) ^ CRC_POLY;
 1792                 else
 1793                         reg <<= 1;
 1794         }
 1795 
 1796         return (reg & CRC_WIDTHMASK(CRC_WIDTH));
 1797 }
 1798 
 1799 static u_int
 1800 qat_aefw_csum(char *buf, int size)
 1801 {
 1802         u_int csum = 0;
 1803 
 1804         while (size--) {
 1805                 csum = qat_aefw_csum_calc(csum, *buf++);
 1806         }
 1807 
 1808         return csum;
 1809 }
 1810 
 1811 static const char *
 1812 qat_aefw_uof_string(struct qat_softc *sc, size_t offset)
 1813 {
 1814         if (offset >= sc->sc_aefw_uof.qafu_str_tab_size)
 1815                 return NULL;
 1816         if (sc->sc_aefw_uof.qafu_str_tab == NULL)
 1817                 return NULL;
 1818 
 1819         return (const char *)((uintptr_t)sc->sc_aefw_uof.qafu_str_tab + offset);
 1820 }
 1821 
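       /*
        * Walk the chunk headers that follow the UOF object header and
        * return the first chunk with the given id located after "cur"
        * (or the first match when cur is NULL).  Chunks that would
        * extend past the mapped UOF terminate the search.
        */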
 1822 static struct uof_chunk_hdr *
 1823 qat_aefw_uof_find_chunk(struct qat_softc *sc,
 1824         const char *id, struct uof_chunk_hdr *cur)
 1825 {
 1826         struct uof_obj_hdr *uoh = sc->sc_aefw_uof.qafu_obj_hdr;
 1827         struct uof_chunk_hdr *uch;
 1828         int i;
 1829 
 1830         uch = (struct uof_chunk_hdr *)(uoh + 1);
 1831         for (i = 0; i < uoh->uoh_num_chunks; i++, uch++) {
 1832                 if (uch->uch_offset + uch->uch_size > sc->sc_aefw_uof.qafu_size)
 1833                         return NULL;
 1834 
 1835                 if (cur < uch && !strncmp(uch->uch_id, id, UOF_OBJ_ID_LEN))
 1836                         return uch;
 1837         }
 1838 
 1839         return NULL;
 1840 }
 1841 
 1842 static int
 1843 qat_aefw_load_mof(struct qat_softc *sc)
 1844 {
 1845         const struct firmware *fw;
 1846 
 1847         fw = firmware_get(sc->sc_hw.qhw_mof_fwname);
 1848         if (fw == NULL) {
 1849                 device_printf(sc->sc_dev, "couldn't load MOF firmware %s\n",
 1850                     sc->sc_hw.qhw_mof_fwname);
 1851                 return ENXIO;
 1852         }
 1853 
 1854         sc->sc_fw_mof = qat_alloc_mem(fw->datasize);
 1855         sc->sc_fw_mof_size = fw->datasize;
 1856         memcpy(sc->sc_fw_mof, fw->data, fw->datasize);
 1857         firmware_put(fw, FIRMWARE_UNLOAD);
 1858         return 0;
 1859 }
 1860 
 1861 static void
 1862 qat_aefw_unload_mof(struct qat_softc *sc)
 1863 {
 1864         if (sc->sc_fw_mof != NULL) {
 1865                 qat_free_mem(sc->sc_fw_mof);
 1866                 sc->sc_fw_mof = NULL;
 1867         }
 1868 }
 1869 
 1870 static int
 1871 qat_aefw_load_mmp(struct qat_softc *sc)
 1872 {
 1873         const struct firmware *fw;
 1874 
 1875         fw = firmware_get(sc->sc_hw.qhw_mmp_fwname);
 1876         if (fw == NULL) {
  1877                 device_printf(sc->sc_dev, "couldn't load MMP firmware %s\n",
 1878                     sc->sc_hw.qhw_mmp_fwname);
 1879                 return ENXIO;
 1880         }
 1881 
 1882         sc->sc_fw_mmp = qat_alloc_mem(fw->datasize);
 1883         sc->sc_fw_mmp_size = fw->datasize;
 1884         memcpy(sc->sc_fw_mmp, fw->data, fw->datasize);
 1885         firmware_put(fw, FIRMWARE_UNLOAD);
 1886         return 0;
 1887 }
 1888 
 1889 static void
 1890 qat_aefw_unload_mmp(struct qat_softc *sc)
 1891 {
 1892         if (sc->sc_fw_mmp != NULL) {
 1893                 qat_free_mem(sc->sc_fw_mmp);
 1894                 sc->sc_fw_mmp = NULL;
 1895         }
 1896 }
 1897 
 1898 static int
 1899 qat_aefw_mof_find_uof0(struct qat_softc *sc,
 1900         struct mof_uof_hdr *muh, struct mof_uof_chunk_hdr *head,
 1901         u_int nchunk, size_t size, const char *id,
 1902         size_t *fwsize, void **fwptr)
 1903 {
 1904         int i;
 1905         char *uof_name;
 1906 
 1907         for (i = 0; i < nchunk; i++) {
 1908                 struct mof_uof_chunk_hdr *much = &head[i];
 1909 
 1910                 if (strncmp(much->much_id, id, MOF_OBJ_ID_LEN))
 1911                         return EINVAL;
 1912 
 1913                 if (much->much_offset + much->much_size > size)
 1914                         return EINVAL;
 1915 
 1916                 if (sc->sc_mof.qmf_sym_size <= much->much_name)
 1917                         return EINVAL;
 1918 
 1919                 uof_name = (char *)((uintptr_t)sc->sc_mof.qmf_sym +
 1920                     much->much_name);
 1921 
 1922                 if (!strcmp(uof_name, sc->sc_fw_uof_name)) {
 1923                         *fwptr = (void *)((uintptr_t)muh +
 1924                             (uintptr_t)much->much_offset);
 1925                         *fwsize = (size_t)much->much_size;
 1926                         return 0;
 1927                 }
 1928         }
 1929 
 1930         return ENOENT;
 1931 }
 1932 
 1933 static int
 1934 qat_aefw_mof_find_uof(struct qat_softc *sc)
 1935 {
 1936         struct mof_uof_hdr *uof_hdr, *suof_hdr;
 1937         u_int nuof_chunks = 0, nsuof_chunks = 0;
 1938         int error;
 1939 
 1940         uof_hdr = sc->sc_mof.qmf_uof_objs;
 1941         suof_hdr = sc->sc_mof.qmf_suof_objs;
 1942 
 1943         if (uof_hdr != NULL) {
 1944                 if (uof_hdr->muh_max_chunks < uof_hdr->muh_num_chunks) {
 1945                         return EINVAL;
 1946                 }
 1947                 nuof_chunks = uof_hdr->muh_num_chunks;
 1948         }
 1949         if (suof_hdr != NULL) {
 1950                 if (suof_hdr->muh_max_chunks < suof_hdr->muh_num_chunks)
 1951                         return EINVAL;
 1952                 nsuof_chunks = suof_hdr->muh_num_chunks;
 1953         }
 1954 
 1955         if (nuof_chunks + nsuof_chunks == 0)
 1956                 return EINVAL;
 1957 
 1958         if (uof_hdr != NULL) {
 1959                 error = qat_aefw_mof_find_uof0(sc, uof_hdr,
 1960                     (struct mof_uof_chunk_hdr *)(uof_hdr + 1), nuof_chunks,
 1961                     sc->sc_mof.qmf_uof_objs_size, UOF_IMAG,
 1962                     &sc->sc_fw_uof_size, &sc->sc_fw_uof);
 1963                 if (error && error != ENOENT)
 1964                         return error;
 1965         }
 1966 
 1967         if (suof_hdr != NULL) {
 1968                 error = qat_aefw_mof_find_uof0(sc, suof_hdr,
 1969                     (struct mof_uof_chunk_hdr *)(suof_hdr + 1), nsuof_chunks,
 1970                     sc->sc_mof.qmf_suof_objs_size, SUOF_IMAG,
 1971                     &sc->sc_fw_suof_size, &sc->sc_fw_suof);
 1972                 if (error && error != ENOENT)
 1973                         return error;
 1974         }
 1975 
 1976         if (sc->sc_fw_uof == NULL && sc->sc_fw_suof == NULL)
 1977                 return ENOENT;
 1978 
 1979         return 0;
 1980 }
 1981 
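       /*
        * Validate the MOF container (file id, checksum, version) and
        * record its top-level chunks: the symbol table (SYM_OBJS) and
        * the plain (UOF_OBJS) and signed (SUOF_OBJS) firmware objects.
        * Finally, locate the object matching sc_fw_uof_name.
        */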
 1982 static int
 1983 qat_aefw_mof_parse(struct qat_softc *sc)
 1984 {
 1985         const struct mof_file_hdr *mfh;
 1986         const struct mof_file_chunk_hdr *mfch;
 1987         size_t size;
 1988         u_int csum;
 1989         int error, i;
 1990 
 1991         size = sc->sc_fw_mof_size;
 1992 
 1993         if (size < sizeof(struct mof_file_hdr))
 1994                 return EINVAL;
 1995         size -= sizeof(struct mof_file_hdr);
 1996 
 1997         mfh = sc->sc_fw_mof;
 1998 
 1999         if (mfh->mfh_fid != MOF_FID)
 2000                 return EINVAL;
 2001 
 2002         csum = qat_aefw_csum((char *)((uintptr_t)sc->sc_fw_mof +
 2003             offsetof(struct mof_file_hdr, mfh_min_ver)),
 2004             sc->sc_fw_mof_size -
 2005             offsetof(struct mof_file_hdr, mfh_min_ver));
 2006         if (mfh->mfh_csum != csum)
 2007                 return EINVAL;
 2008 
 2009         if (mfh->mfh_min_ver != MOF_MIN_VER ||
 2010             mfh->mfh_maj_ver != MOF_MAJ_VER)
 2011                 return EINVAL;
 2012 
 2013         if (mfh->mfh_max_chunks < mfh->mfh_num_chunks)
 2014                 return EINVAL;
 2015 
 2016         if (size < sizeof(struct mof_file_chunk_hdr) * mfh->mfh_num_chunks)
 2017                 return EINVAL;
 2018         mfch = (const struct mof_file_chunk_hdr *)(mfh + 1);
 2019 
 2020         for (i = 0; i < mfh->mfh_num_chunks; i++, mfch++) {
 2021                 if (mfch->mfch_offset + mfch->mfch_size > sc->sc_fw_mof_size)
 2022                         return EINVAL;
 2023 
 2024                 if (!strncmp(mfch->mfch_id, SYM_OBJS, MOF_OBJ_ID_LEN)) {
 2025                         if (sc->sc_mof.qmf_sym != NULL)
 2026                                 return EINVAL;
 2027 
 2028                         sc->sc_mof.qmf_sym =
 2029                             (void *)((uintptr_t)sc->sc_fw_mof +
 2030                             (uintptr_t)mfch->mfch_offset + sizeof(u_int));
 2031                         sc->sc_mof.qmf_sym_size =
 2032                             *(u_int *)((uintptr_t)sc->sc_fw_mof +
 2033                             (uintptr_t)mfch->mfch_offset);
 2034 
 2035                         if (sc->sc_mof.qmf_sym_size % sizeof(u_int) != 0)
 2036                                 return EINVAL;
 2037                         if (mfch->mfch_size != sc->sc_mof.qmf_sym_size +
 2038                             sizeof(u_int) || mfch->mfch_size == 0)
 2039                                 return EINVAL;
 2040                         if (*(char *)((uintptr_t)sc->sc_mof.qmf_sym +
 2041                             sc->sc_mof.qmf_sym_size - 1) != '\0')
 2042                                 return EINVAL;
 2043 
 2044                 } else if (!strncmp(mfch->mfch_id, UOF_OBJS, MOF_OBJ_ID_LEN)) {
 2045                         if (sc->sc_mof.qmf_uof_objs != NULL)
 2046                                 return EINVAL;
 2047 
 2048                         sc->sc_mof.qmf_uof_objs =
 2049                             (void *)((uintptr_t)sc->sc_fw_mof +
 2050                             (uintptr_t)mfch->mfch_offset);
 2051                         sc->sc_mof.qmf_uof_objs_size = mfch->mfch_size;
 2052 
 2053                 } else if (!strncmp(mfch->mfch_id, SUOF_OBJS, MOF_OBJ_ID_LEN)) {
 2054                         if (sc->sc_mof.qmf_suof_objs != NULL)
 2055                                 return EINVAL;
 2056 
 2057                         sc->sc_mof.qmf_suof_objs =
 2058                             (void *)((uintptr_t)sc->sc_fw_mof +
 2059                             (uintptr_t)mfch->mfch_offset);
 2060                         sc->sc_mof.qmf_suof_objs_size = mfch->mfch_size;
 2061                 }
 2062         }
 2063 
 2064         if (sc->sc_mof.qmf_sym == NULL ||
 2065             (sc->sc_mof.qmf_uof_objs == NULL &&
 2066             sc->sc_mof.qmf_suof_objs == NULL))
 2067                 return EINVAL;
 2068 
 2069         error = qat_aefw_mof_find_uof(sc);
 2070         if (error)
 2071                 return error;
 2072         return 0;
 2073 }
 2074 
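       /*
        * Build the in-memory description of one UOF image: resolve the
        * register and symbol tables and, for every code page, the fixup
        * tables and uword blocks, bounds-checking each table against
        * the end of the chunk.
        */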
 2075 static int
 2076 qat_aefw_uof_parse_image(struct qat_softc *sc,
 2077         struct qat_uof_image *qui, struct uof_chunk_hdr *uch)
 2078 {
 2079         struct uof_image *image;
 2080         struct uof_code_page *page;
 2081         uintptr_t base = (uintptr_t)sc->sc_aefw_uof.qafu_obj_hdr;
 2082         size_t lim = uch->uch_offset + uch->uch_size, size;
 2083         int i, p;
 2084 
 2085         size = uch->uch_size;
 2086         if (size < sizeof(struct uof_image))
 2087                 return EINVAL;
 2088         size -= sizeof(struct uof_image);
 2089 
 2090         qui->qui_image = image =
 2091             (struct uof_image *)(base + uch->uch_offset);
 2092 
 2093 #define ASSIGN_OBJ_TAB(np, typep, type, base, off, lim)                 \
 2094 do {                                                                    \
 2095         u_int nent;                                                     \
 2096         nent = ((struct uof_obj_table *)((base) + (off)))->uot_nentries;\
 2097         if ((lim) < off + sizeof(struct uof_obj_table) +                \
 2098             sizeof(type) * nent)                                        \
 2099                 return EINVAL;                                          \
 2100         *(np) = nent;                                                   \
 2101         if (nent > 0)                                                   \
 2102                 *(typep) = (type)((struct uof_obj_table *)              \
 2103                     ((base) + (off)) + 1);                              \
 2104         else                                                            \
 2105                 *(typep) = NULL;                                        \
 2106 } while (0)
 2107 
 2108         ASSIGN_OBJ_TAB(&qui->qui_num_ae_reg, &qui->qui_ae_reg,
 2109             struct uof_ae_reg *, base, image->ui_reg_tab, lim);
 2110         ASSIGN_OBJ_TAB(&qui->qui_num_init_reg_sym, &qui->qui_init_reg_sym,
 2111             struct uof_init_reg_sym *, base, image->ui_init_reg_sym_tab, lim);
 2112         ASSIGN_OBJ_TAB(&qui->qui_num_sbreak, &qui->qui_sbreak,
 2113             struct qui_sbreak *, base, image->ui_sbreak_tab, lim);
 2114 
 2115         if (size < sizeof(struct uof_code_page) * image->ui_num_pages)
 2116                 return EINVAL;
 2117         if (nitems(qui->qui_pages) < image->ui_num_pages)
 2118                 return EINVAL;
 2119 
 2120         page = (struct uof_code_page *)(image + 1);
 2121 
 2122         for (p = 0; p < image->ui_num_pages; p++, page++) {
 2123                 struct qat_uof_page *qup = &qui->qui_pages[p];
 2124                 struct uof_code_area *uca;
 2125 
 2126                 qup->qup_page_num = page->ucp_page_num;
 2127                 qup->qup_def_page = page->ucp_def_page;
 2128                 qup->qup_page_region = page->ucp_page_region;
 2129                 qup->qup_beg_vaddr = page->ucp_beg_vaddr;
 2130                 qup->qup_beg_paddr = page->ucp_beg_paddr;
 2131 
 2132                 ASSIGN_OBJ_TAB(&qup->qup_num_uc_var, &qup->qup_uc_var,
 2133                     struct uof_uword_fixup *, base,
 2134                     page->ucp_uc_var_tab, lim);
 2135                 ASSIGN_OBJ_TAB(&qup->qup_num_imp_var, &qup->qup_imp_var,
 2136                     struct uof_import_var *, base,
 2137                     page->ucp_imp_var_tab, lim);
 2138                 ASSIGN_OBJ_TAB(&qup->qup_num_imp_expr, &qup->qup_imp_expr,
 2139                     struct uof_uword_fixup *, base,
 2140                     page->ucp_imp_expr_tab, lim);
 2141                 ASSIGN_OBJ_TAB(&qup->qup_num_neigh_reg, &qup->qup_neigh_reg,
 2142                     struct uof_uword_fixup *, base,
 2143                     page->ucp_neigh_reg_tab, lim);
 2144 
 2145                 if (lim < page->ucp_code_area + sizeof(struct uof_code_area))
 2146                         return EINVAL;
 2147 
 2148                 uca = (struct uof_code_area *)(base + page->ucp_code_area);
 2149                 qup->qup_num_micro_words = uca->uca_num_micro_words;
 2150 
 2151                 ASSIGN_OBJ_TAB(&qup->qup_num_uw_blocks, &qup->qup_uw_blocks,
 2152                     struct qat_uof_uword_block *, base,
 2153                     uca->uca_uword_block_tab, lim);
 2154 
 2155                 for (i = 0; i < qup->qup_num_uw_blocks; i++) {
 2156                         u_int uwordoff = ((struct uof_uword_block *)(
 2157                             &qup->qup_uw_blocks[i]))->uub_uword_offset;
 2158 
 2159                         if (lim < uwordoff)
 2160                                 return EINVAL;
 2161 
 2162                         qup->qup_uw_blocks[i].quub_micro_words =
 2163                             (base + uwordoff);
 2164                 }
 2165         }
 2166 
 2167 #undef ASSIGN_OBJ_TAB
 2168 
 2169         return 0;
 2170 }
 2171 
 2172 static int
 2173 qat_aefw_uof_parse_images(struct qat_softc *sc)
 2174 {
 2175         struct uof_chunk_hdr *uch = NULL;
 2176         int i, error;
 2177 
 2178         for (i = 0; i < MAX_NUM_AE * MAX_AE_CTX; i++) {
 2179                 uch = qat_aefw_uof_find_chunk(sc, UOF_IMAG, uch);
 2180                 if (uch == NULL)
 2181                         break;
 2182 
 2183                 if (i >= nitems(sc->sc_aefw_uof.qafu_imgs))
 2184                         return ENOENT;
 2185 
 2186                 error = qat_aefw_uof_parse_image(sc, &sc->sc_aefw_uof.qafu_imgs[i], uch);
 2187                 if (error)
 2188                         return error;
 2189 
 2190                 sc->sc_aefw_uof.qafu_num_imgs++;
 2191         }
 2192 
 2193         return 0;
 2194 }
 2195 
 2196 static int
 2197 qat_aefw_uof_parse(struct qat_softc *sc)
 2198 {
 2199         struct uof_file_hdr *ufh;
 2200         struct uof_file_chunk_hdr *ufch;
 2201         struct uof_obj_hdr *uoh;
 2202         struct uof_chunk_hdr *uch;
 2203         void *uof = NULL;
 2204         size_t size, uof_size, hdr_size;
 2205         uintptr_t base;
 2206         u_int csum;
 2207         int i;
 2208 
 2209         size = sc->sc_fw_uof_size;
 2210         if (size < MIN_UOF_SIZE)
 2211                 return EINVAL;
 2212         size -= sizeof(struct uof_file_hdr);
 2213 
 2214         ufh = sc->sc_fw_uof;
 2215 
 2216         if (ufh->ufh_id != UOF_FID)
 2217                 return EINVAL;
 2218         if (ufh->ufh_min_ver != UOF_MIN_VER || ufh->ufh_maj_ver != UOF_MAJ_VER)
 2219                 return EINVAL;
 2220 
 2221         if (ufh->ufh_max_chunks < ufh->ufh_num_chunks)
 2222                 return EINVAL;
 2223         if (size < sizeof(struct uof_file_chunk_hdr) * ufh->ufh_num_chunks)
 2224                 return EINVAL;
 2225         ufch = (struct uof_file_chunk_hdr *)(ufh + 1);
 2226 
 2227         uof_size = 0;
 2228         for (i = 0; i < ufh->ufh_num_chunks; i++, ufch++) {
 2229                 if (ufch->ufch_offset + ufch->ufch_size > sc->sc_fw_uof_size)
 2230                         return EINVAL;
 2231 
 2232                 if (!strncmp(ufch->ufch_id, UOF_OBJS, UOF_OBJ_ID_LEN)) {
 2233                         if (uof != NULL)
 2234                                 return EINVAL;
 2235 
 2236                         uof =
 2237                             (void *)((uintptr_t)sc->sc_fw_uof +
 2238                             ufch->ufch_offset);
 2239                         uof_size = ufch->ufch_size;
 2240 
 2241                         csum = qat_aefw_csum(uof, uof_size);
 2242                         if (csum != ufch->ufch_csum)
 2243                                 return EINVAL;
 2244                 }
 2245         }
 2246 
 2247         if (uof == NULL)
 2248                 return ENOENT;
 2249 
 2250         size = uof_size;
 2251         if (size < sizeof(struct uof_obj_hdr))
 2252                 return EINVAL;
 2253         size -= sizeof(struct uof_obj_hdr);
 2254 
 2255         uoh = uof;
 2256 
 2257         if (size < sizeof(struct uof_chunk_hdr) * uoh->uoh_num_chunks)
 2258                 return EINVAL;
 2259 
 2260         /* Check if the UOF objects are compatible with the chip */
 2261         if ((uoh->uoh_cpu_type & sc->sc_hw.qhw_prod_type) == 0)
 2262                 return ENOTSUP;
 2263 
 2264         if (uoh->uoh_min_cpu_ver > sc->sc_rev ||
 2265             uoh->uoh_max_cpu_ver < sc->sc_rev)
 2266                 return ENOTSUP;
 2267 
 2268         sc->sc_aefw_uof.qafu_size = uof_size;
 2269         sc->sc_aefw_uof.qafu_obj_hdr = uoh;
 2270 
 2271         base = (uintptr_t)sc->sc_aefw_uof.qafu_obj_hdr;
 2272 
 2273         /* map uof string-table */
 2274         uch = qat_aefw_uof_find_chunk(sc, UOF_STRT, NULL);
 2275         if (uch != NULL) {
 2276                 hdr_size = offsetof(struct uof_str_tab, ust_strings);
 2277                 sc->sc_aefw_uof.qafu_str_tab =
 2278                     (void *)(base + uch->uch_offset + hdr_size);
 2279                 sc->sc_aefw_uof.qafu_str_tab_size = uch->uch_size - hdr_size;
 2280         }
 2281 
 2282         /* get ustore mem inits table -- should be only one */
 2283         uch = qat_aefw_uof_find_chunk(sc, UOF_IMEM, NULL);
 2284         if (uch != NULL) {
 2285                 if (uch->uch_size < sizeof(struct uof_obj_table))
 2286                         return EINVAL;
 2287                 sc->sc_aefw_uof.qafu_num_init_mem = ((struct uof_obj_table *)(base +
 2288                     uch->uch_offset))->uot_nentries;
 2289                 if (sc->sc_aefw_uof.qafu_num_init_mem) {
 2290                         sc->sc_aefw_uof.qafu_init_mem =
 2291                             (struct uof_init_mem *)(base + uch->uch_offset +
 2292                             sizeof(struct uof_obj_table));
 2293                         sc->sc_aefw_uof.qafu_init_mem_size =
 2294                             uch->uch_size - sizeof(struct uof_obj_table);
 2295                 }
 2296         }
 2297 
 2298         uch = qat_aefw_uof_find_chunk(sc, UOF_MSEG, NULL);
 2299         if (uch != NULL) {
 2300                 if (uch->uch_size < sizeof(struct uof_obj_table) +
 2301                     sizeof(struct uof_var_mem_seg))
 2302                         return EINVAL;
 2303                 sc->sc_aefw_uof.qafu_var_mem_seg =
 2304                     (struct uof_var_mem_seg *)(base + uch->uch_offset +
 2305                     sizeof(struct uof_obj_table));
 2306         }
 2307 
 2308         return qat_aefw_uof_parse_images(sc);
 2309 }
 2310 
 2311 static int
 2312 qat_aefw_suof_parse_image(struct qat_softc *sc, struct qat_suof_image *qsi,
 2313     struct suof_chunk_hdr *sch)
 2314 {
 2315         struct qat_aefw_suof *qafs = &sc->sc_aefw_suof;
 2316         struct simg_ae_mode *ae_mode;
 2317         u_int maj_ver;
 2318 
 2319         qsi->qsi_simg_buf = qafs->qafs_suof_buf + sch->sch_offset +
 2320             sizeof(struct suof_obj_hdr);
 2321         qsi->qsi_simg_len =
 2322             ((struct suof_obj_hdr *)
 2323             (qafs->qafs_suof_buf + sch->sch_offset))->soh_img_length;
 2324 
 2325         qsi->qsi_css_header = qsi->qsi_simg_buf;
 2326         qsi->qsi_css_key = qsi->qsi_css_header + sizeof(struct css_hdr);
 2327         qsi->qsi_css_signature = qsi->qsi_css_key +
 2328             CSS_FWSK_MODULUS_LEN + CSS_FWSK_EXPONENT_LEN;
 2329         qsi->qsi_css_simg = qsi->qsi_css_signature + CSS_SIGNATURE_LEN;
 2330 
 2331         ae_mode = (struct simg_ae_mode *)qsi->qsi_css_simg;
 2332         qsi->qsi_ae_mask = ae_mode->sam_ae_mask;
 2333         qsi->qsi_simg_name = (u_long)&ae_mode->sam_simg_name;
 2334         qsi->qsi_appmeta_data = (u_long)&ae_mode->sam_appmeta_data;
 2335         qsi->qsi_fw_type = ae_mode->sam_fw_type;
 2336 
 2337         if (ae_mode->sam_dev_type != sc->sc_hw.qhw_prod_type)
 2338                 return EINVAL;
 2339 
 2340         maj_ver = (QAT_PID_MAJOR_REV | (sc->sc_rev & QAT_PID_MINOR_REV)) & 0xff;
 2341         if ((maj_ver > ae_mode->sam_devmax_ver) ||
 2342             (maj_ver < ae_mode->sam_devmin_ver)) {
 2343                 return EINVAL;
 2344         }
 2345 
 2346         return 0;
 2347 }
 2348 
 2349 static int
 2350 qat_aefw_suof_parse(struct qat_softc *sc)
 2351 {
 2352         struct suof_file_hdr *sfh;
 2353         struct suof_chunk_hdr *sch;
 2354         struct qat_aefw_suof *qafs = &sc->sc_aefw_suof;
 2355         struct qat_suof_image *qsi;
 2356         size_t size;
 2357         u_int csum;
 2358         int ae0_img = MAX_AE;
 2359         int i, error;
 2360 
 2361         size = sc->sc_fw_suof_size;
 2362         if (size < sizeof(struct suof_file_hdr))
 2363                 return EINVAL;
 2364 
 2365         sfh = sc->sc_fw_suof;
 2366 
 2367         if (sfh->sfh_file_id != SUOF_FID)
 2368                 return EINVAL;
 2369         if (sfh->sfh_fw_type != 0)
 2370                 return EINVAL;
 2371         if (sfh->sfh_num_chunks <= 1)
 2372                 return EINVAL;
 2373         if (sfh->sfh_min_ver != SUOF_MIN_VER ||
 2374             sfh->sfh_maj_ver != SUOF_MAJ_VER)
 2375                 return EINVAL;
 2376 
 2377         csum = qat_aefw_csum((char *)&sfh->sfh_min_ver,
 2378             size - offsetof(struct suof_file_hdr, sfh_min_ver));
 2379         if (csum != sfh->sfh_check_sum)
 2380                 return EINVAL;
 2381 
 2382         size -= sizeof(struct suof_file_hdr);
 2383 
 2384         qafs->qafs_file_id = SUOF_FID;
 2385         qafs->qafs_suof_buf = sc->sc_fw_suof;
 2386         qafs->qafs_suof_size = sc->sc_fw_suof_size;
 2387         qafs->qafs_check_sum = sfh->sfh_check_sum;
 2388         qafs->qafs_min_ver = sfh->sfh_min_ver;
 2389         qafs->qafs_maj_ver = sfh->sfh_maj_ver;
 2390         qafs->qafs_fw_type = sfh->sfh_fw_type;
 2391 
 2392         if (size < sizeof(struct suof_chunk_hdr))
 2393                 return EINVAL;
 2394         sch = (struct suof_chunk_hdr *)(sfh + 1);
 2395         size -= sizeof(struct suof_chunk_hdr);
 2396 
 2397         if (size < sizeof(struct suof_str_tab))
 2398                 return EINVAL;
 2399         size -= offsetof(struct suof_str_tab, sst_strings);
 2400 
 2401         qafs->qafs_sym_size = ((struct suof_str_tab *)
 2402             (qafs->qafs_suof_buf + sch->sch_offset))->sst_tab_length;
 2403         if (size < qafs->qafs_sym_size)
 2404                 return EINVAL;
 2405         qafs->qafs_sym_str = qafs->qafs_suof_buf + sch->sch_offset +
 2406             offsetof(struct suof_str_tab, sst_strings);
 2407 
 2408         qafs->qafs_num_simgs = sfh->sfh_num_chunks - 1;
 2409         if (qafs->qafs_num_simgs == 0)
 2410                 return EINVAL;
 2411 
 2412         qsi = qat_alloc_mem(
 2413             sizeof(struct qat_suof_image) * qafs->qafs_num_simgs);
 2414         qafs->qafs_simg = qsi;
 2415 
 2416         for (i = 0; i < qafs->qafs_num_simgs; i++) {
 2417                 error = qat_aefw_suof_parse_image(sc, &qsi[i], &sch[i + 1]);
 2418                 if (error)
 2419                         return error;
 2420                 if ((qsi[i].qsi_ae_mask & 0x1) != 0)
 2421                         ae0_img = i;
 2422         }
 2423 
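               /*
                * Move the image covering AE 0 to the end of the array, so
                * that it is the last one authenticated and loaded by
                * qat_aefw_suof_write().
                */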
 2424         if (ae0_img != qafs->qafs_num_simgs - 1) {
 2425                 struct qat_suof_image last_qsi;
 2426 
 2427                 memcpy(&last_qsi, &qsi[qafs->qafs_num_simgs - 1],
 2428                     sizeof(struct qat_suof_image));
 2429                 memcpy(&qsi[qafs->qafs_num_simgs - 1], &qsi[ae0_img],
 2430                     sizeof(struct qat_suof_image));
 2431                 memcpy(&qsi[ae0_img], &last_qsi,
 2432                     sizeof(struct qat_suof_image));
 2433         }
 2434 
 2435         return 0;
 2436 }
 2437 
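       /*
        * Lay out a DMA-able authentication descriptor for the FCU: an
        * auth_chunk header followed by the CSS header, the firmware
        * signing key (modulus, zero padding, exponent), the signature
        * and the image itself, recording the bus address of each piece
        * in the embedded fw_auth_desc.
        */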
 2438 static int
 2439 qat_aefw_alloc_auth_dmamem(struct qat_softc *sc, char *image, size_t size,
 2440     struct qat_dmamem *dma)
 2441 {
 2442         struct css_hdr *css = (struct css_hdr *)image;
 2443         struct auth_chunk *auth_chunk;
 2444         struct fw_auth_desc *auth_desc;
 2445         size_t mapsize, simg_offset = sizeof(struct auth_chunk);
 2446         bus_size_t bus_addr;
 2447         uintptr_t virt_addr;
 2448         int error;
 2449 
 2450         if (size > AE_IMG_OFFSET + CSS_MAX_IMAGE_LEN)
 2451                 return EINVAL;
 2452 
 2453         mapsize = (css->css_fw_type == CSS_AE_FIRMWARE) ?
 2454             CSS_AE_SIMG_LEN + simg_offset :
 2455             size + CSS_FWSK_PAD_LEN + simg_offset;
 2456         error = qat_alloc_dmamem(sc, dma, 1, mapsize, PAGE_SIZE);
 2457         if (error)
 2458                 return error;
 2459 
 2460         memset(dma->qdm_dma_vaddr, 0, mapsize);
 2461 
 2462         auth_chunk = dma->qdm_dma_vaddr;
 2463         auth_chunk->ac_chunk_size = mapsize;
 2464         auth_chunk->ac_chunk_bus_addr = dma->qdm_dma_seg.ds_addr;
 2465 
 2466         virt_addr = (uintptr_t)dma->qdm_dma_vaddr;
 2467         virt_addr += simg_offset;
 2468         bus_addr = auth_chunk->ac_chunk_bus_addr;
 2469         bus_addr += simg_offset;
 2470 
 2471         auth_desc = &auth_chunk->ac_fw_auth_desc;
 2472         auth_desc->fad_css_hdr_high = (uint64_t)bus_addr >> 32;
 2473         auth_desc->fad_css_hdr_low = bus_addr;
 2474 
 2475         memcpy((void *)virt_addr, image, sizeof(struct css_hdr));
 2476         /* pub key */
 2477         virt_addr += sizeof(struct css_hdr);
 2478         bus_addr += sizeof(struct css_hdr);
 2479         image += sizeof(struct css_hdr);
 2480 
 2481         auth_desc->fad_fwsk_pub_high = (uint64_t)bus_addr >> 32;
 2482         auth_desc->fad_fwsk_pub_low = bus_addr;
 2483 
 2484         memcpy((void *)virt_addr, image, CSS_FWSK_MODULUS_LEN);
 2485         memset((void *)(virt_addr + CSS_FWSK_MODULUS_LEN), 0, CSS_FWSK_PAD_LEN);
 2486         memcpy((void *)(virt_addr + CSS_FWSK_MODULUS_LEN + CSS_FWSK_PAD_LEN),
 2487             image + CSS_FWSK_MODULUS_LEN, sizeof(uint32_t));
 2488 
 2489         virt_addr += CSS_FWSK_PUB_LEN;
 2490         bus_addr += CSS_FWSK_PUB_LEN;
 2491         image += CSS_FWSK_MODULUS_LEN + CSS_FWSK_EXPONENT_LEN;
 2492 
 2493         auth_desc->fad_signature_high = (uint64_t)bus_addr >> 32;
 2494         auth_desc->fad_signature_low = bus_addr;
 2495 
 2496         memcpy((void *)virt_addr, image, CSS_SIGNATURE_LEN);
 2497 
 2498         virt_addr += CSS_SIGNATURE_LEN;
 2499         bus_addr += CSS_SIGNATURE_LEN;
 2500         image += CSS_SIGNATURE_LEN;
 2501 
 2502         auth_desc->fad_img_high = (uint64_t)bus_addr >> 32;
 2503         auth_desc->fad_img_low = bus_addr;
 2504         auth_desc->fad_img_len = size - AE_IMG_OFFSET;
 2505 
 2506         memcpy((void *)virt_addr, image, auth_desc->fad_img_len);
 2507 
 2508         if (css->css_fw_type == CSS_AE_FIRMWARE) {
 2509                 auth_desc->fad_img_ae_mode_data_high = auth_desc->fad_img_high;
 2510                 auth_desc->fad_img_ae_mode_data_low = auth_desc->fad_img_low;
 2511 
 2512                 bus_addr += sizeof(struct simg_ae_mode);
 2513 
 2514                 auth_desc->fad_img_ae_init_data_high = (uint64_t)bus_addr >> 32;
 2515                 auth_desc->fad_img_ae_init_data_low = bus_addr;
 2516 
 2517                 bus_addr += SIMG_AE_INIT_SEQ_LEN;
 2518 
 2519                 auth_desc->fad_img_ae_insts_high = (uint64_t)bus_addr >> 32;
 2520                 auth_desc->fad_img_ae_insts_low = bus_addr;
 2521         } else {
 2522                 auth_desc->fad_img_ae_insts_high = auth_desc->fad_img_high;
 2523                 auth_desc->fad_img_ae_insts_low = auth_desc->fad_img_low;
 2524         }
 2525 
 2526         bus_dmamap_sync(dma->qdm_dma_tag, dma->qdm_dma_map,
 2527             BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 2528 
 2529         return 0;
 2530 }
 2531 
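       /*
        * Point the Firmware Control Unit at the authentication
        * descriptor and poll FCU_STATUS until signature verification
        * either completes or fails.
        */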
 2532 static int
 2533 qat_aefw_auth(struct qat_softc *sc, struct qat_dmamem *dma)
 2534 {
 2535         bus_addr_t addr;
 2536         uint32_t fcu, sts;
 2537         int retry = 0;
 2538 
 2539         addr = dma->qdm_dma_seg.ds_addr;
 2540         qat_cap_global_write_4(sc, FCU_DRAM_ADDR_HI, (uint64_t)addr >> 32);
 2541         qat_cap_global_write_4(sc, FCU_DRAM_ADDR_LO, addr);
 2542         qat_cap_global_write_4(sc, FCU_CTRL, FCU_CTRL_CMD_AUTH);
 2543 
 2544         do {
 2545                 DELAY(FW_AUTH_WAIT_PERIOD * 1000);
 2546                 fcu = qat_cap_global_read_4(sc, FCU_STATUS);
 2547                 sts = __SHIFTOUT(fcu, FCU_STATUS_STS);
 2548                 if (sts == FCU_STATUS_STS_VERI_FAIL)
 2549                         goto fail;
 2550                 if (fcu & FCU_STATUS_AUTHFWLD &&
 2551                     sts == FCU_STATUS_STS_VERI_DONE) {
 2552                         return 0;
 2553                 }
 2554         } while (retry++ < FW_AUTH_MAX_RETRY);
 2555 
 2556 fail:
 2557         device_printf(sc->sc_dev,
 2558            "firmware authentication error: status 0x%08x retry %d\n",
 2559            fcu, retry);
 2560         return EINVAL;
 2561 }
 2562 
 2563 static int
 2564 qat_aefw_suof_load(struct qat_softc *sc, struct qat_dmamem *dma)
 2565 {
 2566         struct simg_ae_mode *ae_mode;
 2567         uint32_t fcu, sts, loaded;
 2568         u_int mask;
 2569         u_char ae;
 2570         int retry = 0;
 2571 
 2572         ae_mode = (struct simg_ae_mode *)((uintptr_t)dma->qdm_dma_vaddr +
 2573             sizeof(struct auth_chunk) + sizeof(struct css_hdr) +
 2574             CSS_FWSK_PUB_LEN + CSS_SIGNATURE_LEN);
 2575 
 2576         for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
 2577                 if (!(mask & 1))
 2578                         continue;
 2579                 if (!((ae_mode->sam_ae_mask >> ae) & 0x1))
 2580                         continue;
 2581                 if (qat_ae_is_active(sc, ae)) {
 2582                         device_printf(sc->sc_dev, "AE %d is active\n", ae);
 2583                         return EINVAL;
 2584                 }
 2585                 qat_cap_global_write_4(sc, FCU_CTRL,
 2586                     FCU_CTRL_CMD_LOAD | __SHIFTIN(ae, FCU_CTRL_AE));
 2587                 do {
 2588                         DELAY(FW_AUTH_WAIT_PERIOD * 1000);
 2589                         fcu = qat_cap_global_read_4(sc, FCU_STATUS);
 2590                         sts = __SHIFTOUT(fcu, FCU_STATUS_STS);
 2591                         loaded = __SHIFTOUT(fcu, FCU_STATUS_LOADED_AE);
 2592                         if (sts == FCU_STATUS_STS_LOAD_DONE &&
 2593                             (loaded & (1 << ae))) {
 2594                                 break;
 2595                         }
 2596                 } while (retry++ < FW_AUTH_MAX_RETRY);
 2597 
 2598                 if (retry > FW_AUTH_MAX_RETRY) {
 2599                         device_printf(sc->sc_dev,
 2600                             "firmware load timeout: status %08x\n", fcu);
 2601                         return EINVAL;
 2602                 }
 2603         }
 2604 
 2605         return 0;
 2606 }
 2607 
 2608 static int
 2609 qat_aefw_suof_write(struct qat_softc *sc)
 2610 {
 2611         struct qat_suof_image *qsi;
 2612         int i, error = 0;
 2613 
 2614         for (i = 0; i < sc->sc_aefw_suof.qafs_num_simgs; i++) {
 2615                 qsi = &sc->sc_aefw_suof.qafs_simg[i];
 2616                 error = qat_aefw_alloc_auth_dmamem(sc, qsi->qsi_simg_buf,
 2617                     qsi->qsi_simg_len, &qsi->qsi_dma);
 2618                 if (error)
 2619                         return error;
 2620                 error = qat_aefw_auth(sc, &qsi->qsi_dma);
 2621                 if (error) {
 2622                         qat_free_dmamem(sc, &qsi->qsi_dma);
 2623                         return error;
 2624                 }
 2625                 error = qat_aefw_suof_load(sc, &qsi->qsi_dma);
 2626                 if (error) {
 2627                         qat_free_dmamem(sc, &qsi->qsi_dma);
 2628                         return error;
 2629                 }
 2630                 qat_free_dmamem(sc, &qsi->qsi_dma);
 2631         }
 2632         qat_free_mem(sc->sc_aefw_suof.qafs_simg);
 2633 
 2634         return 0;
 2635 }
 2636 
 2637 static int
 2638 qat_aefw_uof_assign_image(struct qat_softc *sc, struct qat_ae *qae,
 2639         struct qat_uof_image *qui)
 2640 {
 2641         struct qat_ae_slice *slice;
 2642         int i, npages, nregions;
 2643 
 2644         if (qae->qae_num_slices >= nitems(qae->qae_slices))
 2645                 return ENOENT;
 2646 
 2647         if (qui->qui_image->ui_ae_mode &
 2648             (AE_MODE_RELOAD_CTX_SHARED | AE_MODE_SHARED_USTORE)) {
 2649                 /* XXX */
 2650                 device_printf(sc->sc_dev,
 2651                     "shared ae mode is not supported yet\n");
 2652                 return ENOTSUP;
 2653         }
 2654 
 2655         qae->qae_shareable_ustore = 0; /* XXX */
 2656         qae->qae_effect_ustore_size = USTORE_SIZE;
 2657 
 2658         slice = &qae->qae_slices[qae->qae_num_slices];
 2659 
 2660         slice->qas_image = qui;
 2661         slice->qas_assigned_ctx_mask = qui->qui_image->ui_ctx_assigned;
 2662 
 2663         nregions = qui->qui_image->ui_num_page_regions;
 2664         npages = qui->qui_image->ui_num_pages;
 2665 
 2666         if (nregions > nitems(slice->qas_regions))
 2667                 return ENOENT;
 2668         if (npages > nitems(slice->qas_pages))
 2669                 return ENOENT;
 2670 
 2671         for (i = 0; i < nregions; i++) {
 2672                 STAILQ_INIT(&slice->qas_regions[i].qar_waiting_pages);
 2673         }
 2674         for (i = 0; i < npages; i++) {
 2675                 struct qat_ae_page *page = &slice->qas_pages[i];
 2676                 int region;
 2677 
 2678                 page->qap_page = &qui->qui_pages[i];
 2679                 region = page->qap_page->qup_page_region;
 2680                 if (region >= nregions)
 2681                         return EINVAL;
 2682 
 2683                 page->qap_region = &slice->qas_regions[region];
 2684         }
 2685 
 2686         qae->qae_num_slices++;
 2687 
 2688         return 0;
 2689 }
 2690 
 2691 static int
 2692 qat_aefw_uof_init_ae(struct qat_softc *sc, u_char ae)
 2693 {
 2694         struct uof_image *image;
 2695         struct qat_ae *qae = &(QAT_AE(sc, ae));
 2696         int s;
 2697         u_char nn_mode;
 2698 
 2699         for (s = 0; s < qae->qae_num_slices; s++) {
 2700                 if (qae->qae_slices[s].qas_image == NULL)
 2701                         continue;
 2702 
 2703                 image = qae->qae_slices[s].qas_image->qui_image;
 2704                 qat_ae_write_ctx_mode(sc, ae,
 2705                     __SHIFTOUT(image->ui_ae_mode, AE_MODE_CTX_MODE));
 2706 
 2707                 nn_mode = __SHIFTOUT(image->ui_ae_mode, AE_MODE_NN_MODE);
 2708                 if (nn_mode != AE_MODE_NN_MODE_DONTCARE)
 2709                         qat_ae_write_nn_mode(sc, ae, nn_mode);
 2710 
 2711                 qat_ae_write_lm_mode(sc, ae, AEREG_LMEM0,
 2712                     __SHIFTOUT(image->ui_ae_mode, AE_MODE_LMEM0));
 2713                 qat_ae_write_lm_mode(sc, ae, AEREG_LMEM1,
 2714                     __SHIFTOUT(image->ui_ae_mode, AE_MODE_LMEM1));
 2715 
 2716                 qat_ae_write_shared_cs_mode(sc, ae,
 2717                     __SHIFTOUT(image->ui_ae_mode, AE_MODE_SHARED_USTORE));
 2718                 qat_ae_set_reload_ustore(sc, ae, image->ui_reloadable_size,
 2719                     __SHIFTOUT(image->ui_ae_mode, AE_MODE_RELOAD_CTX_SHARED),
 2720                     qae->qae_reloc_ustore_dram);
 2721         }
 2722 
 2723         return 0;
 2724 }
 2725 
 2726 static int
 2727 qat_aefw_uof_init(struct qat_softc *sc)
 2728 {
 2729         int ae, i, error;
 2730         uint32_t mask;
 2731 
 2732         for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
 2733                 struct qat_ae *qae;
 2734 
 2735                 if (!(mask & 1))
 2736                         continue;
 2737 
 2738                 qae = &(QAT_AE(sc, ae));
 2739 
 2740                 for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) {
 2741                         if ((sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_ae_assigned &
 2742                             (1 << ae)) == 0)
 2743                                 continue;
 2744 
 2745                         error = qat_aefw_uof_assign_image(sc, qae,
 2746                             &sc->sc_aefw_uof.qafu_imgs[i]);
 2747                         if (error)
 2748                                 return error;
 2749                 }
 2750 
 2751                 /* XXX UcLo_initNumUwordUsed */
 2752 
 2753                 qae->qae_reloc_ustore_dram = UINT_MAX; /* XXX */
 2754 
 2755                 error = qat_aefw_uof_init_ae(sc, ae);
 2756                 if (error)
 2757                         return error;
 2758         }
 2759 
 2760         return 0;
 2761 }
 2762 
 2763 int
 2764 qat_aefw_load(struct qat_softc *sc)
 2765 {
 2766         int error;
 2767 
 2768         error = qat_aefw_load_mof(sc);
 2769         if (error)
 2770                 return error;
 2771 
 2772         error = qat_aefw_load_mmp(sc);
 2773         if (error)
 2774                 return error;
 2775 
 2776         error = qat_aefw_mof_parse(sc);
 2777         if (error) {
 2778                 device_printf(sc->sc_dev, "couldn't parse mof: %d\n", error);
 2779                 return error;
 2780         }
 2781 
 2782         if (sc->sc_hw.qhw_fw_auth) {
 2783                 error = qat_aefw_suof_parse(sc);
 2784                 if (error) {
 2785                         device_printf(sc->sc_dev, "couldn't parse suof: %d\n",
 2786                             error);
 2787                         return error;
 2788                 }
 2789 
 2790                 error = qat_aefw_suof_write(sc);
 2791                 if (error) {
 2792                         device_printf(sc->sc_dev,
 2793                             "could not write firmware: %d\n", error);
 2794                         return error;
 2795                 }
 2796 
 2797         } else {
 2798                 error = qat_aefw_uof_parse(sc);
 2799                 if (error) {
 2800                         device_printf(sc->sc_dev, "couldn't parse uof: %d\n",
 2801                             error);
 2802                         return error;
 2803                 }
 2804 
 2805                 error = qat_aefw_uof_init(sc);
 2806                 if (error) {
 2807                         device_printf(sc->sc_dev,
 2808                             "couldn't init for aefw: %d\n", error);
 2809                         return error;
 2810                 }
 2811 
 2812                 error = qat_aefw_uof_write(sc);
 2813                 if (error) {
 2814                         device_printf(sc->sc_dev,
 2815                             "could not write firmware: %d\n", error);
 2816                         return error;
 2817                 }
 2818         }
 2819 
 2820         return 0;
 2821 }
 2822 
 2823 void
 2824 qat_aefw_unload(struct qat_softc *sc)
 2825 {
 2826         qat_aefw_unload_mmp(sc);
 2827         qat_aefw_unload_mof(sc);
 2828 }
 2829 
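      /*
       * Start the firmware.  On devices with firmware authentication
       * the FCU start command is issued and polled for completion;
       * otherwise the wakeup events of the contexts outside ctx_mask
       * are set to sleep and the requested contexts are enabled.
       */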
 2830 int
 2831 qat_aefw_start(struct qat_softc *sc, u_char ae, u_int ctx_mask)
 2832 {
 2833         uint32_t fcu;
 2834         int retry = 0;
 2835 
 2836         if (sc->sc_hw.qhw_fw_auth) {
 2837                 qat_cap_global_write_4(sc, FCU_CTRL, FCU_CTRL_CMD_START);
 2838                 do {
 2839                         DELAY(FW_AUTH_WAIT_PERIOD * 1000);
 2840                         fcu = qat_cap_global_read_4(sc, FCU_STATUS);
 2841                         if (fcu & FCU_STATUS_DONE)
 2842                                 return 0;
 2843                 } while (retry++ < FW_AUTH_MAX_RETRY);
 2844 
 2845                 device_printf(sc->sc_dev,
 2846                     "firmware start timeout: status %08x\n", fcu);
 2847                 return EINVAL;
 2848         } else {
 2849                 qat_ae_ctx_indr_write(sc, ae, (~ctx_mask) & AE_ALL_CTX,
 2850                     CTX_WAKEUP_EVENTS_INDIRECT,
 2851                     CTX_WAKEUP_EVENTS_INDIRECT_SLEEP);
 2852                 qat_ae_enable_ctx(sc, ae, ctx_mask);
 2853         }
 2854 
 2855         return 0;
 2856 }
 2857 
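      /*
       * Process a single uof_init_mem record.  Only the local-memory
       * (LMEM) region is handled: the symbol name encodes the target
       * AE number, and each value attribute is queued as a 4-byte
       * batch write that qat_aefw_init_memory() later flushes with
       * qat_ae_batch_put_lm().
       */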
 2858 static int
 2859 qat_aefw_init_memory_one(struct qat_softc *sc, struct uof_init_mem *uim)
 2860 {
 2861         struct qat_aefw_uof *qafu = &sc->sc_aefw_uof;
 2862         struct qat_ae_batch_init_list *qabi_list;
 2863         struct uof_mem_val_attr *memattr;
 2864         size_t *curinit;
 2865         u_long ael;
 2866         int i;
 2867         const char *sym;
 2868         char *ep;
 2869 
 2870         memattr = (struct uof_mem_val_attr *)(uim + 1);
 2871 
 2872         switch (uim->uim_region) {
 2873         case LMEM_REGION:
 2874                 if ((uim->uim_addr + uim->uim_num_bytes) > MAX_LMEM_REG * 4) {
 2875                         device_printf(sc->sc_dev,
 2876                             "Invalid lmem addr or bytes\n");
 2877                         return ENOBUFS;
 2878                 }
 2879                 if (uim->uim_scope != UOF_SCOPE_LOCAL)
 2880                         return EINVAL;
 2881                 sym = qat_aefw_uof_string(sc, uim->uim_sym_name);
 2882                 ael = strtoul(sym, &ep, 10);
 2883                 if (ep == sym || ael > MAX_AE)
 2884                         return EINVAL;
 2885                 if ((sc->sc_ae_mask & (1 << ael)) == 0)
 2886                         return 0; /* ae is fused out */
 2887 
 2888                 curinit = &qafu->qafu_num_lm_init[ael];
 2889                 qabi_list = &qafu->qafu_lm_init[ael];
 2890 
 2891                 for (i = 0; i < uim->uim_num_val_attr; i++, memattr++) {
 2892                         struct qat_ae_batch_init *qabi;
 2893 
 2894                         qabi = qat_alloc_mem(sizeof(struct qat_ae_batch_init));
 2895                         if (*curinit == 0)
 2896                                 STAILQ_INIT(qabi_list);
 2897                         STAILQ_INSERT_TAIL(qabi_list, qabi, qabi_next);
 2898 
 2899                         qabi->qabi_ae = (u_int)ael;
 2900                         qabi->qabi_addr =
 2901                             uim->uim_addr + memattr->umva_byte_offset;
 2902                         qabi->qabi_value = &memattr->umva_value;
 2903                         qabi->qabi_size = 4;
 2904                         qafu->qafu_num_lm_init_inst[ael] +=
 2905                             qat_ae_get_inst_num(qabi->qabi_size);
 2906                         (*curinit)++;
 2907                         if (*curinit >= MAX_LMEM_REG) {
 2908                                 device_printf(sc->sc_dev,
 2909                                     "Invalid lmem val attr\n");
 2910                                 return ENOBUFS;
 2911                         }
 2912                 }
 2913                 break;
 2914         case SRAM_REGION:
 2915         case DRAM_REGION:
 2916         case DRAM1_REGION:
 2917         case SCRATCH_REGION:
 2918         case UMEM_REGION:
 2919                 /* XXX */
 2920                 /* fallthrough */
 2921         default:
 2922                 device_printf(sc->sc_dev,
 2923                     "unsupported memory region to init: %d\n",
 2924                     uim->uim_region);
 2925                 return ENOTSUP;
 2926         }
 2927 
 2928         return 0;
 2929 }
 2930 
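      /*
       * Release the batch-init list built by qat_aefw_init_memory_one()
       * and reset the per-AE counters.
       */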
 2931 static void
 2932 qat_aefw_free_lm_init(struct qat_softc *sc, u_char ae)
 2933 {
 2934         struct qat_aefw_uof *qafu = &sc->sc_aefw_uof;
 2935         struct qat_ae_batch_init *qabi;
 2936 
 2937         while ((qabi = STAILQ_FIRST(&qafu->qafu_lm_init[ae])) != NULL) {
 2938                 STAILQ_REMOVE_HEAD(&qafu->qafu_lm_init[ae], qabi_next);
 2939                 qat_free_mem(qabi);
 2940         }
 2941 
 2942         qafu->qafu_num_lm_init[ae] = 0;
 2943         qafu->qafu_num_lm_init_inst[ae] = 0;
 2944 }
 2945 
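      /*
       * Initialize the parts of each AE's ustore that no default page
       * will overwrite.  A buffer is seeded with the image fill
       * pattern, the words covered by default pages are marked with a
       * dont_init sentinel, and the remaining runs are written out
       * with qat_ae_ucode_write().
       */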
 2946 static int
 2947 qat_aefw_init_ustore(struct qat_softc *sc)
 2948 {
 2949         uint64_t *fill;
 2950         uint32_t dont_init;
 2951         int a, i, p;
 2952         int error = 0;
 2953         int usz, end, start;
 2954         u_char ae, nae;
 2955 
 2956         fill = qat_alloc_mem(MAX_USTORE * sizeof(uint64_t));
 2957 
 2958         for (a = 0; a < sc->sc_aefw_uof.qafu_num_imgs; a++) {
 2959                 struct qat_uof_image *qui = &sc->sc_aefw_uof.qafu_imgs[a];
 2960                 struct uof_image *ui = qui->qui_image;
 2961 
 2962                 for (i = 0; i < MAX_USTORE; i++)
 2963                         memcpy(&fill[i], ui->ui_fill_pattern, sizeof(uint64_t));
 2964                 /*
 2965                  * Choose a dont_init sentinel value that will not equal
 2966                  * the fill data when truncated to 32 bits.
 2967                  */
 2968                 dont_init = 0;
 2969                 if (dont_init == (uint32_t)fill[0])
 2970                         dont_init = 0xffffffff;
 2971 
 2972                 for (p = 0; p < ui->ui_num_pages; p++) {
 2973                         struct qat_uof_page *qup = &qui->qui_pages[p];
 2974                         if (!qup->qup_def_page)
 2975                                 continue;
 2976 
 2977                         for (i = qup->qup_beg_paddr;
 2978                             i < qup->qup_beg_paddr + qup->qup_num_micro_words;
 2979                             i++) {
 2980                                 fill[i] = (uint64_t)dont_init;
 2981                         }
 2982                 }
 2983 
 2984                 for (ae = 0; ae < sc->sc_ae_num; ae++) {
 2985                         MPASS(ae < UOF_MAX_NUM_OF_AE);
 2986                         if ((ui->ui_ae_assigned & (1 << ae)) == 0)
 2987                                 continue;
 2988 
 2989                         if (QAT_AE(sc, ae).qae_shareable_ustore && (ae & 1)) {
 2990                                 qat_ae_get_shared_ustore_ae(ae, &nae);
                                      /*
                                       * Skip the odd AE when its shared-ustore
                                       * neighbor is also assigned this image;
                                       * the shared store is initialized from
                                       * the even AE.
                                       */
 2991                                 if (ui->ui_ae_assigned & (1 << nae))
 2992                                         continue;
 2993                         }
 2994                         usz = QAT_AE(sc, ae).qae_effect_ustore_size;
 2995 
 2996                         /* initialize the areas not going to be overwritten */
 2997                         end = -1;
 2998                         do {
 2999                                 /* find next uword that needs to be initialized */
 3000                                 for (start = end + 1; start < usz; start++) {
 3001                                         if ((uint32_t)fill[start] != dont_init)
 3002                                                 break;
 3003                                 }
 3004                                 /* see if there are no more such uwords */
 3005                                 if (start >= usz)
 3006                                         break;
 3007                                 for (end = start + 1; end < usz; end++) {
 3008                                         if ((uint32_t)fill[end] == dont_init)
 3009                                                 break;
 3010                                 }
 3011                                 if (QAT_AE(sc, ae).qae_shareable_ustore) {
 3012                                         error = ENOTSUP; /* XXX */
 3013                                         goto out;
 3014                                 } else {
 3015                                         error = qat_ae_ucode_write(sc, ae,
 3016                                             start, end - start, &fill[start]);
 3017                                         if (error) {
 3018                                                 goto out;
 3019                                         }
 3020                                 }
 3021 
 3022                         } while (end < usz);
 3023                 }
 3024         }
 3025 
 3026 out:
 3027         qat_free_mem(fill);
 3028         return error;
 3029 }
 3030 
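      /*
       * Write an initial value to one AE register.  Relative register
       * types are written once for every context selected by ctx_mask;
       * absolute types are written once per AE.
       */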
 3031 static int
 3032 qat_aefw_init_reg(struct qat_softc *sc, u_char ae, u_char ctx_mask,
 3033     enum aereg_type regtype, u_short regaddr, u_int value)
 3034 {
 3035         int error = 0;
 3036         u_char ctx;
 3037 
 3038         switch (regtype) {
 3039         case AEREG_GPA_REL:
 3040         case AEREG_GPB_REL:
 3041         case AEREG_SR_REL:
 3042         case AEREG_SR_RD_REL:
 3043         case AEREG_SR_WR_REL:
 3044         case AEREG_DR_REL:
 3045         case AEREG_DR_RD_REL:
 3046         case AEREG_DR_WR_REL:
 3047         case AEREG_NEIGH_REL:
 3048                 /* init for all valid ctx */
 3049                 for (ctx = 0; ctx < MAX_AE_CTX; ctx++) {
 3050                         if ((ctx_mask & (1 << ctx)) == 0)
 3051                                 continue;
 3052                         error = qat_aereg_rel_data_write(sc, ae, ctx, regtype,
 3053                             regaddr, value);
 3054                 }
 3055                 break;
 3056         case AEREG_GPA_ABS:
 3057         case AEREG_GPB_ABS:
 3058         case AEREG_SR_ABS:
 3059         case AEREG_SR_RD_ABS:
 3060         case AEREG_SR_WR_ABS:
 3061         case AEREG_DR_ABS:
 3062         case AEREG_DR_RD_ABS:
 3063         case AEREG_DR_WR_ABS:
 3064                 error = qat_aereg_abs_data_write(sc, ae, regtype,
 3065                     regaddr, value);
 3066                 break;
 3067         default:
 3068                 error = EINVAL;
 3069                 break;
 3070         }
 3071 
 3072         return error;
 3073 }
 3074 
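      /*
       * Apply an image's register-init records.  Only immediate values
       * are supported (EXPR_VAL and the INIT_EXPR types are rejected);
       * INIT_REG targets all contexts valid for the image's context
       * mode, while INIT_REG_CTX targets a single context.
       */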
 3075 static int
 3076 qat_aefw_init_reg_sym_expr(struct qat_softc *sc, u_char ae,
 3077     struct qat_uof_image *qui)
 3078 {
 3079         u_int i, expres;
 3080         u_char ctx_mask;
              int error;
 3081 
 3082         for (i = 0; i < qui->qui_num_init_reg_sym; i++) {
 3083                 struct uof_init_reg_sym *uirs = &qui->qui_init_reg_sym[i];
 3084 
 3085                 if (uirs->uirs_value_type == EXPR_VAL) {
 3086                         /* XXX */
 3087                         device_printf(sc->sc_dev,
 3088                             "does not support initializing EXPR_VAL\n");
 3089                         return ENOTSUP;
 3090                 } else {
 3091                         expres = uirs->uirs_value;
 3092                 }
 3093 
 3094                 switch (uirs->uirs_init_type) {
 3095                 case INIT_REG:
 3096                         if (__SHIFTOUT(qui->qui_image->ui_ae_mode,
 3097                             AE_MODE_CTX_MODE) == MAX_AE_CTX) {
 3098                                 ctx_mask = 0xff; /* 8-ctx mode */
 3099                         } else {
 3100                                 ctx_mask = 0x55; /* 4-ctx mode */
 3101                         }
 3102                         error = qat_aefw_init_reg(sc, ae, ctx_mask,
 3103                             (enum aereg_type)uirs->uirs_reg_type,
 3104                             (u_short)uirs->uirs_addr_offset, expres);
                              if (error)
                                      return error;
 3105                         break;
 3106                 case INIT_REG_CTX:
 3107                         if (__SHIFTOUT(qui->qui_image->ui_ae_mode,
 3108                             AE_MODE_CTX_MODE) == MAX_AE_CTX) {
 3109                                 ctx_mask = 0xff; /* 8-ctx mode */
 3110                         } else {
 3111                                 ctx_mask = 0x55; /* 4-ctx mode */
 3112                         }
 3113                         if (((1 << uirs->uirs_ctx) & ctx_mask) == 0)
 3114                                 return EINVAL;
 3115                         error = qat_aefw_init_reg(sc, ae, 1 << uirs->uirs_ctx,
 3116                             (enum aereg_type)uirs->uirs_reg_type,
 3117                             (u_short)uirs->uirs_addr_offset, expres);
                              if (error)
                                      return error;
 3118                         break;
 3119                 case INIT_EXPR:
 3120                 case INIT_EXPR_ENDIAN_SWAP:
 3121                 default:
 3122                         device_printf(sc->sc_dev,
 3123                             "does not support initializing init_type %d\n",
 3124                             uirs->uirs_init_type);
 3125                         return ENOTSUP;
 3126                 }
 3127         }
 3128 
 3129         return 0;
 3130 }
 3131 
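      /*
       * Walk the UOF init-mem records, queue the local-memory writes,
       * flush them per AE with qat_ae_batch_put_lm(), and finally
       * initialize the unused ustore areas.
       */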
 3132 static int
 3133 qat_aefw_init_memory(struct qat_softc *sc)
 3134 {
 3135         struct qat_aefw_uof *qafu = &sc->sc_aefw_uof;
 3136         size_t uimsz, initmemsz = qafu->qafu_init_mem_size;
 3137         struct uof_init_mem *uim;
 3138         int error, i;
 3139         u_char ae;
 3140 
 3141         uim = qafu->qafu_init_mem;
 3142         for (i = 0; i < qafu->qafu_num_init_mem; i++) {
 3143                 uimsz = sizeof(struct uof_init_mem) +
 3144                     sizeof(struct uof_mem_val_attr) * uim->uim_num_val_attr;
 3145                 if (uimsz > initmemsz) {
 3146                         device_printf(sc->sc_dev,
 3147                             "invalid uof_init_mem or uof_mem_val_attr size\n");
 3148                         return EINVAL;
 3149                 }
 3150 
 3151                 if (uim->uim_num_bytes > 0) {
 3152                         error = qat_aefw_init_memory_one(sc, uim);
 3153                         if (error) {
 3154                                 device_printf(sc->sc_dev,
 3155                                     "Could not init ae memory: %d\n", error);
 3156                                 return error;
 3157                         }
 3158                 }
 3159                 uim = (struct uof_init_mem *)((uintptr_t)uim + uimsz);
 3160                 initmemsz -= uimsz;
 3161         }
 3162 
 3163         /* run Batch put LM API */
 3164         for (ae = 0; ae < MAX_AE; ae++) {
 3165                 error = qat_ae_batch_put_lm(sc, ae, &qafu->qafu_lm_init[ae],
 3166                     qafu->qafu_num_lm_init_inst[ae]);
 3167                 if (error)
 3168                         device_printf(sc->sc_dev, "Could not put lm\n");
 3169 
 3170                 qat_aefw_free_lm_init(sc, ae);
 3171         }
 3172 
 3173         error = qat_aefw_init_ustore(sc);
 3174 
 3175         /* XXX run Batch put LM API */
 3176 
 3177         return error;
 3178 }
 3179 
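      /*
       * One-time global initialization before the images are written:
       * the memory segments (or just the ustore when there are none),
       * followed by the register-init records of every slice's image.
       */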
 3180 static int
 3181 qat_aefw_init_globals(struct qat_softc *sc)
 3182 {
 3183         struct qat_aefw_uof *qafu = &sc->sc_aefw_uof;
 3184         int error, i, p, s;
 3185         u_char ae;
 3186 
 3187         /* initialize the memory segments */
 3188         if (qafu->qafu_num_init_mem > 0) {
 3189                 error = qat_aefw_init_memory(sc);
 3190                 if (error)
 3191                         return error;
 3192         } else {
 3193                 error = qat_aefw_init_ustore(sc);
 3194                 if (error)
 3195                         return error;
 3196         }
 3197 
 3198         /* XXX bind import variables with ivd values */
 3199 
 3200         /* XXX bind the uC global variables;
 3201          * local variables will be bound on the fly */
 3202         for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) {
 3203                 for (p = 0; p < sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_num_pages; p++) {
 3204                         struct qat_uof_page *qup =
 3205                             &sc->sc_aefw_uof.qafu_imgs[i].qui_pages[p];
 3206                         if (qup->qup_num_uw_blocks &&
 3207                             (qup->qup_num_uc_var || qup->qup_num_imp_var)) {
 3208                                 device_printf(sc->sc_dev,
 3209                                     "uC global variables are not supported\n");
 3210                                 return ENOTSUP;
 3211                         }
 3212                 }
 3213         }
 3214 
 3215         for (ae = 0; ae < sc->sc_ae_num; ae++) {
 3216                 struct qat_ae *qae = &(QAT_AE(sc, ae));
 3217 
 3218                 for (s = 0; s < qae->qae_num_slices; s++) {
 3219                         struct qat_ae_slice *qas = &qae->qae_slices[s];
 3220 
 3221                         if (qas->qas_image == NULL)
 3222                                 continue;
 3223 
 3224                         error =
 3225                             qat_aefw_init_reg_sym_expr(sc, ae, qas->qas_image);
 3226                         if (error)
 3227                                 return error;
 3228                 }
 3229         }
 3230 
 3231         return 0;
 3232 }
 3233 
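      /*
       * Return the 64-bit microword at a page-relative address by
       * searching the page's uword blocks, or INVLD_UWORD if the
       * address falls into a hole.
       */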
 3234 static uint64_t
 3235 qat_aefw_get_uof_inst(struct qat_softc *sc, struct qat_uof_page *qup,
 3236     u_int addr)
 3237 {
 3238         uint64_t uinst = 0;
 3239         u_int i;
 3240 
 3241         /* find the block */
 3242         for (i = 0; i < qup->qup_num_uw_blocks; i++) {
 3243                 struct qat_uof_uword_block *quub = &qup->qup_uw_blocks[i];
 3244 
 3245                 if ((addr >= quub->quub_start_addr) &&
 3246                     (addr <= (quub->quub_start_addr +
 3247                     (quub->quub_num_words - 1)))) {
 3248                         /* Unpack the packed bytes and assign them to the
 3249                          * 64-bit uword value; note that the microwords
 3250                          * are stored as packed bytes. */
 3251                         addr -= quub->quub_start_addr;
 3252                         addr *= AEV2_PACKED_UWORD_BYTES;
 3253                         memcpy(&uinst,
 3254                             (void *)((uintptr_t)quub->quub_micro_words + addr),
 3255                             AEV2_PACKED_UWORD_BYTES);
 3256                         uinst = uinst & UWORD_MASK;
 3257 
 3258                         return uinst;
 3259                 }
 3260         }
 3261 
 3262         return INVLD_UWORD;
 3263 }
 3264 
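      /*
       * Copy a page's microwords into the AE's ustore in
       * UWORD_CPYBUF_SIZE chunks, patching holes in the UOF with the
       * image fill pattern.  Fixup locals and shareable ustore are
       * not supported.
       */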
 3265 static int
 3266 qat_aefw_do_pagein(struct qat_softc *sc, u_char ae, struct qat_uof_page *qup)
 3267 {
 3268         struct qat_ae *qae = &(QAT_AE(sc, ae));
 3269         uint64_t fill, *ucode_cpybuf;
 3270         u_int error, i, upaddr, ninst, cpylen;
 3271 
 3272         if (qup->qup_num_uc_var || qup->qup_num_neigh_reg ||
 3273             qup->qup_num_imp_var || qup->qup_num_imp_expr) {
 3274                 device_printf(sc->sc_dev,
 3275                     "does not support fixup locals\n");
 3276                 return ENOTSUP;
 3277         }
 3278 
 3279         ucode_cpybuf = qat_alloc_mem(UWORD_CPYBUF_SIZE * sizeof(uint64_t));
 3280 
 3281         /* XXX get fill-pattern from an image -- they are all the same */
 3282         memcpy(&fill, sc->sc_aefw_uof.qafu_imgs[0].qui_image->ui_fill_pattern,
 3283             sizeof(uint64_t));
 3284 
 3285         upaddr = qup->qup_beg_paddr;
 3286         ninst = qup->qup_num_micro_words;
 3287         while (ninst > 0) {
 3288                 cpylen = min(ninst, UWORD_CPYBUF_SIZE);
 3289 
 3290                 /* load the buffer */
 3291                 for (i = 0; i < cpylen; i++) {
 3292                         /* keep the code structure below in case shared
 3293                          * scenarios need different handling */
 3294                         if (!qae->qae_shareable_ustore) {
 3295                                 /* qat_aefw_get_uof_inst() takes an address that
 3296                                  * is relative to the start of the page.
 3297                                  * So we don't need to add in the physical
 3298                                  * offset of the page. */
 3299                                 if (qup->qup_page_region != 0) {
 3300                                         /* XXX */
 3301                                         device_printf(sc->sc_dev,
 3302                                             "region != 0 is not supported\n");
 3303                                         qat_free_mem(ucode_cpybuf);
 3304                                         return ENOTSUP;
 3305                                 } else {
 3306                                         /* for mixing case, it should take
 3307                                          * physical address */
 3308                                         ucode_cpybuf[i] = qat_aefw_get_uof_inst(
 3309                                             sc, qup, upaddr + i);
 3310                                         if (ucode_cpybuf[i] == INVLD_UWORD) {
 3311                                             /* fill hole in the uof */
 3312                                             ucode_cpybuf[i] = fill;
 3313                                         }
 3314                                 }
 3315                         } else {
 3316                                 /* XXX */
 3317                                 qat_free_mem(ucode_cpybuf);
 3318                                 return ENOTSUP;
 3319                         }
 3320                 }
 3321 
 3322                 /* copy the buffer to ustore */
 3323                 if (!qae->qae_shareable_ustore) {
 3324                         error = qat_ae_ucode_write(sc, ae, upaddr, cpylen,
 3325                             ucode_cpybuf);
 3326                         if (error) {
                                      /* do not leak the copy buffer on error */
                                      qat_free_mem(ucode_cpybuf);
 3327                                 return error;
                              }
 3328                 } else {
 3329                         /* XXX */
 3330                         qat_free_mem(ucode_cpybuf);
 3331                         return ENOTSUP;
 3332                 }
 3333                 upaddr += cpylen;
 3334                 ninst -= cpylen;
 3335         }
 3336 
 3337         qat_free_mem(ucode_cpybuf);
 3338 
 3339         return 0;
 3340 }
 3341 
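      /*
       * Load one image onto its assigned AEs: page in the default
       * pages, record the current page for each context, mark the
       * live contexts and point each assigned context's PC at the
       * image entry address.
       */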
 3342 static int
 3343 qat_aefw_uof_write_one(struct qat_softc *sc, struct qat_uof_image *qui)
 3344 {
 3345         struct uof_image *ui = qui->qui_image;
 3346         struct qat_ae_page *qap;
 3347         u_int s, p, c;
 3348         int error;
 3349         u_char ae, ctx_mask;
 3350 
 3351         if (__SHIFTOUT(ui->ui_ae_mode, AE_MODE_CTX_MODE) == MAX_AE_CTX)
 3352                 ctx_mask = 0xff; /* 8-ctx mode */
 3353         else
 3354                 ctx_mask = 0x55; /* 4-ctx mode */
 3355 
 3356         /* load the default page and set assigned CTX PC
 3357          * to the entrypoint address */
 3358         for (ae = 0; ae < sc->sc_ae_num; ae++) {
 3359                 struct qat_ae *qae = &(QAT_AE(sc, ae));
 3360                 struct qat_ae_slice *qas;
 3361                 u_int metadata;
 3362 
 3363                 MPASS(ae < UOF_MAX_NUM_OF_AE);
 3364 
 3365                 if ((ui->ui_ae_assigned & (1 << ae)) == 0)
 3366                         continue;
 3367 
 3368                 /* find the slice to which this image is assigned */
 3369                 for (s = 0; s < qae->qae_num_slices; s++) {
 3370                         qas = &qae->qae_slices[s];
 3371                         if (ui->ui_ctx_assigned & qas->qas_assigned_ctx_mask)
 3372                                 break;
 3373                 }
 3374                 if (s >= qae->qae_num_slices)
 3375                         continue;
 3376 
 3377                 qas = &qae->qae_slices[s];
 3378 
 3379                 for (p = 0; p < ui->ui_num_pages; p++) {
 3380                         qap = &qas->qas_pages[p];
 3381 
 3382                         /* Only load pages loaded by default */
 3383                         if (!qap->qap_page->qup_def_page)
 3384                                 continue;
 3385 
 3386                         error = qat_aefw_do_pagein(sc, ae, qap->qap_page);
 3387                         if (error)
 3388                                 return error;
 3389                 }
 3390 
 3391                 metadata = qas->qas_image->qui_image->ui_app_metadata;
 3392                 if (metadata != 0xffffffff && bootverbose) {
 3393                         device_printf(sc->sc_dev,
 3394                             "loaded firmware: %s\n",
 3395                             qat_aefw_uof_string(sc, metadata));
 3396                 }
 3397 
 3398                 /* Assume starting page is page 0 */
 3399                 qap = &qas->qas_pages[0];
 3400                 for (c = 0; c < MAX_AE_CTX; c++) {
 3401                         if (ctx_mask & (1 << c))
 3402                                 qas->qas_cur_pages[c] = qap;
 3403                         else
 3404                                 qas->qas_cur_pages[c] = NULL;
 3405                 }
 3406 
 3407                 /* set the live context */
 3408                 qae->qae_live_ctx_mask = ui->ui_ctx_assigned;
 3409 
 3410                 /* set context PC to the image entrypoint address */
 3411                 error = qat_ae_write_pc(sc, ae, ui->ui_ctx_assigned,
 3412                     ui->ui_entry_address);
 3413                 if (error)
 3414                         return error;
 3415         }
 3416 
 3417         /* XXX store the checksum for convenience */
 3418 
 3419         return 0;
 3420 }
 3421 
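      /*
       * Write the parsed UOF to the device: initialize globals, then
       * load each image in turn.
       */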
 3422 static int
 3423 qat_aefw_uof_write(struct qat_softc *sc)
 3424 {
 3425         int error = 0;
 3426         int i;
 3427 
 3428         error = qat_aefw_init_globals(sc);
 3429         if (error) {
 3430                 device_printf(sc->sc_dev,
 3431                     "Could not initialize globals\n");
 3432                 return error;
 3433         }
 3434 
 3435         for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) {
 3436                 error = qat_aefw_uof_write_one(sc,
 3437                     &sc->sc_aefw_uof.qafu_imgs[i]);
 3438                 if (error)
 3439                         break;
 3440         }
 3441 
 3442         /* XXX UcLo_computeFreeUstore */
 3443 
 3444         return error;
 3445 }
