FreeBSD/Linux Kernel Cross Reference
sys/dev/cxgbe/cudbg/cudbg_lib.c

/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "cudbg.h"
#include "cudbg_lib_common.h"
#include "cudbg_lib.h"
#include "cudbg_entity.h"

#define BUFFER_WARN_LIMIT 10000000

struct large_entity large_entity_list[] = {
        {CUDBG_EDC0, 0, 0},
        {CUDBG_EDC1, 0, 0},
        {CUDBG_MC0, 0, 0},
        {CUDBG_MC1, 0, 0}
};
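
/*
 * Editorial note (not from the original source): these are the entities
 * expected to produce very large dumps (the EDC/MC memory regions).  Their
 * skip_flag is set by skip_entity() when the first collection pass runs low
 * on buffer space, and the second pass in cudbg_collect() retries exactly
 * the entries flagged here.
 */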

static int is_fw_attached(struct cudbg_init *pdbg_init)
{

        return (pdbg_init->adap->flags & FW_OK);
}

/*
 * Add padding bytes to debug_buffer so that the entity data is 4-byte
 * aligned.
 */
static void align_debug_buffer(struct cudbg_buffer *dbg_buff,
                        struct cudbg_entity_hdr *entity_hdr)
{
        u8 zero_buf[4] = {0};
        u8 padding, remain;

        remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
        padding = 4 - remain;
        if (remain) {
                memcpy(((u8 *) dbg_buff->data) + dbg_buff->offset, zero_buf,
                       padding);
                dbg_buff->offset += padding;
                entity_hdr->num_pad = padding;
        }

        entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}
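
/*
 * Editorial worked example: if an entity started at offset 16 and ended at
 * offset 23, then remain = (23 - 16) % 4 = 3, so padding = 1 zero byte is
 * appended, the offset moves to 24, and the recorded entity size becomes
 * 24 - 16 = 8.
 */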

static void read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
                          enum ctxt_type ctype, u32 *data)
{
        struct adapter *padap = pdbg_init->adap;
        int rc = -1;

        if (is_fw_attached(pdbg_init)) {
                rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK,
                    "t4cudf");
                if (rc != 0)
                        goto out;
                rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype,
                                    data);
                end_synchronized_op(padap, 0);
        }

out:
        if (rc)
                t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
}
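
/*
 * Editorial note: when the firmware is not attached, or the firmware mailbox
 * read fails, the SGE context is read through the backdoor register
 * interface (t4_sge_ctxt_rd_bd()) instead.
 */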

static int get_next_ext_entity_hdr(void *outbuf, u32 *ext_size,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_entity_hdr **entity_hdr)
{
        struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
        int rc = 0;
        u32 ext_offset = cudbg_hdr->data_len;

        *ext_size = 0;

        if (dbg_buff->size - dbg_buff->offset <=
                 sizeof(struct cudbg_entity_hdr)) {
                rc = CUDBG_STATUS_BUFFER_SHORT;
                goto err;
        }

        *entity_hdr = (struct cudbg_entity_hdr *)
                       ((char *)outbuf + cudbg_hdr->data_len);

        /* Find the last extended entity header */
        while ((*entity_hdr)->size) {
                ext_offset += sizeof(struct cudbg_entity_hdr) +
                                     (*entity_hdr)->size;

                *ext_size += (*entity_hdr)->size +
                              sizeof(struct cudbg_entity_hdr);

                if (dbg_buff->size - dbg_buff->offset + *ext_size <=
                        sizeof(struct cudbg_entity_hdr)) {
                        rc = CUDBG_STATUS_BUFFER_SHORT;
                        goto err;
                }

                if (ext_offset != (*entity_hdr)->next_ext_offset) {
                        ext_offset -= sizeof(struct cudbg_entity_hdr) +
                                     (*entity_hdr)->size;
                        break;
                }

                (*entity_hdr)->next_ext_offset = *ext_size;

                *entity_hdr = (struct cudbg_entity_hdr *)
                                           ((char *)outbuf +
                                           ext_offset);
        }

        /* Update the data offset */
        dbg_buff->offset = ext_offset;
err:
        return rc;
}
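
/*
 * Editorial note: extended entities live past cudbg_hdr->data_len, each one
 * prefixed by its own cudbg_entity_hdr and chained through next_ext_offset.
 * The walk above stops at the first header whose size is 0 (the next free
 * slot) or at a break in the chain, and leaves dbg_buff->offset pointing at
 * that slot.
 */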

static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
                       u32 cur_entity_data_offset,
                       u32 cur_entity_size,
                       int entity_nu, u32 ext_size)
{
        struct cudbg_private *priv = handle;
        struct cudbg_init *cudbg_init = &priv->dbg_init;
        struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
        u64 timestamp;
        u32 cur_entity_hdr_offset = sizeof(struct cudbg_hdr);
        u32 remain_flash_size;
        u32 flash_data_offset;
        u32 data_hdr_size;
        int rc = -1;

        data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
                        sizeof(struct cudbg_hdr);

        flash_data_offset = (FLASH_CUDBG_NSECS *
                             (sizeof(struct cudbg_flash_hdr) +
                              data_hdr_size)) +
                            (cur_entity_data_offset - data_hdr_size);

        if (flash_data_offset > CUDBG_FLASH_SIZE) {
                update_skip_size(sec_info, cur_entity_size);
                if (cudbg_init->verbose)
                        cudbg_init->print("Skipping large entity...\n");
                return rc;
        }

        remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;

        if (cur_entity_size > remain_flash_size) {
                update_skip_size(sec_info, cur_entity_size);
                if (cudbg_init->verbose)
                        cudbg_init->print("Skipping large entity...\n");
        } else {
                timestamp = 0;

                cur_entity_hdr_offset +=
                        (sizeof(struct cudbg_entity_hdr) *
                        (entity_nu - 1));

                rc = cudbg_write_flash(handle, timestamp, dbg_buff,
                                       cur_entity_data_offset,
                                       cur_entity_hdr_offset,
                                       cur_entity_size,
                                       ext_size);
                if (rc == CUDBG_STATUS_FLASH_FULL && cudbg_init->verbose)
                        cudbg_init->print("\n\tFLASH is full... "
                                "cannot write any more to flash\n\n");
        }

        return rc;
}
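
/*
 * Editorial note on the layout math above: the first FLASH_CUDBG_NSECS
 * sectors each reserve room for a flash header plus the full data header
 * (global cudbg_hdr + CUDBG_MAX_ENTITY entity headers).  Entity payloads
 * are then written after that reservation, at the same relative offset they
 * occupy past the headers in the in-memory buffer.
 */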

int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size)
{
        struct cudbg_entity_hdr *entity_hdr = NULL;
        struct cudbg_entity_hdr *ext_entity_hdr = NULL;
        struct cudbg_hdr *cudbg_hdr;
        struct cudbg_buffer dbg_buff;
        struct cudbg_error cudbg_err = {0};
        int large_entity_code;

        u8 *dbg_bitmap = ((struct cudbg_private *)handle)->dbg_init.dbg_bitmap;
        struct cudbg_init *cudbg_init =
                &(((struct cudbg_private *)handle)->dbg_init);
        struct adapter *padap = cudbg_init->adap;
        u32 total_size, remaining_buf_size;
        u32 ext_size = 0;
        int index, bit, i, rc = -1;
        int all;
        bool flag_ext = false;

        reset_skip_entity();

        dbg_buff.data = outbuf;
        dbg_buff.size = *outbuf_size;
        dbg_buff.offset = 0;

        cudbg_hdr = (struct cudbg_hdr *)dbg_buff.data;
        cudbg_hdr->signature = CUDBG_SIGNATURE;
        cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
        cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
        cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
        cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
        cudbg_hdr->chip_ver = padap->params.chipid;

        if (cudbg_hdr->data_len)
                flag_ext = true;

        if (cudbg_init->use_flash) {
#ifndef notyet
                rc = t4_get_flash_params(padap);
                if (rc) {
                        if (cudbg_init->verbose)
                                cudbg_init->print("\nGet flash params failed.\n\n");
                        cudbg_init->use_flash = 0;
                }
#endif

#ifdef notyet
                /* Timestamp is mandatory. If it is not passed then disable
                 * flash support
                 */
                if (!cudbg_init->dbg_params[CUDBG_TIMESTAMP_PARAM].u.time) {
                        if (cudbg_init->verbose)
                                cudbg_init->print("\nTimestamp param missing, "
                                          "so ignoring flash write request\n\n");
                        cudbg_init->use_flash = 0;
                }
#endif
        }

        if (sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY >
            dbg_buff.size) {
                rc = CUDBG_STATUS_SMALL_BUFF;
                total_size = cudbg_hdr->hdr_len;
                goto err;
        }

        /* If the ext flag is set then move the offset to the end of the buf
         * so that we can add ext entities
         */
        if (flag_ext) {
                ext_entity_hdr = (struct cudbg_entity_hdr *)
                              ((char *)outbuf + cudbg_hdr->hdr_len +
                              (sizeof(struct cudbg_entity_hdr) *
                              (CUDBG_EXT_ENTITY - 1)));
                ext_entity_hdr->start_offset = cudbg_hdr->data_len;
                ext_entity_hdr->entity_type = CUDBG_EXT_ENTITY;
                ext_entity_hdr->size = 0;
                dbg_buff.offset = cudbg_hdr->data_len;
        } else {
                dbg_buff.offset += cudbg_hdr->hdr_len; /* move past the global header */
                dbg_buff.offset += CUDBG_MAX_ENTITY *
                                        sizeof(struct cudbg_entity_hdr);
        }

        total_size = dbg_buff.offset;
        all = dbg_bitmap[0] & (1 << CUDBG_ALL);

        /*sort(large_entity_list);*/

        for (i = 1; i < CUDBG_MAX_ENTITY; i++) {
                index = i / 8;
                bit = i % 8;

                if (entity_list[i].bit == CUDBG_EXT_ENTITY)
                        continue;

                if (all || (dbg_bitmap[index] & (1 << bit))) {
                        if (!flag_ext) {
                                rc = get_entity_hdr(outbuf, i, dbg_buff.size,
                                                    &entity_hdr);
                                if (rc)
                                        cudbg_hdr->hdr_flags = rc;
                        } else {
                                rc = get_next_ext_entity_hdr(outbuf, &ext_size,
                                                             &dbg_buff,
                                                             &entity_hdr);
                                if (rc)
                                        goto err;

                                /* move the offset after the ext header */
                                dbg_buff.offset +=
                                        sizeof(struct cudbg_entity_hdr);
                        }

                        entity_hdr->entity_type = i;
                        entity_hdr->start_offset = dbg_buff.offset;
                        /* process each entity by calling process_entity fp */
                        remaining_buf_size = dbg_buff.size - dbg_buff.offset;

                        if ((remaining_buf_size <= BUFFER_WARN_LIMIT) &&
                            is_large_entity(i)) {
                                if (cudbg_init->verbose)
                                        cudbg_init->print("Skipping %s\n",
                                            entity_list[i].name);
                                skip_entity(i);
                                continue;
                        } else {
                                /* If fw_attach is 0, then skip entities that
                                 * communicate with the firmware
                                 */
                                if (!is_fw_attached(cudbg_init) &&
                                    (entity_list[i].flag &
                                    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
                                        if (cudbg_init->verbose)
                                                cudbg_init->print("Skipping %s entity, "
                                                          "because fw_attach "
                                                          "is 0\n",
                                                          entity_list[i].name);
                                        continue;
                                }

                                if (cudbg_init->verbose)
                                        cudbg_init->print("collecting debug entity: "
                                                  "%s\n", entity_list[i].name);
                                memset(&cudbg_err, 0,
                                       sizeof(struct cudbg_error));
                                rc = process_entity[i - 1](cudbg_init, &dbg_buff,
                                                         &cudbg_err);
                        }

                        if (rc) {
                                entity_hdr->size = 0;
                                dbg_buff.offset = entity_hdr->start_offset;
                        } else
                                align_debug_buffer(&dbg_buff, entity_hdr);

                        if (cudbg_err.sys_err)
                                rc = CUDBG_SYSTEM_ERROR;

                        entity_hdr->hdr_flags = rc;
                        entity_hdr->sys_err = cudbg_err.sys_err;
                        entity_hdr->sys_warn = cudbg_err.sys_warn;

                        /* We don't want to include the ext entity size in the
                         * global header
                         */
                        if (!flag_ext)
                                total_size += entity_hdr->size;

                        cudbg_hdr->data_len = total_size;
                        *outbuf_size = total_size;

                        /* consider the size of the ext entity header and data
                         * also
                         */
                        if (flag_ext) {
                                ext_size += (sizeof(struct cudbg_entity_hdr) +
                                             entity_hdr->size);
                                entity_hdr->start_offset -= cudbg_hdr->data_len;
                                ext_entity_hdr->size = ext_size;
                                entity_hdr->next_ext_offset = ext_size;
                                entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
                        }

                        if (cudbg_init->use_flash) {
                                if (flag_ext)
                                        wr_entity_to_flash(handle,
                                                           &dbg_buff,
                                                           ext_entity_hdr->start_offset,
                                                           entity_hdr->size,
                                                           CUDBG_EXT_ENTITY,
                                                           ext_size);
                                else
                                        wr_entity_to_flash(handle,
                                                           &dbg_buff,
                                                           entity_hdr->start_offset,
                                                           entity_hdr->size,
                                                           i, ext_size);
                        }
                }
        }

        for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
                large_entity_code = large_entity_list[i].entity_code;
                if (large_entity_list[i].skip_flag) {
                        if (!flag_ext) {
                                rc = get_entity_hdr(outbuf, large_entity_code,
                                                    dbg_buff.size, &entity_hdr);
                                if (rc)
                                        cudbg_hdr->hdr_flags = rc;
                        } else {
                                rc = get_next_ext_entity_hdr(outbuf, &ext_size,
                                                             &dbg_buff,
                                                             &entity_hdr);
                                if (rc)
                                        goto err;

                                dbg_buff.offset +=
                                        sizeof(struct cudbg_entity_hdr);
                        }

                        /* If fw_attach is 0, then skip entities that
                         * communicate with the firmware
                         */
                        if (!is_fw_attached(cudbg_init) &&
                            (entity_list[large_entity_code].flag &
                            (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
                                if (cudbg_init->verbose)
                                        cudbg_init->print("Skipping %s entity, "
                                                  "because fw_attach "
                                                  "is 0\n",
                                                  entity_list[large_entity_code].name);
                                continue;
                        }

                        entity_hdr->entity_type = large_entity_code;
                        entity_hdr->start_offset = dbg_buff.offset;
                        if (cudbg_init->verbose)
                                cudbg_init->print("Re-trying debug entity: %s\n",
                                          entity_list[large_entity_code].name);

                        memset(&cudbg_err, 0, sizeof(struct cudbg_error));
                        rc = process_entity[large_entity_code - 1](cudbg_init,
                                                                   &dbg_buff,
                                                                   &cudbg_err);
                        if (rc) {
                                entity_hdr->size = 0;
                                dbg_buff.offset = entity_hdr->start_offset;
                        } else
                                align_debug_buffer(&dbg_buff, entity_hdr);

                        if (cudbg_err.sys_err)
                                rc = CUDBG_SYSTEM_ERROR;

                        entity_hdr->hdr_flags = rc;
                        entity_hdr->sys_err = cudbg_err.sys_err;
                        entity_hdr->sys_warn = cudbg_err.sys_warn;

                        /* We don't want to include the ext entity size in the
                         * global header
                         */
                        if (!flag_ext)
                                total_size += entity_hdr->size;

                        cudbg_hdr->data_len = total_size;
                        *outbuf_size = total_size;

                        /* consider the size of the ext entity header and
                         * data also
                         */
                        if (flag_ext) {
                                ext_size += (sizeof(struct cudbg_entity_hdr) +
                                                   entity_hdr->size);
                                entity_hdr->start_offset -=
                                                        cudbg_hdr->data_len;
                                ext_entity_hdr->size = ext_size;
                                entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
                        }

                        if (cudbg_init->use_flash) {
                                if (flag_ext)
                                        wr_entity_to_flash(handle,
                                                           &dbg_buff,
                                                           ext_entity_hdr->start_offset,
                                                           entity_hdr->size,
                                                           CUDBG_EXT_ENTITY,
                                                           ext_size);
                                else
                                        wr_entity_to_flash(handle,
                                                           &dbg_buff,
                                                           entity_hdr->start_offset,
                                                           entity_hdr->size,
                                                           large_entity_list[i].entity_code,
                                                           ext_size);
                        }
                }
        }

        cudbg_hdr->data_len = total_size;
        *outbuf_size = total_size;

        if (flag_ext)
                *outbuf_size += ext_size;

        return 0;
err:
        return rc;
}
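
/*
 * Editorial sketch (not part of the driver): a minimal caller, under the
 * assumption that the cudbg_private handle fields (dbg_init.adap,
 * dbg_init.print, dbg_init.dbg_bitmap) have already been set up by the usual
 * cudbg attach path, and that CUDBG_RSS is one of the entity codes from
 * cudbg.h.  The bitmap indexing mirrors the (i / 8, i % 8) scheme used in
 * the collection loop above.
 *
 *      static u8 dump_buf[32 * 1024 * 1024];   // caller-provided dump buffer
 *      u32 size = sizeof(dump_buf);
 *      struct cudbg_private priv;
 *
 *      memset(&priv, 0, sizeof(priv));
 *      priv.dbg_init.adap = padap;              // adapter being dumped
 *      priv.dbg_init.print = my_printf;         // verbose output sink
 *      priv.dbg_init.dbg_bitmap[CUDBG_RSS / 8] |= 1 << (CUDBG_RSS % 8);
 *
 *      if (cudbg_collect(&priv, dump_buf, &size) == 0) {
 *              // dump_buf now holds a cudbg_hdr, the entity header array,
 *              // and compressed entity data; size is the valid byte count.
 *      }
 */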

void reset_skip_entity(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(large_entity_list); i++)
                large_entity_list[i].skip_flag = 0;
}

void skip_entity(int entity_code)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
                if (large_entity_list[i].entity_code == entity_code)
                        large_entity_list[i].skip_flag = 1;
        }
}

int is_large_entity(int entity_code)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
                if (large_entity_list[i].entity_code == entity_code)
                        return 1;
        }
        return 0;
}

int get_entity_hdr(void *outbuf, int i, u32 size,
                   struct cudbg_entity_hdr **entity_hdr)
{
        int rc = 0;
        struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

        if (cudbg_hdr->hdr_len + (sizeof(struct cudbg_entity_hdr) * i) > size)
                return CUDBG_STATUS_SMALL_BUFF;

        *entity_hdr = (struct cudbg_entity_hdr *)
                      ((char *)outbuf + cudbg_hdr->hdr_len +
                       (sizeof(struct cudbg_entity_hdr) * (i - 1)));
        return rc;
}
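
/*
 * Editorial note: entity codes are 1-based, so entity i's header occupies
 * slot (i - 1) in the header array that follows the global cudbg_hdr.  The
 * bounds check above therefore tests the end of slot (i - 1), i.e. i full
 * headers past hdr_len.
 */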

static int collect_rss(struct cudbg_init *pdbg_init,
                       struct cudbg_buffer *dbg_buff,
                       struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        u32 size;
        int rc = 0;

        size = padap->chip_params->rss_nentries * sizeof(u16);
        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        rc = t4_read_rss(padap, (u16 *)scratch_buff.data);
        if (rc) {
                if (pdbg_init->verbose)
                        pdbg_init->print("%s(), t4_read_rss failed, rc: %d\n",
                                 __func__, rc);
                cudbg_err->sys_err = rc;
                goto err1;
        }

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}
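
/*
 * Editorial note: collect_rss() above shows the template that nearly every
 * collector in this file follows:
 *
 *      rc = get_scratch_buff(dbg_buff, size, &scratch_buff);  // reserve scratch
 *      ... fill scratch_buff.data with raw entity data ...
 *      rc = write_compression_hdr(&scratch_buff, dbg_buff);   // emit header
 *      rc = compress_buff(&scratch_buff, dbg_buff);           // compress into dump
 *      release_scratch_buff(&scratch_buff, dbg_buff);         // always release
 *
 * Hardware read failures are recorded in cudbg_err->sys_err, and the scratch
 * buffer is released on every exit path past its allocation.
 */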

static int collect_sw_state(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct sw_state *swstate;
        u32 size;
        int rc = 0;

        size = sizeof(struct sw_state);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        swstate = (struct sw_state *) scratch_buff.data;

        swstate->fw_state = t4_read_reg(padap, A_PCIE_FW);
        snprintf(swstate->caller_string, sizeof(swstate->caller_string), "%s",
            "FreeBSD");
        swstate->os_type = 0;

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_ddp_stats(struct cudbg_init *pdbg_init,
                             struct cudbg_buffer *dbg_buff,
                             struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct tp_usm_stats *tp_usm_stats_buff;
        u32 size;
        int rc = 0;

        size = sizeof(struct tp_usm_stats);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        tp_usm_stats_buff = (struct tp_usm_stats *) scratch_buff.data;

        /* spin_lock(&padap->stats_lock);       TODO*/
        t4_get_usm_stats(padap, tp_usm_stats_buff, 1);
        /* spin_unlock(&padap->stats_lock);     TODO*/

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_ulptx_la(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct struct_ulptx_la *ulptx_la_buff;
        u32 size, i, j;
        int rc = 0;

        size = sizeof(struct struct_ulptx_la);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        ulptx_la_buff = (struct struct_ulptx_la *) scratch_buff.data;

        for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
                ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
                                                      A_ULP_TX_LA_RDPTR_0 +
                                                      0x10 * i);
                ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
                                                      A_ULP_TX_LA_WRPTR_0 +
                                                      0x10 * i);
                ulptx_la_buff->rddata[i] = t4_read_reg(padap,
                                                       A_ULP_TX_LA_RDDATA_0 +
                                                       0x10 * i);
                for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) {
                        ulptx_la_buff->rd_data[i][j] =
                                t4_read_reg(padap,
                                            A_ULP_TX_LA_RDDATA_0 + 0x10 * i);
                }
        }

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_ulprx_la(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct struct_ulprx_la *ulprx_la_buff;
        u32 size;
        int rc = 0;

        size = sizeof(struct struct_ulprx_la);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        ulprx_la_buff = (struct struct_ulprx_la *) scratch_buff.data;
        t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
        ulprx_la_buff->size = ULPRX_LA_SIZE;

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_cpl_stats(struct cudbg_init *pdbg_init,
                             struct cudbg_buffer *dbg_buff,
                             struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct struct_tp_cpl_stats *tp_cpl_stats_buff;
        u32 size;
        int rc = 0;

        size = sizeof(struct struct_tp_cpl_stats);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        tp_cpl_stats_buff = (struct struct_tp_cpl_stats *) scratch_buff.data;
        tp_cpl_stats_buff->nchan = padap->chip_params->nchan;

        /* spin_lock(&padap->stats_lock);       TODO*/
        t4_tp_get_cpl_stats(padap, &tp_cpl_stats_buff->stats, 1);
        /* spin_unlock(&padap->stats_lock);     TODO*/

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);

err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_wc_stats(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct struct_wc_stats *wc_stats_buff;
        u32 val1;
        u32 val2;
        u32 size;
        int rc = 0;

        size = sizeof(struct struct_wc_stats);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        wc_stats_buff = (struct struct_wc_stats *) scratch_buff.data;

        if (!is_t4(padap)) {
                val1 = t4_read_reg(padap, A_SGE_STAT_TOTAL);
                val2 = t4_read_reg(padap, A_SGE_STAT_MATCH);
                wc_stats_buff->wr_cl_success = val1 - val2;
                wc_stats_buff->wr_cl_fail = val2;
        } else {
                wc_stats_buff->wr_cl_success = 0;
                wc_stats_buff->wr_cl_fail = 0;
        }

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);
err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int mem_desc_cmp(const void *a, const void *b)
{
        u32 base_a = ((const struct struct_mem_desc *)a)->base;
        u32 base_b = ((const struct struct_mem_desc *)b)->base;

        /* Compare without subtracting, which could overflow the int result. */
        return (base_a > base_b) - (base_a < base_b);
}

static int fill_meminfo(struct adapter *padap,
                        struct struct_meminfo *meminfo_buff)
{
        struct struct_mem_desc *md;
        u32 size, lo, hi;
        u32 used, alloc;
        int n, i, rc = 0;

        memset(meminfo_buff->avail, 0,
               ARRAY_SIZE(meminfo_buff->avail) *
               sizeof(struct struct_mem_desc));
        memset(meminfo_buff->mem, 0,
               (ARRAY_SIZE(region) + 3) * sizeof(struct struct_mem_desc));
        md = meminfo_buff->mem;

        for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
                meminfo_buff->mem[i].limit = 0;
                meminfo_buff->mem[i].idx = i;
        }

        i = 0;

        lo = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);

        if (lo & F_EDRAM0_ENABLE) {
                hi = t4_read_reg(padap, A_MA_EDRAM0_BAR);
                meminfo_buff->avail[i].base = G_EDRAM0_BASE(hi) << 20;
                meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
                                               (G_EDRAM0_SIZE(hi) << 20);
                meminfo_buff->avail[i].idx = 0;
                i++;
        }

        if (lo & F_EDRAM1_ENABLE) {
                hi = t4_read_reg(padap, A_MA_EDRAM1_BAR);
                meminfo_buff->avail[i].base = G_EDRAM1_BASE(hi) << 20;
                meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
                                               (G_EDRAM1_SIZE(hi) << 20);
                meminfo_buff->avail[i].idx = 1;
                i++;
        }

        if (is_t5(padap)) {
                if (lo & F_EXT_MEM0_ENABLE) {
                        hi = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
                        meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
                        meminfo_buff->avail[i].limit =
                                meminfo_buff->avail[i].base +
                                (G_EXT_MEM_SIZE(hi) << 20);
                        meminfo_buff->avail[i].idx = 3;
                        i++;
                }

                if (lo & F_EXT_MEM1_ENABLE) {
                        hi = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
                        meminfo_buff->avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
                        meminfo_buff->avail[i].limit =
                                meminfo_buff->avail[i].base +
                                (G_EXT_MEM1_SIZE(hi) << 20);
                        meminfo_buff->avail[i].idx = 4;
                        i++;
                }
        } else if (is_t6(padap)) {
                if (lo & F_EXT_MEM_ENABLE) {
                        hi = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
                        meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
                        meminfo_buff->avail[i].limit =
                                meminfo_buff->avail[i].base +
                                (G_EXT_MEM_SIZE(hi) << 20);
                        meminfo_buff->avail[i].idx = 2;
                        i++;
                }
        }

        if (!i) {                                  /* no memory available */
                rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
                goto err;
        }

        meminfo_buff->avail_c = i;
        qsort(meminfo_buff->avail, i, sizeof(struct struct_mem_desc),
            mem_desc_cmp);
        (md++)->base = t4_read_reg(padap, A_SGE_DBQ_CTXT_BADDR);
        (md++)->base = t4_read_reg(padap, A_SGE_IMSG_CTXT_BADDR);
        (md++)->base = t4_read_reg(padap, A_SGE_FLM_CACHE_BADDR);
        (md++)->base = t4_read_reg(padap, A_TP_CMM_TCB_BASE);
        (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_BASE);
        (md++)->base = t4_read_reg(padap, A_TP_CMM_TIMER_BASE);
        (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_RX_FLST_BASE);
        (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_TX_FLST_BASE);
        (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_PS_FLST_BASE);

        /* the next few have explicit upper bounds */
        md->base = t4_read_reg(padap, A_TP_PMM_TX_BASE);
        md->limit = md->base - 1 +
                    t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE) *
                    G_PMTXMAXPAGE(t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE));
        md++;

        md->base = t4_read_reg(padap, A_TP_PMM_RX_BASE);
        md->limit = md->base - 1 +
                    t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) *
                    G_PMRXMAXPAGE(t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE));
        md++;

        if (t4_read_reg(padap, A_LE_DB_CONFIG) & F_HASHEN) {
                if (chip_id(padap) <= CHELSIO_T5) {
                        hi = t4_read_reg(padap, A_LE_DB_TID_HASHBASE) / 4;
                        md->base = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
                } else {
                        hi = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
                        md->base = t4_read_reg(padap,
                                               A_LE_DB_HASH_TBL_BASE_ADDR);
                }
                md->limit = 0;
        } else {
                md->base = 0;
                md->idx = ARRAY_SIZE(region);  /* hide it */
        }
        md++;

#define ulp_region(reg) \
        {\
                md->base = t4_read_reg(padap, A_ULP_ ## reg ## _LLIMIT);\
                (md++)->limit = t4_read_reg(padap, A_ULP_ ## reg ## _ULIMIT);\
        }

        ulp_region(RX_ISCSI);
        ulp_region(RX_TDDP);
        ulp_region(TX_TPT);
        ulp_region(RX_STAG);
        ulp_region(RX_RQ);
        ulp_region(RX_RQUDP);
        ulp_region(RX_PBL);
        ulp_region(TX_PBL);
#undef ulp_region

        md->base = 0;
        md->idx = ARRAY_SIZE(region);
        if (!is_t4(padap)) {
                u32 sge_ctrl = t4_read_reg(padap, A_SGE_CONTROL2);
                u32 fifo_size = t4_read_reg(padap, A_SGE_DBVFIFO_SIZE);

                size = 0;       /* don't report a stale size if VFIFO is off */
                if (is_t5(padap)) {
                        if (sge_ctrl & F_VFIFO_ENABLE)
                                size = G_DBVFIFO_SIZE(fifo_size);
                } else
                        size = G_T6_DBVFIFO_SIZE(fifo_size);

                if (size) {
                        md->base = G_BASEADDR(t4_read_reg(padap,
                                                          A_SGE_DBVFIFO_BADDR));
                        md->limit = md->base + (size << 2) - 1;
                }
        }

        md++;

        md->base = t4_read_reg(padap, A_ULP_RX_CTX_BASE);
        md->limit = 0;
        md++;
        md->base = t4_read_reg(padap, A_ULP_TX_ERR_TABLE_BASE);
        md->limit = 0;
        md++;
#ifndef __NO_DRIVER_OCQ_SUPPORT__
        /*md->base = padap->vres.ocq.start;*/
        /*if (adap->vres.ocq.size)*/
        /*        md->limit = md->base + adap->vres.ocq.size - 1;*/
        /*else*/
        md->idx = ARRAY_SIZE(region);  /* hide it */
        md++;
#endif

        /* add any address-space holes, there can be up to 3 */
        for (n = 0; n < i - 1; n++)
                if (meminfo_buff->avail[n].limit <
                    meminfo_buff->avail[n + 1].base)
                        (md++)->base = meminfo_buff->avail[n].limit;

        if (meminfo_buff->avail[n].limit)
                (md++)->base = meminfo_buff->avail[n].limit;

        n = (int) (md - meminfo_buff->mem);
        meminfo_buff->mem_c = n;

        qsort(meminfo_buff->mem, n, sizeof(struct struct_mem_desc),
            mem_desc_cmp);

        lo = t4_read_reg(padap, A_CIM_SDRAM_BASE_ADDR);
        hi = t4_read_reg(padap, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
        meminfo_buff->up_ram_lo = lo;
        meminfo_buff->up_ram_hi = hi;

        lo = t4_read_reg(padap, A_CIM_EXTMEM2_BASE_ADDR);
        hi = t4_read_reg(padap, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
        meminfo_buff->up_extmem2_lo = lo;
        meminfo_buff->up_extmem2_hi = hi;

        lo = t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE);
        meminfo_buff->rx_pages_data[0] = G_PMRXMAXPAGE(lo);
        meminfo_buff->rx_pages_data[1] =
                t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) >> 10;
        meminfo_buff->rx_pages_data[2] = (lo & F_PMRXNUMCHN) ? 2 : 1;

        lo = t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE);
        hi = t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE);
        meminfo_buff->tx_pages_data[0] = G_PMTXMAXPAGE(lo);
        meminfo_buff->tx_pages_data[1] =
                hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
        meminfo_buff->tx_pages_data[2] =
                hi >= (1 << 20) ? 'M' : 'K';
        meminfo_buff->tx_pages_data[3] = 1 << G_PMTXNUMCHN(lo);

        for (i = 0; i < 4; i++) {
                if (chip_id(padap) > CHELSIO_T5)
                        lo = t4_read_reg(padap,
                                         A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
                else
                        lo = t4_read_reg(padap, A_MPS_RX_PG_RSV0 + i * 4);
                if (is_t5(padap)) {
                        used = G_T5_USED(lo);
                        alloc = G_T5_ALLOC(lo);
                } else {
                        used = G_USED(lo);
                        alloc = G_ALLOC(lo);
                }
                meminfo_buff->port_used[i] = used;
                meminfo_buff->port_alloc[i] = alloc;
        }

        for (i = 0; i < padap->chip_params->nchan; i++) {
                if (chip_id(padap) > CHELSIO_T5)
                        lo = t4_read_reg(padap,
                                         A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
                else
                        lo = t4_read_reg(padap, A_MPS_RX_PG_RSV4 + i * 4);
                if (is_t5(padap)) {
                        used = G_T5_USED(lo);
                        alloc = G_T5_ALLOC(lo);
                } else {
                        used = G_USED(lo);
                        alloc = G_ALLOC(lo);
                }
                meminfo_buff->loopback_used[i] = used;
                meminfo_buff->loopback_alloc[i] = alloc;
        }
err:
        return rc;
}

static int collect_meminfo(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct struct_meminfo *meminfo_buff;
        int rc = 0;
        u32 size;

        size = sizeof(struct struct_meminfo);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        meminfo_buff = (struct struct_meminfo *)scratch_buff.data;

        rc = fill_meminfo(padap, meminfo_buff);
        if (rc)
                goto err1;      /* release the scratch buffer on failure */

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);
err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_lb_stats(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct lb_port_stats *tmp_stats;
        struct struct_lb_stats *lb_stats_buff;
        u32 i, n, size;
        int rc = 0;

        rc = padap->params.nports;
        if (rc < 0)
                goto err;

        n = rc;
        size = sizeof(struct struct_lb_stats) +
               n * sizeof(struct lb_port_stats);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        lb_stats_buff = (struct struct_lb_stats *) scratch_buff.data;

        lb_stats_buff->nchan = n;
        tmp_stats = lb_stats_buff->s;

        for (i = 0; i < n; i += 2, tmp_stats += 2) {
                t4_get_lb_stats(padap, i, tmp_stats);
                t4_get_lb_stats(padap, i + 1, tmp_stats + 1);
        }

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);
err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_rdma_stats(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer scratch_buff;
        struct tp_rdma_stats *rdma_stats_buff;
        u32 size;
        int rc = 0;

        size = sizeof(struct tp_rdma_stats);

        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        rdma_stats_buff = (struct tp_rdma_stats *) scratch_buff.data;

        /* spin_lock(&padap->stats_lock);       TODO*/
        t4_tp_get_rdma_stats(padap, rdma_stats_buff, 1);
        /* spin_unlock(&padap->stats_lock);     TODO*/

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);
err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}

static int collect_clk_info(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct cudbg_buffer scratch_buff;
        struct adapter *padap = pdbg_init->adap;
        struct struct_clk_info *clk_info_buff;
        u64 tp_tick_us;
        int size;
        int rc = 0;

        if (!padap->params.vpd.cclk) {
                rc = CUDBG_STATUS_CCLK_NOT_DEFINED;
                goto err;
        }

        size = sizeof(struct struct_clk_info);
        rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
        if (rc)
                goto err;

        clk_info_buff = (struct struct_clk_info *) scratch_buff.data;

        clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* in ps */
        clk_info_buff->res = t4_read_reg(padap, A_TP_TIMER_RESOLUTION);
        clk_info_buff->tre = G_TIMERRESOLUTION(clk_info_buff->res);
        clk_info_buff->dack_re = G_DELAYEDACKRESOLUTION(clk_info_buff->res);
        tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000; /* in us */
        clk_info_buff->dack_timer = ((clk_info_buff->cclk_ps <<
                                      clk_info_buff->dack_re) / 1000000) *
                                     t4_read_reg(padap, A_TP_DACK_TIMER);

        clk_info_buff->retransmit_min =
                tp_tick_us * t4_read_reg(padap, A_TP_RXT_MIN);
        clk_info_buff->retransmit_max =
                tp_tick_us * t4_read_reg(padap, A_TP_RXT_MAX);

        clk_info_buff->persist_timer_min =
                tp_tick_us * t4_read_reg(padap, A_TP_PERS_MIN);
        clk_info_buff->persist_timer_max =
                tp_tick_us * t4_read_reg(padap, A_TP_PERS_MAX);

        clk_info_buff->keepalive_idle_timer =
                tp_tick_us * t4_read_reg(padap, A_TP_KEEP_IDLE);
        clk_info_buff->keepalive_interval =
                tp_tick_us * t4_read_reg(padap, A_TP_KEEP_INTVL);

        clk_info_buff->initial_srtt =
                tp_tick_us * G_INITSRTT(t4_read_reg(padap, A_TP_INIT_SRTT));
        clk_info_buff->finwait2_timer =
                tp_tick_us * t4_read_reg(padap, A_TP_FINWAIT2_TIMER);

        rc = write_compression_hdr(&scratch_buff, dbg_buff);
        if (rc)
                goto err1;

        rc = compress_buff(&scratch_buff, dbg_buff);
err1:
        release_scratch_buff(&scratch_buff, dbg_buff);
err:
        return rc;
}
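
/*
 * Editorial worked example (illustrative values): the VPD reports cclk in
 * kHz, so a 250 MHz core clock gives cclk = 250000 and
 * cclk_ps = 1000000000 / 250000 = 4000 ps per core tick.  If the timer
 * resolution field were tre = 9, one TP timer tick would be
 * (4000 << 9) / 1000000 = 2 us, and registers such as A_TP_RXT_MIN are then
 * reported in those 2 us units after the tp_tick_us scaling above.
 */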
 1267 
 1268 static int collect_macstats(struct cudbg_init *pdbg_init,
 1269                             struct cudbg_buffer *dbg_buff,
 1270                             struct cudbg_error *cudbg_err)
 1271 {
 1272         struct adapter *padap = pdbg_init->adap;
 1273         struct cudbg_buffer scratch_buff;
 1274         struct struct_mac_stats_rev1 *mac_stats_buff;
 1275         u32 i, n, size;
 1276         int rc = 0;
 1277 
 1278         rc = padap->params.nports;
 1279         if (rc < 0)
 1280                 goto err;
 1281 
 1282         n = rc;
 1283         size = sizeof(struct struct_mac_stats_rev1);
 1284 
 1285         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1286         if (rc)
 1287                 goto err;
 1288 
 1289         mac_stats_buff = (struct struct_mac_stats_rev1 *) scratch_buff.data;
 1290 
 1291         mac_stats_buff->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
 1292         mac_stats_buff->ver_hdr.revision = CUDBG_MAC_STATS_REV;
 1293         mac_stats_buff->ver_hdr.size = sizeof(struct struct_mac_stats_rev1) -
 1294                                        sizeof(struct cudbg_ver_hdr);
 1295 
 1296         mac_stats_buff->port_count = n;
 1297         for (i = 0; i < mac_stats_buff->port_count; i++)
 1298                 t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);
 1299 
 1300         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1301         if (rc)
 1302                 goto err1;
 1303 
 1304         rc = compress_buff(&scratch_buff, dbg_buff);
 1305 err1:
 1306         release_scratch_buff(&scratch_buff, dbg_buff);
 1307 err:
 1308         return rc;
 1309 }
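
      /*
       * A note on the header above: versioned entities such as the MAC
       * stats carry a cudbg_ver_hdr (signature, revision, payload size)
       * so a decoder can identify the layout revision before parsing;
       * ver_hdr.size deliberately excludes the header itself.
       */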
 1310 
 1311 static int collect_cim_pif_la(struct cudbg_init *pdbg_init,
 1312                               struct cudbg_buffer *dbg_buff,
 1313                               struct cudbg_error *cudbg_err)
 1314 {
 1315         struct adapter *padap = pdbg_init->adap;
 1316         struct cudbg_buffer scratch_buff;
 1317         struct cim_pif_la *cim_pif_la_buff;
 1318         u32 size;
 1319         int rc = 0;
 1320 
 1321         size = sizeof(struct cim_pif_la) +
 1322                2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
 1323 
 1324         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1325         if (rc)
 1326                 goto err;
 1327 
 1328         cim_pif_la_buff = (struct cim_pif_la *) scratch_buff.data;
 1329         cim_pif_la_buff->size = CIM_PIFLA_SIZE;
 1330 
 1331         t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
 1332                            (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
 1333                            NULL, NULL);
 1334 
 1335         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1336         if (rc)
 1337                 goto err1;
 1338 
 1339         rc = compress_buff(&scratch_buff, dbg_buff);
 1340 err1:
 1341         release_scratch_buff(&scratch_buff, dbg_buff);
 1342 err:
 1343         return rc;
 1344 }
 1345 
 1346 static int collect_tp_la(struct cudbg_init *pdbg_init,
 1347                          struct cudbg_buffer *dbg_buff,
 1348                          struct cudbg_error *cudbg_err)
 1349 {
 1350         struct adapter *padap = pdbg_init->adap;
 1351         struct cudbg_buffer scratch_buff;
 1352         struct struct_tp_la *tp_la_buff;
 1353         u32 size;
 1354         int rc = 0;
 1355 
 1356         size = sizeof(struct struct_tp_la) + TPLA_SIZE * sizeof(u64);
 1357 
 1358         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1359         if (rc)
 1360                 goto err;
 1361 
 1362         tp_la_buff = (struct struct_tp_la *) scratch_buff.data;
 1363 
 1364         tp_la_buff->mode = G_DBGLAMODE(t4_read_reg(padap, A_TP_DBG_LA_CONFIG));
 1365         t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
 1366 
 1367         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1368         if (rc)
 1369                 goto err1;
 1370 
 1371         rc = compress_buff(&scratch_buff, dbg_buff);
 1372 err1:
 1373         release_scratch_buff(&scratch_buff, dbg_buff);
 1374 err:
 1375         return rc;
 1376 }
 1377 
 1378 static int collect_fcoe_stats(struct cudbg_init *pdbg_init,
 1379                               struct cudbg_buffer *dbg_buff,
 1380                               struct cudbg_error *cudbg_err)
 1381 {
 1382         struct adapter *padap = pdbg_init->adap;
 1383         struct cudbg_buffer scratch_buff;
 1384         struct struct_tp_fcoe_stats *tp_fcoe_stats_buff;
 1385         u32 size;
 1386         int rc = 0;
 1387 
 1388         size = sizeof(struct struct_tp_fcoe_stats);
 1389 
 1390         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1391         if (rc)
 1392                 goto err;
 1393 
 1394         tp_fcoe_stats_buff = (struct struct_tp_fcoe_stats *) scratch_buff.data;
 1395 
 1396         /* TODO: spin_lock(&padap->stats_lock); */
 1397         t4_get_fcoe_stats(padap, 0, &tp_fcoe_stats_buff->stats[0], 1);
 1398         t4_get_fcoe_stats(padap, 1, &tp_fcoe_stats_buff->stats[1], 1);
 1399         if (padap->chip_params->nchan == NCHAN) {
 1400                 t4_get_fcoe_stats(padap, 2, &tp_fcoe_stats_buff->stats[2], 1);
 1401                 t4_get_fcoe_stats(padap, 3, &tp_fcoe_stats_buff->stats[3], 1);
 1402         }
 1403         /* TODO: spin_unlock(&padap->stats_lock); */
 1404 
 1405         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1406         if (rc)
 1407                 goto err1;
 1408 
 1409         rc = compress_buff(&scratch_buff, dbg_buff);
 1410 err1:
 1411         release_scratch_buff(&scratch_buff, dbg_buff);
 1412 err:
 1413         return rc;
 1414 }
 1415 
 1416 static int collect_tp_err_stats(struct cudbg_init *pdbg_init,
 1417                                 struct cudbg_buffer *dbg_buff,
 1418                                 struct cudbg_error *cudbg_err)
 1419 {
 1420         struct adapter *padap = pdbg_init->adap;
 1421         struct cudbg_buffer scratch_buff;
 1422         struct struct_tp_err_stats *tp_err_stats_buff;
 1423         u32 size;
 1424         int rc = 0;
 1425 
 1426         size = sizeof(struct struct_tp_err_stats);
 1427 
 1428         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1429         if (rc)
 1430                 goto err;
 1431 
 1432         tp_err_stats_buff = (struct struct_tp_err_stats *) scratch_buff.data;
 1433 
 1434         /* TODO: spin_lock(&padap->stats_lock); */
 1435         t4_tp_get_err_stats(padap, &tp_err_stats_buff->stats, 1);
 1436         /* TODO: spin_unlock(&padap->stats_lock); */
 1437         tp_err_stats_buff->nchan = padap->chip_params->nchan;
 1438 
 1439         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1440         if (rc)
 1441                 goto err1;
 1442 
 1443         rc = compress_buff(&scratch_buff, dbg_buff);
 1444 err1:
 1445         release_scratch_buff(&scratch_buff, dbg_buff);
 1446 err:
 1447         return rc;
 1448 }
 1449 
 1450 static int collect_tcp_stats(struct cudbg_init *pdbg_init,
 1451                              struct cudbg_buffer *dbg_buff,
 1452                              struct cudbg_error *cudbg_err)
 1453 {
 1454         struct adapter *padap = pdbg_init->adap;
 1455         struct cudbg_buffer scratch_buff;
 1456         struct struct_tcp_stats *tcp_stats_buff;
 1457         u32 size;
 1458         int rc = 0;
 1459 
 1460         size = sizeof(struct struct_tcp_stats);
 1461 
 1462         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1463         if (rc)
 1464                 goto err;
 1465 
 1466         tcp_stats_buff = (struct struct_tcp_stats *) scratch_buff.data;
 1467 
 1468         /* TODO: spin_lock(&padap->stats_lock); */
 1469         t4_tp_get_tcp_stats(padap, &tcp_stats_buff->v4, &tcp_stats_buff->v6, 1);
 1470         /* TODO: spin_unlock(&padap->stats_lock); */
 1471 
 1472         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1473         if (rc)
 1474                 goto err1;
 1475 
 1476         rc = compress_buff(&scratch_buff, dbg_buff);
 1477 err1:
 1478         release_scratch_buff(&scratch_buff, dbg_buff);
 1479 err:
 1480         return rc;
 1481 }
 1482 
 1483 static int collect_hw_sched(struct cudbg_init *pdbg_init,
 1484                             struct cudbg_buffer *dbg_buff,
 1485                             struct cudbg_error *cudbg_err)
 1486 {
 1487         struct adapter *padap = pdbg_init->adap;
 1488         struct cudbg_buffer scratch_buff;
 1489         struct struct_hw_sched *hw_sched_buff;
 1490         u32 size;
 1491         int i, rc = 0;
 1492 
 1493         if (!padap->params.vpd.cclk) {
 1494                 rc = CUDBG_STATUS_CCLK_NOT_DEFINED;
 1495                 goto err;
 1496         }
 1497 
 1498         size = sizeof(struct struct_hw_sched);
 1499         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1500         if (rc)
 1501                 goto err;
 1502 
 1503         hw_sched_buff = (struct struct_hw_sched *) scratch_buff.data;
 1504 
 1505         hw_sched_buff->map = t4_read_reg(padap, A_TP_TX_MOD_QUEUE_REQ_MAP);
 1506         hw_sched_buff->mode = G_TIMERMODE(t4_read_reg(padap, A_TP_MOD_CONFIG));
 1507         t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
 1508 
 1509         for (i = 0; i < NTX_SCHED; ++i) {
 1510                 t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
 1511                     &hw_sched_buff->ipg[i], 1);
 1512         }
 1513 
 1514         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1515         if (rc)
 1516                 goto err1;
 1517 
 1518         rc = compress_buff(&scratch_buff, dbg_buff);
 1519 err1:
 1520         release_scratch_buff(&scratch_buff, dbg_buff);
 1521 err:
 1522         return rc;
 1523 }
 1524 
 1525 static int collect_pm_stats(struct cudbg_init *pdbg_init,
 1526                             struct cudbg_buffer *dbg_buff,
 1527                             struct cudbg_error *cudbg_err)
 1528 {
 1529         struct adapter *padap = pdbg_init->adap;
 1530         struct cudbg_buffer scratch_buff;
 1531         struct struct_pm_stats *pm_stats_buff;
 1532         u32 size;
 1533         int rc = 0;
 1534 
 1535         size = sizeof(struct struct_pm_stats);
 1536 
 1537         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1538         if (rc)
 1539                 goto err;
 1540 
 1541         pm_stats_buff = (struct struct_pm_stats *) scratch_buff.data;
 1542 
 1543         t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
 1544         t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
 1545 
 1546         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1547         if (rc)
 1548                 goto err1;
 1549 
 1550         rc = compress_buff(&scratch_buff, dbg_buff);
 1551 err1:
 1552         release_scratch_buff(&scratch_buff, dbg_buff);
 1553 err:
 1554         return rc;
 1555 }
 1556 
 1557 static int collect_path_mtu(struct cudbg_init *pdbg_init,
 1558                             struct cudbg_buffer *dbg_buff,
 1559                             struct cudbg_error *cudbg_err)
 1560 {
 1561         struct adapter *padap = pdbg_init->adap;
 1562         struct cudbg_buffer scratch_buff;
 1563         u32 size;
 1564         int rc = 0;
 1565 
 1566         size = NMTUS * sizeof(u16);
 1567 
 1568         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1569         if (rc)
 1570                 goto err;
 1571 
 1572         t4_read_mtu_tbl(padap, (u16 *)scratch_buff.data, NULL);
 1573 
 1574         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1575         if (rc)
 1576                 goto err1;
 1577 
 1578         rc = compress_buff(&scratch_buff, dbg_buff);
 1579 err1:
 1580         release_scratch_buff(&scratch_buff, dbg_buff);
 1581 err:
 1582         return rc;
 1583 }
 1584 
 1585 static int collect_rss_key(struct cudbg_init *pdbg_init,
 1586                            struct cudbg_buffer *dbg_buff,
 1587                            struct cudbg_error *cudbg_err)
 1588 {
 1589         struct adapter *padap = pdbg_init->adap;
 1590         struct cudbg_buffer scratch_buff;
 1591         u32 size;
 1592         int rc = 0;
 1593 
 1594 
 1595         size = 10 * sizeof(u32);
 1596         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1597         if (rc)
 1598                 goto err;
 1599 
 1600         t4_read_rss_key(padap, (u32 *)scratch_buff.data, 1);
 1601 
 1602         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1603         if (rc)
 1604                 goto err1;
 1605 
 1606         rc = compress_buff(&scratch_buff, dbg_buff);
 1607 err1:
 1608         release_scratch_buff(&scratch_buff, dbg_buff);
 1609 err:
 1610         return rc;
 1611 }
 1612 
 1613 static int collect_rss_config(struct cudbg_init *pdbg_init,
 1614                               struct cudbg_buffer *dbg_buff,
 1615                               struct cudbg_error *cudbg_err)
 1616 {
 1617         struct adapter *padap = pdbg_init->adap;
 1618         struct cudbg_buffer scratch_buff;
 1619         struct rss_config *rss_conf;
 1620         int rc;
 1621         u32 size;
 1622 
 1623         size = sizeof(struct rss_config);
 1624 
 1625         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1626         if (rc)
 1627                 goto err;
 1628 
 1629         rss_conf = (struct rss_config *)scratch_buff.data;
 1630 
 1631         rss_conf->tp_rssconf = t4_read_reg(padap, A_TP_RSS_CONFIG);
 1632         rss_conf->tp_rssconf_tnl = t4_read_reg(padap, A_TP_RSS_CONFIG_TNL);
 1633         rss_conf->tp_rssconf_ofd = t4_read_reg(padap, A_TP_RSS_CONFIG_OFD);
 1634         rss_conf->tp_rssconf_syn = t4_read_reg(padap, A_TP_RSS_CONFIG_SYN);
 1635         rss_conf->tp_rssconf_vrt = t4_read_reg(padap, A_TP_RSS_CONFIG_VRT);
 1636         rss_conf->tp_rssconf_cng = t4_read_reg(padap, A_TP_RSS_CONFIG_CNG);
 1637         rss_conf->chip = padap->params.chipid;
 1638 
 1639         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1640         if (rc)
 1641                 goto err1;
 1642 
 1643         rc = compress_buff(&scratch_buff, dbg_buff);
 1644 
 1645 err1:
 1646         release_scratch_buff(&scratch_buff, dbg_buff);
 1647 err:
 1648         return rc;
 1649 }
 1650 
 1651 static int collect_rss_vf_config(struct cudbg_init *pdbg_init,
 1652                                  struct cudbg_buffer *dbg_buff,
 1653                                  struct cudbg_error *cudbg_err)
 1654 {
 1655         struct adapter *padap = pdbg_init->adap;
 1656         struct cudbg_buffer scratch_buff;
 1657         struct rss_vf_conf *vfconf;
 1658         int vf, rc, vf_count;
 1659         u32 size;
 1660 
 1661         vf_count = padap->chip_params->vfcount;
 1662         size = vf_count * sizeof(*vfconf);
 1663 
 1664         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1665         if (rc)
 1666                 goto err;
 1667 
 1668         vfconf = (struct rss_vf_conf *)scratch_buff.data;
 1669 
 1670         for (vf = 0; vf < vf_count; vf++) {
 1671                 t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
 1672                                       &vfconf[vf].rss_vf_vfh, 1);
 1673         }
 1674 
 1675         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1676         if (rc)
 1677                 goto err1;
 1678 
 1679         rc = compress_buff(&scratch_buff, dbg_buff);
 1680 
 1681 err1:
 1682         release_scratch_buff(&scratch_buff, dbg_buff);
 1683 err:
 1684         return rc;
 1685 }
 1686 
 1687 static int collect_rss_pf_config(struct cudbg_init *pdbg_init,
 1688                                  struct cudbg_buffer *dbg_buff,
 1689                                  struct cudbg_error *cudbg_err)
 1690 {
 1691         struct cudbg_buffer scratch_buff;
 1692         struct rss_pf_conf *pfconf;
 1693         struct adapter *padap = pdbg_init->adap;
 1694         u32 rss_pf_map, rss_pf_mask, size;
 1695         int pf, rc;
 1696 
 1697         size = 8 * sizeof(*pfconf);
 1698 
 1699         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1700         if (rc)
 1701                 goto err;
 1702 
 1703         pfconf = (struct rss_pf_conf *)scratch_buff.data;
 1704 
 1705         rss_pf_map = t4_read_rss_pf_map(padap, 1);
 1706         rss_pf_mask = t4_read_rss_pf_mask(padap, 1);
 1707 
 1708         for (pf = 0; pf < 8; pf++) {
 1709                 pfconf[pf].rss_pf_map = rss_pf_map;
 1710                 pfconf[pf].rss_pf_mask = rss_pf_mask;
 1711                 /* no return val */
 1712                 t4_read_rss_pf_config(padap, pf, &pfconf[pf].rss_pf_config, 1);
 1713         }
 1714 
 1715         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1716         if (rc)
 1717                 goto err1;
 1718 
 1719         rc = compress_buff(&scratch_buff, dbg_buff);
 1720 err1:
 1721         release_scratch_buff(&scratch_buff, dbg_buff);
 1722 err:
 1723         return rc;
 1724 }
 1725 
 1726 static int check_valid(u32 *buf, int type)
 1727 {
 1728         int index;
 1729         int bit;
 1730         int bit_pos = 0;
 1731 
 1732         switch (type) {
 1733         case CTXT_EGRESS:
 1734                 bit_pos = 176;
 1735                 break;
 1736         case CTXT_INGRESS:
 1737                 bit_pos = 141;
 1738                 break;
 1739         case CTXT_FLM:
 1740                 bit_pos = 89;
 1741                 break;
 1742         }
 1743         index = bit_pos / 32;
 1744         bit = bit_pos % 32;
 1745 
 1746         return buf[index] & (1U << bit);
 1747 }
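
      /*
       * Example, using the values from the switch above: an egress
       * context's valid bit sits at bit position 176 of the raw context
       * image, i.e. word 176 / 32 = 5, bit 176 % 32 = 16, so check_valid()
       * tests buf[5] & (1U << 16).  Likewise, ingress tests buf[4] bit 13
       * and FLM tests buf[2] bit 25.
       */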
 1748 
 1749 /**
 1750  * Get EGRESS, INGRESS, FLM, and CNM max qid.
 1751  *
 1752  * For EGRESS and INGRESS, do the following calculation.
 1753  * max_qid = (DBQ/IMSG context region size in bytes) /
 1754  *           (size of context in bytes).
 1755  *
 1756  * For FLM, do the following calculation.
 1757  * max_qid = (FLM cache region size in bytes) /
 1758  *           ((number of pointers cached in EDRAM) * 8 (bytes per pointer)).
 1759  *
 1760  * There is a 1-to-1 mapping between FLM and CNM if header splitting is
 1761  * disabled; i.e., max CNM qid is equal to max FLM qid. However, if header
 1762  * splitting is enabled, then max CNM qid is half of max FLM qid.
 1763  */
 1764 static int get_max_ctxt_qid(struct adapter *padap,
 1765                             struct struct_meminfo *meminfo,
 1766                             u32 *max_ctx_qid, u8 nelem)
 1767 {
 1768         u32 i, idx, found = 0;
 1769 
 1770         if (nelem != (CTXT_CNM + 1))
 1771                 return -EINVAL;
 1772 
 1773         for (i = 0; i < meminfo->mem_c; i++) {
 1774                 if (meminfo->mem[i].idx >= ARRAY_SIZE(region))
 1775                         continue;                        /* skip holes */
 1776 
 1777                 idx = meminfo->mem[i].idx;
 1778                 /* Get DBQ, IMSG, and FLM context region size */
 1779                 if (idx <= CTXT_FLM) {
 1780                         if (!(meminfo->mem[i].limit))
 1781                                 meminfo->mem[i].limit =
 1782                                         i < meminfo->mem_c - 1 ?
 1783                                         meminfo->mem[i + 1].base - 1 : ~0;
 1784 
 1785                         if (idx < CTXT_FLM) {
 1786                                 /* Get EGRESS and INGRESS max qid. */
 1787                                 max_ctx_qid[idx] = (meminfo->mem[i].limit -
 1788                                                     meminfo->mem[i].base + 1) /
 1789                                                    CUDBG_CTXT_SIZE_BYTES;
 1790                                 found++;
 1791                         } else {
 1792                                 /* Get FLM and CNM max qid. */
 1793                                 u32 value, edram_ptr_count;
 1794                                 u8 bytes_per_ptr = 8;
 1795                                 u8 nohdr;
 1796 
 1797                                 value = t4_read_reg(padap, A_SGE_FLM_CFG);
 1798 
 1799                                 /* Check if header splitting is enabled. */
 1800                                 nohdr = (value >> S_NOHDR) & 1U;
 1801 
 1802                                 /* Get the number of pointers cached in
 1803                                  * EDRAM per qid: 32 << EDRAMPTRCNT.
 1804                                  */
 1805                                 edram_ptr_count = 32 *
 1806                                                   (1U << G_EDRAMPTRCNT(value));
 1807 
 1808                                 /* An EDRAMPTRCNT value of 3 is reserved,
 1809                                  * so cap the count at 128.
 1810                                  */
 1811                                 if (edram_ptr_count > 128)
 1812                                         edram_ptr_count = 128;
 1813 
 1814                                 max_ctx_qid[idx] = (meminfo->mem[i].limit -
 1815                                                     meminfo->mem[i].base + 1) /
 1816                                                    (edram_ptr_count *
 1817                                                     bytes_per_ptr);
 1818                                 found++;
 1819 
 1820                                 /* CNM has 1-to-1 mapping with FLM.
 1821                                  * However, if header splitting is enabled,
 1822                                  * then max CNM qid is half of max FLM qid.
 1823                                  */
 1824                                 max_ctx_qid[CTXT_CNM] = nohdr ?
 1825                                                         max_ctx_qid[idx] :
 1826                                                         max_ctx_qid[idx] >> 1;
 1827 
 1828                                 /* One more increment for CNM */
 1829                                 found++;
 1830                         }
 1831                 }
 1832                 if (found == nelem)
 1833                         break;
 1834         }
 1835 
 1836         /* Sanity check. Ensure the values are within known max. */
 1837         max_ctx_qid[CTXT_EGRESS] = min_t(u32, max_ctx_qid[CTXT_EGRESS],
 1838                                          M_CTXTQID);
 1839         max_ctx_qid[CTXT_INGRESS] = min_t(u32, max_ctx_qid[CTXT_INGRESS],
 1840                                           CUDBG_MAX_INGRESS_QIDS);
 1841         max_ctx_qid[CTXT_FLM] = min_t(u32, max_ctx_qid[CTXT_FLM],
 1842                                       CUDBG_MAX_FL_QIDS);
 1843         max_ctx_qid[CTXT_CNM] = min_t(u32, max_ctx_qid[CTXT_CNM],
 1844                                       CUDBG_MAX_CNM_QIDS);
 1845         return 0;
 1846 }
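
      /*
       * Worked example of the FLM/CNM case (hypothetical sizes): given a
       * 1 MiB FLM cache region and EDRAMPTRCNT = 1, the chip caches
       * 32 << 1 = 64 pointers of 8 bytes each per qid, so
       * max_ctx_qid[CTXT_FLM] = 1048576 / (64 * 8) = 2048.  If header
       * splitting is enabled (NOHDR clear), max_ctx_qid[CTXT_CNM] is then
       * 2048 >> 1 = 1024.
       */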
 1847 
 1848 static int collect_dump_context(struct cudbg_init *pdbg_init,
 1849                                 struct cudbg_buffer *dbg_buff,
 1850                                 struct cudbg_error *cudbg_err)
 1851 {
 1852         struct cudbg_buffer scratch_buff;
 1853         struct cudbg_buffer temp_buff;
 1854         struct adapter *padap = pdbg_init->adap;
 1855         u32 size = 0, next_offset = 0, total_size = 0;
 1856         struct cudbg_ch_cntxt *buff = NULL;
 1857         struct struct_meminfo meminfo;
 1858         int bytes = 0;
 1859         int rc = 0;
 1860         u32 i, j;
 1861         u32 max_ctx_qid[CTXT_CNM + 1];
 1862         bool limit_qid = false;
 1863         u32 qid_count = 0;
 1864 
 1865         rc = fill_meminfo(padap, &meminfo);
 1866         if (rc)
 1867                 goto err;
 1868 
 1869         /* Get max valid qid for each type of queue */
 1870         rc = get_max_ctxt_qid(padap, &meminfo, max_ctx_qid, CTXT_CNM + 1);
 1871         if (rc)
 1872                 goto err;
 1873 
 1874         /* There are four types of queues. Collect the context of up to
 1875          * the max qid of each queue type.
 1876          */
 1877         for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
 1878                 size += sizeof(struct cudbg_ch_cntxt) * max_ctx_qid[i];
 1879 
 1880         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1881         if (rc == CUDBG_STATUS_NO_SCRATCH_MEM) {
 1882                 /* Not enough scratch memory available. Collect the
 1883                  * context of at least CUDBG_LOWMEM_MAX_CTXT_QIDS qids
 1884                  * for each queue type.
 1885                  */
 1886                 size = 0;
 1887                 for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
 1888                         size += sizeof(struct cudbg_ch_cntxt) *
 1889                                 CUDBG_LOWMEM_MAX_CTXT_QIDS;
 1890 
 1891                 limit_qid = true;
 1892                 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 1893                 if (rc)
 1894                         goto err;
 1895         }
 1896 
 1897         buff = (struct cudbg_ch_cntxt *)scratch_buff.data;
 1898 
 1899         /* Collect context data */
 1900         for (i = CTXT_EGRESS; i <= CTXT_FLM; i++) {
 1901                 qid_count = 0;
 1902                 for (j = 0; j < max_ctx_qid[i]; j++) {
 1903                         read_sge_ctxt(pdbg_init, j, i, buff->data);
 1904 
 1905                         rc = check_valid(buff->data, i);
 1906                         if (rc) {
 1907                                 buff->cntxt_type = i;
 1908                                 buff->cntxt_id = j;
 1909                                 buff++;
 1910                                 total_size += sizeof(struct cudbg_ch_cntxt);
 1911 
 1912                                 if (i == CTXT_FLM) {
 1913                                         read_sge_ctxt(pdbg_init, j, CTXT_CNM,
 1914                                                       buff->data);
 1915                                         buff->cntxt_type = CTXT_CNM;
 1916                                         buff->cntxt_id = j;
 1917                                         buff++;
 1918                                         total_size +=
 1919                                                 sizeof(struct cudbg_ch_cntxt);
 1920                                 }
 1921                                 qid_count++;
 1922                         }
 1923 
 1924                         /* If there's not enough space to collect more qids,
 1925                          * qids, bail and move on to the next queue type.
 1926                          */
 1927                         if (limit_qid &&
 1928                             qid_count >= CUDBG_LOWMEM_MAX_CTXT_QIDS)
 1929                                 break;
 1930                 }
 1931         }
 1932 
 1933         scratch_buff.size = total_size;
 1934         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 1935         if (rc)
 1936                 goto err1;
 1937 
 1938         /* Split the buffer into CUDBG_CHUNK_SIZE chunks and compress each. */
 1939         while (total_size > 0) {
 1940                 bytes = min_t(unsigned long, (unsigned long)total_size,
 1941                               (unsigned long)CUDBG_CHUNK_SIZE);
 1942                 temp_buff.size = bytes;
 1943                 temp_buff.data = (void *)((char *)scratch_buff.data +
 1944                                           next_offset);
 1945 
 1946                 rc = compress_buff(&temp_buff, dbg_buff);
 1947                 if (rc)
 1948                         goto err1;
 1949 
 1950                 total_size -= bytes;
 1951                 next_offset += bytes;
 1952         }
 1953 
 1954 err1:
 1955         scratch_buff.size = size;
 1956         release_scratch_buff(&scratch_buff, dbg_buff);
 1957 err:
 1958         return rc;
 1959 }
 1960 
 1961 static int collect_fw_devlog(struct cudbg_init *pdbg_init,
 1962                              struct cudbg_buffer *dbg_buff,
 1963                              struct cudbg_error *cudbg_err)
 1964 {
 1965 #ifdef notyet
 1966         struct adapter *padap = pdbg_init->adap;
 1967         struct devlog_params *dparams = &padap->params.devlog;
 1968         struct cudbg_param *params = NULL;
 1969         struct cudbg_buffer scratch_buff;
 1970         u32 offset;
 1971         int rc = 0, i;
 1972 
 1973         rc = t4_init_devlog_params(padap, 1);
 1974 
 1975         if (rc < 0) {
 1976                 pdbg_init->print("%s(): t4_init_devlog_params failed, rc: "
 1977                                  "%d\n", __func__, rc);
 1978                 for (i = 0; i < pdbg_init->dbg_params_cnt; i++) {
 1979                         if (pdbg_init->dbg_params[i].param_type ==
 1980                             CUDBG_DEVLOG_PARAM) {
 1981                                 params = &pdbg_init->dbg_params[i];
 1982                                 break;
 1983                         }
 1984                 }
 1985 
 1986                 if (params) {
 1987                         dparams->memtype = params->u.devlog_param.memtype;
 1988                         dparams->start = params->u.devlog_param.start;
 1989                         dparams->size = params->u.devlog_param.size;
 1990                 } else {
 1991                         cudbg_err->sys_err = rc;
 1992                         goto err;
 1993                 }
 1994         }
 1995 
 1996         rc = get_scratch_buff(dbg_buff, dparams->size, &scratch_buff);
 1997 
 1998         if (rc)
 1999                 goto err;
 2000 
 2001         /* Collect FW devlog */
 2002         if (dparams->start != 0) {
 2003                 offset = scratch_buff.offset;
 2004                 rc = t4_memory_rw(padap, padap->params.drv_memwin,
 2005                                   dparams->memtype, dparams->start,
 2006                                   dparams->size,
 2007                                   (__be32 *)((char *)scratch_buff.data +
 2008                                              offset), 1);
 2009 
 2010                 if (rc) {
 2011                         pdbg_init->print("%s(): t4_memory_rw failed, rc: "
 2012                                          "%d\n", __func__, rc);
 2013                         cudbg_err->sys_err = rc;
 2014                         goto err1;
 2015                 }
 2016         }
 2017 
 2018         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 2019 
 2020         if (rc)
 2021                 goto err1;
 2022 
 2023         rc = compress_buff(&scratch_buff, dbg_buff);
 2024 
 2025 err1:
 2026         release_scratch_buff(&scratch_buff, dbg_buff);
 2027 err:
 2028         return rc;
 2029 #endif
 2030         return (CUDBG_STATUS_NOT_IMPLEMENTED);
 2031 }
 2032 /* CIM OBQ */
 2033 
 2034 static int collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
 2035                                 struct cudbg_buffer *dbg_buff,
 2036                                 struct cudbg_error *cudbg_err)
 2037 {
 2038         int rc = 0, qid = 0;
 2039 
 2040         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
 2041 
 2042         return rc;
 2043 }
 2044 
 2045 static int collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
 2046                                 struct cudbg_buffer *dbg_buff,
 2047                                 struct cudbg_error *cudbg_err)
 2048 {
 2049         int rc = 0, qid = 1;
 2050 
 2051         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
 2052 
 2053         return rc;
 2054 }
 2055 
 2056 static int collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
 2057                                 struct cudbg_buffer *dbg_buff,
 2058                                 struct cudbg_error *cudbg_err)
 2059 {
 2060         int rc = 0, qid = 2;
 2061 
 2062         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
 2063 
 2064         return rc;
 2065 }
 2066 
 2067 static int collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
 2068                                 struct cudbg_buffer *dbg_buff,
 2069                                 struct cudbg_error *cudbg_err)
 2070 {
 2071         int rc = 0, qid = 3;
 2072 
 2073         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
 2074 
 2075         return rc;
 2076 }
 2077 
 2078 static int collect_cim_obq_sge(struct cudbg_init *pdbg_init,
 2079                                struct cudbg_buffer *dbg_buff,
 2080                                struct cudbg_error *cudbg_err)
 2081 {
 2082         int rc = 0, qid = 4;
 2083 
 2084         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
 2085 
 2086         return rc;
 2087 }
 2088 
 2089 static int collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
 2090                                 struct cudbg_buffer *dbg_buff,
 2091                                 struct cudbg_error *cudbg_err)
 2092 {
 2093         int rc = 0, qid = 5;
 2094 
 2095         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
 2096 
 2097         return rc;
 2098 }
 2099 
 2100 static int collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
 2101                                  struct cudbg_buffer *dbg_buff,
 2102                                  struct cudbg_error *cudbg_err)
 2103 {
 2104         int rc = 0, qid = 6;
 2105 
 2106         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
 2107 
 2108         return rc;
 2109 }
 2110 
 2111 static int collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
 2112                                  struct cudbg_buffer *dbg_buff,
 2113                                  struct cudbg_error *cudbg_err)
 2114 {
 2115         int rc = 0, qid = 7;
 2116 
 2117         rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
 2118 
 2119         return rc;
 2120 }
 2121 
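      /*
       * CIM OBQ qids as used by the collectors above: 0-3 are ULP0-ULP3,
       * 4 is SGE, 5 is NCSI, and 6-7 are the SGE0-RX/SGE1-RX queues found
       * on T5 and later chips.
       */
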
 2122 static int read_cim_obq(struct cudbg_init *pdbg_init,
 2123                         struct cudbg_buffer *dbg_buff,
 2124                         struct cudbg_error *cudbg_err, int qid)
 2125 {
 2126         struct cudbg_buffer scratch_buff;
 2127         struct adapter *padap = pdbg_init->adap;
 2128         u32 qsize;
 2129         int rc;
 2130         int no_of_read_words;
 2131 
 2132         /* collect CIM OBQ */
 2133         qsize = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32);
 2134         rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
 2135         if (rc)
 2136                 goto err;
 2137 
 2138         /* t4_read_cim_obq() returns the number of words read or an error. */
 2139         no_of_read_words = t4_read_cim_obq(padap, qid,
 2140                                            (u32 *)((char *)scratch_buff.data +
 2141                                            scratch_buff.offset), qsize);
 2142 
 2143         /* A result of zero or less indicates an error. */
 2144         if (no_of_read_words <= 0) {
 2145                 if (no_of_read_words == 0)
 2146                         rc = CUDBG_SYSTEM_ERROR;
 2147                 else
 2148                         rc = no_of_read_words;
 2149                 if (pdbg_init->verbose)
 2150                         pdbg_init->print("%s: t4_read_cim_obq failed (%d)\n",
 2151                                  __func__, rc);
 2152                 cudbg_err->sys_err = rc;
 2153                 goto err1;
 2154         }
 2155 
 2156         scratch_buff.size = no_of_read_words * 4;
 2157 
 2158         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 2159 
 2160         if (rc)
 2161                 goto err1;
 2162 
 2163         rc = compress_buff(&scratch_buff, dbg_buff);
 2164 
 2165         if (rc)
 2166                 goto err1;
 2167 
 2168 err1:
 2169         release_scratch_buff(&scratch_buff, dbg_buff);
 2170 err:
 2171         return rc;
 2172 }
 2173 
 2174 /* CIM IBQ */
 2175 
 2176 static int collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
 2177                                struct cudbg_buffer *dbg_buff,
 2178                                struct cudbg_error *cudbg_err)
 2179 {
 2180         int rc = 0, qid = 0;
 2181 
 2182         rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
 2183         return rc;
 2184 }
 2185 
 2186 static int collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
 2187                                struct cudbg_buffer *dbg_buff,
 2188                                struct cudbg_error *cudbg_err)
 2189 {
 2190         int rc = 0, qid = 1;
 2191 
 2192         rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
 2193         return rc;
 2194 }
 2195 
 2196 static int collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
 2197                                struct cudbg_buffer *dbg_buff,
 2198                                struct cudbg_error *cudbg_err)
 2199 {
 2200         int rc = 0, qid = 2;
 2201 
 2202         rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
 2203         return rc;
 2204 }
 2205 
 2206 static int collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
 2207                                 struct cudbg_buffer *dbg_buff,
 2208                                 struct cudbg_error *cudbg_err)
 2209 {
 2210         int rc = 0, qid = 3;
 2211 
 2212         rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
 2213         return rc;
 2214 }
 2215 
 2216 static int collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
 2217                                 struct cudbg_buffer *dbg_buff,
 2218                                 struct cudbg_error *cudbg_err)
 2219 {
 2220         int rc = 0, qid = 4;
 2221 
 2222         rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
 2223         return rc;
 2224 }
 2225 
 2226 static int collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
 2227                                 struct cudbg_buffer *dbg_buff,
 2228                                 struct cudbg_error *cudbg_err)
 2229 {
 2230         int rc, qid = 5;
 2231 
 2232         rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
 2233         return rc;
 2234 }
 2235 
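      /*
       * CIM IBQ qids as used by the collectors above: 0 is TP0, 1 is TP1,
       * 2 is ULP, 3 is SGE0, 4 is SGE1, and 5 is NCSI.
       */
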
 2236 static int read_cim_ibq(struct cudbg_init *pdbg_init,
 2237                         struct cudbg_buffer *dbg_buff,
 2238                         struct cudbg_error *cudbg_err, int qid)
 2239 {
 2240         struct adapter *padap = pdbg_init->adap;
 2241         struct cudbg_buffer scratch_buff;
 2242         u32 qsize;
 2243         int rc;
 2244         int no_of_read_words;
 2245 
 2246         /* collect CIM IBQ */
 2247         qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
 2248         rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
 2249 
 2250         if (rc)
 2251                 goto err;
 2252 
 2253         /* t4_read_cim_ibq() returns the number of words read or an error. */
 2254         no_of_read_words = t4_read_cim_ibq(padap, qid,
 2255                                            (u32 *)((char *)scratch_buff.data +
 2256                                            scratch_buff.offset), qsize);
 2257         /* A result of zero or less indicates an error. */
 2258         if (no_of_read_words <= 0) {
 2259                 if (no_of_read_words == 0)
 2260                         rc = CUDBG_SYSTEM_ERROR;
 2261                 else
 2262                         rc = no_of_read_words;
 2263                 if (pdbg_init->verbose)
 2264                         pdbg_init->print("%s: t4_read_cim_ibq failed (%d)\n",
 2265                                  __func__, rc);
 2266                 cudbg_err->sys_err = rc;
 2267                 goto err1;
 2268         }
 2269 
 2270         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 2271         if (rc)
 2272                 goto err1;
 2273 
 2274         rc = compress_buff(&scratch_buff, dbg_buff);
 2275         if (rc)
 2276                 goto err1;
 2277 
 2278 err1:
 2279         release_scratch_buff(&scratch_buff, dbg_buff);
 2280 
 2281 err:
 2282         return rc;
 2283 }
 2284 
 2285 static int collect_cim_ma_la(struct cudbg_init *pdbg_init,
 2286                              struct cudbg_buffer *dbg_buff,
 2287                              struct cudbg_error *cudbg_err)
 2288 {
 2289         struct cudbg_buffer scratch_buff;
 2290         struct adapter *padap = pdbg_init->adap;
 2291         int rc = 0;
 2292 
 2293         /* collect CIM MA LA */
 2294         scratch_buff.size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
 2295         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 2296         if (rc)
 2297                 goto err;
 2298 
 2299         /* no return value */
 2300         t4_cim_read_ma_la(padap,
 2301                           (u32 *) ((char *)scratch_buff.data +
 2302                                    scratch_buff.offset),
 2303                           (u32 *) ((char *)scratch_buff.data +
 2304                                    scratch_buff.offset + 5 * CIM_MALA_SIZE));
 2305 
 2306         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 2307         if (rc)
 2308                 goto err1;
 2309 
 2310         rc = compress_buff(&scratch_buff, dbg_buff);
 2311 
 2312 err1:
 2313         release_scratch_buff(&scratch_buff, dbg_buff);
 2314 err:
 2315         return rc;
 2316 }
 2317 
 2318 static int collect_cim_la(struct cudbg_init *pdbg_init,
 2319                           struct cudbg_buffer *dbg_buff,
 2320                           struct cudbg_error *cudbg_err)
 2321 {
 2322         struct cudbg_buffer scratch_buff;
 2323         struct adapter *padap = pdbg_init->adap;
 2324 
 2325         int rc;
 2326         u32 cfg = 0;
 2327         int size;
 2328 
 2329         /* collect CIM LA */
 2330         if (is_t6(padap)) {
 2331                 size = padap->params.cim_la_size / 10 + 1;
 2332                 size *= 11 * sizeof(u32);
 2333         } else {
 2334                 size = padap->params.cim_la_size / 8;
 2335                 size *= 8 * sizeof(u32);
 2336         }
 2337 
 2338         size += sizeof(cfg);
 2339 
 2340         rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
 2341         if (rc)
 2342                 goto err;
 2343 
 2344         rc = t4_cim_read(padap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
 2345 
 2346         if (rc) {
 2347                 if (pdbg_init->verbose)
 2348                         pdbg_init->print("%s: t4_cim_read failed (%d)\n",
 2349                                  __func__, rc);
 2350                 cudbg_err->sys_err = rc;
 2351                 goto err1;
 2352         }
 2353 
 2354         memcpy((char *)scratch_buff.data + scratch_buff.offset, &cfg,
 2355                sizeof(cfg));
 2356 
 2357         rc = t4_cim_read_la(padap,
 2358                             (u32 *) ((char *)scratch_buff.data +
 2359                                      scratch_buff.offset + sizeof(cfg)), NULL);
 2360         if (rc < 0) {
 2361                 if (pdbg_init->verbose)
 2362                         pdbg_init->print("%s: t4_cim_read_la failed (%d)\n",
 2363                                  __func__, rc);
 2364                 cudbg_err->sys_err = rc;
 2365                 goto err1;
 2366         }
 2367 
 2368         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 2369         if (rc)
 2370                 goto err1;
 2371 
 2372         rc = compress_buff(&scratch_buff, dbg_buff);
 2373         if (rc)
 2374                 goto err1;
 2375 
 2376 err1:
 2377         release_scratch_buff(&scratch_buff, dbg_buff);
 2378 err:
 2379         return rc;
 2380 }
 2381 
 2382 static int collect_cim_qcfg(struct cudbg_init *pdbg_init,
 2383                             struct cudbg_buffer *dbg_buff,
 2384                             struct cudbg_error *cudbg_err)
 2385 {
 2386         struct cudbg_buffer scratch_buff;
 2387         struct adapter *padap = pdbg_init->adap;
 2388         u32 offset;
 2389         int rc = 0;
 2390 
 2391         struct struct_cim_qcfg *cim_qcfg_data = NULL;
 2392 
 2393         rc = get_scratch_buff(dbg_buff, sizeof(struct struct_cim_qcfg),
 2394                               &scratch_buff);
 2395 
 2396         if (rc)
 2397                 goto err;
 2398 
 2399         offset = scratch_buff.offset;
 2400 
 2401         cim_qcfg_data =
 2402                 (struct struct_cim_qcfg *)((char *)scratch_buff.data +
 2403                                            offset);
 2404 
 2405         rc = t4_cim_read(padap, A_UP_IBQ_0_RDADDR,
 2406                          ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
 2407 
 2408         if (rc) {
 2409                 if (pdbg_init->verbose)
 2410                         pdbg_init->print("%s: t4_cim_read IBQ_0_RDADDR failed (%d)\n",
 2411                             __func__, rc);
 2412                 cudbg_err->sys_err = rc;
 2413                 goto err1;
 2414         }
 2415 
 2416         rc = t4_cim_read(padap, A_UP_OBQ_0_REALADDR,
 2417                          ARRAY_SIZE(cim_qcfg_data->obq_wr),
 2418                          cim_qcfg_data->obq_wr);
 2419 
 2420         if (rc) {
 2421                 if (pdbg_init->verbose)
 2422                         pdbg_init->print("%s: t4_cim_read OBQ_0_REALADDR failed (%d)\n",
 2423                             __func__, rc);
 2424                 cudbg_err->sys_err = rc;
 2425                 goto err1;
 2426         }
 2427 
 2428         /* no return val */
 2429         t4_read_cimq_cfg(padap,
 2430                         cim_qcfg_data->base,
 2431                         cim_qcfg_data->size,
 2432                         cim_qcfg_data->thres);
 2433 
 2434         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 2435         if (rc)
 2436                 goto err1;
 2437 
 2438         rc = compress_buff(&scratch_buff, dbg_buff);
 2439         if (rc)
 2440                 goto err1;
 2441 
 2442 err1:
 2443         release_scratch_buff(&scratch_buff, dbg_buff);
 2444 err:
 2445         return rc;
 2446 }
 2447 
 2448 /**
 2449  * Fetch the TX/RX payload regions start and end.
 2450  *
 2451  * @padap (IN): adapter handle.
 2452  * @mem_type (IN): EDC0, EDC1, MC/MC0/MC1.
 2453  * @mem_tot_len (IN): total length of @mem_type memory region to read.
 2454  * @payload_type (IN): TX or RX Payload.
 2455  * @reg_info (OUT): store the payload region info.
 2456  *
 2457  * Fetch the TX/RX payload region information from meminfo.
 2458  * However, reading from the @mem_type region starts at 0 and not
 2459  * from whatever base info is stored in meminfo.  Hence, if the
 2460  * payload region exists, then calculate the payload region
 2461  * start and end wrt 0 and @mem_tot_len, respectively, and set
 2462  * start and end with respect to 0 and @mem_tot_len, respectively, and set
 2463  */
 2464 #ifdef notyet
 2465 static int get_payload_range(struct adapter *padap, u8 mem_type,
 2466                              unsigned long mem_tot_len, u8 payload_type,
 2467                              struct struct_region_info *reg_info)
 2468 {
 2469         struct struct_meminfo meminfo;
 2470         struct struct_mem_desc mem_region;
 2471         struct struct_mem_desc payload;
 2472         u32 i, idx, found = 0;
 2473         u8 mc_type;
 2474         int rc;
 2475 
 2476         /* Get meminfo of all regions */
 2477         rc = fill_meminfo(padap, &meminfo);
 2478         if (rc)
 2479                 return rc;
 2480 
 2481         /* Extract the specified TX or RX Payload region range */
 2482         memset(&payload, 0, sizeof(struct struct_mem_desc));
 2483         for (i = 0; i < meminfo.mem_c; i++) {
 2484                 if (meminfo.mem[i].idx >= ARRAY_SIZE(region))
 2485                         continue;                        /* skip holes */
 2486 
 2487                 idx = meminfo.mem[i].idx;
 2488                 /* Get TX or RX Payload region start and end */
 2489                 if (idx == payload_type) {
 2490                         if (!(meminfo.mem[i].limit))
 2491                                 meminfo.mem[i].limit =
 2492                                         i < meminfo.mem_c - 1 ?
 2493                                         meminfo.mem[i + 1].base - 1 : ~0;
 2494 
 2495                         memcpy(&payload, &meminfo.mem[i], sizeof(payload));
 2496                         found = 1;
 2497                         break;
 2498                 }
 2499         }
 2500 
 2501         /* If the TX or RX payload region is not found, return an error. */
 2502         if (!found)
 2503                 return -EINVAL;
 2504 
 2505         if (mem_type < MEM_MC) {
 2506                 memcpy(&mem_region, &meminfo.avail[mem_type],
 2507                        sizeof(mem_region));
 2508         } else {
 2509                 /* Check if both MC0 and MC1 exist by checking if a
 2510                  * base address for the specified @mem_type exists.
 2511                  * If a base address exists, then there is MC1 and
 2512                  * hence use the base address stored at index 3.
 2513                  * Otherwise, use the base address stored at index 2.
 2514                  */
 2515                 mc_type = meminfo.avail[mem_type].base ?
 2516                           mem_type : mem_type - 1;
 2517                 memcpy(&mem_region, &meminfo.avail[mc_type],
 2518                        sizeof(mem_region));
 2519         }
 2520 
 2521         /* Check if payload region exists in current memory */
 2522         if (payload.base < mem_region.base && payload.limit < mem_region.base) {
 2523                 reg_info->exist = false;
 2524                 return 0;
 2525         }
 2526 
 2527         /* Get Payload region start and end with respect to 0 and
 2528          * mem_tot_len, respectively.  This is because reading from the
 2529          * memory region starts at 0 and not at base info stored in meminfo.
 2530          */
 2531         if (payload.base < mem_region.limit) {
 2532                 reg_info->exist = true;
 2533                 if (payload.base >= mem_region.base)
 2534                         reg_info->start = payload.base - mem_region.base;
 2535                 else
 2536                         reg_info->start = 0;
 2537 
 2538                 if (payload.limit < mem_region.limit)
 2539                         reg_info->end = payload.limit - mem_region.base;
 2540                 else
 2541                         reg_info->end = mem_tot_len;
 2542         }
 2543 
 2544         return 0;
 2545 }
 2546 #endif
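
      /*
       * Example of the range arithmetic above (hypothetical addresses):
       * for a memory region [0x100000, 0x4fffff] and a payload region
       * [0x200000, 0x2fffff], the payload lies wholly inside the region,
       * so reg_info->start = 0x200000 - 0x100000 = 0x100000 and
       * reg_info->end = 0x2fffff - 0x100000 = 0x1fffff, i.e. offsets
       * relative to the start of the region being read.
       */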
 2547 
 2548 static int read_fw_mem(struct cudbg_init *pdbg_init,
 2549                         struct cudbg_buffer *dbg_buff, u8 mem_type,
 2550                         unsigned long tot_len, struct cudbg_error *cudbg_err)
 2551 {
 2552 #ifdef notyet
 2553         struct cudbg_buffer scratch_buff;
 2554         struct adapter *padap = pdbg_init->adap;
 2555         unsigned long bytes_read = 0;
 2556         unsigned long bytes_left;
 2557         unsigned long bytes;
 2558         int           rc;
 2559         struct struct_region_info payload[2]; /* TX and RX Payload Region */
 2560         u16 get_payload_flag;
 2561         u8 i;
 2562 
 2563         get_payload_flag =
 2564                 pdbg_init->dbg_params[CUDBG_GET_PAYLOAD_PARAM].param_type;
 2565 
 2566         /* If explicitly asked to get TX/RX Payload data,
 2567          * then don't zero out the payload data. Otherwise,
 2568          * zero out the payload data.
 2569          */
 2570         if (!get_payload_flag) {
 2571                 u8 region_index[2];
 2572                 u8 j = 0;
 2573 
 2574                 /* Find the index of TX and RX Payload regions in meminfo */
 2575                 for (i = 0; i < ARRAY_SIZE(region); i++) {
 2576                         if (!strcmp(region[i], "Tx payload:") ||
 2577                             !strcmp(region[i], "Rx payload:")) {
 2578                                 region_index[j] = i;
 2579                                 j++;
 2580                                 if (j == 2)
 2581                                         break;
 2582                         }
 2583                 }
 2584 
 2585                 /* Get TX/RX Payload region range if they exist */
 2586                 memset(payload, 0, ARRAY_SIZE(payload) * sizeof(payload[0]));
 2587                 for (i = 0; i < ARRAY_SIZE(payload); i++) {
 2588                         rc = get_payload_range(padap, mem_type, tot_len,
 2589                                                region_index[i],
 2590                                                &payload[i]);
 2591                         if (rc)
 2592                                 goto err;
 2593 
 2594                         if (payload[i].exist) {
 2595                                 /* Align start and end to avoid wrap around */
 2596                                 payload[i].start =
 2597                                         roundup(payload[i].start,
 2598                                             CUDBG_CHUNK_SIZE);
 2599                                 payload[i].end =
 2600                                         rounddown(payload[i].end,
 2601                                             CUDBG_CHUNK_SIZE);
 2602                         }
 2603                 }
 2604         }
 2605 
 2606         bytes_left = tot_len;
 2607         scratch_buff.size = tot_len;
 2608         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 2609         if (rc)
 2610                 goto err;
 2611 
 2612         while (bytes_left > 0) {
 2613                 bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
 2614                 rc = get_scratch_buff(dbg_buff, bytes, &scratch_buff);
 2615 
 2616                 if (rc) {
 2617                         rc = CUDBG_STATUS_NO_SCRATCH_MEM;
 2618                         goto err;
 2619                 }
 2620 
 2621                 if (!get_payload_flag) {
 2622                         for (i = 0; i < ARRAY_SIZE(payload); i++) {
 2623                                 if (payload[i].exist &&
 2624                                     bytes_read >= payload[i].start &&
 2625                                     (bytes_read + bytes) <= payload[i].end) {
 2626                                         memset(scratch_buff.data, 0, bytes);
 2627                                         /* TX and RX Payload regions
 2628                                          * can't overlap.
 2629                                          */
 2630                                         goto skip_read;
 2631                                 }
 2632                         }
 2633                 }
 2634 
 2635                 /* Read the next chunk of this memory region from the
 2636                  * adapter. */
 2637                 rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, bytes_read,
 2638                                   bytes, (__be32 *)(scratch_buff.data), 1);
 2639 
 2640                 if (rc) {
 2641                         if (pdbg_init->verbose)
 2642                                 pdbg_init->print("%s: t4_memory_rw failed (%d)",
 2643                                     __func__, rc);
 2644                         cudbg_err->sys_err = rc;
 2645                         goto err1;
 2646                 }
 2647 
 2648 skip_read:
 2649                 rc = compress_buff(&scratch_buff, dbg_buff);
 2650                 if (rc)
 2651                         goto err1;
 2652 
 2653                 bytes_left -= bytes;
 2654                 bytes_read += bytes;
 2655                 release_scratch_buff(&scratch_buff, dbg_buff);
 2656         }
 2657 
 2658 err1:
 2659         if (rc)
 2660                 release_scratch_buff(&scratch_buff, dbg_buff);
 2661 
 2662 err:
 2663         return rc;
 2664 #endif
 2665         return (CUDBG_STATUS_NOT_IMPLEMENTED);
 2666 }
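
      /*
       * Note on the payload handling above: a chunk is zeroed (instead of
       * read) only when it lies entirely inside an aligned payload window.
       * For example (hypothetical values), with a 64 KiB CUDBG_CHUNK_SIZE,
       * a payload region [100000, 300000) is narrowed to [131072, 262144)
       * by the roundup/rounddown above, so the partial chunks at either
       * edge are still read from the adapter and only whole payload chunks
       * are replaced with zeroes.
       */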
 2667 
 2668 static void collect_mem_info(struct cudbg_init *pdbg_init,
 2669                              struct card_mem *mem_info)
 2670 {
 2671         struct adapter *padap = pdbg_init->adap;
 2672         u32 value;
 2673         int t4 = 0;
 2674 
 2675         if (is_t4(padap))
 2676                 t4 = 1;
 2677 
 2678         if (t4) {
 2679                 value = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
 2680                 value = G_EXT_MEM_SIZE(value);
 2681                 mem_info->size_mc0 = (u16)value;  /* size in MB */
 2682 
 2683                 value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
 2684                 if (value & F_EXT_MEM_ENABLE)
 2685                         /* Set the MC0 flag bit. */
 2686                         mem_info->mem_flag |= (1 << MC0_FLAG);
 2687         } else {
 2688                 value = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
 2689                 value = G_EXT_MEM0_SIZE(value);
 2690                 mem_info->size_mc0 = (u16)value;
 2691 
 2692                 value = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
 2693                 value = G_EXT_MEM1_SIZE(value);
 2694                 mem_info->size_mc1 = (u16)value;
 2695 
 2696                 value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
 2697                 if (value & F_EXT_MEM0_ENABLE)
 2698                         mem_info->mem_flag |= (1 << MC0_FLAG);
 2699                 if (value & F_EXT_MEM1_ENABLE)
 2700                         mem_info->mem_flag |= (1 << MC1_FLAG);
 2701         }
 2702 
 2703         value = t4_read_reg(padap, A_MA_EDRAM0_BAR);
 2704         value = G_EDRAM0_SIZE(value);
 2705         mem_info->size_edc0 = (u16)value;
 2706 
 2707         value = t4_read_reg(padap, A_MA_EDRAM1_BAR);
 2708         value = G_EDRAM1_SIZE(value);
 2709         mem_info->size_edc1 = (u16)value;
 2710 
 2711         value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
 2712         if (value & F_EDRAM0_ENABLE)
 2713                 mem_info->mem_flag |= (1 << EDC0_FLAG);
 2714         if (value & F_EDRAM1_ENABLE)
 2715                 mem_info->mem_flag |= (1 << EDC1_FLAG);
 2716 
 2717 }
 2718 
 2719 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
 2720                                 struct cudbg_error *cudbg_err)
 2721 {
 2722         struct adapter *padap = pdbg_init->adap;
 2723         int rc;
 2724 
 2725         if (is_fw_attached(pdbg_init)) {
 2726 
 2727                 /* Flush uP dcache before reading edcX/mcX */
 2728                 rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK,
 2729                     "t4cudl");
 2730                 if (rc == 0) {
 2731                         rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
 2732                         end_synchronized_op(padap, 0);
 2733                 }
 2734 
 2735                 if (rc) {
 2736                         if (pdbg_init->verbose)
 2737                                 pdbg_init->print("%s: t4_fwcache failed (%d)\n",
 2738                                  __func__, rc);
 2739                         cudbg_err->sys_warn = rc;
 2740                 }
 2741         }
 2742 }
 2743 
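      /*
       * The collect_{edc0,edc1,mc0,mc1}_meminfo() helpers below share
       * one pattern: flush the uP cache, probe which memories exist,
       * then dump the selected region through read_fw_mem().
       */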
 2744 static int collect_edc0_meminfo(struct cudbg_init *pdbg_init,
 2745                                 struct cudbg_buffer *dbg_buff,
 2746                                 struct cudbg_error *cudbg_err)
 2747 {
 2748         struct card_mem mem_info = {0};
 2749         unsigned long edc0_size;
 2750         int rc;
 2751 
 2752         cudbg_t4_fwcache(pdbg_init, cudbg_err);
 2753 
 2754         collect_mem_info(pdbg_init, &mem_info);
 2755 
 2756         if (mem_info.mem_flag & (1 << EDC0_FLAG)) {
 2757                 edc0_size = (((unsigned long)mem_info.size_edc0) * 1024 * 1024);
 2758                 rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC0,
 2759                                  edc0_size, cudbg_err);
 2760                 if (rc)
 2761                         goto err;
 2762 
 2763         } else {
 2764                 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
 2765                 if (pdbg_init->verbose)
 2766                         pdbg_init->print("%s(): collect_mem_info failed, %s\n",
 2767                                  __func__, err_msg[-rc]);
 2768                 goto err;
 2769 
 2770         }
 2771 err:
 2772         return rc;
 2773 }
 2774 
 2775 static int collect_edc1_meminfo(struct cudbg_init *pdbg_init,
 2776                                 struct cudbg_buffer *dbg_buff,
 2777                                 struct cudbg_error *cudbg_err)
 2778 {
 2779         struct card_mem mem_info = {0};
 2780         unsigned long edc1_size;
 2781         int rc;
 2782 
 2783         cudbg_t4_fwcache(pdbg_init, cudbg_err);
 2784 
 2785         collect_mem_info(pdbg_init, &mem_info);
 2786 
 2787         if (mem_info.mem_flag & (1 << EDC1_FLAG)) {
 2788                 edc1_size = (((unsigned long)mem_info.size_edc1) * 1024 * 1024);
 2789                 rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC1,
 2790                                  edc1_size, cudbg_err);
 2791                 if (rc)
 2792                         goto err;
 2793         } else {
 2794                 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
 2795                 if (pdbg_init->verbose)
 2796                         pdbg_init->print("%s(): collect_mem_info failed, %s\n",
 2797                                  __func__, err_msg[-rc]);
 2798                 goto err;
 2799         }
 2800 
 2801 err:
 2802 
 2803         return rc;
 2804 }
 2805 
 2806 static int collect_mc0_meminfo(struct cudbg_init *pdbg_init,
 2807                                struct cudbg_buffer *dbg_buff,
 2808                                struct cudbg_error *cudbg_err)
 2809 {
 2810         struct card_mem mem_info = {0};
 2811         unsigned long mc0_size;
 2812         int rc;
 2813 
 2814         cudbg_t4_fwcache(pdbg_init, cudbg_err);
 2815 
 2816         collect_mem_info(pdbg_init, &mem_info);
 2817 
 2818         if (mem_info.mem_flag & (1 << MC0_FLAG)) {
 2819                 mc0_size = (((unsigned long)mem_info.size_mc0) * 1024 * 1024);
 2820                 rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC0,
 2821                                  mc0_size, cudbg_err);
 2822                 if (rc)
 2823                         goto err;
 2824         } else {
 2825                 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
 2826                 if (pdbg_init->verbose)
 2827                         pdbg_init->print("%s(): collect_mem_info failed, %s\n",
 2828                                  __func__, err_msg[-rc]);
 2829                 goto err;
 2830         }
 2831 
 2832 err:
 2833         return rc;
 2834 }
 2835 
 2836 static int collect_mc1_meminfo(struct cudbg_init *pdbg_init,
 2837                                struct cudbg_buffer *dbg_buff,
 2838                                struct cudbg_error *cudbg_err)
 2839 {
 2840         struct card_mem mem_info = {0};
 2841         unsigned long mc1_size;
 2842         int rc;
 2843 
 2844         cudbg_t4_fwcache(pdbg_init, cudbg_err);
 2845 
 2846         collect_mem_info(pdbg_init, &mem_info);
 2847 
 2848         if (mem_info.mem_flag & (1 << MC1_FLAG)) {
 2849                 mc1_size = (((unsigned long)mem_info.size_mc1) * 1024 * 1024);
 2850                 rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC1,
 2851                                  mc1_size, cudbg_err);
 2852                 if (rc)
 2853                         goto err;
 2854         } else {
 2855                 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
 2856 
 2857                 if (pdbg_init->verbose)
 2858                         pdbg_init->print("%s(): collect_mem_info failed, %s\n",
 2859                                  __func__, err_msg[-rc]);
 2860                 goto err;
 2861         }
 2862 err:
 2863         return rc;
 2864 }
 2865 
 2866 static int collect_reg_dump(struct cudbg_init *pdbg_init,
 2867                             struct cudbg_buffer *dbg_buff,
 2868                             struct cudbg_error *cudbg_err)
 2869 {
 2870         struct cudbg_buffer scratch_buff;
 2871         struct cudbg_buffer tmp_scratch_buff;
 2872         struct adapter *padap = pdbg_init->adap;
 2873         unsigned long        bytes_read = 0;
 2874         unsigned long        bytes_left;
 2875         u32                  buf_size = 0, bytes = 0;
 2876         int                  rc = 0;
 2877 
 2878         if (is_t4(padap))
 2879                 buf_size = T4_REGMAP_SIZE; /* + sizeof(unsigned int) */
 2880         else if (is_t5(padap) || is_t6(padap))
 2881                 buf_size = T5_REGMAP_SIZE;
 2882 
 2883         scratch_buff.size = buf_size;
 2884 
 2885         tmp_scratch_buff = scratch_buff;
 2886 
 2887         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 2888         if (rc)
 2889                 goto err;
 2890 
 2891         /* t4_get_regs() has no return value; it fills the whole buffer */
 2892         t4_get_regs(padap, (void *)scratch_buff.data, scratch_buff.size);
 2893         bytes_left = scratch_buff.size;
 2894 
 2895         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 2896         if (rc)
 2897                 goto err1;
 2898 
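              /*
               * The register snapshot may be larger than one compression
               * chunk, so feed it to compress_buff() in CUDBG_CHUNK_SIZE
               * slices.
               */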
 2899         while (bytes_left > 0) {
 2900                 tmp_scratch_buff.data =
 2901                         ((char *)scratch_buff.data) + bytes_read;
 2902                 bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
 2903                 tmp_scratch_buff.size = bytes;
 2904                 rc = compress_buff(&tmp_scratch_buff, dbg_buff);
 2905                 bytes_left -= bytes;
 2906                 bytes_read += bytes;
 2907         }
 2908 
 2909 err1:
 2910         release_scratch_buff(&scratch_buff, dbg_buff);
 2911 err:
 2912         return rc;
 2913 }
 2914 
 2915 static int collect_cctrl(struct cudbg_init *pdbg_init,
 2916                          struct cudbg_buffer *dbg_buff,
 2917                          struct cudbg_error *cudbg_err)
 2918 {
 2919         struct cudbg_buffer scratch_buff;
 2920         struct adapter *padap = pdbg_init->adap;
 2921         u32 size;
 2922         int rc;
 2923 
 2924         size = sizeof(u16) * NMTUS * NCCTRL_WIN;
 2925         scratch_buff.size = size;
 2926 
 2927         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 2928         if (rc)
 2929                 goto err;
 2930 
 2931         t4_read_cong_tbl(padap, (void *)scratch_buff.data);
 2932 
 2933         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 2934         if (rc)
 2935                 goto err1;
 2936 
 2937         rc = compress_buff(&scratch_buff, dbg_buff);
 2938 
 2939 err1:
 2940         release_scratch_buff(&scratch_buff, dbg_buff);
 2941 err:
 2942         return rc;
 2943 }
 2944 
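      /*
       * Poll A_CIM_HOST_ACC_CTRL until the busy bit clears, giving up
       * after a fixed number of reads; returns -1 on timeout, 0 when
       * the interface is ready.
       */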
 2945 static int check_busy_bit(struct adapter *padap)
 2946 {
 2947         u32 val;
 2948         u32 busy = 1;
 2949         int i = 0;
 2950         int retry = 10;
 2951         int status = 0;
 2952 
 2953         while (busy && i < retry) {
 2954                 val = t4_read_reg(padap, A_CIM_HOST_ACC_CTRL);
 2955                 busy = (0 != (val & CUDBG_CIM_BUSY_BIT));
 2956                 i++;
 2957         }
 2958 
 2959         if (busy)
 2960                 status = -1;
 2961 
 2962         return status;
 2963 }
 2964 
 2965 static int cim_ha_rreg(struct adapter *padap, u32 addr, u32 *val)
 2966 {
 2967         int rc = 0;
 2968 
 2969         /* write register address into the A_CIM_HOST_ACC_CTRL */
 2970         t4_write_reg(padap, A_CIM_HOST_ACC_CTRL, addr);
 2971 
 2972         /* Poll HOSTBUSY */
 2973         rc = check_busy_bit(padap);
 2974         if (rc)
 2975                 goto err;
 2976 
 2977         /* Read value from A_CIM_HOST_ACC_DATA */
 2978         *val = t4_read_reg(padap, A_CIM_HOST_ACC_DATA);
 2979 
 2980 err:
 2981         return rc;
 2982 }
 2983 
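      /*
       * Read ireg_offset_range words through the CIM host-access
       * window, 4 bytes apart, starting at ireg_local_offset.
       */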
 2984 static int dump_up_cim(struct adapter *padap, struct cudbg_init *pdbg_init,
 2985                        struct ireg_field *up_cim_reg, u32 *buff)
 2986 {
 2987         u32 i;
 2988         int rc = 0;
 2989 
 2990         for (i = 0; i < up_cim_reg->ireg_offset_range; i++) {
 2991                 rc = cim_ha_rreg(padap,
 2992                                  up_cim_reg->ireg_local_offset + (i * 4),
 2993                                 buff);
 2994                 if (rc) {
 2995                         if (pdbg_init->verbose)
 2996                                 pdbg_init->print("BUSY timeout reading "
 2997                                          "CIM_HOST_ACC_CTRL\n");
 2998                         goto err;
 2999                 }
 3000 
 3001                 buff++;
 3002         }
 3003 
 3004 err:
 3005         return rc;
 3006 }
 3007 
 3008 static int collect_up_cim_indirect(struct cudbg_init *pdbg_init,
 3009                                    struct cudbg_buffer *dbg_buff,
 3010                                    struct cudbg_error *cudbg_err)
 3011 {
 3012         struct cudbg_buffer scratch_buff;
 3013         struct adapter *padap = pdbg_init->adap;
 3014         struct ireg_buf *up_cim;
 3015         u32 size;
 3016         int i, rc, n;
 3017 
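        /*
         * Note: n is taken from the T5 array; the T6 branch below
         * assumes t6_up_cim_reg_array has the same number of entries.
         */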
 3018         n = sizeof(t5_up_cim_reg_array) / (4 * sizeof(u32));
 3019         size = sizeof(struct ireg_buf) * n;
 3020         scratch_buff.size = size;
 3021 
 3022         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 3023         if (rc)
 3024                 goto err;
 3025 
 3026         up_cim = (struct ireg_buf *)scratch_buff.data;
 3027 
 3028         for (i = 0; i < n; i++) {
 3029                 struct ireg_field *up_cim_reg = &up_cim->tp_pio;
 3030                 u32 *buff = up_cim->outbuf;
 3031 
 3032                 if (is_t5(padap)) {
 3033                         up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
 3034                         up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
 3035                         up_cim_reg->ireg_local_offset =
 3036                                                 t5_up_cim_reg_array[i][2];
 3037                         up_cim_reg->ireg_offset_range =
 3038                                                 t5_up_cim_reg_array[i][3];
 3039                 } else if (is_t6(padap)) {
 3040                         up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
 3041                         up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
 3042                         up_cim_reg->ireg_local_offset =
 3043                                                 t6_up_cim_reg_array[i][2];
 3044                         up_cim_reg->ireg_offset_range =
 3045                                                 t6_up_cim_reg_array[i][3];
 3046                 }
 3047 
 3048                 rc = dump_up_cim(padap, pdbg_init, up_cim_reg, buff);
 3049 
 3050                 up_cim++;
 3051         }
 3052 
 3053         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 3054         if (rc)
 3055                 goto err1;
 3056 
 3057         rc = compress_buff(&scratch_buff, dbg_buff);
 3058 
 3059 err1:
 3060         release_scratch_buff(&scratch_buff, dbg_buff);
 3061 err:
 3062         return rc;
 3063 }
 3064 
 3065 static int collect_mbox_log(struct cudbg_init *pdbg_init,
 3066                             struct cudbg_buffer *dbg_buff,
 3067                             struct cudbg_error *cudbg_err)
 3068 {
 3069 #ifdef notyet
 3070         struct cudbg_buffer scratch_buff;
 3071         struct cudbg_mbox_log *mboxlog = NULL;
 3072         struct mbox_cmd_log *log = NULL;
 3073         struct mbox_cmd *entry;
 3074         u64 flit;
 3075         u32 size;
 3076         unsigned int entry_idx;
 3077         int i, k, rc;
 3078         u16 mbox_cmds;
 3079 
 3080         if (pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.mboxlog_param.log) {
 3081                 log = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
 3082                         mboxlog_param.log;
 3083                 mbox_cmds = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
 3084                                 mboxlog_param.mbox_cmds;
 3085         } else {
 3086                 if (pdbg_init->verbose)
 3087                         pdbg_init->print("Mbox log is not requested\n");
 3088                 return CUDBG_STATUS_ENTITY_NOT_REQUESTED;
 3089         }
 3090 
 3091         size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
 3092         scratch_buff.size = size;
 3093         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 3094         if (rc)
 3095                 goto err;
 3096 
 3097         mboxlog = (struct cudbg_mbox_log *)scratch_buff.data;
 3098 
 3099         for (k = 0; k < mbox_cmds; k++) {
 3100                 entry_idx = log->cursor + k;
 3101                 if (entry_idx >= log->size)
 3102                         entry_idx -= log->size;
 3103                 entry = mbox_cmd_log_entry(log, entry_idx);
 3104 
 3105                 /* skip over unused entries */
 3106                 if (entry->timestamp == 0)
 3107                         continue;
 3108 
 3109                 memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
 3110 
 3111                 for (i = 0; i < MBOX_LEN / 8; i++) {
 3112                         flit = entry->cmd[i];
 3113                         mboxlog->hi[i] = (u32)(flit >> 32);
 3114                         mboxlog->lo[i] = (u32)flit;
 3115                 }
 3116 
 3117                 mboxlog++;
 3118         }
 3119 
 3120         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 3121         if (rc)
 3122                 goto err1;
 3123 
 3124         rc = compress_buff(&scratch_buff, dbg_buff);
 3125 
 3126 err1:
 3127         release_scratch_buff(&scratch_buff, dbg_buff);
 3128 err:
 3129         return rc;
 3130 #endif
 3131         return (CUDBG_STATUS_NOT_IMPLEMENTED);
 3132 }
 3133 
 3134 static int collect_pbt_tables(struct cudbg_init *pdbg_init,
 3135                               struct cudbg_buffer *dbg_buff,
 3136                               struct cudbg_error *cudbg_err)
 3137 {
 3138         struct cudbg_buffer scratch_buff;
 3139         struct adapter *padap = pdbg_init->adap;
 3140         struct cudbg_pbt_tables *pbt = NULL;
 3141         u32 size;
 3142         u32 addr;
 3143         int i, rc;
 3144 
 3145         size = sizeof(struct cudbg_pbt_tables);
 3146         scratch_buff.size = size;
 3147 
 3148         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 3149         if (rc)
 3150                 goto err;
 3151 
 3152         pbt = (struct cudbg_pbt_tables *)scratch_buff.data;
 3153 
 3154         /* PBT dynamic entries */
 3155         addr = CUDBG_CHAC_PBT_ADDR;
 3156         for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
 3157                 rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_dynamic[i]);
 3158                 if (rc) {
 3159                         if (pdbg_init->verbose)
 3160                                 pdbg_init->print("BUSY timeout reading "
 3161                                          "CIM_HOST_ACC_CTRL\n");
 3162                         goto err1;
 3163                 }
 3164         }
 3165 
 3166         /* PBT static entries */
 3167 
 3168         /* static entries start when bit 6 is set */
 3169         addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
 3170         for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
 3171                 rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_static[i]);
 3172                 if (rc) {
 3173                         if (pdbg_init->verbose)
 3174                                 pdbg_init->print("BUSY timeout reading "
 3175                                          "CIM_HOST_ACC_CTRL\n");
 3176                         goto err1;
 3177                 }
 3178         }
 3179 
 3180         /* LRF entries */
 3181         addr = CUDBG_CHAC_PBT_LRF;
 3182         for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
 3183                 rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->lrf_table[i]);
 3184                 if (rc) {
 3185                         if (pdbg_init->verbose)
 3186                                 pdbg_init->print("BUSY timeout reading "
 3187                                          "CIM_HOST_ACC_CTRL\n");
 3188                         goto err1;
 3189                 }
 3190         }
 3191 
 3192         /* PBT data entries */
 3193         addr = CUDBG_CHAC_PBT_DATA;
 3194         for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
 3195                 rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_data[i]);
 3196                 if (rc) {
 3197                         if (pdbg_init->verbose)
 3198                                 pdbg_init->print("BUSY timeout reading "
 3199                                          "CIM_HOST_ACC_CTRL\n");
 3200                         goto err1;
 3201                 }
 3202         }
 3203 
 3204         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 3205         if (rc)
 3206                 goto err1;
 3207 
 3208         rc = compress_buff(&scratch_buff, dbg_buff);
 3209 
 3210 err1:
 3211         release_scratch_buff(&scratch_buff, dbg_buff);
 3212 err:
 3213         return rc;
 3214 }
 3215 
 3216 static int collect_pm_indirect(struct cudbg_init *pdbg_init,
 3217                                struct cudbg_buffer *dbg_buff,
 3218                                struct cudbg_error *cudbg_err)
 3219 {
 3220         struct cudbg_buffer scratch_buff;
 3221         struct adapter *padap = pdbg_init->adap;
 3222         struct ireg_buf *ch_pm;
 3223         u32 size;
 3224         int i, rc, n;
 3225 
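        /*
         * The scratch buffer holds both the RX and TX register sets;
         * sizing it as n * 2 assumes t5_pm_tx_array matches
         * t5_pm_rx_array in length.
         */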
 3226         n = sizeof(t5_pm_rx_array) / (4 * sizeof(u32));
 3227         size = sizeof(struct ireg_buf) * n * 2;
 3228         scratch_buff.size = size;
 3229 
 3230         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 3231         if (rc)
 3232                 goto err;
 3233 
 3234         ch_pm = (struct ireg_buf *)scratch_buff.data;
 3235 
 3236         /* PM_RX */
 3237         for (i = 0; i < n; i++) {
 3238                 struct ireg_field *pm_pio = &ch_pm->tp_pio;
 3239                 u32 *buff = ch_pm->outbuf;
 3240 
 3241                 pm_pio->ireg_addr = t5_pm_rx_array[i][0];
 3242                 pm_pio->ireg_data = t5_pm_rx_array[i][1];
 3243                 pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
 3244                 pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
 3245 
 3246                 t4_read_indirect(padap,
 3247                                 pm_pio->ireg_addr,
 3248                                 pm_pio->ireg_data,
 3249                                 buff,
 3250                                 pm_pio->ireg_offset_range,
 3251                                 pm_pio->ireg_local_offset);
 3252 
 3253                 ch_pm++;
 3254         }
 3255 
 3256         /* PM_Tx */
 3257         n = sizeof(t5_pm_tx_array) / (4 * sizeof(u32));
 3258         for (i = 0; i < n; i++) {
 3259                 struct ireg_field *pm_pio = &ch_pm->tp_pio;
 3260                 u32 *buff = ch_pm->outbuf;
 3261 
 3262                 pm_pio->ireg_addr = t5_pm_tx_array[i][0];
 3263                 pm_pio->ireg_data = t5_pm_tx_array[i][1];
 3264                 pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
 3265                 pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
 3266 
 3267                 t4_read_indirect(padap,
 3268                                 pm_pio->ireg_addr,
 3269                                 pm_pio->ireg_data,
 3270                                 buff,
 3271                                 pm_pio->ireg_offset_range,
 3272                                 pm_pio->ireg_local_offset);
 3273 
 3274                 ch_pm++;
 3275         }
 3276 
 3277         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 3278         if (rc)
 3279                 goto err1;
 3280 
 3281         rc = compress_buff(&scratch_buff, dbg_buff);
 3282 
 3283 err1:
 3284         release_scratch_buff(&scratch_buff, dbg_buff);
 3285 err:
 3286         return rc;
 3287 
 3288 }
 3289 
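      /*
       * Snapshot the TID partitioning: the filter, active-filter,
       * server, and hash regions are queried from firmware, falling
       * back to mailbox 4 when the current mailbox lacks permission.
       */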
 3290 static int collect_tid(struct cudbg_init *pdbg_init,
 3291                        struct cudbg_buffer *dbg_buff,
 3292                        struct cudbg_error *cudbg_err)
 3293 {
 3294 
 3295         struct cudbg_buffer scratch_buff;
 3296         struct adapter *padap = pdbg_init->adap;
 3297         struct tid_info_region *tid;
 3298         struct tid_info_region_rev1 *tid1;
 3299         u32 para[7], val[7];
 3300         u32 mbox, pf;
 3301         int rc;
 3302 
 3303         scratch_buff.size = sizeof(struct tid_info_region_rev1);
 3304 
 3305         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 3306         if (rc)
 3307                 goto err;
 3308 
 3309 #define FW_PARAM_DEV_A(param) \
 3310         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
 3311          V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
 3312 #define FW_PARAM_PFVF_A(param) \
 3313         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
 3314          V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)|  \
 3315          V_FW_PARAMS_PARAM_Y(0) | \
 3316          V_FW_PARAMS_PARAM_Z(0))
 3317 #define MAX_ATIDS_A 8192U
 3318 
 3319         tid1 = (struct tid_info_region_rev1 *)scratch_buff.data;
 3320         tid = &(tid1->tid);
 3321         tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
 3322         tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
 3323         tid1->ver_hdr.size = sizeof(struct tid_info_region_rev1) -
 3324                              sizeof(struct cudbg_ver_hdr);
 3325 
 3326         if (is_t5(padap)) {
 3327                 tid->hash_base = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
 3328                 tid1->tid_start = 0;
 3329         } else if (is_t6(padap)) {
 3330                 tid->hash_base = t4_read_reg(padap, A_T6_LE_DB_HASH_TID_BASE);
 3331                 tid1->tid_start = t4_read_reg(padap, A_LE_DB_ACTIVE_TABLE_START_INDEX);
 3332         }
 3333 
 3334         tid->le_db_conf = t4_read_reg(padap, A_LE_DB_CONFIG);
 3335 
 3336         para[0] = FW_PARAM_PFVF_A(FILTER_START);
 3337         para[1] = FW_PARAM_PFVF_A(FILTER_END);
 3338         para[2] = FW_PARAM_PFVF_A(ACTIVE_FILTER_START);
 3339         para[3] = FW_PARAM_PFVF_A(ACTIVE_FILTER_END);
 3340         para[4] = FW_PARAM_DEV_A(NTID);
 3341         para[5] = FW_PARAM_PFVF_A(SERVER_START);
 3342         para[6] = FW_PARAM_PFVF_A(SERVER_END);
 3343 
 3344         rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK, "t4cudq");
 3345         if (rc)
 3346                 goto err;
 3347         mbox = padap->mbox;
 3348         pf = padap->pf;
 3349         rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
 3350         if (rc < 0) {
 3351                 if (rc == -FW_EPERM) {
 3352                         /* It looks like we don't have permission to use
 3353                          * padap->mbox.
 3354                          *
 3355                          * Try mbox 4.  If it works, we'll continue to
 3356                          * collect the rest of tid info from mbox 4.
 3357                          * Else, quit trying to collect tid info.
 3358                          */
 3359                         mbox = 4;
 3360                         pf = 4;
 3361                         rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
 3362                         if (rc < 0) {
 3363                                 cudbg_err->sys_err = rc;
 3364                                 goto err1;
 3365                         }
 3366                 } else {
 3367                         cudbg_err->sys_err = rc;
 3368                         goto err1;
 3369                 }
 3370         }
 3371 
 3372         tid->ftid_base = val[0];
 3373         tid->nftids = val[1] - val[0] + 1;
 3374         /* active filter region */
 3375         if (val[2] != val[3]) {
 3376 #ifdef notyet
 3377                 tid->flags |= FW_OFLD_CONN;
 3378 #endif
 3379                 tid->aftid_base = val[2];
 3380                 tid->aftid_end = val[3];
 3381         }
 3382         tid->ntids = val[4];
 3383         tid->natids = min_t(u32, tid->ntids / 2, MAX_ATIDS_A);
 3384         tid->stid_base = val[5];
 3385         tid->nstids = val[6] - val[5] + 1;
 3386 
 3387         if (chip_id(padap) >= CHELSIO_T6) {
 3388                 para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
 3389                 para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
 3390                 rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
 3391                 if (rc < 0) {
 3392                         cudbg_err->sys_err = rc;
 3393                         goto err1;
 3394                 }
 3395 
 3396                 tid->hpftid_base = val[0];
 3397                 tid->nhpftids = val[1] - val[0] + 1;
 3398         }
 3399 
 3400         if (chip_id(padap) <= CHELSIO_T5) {
 3401                 tid->sb = t4_read_reg(padap, A_LE_DB_SERVER_INDEX) / 4;
 3402                 tid->hash_base /= 4;
 3403         } else
 3404                 tid->sb = t4_read_reg(padap, A_LE_DB_SRVR_START_INDEX);
 3405 
 3406         /* UO context range */
 3407         para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
 3408         para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
 3409 
 3410         rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
 3411         if (rc < 0) {
 3412                 cudbg_err->sys_err = rc;
 3413                 goto err1;
 3414         }
 3415 
 3416         if (val[0] != val[1]) {
 3417                 tid->uotid_base = val[0];
 3418                 tid->nuotids = val[1] - val[0] + 1;
 3419         }
 3420         tid->IP_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV4);
 3421         tid->IPv6_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV6);
 3422 
 3423 #undef FW_PARAM_PFVF_A
 3424 #undef FW_PARAM_DEV_A
 3425 #undef MAX_ATIDS_A
 3426 
 3427         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 3428         if (rc)
 3429                 goto err1;
 3430         rc = compress_buff(&scratch_buff, dbg_buff);
 3431 
 3432 err1:
 3433         end_synchronized_op(padap, 0);
 3434         release_scratch_buff(&scratch_buff, dbg_buff);
 3435 err:
 3436         return rc;
 3437 }
 3438 
 3439 static int collect_tx_rate(struct cudbg_init *pdbg_init,
 3440                            struct cudbg_buffer *dbg_buff,
 3441                            struct cudbg_error *cudbg_err)
 3442 {
 3443         struct cudbg_buffer scratch_buff;
 3444         struct adapter *padap = pdbg_init->adap;
 3445         struct tx_rate *tx_rate;
 3446         u32 size;
 3447         int rc;
 3448 
 3449         size = sizeof(struct tx_rate);
 3450         scratch_buff.size = size;
 3451 
 3452         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 3453         if (rc)
 3454                 goto err;
 3455 
 3456         tx_rate = (struct tx_rate *)scratch_buff.data;
 3457         t4_get_chan_txrate(padap, tx_rate->nrate, tx_rate->orate);
 3458         tx_rate->nchan = padap->chip_params->nchan;
 3459 
 3460         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 3461         if (rc)
 3462                 goto err1;
 3463 
 3464         rc = compress_buff(&scratch_buff, dbg_buff);
 3465 
 3466 err1:
 3467         release_scratch_buff(&scratch_buff, dbg_buff);
 3468 err:
 3469         return rc;
 3470 }
 3471 
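      /*
       * Convert a TCAM (x, y) pair into the stored Ethernet address
       * and its match mask: y carries the address bits, x | y the
       * mask.
       */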
 3472 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
 3473 {
 3474         *mask = x | y;
 3475         y = (__force u64)cpu_to_be64(y);
 3476         memcpy(addr, (char *)&y + 2, ETH_ALEN);
 3477 }
 3478 
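      /*
       * Fallback used when the FW_LDST mailbox command fails: read the
       * MPS VF replication maps straight from hardware registers.
       */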
 3479 static void mps_rpl_backdoor(struct adapter *padap, struct fw_ldst_mps_rplc *mps_rplc)
 3480 {
 3481         if (is_t5(padap)) {
 3482                 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
 3483                                                           A_MPS_VF_RPLCT_MAP3));
 3484                 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
 3485                                                           A_MPS_VF_RPLCT_MAP2));
 3486                 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
 3487                                                           A_MPS_VF_RPLCT_MAP1));
 3488                 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
 3489                                                           A_MPS_VF_RPLCT_MAP0));
 3490         } else {
 3491                 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
 3492                                                           A_MPS_VF_RPLCT_MAP7));
 3493                 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
 3494                                                           A_MPS_VF_RPLCT_MAP6));
 3495                 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
 3496                                                           A_MPS_VF_RPLCT_MAP5));
 3497                 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
 3498                                                           A_MPS_VF_RPLCT_MAP4));
 3499         }
 3500         mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP3));
 3501         mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP2));
 3502         mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP1));
 3503         mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP0));
 3504 }
 3505 
 3506 static int collect_mps_tcam(struct cudbg_init *pdbg_init,
 3507                             struct cudbg_buffer *dbg_buff,
 3508                             struct cudbg_error *cudbg_err)
 3509 {
 3510         struct cudbg_buffer scratch_buff;
 3511         struct adapter *padap = pdbg_init->adap;
 3512         struct cudbg_mps_tcam *tcam = NULL;
 3513         u32 size = 0, i, n, total_size = 0;
 3514         u32 ctl, data2;
 3515         u64 tcamy, tcamx, val;
 3516         int rc;
 3517 
 3518         n = padap->chip_params->mps_tcam_size;
 3519         size = sizeof(struct cudbg_mps_tcam) * n;
 3520         scratch_buff.size = size;
 3521 
 3522         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 3523         if (rc)
 3524                 goto err;
 3525         memset(scratch_buff.data, 0, size);
 3526 
 3527         tcam = (struct cudbg_mps_tcam *)scratch_buff.data;
 3528         for (i = 0; i < n; i++) {
 3529                 if (chip_id(padap) >= CHELSIO_T6) {
 3530                         /* CtlReqID   - 1: use Host Driver Requester ID
 3531                          * CtlCmdType - 0: Read, 1: Write
 3532                          * CtlTcamSel - 0: TCAM0, 1: TCAM1
 3533                          * CtlXYBitSel- 0: Y bit, 1: X bit
 3534                          */
 3535 
 3536                         /* Read tcamy */
 3537                         ctl = (V_CTLREQID(1) |
 3538                                V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
 3539                         if (i < 256)
 3540                                 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
 3541                         else
 3542                                 ctl |= V_CTLTCAMINDEX(i - 256) |
 3543                                        V_CTLTCAMSEL(1);
 3544 
 3545                         t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
 3546                         val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
 3547                         tcamy = G_DMACH(val) << 32;
 3548                         tcamy |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
 3549                         data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
 3550                         tcam->lookup_type = G_DATALKPTYPE(data2);
 3551 
 3552                         /* 0 - Outer header, 1 - Inner header
 3553                          * [71:48] bit locations are overloaded for
 3554                          * outer vs. inner lookup types.
 3555                          */
 3556 
 3557                         if (tcam->lookup_type &&
 3558                             (tcam->lookup_type != M_DATALKPTYPE)) {
 3559                                 /* Inner header VNI */
 3560                                 tcam->vniy = ((data2 & F_DATAVIDH2) << 23) |
 3561                                              (G_DATAVIDH1(data2) << 16) |
 3562                                              G_VIDL(val);
 3563                                 tcam->dip_hit = data2 & F_DATADIPHIT;
 3564                         } else {
 3565                                 tcam->vlan_vld = data2 & F_DATAVIDH2;
 3566                                 tcam->ivlan = G_VIDL(val);
 3567                         }
 3568 
 3569                         tcam->port_num = G_DATAPORTNUM(data2);
 3570 
 3571                         /* Read tcamx. Change the control param */
 3572                         ctl |= V_CTLXYBITSEL(1);
 3573                         t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
 3574                         val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
 3575                         tcamx = G_DMACH(val) << 32;
 3576                         tcamx |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
 3577                         data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
 3578                         if (tcam->lookup_type &&
 3579                             (tcam->lookup_type != M_DATALKPTYPE)) {
 3580                                 /* Inner header VNI mask */
 3581                                 tcam->vnix = ((data2 & F_DATAVIDH2) << 23) |
 3582                                              (G_DATAVIDH1(data2) << 16) |
 3583                                              G_VIDL(val);
 3584                         }
 3585                 } else {
 3586                         tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(i));
 3587                         tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(i));
 3588                 }
 3589 
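                      /*
                       * X and Y bits set at the same position mark an
                       * unused TCAM entry; skip it.
                       */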
 3590                 if (tcamx & tcamy)
 3591                         continue;
 3592 
 3593                 tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(i));
 3594                 tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(i));
 3595 
 3596                 if (is_t5(padap))
 3597                         tcam->repli = (tcam->cls_lo & F_REPLICATE);
 3598                 else if (is_t6(padap))
 3599                         tcam->repli = (tcam->cls_lo & F_T6_REPLICATE);
 3600 
 3601                 if (tcam->repli) {
 3602                         struct fw_ldst_cmd ldst_cmd;
 3603                         struct fw_ldst_mps_rplc mps_rplc;
 3604 
 3605                         memset(&ldst_cmd, 0, sizeof(ldst_cmd));
 3606                         ldst_cmd.op_to_addrspace =
 3607                                 htonl(V_FW_CMD_OP(FW_LDST_CMD) |
 3608                                       F_FW_CMD_REQUEST |
 3609                                       F_FW_CMD_READ |
 3610                                       V_FW_LDST_CMD_ADDRSPACE(
 3611                                               FW_LDST_ADDRSPC_MPS));
 3612 
 3613                         ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
 3614 
 3615                         ldst_cmd.u.mps.rplc.fid_idx =
 3616                                 htons(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
 3617                                       V_FW_LDST_CMD_IDX(i));
 3618 
 3619                         rc = begin_synchronized_op(padap, NULL,
 3620                             SLEEP_OK | INTR_OK, "t4cudm");
 3621                         if (rc == 0) {
 3622                                 rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
 3623                                                 sizeof(ldst_cmd), &ldst_cmd);
 3624                                 end_synchronized_op(padap, 0);
 3625                         }
 3626 
 3627                         if (rc)
 3628                                 mps_rpl_backdoor(padap, &mps_rplc);
 3629                         else
 3630                                 mps_rplc = ldst_cmd.u.mps.rplc;
 3631 
 3632                         tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
 3633                         tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
 3634                         tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
 3635                         tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
 3636                         if (padap->chip_params->mps_rplc_size >
 3637                                         CUDBG_MAX_RPLC_SIZE) {
 3638                                 tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
 3639                                 tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
 3640                                 tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
 3641                                 tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
 3642                         }
 3643                 }
 3644                 cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
 3645 
 3646                 tcam->idx = i;
 3647                 tcam->rplc_size = padap->chip_params->mps_rplc_size;
 3648 
 3649                 total_size += sizeof(struct cudbg_mps_tcam);
 3650 
 3651                 tcam++;
 3652         }
 3653 
 3654         if (total_size == 0) {
 3655                 rc = CUDBG_SYSTEM_ERROR;
 3656                 goto err1;
 3657         }
 3658 
 3659         scratch_buff.size = total_size;
 3660         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 3661         if (rc)
 3662                 goto err1;
 3663 
 3664         rc = compress_buff(&scratch_buff, dbg_buff);
 3665 
 3666 err1:
 3667         scratch_buff.size = size;
 3668         release_scratch_buff(&scratch_buff, dbg_buff);
 3669 err:
 3670         return rc;
 3671 }
 3672 
 3673 static int collect_pcie_config(struct cudbg_init *pdbg_init,
 3674                                struct cudbg_buffer *dbg_buff,
 3675                                struct cudbg_error *cudbg_err)
 3676 {
 3677         struct cudbg_buffer scratch_buff;
 3678         struct adapter *padap = pdbg_init->adap;
 3679         u32 size, *value, j;
 3680         int i, rc, n;
 3681 
 3682         size = sizeof(u32) * NUM_PCIE_CONFIG_REGS;
 3683         n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
 3684         scratch_buff.size = size;
 3685 
 3686         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 3687         if (rc)
 3688                 goto err;
 3689 
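              /*
               * t5_pcie_config_array holds [start, end] config-space
               * ranges; walk each one 4 bytes at a time.
               */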
 3690         value = (u32 *)scratch_buff.data;
 3691         for (i = 0; i < n; i++) {
 3692                 for (j = t5_pcie_config_array[i][0];
 3693                      j <= t5_pcie_config_array[i][1]; j += 4) {
 3694                         *value++ = t4_hw_pci_read_cfg4(padap, j);
 3695                 }
 3696         }
 3697 
 3698         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 3699         if (rc)
 3700                 goto err1;
 3701 
 3702         rc = compress_buff(&scratch_buff, dbg_buff);
 3703 
 3704 err1:
 3705         release_scratch_buff(&scratch_buff, dbg_buff);
 3706 err:
 3707         return rc;
 3708 }
 3709 
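      /*
       * Fetch one TID's LE contents through the DBGI interface: issue
       * the read command, poll for completion, check the response
       * status, then copy out the response words.
       */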
 3710 static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
 3711                           struct cudbg_tid_data *tid_data)
 3712 {
 3713         int i, cmd_retry = 8;
 3714         struct adapter *padap = pdbg_init->adap;
 3715         u32 val;
 3716 
 3717         /* Fill REQ_DATA regs with 0's */
 3718         for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
 3719                 t4_write_reg(padap, A_LE_DB_DBGI_REQ_DATA + (i << 2), 0);
 3720 
 3721         /* Write DBGI command */
 3722         val = (0x4 << S_DBGICMD) | tid;
 3723         t4_write_reg(padap, A_LE_DB_DBGI_REQ_TCAM_CMD, val);
 3724         tid_data->dbig_cmd = val;
 3725 
 3726         val = 0;
 3727         val |= 1 << S_DBGICMDSTRT;
 3728         val |= 1;  /* LE mode */
 3729         t4_write_reg(padap, A_LE_DB_DBGI_CONFIG, val);
 3730         tid_data->dbig_conf = val;
 3731 
 3732         /* Poll the DBGICMDBUSY bit */
 3733         val = 1;
 3734         while (val) {
 3735                 val = t4_read_reg(padap, A_LE_DB_DBGI_CONFIG);
 3736                 val = (val >> S_DBGICMDBUSY) & 1;
 3737                 cmd_retry--;
 3738                 if (!cmd_retry) {
 3739                         if (pdbg_init->verbose)
 3740                                 pdbg_init->print("%s(): Timeout waiting for non-busy\n",
 3741                                          __func__);
 3742                         return CUDBG_SYSTEM_ERROR;
 3743                 }
 3744         }
 3745 
 3746         /* Check RESP status */
 3747         val = 0;
 3748         val = t4_read_reg(padap, A_LE_DB_DBGI_RSP_STATUS);
 3749         tid_data->dbig_rsp_stat = val;
 3750         if (!(val & 1)) {
 3751                 if (pdbg_init->verbose)
 3752                         pdbg_init->print("%s(): DBGI command failed\n", __func__);
 3753                 return CUDBG_SYSTEM_ERROR;
 3754         }
 3755 
 3756         /* Read RESP data */
 3757         for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
 3758                 tid_data->data[i] = t4_read_reg(padap,
 3759                                                 A_LE_DB_DBGI_RSP_DATA +
 3760                                                 (i << 2));
 3761 
 3762         tid_data->tid = tid;
 3763 
 3764         return 0;
 3765 }
 3766 
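      /*
       * Walk every LE TCAM index up to max_tid, dumping each entry
       * with cudbg_read_tid() and flushing the scratch buffer one
       * CUDBG_CHUNK_SIZE chunk at a time.
       */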
 3767 static int collect_le_tcam(struct cudbg_init *pdbg_init,
 3768                            struct cudbg_buffer *dbg_buff,
 3769                            struct cudbg_error *cudbg_err)
 3770 {
 3771         struct cudbg_buffer scratch_buff;
 3772         struct adapter *padap = pdbg_init->adap;
 3773         struct cudbg_tcam tcam_region = {0};
 3774         struct cudbg_tid_data *tid_data = NULL;
 3775         u32 value, bytes = 0, bytes_left = 0;
 3776         u32 i;
 3777         int rc, size;
 3778 
 3779         /* Get the LE regions */
 3780         /* Get hash base index */
 3781         value = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
 3782         tcam_region.tid_hash_base = value;
 3783 
 3784         /* Get routing table index */
 3785         value = t4_read_reg(padap, A_LE_DB_ROUTING_TABLE_INDEX);
 3786         tcam_region.routing_start = value;
 3787 
 3788         /* Get clip table index */
 3789         value = t4_read_reg(padap, A_LE_DB_CLIP_TABLE_INDEX);
 3790         tcam_region.clip_start = value;
 3791 
 3792         /* Get filter table index */
 3793         value = t4_read_reg(padap, A_LE_DB_FILTER_TABLE_INDEX);
 3794         tcam_region.filter_start = value;
 3795 
 3796         /* Get server table index */
 3797         value = t4_read_reg(padap, A_LE_DB_SERVER_INDEX);
 3798         tcam_region.server_start = value;
 3799 
 3800         /* Check whether hash is enabled and calculate the max tids */
 3801         value = t4_read_reg(padap, A_LE_DB_CONFIG);
 3802         if ((value >> S_HASHEN) & 1) {
 3803                 value = t4_read_reg(padap, A_LE_DB_HASH_CONFIG);
 3804                 if (chip_id(padap) > CHELSIO_T5)
 3805                         tcam_region.max_tid = (value & 0xFFFFF) +
 3806                                               tcam_region.tid_hash_base;
 3807                 else {      /* for T5 */
 3808                         value = G_HASHTIDSIZE(value);
 3809                         value = 1 << value;
 3810                         tcam_region.max_tid = value +
 3811                                 tcam_region.tid_hash_base;
 3812                 }
 3813         } else   /* hash not enabled */
 3814                 tcam_region.max_tid = CUDBG_MAX_TCAM_TID;
 3815 
 3816         size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
 3817         size += sizeof(struct cudbg_tcam);
 3818         scratch_buff.size = size;
 3819 
 3820         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 3821         if (rc)
 3822                 goto err;
 3823 
 3824         rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE, &scratch_buff);
 3825         if (rc)
 3826                 goto err;
 3827 
 3828         memcpy(scratch_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
 3829 
 3830         tid_data = (struct cudbg_tid_data *)(((struct cudbg_tcam *)
 3831                                              scratch_buff.data) + 1);
 3832         bytes_left = CUDBG_CHUNK_SIZE - sizeof(struct cudbg_tcam);
 3833         bytes = sizeof(struct cudbg_tcam);
 3834 
 3835         /* Read all TIDs */
 3836         for (i = 0; i < tcam_region.max_tid; i++) {
 3837                 if (bytes_left < sizeof(struct cudbg_tid_data)) {
 3838                         scratch_buff.size = bytes;
 3839                         rc = compress_buff(&scratch_buff, dbg_buff);
 3840                         if (rc)
 3841                                 goto err1;
 3842                         scratch_buff.size = CUDBG_CHUNK_SIZE;
 3843                         release_scratch_buff(&scratch_buff, dbg_buff);
 3844 
 3845                         /* new alloc */
 3846                         rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE,
 3847                                               &scratch_buff);
 3848                         if (rc)
 3849                                 goto err;
 3850 
 3851                         tid_data = (struct cudbg_tid_data *)(scratch_buff.data);
 3852                         bytes_left = CUDBG_CHUNK_SIZE;
 3853                         bytes = 0;
 3854                 }
 3855 
 3856                 rc = cudbg_read_tid(pdbg_init, i, tid_data);
 3857 
 3858                 if (rc) {
 3859                         cudbg_err->sys_err = rc;
 3860                         goto err1;
 3861                 }
 3862 
 3863                 tid_data++;
 3864                 bytes_left -= sizeof(struct cudbg_tid_data);
 3865                 bytes += sizeof(struct cudbg_tid_data);
 3866         }
 3867 
 3868         if (bytes) {
 3869                 scratch_buff.size = bytes;
 3870                 rc = compress_buff(&scratch_buff, dbg_buff);
 3871         }
 3872 
 3873 err1:
 3874         scratch_buff.size = CUDBG_CHUNK_SIZE;
 3875         release_scratch_buff(&scratch_buff, dbg_buff);
 3876 err:
 3877         return rc;
 3878 }
 3879 
 3880 static int collect_ma_indirect(struct cudbg_init *pdbg_init,
 3881                                struct cudbg_buffer *dbg_buff,
 3882                                struct cudbg_error *cudbg_err)
 3883 {
 3884         struct cudbg_buffer scratch_buff;
 3885         struct adapter *padap = pdbg_init->adap;
 3886         struct ireg_buf *ma_indr = NULL;
 3887         u32 size, j;
 3888         int i, rc, n;
 3889 
 3890         if (chip_id(padap) < CHELSIO_T6) {
 3891                 if (pdbg_init->verbose)
 3892                         pdbg_init->print("MA indirect available only in T6\n");
 3893                 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
 3894                 goto err;
 3895         }
 3896 
 3897         n = sizeof(t6_ma_ireg_array) / (4 * sizeof(u32));
 3898         size = sizeof(struct ireg_buf) * n * 2;
 3899         scratch_buff.size = size;
 3900 
 3901         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 3902         if (rc)
 3903                 goto err;
 3904 
 3905         ma_indr = (struct ireg_buf *)scratch_buff.data;
 3906 
 3907         for (i = 0; i < n; i++) {
 3908                 struct ireg_field *ma_fli = &ma_indr->tp_pio;
 3909                 u32 *buff = ma_indr->outbuf;
 3910 
 3911                 ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
 3912                 ma_fli->ireg_data = t6_ma_ireg_array[i][1];
 3913                 ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
 3914                 ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
 3915 
 3916                 t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
 3917                                  buff, ma_fli->ireg_offset_range,
 3918                                  ma_fli->ireg_local_offset);
 3919 
 3920                 ma_indr++;
 3921 
 3922         }
 3923 
 3924         n = sizeof(t6_ma_ireg_array2) / (4 * sizeof(u32));
 3925 
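              /*
               * The second array describes registers that must be read
               * one word at a time, stepping the local offset by 0x20
               * between reads.
               */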
 3926         for (i = 0; i < n; i++) {
 3927                 struct ireg_field *ma_fli = &ma_indr->tp_pio;
 3928                 u32 *buff = ma_indr->outbuf;
 3929 
 3930                 ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
 3931                 ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
 3932                 ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
 3933 
 3934                 for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
 3935                         t4_read_indirect(padap, ma_fli->ireg_addr,
 3936                                          ma_fli->ireg_data, buff, 1,
 3937                                          ma_fli->ireg_local_offset);
 3938                         buff++;
 3939                         ma_fli->ireg_local_offset += 0x20;
 3940                 }
 3941                 ma_indr++;
 3942         }
 3943 
 3944         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 3945         if (rc)
 3946                 goto err1;
 3947 
 3948         rc = compress_buff(&scratch_buff, dbg_buff);
 3949 
 3950 err1:
 3951         release_scratch_buff(&scratch_buff, dbg_buff);
 3952 err:
 3953         return rc;
 3954 }
 3955 
 3956 static int collect_hma_indirect(struct cudbg_init *pdbg_init,
 3957                                struct cudbg_buffer *dbg_buff,
 3958                                struct cudbg_error *cudbg_err)
 3959 {
 3960         struct cudbg_buffer scratch_buff;
 3961         struct adapter *padap = pdbg_init->adap;
 3962         struct ireg_buf *hma_indr = NULL;
 3963         u32 size;
 3964         int i, rc, n;
 3965 
 3966         if (chip_id(padap) < CHELSIO_T6) {
 3967                 if (pdbg_init->verbose)
 3968                         pdbg_init->print("HMA indirect available only in T6\n");
 3969                 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
 3970                 goto err;
 3971         }
 3972 
 3973         n = sizeof(t6_hma_ireg_array) / (4 * sizeof(u32));
 3974         size = sizeof(struct ireg_buf) * n;
 3975         scratch_buff.size = size;
 3976 
 3977         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 3978         if (rc)
 3979                 goto err;
 3980 
 3981         hma_indr = (struct ireg_buf *)scratch_buff.data;
 3982 
 3983         for (i = 0; i < n; i++) {
 3984                 struct ireg_field *hma_fli = &hma_indr->tp_pio;
 3985                 u32 *buff = hma_indr->outbuf;
 3986 
 3987                 hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
 3988                 hma_fli->ireg_data = t6_hma_ireg_array[i][1];
 3989                 hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
 3990                 hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
 3991 
 3992                 t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
 3993                                  buff, hma_fli->ireg_offset_range,
 3994                                  hma_fli->ireg_local_offset);
 3995 
 3996                 hma_indr++;
 3997 
 3998         }
 3999 
 4000         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 4001         if (rc)
 4002                 goto err1;
 4003 
 4004         rc = compress_buff(&scratch_buff, dbg_buff);
 4005 
 4006 err1:
 4007         release_scratch_buff(&scratch_buff, dbg_buff);
 4008 err:
 4009         return rc;
 4010 }
 4011 
 4012 static int collect_pcie_indirect(struct cudbg_init *pdbg_init,
 4013                                  struct cudbg_buffer *dbg_buff,
 4014                                  struct cudbg_error *cudbg_err)
 4015 {
 4016         struct cudbg_buffer scratch_buff;
 4017         struct adapter *padap = pdbg_init->adap;
 4018         struct ireg_buf *ch_pcie;
 4019         u32 size;
 4020         int i, rc, n;
 4021 
 4022         n = sizeof(t5_pcie_pdbg_array) / (4 * sizeof(u32));
 4023         size = sizeof(struct ireg_buf) * n * 2;
 4024         scratch_buff.size = size;
 4025 
 4026         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 4027         if (rc)
 4028                 goto err;
 4029 
 4030         ch_pcie = (struct ireg_buf *)scratch_buff.data;
 4031 
 4032         /* PCIE_PDBG */
 4033         for (i = 0; i < n; i++) {
 4034                 struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
 4035                 u32 *buff = ch_pcie->outbuf;
 4036 
 4037                 pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
 4038                 pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
 4039                 pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
 4040                 pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
 4041 
 4042                 t4_read_indirect(padap,
 4043                                 pcie_pio->ireg_addr,
 4044                                 pcie_pio->ireg_data,
 4045                                 buff,
 4046                                 pcie_pio->ireg_offset_range,
 4047                                 pcie_pio->ireg_local_offset);
 4048 
 4049                 ch_pcie++;
 4050         }
 4051 
 4052         /* PCIE_CDBG */
 4053         n = sizeof(t5_pcie_cdbg_array) / (4 * sizeof(u32));
 4054         for (i = 0; i < n; i++) {
 4055                 struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
 4056                 u32 *buff = ch_pcie->outbuf;
 4057 
 4058                 pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
 4059                 pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
 4060                 pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
 4061                 pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
 4062 
 4063                 t4_read_indirect(padap,
 4064                                 pcie_pio->ireg_addr,
 4065                                 pcie_pio->ireg_data,
 4066                                 buff,
 4067                                 pcie_pio->ireg_offset_range,
 4068                                 pcie_pio->ireg_local_offset);
 4069 
 4070                 ch_pcie++;
 4071         }
 4072 
 4073         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 4074         if (rc)
 4075                 goto err1;
 4076 
 4077         rc = compress_buff(&scratch_buff, dbg_buff);
 4078 
 4079 err1:
 4080         release_scratch_buff(&scratch_buff, dbg_buff);
 4081 err:
 4082         return rc;
 4083 
 4084 }
 4085 
 4086 static int collect_tp_indirect(struct cudbg_init *pdbg_init,
 4087                                struct cudbg_buffer *dbg_buff,
 4088                                struct cudbg_error *cudbg_err)
 4089 {
 4090         struct cudbg_buffer scratch_buff;
 4091         struct adapter *padap = pdbg_init->adap;
 4092         struct ireg_buf *ch_tp_pio;
 4093         u32 size;
 4094         int i, rc, n = 0;
 4095 
 4096         if (is_t5(padap))
 4097                 n = sizeof(t5_tp_pio_array) / (4 * sizeof(u32));
 4098         else if (is_t6(padap))
 4099                 n = sizeof(t6_tp_pio_array) / (4 * sizeof(u32));
 4100 
 4101         size = sizeof(struct ireg_buf) * n * 3;
 4102         scratch_buff.size = size;
 4103 
 4104         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 4105         if (rc)
 4106                 goto err;
 4107 
 4108         ch_tp_pio = (struct ireg_buf *)scratch_buff.data;
 4109 
  4110         /* TP_PIO */
 4111         for (i = 0; i < n; i++) {
 4112                 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
 4113                 u32 *buff = ch_tp_pio->outbuf;
 4114 
 4115                 if (is_t5(padap)) {
 4116                         tp_pio->ireg_addr = t5_tp_pio_array[i][0];
 4117                         tp_pio->ireg_data = t5_tp_pio_array[i][1];
 4118                         tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
 4119                         tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
 4120                 } else if (is_t6(padap)) {
 4121                         tp_pio->ireg_addr = t6_tp_pio_array[i][0];
 4122                         tp_pio->ireg_data = t6_tp_pio_array[i][1];
 4123                         tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
 4124                         tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
 4125                 }
 4126 
 4127                 t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
 4128                                tp_pio->ireg_local_offset, true);
 4129 
 4130                 ch_tp_pio++;
 4131         }
 4132 
  4133         /* TP_TM_PIO */
 4134         if (is_t5(padap))
 4135                 n = sizeof(t5_tp_tm_pio_array) / (4 * sizeof(u32));
 4136         else if (is_t6(padap))
 4137                 n = sizeof(t6_tp_tm_pio_array) / (4 * sizeof(u32));
 4138 
 4139         for (i = 0; i < n; i++) {
 4140                 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
 4141                 u32 *buff = ch_tp_pio->outbuf;
 4142 
 4143                 if (is_t5(padap)) {
 4144                         tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
 4145                         tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
 4146                         tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
 4147                         tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
 4148                 } else if (is_t6(padap)) {
 4149                         tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
 4150                         tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
 4151                         tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
 4152                         tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
 4153                 }
 4154 
 4155                 t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
 4156                                   tp_pio->ireg_local_offset, true);
 4157 
 4158                 ch_tp_pio++;
 4159         }
 4160 
  4161         /* TP_MIB_INDEX */
 4162         if (is_t5(padap))
 4163                 n = sizeof(t5_tp_mib_index_array) / (4 * sizeof(u32));
 4164         else if (is_t6(padap))
 4165                 n = sizeof(t6_tp_mib_index_array) / (4 * sizeof(u32));
 4166 
  4167         for (i = 0; i < n; i++) {
 4168                 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
 4169                 u32 *buff = ch_tp_pio->outbuf;
 4170 
 4171                 if (is_t5(padap)) {
 4172                         tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
 4173                         tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
 4174                         tp_pio->ireg_local_offset =
 4175                                 t5_tp_mib_index_array[i][2];
 4176                         tp_pio->ireg_offset_range =
 4177                                 t5_tp_mib_index_array[i][3];
 4178                 } else if (is_t6(padap)) {
 4179                         tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
 4180                         tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
 4181                         tp_pio->ireg_local_offset =
 4182                                 t6_tp_mib_index_array[i][2];
 4183                         tp_pio->ireg_offset_range =
 4184                                 t6_tp_mib_index_array[i][3];
 4185                 }
 4186 
 4187                 t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
 4188                                tp_pio->ireg_local_offset, true);
 4189 
 4190                 ch_tp_pio++;
 4191         }
 4192 
 4193         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 4194         if (rc)
 4195                 goto err1;
 4196 
 4197         rc = compress_buff(&scratch_buff, dbg_buff);
 4198 
 4199 err1:
 4200         release_scratch_buff(&scratch_buff, dbg_buff);
 4201 err:
 4202         return rc;
 4203 }
 4204 
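      /*
       * Dump the two SGE debug register windows described by
       * t5_sge_dbg_index_array, using t4_read_indirect() for each.
       */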
 4205 static int collect_sge_indirect(struct cudbg_init *pdbg_init,
 4206                                 struct cudbg_buffer *dbg_buff,
 4207                                 struct cudbg_error *cudbg_err)
 4208 {
 4209         struct cudbg_buffer scratch_buff;
 4210         struct adapter *padap = pdbg_init->adap;
 4211         struct ireg_buf *ch_sge_dbg;
 4212         u32 size;
 4213         int i, rc;
 4214 
 4215         size = sizeof(struct ireg_buf) * 2;
 4216         scratch_buff.size = size;
 4217 
 4218         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 4219         if (rc)
 4220                 goto err;
 4221 
 4222         ch_sge_dbg = (struct ireg_buf *)scratch_buff.data;
 4223 
 4224         for (i = 0; i < 2; i++) {
 4225                 struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
 4226                 u32 *buff = ch_sge_dbg->outbuf;
 4227 
 4228                 sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
 4229                 sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
 4230                 sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
 4231                 sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
 4232 
 4233                 t4_read_indirect(padap,
 4234                                 sge_pio->ireg_addr,
 4235                                 sge_pio->ireg_data,
 4236                                 buff,
 4237                                 sge_pio->ireg_offset_range,
 4238                                 sge_pio->ireg_local_offset);
 4239 
 4240                 ch_sge_dbg++;
 4241         }
 4242 
 4243         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 4244         if (rc)
 4245                 goto err1;
 4246 
 4247         rc = compress_buff(&scratch_buff, dbg_buff);
 4248 
 4249 err1:
 4250         release_scratch_buff(&scratch_buff, dbg_buff);
 4251 err:
 4252         return rc;
 4253 }
 4254 
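      /*
       * Collect a handful of individual debug registers (enumerated in the
       * comment below).  T6 adds one extra PCIE_CDEBUG_INDEX word, so nreg
       * is 6 on T5 and 7 on T6.
       */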
 4255 static int collect_full(struct cudbg_init *pdbg_init,
 4256                         struct cudbg_buffer *dbg_buff,
 4257                         struct cudbg_error *cudbg_err)
 4258 {
 4259         struct cudbg_buffer scratch_buff;
 4260         struct adapter *padap = pdbg_init->adap;
 4261         u32 reg_addr, reg_data, reg_local_offset, reg_offset_range;
 4262         u32 *sp;
 4263         int rc;
 4264         int nreg = 0;
 4265 
  4266         /* Collect Registers:
  4267          * TP_DBG_SCHED_TX (0x7e40 + 0x6a),
  4268          * TP_DBG_SCHED_RX (0x7e40 + 0x6b),
  4269          * TP_DBG_CSIDE_INT (0x7e40 + 0x23f),
  4270          * TP_DBG_ESIDE_INT (0x7e40 + 0x148),
  4271          * PCIE_CDEBUG_INDEX[AppData0] (0x5a10 + 2),
  4272          * PCIE_CDEBUG_INDEX[AppData1] (0x5a10 + 3) (T6 only),
  4273          * SGE_DEBUG_DATA_HIGH_INDEX_10 (0x12a8)
  4274          */
 4275 
 4276         if (is_t5(padap))
 4277                 nreg = 6;
 4278         else if (is_t6(padap))
 4279                 nreg = 7;
 4280 
 4281         scratch_buff.size = nreg * sizeof(u32);
 4282 
 4283         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 4284         if (rc)
 4285                 goto err;
 4286 
 4287         sp = (u32 *)scratch_buff.data;
 4288 
 4289         /* TP_DBG_SCHED_TX */
 4290         reg_local_offset = t5_tp_pio_array[3][2] + 0xa;
 4291         reg_offset_range = 1;
 4292 
 4293         t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
 4294 
 4295         sp++;
 4296 
 4297         /* TP_DBG_SCHED_RX */
 4298         reg_local_offset = t5_tp_pio_array[3][2] + 0xb;
 4299         reg_offset_range = 1;
 4300 
 4301         t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
 4302 
 4303         sp++;
 4304 
 4305         /* TP_DBG_CSIDE_INT */
 4306         reg_local_offset = t5_tp_pio_array[9][2] + 0xf;
 4307         reg_offset_range = 1;
 4308 
 4309         t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
 4310 
 4311         sp++;
 4312 
 4313         /* TP_DBG_ESIDE_INT */
 4314         reg_local_offset = t5_tp_pio_array[8][2] + 3;
 4315         reg_offset_range = 1;
 4316 
 4317         t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
 4318 
 4319         sp++;
 4320 
 4321         /* PCIE_CDEBUG_INDEX[AppData0] */
 4322         reg_addr = t5_pcie_cdbg_array[0][0];
 4323         reg_data = t5_pcie_cdbg_array[0][1];
 4324         reg_local_offset = t5_pcie_cdbg_array[0][2] + 2;
 4325         reg_offset_range = 1;
 4326 
 4327         t4_read_indirect(padap, reg_addr, reg_data, sp, reg_offset_range,
 4328                          reg_local_offset);
 4329 
 4330         sp++;
 4331 
 4332         if (is_t6(padap)) {
 4333                 /* PCIE_CDEBUG_INDEX[AppData1] */
 4334                 reg_addr = t5_pcie_cdbg_array[0][0];
 4335                 reg_data = t5_pcie_cdbg_array[0][1];
 4336                 reg_local_offset = t5_pcie_cdbg_array[0][2] + 3;
 4337                 reg_offset_range = 1;
 4338 
 4339                 t4_read_indirect(padap, reg_addr, reg_data, sp,
 4340                                  reg_offset_range, reg_local_offset);
 4341 
 4342                 sp++;
 4343         }
 4344 
 4345         /* SGE_DEBUG_DATA_HIGH_INDEX_10 */
 4346         *sp = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_10);
 4347 
 4348         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 4349         if (rc)
 4350                 goto err1;
 4351 
 4352         rc = compress_buff(&scratch_buff, dbg_buff);
 4353 
 4354 err1:
 4355         release_scratch_buff(&scratch_buff, dbg_buff);
 4356 err:
 4357         return rc;
 4358 }
 4359 
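      /*
       * Collect VPD data (serial/part numbers plus firmware versions).  The
       * body is compiled out under "notyet", so this entity currently
       * returns CUDBG_STATUS_NOT_IMPLEMENTED.
       */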
 4360 static int collect_vpd_data(struct cudbg_init *pdbg_init,
 4361                             struct cudbg_buffer *dbg_buff,
 4362                             struct cudbg_error *cudbg_err)
 4363 {
 4364 #ifdef notyet
 4365         struct cudbg_buffer scratch_buff;
 4366         struct adapter *padap = pdbg_init->adap;
 4367         struct struct_vpd_data *vpd_data;
 4368         char vpd_ver[4];
 4369         u32 fw_vers;
 4370         u32 size;
 4371         int rc;
 4372 
 4373         size = sizeof(struct struct_vpd_data);
 4374         scratch_buff.size = size;
 4375 
 4376         rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
 4377         if (rc)
 4378                 goto err;
 4379 
 4380         vpd_data = (struct struct_vpd_data *)scratch_buff.data;
 4381 
 4382         if (is_t5(padap)) {
 4383                 read_vpd_reg(padap, SN_REG_ADDR, SN_MAX_LEN, vpd_data->sn);
 4384                 read_vpd_reg(padap, BN_REG_ADDR, BN_MAX_LEN, vpd_data->bn);
 4385                 read_vpd_reg(padap, NA_REG_ADDR, NA_MAX_LEN, vpd_data->na);
 4386                 read_vpd_reg(padap, MN_REG_ADDR, MN_MAX_LEN, vpd_data->mn);
 4387         } else if (is_t6(padap)) {
 4388                 read_vpd_reg(padap, SN_T6_ADDR, SN_MAX_LEN, vpd_data->sn);
 4389                 read_vpd_reg(padap, BN_T6_ADDR, BN_MAX_LEN, vpd_data->bn);
 4390                 read_vpd_reg(padap, NA_T6_ADDR, NA_MAX_LEN, vpd_data->na);
 4391                 read_vpd_reg(padap, MN_T6_ADDR, MN_MAX_LEN, vpd_data->mn);
 4392         }
 4393 
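              /*
               * Prefer asking the firmware for the serial-config and VPD
               * versions; when no usable firmware is attached (or the call
               * fails), fall back to reading them through the VPD backdoor.
               */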
 4394         if (is_fw_attached(pdbg_init)) {
  4395                 rc = t4_get_scfg_version(padap, &vpd_data->scfg_vers);
 4396         } else {
 4397                 rc = 1;
 4398         }
 4399 
 4400         if (rc) {
 4401                 /* Now trying with backdoor mechanism */
 4402                 rc = read_vpd_reg(padap, SCFG_VER_ADDR, SCFG_VER_LEN,
 4403                                   (u8 *)&vpd_data->scfg_vers);
 4404                 if (rc)
 4405                         goto err1;
 4406         }
 4407 
 4408         if (is_fw_attached(pdbg_init)) {
 4409                 rc = t4_get_vpd_version(padap, &vpd_data->vpd_vers);
 4410         } else {
 4411                 rc = 1;
 4412         }
 4413 
 4414         if (rc) {
 4415                 /* Now trying with backdoor mechanism */
 4416                 rc = read_vpd_reg(padap, VPD_VER_ADDR, VPD_VER_LEN,
 4417                                   (u8 *)vpd_ver);
 4418                 if (rc)
 4419                         goto err1;
  4420                 /* read_vpd_reg returns the raw stored bytes; the VPD
  4421                  * version is only 2 bytes, so NUL-terminate the string
  4422                  * before parsing it as hex. */
  4423                 vpd_ver[2] = '\0';
 4424                 vpd_data->vpd_vers = simple_strtoul(vpd_ver, NULL, 16);
 4425         }
 4426 
 4427         /* Get FW version if it's not already filled in */
 4428         fw_vers = padap->params.fw_vers;
 4429         if (!fw_vers) {
 4430                 rc = t4_get_fw_version(padap, &fw_vers);
 4431                 if (rc)
 4432                         goto err1;
 4433         }
 4434 
 4435         vpd_data->fw_major = G_FW_HDR_FW_VER_MAJOR(fw_vers);
 4436         vpd_data->fw_minor = G_FW_HDR_FW_VER_MINOR(fw_vers);
 4437         vpd_data->fw_micro = G_FW_HDR_FW_VER_MICRO(fw_vers);
 4438         vpd_data->fw_build = G_FW_HDR_FW_VER_BUILD(fw_vers);
 4439 
 4440         rc = write_compression_hdr(&scratch_buff, dbg_buff);
 4441         if (rc)
 4442                 goto err1;
 4443 
 4444         rc = compress_buff(&scratch_buff, dbg_buff);
 4445 
 4446 err1:
 4447         release_scratch_buff(&scratch_buff, dbg_buff);
 4448 err:
 4449         return rc;
 4450 #endif
 4451         return (CUDBG_STATUS_NOT_IMPLEMENTED);
 4452 }
