FreeBSD/Linux Kernel Cross Reference
sys/dev/ice/ice_sched.c

/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2021, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "ice_sched.h"

/**
 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
 * @pi: port information structure
 * @info: Scheduler element information from firmware
 *
 * This function inserts the root node of the scheduling tree topology
 * into the SW DB.
 */
static enum ice_status
ice_sched_add_root_node(struct ice_port_info *pi,
                        struct ice_aqc_txsched_elem_data *info)
{
        struct ice_sched_node *root;
        struct ice_hw *hw;

        if (!pi)
                return ICE_ERR_PARAM;

        hw = pi->hw;

        root = (struct ice_sched_node *)ice_malloc(hw, sizeof(*root));
        if (!root)
                return ICE_ERR_NO_MEMORY;

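        /* Note: each child slot only needs a pointer, so sizeof(*root) below
         * over-allocates relative to sizeof(root->children[0]); the
         * annotation that follows suppresses the resulting Coverity
         * suspicious_sizeof report.
         */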
        /* coverity[suspicious_sizeof] */
        root->children = (struct ice_sched_node **)
                ice_calloc(hw, hw->max_children[0], sizeof(*root));
        if (!root->children) {
                ice_free(hw, root);
                return ICE_ERR_NO_MEMORY;
        }

        ice_memcpy(&root->info, info, sizeof(*info), ICE_DMA_TO_NONDMA);
        pi->root = root;
        return ICE_SUCCESS;
}

/**
 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
 * @teid: node TEID to search
 *
 * This function searches the scheduling tree from the SW DB for a node
 * matching the TEID. The search is recursive and bounded: it stops
 * descending once it reaches the maximum supported layer.
 *
 * This function must be called while holding port_info->sched_lock.
 */
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
        u16 i;

        /* The TEID is the same as that of the start_node */
        if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
                return start_node;

        /* The node has no children or is at the max layer */
        if (!start_node->num_children ||
            start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
            start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
                return NULL;

        /* Check if the TEID matches any of the child nodes */
        for (i = 0; i < start_node->num_children; i++)
                if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
                        return start_node->children[i];

        /* Search within each child's sub-tree */
        for (i = 0; i < start_node->num_children; i++) {
                struct ice_sched_node *tmp;

                tmp = ice_sched_find_node_by_teid(start_node->children[i],
                                                  teid);
                if (tmp)
                        return tmp;
        }

        return NULL;
}
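
/*
 * Example (illustrative sketch, not part of the driver): per the note
 * above, lookups must run under port_info->sched_lock; the TEID value
 * here is a hypothetical placeholder.
 *
 *      struct ice_sched_node *node;
 *
 *      ice_acquire_lock(&pi->sched_lock);
 *      node = ice_sched_find_node_by_teid(pi->root, 0x1a);
 *      ice_release_lock(&pi->sched_lock);
 */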

/**
 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
 * @hw: pointer to the HW struct
 * @cmd_opc: cmd opcode
 * @elems_req: number of elements to request
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_resp: returns total number of elements in the response
 * @cd: pointer to command details structure or NULL
 *
 * This function sends a scheduling elements cmd (cmd_opc)
 */
static enum ice_status
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
                            u16 elems_req, void *buf, u16 buf_size,
                            u16 *elems_resp, struct ice_sq_cd *cd)
{
        struct ice_aqc_sched_elem_cmd *cmd;
        struct ice_aq_desc desc;
        enum ice_status status;

        cmd = &desc.params.sched_elem_cmd;
        ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
        cmd->num_elem_req = CPU_TO_LE16(elems_req);
        desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
        status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
        if (!status && elems_resp)
                *elems_resp = LE16_TO_CPU(cmd->num_elem_resp);

        return status;
}

/**
 * ice_aq_query_sched_elems - query scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements returned
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduling elements (0x0404)
 */
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
                         struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
                         u16 *elems_ret, struct ice_sq_cd *cd)
{
        return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
                                           elems_req, (void *)buf, buf_size,
                                           elems_ret, cd);
}
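
/*
 * Example (illustrative sketch): querying a single element by TEID; the
 * TEID is a hypothetical placeholder. Compare ice_sched_query_elem(),
 * used by ice_sched_add_node() below, for the one-element case.
 *
 *      struct ice_aqc_txsched_elem_data elem = { 0 };
 *      u16 num_ret = 0;
 *      enum ice_status status;
 *
 *      elem.node_teid = CPU_TO_LE32(0x1a);
 *      status = ice_aq_query_sched_elems(hw, 1, &elem, sizeof(elem),
 *                                        &num_ret, NULL);
 *
 * On success (!status && num_ret == 1), elem holds the firmware's view
 * of the node.
 */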

/**
 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
 * @pi: port information structure
 * @layer: Scheduler layer of the node
 * @info: Scheduler element information from firmware
 *
 * This function inserts a scheduler node into the SW DB.
 */
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
                   struct ice_aqc_txsched_elem_data *info)
{
        struct ice_aqc_txsched_elem_data elem;
        struct ice_sched_node *parent;
        struct ice_sched_node *node;
        enum ice_status status;
        struct ice_hw *hw;

        if (!pi)
                return ICE_ERR_PARAM;

        hw = pi->hw;

        /* A valid parent node should be there */
        parent = ice_sched_find_node_by_teid(pi->root,
                                             LE32_TO_CPU(info->parent_teid));
        if (!parent) {
                ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
                          LE32_TO_CPU(info->parent_teid));
                return ICE_ERR_PARAM;
        }

        /* query the current node information from FW before adding it
         * to the SW DB
         */
        status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem);
        if (status)
                return status;
        node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node));
        if (!node)
                return ICE_ERR_NO_MEMORY;
        if (hw->max_children[layer]) {
                /* coverity[suspicious_sizeof] */
                node->children = (struct ice_sched_node **)
                        ice_calloc(hw, hw->max_children[layer], sizeof(*node));
                if (!node->children) {
                        ice_free(hw, node);
                        return ICE_ERR_NO_MEMORY;
                }
        }

        node->in_use = true;
        node->parent = parent;
        node->tx_sched_layer = layer;
        parent->children[parent->num_children++] = node;
        node->info = elem;
        return ICE_SUCCESS;
}

/**
 * ice_aq_delete_sched_elems - delete scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to delete
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_del: returns total number of groups deleted
 * @cd: pointer to command details structure or NULL
 *
 * Delete scheduling elements (0x040F)
 */
static enum ice_status
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
                          struct ice_aqc_delete_elem *buf, u16 buf_size,
                          u16 *grps_del, struct ice_sq_cd *cd)
{
        return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
                                           grps_req, (void *)buf, buf_size,
                                           grps_del, cd);
}

/**
 * ice_sched_remove_elems - remove nodes from HW
 * @hw: pointer to the HW struct
 * @parent: pointer to the parent node
 * @num_nodes: number of nodes
 * @node_teids: array of node TEIDs to be deleted
 *
 * This function removes nodes from HW
 */
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
                       u16 num_nodes, u32 *node_teids)
{
        struct ice_aqc_delete_elem *buf;
        u16 i, num_groups_removed = 0;
        enum ice_status status;
        u16 buf_size;

        buf_size = ice_struct_size(buf, teid, num_nodes);
        buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
        if (!buf)
                return ICE_ERR_NO_MEMORY;

        buf->hdr.parent_teid = parent->info.node_teid;
        buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
        for (i = 0; i < num_nodes; i++)
                buf->teid[i] = CPU_TO_LE32(node_teids[i]);

        status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
                                           &num_groups_removed, NULL);
        if (status != ICE_SUCCESS || num_groups_removed != 1)
                ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
                          hw->adminq.sq_last_status);

        ice_free(hw, buf);
        return status;
}

/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @pi: port information structure
 * @parent: pointer to the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 */
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info *pi,
                         struct ice_sched_node *parent, u8 layer)
{
        return pi->sib_head[parent->tc_num][layer];
}

/**
 * ice_sched_get_tc_node - get pointer to TC node
 * @pi: port information structure
 * @tc: TC number
 *
 * This function returns the TC node pointer
 */
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
{
        u8 i;

        if (!pi || !pi->root)
                return NULL;
        for (i = 0; i < pi->root->num_children; i++)
                if (pi->root->children[i]->tc_num == tc)
                        return pi->root->children[i];
        return NULL;
}
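
/*
 * Example (illustrative sketch): visiting every TC node under a port.
 * The ice_for_each_traffic_class() iterator is assumed from the driver's
 * headers; a plain loop over the TC range would do the same.
 *
 *      u8 tc;
 *
 *      ice_for_each_traffic_class(tc) {
 *              struct ice_sched_node *tc_node;
 *
 *              tc_node = ice_sched_get_tc_node(pi, tc);
 *              if (!tc_node)
 *                      continue;
 *              ...
 *      }
 */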

/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
        struct ice_sched_node *parent;
        struct ice_hw *hw = pi->hw;
        u8 i, j;

        /* Free the children before freeing up the parent node
         * The parent array is updated below and that shifts the nodes
         * in the array. So always pick the first child if num children > 0
         */
        while (node->num_children)
                ice_free_sched_node(pi, node->children[0]);

        /* Leaf, TC and root nodes can't be deleted by SW */
        if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
            node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
            node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
            node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
                u32 teid = LE32_TO_CPU(node->info.node_teid);

                ice_sched_remove_elems(hw, node->parent, 1, &teid);
        }
        parent = node->parent;
        /* root has no parent */
        if (parent) {
                struct ice_sched_node *p;

                /* update the parent */
                for (i = 0; i < parent->num_children; i++)
                        if (parent->children[i] == node) {
                                for (j = i + 1; j < parent->num_children; j++)
                                        parent->children[j - 1] =
                                                parent->children[j];
                                parent->num_children--;
                                break;
                        }

                p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
                while (p) {
                        if (p->sibling == node) {
                                p->sibling = node->sibling;
                                break;
                        }
                        p = p->sibling;
                }

                /* update the sibling head if head is getting removed */
                if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
                        pi->sib_head[node->tc_num][node->tx_sched_layer] =
                                node->sibling;
        }

        /* leaf nodes have no children */
        if (node->children)
                ice_free(hw, node->children);
        ice_free(hw, node);
}
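
/*
 * Example (illustrative sketch): tearing down a software-owned subtree
 * by TEID, under the scheduler lock as required above. The TEID is a
 * hypothetical placeholder.
 *
 *      struct ice_sched_node *node;
 *
 *      ice_acquire_lock(&pi->sched_lock);
 *      node = ice_sched_find_node_by_teid(pi->root, 0x2b);
 *      if (node)
 *              ice_free_sched_node(pi, node);
 *      ice_release_lock(&pi->sched_lock);
 */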

/**
 * ice_aq_get_dflt_topo - gets default scheduler topology
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_branches: returns total number of queue-to-port branches
 * @cd: pointer to command details structure or NULL
 *
 * Get default scheduler topology (0x0400)
 */
static enum ice_status
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
                     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
                     u8 *num_branches, struct ice_sq_cd *cd)
{
        struct ice_aqc_get_topo *cmd;
        struct ice_aq_desc desc;
        enum ice_status status;

        cmd = &desc.params.get_topo;
        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
        cmd->port_num = lport;
        status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
        if (!status && num_branches)
                *num_branches = cmd->num_branches;

        return status;
}

/**
 * ice_aq_add_sched_elems - adds scheduling elements
 * @hw: pointer to the HW struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_added: returns total number of groups added
 * @cd: pointer to command details structure or NULL
 *
 * Add scheduling elements (0x0401)
 */
static enum ice_status
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
                       struct ice_aqc_add_elem *buf, u16 buf_size,
                       u16 *grps_added, struct ice_sq_cd *cd)
{
        return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
                                           grps_req, (void *)buf, buf_size,
                                           grps_added, cd);
}

/**
 * ice_aq_cfg_sched_elems - configures scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_cfgd: returns total number of elements configured
 * @cd: pointer to command details structure or NULL
 *
 * Configure scheduling elements (0x0403)
 */
static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
                       struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
                       u16 *elems_cfgd, struct ice_sq_cd *cd)
{
        return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
                                           elems_req, (void *)buf, buf_size,
                                           elems_cfgd, cd);
}

/**
 * ice_aq_move_sched_elems - move scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to move
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_movd: returns total number of groups moved
 * @cd: pointer to command details structure or NULL
 *
 * Move scheduling elements (0x0408)
 */
static enum ice_status
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
                        struct ice_aqc_move_elem *buf, u16 buf_size,
                        u16 *grps_movd, struct ice_sq_cd *cd)
{
        return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
                                           grps_req, (void *)buf, buf_size,
                                           grps_movd, cd);
}

/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 *
 * Suspend scheduling elements (0x0409)
 */
static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
                           u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
        return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
                                           elems_req, (void *)buf, buf_size,
                                           elems_ret, cd);
}

/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements resumed
 * @cd: pointer to command details structure or NULL
 *
 * Resume scheduling elements (0x040A)
 */
static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
                          u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
        return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
                                           elems_req, (void *)buf, buf_size,
                                           elems_ret, cd);
}

/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the HW struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduler resource allocation (0x0412)
 */
static enum ice_status
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
                       struct ice_aqc_query_txsched_res_resp *buf,
                       struct ice_sq_cd *cd)
{
        struct ice_aq_desc desc;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
        return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
 * @hw: pointer to the HW struct
 * @num_nodes: number of nodes
 * @node_teids: array of node TEIDs to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes HW nodes
 */
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
                               bool suspend)
{
        u16 i, buf_size, num_elem_ret = 0;
        enum ice_status status;
        __le32 *buf;

        buf_size = sizeof(*buf) * num_nodes;
        buf = (__le32 *)ice_malloc(hw, buf_size);
        if (!buf)
                return ICE_ERR_NO_MEMORY;

        for (i = 0; i < num_nodes; i++)
                buf[i] = CPU_TO_LE32(node_teids[i]);

        if (suspend)
                status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
                                                    buf_size, &num_elem_ret,
                                                    NULL);
        else
                status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
                                                   buf_size, &num_elem_ret,
                                                   NULL);
        if (status != ICE_SUCCESS || num_elem_ret != num_nodes)
                ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

        ice_free(hw, buf);
        return status;
}
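
/*
 * Example (illustrative sketch): suspending a pair of nodes, then
 * resuming them after reconfiguration. The TEIDs are hypothetical
 * placeholders; ICE_SUCCESS is zero, so !status tests for success.
 *
 *      u32 teids[2] = { 0x30, 0x31 };
 *
 *      if (!ice_sched_suspend_resume_elems(hw, 2, teids, true)) {
 *              ... reconfigure the suspended nodes ...
 *              ice_sched_suspend_resume_elems(hw, 2, teids, false);
 *      }
 */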

/**
 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */
static enum ice_status
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
        struct ice_vsi_ctx *vsi_ctx;
        struct ice_q_ctx *q_ctx;

        vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
        if (!vsi_ctx)
                return ICE_ERR_PARAM;
        /* allocate LAN queue contexts */
        if (!vsi_ctx->lan_q_ctx[tc]) {
                vsi_ctx->lan_q_ctx[tc] = (struct ice_q_ctx *)
                        ice_calloc(hw, new_numqs, sizeof(*q_ctx));
                if (!vsi_ctx->lan_q_ctx[tc])
                        return ICE_ERR_NO_MEMORY;
                vsi_ctx->num_lan_q_entries[tc] = new_numqs;
                return ICE_SUCCESS;
        }
        /* the number of queues has increased; update the queue contexts */
        if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
                u16 prev_num = vsi_ctx->num_lan_q_entries[tc];

                q_ctx = (struct ice_q_ctx *)
                        ice_calloc(hw, new_numqs, sizeof(*q_ctx));
                if (!q_ctx)
                        return ICE_ERR_NO_MEMORY;
                ice_memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
                           prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
                ice_free(hw, vsi_ctx->lan_q_ctx[tc]);
                vsi_ctx->lan_q_ctx[tc] = q_ctx;
                vsi_ctx->num_lan_q_entries[tc] = new_numqs;
        }
        return ICE_SUCCESS;
}

/**
 * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */
static enum ice_status
ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
        struct ice_vsi_ctx *vsi_ctx;
        struct ice_q_ctx *q_ctx;

        vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
        if (!vsi_ctx)
                return ICE_ERR_PARAM;
        /* allocate RDMA queue contexts */
        if (!vsi_ctx->rdma_q_ctx[tc]) {
                vsi_ctx->rdma_q_ctx[tc] = (struct ice_q_ctx *)
                        ice_calloc(hw, new_numqs, sizeof(*q_ctx));
                if (!vsi_ctx->rdma_q_ctx[tc])
                        return ICE_ERR_NO_MEMORY;
                vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
                return ICE_SUCCESS;
        }
        /* the number of queues has increased; update the queue contexts */
        if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
                u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];

                q_ctx = (struct ice_q_ctx *)
                        ice_calloc(hw, new_numqs, sizeof(*q_ctx));
                if (!q_ctx)
                        return ICE_ERR_NO_MEMORY;
                ice_memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
                           prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
                ice_free(hw, vsi_ctx->rdma_q_ctx[tc]);
                vsi_ctx->rdma_q_ctx[tc] = q_ctx;
                vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
        }
        return ICE_SUCCESS;
}
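
/*
 * Note on the two allocators above: the per-TC queue-context arrays only
 * ever grow. When new_numqs is less than or equal to the current entry
 * count, the existing allocation is left in place unchanged.
 */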

/**
 * ice_aq_rl_profile - performs a rate limiting task
 * @hw: pointer to the HW struct
 * @opcode: opcode for add, query, or remove profile(s)
 * @num_profiles: the number of profiles
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_processed: number of processed add or remove profile(s) to return
 * @cd: pointer to command details structure
 *
 * RL profile function to add, query, or remove profile(s)
 */
static enum ice_status
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
                  u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
                  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
        struct ice_aqc_rl_profile *cmd;
        struct ice_aq_desc desc;
        enum ice_status status;

        cmd = &desc.params.rl_profile;

        ice_fill_dflt_direct_cmd_desc(&desc, opcode);
        desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
        cmd->num_profiles = CPU_TO_LE16(num_profiles);
        status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
        if (!status && num_processed)
                *num_processed = LE16_TO_CPU(cmd->num_processed);
        return status;
}

/**
 * ice_aq_add_rl_profile - adds rate limiting profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_added: total number of profiles added to return
 * @cd: pointer to command details structure
 *
 * Add RL profile (0x0410)
 */
static enum ice_status
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
                      struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
                      u16 *num_profiles_added, struct ice_sq_cd *cd)
{
        return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
                                 buf, buf_size, num_profiles_added, cd);
}

/**
 * ice_aq_query_rl_profile - query rate limiting profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @cd: pointer to command details structure
 *
 * Query RL profile (0x0411)
 */
enum ice_status
ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
                        struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
                        struct ice_sq_cd *cd)
{
        return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles,
                                 num_profiles, buf, buf_size, NULL, cd);
}

/**
 * ice_aq_remove_rl_profile - removes RL profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to remove
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_removed: total number of profiles removed to return
 * @cd: pointer to command details structure or NULL
 *
 * Remove RL profile (0x0415)
 */
static enum ice_status
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
                         struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
                         u16 *num_profiles_removed, struct ice_sq_cd *cd)
{
        return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
                                 num_profiles, buf, buf_size,
                                 num_profiles_removed, cd);
}

/**
 * ice_sched_del_rl_profile - remove RL profile
 * @hw: pointer to the HW struct
 * @rl_info: rate limit profile information
 *
 * If the profile ID is no longer referenced, this removes the profile ID
 * with its associated parameters from the HW DB and locally. The caller
 * needs to hold the scheduler lock.
 */
static enum ice_status
ice_sched_del_rl_profile(struct ice_hw *hw,
                         struct ice_aqc_rl_profile_info *rl_info)
{
        struct ice_aqc_rl_profile_elem *buf;
        u16 num_profiles_removed;
        enum ice_status status;
        u16 num_profiles = 1;

        if (rl_info->prof_id_ref != 0)
                return ICE_ERR_IN_USE;

        /* Safe to remove profile ID */
        buf = &rl_info->profile;
        status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
                                          &num_profiles_removed, NULL);
        if (status || num_profiles_removed != num_profiles)
                return ICE_ERR_CFG;

        /* Delete stale entry now */
        LIST_DEL(&rl_info->list_entry);
        ice_free(hw, rl_info);
        return status;
}
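
/*
 * Example (illustrative sketch): a profile entry is only deleted once its
 * reference count reaches zero; a caller dropping a reference might do
 * the following while holding the scheduler lock.
 *
 *      if (rl_info->prof_id_ref)
 *              rl_info->prof_id_ref--;
 *      status = ice_sched_del_rl_profile(hw, rl_info);
 *
 * ICE_ERR_IN_USE here simply means other nodes still reference the
 * profile, so the cached entry is kept.
 */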

/**
 * ice_sched_clear_rl_prof - clears RL prof entries
 * @pi: port information structure
 *
 * This function removes all RL profiles from HW as well as from the SW DB.
 */
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
{
        u16 ln;
        struct ice_hw *hw = pi->hw;

        for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
                struct ice_aqc_rl_profile_info *rl_prof_elem;
                struct ice_aqc_rl_profile_info *rl_prof_tmp;

                LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
                                         &hw->rl_prof_list[ln],
                                         ice_aqc_rl_profile_info, list_entry) {
                        enum ice_status status;

                        rl_prof_elem->prof_id_ref = 0;
                        status = ice_sched_del_rl_profile(hw, rl_prof_elem);
                        if (status) {
                                ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
                                /* On error, free the memory anyway */
                                LIST_DEL(&rl_prof_elem->list_entry);
                                ice_free(hw, rl_prof_elem);
                        }
                }
        }
}

/**
 * ice_sched_clear_agg - clears the aggregator related information
 * @hw: pointer to the hardware structure
 *
 * This function removes the aggregator list and frees up the
 * aggregator-related memory previously allocated.
 */
void ice_sched_clear_agg(struct ice_hw *hw)
{
        struct ice_sched_agg_info *agg_info;
        struct ice_sched_agg_info *atmp;

        LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &hw->agg_list,
                                 ice_sched_agg_info,
                                 list_entry) {
                struct ice_sched_agg_vsi_info *agg_vsi_info;
                struct ice_sched_agg_vsi_info *vtmp;

                LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
                                         &agg_info->agg_vsi_list,
                                         ice_sched_agg_vsi_info, list_entry) {
                        LIST_DEL(&agg_vsi_info->list_entry);
                        ice_free(hw, agg_vsi_info);
                }
                LIST_DEL(&agg_info->list_entry);
                ice_free(hw, agg_info);
        }
}

/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
        if (!pi)
                return;
        /* remove RL profiles related lists */
        ice_sched_clear_rl_prof(pi);
        if (pi->root) {
                ice_free_sched_node(pi, pi->root);
                pi->root = NULL;
        }
}

/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Clean up scheduling elements from SW DB
 */
void ice_sched_clear_port(struct ice_port_info *pi)
{
        if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
                return;

        pi->port_state = ICE_SCHED_PORT_STATE_INIT;
        ice_acquire_lock(&pi->sched_lock);
        ice_sched_clear_tx_topo(pi);
        ice_release_lock(&pi->sched_lock);
        ice_destroy_lock(&pi->sched_lock);
}

/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the HW struct
 *
 * Clean up scheduling elements from SW DB for all the ports
 */
void ice_sched_cleanup_all(struct ice_hw *hw)
{
        if (!hw)
                return;

        if (hw->layer_info) {
                ice_free(hw, hw->layer_info);
                hw->layer_info = NULL;
        }

        ice_sched_clear_port(hw->port_info);

        hw->num_tx_sched_layers = 0;
        hw->num_tx_sched_phys_layers = 0;
        hw->flattened_layers = 0;
        hw->max_cgds = 0;
}

/**
 * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping
 * @hw: pointer to the HW struct
 * @num_l2_nodes: the number of L2 nodes whose CGDs to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Configure L2 Node CGD (0x0414)
 */
enum ice_status
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
                       struct ice_aqc_cfg_l2_node_cgd_elem *buf,
                       u16 buf_size, struct ice_sq_cd *cd)
{
        struct ice_aqc_cfg_l2_node_cgd *cmd;
        struct ice_aq_desc desc;

        cmd = &desc.params.cfg_l2_node_cgd;
        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_l2_node_cgd);
        desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

        cmd->num_l2_nodes = CPU_TO_LE16(num_l2_nodes);
        return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_add_elems - add nodes to HW and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the TEID of the first node
 *
 * This function adds nodes to HW as well as to SW DB for a given layer
 */
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
                    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
                    u16 *num_nodes_added, u32 *first_node_teid)
{
        struct ice_sched_node *prev, *new_node;
        struct ice_aqc_add_elem *buf;
        u16 i, num_groups_added = 0;
        enum ice_status status = ICE_SUCCESS;
        struct ice_hw *hw = pi->hw;
        u16 buf_size;
        u32 teid;

        buf_size = ice_struct_size(buf, generic, num_nodes);
        buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size);
        if (!buf)
                return ICE_ERR_NO_MEMORY;

        buf->hdr.parent_teid = parent->info.node_teid;
        buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
        for (i = 0; i < num_nodes; i++) {
                buf->generic[i].parent_teid = parent->info.node_teid;
                buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
                buf->generic[i].data.valid_sections =
                        ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
                        ICE_AQC_ELEM_VALID_EIR;
                buf->generic[i].data.generic = 0;
                buf->generic[i].data.cir_bw.bw_profile_idx =
                        CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
                buf->generic[i].data.cir_bw.bw_alloc =
                        CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
                buf->generic[i].data.eir_bw.bw_profile_idx =
                        CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
                buf->generic[i].data.eir_bw.bw_alloc =
                        CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
        }

        status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
                                        &num_groups_added, NULL);
        if (status != ICE_SUCCESS || num_groups_added != 1) {
                ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
                          hw->adminq.sq_last_status);
                ice_free(hw, buf);
                return ICE_ERR_CFG;
        }

        *num_nodes_added = num_nodes;
        /* add nodes to the SW DB */
        for (i = 0; i < num_nodes; i++) {
                status = ice_sched_add_node(pi, layer, &buf->generic[i]);
                if (status != ICE_SUCCESS) {
                        ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
                                  status);
                        break;
                }

                teid = LE32_TO_CPU(buf->generic[i].node_teid);
                new_node = ice_sched_find_node_by_teid(parent, teid);
                if (!new_node) {
                        ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
                        break;
                }

                new_node->sibling = NULL;
                new_node->tc_num = tc_node->tc_num;

                /* add it to previous node sibling pointer */
                /* Note: siblings are not linked across branches */
                prev = ice_sched_get_first_node(pi, tc_node, layer);
                if (prev && prev != new_node) {
                        while (prev->sibling)
                                prev = prev->sibling;
                        prev->sibling = new_node;
                }

                /* initialize the sibling head */
                if (!pi->sib_head[tc_node->tc_num][layer])
                        pi->sib_head[tc_node->tc_num][layer] = new_node;

                if (i == 0)
                        *first_node_teid = teid;
        }

        ice_free(hw, buf);
        return status;
}

/**
 * ice_sched_add_nodes_to_hw_layer - Add nodes to HW layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * Add nodes into a specific HW layer.
 */
static enum ice_status
ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
                                struct ice_sched_node *tc_node,
                                struct ice_sched_node *parent, u8 layer,
                                u16 num_nodes, u32 *first_node_teid,
                                u16 *num_nodes_added)
{
        u16 max_child_nodes;

        *num_nodes_added = 0;

        if (!num_nodes)
                return ICE_SUCCESS;

        if (!parent || layer < pi->hw->sw_entry_point_layer)
                return ICE_ERR_PARAM;

        /* max children per node per layer */
        max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];

        /* current number of children + required nodes exceed max children */
        if ((parent->num_children + num_nodes) > max_child_nodes) {
                /* Fail if the parent is a TC node */
                if (parent == tc_node)
                        return ICE_ERR_CFG;
                return ICE_ERR_MAX_LIMIT;
        }

        return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
                                   num_nodes_added, first_node_teid);
}

/**
 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function adds nodes to a given layer.
 */
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
                             struct ice_sched_node *tc_node,
                             struct ice_sched_node *parent, u8 layer,
                             u16 num_nodes, u32 *first_node_teid,
                             u16 *num_nodes_added)
{
        u32 *first_teid_ptr = first_node_teid;
        u16 new_num_nodes = num_nodes;
        enum ice_status status = ICE_SUCCESS;

        *num_nodes_added = 0;
        while (*num_nodes_added < num_nodes) {
                u16 max_child_nodes, num_added = 0;
                u32 temp;

                status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
                                                         layer, new_num_nodes,
                                                         first_teid_ptr,
                                                         &num_added);
                if (status == ICE_SUCCESS)
                        *num_nodes_added += num_added;
                /* added more nodes than requested? */
                if (*num_nodes_added > num_nodes) {
                        ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
                                  *num_nodes_added);
                        status = ICE_ERR_CFG;
                        break;
                }
                /* break if all the nodes are added successfully */
                if (status == ICE_SUCCESS && (*num_nodes_added == num_nodes))
                        break;
                /* break if the error is not max limit */
                if (status != ICE_SUCCESS && status != ICE_ERR_MAX_LIMIT)
                        break;
                /* Exceeded the max children */
                max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
                /* utilize all the spaces if the parent is not full */
                if (parent->num_children < max_child_nodes) {
                        new_num_nodes = max_child_nodes - parent->num_children;
                } else {
                        /* This parent is full, try the next sibling */
                        parent = parent->sibling;
                        /* Don't modify the first node TEID memory if the
                         * first node was added already in the above call.
                         * Instead send some temp memory for all other
                         * recursive calls.
                         */
                        if (num_added)
                                first_teid_ptr = &temp;

                        new_num_nodes = num_nodes - *num_nodes_added;
                }
        }
        return status;
}
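
/*
 * Example walk-through (illustrative): with max_children = 8, a parent
 * that already has 3 children, and a request for 10 nodes, the first
 * ice_sched_add_nodes_to_hw_layer() call fails with ICE_ERR_MAX_LIMIT
 * (nothing is added), the loop retries with the parent's 5 free slots,
 * and the remaining 5 nodes are then added under the parent's sibling.
 */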

/**
 * ice_sched_get_qgrp_layer - get the current queue group layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current queue group layer number
 */
static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
{
        /* It's always total layers - 1, the array is 0 relative so -2 */
        return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
}

/**
 * ice_sched_get_vsi_layer - get the current VSI layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current VSI layer number
 */
static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
        /* Num Layers       VSI layer
         *     9               6
         *     7               4
         *     5 or less       sw_entry_point_layer
         */
        /* calculate the VSI layer based on number of layers. */
        if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
                u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;

                if (layer > hw->sw_entry_point_layer)
                        return layer;
        }
        return hw->sw_entry_point_layer;
}
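
/*
 * Worked example (per the table above): with 9 total layers the VSI
 * layer is 9 - ICE_VSI_LAYER_OFFSET = 6, which implies an offset of 3;
 * with 5 or fewer layers the sw_entry_point_layer is returned instead.
 */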

/**
 * ice_sched_get_agg_layer - get the current aggregator layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current aggregator layer number
 */
static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
{
        /* Num Layers       aggregator layer
         *     9               4
         *     7 or less       sw_entry_point_layer
         */
        /* calculate the aggregator layer based on number of layers. */
        if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
                u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;

                if (layer > hw->sw_entry_point_layer)
                        return layer;
        }
        return hw->sw_entry_point_layer;
}

/**
 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
 * @pi: port information structure
 *
 * This function removes the leaf node that was created by the FW
 * during initialization
 */
static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
        struct ice_sched_node *node;

        node = pi->root;
        while (node) {
                if (!node->num_children)
                        break;
                node = node->children[0];
        }
        if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
                u32 teid = LE32_TO_CPU(node->info.node_teid);
                enum ice_status status;

                /* remove the default leaf node */
                status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
                if (!status)
                        ice_free_sched_node(pi, node);
        }
}

/**
 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
 * @pi: port information structure
 *
 * This function frees all the nodes except root and TC that were created by
 * the FW during initialization
 */
static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
        struct ice_sched_node *node;

        ice_rm_dflt_leaf_node(pi);

        /* remove the default nodes except TC and root nodes */
        node = pi->root;
        while (node) {
                if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
                    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
                    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
                        ice_free_sched_node(pi, node);
                        break;
                }

                if (!node->num_children)
                        break;
                node = node->children[0];
        }
}
 1263 
 1264 /**
 1265  * ice_sched_init_port - Initialize scheduler by querying information from FW
 1266  * @pi: port info structure for the tree to cleanup
 1267  *
 1268  * This function is the initial call to find the total number of Tx scheduler
 1269  * resources, default topology created by firmware and storing the information
 1270  * in SW DB.
 1271  */
 1272 enum ice_status ice_sched_init_port(struct ice_port_info *pi)
 1273 {
 1274         struct ice_aqc_get_topo_elem *buf;
 1275         enum ice_status status;
 1276         struct ice_hw *hw;
 1277         u8 num_branches;
 1278         u16 num_elems;
 1279         u8 i, j;
 1280 
 1281         if (!pi)
 1282                 return ICE_ERR_PARAM;
 1283         hw = pi->hw;
 1284 
 1285         /* Query the Default Topology from FW */
 1286         buf = (struct ice_aqc_get_topo_elem *)ice_malloc(hw,
 1287                                                          ICE_AQ_MAX_BUF_LEN);
 1288         if (!buf)
 1289                 return ICE_ERR_NO_MEMORY;
 1290 
 1291         /* Query default scheduling tree topology */
 1292         status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
 1293                                       &num_branches, NULL);
 1294         if (status)
 1295                 goto err_init_port;
 1296 
 1297         /* num_branches should be between 1-8 */
 1298         if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
 1299                 ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
 1300                           num_branches);
 1301                 status = ICE_ERR_PARAM;
 1302                 goto err_init_port;
 1303         }
 1304 
 1305         /* get the number of elements on the default/first branch */
 1306         num_elems = LE16_TO_CPU(buf[0].hdr.num_elems);
 1307 
 1308         /* num_elems should always be between 1-9 */
 1309         if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
 1310                 ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
 1311                           num_elems);
 1312                 status = ICE_ERR_PARAM;
 1313                 goto err_init_port;
 1314         }
 1315 
 1316         /* If the last node is a leaf node then the index of the queue group
 1317          * layer is two less than the number of elements.
 1318          */
 1319         if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
 1320             ICE_AQC_ELEM_TYPE_LEAF)
 1321                 pi->last_node_teid =
 1322                         LE32_TO_CPU(buf[0].generic[num_elems - 2].node_teid);
 1323         else
 1324                 pi->last_node_teid =
 1325                         LE32_TO_CPU(buf[0].generic[num_elems - 1].node_teid);
 1326 
 1327         /* Insert the Tx Sched root node */
 1328         status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
 1329         if (status)
 1330                 goto err_init_port;
 1331 
 1332         /* Parse the default tree and cache the information */
 1333         for (i = 0; i < num_branches; i++) {
 1334                 num_elems = LE16_TO_CPU(buf[i].hdr.num_elems);
 1335 
 1336                 /* Skip root element as already inserted */
 1337                 for (j = 1; j < num_elems; j++) {
 1338                         /* update the sw entry point */
 1339                         if (buf[0].generic[j].data.elem_type ==
 1340                             ICE_AQC_ELEM_TYPE_ENTRY_POINT)
 1341                                 hw->sw_entry_point_layer = j;
 1342 
 1343                         status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
 1344                         if (status)
 1345                                 goto err_init_port;
 1346                 }
 1347         }
 1348 
 1349         /* Remove the default nodes. */
 1350         if (pi->root)
 1351                 ice_sched_rm_dflt_nodes(pi);
 1352 
 1353         /* initialize the port for handling the scheduler tree */
 1354         pi->port_state = ICE_SCHED_PORT_STATE_READY;
 1355         ice_init_lock(&pi->sched_lock);
 1356         for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
 1357                 INIT_LIST_HEAD(&hw->rl_prof_list[i]);
 1358 
 1359 err_init_port:
 1360         if (status && pi->root) {
 1361                 ice_free_sched_node(pi, pi->root);
 1362                 pi->root = NULL;
 1363         }
 1364 
 1365         ice_free(hw, buf);
 1366         return status;
 1367 }
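/*
 * Usage sketch (assumed bring-up order, mirroring how the driver's init
 * path is commonly structured; the exact callers live outside this file):
 * the scheduler resources are queried first so that hw->max_children[]
 * is valid before the default topology is parsed.
 *
 *      status = ice_sched_query_res_alloc(hw);
 *      if (!status)
 *              status = ice_sched_init_port(hw->port_info);
 */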
 1368 
 1369 /**
 1370  * ice_sched_get_node - Get the struct ice_sched_node for given TEID
 1371  * @pi: port information structure
 1372  * @teid: Scheduler node TEID
 1373  *
 1374  * This function retrieves the ice_sched_node struct for given TEID from
 1375  * the SW DB and returns it to the caller.
 1376  */
 1377 struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid)
 1378 {
 1379         struct ice_sched_node *node;
 1380 
 1381         if (!pi)
 1382                 return NULL;
 1383 
 1384         /* Find the node starting from root */
 1385         ice_acquire_lock(&pi->sched_lock);
 1386         node = ice_sched_find_node_by_teid(pi->root, teid);
 1387         ice_release_lock(&pi->sched_lock);
 1388 
 1389         if (!node)
 1390                 ice_debug(pi->hw, ICE_DBG_SCHED, "Node not found for teid=0x%x\n", teid);
 1391 
 1392         return node;
 1393 }
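/*
 * Minimal usage sketch (hypothetical caller): translate a TEID reported
 * by firmware back into its SW DB node before acting on it.
 *
 *      struct ice_sched_node *n = ice_sched_get_node(pi, teid);
 *
 *      if (!n)
 *              return ICE_ERR_DOES_NOT_EXIST;
 */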
 1394 
 1395 /**
 1396  * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 1397  * @hw: pointer to the HW struct
 1398  *
 1399  * query FW for allocated scheduler resources and store in HW struct
 1400  */
 1401 enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
 1402 {
 1403         struct ice_aqc_query_txsched_res_resp *buf;
 1404         enum ice_status status = ICE_SUCCESS;
 1405         __le16 max_sibl;
 1406         u8 i;
 1407 
 1408         if (hw->layer_info)
 1409                 return status;
 1410 
 1411         buf = (struct ice_aqc_query_txsched_res_resp *)
 1412                 ice_malloc(hw, sizeof(*buf));
 1413         if (!buf)
 1414                 return ICE_ERR_NO_MEMORY;
 1415 
 1416         status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
 1417         if (status)
 1418                 goto sched_query_out;
 1419 
 1420         hw->num_tx_sched_layers = LE16_TO_CPU(buf->sched_props.logical_levels);
 1421         hw->num_tx_sched_phys_layers =
 1422                 LE16_TO_CPU(buf->sched_props.phys_levels);
 1423         hw->flattened_layers = buf->sched_props.flattening_bitmap;
 1424         hw->max_cgds = buf->sched_props.max_pf_cgds;
 1425 
 1426         /* The max sibling group size of a layer is the max children count
 1427          * of a node in the layer above it:
 1428          * layer 1 node max children will be layer 2 max sibling group size,
 1429          * layer 2 node max children will be layer 3 max sibling group size,
 1430          * and so on. This array is populated from the root (index 0) down to
 1431          * the qgroup layer 7. Leaf nodes have no children.
 1432          */
 1433         for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
 1434                 max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
 1435                 hw->max_children[i] = LE16_TO_CPU(max_sibl);
 1436         }
 1437 
 1438         hw->layer_info = (struct ice_aqc_layer_props *)
 1439                          ice_memdup(hw, buf->layer_props,
 1440                                     (hw->num_tx_sched_layers *
 1441                                      sizeof(*hw->layer_info)),
 1442                                     ICE_NONDMA_TO_NONDMA);
 1443         if (!hw->layer_info) {
 1444                 status = ICE_ERR_NO_MEMORY;
 1445                 goto sched_query_out;
 1446         }
 1447 
 1448 sched_query_out:
 1449         ice_free(hw, buf);
 1450         return status;
 1451 }
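/*
 * Illustrative reading of the max_children[] mapping above (values
 * assumed, not from hardware): if layer_props[1].max_sibl_grp_sz is 4
 * and layer_props[2].max_sibl_grp_sz is 8, then max_children[0] = 4
 * (the root may parent four layer-1 nodes) and max_children[1] = 8;
 * i.e. the child limit stored at index i is the sibling group size
 * reported for layer i + 1.
 */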
 1452 
 1453 /**
 1454  * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
 1455  * @hw: pointer to the HW struct
 1456  *
 1457  * Determine the PSM clock frequency and store in HW struct
 1458  */
 1459 void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
 1460 {
 1461         u32 val, clk_src;
 1462 
 1463         val = rd32(hw, GLGEN_CLKSTAT_SRC);
 1464         clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
 1465                 GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;
 1466 
 1467 #define PSM_CLK_SRC_367_MHZ 0x0
 1468 #define PSM_CLK_SRC_416_MHZ 0x1
 1469 #define PSM_CLK_SRC_446_MHZ 0x2
 1470 #define PSM_CLK_SRC_390_MHZ 0x3
 1471 
 1472         switch (clk_src) {
 1473         case PSM_CLK_SRC_367_MHZ:
 1474                 hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
 1475                 break;
 1476         case PSM_CLK_SRC_416_MHZ:
 1477                 hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
 1478                 break;
 1479         case PSM_CLK_SRC_446_MHZ:
 1480                 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
 1481                 break;
 1482         case PSM_CLK_SRC_390_MHZ:
 1483                 hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
 1484                 break;
 1485         default:
 1486                 ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
 1487                           clk_src);
 1488                 /* fall back to a safe default */
 1489                 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
 1490         }
 1491 }
 1492 
 1493 /**
 1494  * ice_sched_find_node_in_subtree - Find node in part of base node subtree
 1495  * @hw: pointer to the HW struct
 1496  * @base: pointer to the base node
 1497  * @node: pointer to the node to search
 1498  *
 1499  * This function checks whether a given node is part of the base node
 1500  * subtree or not
 1501  */
 1502 bool
 1503 ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
 1504                                struct ice_sched_node *node)
 1505 {
 1506         u8 i;
 1507 
 1508         for (i = 0; i < base->num_children; i++) {
 1509                 struct ice_sched_node *child = base->children[i];
 1510 
 1511                 if (node == child)
 1512                         return true;
 1513 
 1514                 if (child->tx_sched_layer > node->tx_sched_layer)
 1515                         return false;
 1516 
 1517                 /* this recursion is intentional and won't go
 1518                  * more than 8 calls deep
 1519                  */
 1520                 if (ice_sched_find_node_in_subtree(hw, child, node))
 1521                         return true;
 1522         }
 1523         return false;
 1524 }
 1525 
 1526 /**
 1527  * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
 1528  * @pi: port information structure
 1529  * @vsi_node: pointer to the VSI node
 1530  * @qgrp_node: first queue group node identified for scanning
 1531  * @owner: LAN or RDMA
 1532  *
 1533  * This function retrieves a free LAN or RDMA queue group node by scanning
 1534  * qgrp_node and its siblings for the queue group with the fewest number
 1535  * of queues currently assigned.
 1536  */
 1537 static struct ice_sched_node *
 1538 ice_sched_get_free_qgrp(struct ice_port_info *pi,
 1539                         struct ice_sched_node *vsi_node,
 1540                         struct ice_sched_node *qgrp_node, u8 owner)
 1541 {
 1542         struct ice_sched_node *min_qgrp;
 1543         u8 min_children;
 1544 
 1545         if (!qgrp_node)
 1546                 return qgrp_node;
 1547         min_children = qgrp_node->num_children;
 1548         if (!min_children)
 1549                 return qgrp_node;
 1550         min_qgrp = qgrp_node;
 1551         /* scan all queue groups until we find a node that has fewer than
 1552          * the current minimum number of children. This way all active queue
 1553          * group nodes get an equal share of queues, and bandwidth is
 1554          * distributed equally across all queues.
 1555          */
 1556         while (qgrp_node) {
 1557                 /* make sure the qgroup node is part of the VSI subtree */
 1558                 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
 1559                         if (qgrp_node->num_children < min_children &&
 1560                             qgrp_node->owner == owner) {
 1561                                 /* record the new min queue group node */
 1562                                 min_qgrp = qgrp_node;
 1563                                 min_children = min_qgrp->num_children;
 1564                                 /* break early if it has no children */
 1565                                 if (!min_children)
 1566                                         break;
 1567                         }
 1568                 qgrp_node = qgrp_node->sibling;
 1569         }
 1570         return min_qgrp;
 1571 }
 1572 
 1573 /**
 1574  * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
 1575  * @pi: port information structure
 1576  * @vsi_handle: software VSI handle
 1577  * @tc: branch number
 1578  * @owner: LAN or RDMA
 1579  *
 1580  * This function retrieves a free LAN or RDMA queue group node
 1581  */
 1582 struct ice_sched_node *
 1583 ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 1584                            u8 owner)
 1585 {
 1586         struct ice_sched_node *vsi_node, *qgrp_node;
 1587         struct ice_vsi_ctx *vsi_ctx;
 1588         u16 max_children;
 1589         u8 qgrp_layer;
 1590 
 1591         qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
 1592         max_children = pi->hw->max_children[qgrp_layer];
 1593 
 1594         vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
 1595         if (!vsi_ctx)
 1596                 return NULL;
 1597         vsi_node = vsi_ctx->sched.vsi_node[tc];
 1598         /* bail out if the VSI handle has no node on this TC */
 1599         if (!vsi_node)
 1600                 return NULL;
 1601 
 1602         /* get the first queue group node from VSI sub-tree */
 1603         qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
 1604         while (qgrp_node) {
 1605                 /* make sure the qgroup node is part of the VSI subtree */
 1606                 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
 1607                         if (qgrp_node->num_children < max_children &&
 1608                             qgrp_node->owner == owner)
 1609                                 break;
 1610                 qgrp_node = qgrp_node->sibling;
 1611         }
 1612 
 1613         /* Select the best queue group */
 1614         return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
 1615 }
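/*
 * Caller sketch (assumed, with pi->sched_lock held as in the queue
 * config paths): pick a parent for a new LAN queue before sending the
 * add-Tx-queue command; a NULL return means no queue group had room.
 *
 *      parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
 *                                          ICE_SCHED_NODE_OWNER_LAN);
 *      if (!parent)
 *              return ICE_ERR_CFG;
 */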
 1616 
 1617 /**
 1618  * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
 1619  * @pi: pointer to the port information structure
 1620  * @tc_node: pointer to the TC node
 1621  * @vsi_handle: software VSI handle
 1622  *
 1623  * This function retrieves a VSI node for a given VSI ID from a given
 1624  * TC branch
 1625  */
 1626 struct ice_sched_node *
 1627 ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
 1628                        u16 vsi_handle)
 1629 {
 1630         struct ice_sched_node *node;
 1631         u8 vsi_layer;
 1632 
 1633         vsi_layer = ice_sched_get_vsi_layer(pi->hw);
 1634         node = ice_sched_get_first_node(pi, tc_node, vsi_layer);
 1635 
 1636         /* Check whether it already exists */
 1637         while (node) {
 1638                 if (node->vsi_handle == vsi_handle)
 1639                         return node;
 1640                 node = node->sibling;
 1641         }
 1642 
 1643         return node;
 1644 }
 1645 
 1646 /**
 1647  * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
 1648  * @pi: pointer to the port information structure
 1649  * @tc_node: pointer to the TC node
 1650  * @agg_id: aggregator ID
 1651  *
 1652  * This function retrieves an aggregator node for a given aggregator ID from
 1653  * a given TC branch
 1654  */
 1655 static struct ice_sched_node *
 1656 ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
 1657                        u32 agg_id)
 1658 {
 1659         struct ice_sched_node *node;
 1660         struct ice_hw *hw = pi->hw;
 1661         u8 agg_layer;
 1662 
 1663         if (!hw)
 1664                 return NULL;
 1665         agg_layer = ice_sched_get_agg_layer(hw);
 1666         node = ice_sched_get_first_node(pi, tc_node, agg_layer);
 1667 
 1668         /* Check whether it already exists */
 1669         while (node) {
 1670                 if (node->agg_id == agg_id)
 1671                         return node;
 1672                 node = node->sibling;
 1673         }
 1674 
 1675         return node;
 1676 }
 1677 
 1678 /**
 1679  * ice_sched_check_node - Compare node parameters between SW DB and HW DB
 1680  * @hw: pointer to the HW struct
 1681  * @node: pointer to the ice_sched_node struct
 1682  *
 1683  * This function queries and compares the HW element with SW DB node parameters
 1684  */
 1685 static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
 1686 {
 1687         struct ice_aqc_txsched_elem_data buf;
 1688         enum ice_status status;
 1689         u32 node_teid;
 1690 
 1691         node_teid = LE32_TO_CPU(node->info.node_teid);
 1692         status = ice_sched_query_elem(hw, node_teid, &buf);
 1693         if (status != ICE_SUCCESS)
 1694                 return false;
 1695 
 1696         if (memcmp(&buf, &node->info, sizeof(buf))) {
 1697                 ice_debug(hw, ICE_DBG_SCHED, "Node mismatch for teid=0x%x\n",
 1698                           node_teid);
 1699                 return false;
 1700         }
 1701 
 1702         return true;
 1703 }
 1704 
 1705 /**
 1706  * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
 1707  * @hw: pointer to the HW struct
 1708  * @num_qs: number of queues
 1709  * @num_nodes: num nodes array
 1710  *
 1711  * This function calculates the number of VSI child nodes based on the
 1712  * number of queues.
 1713  */
 1714 static void
 1715 ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
 1716 {
 1717         u16 num = num_qs;
 1718         u8 i, qgl, vsil;
 1719 
 1720         qgl = ice_sched_get_qgrp_layer(hw);
 1721         vsil = ice_sched_get_vsi_layer(hw);
 1722 
 1723         /* calculate num nodes from queue group to VSI layer */
 1724         for (i = qgl; i > vsil; i--) {
 1725                 /* round to the next integer if there is a remainder */
 1726                 num = DIVIDE_AND_ROUND_UP(num, hw->max_children[i]);
 1727 
 1728                 /* need at least one node */
 1729                 num_nodes[i] = num ? num : 1;
 1730         }
 1731 }
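/*
 * Worked example (all values assumed): with num_qs = 300, a queue group
 * layer of 8, a VSI layer of 5, and max_children[] = 8 at every layer,
 * the walk from qgl down to (but not including) vsil yields
 *
 *      num_nodes[8] = DIVIDE_AND_ROUND_UP(300, 8) = 38   queue groups
 *      num_nodes[7] = DIVIDE_AND_ROUND_UP(38, 8)  = 5
 *      num_nodes[6] = DIVIDE_AND_ROUND_UP(5, 8)   = 1
 *
 * so each layer gets just enough nodes to parent the layer below it.
 */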
 1732 
 1733 /**
 1734  * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 1735  * @pi: port information structure
 1736  * @vsi_handle: software VSI handle
 1737  * @tc_node: pointer to the TC node
 1738  * @num_nodes: pointer to the num nodes that needs to be added per layer
 1739  * @owner: node owner (LAN or RDMA)
 1740  *
 1741  * This function adds the VSI child nodes to tree. It gets called for
 1742  * LAN and RDMA separately.
 1743  */
 1744 static enum ice_status
 1745 ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 1746                               struct ice_sched_node *tc_node, u16 *num_nodes,
 1747                               u8 owner)
 1748 {
 1749         struct ice_sched_node *parent, *node;
 1750         struct ice_hw *hw = pi->hw;
 1751         enum ice_status status;
 1752         u32 first_node_teid;
 1753         u16 num_added = 0;
 1754         u8 i, qgl, vsil;
 1755 
 1756         qgl = ice_sched_get_qgrp_layer(hw);
 1757         vsil = ice_sched_get_vsi_layer(hw);
 1758         parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
 1759         for (i = vsil + 1; i <= qgl; i++) {
 1760                 if (!parent)
 1761                         return ICE_ERR_CFG;
 1762 
 1763                 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
 1764                                                       num_nodes[i],
 1765                                                       &first_node_teid,
 1766                                                       &num_added);
 1767                 if (status != ICE_SUCCESS || num_nodes[i] != num_added)
 1768                         return ICE_ERR_CFG;
 1769 
 1770                 /* The newly added node can be a new parent for the next
 1771                  * layer nodes
 1772                  */
 1773                 if (num_added) {
 1774                         parent = ice_sched_find_node_by_teid(tc_node,
 1775                                                              first_node_teid);
 1776                         node = parent;
 1777                         while (node) {
 1778                                 node->owner = owner;
 1779                                 node = node->sibling;
 1780                         }
 1781                 } else {
 1782                         parent = parent->children[0];
 1783                 }
 1784         }
 1785 
 1786         return ICE_SUCCESS;
 1787 }
 1788 
 1789 /**
 1790  * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 1791  * @pi: pointer to the port info structure
 1792  * @tc_node: pointer to TC node
 1793  * @num_nodes: pointer to num nodes array
 1794  *
 1795  * This function calculates the number of supported nodes needed to add this
 1796  * VSI into Tx tree including the VSI, parent and intermediate nodes in below
 1797  * layers
 1798  */
 1799 static void
 1800 ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
 1801                                  struct ice_sched_node *tc_node, u16 *num_nodes)
 1802 {
 1803         struct ice_sched_node *node;
 1804         u8 vsil;
 1805         int i;
 1806 
 1807         vsil = ice_sched_get_vsi_layer(pi->hw);
 1808         for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
 1809                 /* Add an intermediate node if the TC has no children yet;
 1810                  * the VSI layer always needs at least one node
 1811                  */
 1812                 if (!tc_node->num_children || i == vsil) {
 1813                         num_nodes[i]++;
 1814                 } else {
 1815                         /* If the intermediate nodes have all reached
 1816                          * their max children, then add a new one.
 1817                          */
 1818                         node = ice_sched_get_first_node(pi, tc_node, (u8)i);
 1819                         /* scan all the siblings */
 1820                         while (node) {
 1821                                 if (node->num_children <
 1822                                     pi->hw->max_children[i])
 1823                                         break;
 1824                                 node = node->sibling;
 1825                         }
 1826 
 1827                         /* the tree has an intermediate node with room
 1828                          * for this new VSI, so no support nodes are
 1829                          * needed in the remaining layers.
 1830                          */
 1831                         if (node)
 1832                                 break;
 1833                         /* all the nodes are full, allocate a new one */
 1834                         num_nodes[i]++;
 1835                 }
 1836 }
 1837 
 1838 /**
 1839  * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
 1840  * @pi: port information structure
 1841  * @vsi_handle: software VSI handle
 1842  * @tc_node: pointer to TC node
 1843  * @num_nodes: pointer to num nodes array
 1844  *
 1845  * This function adds the VSI supported nodes into Tx tree including the
 1846  * VSI, its parent and intermediate nodes in below layers
 1847  */
 1848 static enum ice_status
 1849 ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
 1850                                 struct ice_sched_node *tc_node, u16 *num_nodes)
 1851 {
 1852         struct ice_sched_node *parent = tc_node;
 1853         enum ice_status status;
 1854         u32 first_node_teid;
 1855         u16 num_added = 0;
 1856         u8 i, vsil;
 1857 
 1858         if (!pi)
 1859                 return ICE_ERR_PARAM;
 1860 
 1861         vsil = ice_sched_get_vsi_layer(pi->hw);
 1862         for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
 1863                 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
 1864                                                       i, num_nodes[i],
 1865                                                       &first_node_teid,
 1866                                                       &num_added);
 1867                 if (status != ICE_SUCCESS || num_nodes[i] != num_added)
 1868                         return ICE_ERR_CFG;
 1869 
 1870                 /* The newly added node can be a new parent for the next
 1871                  * layer nodes
 1872                  */
 1873                 if (num_added)
 1874                         parent = ice_sched_find_node_by_teid(tc_node,
 1875                                                              first_node_teid);
 1876                 else
 1877                         parent = parent->children[0];
 1878 
 1879                 if (!parent)
 1880                         return ICE_ERR_CFG;
 1881 
 1882                 if (i == vsil)
 1883                         parent->vsi_handle = vsi_handle;
 1884         }
 1885 
 1886         return ICE_SUCCESS;
 1887 }
 1888 
 1889 /**
 1890  * ice_sched_add_vsi_to_topo - add a new VSI into tree
 1891  * @pi: port information structure
 1892  * @vsi_handle: software VSI handle
 1893  * @tc: TC number
 1894  *
 1895  * This function adds a new VSI into scheduler tree
 1896  */
 1897 static enum ice_status
 1898 ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
 1899 {
 1900         u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
 1901         struct ice_sched_node *tc_node;
 1902 
 1903         tc_node = ice_sched_get_tc_node(pi, tc);
 1904         if (!tc_node)
 1905                 return ICE_ERR_PARAM;
 1906 
 1907         /* calculate number of supported nodes needed for this VSI */
 1908         ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
 1909 
 1910         /* add VSI supported nodes to TC subtree */
 1911         return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
 1912                                                num_nodes);
 1913 }
 1914 
 1915 /**
 1916  * ice_sched_update_vsi_child_nodes - update VSI child nodes
 1917  * @pi: port information structure
 1918  * @vsi_handle: software VSI handle
 1919  * @tc: TC number
 1920  * @new_numqs: new number of max queues
 1921  * @owner: owner of this subtree
 1922  *
 1923  * This function updates the VSI child nodes based on the number of queues
 1924  */
 1925 static enum ice_status
 1926 ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 1927                                  u8 tc, u16 new_numqs, u8 owner)
 1928 {
 1929         u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
 1930         struct ice_sched_node *vsi_node;
 1931         struct ice_sched_node *tc_node;
 1932         struct ice_vsi_ctx *vsi_ctx;
 1933         enum ice_status status = ICE_SUCCESS;
 1934         struct ice_hw *hw = pi->hw;
 1935         u16 prev_numqs;
 1936 
 1937         tc_node = ice_sched_get_tc_node(pi, tc);
 1938         if (!tc_node)
 1939                 return ICE_ERR_CFG;
 1940 
 1941         vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
 1942         if (!vsi_node)
 1943                 return ICE_ERR_CFG;
 1944 
 1945         vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
 1946         if (!vsi_ctx)
 1947                 return ICE_ERR_PARAM;
 1948 
 1949         if (owner == ICE_SCHED_NODE_OWNER_LAN)
 1950                 prev_numqs = vsi_ctx->sched.max_lanq[tc];
 1951         else
 1952                 prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
 1953         /* number of queues is unchanged or reduced; nothing to grow */
 1954         if (new_numqs <= prev_numqs)
 1955                 return status;
 1956         if (owner == ICE_SCHED_NODE_OWNER_LAN) {
 1957                 status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
 1958                 if (status)
 1959                         return status;
 1960         } else {
 1961                 status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
 1962                 if (status)
 1963                         return status;
 1964         }
 1965 
 1966         if (new_numqs)
 1967                 ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
 1968         /* Always keep the tree sized for the maximum queue configuration
 1969          * seen so far: update it only when the number of queues grows. A
 1970          * shrink may leave some extra nodes in the tree, but that does no
 1971          * harm. Removing those extra nodes could complicate the code if
 1972          * they are part of an SRL (shared rate limiter) or are individually
 1973          * rate limited.
 1974          */
 1975         status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
 1976                                                new_num_nodes, owner);
 1977         if (status)
 1978                 return status;
 1979         if (owner == ICE_SCHED_NODE_OWNER_LAN)
 1980                 vsi_ctx->sched.max_lanq[tc] = new_numqs;
 1981         else
 1982                 vsi_ctx->sched.max_rdmaq[tc] = new_numqs;
 1983 
 1984         return ICE_SUCCESS;
 1985 }
 1986 
 1987 /**
 1988  * ice_sched_cfg_vsi - configure the new/existing VSI
 1989  * @pi: port information structure
 1990  * @vsi_handle: software VSI handle
 1991  * @tc: TC number
 1992  * @maxqs: max number of queues
 1993  * @owner: LAN or RDMA
 1994  * @enable: TC enabled or disabled
 1995  *
 1996  * This function adds/updates VSI nodes based on the number of queues. If the
 1997  * TC is enabled and the VSI is in the suspended state, it resumes the VSI.
 1998  * If the TC is disabled, it suspends the VSI unless it is suspended already.
 1999  */
 2000 enum ice_status
 2001 ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
 2002                   u8 owner, bool enable)
 2003 {
 2004         struct ice_sched_node *vsi_node, *tc_node;
 2005         struct ice_vsi_ctx *vsi_ctx;
 2006         enum ice_status status = ICE_SUCCESS;
 2007         struct ice_hw *hw = pi->hw;
 2008 
 2009         ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
 2010         tc_node = ice_sched_get_tc_node(pi, tc);
 2011         if (!tc_node)
 2012                 return ICE_ERR_PARAM;
 2013         vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
 2014         if (!vsi_ctx)
 2015                 return ICE_ERR_PARAM;
 2016         vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
 2017 
 2018         /* suspend the VSI if TC is not enabled */
 2019         if (!enable) {
 2020                 if (vsi_node && vsi_node->in_use) {
 2021                         u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);
 2022 
 2023                         status = ice_sched_suspend_resume_elems(hw, 1, &teid,
 2024                                                                 true);
 2025                         if (!status)
 2026                                 vsi_node->in_use = false;
 2027                 }
 2028                 return status;
 2029         }
 2030 
 2031         /* TC is enabled; if this is a new VSI, add it to the tree */
 2032         if (!vsi_node) {
 2033                 status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
 2034                 if (status)
 2035                         return status;
 2036 
 2037                 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
 2038                 if (!vsi_node)
 2039                         return ICE_ERR_CFG;
 2040 
 2041                 vsi_ctx->sched.vsi_node[tc] = vsi_node;
 2042                 vsi_node->in_use = true;
 2043                 /* invalidate the max queue counts whenever a VSI is added
 2044                  * to the scheduler tree for the first time (boot or after
 2045                  * reset), since the child nodes must be recreated then.
 2046                  */
 2047                 vsi_ctx->sched.max_lanq[tc] = 0;
 2048                 vsi_ctx->sched.max_rdmaq[tc] = 0;
 2049         }
 2050 
 2051         /* update the VSI child nodes */
 2052         status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
 2053                                                   owner);
 2054         if (status)
 2055                 return status;
 2056 
 2057         /* TC is enabled; resume the VSI if it is in the suspended state */
 2058         if (!vsi_node->in_use) {
 2059                 u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);
 2060 
 2061                 status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
 2062                 if (!status)
 2063                         vsi_node->in_use = true;
 2064         }
 2065 
 2066         return status;
 2067 }
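/*
 * Usage sketch (hypothetical values, lock usage mirroring the queue
 * config helpers in this file): enable TC 0 for a VSI with up to 128
 * LAN queues.
 *
 *      ice_acquire_lock(&pi->sched_lock);
 *      status = ice_sched_cfg_vsi(pi, vsi_handle, 0, 128,
 *                                 ICE_SCHED_NODE_OWNER_LAN, true);
 *      ice_release_lock(&pi->sched_lock);
 */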
 2068 
 2069 /**
 2070  * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
 2071  * @pi: port information structure
 2072  * @vsi_handle: software VSI handle
 2073  *
 2074  * This function removes single aggregator VSI info entry from
 2075  * aggregator list.
 2076  */
 2077 static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
 2078 {
 2079         struct ice_sched_agg_info *agg_info;
 2080         struct ice_sched_agg_info *atmp;
 2081 
 2082         LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &pi->hw->agg_list,
 2083                                  ice_sched_agg_info,
 2084                                  list_entry) {
 2085                 struct ice_sched_agg_vsi_info *agg_vsi_info;
 2086                 struct ice_sched_agg_vsi_info *vtmp;
 2087 
 2088                 LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
 2089                                          &agg_info->agg_vsi_list,
 2090                                          ice_sched_agg_vsi_info, list_entry)
 2091                         if (agg_vsi_info->vsi_handle == vsi_handle) {
 2092                                 LIST_DEL(&agg_vsi_info->list_entry);
 2093                                 ice_free(pi->hw, agg_vsi_info);
 2094                                 return;
 2095                         }
 2096         }
 2097 }
 2098 
 2099 /**
 2100  * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
 2101  * @node: pointer to the sub-tree node
 2102  *
 2103  * This function checks for a leaf node presence in a given sub-tree node.
 2104  */
 2105 static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
 2106 {
 2107         u8 i;
 2108 
 2109         for (i = 0; i < node->num_children; i++)
 2110                 if (ice_sched_is_leaf_node_present(node->children[i]))
 2111                         return true;
 2112         /* check for a leaf node */
 2113         return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
 2114 }
 2115 
 2116 /**
 2117  * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
 2118  * @pi: port information structure
 2119  * @vsi_handle: software VSI handle
 2120  * @owner: LAN or RDMA
 2121  *
 2122  * This function removes the VSI and its LAN or RDMA children nodes from the
 2123  * scheduler tree.
 2124  */
 2125 static enum ice_status
 2126 ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
 2127 {
 2128         enum ice_status status = ICE_ERR_PARAM;
 2129         struct ice_vsi_ctx *vsi_ctx;
 2130         u8 i;
 2131 
 2132         ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
 2133         if (!ice_is_vsi_valid(pi->hw, vsi_handle))
 2134                 return status;
 2135         ice_acquire_lock(&pi->sched_lock);
 2136         vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
 2137         if (!vsi_ctx)
 2138                 goto exit_sched_rm_vsi_cfg;
 2139 
 2140         ice_for_each_traffic_class(i) {
 2141                 struct ice_sched_node *vsi_node, *tc_node;
 2142                 u8 j = 0;
 2143 
 2144                 tc_node = ice_sched_get_tc_node(pi, i);
 2145                 if (!tc_node)
 2146                         continue;
 2147 
 2148                 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
 2149                 if (!vsi_node)
 2150                         continue;
 2151 
 2152                 if (ice_sched_is_leaf_node_present(vsi_node)) {
 2153                         ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
 2154                         status = ICE_ERR_IN_USE;
 2155                         goto exit_sched_rm_vsi_cfg;
 2156                 }
 2157                 while (j < vsi_node->num_children) {
 2158                         if (vsi_node->children[j]->owner == owner) {
 2159                                 ice_free_sched_node(pi, vsi_node->children[j]);
 2160 
 2161                                 /* reset the counter again since the num
 2162                                  * children will be updated after node removal
 2163                                  */
 2164                                 j = 0;
 2165                         } else {
 2166                                 j++;
 2167                         }
 2168                 }
 2169                 /* remove the VSI if it has no children */
 2170                 if (!vsi_node->num_children) {
 2171                         ice_free_sched_node(pi, vsi_node);
 2172                         vsi_ctx->sched.vsi_node[i] = NULL;
 2173 
 2174                         /* clean up aggregator related VSI info if any */
 2175                         ice_sched_rm_agg_vsi_info(pi, vsi_handle);
 2176                 }
 2177                 if (owner == ICE_SCHED_NODE_OWNER_LAN)
 2178                         vsi_ctx->sched.max_lanq[i] = 0;
 2179                 else
 2180                         vsi_ctx->sched.max_rdmaq[i] = 0;
 2181         }
 2182         status = ICE_SUCCESS;
 2183 
 2184 exit_sched_rm_vsi_cfg:
 2185         ice_release_lock(&pi->sched_lock);
 2186         return status;
 2187 }
 2188 
 2189 /**
 2190  * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
 2191  * @pi: port information structure
 2192  * @vsi_handle: software VSI handle
 2193  *
 2194  * This function clears the VSI and its LAN children nodes from scheduler tree
 2195  * for all TCs.
 2196  */
 2197 enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
 2198 {
 2199         return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
 2200 }
 2201 
 2202 /**
 2203  * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
 2204  * @pi: port information structure
 2205  * @vsi_handle: software VSI handle
 2206  *
 2207  * This function clears the VSI and its RDMA children nodes from scheduler tree
 2208  * for all TCs.
 2209  */
 2210 enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
 2211 {
 2212         return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
 2213 }
 2214 
 2215 /**
 2216  * ice_sched_is_tree_balanced - Check whether the SW tree matches the HW DB
 2217  * @hw: pointer to the HW struct
 2218  * @node: pointer to the ice_sched_node struct
 2219  *
 2220  * This function compares all the nodes of a given tree against the HW DB
 2221  * nodes. It must be called with the port_info->sched_lock held.
 2222  */
 2223 bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node)
 2224 {
 2225         u8 i;
 2226 
 2227         /* start from the leaf node */
 2228         for (i = 0; i < node->num_children; i++)
 2229                 /* Fail if the SW node doesn't match the HW DB.
 2230                  * This recursion is intentional and won't go
 2231                  * more than 9 calls deep.
 2232                  */
 2233                 if (!ice_sched_is_tree_balanced(hw, node->children[i]))
 2234                         return false;
 2235 
 2236         return ice_sched_check_node(hw, node);
 2237 }
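/*
 * Usage sketch: per the note above, hold the scheduler lock for the
 * whole tree walk so nodes cannot be added or freed mid-comparison.
 *
 *      ice_acquire_lock(&pi->sched_lock);
 *      balanced = ice_sched_is_tree_balanced(pi->hw, pi->root);
 *      ice_release_lock(&pi->sched_lock);
 */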
 2238 
 2239 /**
 2240  * ice_aq_query_node_to_root - retrieve the tree topology for a given node TEID
 2241  * @hw: pointer to the HW struct
 2242  * @node_teid: node TEID
 2243  * @buf: pointer to buffer
 2244  * @buf_size: buffer size in bytes
 2245  * @cd: pointer to command details structure or NULL
 2246  *
 2247  * This function retrieves the tree topology from the firmware for a given
 2248  * node TEID to the root node.
 2249  */
 2250 enum ice_status
 2251 ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
 2252                           struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
 2253                           struct ice_sq_cd *cd)
 2254 {
 2255         struct ice_aqc_query_node_to_root *cmd;
 2256         struct ice_aq_desc desc;
 2257 
 2258         cmd = &desc.params.query_node_to_root;
 2259         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_node_to_root);
 2260         cmd->teid = CPU_TO_LE32(node_teid);
 2261         return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 2262 }
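/*
 * Caller sketch (buffer sizing assumed): the path from any node up to
 * the root is at most ICE_AQC_TOPO_MAX_LEVEL_NUM elements, so a fixed
 * array is a natural response buffer.
 *
 *      struct ice_aqc_txsched_elem_data elems[ICE_AQC_TOPO_MAX_LEVEL_NUM];
 *
 *      status = ice_aq_query_node_to_root(hw, node_teid, elems,
 *                                         sizeof(elems), NULL);
 */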
 2263 
 2264 /**
 2265  * ice_get_agg_info - get the aggregator ID
 2266  * @hw: pointer to the hardware structure
 2267  * @agg_id: aggregator ID
 2268  *
 2269  * This function validates the aggregator ID. It returns the aggregator info
 2270  * if the aggregator ID is present in the list; otherwise it returns NULL.
 2271  */
 2272 static struct ice_sched_agg_info *
 2273 ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
 2274 {
 2275         struct ice_sched_agg_info *agg_info;
 2276 
 2277         LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
 2278                             list_entry)
 2279                 if (agg_info->agg_id == agg_id)
 2280                         return agg_info;
 2281 
 2282         return NULL;
 2283 }
 2284 
 2285 /**
 2286  * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
 2287  * @hw: pointer to the HW struct
 2288  * @node: pointer to a child node
 2289  * @num_nodes: num nodes count array
 2290  *
 2291  * This function walks through the aggregator subtree to find a free parent
 2292  * node
 2293  */
 2294 static struct ice_sched_node *
 2295 ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
 2296                               u16 *num_nodes)
 2297 {
 2298         u8 l = node->tx_sched_layer;
 2299         u8 vsil, i;
 2300 
 2301         vsil = ice_sched_get_vsi_layer(hw);
 2302 
 2303         /* Is it the VSI parent layer? */
 2304         if (l == vsil - 1)
 2305                 return (node->num_children < hw->max_children[l]) ? node : NULL;
 2306 
 2307         /* We have intermediate nodes. Walk the subtree; if an intermediate
 2308          * node has space to add a new node, clear its layer's count.
 2309          */
 2310         if (node->num_children < hw->max_children[l])
 2311                 num_nodes[l] = 0;
 2312         /* The recursive call below is intentional and won't go more than
 2313          * 2 or 3 levels deep.
 2314          */
 2315 
 2316         for (i = 0; i < node->num_children; i++) {
 2317                 struct ice_sched_node *parent;
 2318 
 2319                 parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
 2320                                                        num_nodes);
 2321                 if (parent)
 2322                         return parent;
 2323         }
 2324 
 2325         return NULL;
 2326 }
 2327 
 2328 /**
 2329  * ice_sched_update_parent - update the new parent in SW DB
 2330  * @new_parent: pointer to a new parent node
 2331  * @node: pointer to a child node
 2332  *
 2333  * This function removes the child from the old parent and adds it to a new
 2334  * parent
 2335  */
 2336 static void
 2337 ice_sched_update_parent(struct ice_sched_node *new_parent,
 2338                         struct ice_sched_node *node)
 2339 {
 2340         struct ice_sched_node *old_parent;
 2341         u8 i, j;
 2342 
 2343         old_parent = node->parent;
 2344 
 2345         /* update the old parent children */
 2346         for (i = 0; i < old_parent->num_children; i++)
 2347                 if (old_parent->children[i] == node) {
 2348                         for (j = i + 1; j < old_parent->num_children; j++)
 2349                                 old_parent->children[j - 1] =
 2350                                         old_parent->children[j];
 2351                         old_parent->num_children--;
 2352                         break;
 2353                 }
 2354 
 2355         /* now move the node to a new parent */
 2356         new_parent->children[new_parent->num_children++] = node;
 2357         node->parent = new_parent;
 2358         node->info.parent_teid = new_parent->info.node_teid;
 2359 }
 2360 
 2361 /**
 2362  * ice_sched_move_nodes - move child nodes to a given parent
 2363  * @pi: port information structure
 2364  * @parent: pointer to parent node
 2365  * @num_items: number of child nodes to be moved
 2366  * @list: pointer to child node teids
 2367  *
 2368  * This function moves the child nodes to a given parent.
 2369  */
 2370 static enum ice_status
 2371 ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
 2372                      u16 num_items, u32 *list)
 2373 {
 2374         struct ice_aqc_move_elem *buf;
 2375         struct ice_sched_node *node;
 2376         enum ice_status status = ICE_SUCCESS;
 2377         u16 i, grps_movd = 0;
 2378         struct ice_hw *hw;
 2379         u16 buf_len;
 2380 
 2381         hw = pi->hw;
 2382 
 2383         if (!parent || !num_items)
 2384                 return ICE_ERR_PARAM;
 2385 
 2386         /* Does the parent have enough space? */
 2387         if (parent->num_children + num_items >
 2388             hw->max_children[parent->tx_sched_layer])
 2389                 return ICE_ERR_AQ_FULL;
 2390 
 2391         buf_len = ice_struct_size(buf, teid, 1);
 2392         buf = (struct ice_aqc_move_elem *)ice_malloc(hw, buf_len);
 2393         if (!buf)
 2394                 return ICE_ERR_NO_MEMORY;
 2395 
 2396         for (i = 0; i < num_items; i++) {
 2397                 node = ice_sched_find_node_by_teid(pi->root, list[i]);
 2398                 if (!node) {
 2399                         status = ICE_ERR_PARAM;
 2400                         goto move_err_exit;
 2401                 }
 2402 
 2403                 buf->hdr.src_parent_teid = node->info.parent_teid;
 2404                 buf->hdr.dest_parent_teid = parent->info.node_teid;
 2405                 buf->teid[0] = node->info.node_teid;
 2406                 buf->hdr.num_elems = CPU_TO_LE16(1);
 2407                 status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
 2408                                                  &grps_movd, NULL);
 2409                 if (status || grps_movd != 1) {
 2410                         status = ICE_ERR_CFG;
 2411                         goto move_err_exit;
 2412                 }
 2413 
 2414                 /* update the SW DB */
 2415                 ice_sched_update_parent(parent, node);
 2416         }
 2417 
 2418 move_err_exit:
 2419         ice_free(hw, buf);
 2420         return status;
 2421 }
 2422 
 2423 /**
 2424  * ice_sched_move_vsi_to_agg - move VSI to aggregator node
 2425  * @pi: port information structure
 2426  * @vsi_handle: software VSI handle
 2427  * @agg_id: aggregator ID
 2428  * @tc: TC number
 2429  *
 2430  * This function moves a VSI to an aggregator node or its subtree.
 2431  * Intermediate nodes may be created if required.
 2432  */
 2433 static enum ice_status
 2434 ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
 2435                           u8 tc)
 2436 {
 2437         struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
 2438         u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
 2439         u32 first_node_teid, vsi_teid;
 2440         enum ice_status status;
 2441         u16 num_nodes_added;
 2442         u8 aggl, vsil, i;
 2443 
 2444         tc_node = ice_sched_get_tc_node(pi, tc);
 2445         if (!tc_node)
 2446                 return ICE_ERR_CFG;
 2447 
 2448         agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
 2449         if (!agg_node)
 2450                 return ICE_ERR_DOES_NOT_EXIST;
 2451 
 2452         vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
 2453         if (!vsi_node)
 2454                 return ICE_ERR_DOES_NOT_EXIST;
 2455 
 2456         /* Is this VSI already part of given aggregator? */
 2457         if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
 2458                 return ICE_SUCCESS;
 2459 
 2460         aggl = ice_sched_get_agg_layer(pi->hw);
 2461         vsil = ice_sched_get_vsi_layer(pi->hw);
 2462 
 2463         /* set intermediate node count to 1 between aggregator and VSI layers */
 2464         for (i = aggl + 1; i < vsil; i++)
 2465                 num_nodes[i] = 1;
 2466 
 2467         /* Check if the aggregator subtree has any free node to add the VSI */
 2468         for (i = 0; i < agg_node->num_children; i++) {
 2469                 parent = ice_sched_get_free_vsi_parent(pi->hw,
 2470                                                        agg_node->children[i],
 2471                                                        num_nodes);
 2472                 if (parent)
 2473                         goto move_nodes;
 2474         }
 2475 
 2476         /* add new nodes */
 2477         parent = agg_node;
 2478         for (i = aggl + 1; i < vsil; i++) {
 2479                 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
 2480                                                       num_nodes[i],
 2481                                                       &first_node_teid,
 2482                                                       &num_nodes_added);
 2483                 if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
 2484                         return ICE_ERR_CFG;
 2485 
 2486                 /* The newly added node can be a new parent for the next
 2487                  * layer nodes
 2488                  */
 2489                 if (num_nodes_added)
 2490                         parent = ice_sched_find_node_by_teid(tc_node,
 2491                                                              first_node_teid);
 2492                 else
 2493                         parent = parent->children[0];
 2494 
 2495                 if (!parent)
 2496                         return ICE_ERR_CFG;
 2497         }
 2498 
 2499 move_nodes:
 2500         vsi_teid = LE32_TO_CPU(vsi_node->info.node_teid);
 2501         return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
 2502 }
 2503 
 2504 /**
 2505  * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
 2506  * @pi: port information structure
 2507  * @agg_info: aggregator info
 2508  * @tc: traffic class number
 2509  * @rm_vsi_info: true to also remove the aggregator VSI info entries
 2510  *
 2511  * This function moves all the VSI(s) to the default aggregator and deletes
 2512  * the aggregator VSI info based on the passed-in boolean rm_vsi_info. The
 2513  * caller must hold the scheduler lock.
 2514  */
 2515 static enum ice_status
 2516 ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
 2517                              struct ice_sched_agg_info *agg_info, u8 tc,
 2518                              bool rm_vsi_info)
 2519 {
 2520         struct ice_sched_agg_vsi_info *agg_vsi_info;
 2521         struct ice_sched_agg_vsi_info *tmp;
 2522         enum ice_status status = ICE_SUCCESS;
 2523 
 2524         LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
 2525                                  ice_sched_agg_vsi_info, list_entry) {
 2526                 u16 vsi_handle = agg_vsi_info->vsi_handle;
 2527 
 2528                 /* Move VSI to default aggregator */
 2529                 if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
 2530                         continue;
 2531 
 2532                 status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
 2533                                                    ICE_DFLT_AGG_ID, tc);
 2534                 if (status)
 2535                         break;
 2536 
 2537                 ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
 2538                 if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
 2539                         LIST_DEL(&agg_vsi_info->list_entry);
 2540                         ice_free(pi->hw, agg_vsi_info);
 2541                 }
 2542         }
 2543 
 2544         return status;
 2545 }
 2546 
 2547 /**
 2548  * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
 2549  * @pi: port information structure
 2550  * @node: node pointer
 2551  *
 2552  * This function checks whether the aggregator has any VSI attached to it.
 2553  */
 2554 static bool
 2555 ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
 2556 {
 2557         u8 vsil, i;
 2558 
 2559         vsil = ice_sched_get_vsi_layer(pi->hw);
 2560         if (node->tx_sched_layer < vsil - 1) {
 2561                 for (i = 0; i < node->num_children; i++)
 2562                         if (ice_sched_is_agg_inuse(pi, node->children[i]))
 2563                                 return true;
 2564                 return false;
 2565         } else {
 2566                 return node->num_children ? true : false;
 2567         }
 2568 }
 2569 
 2570 /**
 2571  * ice_sched_rm_agg_cfg - remove the aggregator node
 2572  * @pi: port information structure
 2573  * @agg_id: aggregator ID
 2574  * @tc: TC number
 2575  *
 2576  * This function removes the aggregator node and intermediate nodes if any
 2577  * from the given TC
 2578  */
 2579 static enum ice_status
 2580 ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
 2581 {
 2582         struct ice_sched_node *tc_node, *agg_node;
 2583         struct ice_hw *hw = pi->hw;
 2584 
 2585         tc_node = ice_sched_get_tc_node(pi, tc);
 2586         if (!tc_node)
 2587                 return ICE_ERR_CFG;
 2588 
 2589         agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
 2590         if (!agg_node)
 2591                 return ICE_ERR_DOES_NOT_EXIST;
 2592 
 2593         /* Can't remove the aggregator node if it has children */
 2594         if (ice_sched_is_agg_inuse(pi, agg_node))
 2595                 return ICE_ERR_IN_USE;
 2596 
 2597         /* climb up while the node is its parent's only child so that the
 2598          * whole now-empty chain is freed along with the aggregator node.
 2599          */
 2600         while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
 2601                 struct ice_sched_node *parent = agg_node->parent;
 2602 
 2603                 if (!parent)
 2604                         return ICE_ERR_CFG;
 2605 
 2606                 if (parent->num_children > 1)
 2607                         break;
 2608 
 2609                 agg_node = parent;
 2610         }
 2611 
 2612         ice_free_sched_node(pi, agg_node);
 2613         return ICE_SUCCESS;
 2614 }
 2615 
 2616 /**
 2617  * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
 2618  * @pi: port information structure
 2619  * @agg_info: aggregator info
 2620  * @tc: TC number
 2621  * @rm_vsi_info: true to also remove the aggregator VSI info entries
 2622  *
 2623  * This function removes aggregator reference to VSI of given TC. It removes
 2624  * the aggregator configuration completely for requested TC. The caller needs
 2625  * to hold the scheduler lock.
 2626  */
 2627 static enum ice_status
 2628 ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
 2629                   u8 tc, bool rm_vsi_info)
 2630 {
 2631         enum ice_status status = ICE_SUCCESS;
 2632 
 2633         /* If nothing to remove - return success */
 2634         if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
 2635                 goto exit_rm_agg_cfg_tc;
 2636 
 2637         status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
 2638         if (status)
 2639                 goto exit_rm_agg_cfg_tc;
 2640 
 2641         /* Delete aggregator node(s) */
 2642         status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
 2643         if (status)
 2644                 goto exit_rm_agg_cfg_tc;
 2645 
 2646         ice_clear_bit(tc, agg_info->tc_bitmap);
 2647 exit_rm_agg_cfg_tc:
 2648         return status;
 2649 }
 2650 
 2651 /**
 2652  * ice_save_agg_tc_bitmap - save aggregator TC bitmap
 2653  * @pi: port information structure
 2654  * @agg_id: aggregator ID
 2655  * @tc_bitmap: 8-bit TC bitmap
 2656  *
 2657  * Save aggregator TC bitmap. This function needs to be called with scheduler
 2658  * lock held.
 2659  */
 2660 static enum ice_status
 2661 ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
 2662                        ice_bitmap_t *tc_bitmap)
 2663 {
 2664         struct ice_sched_agg_info *agg_info;
 2665 
 2666         agg_info = ice_get_agg_info(pi->hw, agg_id);
 2667         if (!agg_info)
 2668                 return ICE_ERR_PARAM;
 2669         ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap,
 2670                       ICE_MAX_TRAFFIC_CLASS);
 2671         return ICE_SUCCESS;
 2672 }
 2673 
 2674 /**
 2675  * ice_sched_add_agg_cfg - create an aggregator node
 2676  * @pi: port information structure
 2677  * @agg_id: aggregator ID
 2678  * @tc: TC number
 2679  *
 2680  * This function creates an aggregator node and intermediate nodes if required
 2681  * for the given TC
 2682  */
 2683 static enum ice_status
 2684 ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
 2685 {
 2686         struct ice_sched_node *parent, *agg_node, *tc_node;
 2687         u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
 2688         enum ice_status status = ICE_SUCCESS;
 2689         struct ice_hw *hw = pi->hw;
 2690         u32 first_node_teid;
 2691         u16 num_nodes_added;
 2692         u8 i, aggl;
 2693 
 2694         tc_node = ice_sched_get_tc_node(pi, tc);
 2695         if (!tc_node)
 2696                 return ICE_ERR_CFG;
 2697 
 2698         agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
 2699         /* Does the aggregator node already exist? */
 2700         if (agg_node)
 2701                 return status;
 2702 
 2703         aggl = ice_sched_get_agg_layer(hw);
 2704 
 2705         /* need one node in Agg layer */
 2706         num_nodes[aggl] = 1;
 2707 
 2708         /* Check whether the intermediate nodes have space to add the
 2709          * new aggregator. If they are full, then SW needs to allocate a new
 2710          * intermediate node on those layers
 2711          */
 2712         for (i = hw->sw_entry_point_layer; i < aggl; i++) {
 2713                 parent = ice_sched_get_first_node(pi, tc_node, i);
 2714 
 2715                 /* scan all the siblings */
 2716                 while (parent) {
 2717                         if (parent->num_children < hw->max_children[i])
 2718                                 break;
 2719                         parent = parent->sibling;
 2720                 }
 2721 
 2722                 /* all the nodes are full, reserve one for this layer */
 2723                 if (!parent)
 2724                         num_nodes[i]++;
 2725         }
 2726 
 2727         /* add the aggregator node */
 2728         parent = tc_node;
 2729         for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
 2730                 if (!parent)
 2731                         return ICE_ERR_CFG;
 2732 
 2733                 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
 2734                                                       num_nodes[i],
 2735                                                       &first_node_teid,
 2736                                                       &num_nodes_added);
 2737                 if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
 2738                         return ICE_ERR_CFG;
 2739 
 2740                 /* The newly added node can be a new parent for the next
 2741                  * layer nodes
 2742                  */
 2743                 if (num_nodes_added) {
 2744                         parent = ice_sched_find_node_by_teid(tc_node,
 2745                                                              first_node_teid);
 2746                         /* register aggregator ID with the aggregator node */
 2747                         if (parent && i == aggl)
 2748                                 parent->agg_id = agg_id;
 2749                 } else {
 2750                         parent = parent->children[0];
 2751                 }
 2752         }
 2753 
 2754         return ICE_SUCCESS;
 2755 }
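
/*
 * Illustrative walk-through of the two-pass layout above (assumed numbers,
 * not from the driver source): suppose the SW entry point is layer 1, the
 * aggregator layer is 3, and every layer-2 node is already at max_children.
 * The first pass then reserves:
 *
 *	num_nodes[1] = 0;	-- an existing layer-1 node still has room
 *	num_nodes[2] = 1;	-- all layer-2 siblings were full
 *	num_nodes[3] = 1;	-- the aggregator node itself
 *
 * and the second pass adds the nodes top-down, re-parenting each layer onto
 * the node just created via ice_sched_find_node_by_teid().
 */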
 2756 
 2757 /**
 2758  * ice_sched_cfg_agg - configure aggregator node
 2759  * @pi: port information structure
 2760  * @agg_id: aggregator ID
 2761  * @agg_type: aggregator type: queue, VSI, or aggregator group
 2762  * @tc_bitmap: 8-bit TC bitmap
 2763  *
 2764  * This function registers a unique aggregator node into scheduler services.
 2765  * It allows a user to register with a unique ID to track its resources.
 2766  * The aggregator type determines whether this is a queue group, VSI group,
 2767  * or aggregator group. It then creates the aggregator node(s) for the
 2768  * requested TC(s), or removes an existing aggregator node including its
 2769  * configuration if so indicated via tc_bitmap. Call ice_rm_agg_cfg to
 2770  * release aggregator resources and remove the aggregator ID.
 2771  * This function needs to be called with scheduler lock held.
 2772  */
 2773 static enum ice_status
 2774 ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
 2775                   enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap)
 2776 {
 2777         struct ice_sched_agg_info *agg_info;
 2778         enum ice_status status = ICE_SUCCESS;
 2779         struct ice_hw *hw = pi->hw;
 2780         u8 tc;
 2781 
 2782         agg_info = ice_get_agg_info(hw, agg_id);
 2783         if (!agg_info) {
 2784                 /* Create new entry for new aggregator ID */
 2785                 agg_info = (struct ice_sched_agg_info *)
 2786                         ice_malloc(hw, sizeof(*agg_info));
 2787                 if (!agg_info)
 2788                         return ICE_ERR_NO_MEMORY;
 2789 
 2790                 agg_info->agg_id = agg_id;
 2791                 agg_info->agg_type = agg_type;
 2792                 agg_info->tc_bitmap[0] = 0;
 2793 
 2794                 /* Initialize the aggregator VSI list head */
 2795                 INIT_LIST_HEAD(&agg_info->agg_vsi_list);
 2796 
 2797                 /* Add new entry in aggregator list */
 2798                 LIST_ADD(&agg_info->list_entry, &hw->agg_list);
 2799         }
 2800         /* Create aggregator node(s) for requested TC(s) */
 2801         ice_for_each_traffic_class(tc) {
 2802                 if (!ice_is_tc_ena(*tc_bitmap, tc)) {
 2803                         /* Delete aggregator cfg TC if it exists previously */
 2804                         status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
 2805                         if (status)
 2806                                 break;
 2807                         continue;
 2808                 }
 2809 
 2810                 /* Check if aggregator node for TC already exists */
 2811                 if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
 2812                         continue;
 2813 
 2814                 /* Create new aggregator node for TC */
 2815                 status = ice_sched_add_agg_cfg(pi, agg_id, tc);
 2816                 if (status)
 2817                         break;
 2818 
 2819                 /* Save aggregator node's TC information */
 2820                 ice_set_bit(tc, agg_info->tc_bitmap);
 2821         }
 2822 
 2823         return status;
 2824 }
 2825 
 2826 /**
 2827  * ice_cfg_agg - config aggregator node
 2828  * @pi: port information structure
 2829  * @agg_id: aggregator ID
 2830  * @agg_type: aggregator type: queue, VSI, or aggregator group
 2831  * @tc_bitmap: 8-bit TC bitmap
 2832  *
 2833  * This function configures aggregator node(s).
 2834  */
 2835 enum ice_status
 2836 ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
 2837             u8 tc_bitmap)
 2838 {
 2839         ice_bitmap_t bitmap = tc_bitmap;
 2840         enum ice_status status;
 2841 
 2842         ice_acquire_lock(&pi->sched_lock);
 2843         status = ice_sched_cfg_agg(pi, agg_id, agg_type,
 2844                                    (ice_bitmap_t *)&bitmap);
 2845         if (!status)
 2846                 status = ice_save_agg_tc_bitmap(pi, agg_id,
 2847                                                 (ice_bitmap_t *)&bitmap);
 2848         ice_release_lock(&pi->sched_lock);
 2849         return status;
 2850 }
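
/*
 * Illustrative usage (hypothetical IDs, not part of the driver source):
 * register aggregator ID 1 as an aggregator group on TC 0 and TC 1 by
 * setting bits 0 and 1 of the bitmap. Clearing a TC bit in a later call
 * removes that TC's aggregator configuration again.
 *
 *	enum ice_status status;
 *
 *	status = ice_cfg_agg(pi, 1, ICE_AGG_TYPE_AGG, 0x03);
 *	if (status != ICE_SUCCESS)
 *		return status;
 */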
 2851 
 2852 /**
 2853  * ice_get_agg_vsi_info - get the aggregator's VSI info
 2854  * @agg_info: aggregator info
 2855  * @vsi_handle: software VSI handle
 2856  *
 2857  * The function returns aggregator VSI info based on VSI handle. This function
 2858  * needs to be called with scheduler lock held.
 2859  */
 2860 static struct ice_sched_agg_vsi_info *
 2861 ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
 2862 {
 2863         struct ice_sched_agg_vsi_info *agg_vsi_info;
 2864 
 2865         LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
 2866                             ice_sched_agg_vsi_info, list_entry)
 2867                 if (agg_vsi_info->vsi_handle == vsi_handle)
 2868                         return agg_vsi_info;
 2869 
 2870         return NULL;
 2871 }
 2872 
 2873 /**
 2874  * ice_get_vsi_agg_info - get the aggregator info of VSI
 2875  * @hw: pointer to the hardware structure
 2876  * @vsi_handle: software VSI handle
 2877  *
 2878  * The function returns the aggregator info of the VSI represented by
 2879  * vsi_handle; in this case the VSI has a different aggregator than the
 2880  * default one. This function needs to be called with the scheduler lock held.
 2881  */
 2882 static struct ice_sched_agg_info *
 2883 ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
 2884 {
 2885         struct ice_sched_agg_info *agg_info;
 2886 
 2887         LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
 2888                             list_entry) {
 2889                 struct ice_sched_agg_vsi_info *agg_vsi_info;
 2890 
 2891                 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
 2892                 if (agg_vsi_info)
 2893                         return agg_info;
 2894         }
 2895         return NULL;
 2896 }
 2897 
 2898 /**
 2899  * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
 2900  * @pi: port information structure
 2901  * @agg_id: aggregator ID
 2902  * @vsi_handle: software VSI handle
 2903  * @tc_bitmap: TC bitmap of enabled TC(s)
 2904  *
 2905  * Save VSI to aggregator TC bitmap. This function needs to be called with
 2906  * the scheduler lock held.
 2907  */
 2908 static enum ice_status
 2909 ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
 2910                            ice_bitmap_t *tc_bitmap)
 2911 {
 2912         struct ice_sched_agg_vsi_info *agg_vsi_info;
 2913         struct ice_sched_agg_info *agg_info;
 2914 
 2915         agg_info = ice_get_agg_info(pi->hw, agg_id);
 2916         if (!agg_info)
 2917                 return ICE_ERR_PARAM;
 2918         /* check if the entry already exists */
 2919         agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
 2920         if (!agg_vsi_info)
 2921                 return ICE_ERR_PARAM;
 2922         ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
 2923                       ICE_MAX_TRAFFIC_CLASS);
 2924         return ICE_SUCCESS;
 2925 }
 2926 
 2927 /**
 2928  * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
 2929  * @pi: port information structure
 2930  * @agg_id: aggregator ID
 2931  * @vsi_handle: software VSI handle
 2932  * @tc_bitmap: TC bitmap of enabled TC(s)
 2933  *
 2934  * This function moves a VSI to a new or default aggregator node. If the VSI
 2935  * is already associated with the aggregator node, then no operation is
 2936  * performed on the tree. This function needs to be called with the scheduler
 2937  * lock held.
 2937  */
 2938 static enum ice_status
 2939 ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
 2940                            u16 vsi_handle, ice_bitmap_t *tc_bitmap)
 2941 {
 2942         struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
 2943         struct ice_sched_agg_info *agg_info, *old_agg_info;
 2944         enum ice_status status = ICE_SUCCESS;
 2945         struct ice_hw *hw = pi->hw;
 2946         u8 tc;
 2947 
 2948         if (!ice_is_vsi_valid(pi->hw, vsi_handle))
 2949                 return ICE_ERR_PARAM;
 2950         agg_info = ice_get_agg_info(hw, agg_id);
 2951         if (!agg_info)
 2952                 return ICE_ERR_PARAM;
 2953         /* If the VSI is already part of another aggregator, then update
 2954          * its VSI info list
 2955          */
 2956         old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
 2957         if (old_agg_info && old_agg_info != agg_info) {
 2958                 struct ice_sched_agg_vsi_info *vtmp;
 2959 
 2960                 LIST_FOR_EACH_ENTRY_SAFE(old_agg_vsi_info, vtmp,
 2961                                          &old_agg_info->agg_vsi_list,
 2962                                          ice_sched_agg_vsi_info, list_entry)
 2963                         if (old_agg_vsi_info->vsi_handle == vsi_handle)
 2964                                 break;
 2965         }
 2966 
 2967         /* check if the entry already exists */
 2968         agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
 2969         if (!agg_vsi_info) {
 2970                 /* Create new entry for VSI under aggregator list */
 2971                 agg_vsi_info = (struct ice_sched_agg_vsi_info *)
 2972                         ice_malloc(hw, sizeof(*agg_vsi_info));
 2973                 if (!agg_vsi_info)
 2974                         return ICE_ERR_PARAM;
 2975 
 2976                 /* add VSI ID into the aggregator list */
 2977                 agg_vsi_info->vsi_handle = vsi_handle;
 2978                 LIST_ADD(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
 2979         }
 2980         /* Move VSI node to new aggregator node for requested TC(s) */
 2981         ice_for_each_traffic_class(tc) {
 2982                 if (!ice_is_tc_ena(*tc_bitmap, tc))
 2983                         continue;
 2984 
 2985                 /* Move VSI to new aggregator */
 2986                 status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
 2987                 if (status)
 2988                         break;
 2989 
 2990                 ice_set_bit(tc, agg_vsi_info->tc_bitmap);
 2991                 if (old_agg_vsi_info)
 2992                         ice_clear_bit(tc, old_agg_vsi_info->tc_bitmap);
 2993         }
 2994         if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) {
 2995                 LIST_DEL(&old_agg_vsi_info->list_entry);
 2996                 ice_free(pi->hw, old_agg_vsi_info);
 2997         }
 2998         return status;
 2999 }
 3000 
 3001 /**
 3002  * ice_sched_rm_unused_rl_prof - remove unused RL profile
 3003  * @hw: pointer to the hardware structure
 3004  *
 3005  * This function removes unused rate limit profiles from the HW and
 3006  * SW DB. The caller needs to hold scheduler lock.
 3007  */
 3008 static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw)
 3009 {
 3010         u16 ln;
 3011 
 3012         for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
 3013                 struct ice_aqc_rl_profile_info *rl_prof_elem;
 3014                 struct ice_aqc_rl_profile_info *rl_prof_tmp;
 3015 
 3016                 LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
 3017                                          &hw->rl_prof_list[ln],
 3018                                          ice_aqc_rl_profile_info, list_entry) {
 3019                         if (!ice_sched_del_rl_profile(hw, rl_prof_elem))
 3020                                 ice_debug(hw, ICE_DBG_SCHED, "Removed rl profile\n");
 3021                 }
 3022         }
 3023 }
 3024 
 3025 /**
 3026  * ice_sched_update_elem - update element
 3027  * @hw: pointer to the HW struct
 3028  * @node: pointer to node
 3029  * @info: node info to update
 3030  *
 3031  * Update the HW DB and the local SW DB for the node. The node's scheduling
 3032  * parameters are updated from the info argument's data buffer (info->data),
 3033  * and the function returns an error if configuring the sched element fails.
 3034  * The caller needs to hold the scheduler lock.
 3035  */
 3036 static enum ice_status
 3037 ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
 3038                       struct ice_aqc_txsched_elem_data *info)
 3039 {
 3040         struct ice_aqc_txsched_elem_data buf;
 3041         enum ice_status status;
 3042         u16 elem_cfgd = 0;
 3043         u16 num_elems = 1;
 3044 
 3045         buf = *info;
 3046         /* For TC nodes, CIR config is not supported */
 3047         if (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_TC)
 3048                 buf.data.valid_sections &= ~ICE_AQC_ELEM_VALID_CIR;
 3049         /* Parent TEID is a reserved field in this AQ call */
 3050         buf.parent_teid = 0;
 3051         /* Element type is a reserved field in this AQ call */
 3052         buf.data.elem_type = 0;
 3053         /* The flags field is reserved in this AQ call */
 3054         buf.data.flags = 0;
 3055 
 3056         /* Update HW DB */
 3057         /* Configure element node */
 3058         status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
 3059                                         &elem_cfgd, NULL);
 3060         if (status || elem_cfgd != num_elems) {
 3061                 ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
 3062                 return ICE_ERR_CFG;
 3063         }
 3064 
 3065         /* Config success case */
 3066         /* Now update local SW DB */
 3067         /* Only copy the data portion of info buffer */
 3068         node->info.data = info->data;
 3069         return status;
 3070 }
 3071 
 3072 /**
 3073  * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
 3074  * @hw: pointer to the HW struct
 3075  * @node: sched node to configure
 3076  * @rl_type: rate limit type CIR, EIR, or shared
 3077  * @bw_alloc: BW weight/allocation
 3078  *
 3079  * This function configures node element's BW allocation.
 3080  */
 3081 static enum ice_status
 3082 ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
 3083                             enum ice_rl_type rl_type, u16 bw_alloc)
 3084 {
 3085         struct ice_aqc_txsched_elem_data buf;
 3086         struct ice_aqc_txsched_elem *data;
 3087         enum ice_status status;
 3088 
 3089         buf = node->info;
 3090         data = &buf.data;
 3091         if (rl_type == ICE_MIN_BW) {
 3092                 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
 3093                 data->cir_bw.bw_alloc = CPU_TO_LE16(bw_alloc);
 3094         } else if (rl_type == ICE_MAX_BW) {
 3095                 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
 3096                 data->eir_bw.bw_alloc = CPU_TO_LE16(bw_alloc);
 3097         } else {
 3098                 return ICE_ERR_PARAM;
 3099         }
 3100 
 3101         /* Configure element */
 3102         status = ice_sched_update_elem(hw, node, &buf);
 3103         return status;
 3104 }
 3105 
 3106 /**
 3107  * ice_move_vsi_to_agg - moves VSI to new or default aggregator
 3108  * @pi: port information structure
 3109  * @agg_id: aggregator ID
 3110  * @vsi_handle: software VSI handle
 3111  * @tc_bitmap: TC bitmap of enabled TC(s)
 3112  *
 3113  * Move or associate VSI to a new or default aggregator node.
 3114  */
 3115 enum ice_status
 3116 ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
 3117                     u8 tc_bitmap)
 3118 {
 3119         ice_bitmap_t bitmap = tc_bitmap;
 3120         enum ice_status status;
 3121 
 3122         ice_acquire_lock(&pi->sched_lock);
 3123         status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
 3124                                             (ice_bitmap_t *)&bitmap);
 3125         if (!status)
 3126                 status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
 3127                                                     (ice_bitmap_t *)&bitmap);
 3128         ice_release_lock(&pi->sched_lock);
 3129         return status;
 3130 }
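
/*
 * Illustrative usage (hypothetical handle and ID, not part of the driver
 * source): attach software VSI handle 0 to aggregator 1 on TC 0 only. If
 * the VSI currently sits under another aggregator, the corresponding TC
 * bit is cleared there and the VSI node is moved in the tree.
 *
 *	enum ice_status status;
 *
 *	status = ice_move_vsi_to_agg(pi, 1, 0, 0x01);
 *	if (status != ICE_SUCCESS)
 *		return status;
 */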
 3131 
 3132 /**
 3133  * ice_rm_agg_cfg - remove aggregator configuration
 3134  * @pi: port information structure
 3135  * @agg_id: aggregator ID
 3136  *
 3137  * This function removes the aggregator's references to VSIs and deletes the
 3138  * aggregator ID info. It removes the aggregator configuration completely.
 3139  */
 3140 enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
 3141 {
 3142         struct ice_sched_agg_info *agg_info;
 3143         enum ice_status status = ICE_SUCCESS;
 3144         u8 tc;
 3145 
 3146         ice_acquire_lock(&pi->sched_lock);
 3147         agg_info = ice_get_agg_info(pi->hw, agg_id);
 3148         if (!agg_info) {
 3149                 status = ICE_ERR_DOES_NOT_EXIST;
 3150                 goto exit_ice_rm_agg_cfg;
 3151         }
 3152 
 3153         ice_for_each_traffic_class(tc) {
 3154                 status = ice_rm_agg_cfg_tc(pi, agg_info, tc, true);
 3155                 if (status)
 3156                         goto exit_ice_rm_agg_cfg;
 3157         }
 3158 
 3159         if (ice_is_any_bit_set(agg_info->tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) {
 3160                 status = ICE_ERR_IN_USE;
 3161                 goto exit_ice_rm_agg_cfg;
 3162         }
 3163 
 3164         /* Safe to delete entry now */
 3165         LIST_DEL(&agg_info->list_entry);
 3166         ice_free(pi->hw, agg_info);
 3167 
 3168         /* Remove unused RL profile IDs from HW and SW DB */
 3169         ice_sched_rm_unused_rl_prof(pi->hw);
 3170 
 3171 exit_ice_rm_agg_cfg:
 3172         ice_release_lock(&pi->sched_lock);
 3173         return status;
 3174 }
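
/*
 * Illustrative teardown (hypothetical ID, not part of the driver source):
 * an aggregator registered with ice_cfg_agg() can be torn down once its
 * VSIs have been returned to the default aggregator; ICE_ERR_IN_USE
 * indicates a TC bit was still set after the per-TC removal pass.
 *
 *	enum ice_status status;
 *
 *	status = ice_rm_agg_cfg(pi, 1);
 *	if (status == ICE_ERR_IN_USE)
 *		ice_debug(pi->hw, ICE_DBG_SCHED, "aggregator still in use\n");
 */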
 3175 
 3176 /**
 3177  * ice_set_clear_cir_bw_alloc - set or clear CIR BW alloc information
 3178  * @bw_t_info: bandwidth type information structure
 3179  * @bw_alloc: Bandwidth allocation information
 3180  *
 3181  * Save or clear CIR BW alloc information (bw_alloc) in the passed param
 3182  * bw_t_info.
 3183  */
 3184 static void
 3185 ice_set_clear_cir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
 3186 {
 3187         bw_t_info->cir_bw.bw_alloc = bw_alloc;
 3188         if (bw_t_info->cir_bw.bw_alloc)
 3189                 ice_set_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
 3190         else
 3191                 ice_clear_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
 3192 }
 3193 
 3194 /**
 3195  * ice_set_clear_eir_bw_alloc - set or clear EIR BW alloc information
 3196  * @bw_t_info: bandwidth type information structure
 3197  * @bw_alloc: Bandwidth allocation information
 3198  *
 3199  * Save or clear EIR BW alloc information (bw_alloc) in the passed param
 3200  * bw_t_info.
 3201  */
 3202 static void
 3203 ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
 3204 {
 3205         bw_t_info->eir_bw.bw_alloc = bw_alloc;
 3206         if (bw_t_info->eir_bw.bw_alloc)
 3207                 ice_set_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
 3208         else
 3209                 ice_clear_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
 3210 }
 3211 
 3212 /**
 3213  * ice_sched_save_vsi_bw_alloc - save VSI node's BW alloc information
 3214  * @pi: port information structure
 3215  * @vsi_handle: sw VSI handle
 3216  * @tc: traffic class
 3217  * @rl_type: rate limit type min or max
 3218  * @bw_alloc: Bandwidth allocation information
 3219  *
 3220  * Save BW alloc information of VSI type node for post replay use.
 3221  */
 3222 static enum ice_status
 3223 ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 3224                             enum ice_rl_type rl_type, u16 bw_alloc)
 3225 {
 3226         struct ice_vsi_ctx *vsi_ctx;
 3227 
 3228         if (!ice_is_vsi_valid(pi->hw, vsi_handle))
 3229                 return ICE_ERR_PARAM;
 3230         vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
 3231         if (!vsi_ctx)
 3232                 return ICE_ERR_PARAM;
 3233         switch (rl_type) {
 3234         case ICE_MIN_BW:
 3235                 ice_set_clear_cir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
 3236                                            bw_alloc);
 3237                 break;
 3238         case ICE_MAX_BW:
 3239                 ice_set_clear_eir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
 3240                                            bw_alloc);
 3241                 break;
 3242         default:
 3243                 return ICE_ERR_PARAM;
 3244         }
 3245         return ICE_SUCCESS;
 3246 }
 3247 
 3248 /**
 3249  * ice_set_clear_cir_bw - set or clear CIR BW
 3250  * @bw_t_info: bandwidth type information structure
 3251  * @bw: bandwidth in Kbps - Kilo bits per sec
 3252  *
 3253  * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
 3254  */
 3255 static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
 3256 {
 3257         if (bw == ICE_SCHED_DFLT_BW) {
 3258                 ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
 3259                 bw_t_info->cir_bw.bw = 0;
 3260         } else {
 3261                 /* Save type of BW information */
 3262                 ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
 3263                 bw_t_info->cir_bw.bw = bw;
 3264         }
 3265 }
 3266 
 3267 /**
 3268  * ice_set_clear_eir_bw - set or clear EIR BW
 3269  * @bw_t_info: bandwidth type information structure
 3270  * @bw: bandwidth in Kbps - Kilo bits per sec
 3271  *
 3272  * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
 3273  */
 3274 static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
 3275 {
 3276         if (bw == ICE_SCHED_DFLT_BW) {
 3277                 ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
 3278                 bw_t_info->eir_bw.bw = 0;
 3279         } else {
 3280                 /* save EIR BW information */
 3281                 ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
 3282                 bw_t_info->eir_bw.bw = bw;
 3283         }
 3284 }
 3285 
 3286 /**
 3287  * ice_set_clear_shared_bw - set or clear shared BW
 3288  * @bw_t_info: bandwidth type information structure
 3289  * @bw: bandwidth in Kbps - Kilo bits per sec
 3290  *
 3291  * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
 3292  */
 3293 static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
 3294 {
 3295         if (bw == ICE_SCHED_DFLT_BW) {
 3296                 ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
 3297                 bw_t_info->shared_bw = 0;
 3298         } else {
 3299                 /* save shared BW information */
 3300                 ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
 3301                 bw_t_info->shared_bw = bw;
 3302         }
 3303 }
 3304 
 3305 /**
 3306  * ice_sched_save_vsi_bw - save VSI node's BW information
 3307  * @pi: port information structure
 3308  * @vsi_handle: sw VSI handle
 3309  * @tc: traffic class
 3310  * @rl_type: rate limit type min, max, or shared
 3311  * @bw: bandwidth in Kbps - Kilo bits per sec
 3312  *
 3313  * Save BW information of VSI type node for post replay use.
 3314  */
 3315 static enum ice_status
 3316 ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 3317                       enum ice_rl_type rl_type, u32 bw)
 3318 {
 3319         struct ice_vsi_ctx *vsi_ctx;
 3320 
 3321         if (!ice_is_vsi_valid(pi->hw, vsi_handle))
 3322                 return ICE_ERR_PARAM;
 3323         vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
 3324         if (!vsi_ctx)
 3325                 return ICE_ERR_PARAM;
 3326         switch (rl_type) {
 3327         case ICE_MIN_BW:
 3328                 ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
 3329                 break;
 3330         case ICE_MAX_BW:
 3331                 ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
 3332                 break;
 3333         case ICE_SHARED_BW:
 3334                 ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
 3335                 break;
 3336         default:
 3337                 return ICE_ERR_PARAM;
 3338         }
 3339         return ICE_SUCCESS;
 3340 }
 3341 
 3342 /**
 3343  * ice_set_clear_prio - set or clear priority information
 3344  * @bw_t_info: bandwidth type information structure
 3345  * @prio: priority to save
 3346  *
 3347  * Save or clear priority (prio) in the passed param bw_t_info.
 3348  */
 3349 static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
 3350 {
 3351         bw_t_info->generic = prio;
 3352         if (bw_t_info->generic)
 3353                 ice_set_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
 3354         else
 3355                 ice_clear_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
 3356 }
 3357 
 3358 /**
 3359  * ice_sched_save_vsi_prio - save VSI node's priority information
 3360  * @pi: port information structure
 3361  * @vsi_handle: Software VSI handle
 3362  * @tc: traffic class
 3363  * @prio: priority to save
 3364  *
 3365  * Save priority information of VSI type node for post replay use.
 3366  */
 3367 static enum ice_status
 3368 ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 3369                         u8 prio)
 3370 {
 3371         struct ice_vsi_ctx *vsi_ctx;
 3372 
 3373         if (!ice_is_vsi_valid(pi->hw, vsi_handle))
 3374                 return ICE_ERR_PARAM;
 3375         vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
 3376         if (!vsi_ctx)
 3377                 return ICE_ERR_PARAM;
 3378         if (tc >= ICE_MAX_TRAFFIC_CLASS)
 3379                 return ICE_ERR_PARAM;
 3380         ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio);
 3381         return ICE_SUCCESS;
 3382 }
 3383 
 3384 /**
 3385  * ice_sched_save_agg_bw_alloc - save aggregator node's BW alloc information
 3386  * @pi: port information structure
 3387  * @agg_id: node aggregator ID
 3388  * @tc: traffic class
 3389  * @rl_type: rate limit type min or max
 3390  * @bw_alloc: bandwidth alloc information
 3391  *
 3392  * Save BW alloc information of AGG type node for post replay use.
 3393  */
 3394 static enum ice_status
 3395 ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc,
 3396                             enum ice_rl_type rl_type, u16 bw_alloc)
 3397 {
 3398         struct ice_sched_agg_info *agg_info;
 3399 
 3400         agg_info = ice_get_agg_info(pi->hw, agg_id);
 3401         if (!agg_info)
 3402                 return ICE_ERR_PARAM;
 3403         if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
 3404                 return ICE_ERR_PARAM;
 3405         switch (rl_type) {
 3406         case ICE_MIN_BW:
 3407                 ice_set_clear_cir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
 3408                 break;
 3409         case ICE_MAX_BW:
 3410                 ice_set_clear_eir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
 3411                 break;
 3412         default:
 3413                 return ICE_ERR_PARAM;
 3414         }
 3415         return ICE_SUCCESS;
 3416 }
 3417 
 3418 /**
 3419  * ice_sched_save_agg_bw - save aggregator node's BW information
 3420  * @pi: port information structure
 3421  * @agg_id: node aggregator ID
 3422  * @tc: traffic class
 3423  * @rl_type: rate limit type min, max, or shared
 3424  * @bw: bandwidth in Kbps - Kilo bits per sec
 3425  *
 3426  * Save BW information of AGG type node for post replay use.
 3427  */
 3428 static enum ice_status
 3429 ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
 3430                       enum ice_rl_type rl_type, u32 bw)
 3431 {
 3432         struct ice_sched_agg_info *agg_info;
 3433 
 3434         agg_info = ice_get_agg_info(pi->hw, agg_id);
 3435         if (!agg_info)
 3436                 return ICE_ERR_PARAM;
 3437         if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
 3438                 return ICE_ERR_PARAM;
 3439         switch (rl_type) {
 3440         case ICE_MIN_BW:
 3441                 ice_set_clear_cir_bw(&agg_info->bw_t_info[tc], bw);
 3442                 break;
 3443         case ICE_MAX_BW:
 3444                 ice_set_clear_eir_bw(&agg_info->bw_t_info[tc], bw);
 3445                 break;
 3446         case ICE_SHARED_BW:
 3447                 ice_set_clear_shared_bw(&agg_info->bw_t_info[tc], bw);
 3448                 break;
 3449         default:
 3450                 return ICE_ERR_PARAM;
 3451         }
 3452         return ICE_SUCCESS;
 3453 }
 3454 
 3455 /**
 3456  * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
 3457  * @pi: port information structure
 3458  * @vsi_handle: software VSI handle
 3459  * @tc: traffic class
 3460  * @rl_type: min or max
 3461  * @bw: bandwidth in Kbps
 3462  *
 3463  * This function configures BW limit of VSI scheduling node based on TC
 3464  * information.
 3465  */
 3466 enum ice_status
 3467 ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 3468                           enum ice_rl_type rl_type, u32 bw)
 3469 {
 3470         enum ice_status status;
 3471 
 3472         status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
 3473                                                   ICE_AGG_TYPE_VSI,
 3474                                                   tc, rl_type, bw);
 3475         if (!status) {
 3476                 ice_acquire_lock(&pi->sched_lock);
 3477                 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
 3478                 ice_release_lock(&pi->sched_lock);
 3479         }
 3480         return status;
 3481 }
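
/*
 * Illustrative usage (hypothetical values, not part of the driver source):
 * cap VSI handle 0 at 100 Mbps (100000 Kbps) on TC 0. The companion
 * ice_cfg_vsi_bw_dflt_lmt_per_tc() below restores the default limit.
 *
 *	enum ice_status status;
 *
 *	status = ice_cfg_vsi_bw_lmt_per_tc(pi, 0, 0, ICE_MAX_BW, 100000);
 *	if (status != ICE_SUCCESS)
 *		return status;
 */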
 3482 
 3483 /**
 3484  * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
 3485  * @pi: port information structure
 3486  * @vsi_handle: software VSI handle
 3487  * @tc: traffic class
 3488  * @rl_type: min or max
 3489  *
 3490  * This function configures default BW limit of VSI scheduling node based on TC
 3491  * information.
 3492  */
 3493 enum ice_status
 3494 ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 3495                                enum ice_rl_type rl_type)
 3496 {
 3497         enum ice_status status;
 3498 
 3499         status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
 3500                                                   ICE_AGG_TYPE_VSI,
 3501                                                   tc, rl_type,
 3502                                                   ICE_SCHED_DFLT_BW);
 3503         if (!status) {
 3504                 ice_acquire_lock(&pi->sched_lock);
 3505                 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
 3506                                                ICE_SCHED_DFLT_BW);
 3507                 ice_release_lock(&pi->sched_lock);
 3508         }
 3509         return status;
 3510 }
 3511 
 3512 /**
 3513  * ice_cfg_agg_bw_lmt_per_tc - configure aggregator BW limit per TC
 3514  * @pi: port information structure
 3515  * @agg_id: aggregator ID
 3516  * @tc: traffic class
 3517  * @rl_type: min or max
 3518  * @bw: bandwidth in Kbps
 3519  *
 3520  * This function applies BW limit to aggregator scheduling node based on TC
 3521  * information.
 3522  */
 3523 enum ice_status
 3524 ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
 3525                           enum ice_rl_type rl_type, u32 bw)
 3526 {
 3527         enum ice_status status;
 3528 
 3529         status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
 3530                                                   tc, rl_type, bw);
 3531         if (!status) {
 3532                 ice_acquire_lock(&pi->sched_lock);
 3533                 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
 3534                 ice_release_lock(&pi->sched_lock);
 3535         }
 3536         return status;
 3537 }
 3538 
 3539 /**
 3540  * ice_cfg_agg_bw_dflt_lmt_per_tc - configure aggregator BW default limit per TC
 3541  * @pi: port information structure
 3542  * @agg_id: aggregator ID
 3543  * @tc: traffic class
 3544  * @rl_type: min or max
 3545  *
 3546  * This function applies default BW limit to aggregator scheduling node based
 3547  * on TC information.
 3548  */
 3549 enum ice_status
 3550 ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
 3551                                enum ice_rl_type rl_type)
 3552 {
 3553         enum ice_status status;
 3554 
 3555         status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
 3556                                                   tc, rl_type,
 3557                                                   ICE_SCHED_DFLT_BW);
 3558         if (!status) {
 3559                 ice_acquire_lock(&pi->sched_lock);
 3560                 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type,
 3561                                                ICE_SCHED_DFLT_BW);
 3562                 ice_release_lock(&pi->sched_lock);
 3563         }
 3564         return status;
 3565 }
 3566 
 3567 /**
 3568  * ice_cfg_vsi_bw_shared_lmt - configure VSI BW shared limit
 3569  * @pi: port information structure
 3570  * @vsi_handle: software VSI handle
 3571  * @min_bw: minimum bandwidth in Kbps
 3572  * @max_bw: maximum bandwidth in Kbps
 3573  * @shared_bw: shared bandwidth in Kbps
 3574  *
 3575  * Configure the shared rate limiter (SRL) of all VSI type nodes across all
 3576  * traffic classes for the VSI matching the handle.
 3577  */
 3578 enum ice_status
 3579 ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
 3580                           u32 max_bw, u32 shared_bw)
 3581 {
 3582         return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, min_bw, max_bw,
 3583                                                shared_bw);
 3584 }
 3585 
 3586 /**
 3587  * ice_cfg_vsi_bw_no_shared_lmt - configure VSI BW for no shared limiter
 3588  * @pi: port information structure
 3589  * @vsi_handle: software VSI handle
 3590  *
 3591  * This function removes the shared rate limiter (SRL) of all VSI type nodes
 3592  * across all traffic classes for VSI matching handle.
 3593  */
 3594 enum ice_status
 3595 ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
 3596 {
 3597         return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle,
 3598                                                ICE_SCHED_DFLT_BW,
 3599                                                ICE_SCHED_DFLT_BW,
 3600                                                ICE_SCHED_DFLT_BW);
 3601 }
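
/*
 * Illustrative usage (hypothetical values, not part of the driver source):
 * give VSI handle 0 a 50 Mbps shared (SRL) cap while leaving min/max at
 * their defaults, then remove it again; the no_shared wrapper above simply
 * passes ICE_SCHED_DFLT_BW for all three bandwidths.
 *
 *	enum ice_status status;
 *
 *	status = ice_cfg_vsi_bw_shared_lmt(pi, 0, ICE_SCHED_DFLT_BW,
 *					   ICE_SCHED_DFLT_BW, 50000);
 *	if (!status)
 *		status = ice_cfg_vsi_bw_no_shared_lmt(pi, 0);
 */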
 3602 
 3603 /**
 3604  * ice_cfg_agg_bw_shared_lmt - configure aggregator BW shared limit
 3605  * @pi: port information structure
 3606  * @agg_id: aggregator ID
 3607  * @min_bw: minimum bandwidth in Kbps
 3608  * @max_bw: maximum bandwidth in Kbps
 3609  * @shared_bw: shared bandwidth in Kbps
 3610  *
 3611  * This function configures the shared rate limiter (SRL) of all aggregator type
 3612  * nodes across all traffic classes for aggregator matching agg_id.
 3613  */
 3614 enum ice_status
 3615 ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
 3616                           u32 max_bw, u32 shared_bw)
 3617 {
 3618         return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, min_bw, max_bw,
 3619                                                shared_bw);
 3620 }
 3621 
 3622 /**
 3623  * ice_cfg_agg_bw_no_shared_lmt - configure aggregator BW for no shared limiter
 3624  * @pi: port information structure
 3625  * @agg_id: aggregator ID
 3626  *
 3627  * This function removes the shared rate limiter (SRL) of all aggregator type
 3628  * nodes across all traffic classes for aggregator matching agg_id.
 3629  */
 3630 enum ice_status
 3631 ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
 3632 {
 3633         return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW,
 3634                                                ICE_SCHED_DFLT_BW,
 3635                                                ICE_SCHED_DFLT_BW);
 3636 }
 3637 
 3638 /**
 3639  * ice_cfg_agg_bw_shared_lmt_per_tc - config aggregator BW shared limit per TC
 3640  * @pi: port information structure
 3641  * @agg_id: aggregator ID
 3642  * @tc: traffic class
 3643  * @min_bw: minimum bandwidth in Kbps
 3644  * @max_bw: maximum bandwidth in Kbps
 3645  * @shared_bw: shared bandwidth in Kbps
 3646  *
 3647  * This function configures the shared rate limiter (SRL) of aggregator type
 3648  * nodes for the given traffic class for the aggregator matching agg_id.
 3649  */
 3650 enum ice_status
 3651 ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
 3652                                  u32 min_bw, u32 max_bw, u32 shared_bw)
 3653 {
 3654         return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc, min_bw,
 3655                                                       max_bw, shared_bw);
 3656 }
 3657 
 3658 /**
 3659  * ice_cfg_agg_bw_no_shared_lmt_per_tc - remove aggregator BW shared limit per TC
 3660  * @pi: port information structure
 3661  * @agg_id: aggregator ID
 3662  * @tc: traffic class
 3663  *
 3664  * This function removes the shared rate limiter (SRL) of aggregator type
 3665  * nodes for the given traffic class for the aggregator matching agg_id.
 3666  */
 3667 enum ice_status
 3668 ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc)
 3669 {
 3670         return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc,
 3671                                                       ICE_SCHED_DFLT_BW,
 3672                                                       ICE_SCHED_DFLT_BW,
 3673                                                       ICE_SCHED_DFLT_BW);
 3674 }
 3675 
 3676 /**
 3677  * ice_cfg_vsi_q_priority - config VSI queue priority of node
 3678  * @pi: port information structure
 3679  * @num_qs: number of VSI queues
 3680  * @q_ids: queue IDs array
 3681  * @q_prio: queue priority array
 3682  *
 3683  * This function configures the queue node priority (Sibling Priority) of the
 3684  * passed-in VSI's queue(s) for a given traffic class (TC).
 3685  */
 3686 enum ice_status
 3687 ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
 3688                        u8 *q_prio)
 3689 {
 3690         enum ice_status status = ICE_ERR_PARAM;
 3691         u16 i;
 3692 
 3693         ice_acquire_lock(&pi->sched_lock);
 3694 
 3695         for (i = 0; i < num_qs; i++) {
 3696                 struct ice_sched_node *node;
 3697 
 3698                 node = ice_sched_find_node_by_teid(pi->root, q_ids[i]);
 3699                 if (!node || node->info.data.elem_type !=
 3700                     ICE_AQC_ELEM_TYPE_LEAF) {
 3701                         status = ICE_ERR_PARAM;
 3702                         break;
 3703                 }
 3704                 /* Configure Priority */
 3705                 status = ice_sched_cfg_sibl_node_prio(pi, node, q_prio[i]);
 3706                 if (status)
 3707                         break;
 3708         }
 3709 
 3710         ice_release_lock(&pi->sched_lock);
 3711         return status;
 3712 }
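
/*
 * Illustrative usage (hypothetical TEIDs, not part of the driver source):
 * the q_ids[] entries are resolved with ice_sched_find_node_by_teid(), so
 * they must be TEIDs of leaf (queue) nodes; priorities are applied
 * positionally.
 *
 *	u32 q_ids[2] = { 0x123, 0x124 };	-- hypothetical leaf TEIDs
 *	u8 q_prio[2] = { 0, 1 };		-- sibling priorities
 *	enum ice_status status;
 *
 *	status = ice_cfg_vsi_q_priority(pi, 2, q_ids, q_prio);
 */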
 3713 
 3714 /**
 3715  * ice_cfg_agg_vsi_priority_per_tc - config aggregator's VSI priority per TC
 3716  * @pi: port information structure
 3717  * @agg_id: Aggregator ID
 3718  * @num_vsis: number of VSI(s)
 3719  * @vsi_handle_arr: array of software VSI handles
 3720  * @node_prio: pointer to node priority
 3721  * @tc: traffic class
 3722  *
 3723  * This function configures the node priority (Sibling Priority) of the
 3724  * passed-in VSIs for a given traffic class (TC) of an aggregator ID.
 3725  */
 3726 enum ice_status
 3727 ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
 3728                                 u16 num_vsis, u16 *vsi_handle_arr,
 3729                                 u8 *node_prio, u8 tc)
 3730 {
 3731         struct ice_sched_agg_vsi_info *agg_vsi_info;
 3732         struct ice_sched_node *tc_node, *agg_node;
 3733         enum ice_status status = ICE_ERR_PARAM;
 3734         struct ice_sched_agg_info *agg_info;
 3735         bool agg_id_present = false;
 3736         struct ice_hw *hw = pi->hw;
 3737         u16 i;
 3738 
 3739         ice_acquire_lock(&pi->sched_lock);
 3740         LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
 3741                             list_entry)
 3742                 if (agg_info->agg_id == agg_id) {
 3743                         agg_id_present = true;
 3744                         break;
 3745                 }
 3746         if (!agg_id_present)
 3747                 goto exit_agg_priority_per_tc;
 3748 
 3749         tc_node = ice_sched_get_tc_node(pi, tc);
 3750         if (!tc_node)
 3751                 goto exit_agg_priority_per_tc;
 3752 
 3753         agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
 3754         if (!agg_node)
 3755                 goto exit_agg_priority_per_tc;
 3756 
 3757         if (num_vsis > hw->max_children[agg_node->tx_sched_layer])
 3758                 goto exit_agg_priority_per_tc;
 3759 
 3760         for (i = 0; i < num_vsis; i++) {
 3761                 struct ice_sched_node *vsi_node;
 3762                 bool vsi_handle_valid = false;
 3763                 u16 vsi_handle;
 3764 
 3765                 status = ICE_ERR_PARAM;
 3766                 vsi_handle = vsi_handle_arr[i];
 3767                 if (!ice_is_vsi_valid(hw, vsi_handle))
 3768                         goto exit_agg_priority_per_tc;
 3769                 /* Verify child nodes before applying settings */
 3770                 LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
 3771                                     ice_sched_agg_vsi_info, list_entry)
 3772                         if (agg_vsi_info->vsi_handle == vsi_handle) {
 3773                                 vsi_handle_valid = true;
 3774                                 break;
 3775                         }
 3776 
 3777                 if (!vsi_handle_valid)
 3778                         goto exit_agg_priority_per_tc;
 3779 
 3780                 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
 3781                 if (!vsi_node)
 3782                         goto exit_agg_priority_per_tc;
 3783 
 3784                 if (ice_sched_find_node_in_subtree(hw, agg_node, vsi_node)) {
 3785                         /* Configure Priority */
 3786                         status = ice_sched_cfg_sibl_node_prio(pi, vsi_node,
 3787                                                               node_prio[i]);
 3788                         if (status)
 3789                                 break;
 3790                         status = ice_sched_save_vsi_prio(pi, vsi_handle, tc,
 3791                                                          node_prio[i]);
 3792                         if (status)
 3793                                 break;
 3794                 }
 3795         }
 3796 
 3797 exit_agg_priority_per_tc:
 3798         ice_release_lock(&pi->sched_lock);
 3799         return status;
 3800 }
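
/*
 * Illustrative usage (hypothetical handles, not part of the driver
 * source): set sibling priorities for two VSIs previously moved under
 * aggregator 1 on TC 0. Each handle must already be in the aggregator's
 * VSI list, or the call fails with ICE_ERR_PARAM.
 *
 *	u16 vsi_handle_arr[2] = { 0, 1 };
 *	u8 node_prio[2] = { 0, 1 };
 *	enum ice_status status;
 *
 *	status = ice_cfg_agg_vsi_priority_per_tc(pi, 1, 2, vsi_handle_arr,
 *						 node_prio, 0);
 */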
 3801 
 3802 /**
 3803  * ice_cfg_vsi_bw_alloc - config VSI BW alloc per TC
 3804  * @pi: port information structure
 3805  * @vsi_handle: software VSI handle
 3806  * @ena_tcmap: enabled TC map
 3807  * @rl_type: Rate limit type CIR/EIR
 3808  * @bw_alloc: Array of BW alloc
 3809  *
 3810  * This function configures the BW allocation of the passed-in VSI's
 3811  * node(s) for the enabled traffic class(es).
 3812  */
 3813 enum ice_status
 3814 ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
 3815                      enum ice_rl_type rl_type, u8 *bw_alloc)
 3816 {
 3817         enum ice_status status = ICE_SUCCESS;
 3818         u8 tc;
 3819 
 3820         if (!ice_is_vsi_valid(pi->hw, vsi_handle))
 3821                 return ICE_ERR_PARAM;
 3822 
 3823         ice_acquire_lock(&pi->sched_lock);
 3824 
 3825         /* Return success if no nodes are present across TC */
 3826         ice_for_each_traffic_class(tc) {
 3827                 struct ice_sched_node *tc_node, *vsi_node;
 3828 
 3829                 if (!ice_is_tc_ena(ena_tcmap, tc))
 3830                         continue;
 3831 
 3832                 tc_node = ice_sched_get_tc_node(pi, tc);
 3833                 if (!tc_node)
 3834                         continue;
 3835 
 3836                 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
 3837                 if (!vsi_node)
 3838                         continue;
 3839 
 3840                 status = ice_sched_cfg_node_bw_alloc(pi->hw, vsi_node, rl_type,
 3841                                                      bw_alloc[tc]);
 3842                 if (status)
 3843                         break;
 3844                 status = ice_sched_save_vsi_bw_alloc(pi, vsi_handle, tc,
 3845                                                      rl_type, bw_alloc[tc]);
 3846                 if (status)
 3847                         break;
 3848         }
 3849 
 3850         ice_release_lock(&pi->sched_lock);
 3851         return status;
 3852 }
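
/*
 * Illustrative usage (hypothetical values, not part of the driver source):
 * bw_alloc[] is indexed by TC, so with ena_tcmap = 0x3 only entries 0 and
 * 1 are consumed; here TC 0 gets twice the relative EIR weight of TC 1,
 * and TCs without a scheduling node are skipped.
 *
 *	u8 bw_alloc[ICE_MAX_TRAFFIC_CLASS] = { 100, 50 };
 *	enum ice_status status;
 *
 *	status = ice_cfg_vsi_bw_alloc(pi, 0, 0x3, ICE_MAX_BW, bw_alloc);
 */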
 3853 
 3854 /**
 3855  * ice_cfg_agg_bw_alloc - config aggregator BW alloc
 3856  * @pi: port information structure
 3857  * @agg_id: aggregator ID
 3858  * @ena_tcmap: enabled TC map
 3859  * @rl_type: rate limit type CIR/EIR
 3860  * @bw_alloc: array of BW alloc
 3861  *
 3862  * This function configures the BW allocation of the passed-in aggregator for
 3863  * the enabled traffic class(es).
 3864  */
 3865 enum ice_status
 3866 ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
 3867                      enum ice_rl_type rl_type, u8 *bw_alloc)
 3868 {
 3869         struct ice_sched_agg_info *agg_info;
 3870         bool agg_id_present = false;
 3871         enum ice_status status = ICE_SUCCESS;
 3872         struct ice_hw *hw = pi->hw;
 3873         u8 tc;
 3874 
 3875         ice_acquire_lock(&pi->sched_lock);
 3876         LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
 3877                             list_entry)
 3878                 if (agg_info->agg_id == agg_id) {
 3879                         agg_id_present = true;
 3880                         break;
 3881                 }
 3882         if (!agg_id_present) {
 3883                 status = ICE_ERR_PARAM;
 3884                 goto exit_cfg_agg_bw_alloc;
 3885         }
 3886 
 3887         /* Return success if no nodes are present across TC */
 3888         ice_for_each_traffic_class(tc) {
 3889                 struct ice_sched_node *tc_node, *agg_node;
 3890 
 3891                 if (!ice_is_tc_ena(ena_tcmap, tc))
 3892                         continue;
 3893 
 3894                 tc_node = ice_sched_get_tc_node(pi, tc);
 3895                 if (!tc_node)
 3896                         continue;
 3897 
 3898                 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
 3899                 if (!agg_node)
 3900                         continue;
 3901 
 3902                 status = ice_sched_cfg_node_bw_alloc(hw, agg_node, rl_type,
 3903                                                      bw_alloc[tc]);
 3904                 if (status)
 3905                         break;
 3906                 status = ice_sched_save_agg_bw_alloc(pi, agg_id, tc, rl_type,
 3907                                                      bw_alloc[tc]);
 3908                 if (status)
 3909                         break;
 3910         }
 3911 
 3912 exit_cfg_agg_bw_alloc:
 3913         ice_release_lock(&pi->sched_lock);
 3914         return status;
 3915 }
 3916 
 3917 /**
 3918  * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
 3919  * @hw: pointer to the HW struct
 3920  * @bw: bandwidth in Kbps
 3921  *
 3922  * This function calculates the wakeup parameter of the RL profile.
 3923  */
 3924 static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
 3925 {
 3926         s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
 3927         s32 wakeup_f_int;
 3928         u16 wakeup = 0;
 3929 
 3930         /* Get the wakeup integer value */
 3931         bytes_per_sec = DIV_S64(bw * 1000, BITS_PER_BYTE);
 3932         wakeup_int = DIV_S64(hw->psm_clk_freq, bytes_per_sec);
 3933         if (wakeup_int > 63) {
 3934                 wakeup = (u16)((1 << 15) | wakeup_int);
 3935         } else {
 3936                 /* Calculate the fraction value up to 4 decimal places and
 3937                  * convert the integer value to a constant multiplier
 3938                  */
 3939                 wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
 3940                 wakeup_a = DIV_S64(ICE_RL_PROF_MULTIPLIER *
 3941                                    hw->psm_clk_freq, bytes_per_sec);
 3942 
 3943                 /* Get Fraction value */
 3944                 wakeup_f = wakeup_a - wakeup_b;
 3945 
 3946                 /* Round up the Fractional value via Ceil(Fractional value) */
 3947                 if (wakeup_f > DIV_S64(ICE_RL_PROF_MULTIPLIER, 2))
 3948                         wakeup_f += 1;
 3949 
 3950                 wakeup_f_int = (s32)DIV_S64(wakeup_f * ICE_RL_PROF_FRACTION,
 3951                                             ICE_RL_PROF_MULTIPLIER);
 3952                 wakeup |= (u16)(wakeup_int << 9);
 3953                 wakeup |= (u16)(0x1ff & wakeup_f_int);
 3954         }
 3955 
 3956         return wakeup;
 3957 }
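
/*
 * Worked example (assumed clock rate, not from the driver source): with a
 * PSM clock of 446428571 Hz and bw = 10000 Kbps (10 Mbps):
 *
 *	bytes_per_sec = 10000 * 1000 / 8 = 1250000
 *	wakeup_int    = 446428571 / 1250000 = 357
 *
 * Since 357 > 63, the fractional path is skipped and the returned value
 * is (1 << 15) | 357 = 0x8165.
 */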
 3958 
 3959 /**
 3960  * ice_sched_bw_to_rl_profile - convert BW to profile parameters
 3961  * @hw: pointer to the HW struct
 3962  * @bw: bandwidth in Kbps
 3963  * @profile: profile parameters to return
 3964  *
 3965  * This function converts the BW to profile structure format.
 3966  */
 3967 static enum ice_status
 3968 ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
 3969                            struct ice_aqc_rl_profile_elem *profile)
 3970 {
 3971         enum ice_status status = ICE_ERR_PARAM;
 3972         s64 bytes_per_sec, ts_rate, mv_tmp;
 3973         bool found = false;
 3974         s32 encode = 0;
 3975         s64 mv = 0;
 3976         s32 i;
 3977 
 3978         /* BW settings range from 0.5 Mb/sec to 100 Gb/sec */
 3979         if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
 3980                 return status;
 3981 
 3982         /* Bytes per second from Kbps */
 3983         bytes_per_sec = DIV_S64(bw * 1000, BITS_PER_BYTE);
 3984 
 3985         /* encode is 6 bits, but only 5 bits are really useful */
 3986         for (i = 0; i < 64; i++) {
 3987                 u64 pow_result = BIT_ULL(i);
 3988 
 3989                 ts_rate = DIV_S64(hw->psm_clk_freq,
 3990                                   pow_result * ICE_RL_PROF_TS_MULTIPLIER);
 3991                 if (ts_rate <= 0)
 3992                         continue;
 3993 
 3994                 /* Multiplier value */
 3995                 mv_tmp = DIV_S64(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
 3996                                  ts_rate);
 3997 
 3998                 /* Round to the nearest ICE_RL_PROF_MULTIPLIER */
 3999                 mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
 4000 
 4001                 /* Stop at the first multiplier value greater than the
 4002                  * given accuracy, in bytes
 4003                  */
 4004                 if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
 4005                         encode = i;
 4006                         found = true;
 4007                         break;
 4008                 }
 4009         }
 4010         if (found) {
 4011                 u16 wm;
 4012 
 4013                 wm = ice_sched_calc_wakeup(hw, bw);
 4014                 profile->rl_multiply = CPU_TO_LE16(mv);
 4015                 profile->wake_up_calc = CPU_TO_LE16(wm);
 4016                 profile->rl_encode = CPU_TO_LE16(encode);
 4017                 status = ICE_SUCCESS;
 4018         } else {
 4019                 status = ICE_ERR_DOES_NOT_EXIST;
 4020         }
 4021 
 4022         return status;
 4023 }
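
/*
 * Sketch of the search above (descriptive only; the ICE_RL_PROF_*
 * constants are defined in ice_sched.h): pow_result doubles on every
 * iteration, so ts_rate halves and the multiplier mv grows until it first
 * exceeds ICE_RL_PROF_ACCURACY_BYTES. That iteration index becomes
 * rl_encode, the rounded multiplier becomes rl_multiply, and
 * ice_sched_calc_wakeup() supplies wake_up_calc for the same bw.
 */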
 4024 
 4025 /**
 4026  * ice_sched_add_rl_profile - add RL profile
 4027  * @hw: pointer to the hardware structure
 4028  * @rl_type: type of rate limit BW - min, max, or shared
 4029  * @bw: bandwidth in Kbps - Kilo bits per sec
 4030  * @layer_num: specifies in which layer to create profile
 4031  *
 4032  * This function first checks the existing list for a profile with the
 4033  * corresponding BW parameter. If one exists, it returns the associated
 4034  * profile; otherwise it creates a new rate limit profile for the requested
 4035  * BW, adds it to the HW DB and the local list, and returns the new profile,
 4036  * or NULL on error. The caller needs to hold the scheduler lock.
 4037  */
 4038 static struct ice_aqc_rl_profile_info *
 4039 ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
 4040                          u32 bw, u8 layer_num)
 4041 {
 4042         struct ice_aqc_rl_profile_info *rl_prof_elem;
 4043         u16 profiles_added = 0, num_profiles = 1;
 4044         struct ice_aqc_rl_profile_elem *buf;
 4045         enum ice_status status;
 4046         u8 profile_type;
 4047 
 4048         if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
 4049                 return NULL;
 4050         switch (rl_type) {
 4051         case ICE_MIN_BW:
 4052                 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
 4053                 break;
 4054         case ICE_MAX_BW:
 4055                 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
 4056                 break;
 4057         case ICE_SHARED_BW:
 4058                 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
 4059                 break;
 4060         default:
 4061                 return NULL;
 4062         }
 4063 
 4064         if (!hw)
 4065                 return NULL;
 4066         LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
 4067                             ice_aqc_rl_profile_info, list_entry)
 4068                 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
 4069                     profile_type && rl_prof_elem->bw == bw)
 4070                         /* Return existing profile ID info */
 4071                         return rl_prof_elem;
 4072 
 4073         /* Create new profile ID */
 4074         rl_prof_elem = (struct ice_aqc_rl_profile_info *)
 4075                 ice_malloc(hw, sizeof(*rl_prof_elem));
 4076 
 4077         if (!rl_prof_elem)
 4078                 return NULL;
 4079 
 4080         status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
 4081         if (status != ICE_SUCCESS)
 4082                 goto exit_add_rl_prof;
 4083 
 4084         rl_prof_elem->bw = bw;
 4085         /* layer_num is zero-based, and FW expects level from 1 to 9 */
 4086         rl_prof_elem->profile.level = layer_num + 1;
 4087         rl_prof_elem->profile.flags = profile_type;
 4088         rl_prof_elem->profile.max_burst_size = CPU_TO_LE16(hw->max_burst_size);
 4089 
 4090         /* Create new entry in HW DB */
 4091         buf = &rl_prof_elem->profile;
 4092         status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
 4093                                        &profiles_added, NULL);
 4094         if (status || profiles_added != num_profiles)
 4095                 goto exit_add_rl_prof;
 4096 
 4097         /* Good entry - add in the list */
 4098         rl_prof_elem->prof_id_ref = 0;
 4099         LIST_ADD(&rl_prof_elem->list_entry, &hw->rl_prof_list[layer_num]);
 4100         return rl_prof_elem;
 4101 
 4102 exit_add_rl_prof:
 4103         ice_free(hw, rl_prof_elem);
 4104         return NULL;
 4105 }
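
/*
 * Example (illustrative sketch, hypothetical values): profiles are deduped
 * per layer by (profile type, bw), so two requests for the same EIR
 * bandwidth at the same layer share one HW profile ID:
 *
 *	struct ice_aqc_rl_profile_info *a, *b;
 *
 *	a = ice_sched_add_rl_profile(hw, ICE_MAX_BW, 100000, 5);
 *	b = ice_sched_add_rl_profile(hw, ICE_MAX_BW, 100000, 5);
 *
 * Here a == b: the second call returns the existing list entry instead of
 * allocating a new profile in the HW DB. The scheduler lock must be held
 * across both calls.
 */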
 4106 
 4107 /**
 4108  * ice_sched_cfg_node_bw_lmt - configure node sched params
 4109  * @hw: pointer to the HW struct
 4110  * @node: sched node to configure
 4111  * @rl_type: rate limit type CIR, EIR, or shared
 4112  * @rl_prof_id: rate limit profile ID
 4113  *
 4114  * This function configures node element's BW limit.
 4115  */
 4116 static enum ice_status
 4117 ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
 4118                           enum ice_rl_type rl_type, u16 rl_prof_id)
 4119 {
 4120         struct ice_aqc_txsched_elem_data buf;
 4121         struct ice_aqc_txsched_elem *data;
 4122 
 4123         buf = node->info;
 4124         data = &buf.data;
 4125         switch (rl_type) {
 4126         case ICE_MIN_BW:
 4127                 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
 4128                 data->cir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
 4129                 break;
 4130         case ICE_MAX_BW:
 4131                 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
 4132                 data->eir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
 4133                 break;
 4134         case ICE_SHARED_BW:
 4135                 data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
 4136                 data->srl_id = CPU_TO_LE16(rl_prof_id);
 4137                 break;
 4138         default:
 4139                 /* Unknown rate limit type */
 4140                 return ICE_ERR_PARAM;
 4141         }
 4142 
 4143         /* Configure element */
 4144         return ice_sched_update_elem(hw, node, &buf);
 4145 }
 4146 
 4147 /**
 4148  * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
 4149  * @node: sched node
 4150  * @rl_type: rate limit type
 4151  *
 4152  * If an existing profile matches, it returns the corresponding rate
 4153  * limit profile ID; otherwise it returns an invalid ID as an error.
 4154  */
 4155 static u16
 4156 ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
 4157                               enum ice_rl_type rl_type)
 4158 {
 4159         u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
 4160         struct ice_aqc_txsched_elem *data;
 4161 
 4162         data = &node->info.data;
 4163         switch (rl_type) {
 4164         case ICE_MIN_BW:
 4165                 if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
 4166                         rl_prof_id = LE16_TO_CPU(data->cir_bw.bw_profile_idx);
 4167                 break;
 4168         case ICE_MAX_BW:
 4169                 if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
 4170                         rl_prof_id = LE16_TO_CPU(data->eir_bw.bw_profile_idx);
 4171                 break;
 4172         case ICE_SHARED_BW:
 4173                 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
 4174                         rl_prof_id = LE16_TO_CPU(data->srl_id);
 4175                 break;
 4176         default:
 4177                 break;
 4178         }
 4179 
 4180         return rl_prof_id;
 4181 }
 4182 
 4183 /**
 4184  * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
 4185  * @pi: port information structure
 4186  * @rl_type: type of rate limit BW - min, max, or shared
 4187  * @layer_index: layer index
 4188  *
 4189  * This function returns the requested profile creation layer.
 4190  */
 4191 static u8
 4192 ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
 4193                             u8 layer_index)
 4194 {
 4195         struct ice_hw *hw = pi->hw;
 4196 
 4197         if (layer_index >= hw->num_tx_sched_layers)
 4198                 return ICE_SCHED_INVAL_LAYER_NUM;
 4199         switch (rl_type) {
 4200         case ICE_MIN_BW:
 4201                 if (hw->layer_info[layer_index].max_cir_rl_profiles)
 4202                         return layer_index;
 4203                 break;
 4204         case ICE_MAX_BW:
 4205                 if (hw->layer_info[layer_index].max_eir_rl_profiles)
 4206                         return layer_index;
 4207                 break;
 4208         case ICE_SHARED_BW:
 4209                 /* if current layer doesn't support SRL profile creation
 4210                  * then try a layer up or down.
 4211                  */
 4212                 if (hw->layer_info[layer_index].max_srl_profiles)
 4213                         return layer_index;
 4214                 else if (layer_index < hw->num_tx_sched_layers - 1 &&
 4215                          hw->layer_info[layer_index + 1].max_srl_profiles)
 4216                         return layer_index + 1;
 4217                 else if (layer_index > 0 &&
 4218                          hw->layer_info[layer_index - 1].max_srl_profiles)
 4219                         return layer_index - 1;
 4220                 break;
 4221         default:
 4222                 break;
 4223         }
 4224         return ICE_SCHED_INVAL_LAYER_NUM;
 4225 }
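
/*
 * Example (illustrative sketch, hypothetical layer capabilities): SRL
 * selection can shift one layer. If a node sits on layer 7 and
 *
 *	hw->layer_info[7].max_srl_profiles == 0
 *	hw->layer_info[8].max_srl_profiles == 0
 *	hw->layer_info[6].max_srl_profiles != 0
 *
 * then ice_sched_get_rl_prof_layer(pi, ICE_SHARED_BW, 7) returns 6. The
 * "+ 1" candidate (toward the leaves) is tried before the "- 1" candidate
 * (toward the root).
 */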
 4226 
 4227 /**
 4228  * ice_sched_get_srl_node - get shared rate limit node
 4229  * @node: tree node
 4230  * @srl_layer: shared rate limit layer
 4231  *
 4232  * This function returns SRL node to be used for shared rate limit purpose.
 4233  * The caller needs to hold scheduler lock.
 4234  */
 4235 static struct ice_sched_node *
 4236 ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
 4237 {
 4238         if (srl_layer > node->tx_sched_layer)
 4239                 return node->children[0];
 4240         else if (srl_layer < node->tx_sched_layer)
 4241                 /* A node can't be created without a parent. Every node
 4242                  * except the root always has a valid parent.
 4243                  */
 4244                 return node->parent;
 4245         else
 4246                 return node;
 4247 }
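
/*
 * Example (illustrative sketch): the node that actually carries the SRL
 * profile depends on where the selected layer sits relative to the node:
 *
 *	srl_layer > node->tx_sched_layer   ->  node->children[0]
 *	srl_layer < node->tx_sched_layer   ->  node->parent
 *	srl_layer == node->tx_sched_layer  ->  node itself
 *
 * This pairs with ice_sched_validate_srl_node() below, which only accepts
 * the single-child shapes in which this substitution still limits exactly
 * the requested node's traffic.
 */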
 4248 
 4249 /**
 4250  * ice_sched_rm_rl_profile - remove RL profile ID
 4251  * @hw: pointer to the hardware structure
 4252  * @layer_num: layer number where profiles are saved
 4253  * @profile_type: profile type like EIR, CIR, or SRL
 4254  * @profile_id: profile ID to remove
 4255  *
 4256  * This function removes the rate limit profile of type 'profile_type' with
 4257  * ID 'profile_id' from layer 'layer_num'. The caller needs to hold the
 4258  * scheduler lock.
 4259  */
 4260 static enum ice_status
 4261 ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
 4262                         u16 profile_id)
 4263 {
 4264         struct ice_aqc_rl_profile_info *rl_prof_elem;
 4265         enum ice_status status = ICE_SUCCESS;
 4266 
 4267         if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
 4268                 return ICE_ERR_PARAM;
 4269         /* Check the existing list for RL profile */
 4270         LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
 4271                             ice_aqc_rl_profile_info, list_entry)
 4272                 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
 4273                     profile_type &&
 4274                     LE16_TO_CPU(rl_prof_elem->profile.profile_id) ==
 4275                     profile_id) {
 4276                         if (rl_prof_elem->prof_id_ref)
 4277                                 rl_prof_elem->prof_id_ref--;
 4278 
 4279                         /* Remove old profile ID from database */
 4280                         status = ice_sched_del_rl_profile(hw, rl_prof_elem);
 4281                         if (status && status != ICE_ERR_IN_USE)
 4282                                 ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
 4283                         break;
 4284                 }
 4285         if (status == ICE_ERR_IN_USE)
 4286                 status = ICE_SUCCESS;
 4287         return status;
 4288 }
 4289 
 4290 /**
 4291  * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
 4292  * @pi: port information structure
 4293  * @node: pointer to node structure
 4294  * @rl_type: rate limit type min, max, or shared
 4295  * @layer_num: layer number where RL profiles are saved
 4296  *
 4297  * This function configures node element's BW rate limit profile ID of
 4298  * type CIR, EIR, or SRL to default. This function needs to be called
 4299  * with the scheduler lock held.
 4300  */
 4301 static enum ice_status
 4302 ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
 4303                            struct ice_sched_node *node,
 4304                            enum ice_rl_type rl_type, u8 layer_num)
 4305 {
 4306         enum ice_status status;
 4307         struct ice_hw *hw;
 4308         u8 profile_type;
 4309         u16 rl_prof_id;
 4310         u16 old_id;
 4311 
 4312         hw = pi->hw;
 4313         switch (rl_type) {
 4314         case ICE_MIN_BW:
 4315                 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
 4316                 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
 4317                 break;
 4318         case ICE_MAX_BW:
 4319                 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
 4320                 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
 4321                 break;
 4322         case ICE_SHARED_BW:
 4323                 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
 4324                 /* No SRL is configured for default case */
 4325                 rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
 4326                 break;
 4327         default:
 4328                 return ICE_ERR_PARAM;
 4329         }
 4330         /* Save existing RL prof ID for later clean up */
 4331         old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
 4332         /* Configure BW scheduling parameters */
 4333         status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
 4334         if (status)
 4335                 return status;
 4336 
 4337         /* Remove stale RL profile ID */
 4338         if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
 4339             old_id == ICE_SCHED_INVAL_PROF_ID)
 4340                 return ICE_SUCCESS;
 4341 
 4342         return ice_sched_rm_rl_profile(hw, layer_num, profile_type, old_id);
 4343 }
 4344 
 4345 /**
 4346  * ice_sched_set_node_bw - set node's bandwidth
 4347  * @pi: port information structure
 4348  * @node: tree node
 4349  * @rl_type: rate limit type min, max, or shared
 4350  * @bw: bandwidth in Kbps - Kilo bits per sec
 4351  * @layer_num: layer number
 4352  *
 4353  * This function adds a new profile for the requested BW, configures the
 4354  * node's RL profile ID of type CIR, EIR, or SRL, and removes the old
 4355  * profile ID from the local database. The caller holds the scheduler lock.
 4356  */
 4357 static enum ice_status
 4358 ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
 4359                       enum ice_rl_type rl_type, u32 bw, u8 layer_num)
 4360 {
 4361         struct ice_aqc_rl_profile_info *rl_prof_info;
 4362         enum ice_status status = ICE_ERR_PARAM;
 4363         struct ice_hw *hw = pi->hw;
 4364         u16 old_id, rl_prof_id;
 4365 
 4366         rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num);
 4367         if (!rl_prof_info)
 4368                 return status;
 4369 
 4370         rl_prof_id = LE16_TO_CPU(rl_prof_info->profile.profile_id);
 4371 
 4372         /* Save existing RL prof ID for later clean up */
 4373         old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
 4374         /* Configure BW scheduling parameters */
 4375         status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
 4376         if (status)
 4377                 return status;
 4378 
 4379         /* New changes have been applied */
 4380         /* Increment the profile ID reference count */
 4381         rl_prof_info->prof_id_ref++;
 4382 
 4383         /* Check for old ID removal */
 4384         if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
 4385             old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
 4386                 return ICE_SUCCESS;
 4387 
 4388         return ice_sched_rm_rl_profile(hw, layer_num,
 4389                                        rl_prof_info->profile.flags &
 4390                                        ICE_AQC_RL_PROFILE_TYPE_M, old_id);
 4391 }
 4392 
 4393 /**
 4394  * ice_sched_set_node_bw_lmt - set node's BW limit
 4395  * @pi: port information structure
 4396  * @node: tree node
 4397  * @rl_type: rate limit type min, max, or shared
 4398  * @bw: bandwidth in Kbps - Kilo bits per sec
 4399  *
 4400  * It updates the node's BW limit parameters, such as the RL profile ID of
 4401  * type CIR, EIR, or SRL. The caller needs to hold the scheduler lock.
 4402  *
 4403  * NOTE: Caller provides the correct SRL node in case of shared profile
 4404  * settings.
 4405  */
 4406 static enum ice_status
 4407 ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
 4408                           enum ice_rl_type rl_type, u32 bw)
 4409 {
 4410         struct ice_hw *hw;
 4411         u8 layer_num;
 4412 
 4413         if (!pi)
 4414                 return ICE_ERR_PARAM;
 4415         hw = pi->hw;
 4416         /* Remove unused RL profile IDs from HW and SW DB */
 4417         ice_sched_rm_unused_rl_prof(hw);
 4418 
 4419         layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
 4420                                                 node->tx_sched_layer);
 4421         if (layer_num >= hw->num_tx_sched_layers)
 4422                 return ICE_ERR_PARAM;
 4423 
 4424         if (bw == ICE_SCHED_DFLT_BW)
 4425                 return ice_sched_set_node_bw_dflt(pi, node, rl_type, layer_num);
 4426         return ice_sched_set_node_bw(pi, node, rl_type, bw, layer_num);
 4427 }
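
/*
 * Example (illustrative sketch, hypothetical bandwidth): the same entry
 * point programs and clears a limit; the ICE_SCHED_DFLT_BW sentinel
 * selects the default (profile removal) path:
 *
 *	ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, 500000);
 *	...
 *	ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, ICE_SCHED_DFLT_BW);
 *
 * Both calls require the scheduler lock to be held.
 */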
 4428 
 4429 /**
 4430  * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
 4431  * @pi: port information structure
 4432  * @node: pointer to node structure
 4433  * @rl_type: rate limit type min, max, or shared
 4434  *
 4435  * This function configures node element's BW rate limit profile ID of
 4436  * type CIR, EIR, or SRL to default. This function needs to be called
 4437  * with the scheduler lock held.
 4438  */
 4439 static enum ice_status
 4440 ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
 4441                                struct ice_sched_node *node,
 4442                                enum ice_rl_type rl_type)
 4443 {
 4444         return ice_sched_set_node_bw_lmt(pi, node, rl_type,
 4445                                          ICE_SCHED_DFLT_BW);
 4446 }
 4447 
 4448 /**
 4449  * ice_sched_validate_srl_node - Check node for SRL applicability
 4450  * @node: sched node to configure
 4451  * @sel_layer: selected SRL layer
 4452  *
 4453  * This function checks if the SRL can be applied to a selected layer node on
 4454  * behalf of the requested node (first argument). This function needs to be
 4455  * called with scheduler lock held.
 4456  */
 4457 static enum ice_status
 4458 ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
 4459 {
 4460         /* SRL profiles are not available on all layers. Check if the
 4461          * SRL profile can be applied to a node above or below the
 4462          * requested node. SRL configuration is possible only if the
 4463          * selected layer's node has single child.
 4464          */
 4465         if (sel_layer == node->tx_sched_layer ||
 4466             ((sel_layer == node->tx_sched_layer + 1) &&
 4467             node->num_children == 1) ||
 4468             ((sel_layer == node->tx_sched_layer - 1) &&
 4469             (node->parent && node->parent->num_children == 1)))
 4470                 return ICE_SUCCESS;
 4471 
 4472         return ICE_ERR_CFG;
 4473 }
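
/*
 * Example (illustrative sketch): for a node on layer N, the accepted SRL
 * configurations are:
 *
 *	sel_layer == N      always valid (profile lands on the node itself)
 *	sel_layer == N + 1  valid only if the node has exactly one child
 *	sel_layer == N - 1  valid only if the node's parent has exactly one
 *	                    child
 *
 * In the single-child cases the neighboring node carries the same traffic,
 * so the shared limit still applies to the requested node.
 */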
 4474 
 4475 /**
 4476  * ice_sched_save_q_bw - save queue node's BW information
 4477  * @q_ctx: queue context structure
 4478  * @rl_type: rate limit type min, max, or shared
 4479  * @bw: bandwidth in Kbps - Kilo bits per sec
 4480  *
 4481  * Save BW information of the queue type node for post-replay use.
 4482  */
 4483 static enum ice_status
 4484 ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
 4485 {
 4486         switch (rl_type) {
 4487         case ICE_MIN_BW:
 4488                 ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
 4489                 break;
 4490         case ICE_MAX_BW:
 4491                 ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
 4492                 break;
 4493         case ICE_SHARED_BW:
 4494                 ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
 4495                 break;
 4496         default:
 4497                 return ICE_ERR_PARAM;
 4498         }
 4499         return ICE_SUCCESS;
 4500 }
 4501 
 4502 /**
 4503  * ice_sched_set_q_bw_lmt - sets queue BW limit
 4504  * @pi: port information structure
 4505  * @vsi_handle: sw VSI handle
 4506  * @tc: traffic class
 4507  * @q_handle: software queue handle
 4508  * @rl_type: min, max, or shared
 4509  * @bw: bandwidth in Kbps
 4510  *
 4511  * This function sets BW limit of queue scheduling node.
 4512  */
 4513 static enum ice_status
 4514 ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 4515                        u16 q_handle, enum ice_rl_type rl_type, u32 bw)
 4516 {
 4517         enum ice_status status = ICE_ERR_PARAM;
 4518         struct ice_sched_node *node;
 4519         struct ice_q_ctx *q_ctx;
 4520 
 4521         if (!ice_is_vsi_valid(pi->hw, vsi_handle))
 4522                 return ICE_ERR_PARAM;
 4523         ice_acquire_lock(&pi->sched_lock);
 4524         q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
 4525         if (!q_ctx)
 4526                 goto exit_q_bw_lmt;
 4527         node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
 4528         if (!node) {
 4529                 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
 4530                 goto exit_q_bw_lmt;
 4531         }
 4532 
 4533         /* Return error if it is not a leaf node */
 4534         if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
 4535                 goto exit_q_bw_lmt;
 4536 
 4537         /* SRL bandwidth layer selection */
 4538         if (rl_type == ICE_SHARED_BW) {
 4539                 u8 sel_layer; /* selected layer */
 4540 
 4541                 sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
 4542                                                         node->tx_sched_layer);
 4543                 if (sel_layer >= pi->hw->num_tx_sched_layers) {
 4544                         status = ICE_ERR_PARAM;
 4545                         goto exit_q_bw_lmt;
 4546                 }
 4547                 status = ice_sched_validate_srl_node(node, sel_layer);
 4548                 if (status)
 4549                         goto exit_q_bw_lmt;
 4550         }
 4551 
 4552         if (bw == ICE_SCHED_DFLT_BW)
 4553                 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
 4554         else
 4555                 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
 4556 
 4557         if (!status)
 4558                 status = ice_sched_save_q_bw(q_ctx, rl_type, bw);
 4559 
 4560 exit_q_bw_lmt:
 4561         ice_release_lock(&pi->sched_lock);
 4562         return status;
 4563 }
 4564 
 4565 /**
 4566  * ice_cfg_q_bw_lmt - configure queue BW limit
 4567  * @pi: port information structure
 4568  * @vsi_handle: sw VSI handle
 4569  * @tc: traffic class
 4570  * @q_handle: software queue handle
 4571  * @rl_type: min, max, or shared
 4572  * @bw: bandwidth in Kbps
 4573  *
 4574  * This function configures BW limit of queue scheduling node.
 4575  */
 4576 enum ice_status
 4577 ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 4578                  u16 q_handle, enum ice_rl_type rl_type, u32 bw)
 4579 {
 4580         return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
 4581                                       bw);
 4582 }
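
/*
 * Example (illustrative sketch, hypothetical handles): a caller could cap
 * a single Tx queue at 100 Mbps (100000 Kbps) on TC 0 and later remove
 * the cap with the default-limit variant below:
 *
 *	status = ice_cfg_q_bw_lmt(pi, vsi_handle, 0, q_handle,
 *				  ICE_MAX_BW, 100000);
 *	...
 *	status = ice_cfg_q_bw_dflt_lmt(pi, vsi_handle, 0, q_handle,
 *				       ICE_MAX_BW);
 *
 * Locking is handled internally by ice_sched_set_q_bw_lmt().
 */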
 4583 
 4584 /**
 4585  * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
 4586  * @pi: port information structure
 4587  * @vsi_handle: sw VSI handle
 4588  * @tc: traffic class
 4589  * @q_handle: software queue handle
 4590  * @rl_type: min, max, or shared
 4591  *
 4592  * This function configures BW default limit of queue scheduling node.
 4593  */
 4594 enum ice_status
 4595 ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 4596                       u16 q_handle, enum ice_rl_type rl_type)
 4597 {
 4598         return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
 4599                                       ICE_SCHED_DFLT_BW);
 4600 }
 4601 
 4602 /**
 4603  * ice_sched_save_tc_node_bw - save TC node BW limit
 4604  * @pi: port information structure
 4605  * @tc: TC number
 4606  * @rl_type: min or max
 4607  * @bw: bandwidth in Kbps
 4608  *
 4609  * This function saves the modified bandwidth settings for later replay
 4610  * (restore) after a reset.
 4611  */
 4612 static enum ice_status
 4613 ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
 4614                           enum ice_rl_type rl_type, u32 bw)
 4615 {
 4616         if (tc >= ICE_MAX_TRAFFIC_CLASS)
 4617                 return ICE_ERR_PARAM;
 4618         switch (rl_type) {
 4619         case ICE_MIN_BW:
 4620                 ice_set_clear_cir_bw(&pi->tc_node_bw_t_info[tc], bw);
 4621                 break;
 4622         case ICE_MAX_BW:
 4623                 ice_set_clear_eir_bw(&pi->tc_node_bw_t_info[tc], bw);
 4624                 break;
 4625         case ICE_SHARED_BW:
 4626                 ice_set_clear_shared_bw(&pi->tc_node_bw_t_info[tc], bw);
 4627                 break;
 4628         default:
 4629                 return ICE_ERR_PARAM;
 4630         }
 4631         return ICE_SUCCESS;
 4632 }
 4633 
 4634 /**
 4635  * ice_sched_set_tc_node_bw_lmt - sets TC node BW limit
 4636  * @pi: port information structure
 4637  * @tc: TC number
 4638  * @rl_type: min or max
 4639  * @bw: bandwidth in Kbps
 4640  *
 4641  * This function configures bandwidth limit of TC node.
 4642  */
 4643 static enum ice_status
 4644 ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
 4645                              enum ice_rl_type rl_type, u32 bw)
 4646 {
 4647         enum ice_status status = ICE_ERR_PARAM;
 4648         struct ice_sched_node *tc_node;
 4649 
 4650         if (tc >= ICE_MAX_TRAFFIC_CLASS)
 4651                 return status;
 4652         ice_acquire_lock(&pi->sched_lock);
 4653         tc_node = ice_sched_get_tc_node(pi, tc);
 4654         if (!tc_node)
 4655                 goto exit_set_tc_node_bw;
 4656         if (bw == ICE_SCHED_DFLT_BW)
 4657                 status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type);
 4658         else
 4659                 status = ice_sched_set_node_bw_lmt(pi, tc_node, rl_type, bw);
 4660         if (!status)
 4661                 status = ice_sched_save_tc_node_bw(pi, tc, rl_type, bw);
 4662 
 4663 exit_set_tc_node_bw:
 4664         ice_release_lock(&pi->sched_lock);
 4665         return status;
 4666 }
 4667 
 4668 /**
 4669  * ice_cfg_tc_node_bw_lmt - configure TC node BW limit
 4670  * @pi: port information structure
 4671  * @tc: TC number
 4672  * @rl_type: min or max
 4673  * @bw: bandwidth in Kbps
 4674  *
 4675  * This function configures BW limit of TC node.
 4676  * Note: The minimum guaranteed reservation is done via DCBX.
 4677  */
 4678 enum ice_status
 4679 ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
 4680                        enum ice_rl_type rl_type, u32 bw)
 4681 {
 4682         return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, bw);
 4683 }
 4684 
 4685 /**
 4686  * ice_cfg_tc_node_bw_dflt_lmt - configure TC node BW default limit
 4687  * @pi: port information structure
 4688  * @tc: TC number
 4689  * @rl_type: min or max
 4690  *
 4691  * This function configures BW default limit of TC node.
 4692  */
 4693 enum ice_status
 4694 ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
 4695                             enum ice_rl_type rl_type)
 4696 {
 4697         return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, ICE_SCHED_DFLT_BW);
 4698 }
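
/*
 * Example (illustrative sketch, hypothetical values): together the two
 * functions above let a caller shape and then unshape a traffic class:
 *
 *	ice_cfg_tc_node_bw_lmt(pi, tc, ICE_MAX_BW, 1000000);
 *	...
 *	ice_cfg_tc_node_bw_dflt_lmt(pi, tc, ICE_MAX_BW);
 *
 * The first call caps the TC at 1 Gbps; the second restores the default
 * (uncapped) profile. Per the notes above, minimum (CIR) guarantees for a
 * TC are normally negotiated through DCBX rather than set here.
 */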
 4699 
 4700 /**
 4701  * ice_sched_save_tc_node_bw_alloc - save TC node's BW alloc information
 4702  * @pi: port information structure
 4703  * @tc: traffic class
 4704  * @rl_type: rate limit type min or max
 4705  * @bw_alloc: Bandwidth allocation information
 4706  *
 4707  * Save BW alloc information of the TC type node for post-replay use.
 4708  */
 4709 static enum ice_status
 4710 ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
 4711                                 enum ice_rl_type rl_type, u16 bw_alloc)
 4712 {
 4713         if (tc >= ICE_MAX_TRAFFIC_CLASS)
 4714                 return ICE_ERR_PARAM;
 4715         switch (rl_type) {
 4716         case ICE_MIN_BW:
 4717                 ice_set_clear_cir_bw_alloc(&pi->tc_node_bw_t_info[tc],
 4718                                            bw_alloc);
 4719                 break;
 4720         case ICE_MAX_BW:
 4721                 ice_set_clear_eir_bw_alloc(&pi->tc_node_bw_t_info[tc],
 4722                                            bw_alloc);
 4723                 break;
 4724         default:
 4725                 return ICE_ERR_PARAM;
 4726         }
 4727         return ICE_SUCCESS;
 4728 }
 4729 
 4730 /**
 4731  * ice_sched_set_tc_node_bw_alloc - set TC node BW alloc
 4732  * @pi: port information structure
 4733  * @tc: TC number
 4734  * @rl_type: min or max
 4735  * @bw_alloc: bandwidth alloc
 4736  *
 4737  * This function configures the bandwidth alloc of the TC node, saves the
 4738  * changed setting for later replay, and returns success if it succeeds
 4739  * in modifying the bandwidth alloc setting.
 4740  */
 4741 static enum ice_status
 4742 ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
 4743                                enum ice_rl_type rl_type, u8 bw_alloc)
 4744 {
 4745         enum ice_status status = ICE_ERR_PARAM;
 4746         struct ice_sched_node *tc_node;
 4747 
 4748         if (tc >= ICE_MAX_TRAFFIC_CLASS)
 4749                 return status;
 4750         ice_acquire_lock(&pi->sched_lock);
 4751         tc_node = ice_sched_get_tc_node(pi, tc);
 4752         if (!tc_node)
 4753                 goto exit_set_tc_node_bw_alloc;
 4754         status = ice_sched_cfg_node_bw_alloc(pi->hw, tc_node, rl_type,
 4755                                              bw_alloc);
 4756         if (status)
 4757                 goto exit_set_tc_node_bw_alloc;
 4758         status = ice_sched_save_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);
 4759 
 4760 exit_set_tc_node_bw_alloc:
 4761         ice_release_lock(&pi->sched_lock);
 4762         return status;
 4763 }
 4764 
 4765 /**
 4766  * ice_cfg_tc_node_bw_alloc - configure TC node BW alloc
 4767  * @pi: port information structure
 4768  * @tc: TC number
 4769  * @rl_type: min or max
 4770  * @bw_alloc: bandwidth alloc
 4771  *
 4772  * This function configures the BW allocation of the TC node.
 4773  * Note: The minimum guaranteed reservation is done via DCBX.
 4774  */
 4775 enum ice_status
 4776 ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
 4777                          enum ice_rl_type rl_type, u8 bw_alloc)
 4778 {
 4779         return ice_sched_set_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);
 4780 }
 4781 
 4782 /**
 4783  * ice_sched_set_agg_bw_dflt_lmt - set aggregator node's BW limit to default
 4784  * @pi: port information structure
 4785  * @vsi_handle: software VSI handle
 4786  *
 4787  * This function retrieves the aggregator node for each TC of the given
 4788  * VSI and sets that node's BW limits to default. This function needs to
 4789  * be called with the scheduler lock held.
 4790  */
 4791 enum ice_status
 4792 ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle)
 4793 {
 4794         struct ice_vsi_ctx *vsi_ctx;
 4795         enum ice_status status = ICE_SUCCESS;
 4796         u8 tc;
 4797 
 4798         if (!ice_is_vsi_valid(pi->hw, vsi_handle))
 4799                 return ICE_ERR_PARAM;
 4800         vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
 4801         if (!vsi_ctx)
 4802                 return ICE_ERR_PARAM;
 4803 
 4804         ice_for_each_traffic_class(tc) {
 4805                 struct ice_sched_node *node;
 4806 
 4807                 node = vsi_ctx->sched.ag_node[tc];
 4808                 if (!node)
 4809                         continue;
 4810 
 4811                 /* Set min profile to default */
 4812                 status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MIN_BW);
 4813                 if (status)
 4814                         break;
 4815 
 4816                 /* Set max profile to default */
 4817                 status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MAX_BW);
 4818                 if (status)
 4819                         break;
 4820 
 4821                 /* Remove shared profile, if there is one */
 4822                 status = ice_sched_set_node_bw_dflt_lmt(pi, node,
 4823                                                         ICE_SHARED_BW);
 4824                 if (status)
 4825                         break;
 4826         }
 4827 
 4828         return status;
 4829 }
 4830 
 4831 /**
 4832  * ice_sched_get_node_by_id_type - get node from ID type
 4833  * @pi: port information structure
 4834  * @id: identifier
 4835  * @agg_type: type of aggregator
 4836  * @tc: traffic class
 4837  *
 4838  * This function returns the node identified by the given ID and aggregator
 4839  * type, based on the traffic class (TC). This function needs to be called
 4840  * with the scheduler lock held.
 4841  */
 4842 static struct ice_sched_node *
 4843 ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
 4844                               enum ice_agg_type agg_type, u8 tc)
 4845 {
 4846         struct ice_sched_node *node = NULL;
 4847         struct ice_sched_node *child_node;
 4848 
 4849         switch (agg_type) {
 4850         case ICE_AGG_TYPE_VSI: {
 4851                 struct ice_vsi_ctx *vsi_ctx;
 4852                 u16 vsi_handle = (u16)id;
 4853 
 4854                 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
 4855                         break;
 4856                 /* Get sched_vsi_info */
 4857                 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
 4858                 if (!vsi_ctx)
 4859                         break;
 4860                 node = vsi_ctx->sched.vsi_node[tc];
 4861                 break;
 4862         }
 4863 
 4864         case ICE_AGG_TYPE_AGG: {
 4865                 struct ice_sched_node *tc_node;
 4866 
 4867                 tc_node = ice_sched_get_tc_node(pi, tc);
 4868                 if (tc_node)
 4869                         node = ice_sched_get_agg_node(pi, tc_node, id);
 4870                 break;
 4871         }
 4872 
 4873         case ICE_AGG_TYPE_Q:
 4874                 /* The current implementation allows only a single queue to be modified */
 4875                 node = ice_sched_get_node(pi, id);
 4876                 break;
 4877 
 4878         case ICE_AGG_TYPE_QG:
 4879                 /* The current implementation allows only a single queue group to be modified */
 4880                 child_node = ice_sched_get_node(pi, id);
 4881                 if (!child_node)
 4882                         break;
 4883                 node = child_node->parent;
 4884                 break;
 4885 
 4886         default:
 4887                 break;
 4888         }
 4889 
 4890         return node;
 4891 }
 4892 
 4893 /**
 4894  * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
 4895  * @pi: port information structure
 4896  * @id: ID (software VSI handle or AGG ID)
 4897  * @agg_type: aggregator type (VSI or AGG type node)
 4898  * @tc: traffic class
 4899  * @rl_type: min or max
 4900  * @bw: bandwidth in Kbps
 4901  *
 4902  * This function sets the BW limit of a VSI or aggregator scheduling node
 4903  * for the given TC to the passed-in BW value.
 4904  */
 4905 enum ice_status
 4906 ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
 4907                                  enum ice_agg_type agg_type, u8 tc,
 4908                                  enum ice_rl_type rl_type, u32 bw)
 4909 {
 4910         enum ice_status status = ICE_ERR_PARAM;
 4911         struct ice_sched_node *node;
 4912 
 4913         if (!pi)
 4914                 return status;
 4915 
 4916         if (rl_type == ICE_UNKNOWN_BW)
 4917                 return status;
 4918 
 4919         ice_acquire_lock(&pi->sched_lock);
 4920         node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
 4921         if (!node) {
 4922                 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n");
 4923                 goto exit_set_node_bw_lmt_per_tc;
 4924         }
 4925         if (bw == ICE_SCHED_DFLT_BW)
 4926                 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
 4927         else
 4928                 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
 4929 
 4930 exit_set_node_bw_lmt_per_tc:
 4931         ice_release_lock(&pi->sched_lock);
 4932         return status;
 4933 }
 4934 
 4935 /**
 4936  * ice_sched_validate_vsi_srl_node - validate VSI SRL node
 4937  * @pi: port information structure
 4938  * @vsi_handle: software VSI handle
 4939  *
 4940  * This function validates the SRL node of the VSI node if the available
 4941  * SRL layer is different from the VSI node layer on all TC(s). This
 4942  * function needs to be called with the scheduler lock held.
 4943  */
 4944 static enum ice_status
 4945 ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
 4946 {
 4947         u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
 4948         u8 tc;
 4949 
 4950         if (!ice_is_vsi_valid(pi->hw, vsi_handle))
 4951                 return ICE_ERR_PARAM;
 4952 
 4953         /* Return success if no nodes are present across TC */
 4954         ice_for_each_traffic_class(tc) {
 4955                 struct ice_sched_node *tc_node, *vsi_node;
 4956                 enum ice_rl_type rl_type = ICE_SHARED_BW;
 4957                 enum ice_status status;
 4958 
 4959                 tc_node = ice_sched_get_tc_node(pi, tc);
 4960                 if (!tc_node)
 4961                         continue;
 4962 
 4963                 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
 4964                 if (!vsi_node)
 4965                         continue;
 4966 
 4967                 /* SRL bandwidth layer selection */
 4968                 if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
 4969                         u8 node_layer = vsi_node->tx_sched_layer;
 4970                         u8 layer_num;
 4971 
 4972                         layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
 4973                                                                 node_layer);
 4974                         if (layer_num >= pi->hw->num_tx_sched_layers)
 4975                                 return ICE_ERR_PARAM;
 4976                         sel_layer = layer_num;
 4977                 }
 4978 
 4979                 status = ice_sched_validate_srl_node(vsi_node, sel_layer);
 4980                 if (status)
 4981                         return status;
 4982         }
 4983         return ICE_SUCCESS;
 4984 }
 4985 
 4986 /**
 4987  * ice_sched_set_save_vsi_srl_node_bw - set VSI shared limit values
 4988  * @pi: port information structure
 4989  * @vsi_handle: software VSI handle
 4990  * @tc: traffic class
 4991  * @srl_node: sched node to configure
 4992  * @rl_type: rate limit type minimum, maximum, or shared
 4993  * @bw: minimum, maximum, or shared bandwidth in Kbps
 4994  *
 4995  * Configure the shared rate limiter (SRL) of VSI type nodes across the
 4996  * given traffic class, and save those values for later use during replay.
 4997  * The caller holds the scheduler lock.
 4998  */
 4999 static enum ice_status
 5000 ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle,
 5001                                    u8 tc, struct ice_sched_node *srl_node,
 5002                                    enum ice_rl_type rl_type, u32 bw)
 5003 {
 5004         enum ice_status status;
 5005 
 5006         if (bw == ICE_SCHED_DFLT_BW) {
 5007                 status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type);
 5008         } else {
 5009                 status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw);
 5010                 if (status)
 5011                         return status;
 5012                 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
 5013         }
 5014         return status;
 5015 }
 5016 
 5017 /**
 5018  * ice_sched_set_vsi_node_srl_per_tc - set VSI node BW shared limit for tc
 5019  * @pi: port information structure
 5020  * @vsi_handle: software VSI handle
 5021  * @tc: traffic class
 5022  * @min_bw: minimum bandwidth in Kbps
 5023  * @max_bw: maximum bandwidth in Kbps
 5024  * @shared_bw: shared bandwidth in Kbps
 5025  *
 5026  * Configure the shared rate limiter (SRL) of VSI type nodes across the
 5027  * requested traffic class for the VSI matching the handle. When a BW value
 5028  * of ICE_SCHED_DFLT_BW is passed, it removes the corresponding BW from the
 5029  * node. The caller holds the scheduler lock.
 5030  */
 5031 static enum ice_status
 5032 ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle,
 5033                                   u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw)
 5034 {
 5035         struct ice_sched_node *tc_node, *vsi_node, *cfg_node;
 5036         enum ice_status status;
 5037         u8 layer_num;
 5038 
 5039         tc_node = ice_sched_get_tc_node(pi, tc);
 5040         if (!tc_node)
 5041                 return ICE_ERR_CFG;
 5042 
 5043         vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
 5044         if (!vsi_node)
 5045                 return ICE_ERR_CFG;
 5046 
 5047         layer_num = ice_sched_get_rl_prof_layer(pi, ICE_SHARED_BW,
 5048                                                 vsi_node->tx_sched_layer);
 5049         if (layer_num >= pi->hw->num_tx_sched_layers)
 5050                 return ICE_ERR_PARAM;
 5051 
 5052         /* SRL node may be different */
 5053         cfg_node = ice_sched_get_srl_node(vsi_node, layer_num);
 5054         if (!cfg_node)
 5055                 return ICE_ERR_CFG;
 5056 
 5057         status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc,
 5058                                                     cfg_node, ICE_MIN_BW,
 5059                                                     min_bw);
 5060         if (status)
 5061                 return status;
 5062 
 5063         status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc,
 5064                                                     cfg_node, ICE_MAX_BW,
 5065                                                     max_bw);
 5066         if (status)
 5067                 return status;
 5068 
 5069         return ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, cfg_node,
 5070                                                   ICE_SHARED_BW, shared_bw);
 5071 }
 5072 
 5073 /**
 5074  * ice_sched_set_vsi_bw_shared_lmt - set VSI BW shared limit
 5075  * @pi: port information structure
 5076  * @vsi_handle: software VSI handle
 5077  * @min_bw: minimum bandwidth in Kbps
 5078  * @max_bw: maximum bandwidth in Kbps
 5079  * @shared_bw: shared bandwidth in Kbps
 5080  *
 5081  * Configure the shared rate limiter (SRL) of all VSI type nodes across all
 5082  * traffic classes for the VSI matching the handle. When a BW value of
 5083  * ICE_SCHED_DFLT_BW is passed, it removes those value(s) from the node.
 5084  */
 5085 enum ice_status
 5086 ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
 5087                                 u32 min_bw, u32 max_bw, u32 shared_bw)
 5088 {
 5089         enum ice_status status = ICE_SUCCESS;
 5090         u8 tc;
 5091 
 5092         if (!pi)
 5093                 return ICE_ERR_PARAM;
 5094 
 5095         if (!ice_is_vsi_valid(pi->hw, vsi_handle))
 5096                 return ICE_ERR_PARAM;
 5097 
 5098         ice_acquire_lock(&pi->sched_lock);
 5099         status = ice_sched_validate_vsi_srl_node(pi, vsi_handle);
 5100         if (status)
 5101                 goto exit_set_vsi_bw_shared_lmt;
 5102         /* Return success if no nodes are present across TC */
 5103         ice_for_each_traffic_class(tc) {
 5104                 struct ice_sched_node *tc_node, *vsi_node;
 5105 
 5106                 tc_node = ice_sched_get_tc_node(pi, tc);
 5107                 if (!tc_node)
 5108                         continue;
 5109 
 5110                 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
 5111                 if (!vsi_node)
 5112                         continue;
 5113 
 5114                 status = ice_sched_set_vsi_node_srl_per_tc(pi, vsi_handle, tc,
 5115                                                            min_bw, max_bw,
 5116                                                            shared_bw);
 5117                 if (status)
 5118                         break;
 5119         }
 5120 
 5121 exit_set_vsi_bw_shared_lmt:
 5122         ice_release_lock(&pi->sched_lock);
 5123         return status;
 5124 }
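
/*
 * Example (illustrative sketch, hypothetical bandwidths): cap a VSI's
 * aggregate throughput at 2 Gbps across all TCs while leaving the min and
 * max limits at their defaults, then remove the cap:
 *
 *	ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, ICE_SCHED_DFLT_BW,
 *					ICE_SCHED_DFLT_BW, 2000000);
 *	...
 *	ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, ICE_SCHED_DFLT_BW,
 *					ICE_SCHED_DFLT_BW, ICE_SCHED_DFLT_BW);
 */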
 5125 
 5126 /**
 5127  * ice_sched_validate_agg_srl_node - validate AGG SRL node
 5128  * @pi: port information structure
 5129  * @agg_id: aggregator ID
 5130  *
 5131  * This function validates the SRL node of the AGG node if the available
 5132  * SRL layer is different from the AGG node layer on all TC(s). This
 5133  * function needs to be called with the scheduler lock held.
 5134  */
 5135 static enum ice_status
 5136 ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id)
 5137 {
 5138         u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
 5139         struct ice_sched_agg_info *agg_info;
 5140         bool agg_id_present = false;
 5141         enum ice_status status = ICE_SUCCESS;
 5142         u8 tc;
 5143 
 5144         LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info,
 5145                             list_entry)
 5146                 if (agg_info->agg_id == agg_id) {
 5147                         agg_id_present = true;
 5148                         break;
 5149                 }
 5150         if (!agg_id_present)
 5151                 return ICE_ERR_PARAM;
 5152         /* Return success if no nodes are present across TC */
 5153         ice_for_each_traffic_class(tc) {
 5154                 struct ice_sched_node *tc_node, *agg_node;
 5155                 enum ice_rl_type rl_type = ICE_SHARED_BW;
 5156 
 5157                 tc_node = ice_sched_get_tc_node(pi, tc);
 5158                 if (!tc_node)
 5159                         continue;
 5160 
 5161                 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
 5162                 if (!agg_node)
 5163                         continue;
 5164                 /* SRL bandwidth layer selection */
 5165                 if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
 5166                         u8 node_layer = agg_node->tx_sched_layer;
 5167                         u8 layer_num;
 5168 
 5169                         layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
 5170                                                                 node_layer);
 5171                         if (layer_num >= pi->hw->num_tx_sched_layers)
 5172                                 return ICE_ERR_PARAM;
 5173                         sel_layer = layer_num;
 5174                 }
 5175 
 5176                 status = ice_sched_validate_srl_node(agg_node, sel_layer);
 5177                 if (status)
 5178                         break;
 5179         }
 5180         return status;
 5181 }
 5182 
 5183 /**
 5184  * ice_sched_validate_agg_id - validate aggregator ID
 5185  * @pi: port information structure
 5186  * @agg_id: aggregator ID
 5187  *
 5188  * This function validates the aggregator ID. The caller holds the scheduler lock.
 5189  */
 5190 static enum ice_status
 5191 ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id)
 5192 {
 5193         struct ice_sched_agg_info *agg_info;
 5194         struct ice_sched_agg_info *tmp;
 5195         bool agg_id_present = false;
 5196         enum ice_status status;
 5197 
 5198         status = ice_sched_validate_agg_srl_node(pi, agg_id);
 5199         if (status)
 5200                 return status;
 5201 
 5202         LIST_FOR_EACH_ENTRY_SAFE(agg_info, tmp, &pi->hw->agg_list,
 5203                                  ice_sched_agg_info, list_entry)
 5204                 if (agg_info->agg_id == agg_id) {
 5205                         agg_id_present = true;
 5206                         break;
 5207                 }
 5208 
 5209         if (!agg_id_present)
 5210                 return ICE_ERR_PARAM;
 5211 
 5212         return ICE_SUCCESS;
 5213 }
 5214 
 5215 /**
 5216  * ice_sched_set_save_agg_srl_node_bw - set aggregator shared limit values
 5217  * @pi: port information structure
 5218  * @agg_id: aggregator ID
 5219  * @tc: traffic class
 5220  * @srl_node: sched node to configure
 5221  * @rl_type: rate limit type minimum, maximum, or shared
 5222  * @bw: minimum, maximum, or shared bandwidth in Kbps
 5223  *
 5224  * Configure the shared rate limiter (SRL) of aggregator type nodes across
 5225  * the requested traffic class, and save those values for later use during
 5226  * replay. The caller holds the scheduler lock.
 5227  */
 5228 static enum ice_status
 5229 ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
 5230                                    struct ice_sched_node *srl_node,
 5231                                    enum ice_rl_type rl_type, u32 bw)
 5232 {
 5233         enum ice_status status;
 5234 
 5235         if (bw == ICE_SCHED_DFLT_BW) {
 5236                 status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type);
 5237         } else {
 5238                 status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw);
 5239                 if (status)
 5240                         return status;
 5241                 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
 5242         }
 5243         return status;
 5244 }
 5245 
 5246 /**
 5247  * ice_sched_set_agg_node_srl_per_tc - set aggregator SRL per tc
 5248  * @pi: port information structure
 5249  * @agg_id: aggregator ID
 5250  * @tc: traffic class
 5251  * @min_bw: minimum bandwidth in Kbps
 5252  * @max_bw: maximum bandwidth in Kbps
 5253  * @shared_bw: shared bandwidth in Kbps
 5254  *
 5255  * This function configures the shared rate limiter (SRL) of an aggregator
 5256  * type node for a given traffic class for the aggregator matching agg_id.
 5257  * When a BW value of ICE_SCHED_DFLT_BW is passed, it removes the SRL from
 5258  * the node. The caller holds the scheduler lock.
 5259  */
 5260 static enum ice_status
 5261 ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id,
 5262                                   u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw)
 5263 {
 5264         struct ice_sched_node *tc_node, *agg_node, *cfg_node;
 5265         enum ice_rl_type rl_type = ICE_SHARED_BW;
 5266         enum ice_status status = ICE_ERR_CFG;
 5267         u8 layer_num;
 5268 
 5269         tc_node = ice_sched_get_tc_node(pi, tc);
 5270         if (!tc_node)
 5271                 return ICE_ERR_CFG;
 5272 
 5273         agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
 5274         if (!agg_node)
 5275                 return ICE_ERR_CFG;
 5276 
 5277         layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
 5278                                                 agg_node->tx_sched_layer);
 5279         if (layer_num >= pi->hw->num_tx_sched_layers)
 5280                 return ICE_ERR_PARAM;
 5281 
 5282         /* SRL node may be different */
 5283         cfg_node = ice_sched_get_srl_node(agg_node, layer_num);
 5284         if (!cfg_node)
 5285                 return ICE_ERR_CFG;
 5286 
 5287         status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node,
 5288                                                     ICE_MIN_BW, min_bw);
 5289         if (status)
 5290                 return status;
 5291 
 5292         status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node,
 5293                                                     ICE_MAX_BW, max_bw);
 5294         if (status)
 5295                 return status;
 5296 
 5297         status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node,
 5298                                                     ICE_SHARED_BW, shared_bw);
 5299         return status;
 5300 }
 5301 
 5302 /**
 5303  * ice_sched_set_agg_bw_shared_lmt - set aggregator BW shared limit
 5304  * @pi: port information structure
 5305  * @agg_id: aggregator ID
 5306  * @min_bw: minimum bandwidth in Kbps
 5307  * @max_bw: maximum bandwidth in Kbps
 5308  * @shared_bw: shared bandwidth in Kbps
 5309  *
 5310  * This function configures the shared rate limiter (SRL) of all aggregator
 5311  * type nodes across all traffic classes for the aggregator matching agg_id.
 5312  * When a BW value of ICE_SCHED_DFLT_BW is passed, it removes the SRL from
 5313  * the node(s).
 5314  */
 5315 enum ice_status
 5316 ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id,
 5317                                 u32 min_bw, u32 max_bw, u32 shared_bw)
 5318 {
 5319         enum ice_status status;
 5320         u8 tc;
 5321 
 5322         if (!pi)
 5323                 return ICE_ERR_PARAM;
 5324 
 5325         ice_acquire_lock(&pi->sched_lock);
 5326         status = ice_sched_validate_agg_id(pi, agg_id);
 5327         if (status)
 5328                 goto exit_agg_bw_shared_lmt;
 5329 
 5330         /* Return success if no nodes are present across TC */
 5331         ice_for_each_traffic_class(tc) {
 5332                 struct ice_sched_node *tc_node, *agg_node;
 5333 
 5334                 tc_node = ice_sched_get_tc_node(pi, tc);
 5335                 if (!tc_node)
 5336                         continue;
 5337 
 5338                 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
 5339                 if (!agg_node)
 5340                         continue;
 5341 
 5342                 status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc,
 5343                                                            min_bw, max_bw,
 5344                                                            shared_bw);
 5345                 if (status)
 5346                         break;
 5347         }
 5348 
 5349 exit_agg_bw_shared_lmt:
 5350         ice_release_lock(&pi->sched_lock);
 5351         return status;
 5352 }
 5353 
 5354 /**
 5355  * ice_sched_set_agg_bw_shared_lmt_per_tc - set aggregator BW shared lmt per tc
 5356  * @pi: port information structure
 5357  * @agg_id: aggregator ID
 5358  * @tc: traffic class
 5359  * @min_bw: minimum bandwidth in Kbps
 5360  * @max_bw: maximum bandwidth in Kbps
 5361  * @shared_bw: shared bandwidth in Kbps
 5362  *
 5363  * This function configures the shared rate limiter (SRL) of an aggregator
 5364  * type node for a given traffic class for the aggregator matching agg_id.
 5365  * When a BW value of ICE_SCHED_DFLT_BW is passed, the SRL is removed.
 5366  */
 5367 enum ice_status
 5368 ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id,
 5369                                        u8 tc, u32 min_bw, u32 max_bw,
 5370                                        u32 shared_bw)
 5371 {
 5372         enum ice_status status;
 5373 
 5374         if (!pi)
 5375                 return ICE_ERR_PARAM;
 5376         ice_acquire_lock(&pi->sched_lock);
 5377         status = ice_sched_validate_agg_id(pi, agg_id);
 5378         if (status)
 5379                 goto exit_agg_bw_shared_lmt_per_tc;
 5380 
 5381         status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc, min_bw,
 5382                                                    max_bw, shared_bw);
 5383 
 5384 exit_agg_bw_shared_lmt_per_tc:
 5385         ice_release_lock(&pi->sched_lock);
 5386         return status;
 5387 }
 5388 
 5389 /**
 5390  * ice_sched_cfg_sibl_node_prio - configure node sibling priority
 5391  * @pi: port information structure
 5392  * @node: sched node to configure
 5393  * @priority: sibling priority
 5394  *
 5395  * This function configures node element's sibling priority only. This
 5396  * function needs to be called with scheduler lock held.
 5397  */
 5398 enum ice_status
 5399 ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
 5400                              struct ice_sched_node *node, u8 priority)
 5401 {
 5402         struct ice_aqc_txsched_elem_data buf;
 5403         struct ice_aqc_txsched_elem *data;
 5404         struct ice_hw *hw = pi->hw;
 5405         enum ice_status status;
 5406 
 5407         if (!hw)
 5408                 return ICE_ERR_PARAM;
 5409         buf = node->info;
 5410         data = &buf.data;
 5411         data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
 5412         priority = (priority << ICE_AQC_ELEM_GENERIC_PRIO_S) &
 5413                    ICE_AQC_ELEM_GENERIC_PRIO_M;
 5414         data->generic &= ~ICE_AQC_ELEM_GENERIC_PRIO_M;
 5415         data->generic |= priority;
 5416 
 5417         /* Configure element */
 5418         status = ice_sched_update_elem(hw, node, &buf);
 5419         return status;
 5420 }
 5421 
 5422 /**
 5423  * ice_cfg_rl_burst_size - Set burst size value
 5424  * @hw: pointer to the HW struct
 5425  * @bytes: burst size in bytes
 5426  *
 5427  * This function sets the burst size to the requested new value. The new
 5428  * burst size value is used for future rate limit calls. It doesn't change
 5429  * the existing or previously created RL profiles.
 5430  */
 5431 enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
 5432 {
 5433         u16 burst_size_to_prog;
 5434 
 5435         if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
 5436             bytes > ICE_MAX_BURST_SIZE_ALLOWED)
 5437                 return ICE_ERR_PARAM;
 5438         if (ice_round_to_num(bytes, 64) <=
 5439             ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
 5440                 /* 64 byte granularity case */
 5441                 /* Disable MSB granularity bit */
 5442                 burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
 5443                 /* round number to nearest 64 byte granularity */
 5444                 bytes = ice_round_to_num(bytes, 64);
 5445                 /* The value is in 64 byte chunks */
 5446                 burst_size_to_prog |= (u16)(bytes / 64);
 5447         } else {
 5448                 /* k bytes granularity case */
 5449                 /* Enable MSB granularity bit */
 5450                 burst_size_to_prog = ICE_KBYTE_GRANULARITY;
 5451                 /* round number to nearest 1024 granularity */
 5452                 bytes = ice_round_to_num(bytes, 1024);
 5453                 /* check rounding doesn't go beyond allowed */
 5454                 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
 5455                         bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
 5456                 /* The value is in k bytes */
 5457                 burst_size_to_prog |= (u16)(bytes / 1024);
 5458         }
 5459         hw->max_burst_size = burst_size_to_prog;
 5460         return ICE_SUCCESS;
 5461 }
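
/*
 * Example (worked values, illustrative only; exact thresholds come from
 * the ICE_MAX_BURST_SIZE_* constants): the programmed value packs a
 * granularity flag plus a chunk count.
 *
 *	ice_cfg_rl_burst_size(hw, 4096);
 *		4096 is already a multiple of 64, so the 64-byte path is
 *		used: count = 4096 / 64 = 64 chunks.
 *	ice_cfg_rl_burst_size(hw, 200000);
 *		assuming 200000 exceeds the 64-byte-granularity ceiling, the
 *		KB path rounds to the nearest 1024: 199680 / 1024 = 195
 *		chunks.
 *
 * Only profiles created after the call pick up the new burst size.
 */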
 5462 
 5463 /**
 5464  * ice_sched_replay_node_prio - re-configure node priority
 5465  * @hw: pointer to the HW struct
 5466  * @node: sched node to configure
 5467  * @priority: priority value
 5468  *
 5469  * This function configures node element's priority value. It
 5470  * needs to be called with scheduler lock held.
 5471  */
 5472 static enum ice_status
 5473 ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
 5474                            u8 priority)
 5475 {
 5476         struct ice_aqc_txsched_elem_data buf;
 5477         struct ice_aqc_txsched_elem *data;
 5478         enum ice_status status;
 5479 
 5480         buf = node->info;
 5481         data = &buf.data;
 5482         data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
 5483         data->generic = priority;
 5484 
 5485         /* Configure element */
 5486         status = ice_sched_update_elem(hw, node, &buf);
 5487         return status;
 5488 }
 5489 
 5490 /**
 5491  * ice_sched_replay_node_bw - replay node(s) BW
 5492  * @hw: pointer to the HW struct
 5493  * @node: sched node to configure
 5494  * @bw_t_info: BW type information
 5495  *
 5496  * This function restores the node's BW from bw_t_info. The caller needs to
 5497  * hold the scheduler lock.
 5498  */
 5499 static enum ice_status
 5500 ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
 5501                          struct ice_bw_type_info *bw_t_info)
 5502 {
 5503         struct ice_port_info *pi = hw->port_info;
 5504         enum ice_status status = ICE_ERR_PARAM;
 5505         u16 bw_alloc;
 5506 
 5507         if (!node)
 5508                 return status;
 5509         if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
 5510                 return ICE_SUCCESS;
 5511         if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) {
 5512                 status = ice_sched_replay_node_prio(hw, node,
 5513                                                     bw_t_info->generic);
 5514                 if (status)
 5515                         return status;
 5516         }
 5517         if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR)) {
 5518                 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
 5519                                                    bw_t_info->cir_bw.bw);
 5520                 if (status)
 5521                         return status;
 5522         }
 5523         if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR_WT)) {
 5524                 bw_alloc = bw_t_info->cir_bw.bw_alloc;
 5525                 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
 5526                                                      bw_alloc);
 5527                 if (status)
 5528                         return status;
 5529         }
 5530         if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR)) {
 5531                 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
 5532                                                    bw_t_info->eir_bw.bw);
 5533                 if (status)
 5534                         return status;
 5535         }
 5536         if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR_WT)) {
 5537                 bw_alloc = bw_t_info->eir_bw.bw_alloc;
 5538                 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
 5539                                                      bw_alloc);
 5540                 if (status)
 5541                         return status;
 5542         }
 5543         if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_SHARED))
 5544                 status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
 5545                                                    bw_t_info->shared_bw);
 5546         return status;
 5547 }
 5548 
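/*
 * Compiled-out sketch: how a saved setting reaches the replay path above. A
 * hypothetical save helper records the committed CIR and sets the matching
 * ICE_BW_TYPE_* bit, so that ice_sched_replay_node_bw() knows which fields
 * of bw_t_info are valid to restore.
 */
#if 0
static void ex_save_cir(struct ice_bw_type_info *bw_t_info, u32 bw)
{
        bw_t_info->cir_bw.bw = bw;
        ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
}
#endif
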
 5549 /**
 5550  * ice_sched_replay_agg_bw - replay aggregator node(s) BW
 5551  * @hw: pointer to the HW struct
 5552  * @agg_info: aggregator data structure
 5553  *
 5554  * This function replays the BW of aggregator type nodes for each enabled TC.
 5555  * The caller needs to hold the scheduler lock.
 5556  */
 5557 static enum ice_status
 5558 ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info)
 5559 {
 5560         struct ice_sched_node *tc_node, *agg_node;
 5561         enum ice_status status = ICE_SUCCESS;
 5562         u8 tc;
 5563 
 5564         if (!agg_info)
 5565                 return ICE_ERR_PARAM;
 5566         ice_for_each_traffic_class(tc) {
 5567                 if (!ice_is_any_bit_set(agg_info->bw_t_info[tc].bw_t_bitmap,
 5568                                         ICE_BW_TYPE_CNT))
 5569                         continue;
 5570                 tc_node = ice_sched_get_tc_node(hw->port_info, tc);
 5571                 if (!tc_node) {
 5572                         status = ICE_ERR_PARAM;
 5573                         break;
 5574                 }
 5575                 agg_node = ice_sched_get_agg_node(hw->port_info, tc_node,
 5576                                                   agg_info->agg_id);
 5577                 if (!agg_node) {
 5578                         status = ICE_ERR_PARAM;
 5579                         break;
 5580                 }
 5581                 status = ice_sched_replay_node_bw(hw, agg_node,
 5582                                                   &agg_info->bw_t_info[tc]);
 5583                 if (status)
 5584                         break;
 5585         }
 5586         return status;
 5587 }
 5588 
 5589 /**
 5590  * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
 5591  * @pi: port info struct
 5592  * @tc_bitmap: 8 bits TC bitmap to check
 5593  * @ena_tc_bitmap: 8 bits enabled TC bitmap to return
 5594  *
 5595  * This function returns the enabled TC bitmap in ena_tc_bitmap. Since some TCs
 5596  * may be missing after a reset, only the requested TCs whose nodes still exist
 5597  * are returned. This function needs to be called with the scheduler lock held.
 5598  */
 5599 static void
 5600 ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi, ice_bitmap_t *tc_bitmap,
 5601                             ice_bitmap_t *ena_tc_bitmap)
 5602 {
 5603         u8 tc;
 5604 
 5605         /* Some TC(s) may be missing after reset, adjust for replay */
 5606         ice_for_each_traffic_class(tc)
 5607                 if (ice_is_tc_ena(*tc_bitmap, tc) &&
 5608                     (ice_sched_get_tc_node(pi, tc)))
 5609                         ice_set_bit(tc, ena_tc_bitmap);
 5610 }
 5611 
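/*
 * Compiled-out sketch: the helper above effectively intersects the saved TC
 * bitmap with the TCs whose nodes survived reset. Hypothetical usage, with
 * the scheduler lock already held:
 */
#if 0
static void ex_ena_tc(struct ice_port_info *pi)
{
        ice_declare_bitmap(want, ICE_MAX_TRAFFIC_CLASS);
        ice_declare_bitmap(have, ICE_MAX_TRAFFIC_CLASS);

        ice_zero_bitmap(want, ICE_MAX_TRAFFIC_CLASS);
        ice_zero_bitmap(have, ICE_MAX_TRAFFIC_CLASS);
        ice_set_bit(0, want);           /* TC 0 requested */
        ice_set_bit(5, want);           /* TC 5 requested, may be gone */
        /* 'have' ends up with only the requested TCs that still exist */
        ice_sched_get_ena_tc_bitmap(pi, want, have);
}
#endif
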
 5612 /**
 5613  * ice_sched_replay_agg - recreate aggregator node(s)
 5614  * @hw: pointer to the HW struct
 5615  *
 5616  * This function recreates aggregator type nodes that were not replayed earlier.
 5617  * It also replays aggregator BW information. These aggregator nodes are not yet
 5618  * associated with VSI type nodes.
 5619  */
 5620 void ice_sched_replay_agg(struct ice_hw *hw)
 5621 {
 5622         struct ice_port_info *pi = hw->port_info;
 5623         struct ice_sched_agg_info *agg_info;
 5624 
 5625         ice_acquire_lock(&pi->sched_lock);
 5626         LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
 5627                             list_entry)
 5628                 /* replay aggregator (re-create aggregator node) */
 5629                 if (!ice_cmp_bitmap(agg_info->tc_bitmap,
 5630                                     agg_info->replay_tc_bitmap,
 5631                                     ICE_MAX_TRAFFIC_CLASS)) {
 5632                         ice_declare_bitmap(replay_bitmap,
 5633                                            ICE_MAX_TRAFFIC_CLASS);
 5634                         enum ice_status status;
 5635 
 5636                         ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
 5637                         ice_sched_get_ena_tc_bitmap(pi,
 5638                                                     agg_info->replay_tc_bitmap,
 5639                                                     replay_bitmap);
 5640                         status = ice_sched_cfg_agg(hw->port_info,
 5641                                                    agg_info->agg_id,
 5642                                                    ICE_AGG_TYPE_AGG,
 5643                                                    replay_bitmap);
 5644                         if (status) {
 5645                                 ice_info(hw, "Replay agg id[%d] failed\n",
 5646                                          agg_info->agg_id);
 5647                                 /* Move on to next one */
 5648                                 continue;
 5649                         }
 5650                         /* Replay aggregator node BW (restore aggregator BW) */
 5651                         status = ice_sched_replay_agg_bw(hw, agg_info);
 5652                         if (status)
 5653                                 ice_info(hw, "Replay agg bw [id=%d] failed\n",
 5654                                          agg_info->agg_id);
 5655                 }
 5656         ice_release_lock(&pi->sched_lock);
 5657 }
 5658 
 5659 /**
 5660  * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization
 5661  * @hw: pointer to the HW struct
 5662  *
 5663  * This function initializes the aggregator(s) TC bitmaps to zero, a required
 5664  * pre-init step for replaying aggregators.
 5665  */
 5666 void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
 5667 {
 5668         struct ice_port_info *pi = hw->port_info;
 5669         struct ice_sched_agg_info *agg_info;
 5670 
 5671         ice_acquire_lock(&pi->sched_lock);
 5672         LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
 5673                             list_entry) {
 5674                 struct ice_sched_agg_vsi_info *agg_vsi_info;
 5675 
 5676                 agg_info->tc_bitmap[0] = 0;
 5677                 LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
 5678                                     ice_sched_agg_vsi_info, list_entry)
 5679                         agg_vsi_info->tc_bitmap[0] = 0;
 5680         }
 5681         ice_release_lock(&pi->sched_lock);
 5682 }
 5683 
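/*
 * Compiled-out sketch: a plausible post-reset ordering of the replay helpers
 * in this file, inferred from their doc comments. The actual caller names and
 * sequencing in the driver may differ, and error handling is omitted.
 */
#if 0
static void ex_replay_sched_after_reset(struct ice_hw *hw, u16 *vsis, u16 num)
{
        u16 i;

        /* 1) zero the aggregator/VSI TC bitmaps before any replay */
        ice_sched_replay_agg_vsi_preinit(hw);
        /* 2) re-associate each replayed VSI with its aggregator */
        for (i = 0; i < num; i++)
                (void)ice_replay_vsi_agg(hw, vsis[i]);
        /* 3) recreate any aggregators that step 2 did not touch */
        ice_sched_replay_agg(hw);
}
#endif
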
 5684 /**
 5685  * ice_sched_replay_root_node_bw - replay root node BW
 5686  * @pi: port information structure
 5687  *
 5688  * Replay root node BW settings.
 5689  */
 5690 enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi)
 5691 {
 5692         enum ice_status status = ICE_SUCCESS;
 5693 
 5694         if (!pi->hw)
 5695                 return ICE_ERR_PARAM;
 5696         ice_acquire_lock(&pi->sched_lock);
 5697 
 5698         status = ice_sched_replay_node_bw(pi->hw, pi->root,
 5699                                           &pi->root_node_bw_t_info);
 5700         ice_release_lock(&pi->sched_lock);
 5701         return status;
 5702 }
 5703 
 5704 /**
 5705  * ice_sched_replay_tc_node_bw - replay TC node(s) BW
 5706  * @pi: port information structure
 5707  *
 5708  * This function replays the BW of each TC node.
 5709  */
 5710 enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
 5711 {
 5712         enum ice_status status = ICE_SUCCESS;
 5713         u8 tc;
 5714 
 5715         if (!pi->hw)
 5716                 return ICE_ERR_PARAM;
 5717         ice_acquire_lock(&pi->sched_lock);
 5718         ice_for_each_traffic_class(tc) {
 5719                 struct ice_sched_node *tc_node;
 5720 
 5721                 tc_node = ice_sched_get_tc_node(pi, tc);
 5722                 if (!tc_node)
 5723                         continue; /* TC not present */
 5724                 status = ice_sched_replay_node_bw(pi->hw, tc_node,
 5725                                                   &pi->tc_node_bw_t_info[tc]);
 5726                 if (status)
 5727                         break;
 5728         }
 5729         ice_release_lock(&pi->sched_lock);
 5730         return status;
 5731 }
 5732 
 5733 /**
 5734  * ice_sched_replay_vsi_bw - replay VSI type node(s) BW
 5735  * @hw: pointer to the HW struct
 5736  * @vsi_handle: software VSI handle
 5737  * @tc_bitmap: 8 bits TC bitmap
 5738  *
 5739  * This function replays the bandwidth of VSI type nodes. It needs to be called
 5740  * with the scheduler lock held.
 5741  */
 5742 static enum ice_status
 5743 ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle,
 5744                         ice_bitmap_t *tc_bitmap)
 5745 {
 5746         struct ice_sched_node *vsi_node, *tc_node;
 5747         struct ice_port_info *pi = hw->port_info;
 5748         struct ice_bw_type_info *bw_t_info;
 5749         struct ice_vsi_ctx *vsi_ctx;
 5750         enum ice_status status = ICE_SUCCESS;
 5751         u8 tc;
 5752 
 5753         vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
 5754         if (!vsi_ctx)
 5755                 return ICE_ERR_PARAM;
 5756         ice_for_each_traffic_class(tc) {
 5757                 if (!ice_is_tc_ena(*tc_bitmap, tc))
 5758                         continue;
 5759                 tc_node = ice_sched_get_tc_node(pi, tc);
 5760                 if (!tc_node)
 5761                         continue;
 5762                 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
 5763                 if (!vsi_node)
 5764                         continue;
 5765                 bw_t_info = &vsi_ctx->sched.bw_t_info[tc];
 5766                 status = ice_sched_replay_node_bw(hw, vsi_node, bw_t_info);
 5767                 if (status)
 5768                         break;
 5769         }
 5770         return status;
 5771 }
 5772 
 5773 /**
 5774  * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s)
 5775  * @hw: pointer to the HW struct
 5776  * @vsi_handle: software VSI handle
 5777  *
 5778  * This function replays aggregator node, VSI to aggregator type nodes, and
 5779  * their node bandwidth information. This function needs to be called with
 5780  * scheduler lock held.
 5781  */
 5782 static enum ice_status
 5783 ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
 5784 {
 5785         ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
 5786         struct ice_sched_agg_vsi_info *agg_vsi_info;
 5787         struct ice_port_info *pi = hw->port_info;
 5788         struct ice_sched_agg_info *agg_info;
 5789         enum ice_status status;
 5790 
 5791         ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
 5792         if (!ice_is_vsi_valid(hw, vsi_handle))
 5793                 return ICE_ERR_PARAM;
 5794         agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
 5795         if (!agg_info)
 5796                 return ICE_SUCCESS; /* Not present in list - default Agg case */
 5797         agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
 5798         if (!agg_vsi_info)
 5799                 return ICE_SUCCESS; /* Not present in list - default Agg case */
 5800         ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
 5801                                     replay_bitmap);
 5802         /* Replay the aggregator node associated with vsi_handle */
 5803         status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id,
 5804                                    ICE_AGG_TYPE_AGG, replay_bitmap);
 5805         if (status)
 5806                 return status;
 5807         /* Replay aggregator node BW (restore aggregator BW) */
 5808         status = ice_sched_replay_agg_bw(hw, agg_info);
 5809         if (status)
 5810                 return status;
 5811 
 5812         ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
 5813         ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap,
 5814                                     replay_bitmap);
 5815         /* Move this VSI (vsi_handle) under the aggregator replayed above */
 5816         status = ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle,
 5817                                             replay_bitmap);
 5818         if (status)
 5819                 return status;
 5820         /* Replay VSI BW (restore VSI BW) */
 5821         return ice_sched_replay_vsi_bw(hw, vsi_handle,
 5822                                        agg_vsi_info->tc_bitmap);
 5823 }
 5824 
 5825 /**
 5826  * ice_replay_vsi_agg - replay VSI to aggregator node
 5827  * @hw: pointer to the HW struct
 5828  * @vsi_handle: software VSI handle
 5829  *
 5830  * This function replays association of VSI to aggregator type nodes, and
 5831  * node bandwidth information.
 5832  */
 5833 enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
 5834 {
 5835         struct ice_port_info *pi = hw->port_info;
 5836         enum ice_status status;
 5837 
 5838         ice_acquire_lock(&pi->sched_lock);
 5839         status = ice_sched_replay_vsi_agg(hw, vsi_handle);
 5840         ice_release_lock(&pi->sched_lock);
 5841         return status;
 5842 }
 5843 
 5844 /**
 5845  * ice_sched_replay_q_bw - replay queue type node BW
 5846  * @pi: port information structure
 5847  * @q_ctx: queue context structure
 5848  *
 5849  * This function replays queue type node bandwidth. This function needs to be
 5850  * called with scheduler lock held.
 5851  */
 5852 enum ice_status
 5853 ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
 5854 {
 5855         struct ice_sched_node *q_node;
 5856 
 5857         /* The following lookup also checks that the node is present in the tree */
 5858         q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
 5859         if (!q_node)
 5860                 return ICE_ERR_PARAM;
 5861         return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
 5862 }
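
/*
 * Compiled-out sketch: a hypothetical call site for the function above. Its
 * doc comment requires the scheduler lock, so a caller outside this file
 * would wrap it as shown; only ice_sched_replay_q_bw() and the lock helpers
 * are real names here.
 */
#if 0
static enum ice_status
ex_replay_one_q(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
{
        enum ice_status status;

        ice_acquire_lock(&pi->sched_lock);
        status = ice_sched_replay_q_bw(pi, q_ctx);
        ice_release_lock(&pi->sched_lock);
        return status;
}
#endif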
