FreeBSD/Linux Kernel Cross Reference
sys/contrib/dev/rtw89/rtw8852c_rfk.c

    1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
    2 /* Copyright(c) 2019-2022  Realtek Corporation
    3  */
    4 
    5 #include "coex.h"
    6 #include "debug.h"
    7 #include "phy.h"
    8 #include "reg.h"
    9 #include "rtw8852c.h"
   10 #include "rtw8852c_rfk.h"
   11 #include "rtw8852c_rfk_table.h"
   12 #include "rtw8852c_table.h"
   13 
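       /* TSSI DE bit field and the per-path (S0/S1) register addresses that
        * hold the DE values for the CCK and MCS/bandwidth cases.
        */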
   14 #define _TSSI_DE_MASK GENMASK(21, 12)
   15 static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852C] = {0x5858, 0x7858};
   16 static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852C] = {0x5860, 0x7860};
   17 static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852C] = {0x5838, 0x7838};
   18 static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852C] = {0x5840, 0x7840};
   19 static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852C] = {0x5848, 0x7848};
   20 static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852C] = {0x5850, 0x7850};
   21 static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852C] = {0x5828, 0x7828};
   22 static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852C] = {0x5830, 0x7830};
   23 
   24 static const u32 rtw8852c_backup_bb_regs[] = {
   25         0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0e8, 0x823c, 0x8224, 0x8220,
   26         0xc1d4, 0xc1d8, 0xc1e8
   27 };
   28 
   29 static const u32 rtw8852c_backup_rf_regs[] = {
   30         0xdf, 0x8f, 0x97, 0xa3, 0x5, 0x10005
   31 };
   32 
   33 #define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852c_backup_bb_regs)
   34 #define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852c_backup_rf_regs)
   35 
   36 #define RXK_GROUP_NR 4
   37 static const u32 _rxk_a6_idxrxgain[RXK_GROUP_NR] = {0x190, 0x196, 0x290, 0x316};
   38 static const u32 _rxk_a6_idxattc2[RXK_GROUP_NR] = {0x00, 0x0, 0x00, 0x00};
   39 static const u32 _rxk_a_idxrxgain[RXK_GROUP_NR] = {0x190, 0x198, 0x310, 0x318};
   40 static const u32 _rxk_a_idxattc2[RXK_GROUP_NR] = {0x00, 0x00, 0x00, 0x00};
   41 static const u32 _rxk_g_idxrxgain[RXK_GROUP_NR] = {0x252, 0x26c, 0x350, 0x360};
   42 static const u32 _rxk_g_idxattc2[RXK_GROUP_NR] = {0x00, 0x07, 0x00, 0x3};
   43 
   44 #define TXK_GROUP_NR 3
   45 static const u32 _txk_a6_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
   46 static const u32 _txk_a6_track_range[TXK_GROUP_NR] = {0x6, 0x7, 0x7};
   47 static const u32 _txk_a6_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
   48 static const u32 _txk_a6_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
   49 static const u32 _txk_a_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
   50 static const u32 _txk_a_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x7};
   51 static const u32 _txk_a_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
   52 static const u32 _txk_a_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
   53 static const u32 _txk_g_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
   54 static const u32 _txk_g_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x6};
   55 static const u32 _txk_g_gain_bb[TXK_GROUP_NR] = {0x0e, 0x0a, 0x0e};
    56 static const u32 _txk_g_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
   57 
   58 static const u32 dpk_par_regs[RTW89_DPK_RF_PATH][4] = {
   59         {0x8190, 0x8194, 0x8198, 0x81a4},
   60         {0x81a8, 0x81c4, 0x81c8, 0x81e8},
   61 };
   62 
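       /* Return the RF path mask to calibrate for a PHY: both paths when
        * DBCC is disabled, otherwise only the path owned by that PHY.
        */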
   63 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
   64 {
   65         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,  PHY%d\n",
   66                     rtwdev->dbcc_en, phy_idx);
   67 
   68         if (!rtwdev->dbcc_en)
   69                 return RF_AB;
   70 
   71         if (phy_idx == RTW89_PHY_0)
   72                 return RF_A;
   73         else
   74                 return RF_B;
   75 }
   76 
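       /* Back up, and later restore, the BB and RF registers listed above so
        * the calibration flows leave the original configuration untouched.
        */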
   77 static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
   78 {
   79         u32 i;
   80 
   81         for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
   82                 backup_bb_reg_val[i] =
   83                         rtw89_phy_read32_mask(rtwdev, rtw8852c_backup_bb_regs[i],
   84                                               MASKDWORD);
   85                 rtw89_debug(rtwdev, RTW89_DBG_RFK,
   86                             "[IQK]backup bb reg : %x, value =%x\n",
   87                             rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]);
   88         }
   89 }
   90 
   91 static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
   92                                u8 rf_path)
   93 {
   94         u32 i;
   95 
   96         for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
   97                 backup_rf_reg_val[i] =
   98                         rtw89_read_rf(rtwdev, rf_path,
   99                                       rtw8852c_backup_rf_regs[i], RFREG_MASK);
  100                 rtw89_debug(rtwdev, RTW89_DBG_RFK,
  101                             "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
  102                             rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]);
  103         }
  104 }
  105 
  106 static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
  107 {
  108         u32 i;
  109 
  110         for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
  111                 rtw89_phy_write32_mask(rtwdev, rtw8852c_backup_bb_regs[i],
  112                                        MASKDWORD, backup_bb_reg_val[i]);
  113                 rtw89_debug(rtwdev, RTW89_DBG_RFK,
  114                             "[IQK]restore bb reg : %x, value =%x\n",
  115                             rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]);
  116         }
  117 }
  118 
  119 static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
  120                                 u8 rf_path)
  121 {
  122         u32 i;
  123 
  124         for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
  125                 rtw89_write_rf(rtwdev, rf_path, rtw8852c_backup_rf_regs[i],
  126                                RFREG_MASK, backup_rf_reg_val[i]);
  127 
  128                 rtw89_debug(rtwdev, RTW89_DBG_RFK,
  129                             "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
  130                             rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]);
  131         }
  132 }
  133 
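       /* Poll RR_MOD on every selected path until the RF leaves TX mode
        * (mode != 2), waiting up to 5 ms per path.
        */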
  134 static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
  135 {
  136         u8 path;
  137         u32 rf_mode;
  138         int ret;
  139 
  140         for (path = 0; path < RF_PATH_MAX; path++) {
  141                 if (!(kpath & BIT(path)))
  142                         continue;
  143 
  144                 ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
  145                                                2, 5000, false, rtwdev, path, 0x00,
  146                                                RR_MOD_MASK);
  147                 rtw89_debug(rtwdev, RTW89_DBG_RFK,
  148                             "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
  149                             path, ret);
  150         }
  151 }
  152 
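       /* Dump the cached DACK results (ADC/DAC DC offsets, bias and MSBK
        * codes) for both paths to the RFK debug log.
        */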
  153 static void _dack_dump(struct rtw89_dev *rtwdev)
  154 {
  155         struct rtw89_dack_info *dack = &rtwdev->dack;
  156         u8 i;
  157         u8 t;
  158 
  159         rtw89_debug(rtwdev, RTW89_DBG_RFK,
  160                     "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
  161                     dack->addck_d[0][0], dack->addck_d[0][1]);
  162         rtw89_debug(rtwdev, RTW89_DBG_RFK,
  163                     "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
  164                     dack->addck_d[1][0], dack->addck_d[1][1]);
  165         rtw89_debug(rtwdev, RTW89_DBG_RFK,
  166                     "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
  167                     dack->dadck_d[0][0], dack->dadck_d[0][1]);
  168         rtw89_debug(rtwdev, RTW89_DBG_RFK,
  169                     "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
  170                     dack->dadck_d[1][0], dack->dadck_d[1][1]);
  171 
  172         rtw89_debug(rtwdev, RTW89_DBG_RFK,
  173                     "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
  174                     dack->biask_d[0][0], dack->biask_d[0][1]);
  175         rtw89_debug(rtwdev, RTW89_DBG_RFK,
  176                     "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
  177                     dack->biask_d[1][0], dack->biask_d[1][1]);
  178 
  179         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
  180         for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
  181                 t = dack->msbk_d[0][0][i];
  182                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
  183         }
  184         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
  185         for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
  186                 t = dack->msbk_d[0][1][i];
  187                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
  188         }
  189         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
  190         for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
  191                 t = dack->msbk_d[1][0][i];
  192                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
  193         }
  194         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
  195         for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
  196                 t = dack->msbk_d[1][1][i];
  197                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
  198         }
  199 }
  200 
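       /* Capture the ADDCK I/Q results for both paths, then write them back
        * through the manual reload fields.
        */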
  201 static void _addck_backup(struct rtw89_dev *rtwdev)
  202 {
  203         struct rtw89_dack_info *dack = &rtwdev->dack;
  204 
  205         rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
  206         dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
  207                                                     B_ADDCKR0_A0);
  208         dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
  209                                                     B_ADDCKR0_A1);
  210 
  211         rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
  212         dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
  213                                                     B_ADDCKR1_A0);
  214         dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
  215                                                     B_ADDCKR1_A1);
  216 }
  217 
  218 static void _addck_reload(struct rtw89_dev *rtwdev)
  219 {
  220         struct rtw89_dack_info *dack = &rtwdev->dack;
  221 
  222         rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1,
  223                                dack->addck_d[0][0]);
  224         rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0,
  225                                dack->addck_d[0][1]);
  226         rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);
  227         rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1,
  228                                dack->addck_d[1][0]);
  229         rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0,
  230                                dack->addck_d[1][1]);
  231         rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3);
  232 }
  233 
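       /* Read back the per-index MSBK codes plus the bias and DADCK results
        * for path S0 (and S1 below) once DACK has completed.
        */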
  234 static void _dack_backup_s0(struct rtw89_dev *rtwdev)
  235 {
  236         struct rtw89_dack_info *dack = &rtwdev->dack;
  237         u8 i;
  238 
  239         rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
  240         for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
  241                 rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
  242                 dack->msbk_d[0][0][i] = rtw89_phy_read32_mask(rtwdev,
  243                                                               R_DACK_S0P2,
  244                                                               B_DACK_S0M0);
  245                 rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
  246                 dack->msbk_d[0][1][i] = rtw89_phy_read32_mask(rtwdev,
  247                                                               R_DACK_S0P3,
  248                                                               B_DACK_S0M1);
  249         }
  250         dack->biask_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00,
  251                                                     B_DACK_BIAS00);
  252         dack->biask_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01,
  253                                                     B_DACK_BIAS01);
  254         dack->dadck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00,
  255                                                     B_DACK_DADCK00);
  256         dack->dadck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01,
  257                                                     B_DACK_DADCK01);
  258 }
  259 
  260 static void _dack_backup_s1(struct rtw89_dev *rtwdev)
  261 {
  262         struct rtw89_dack_info *dack = &rtwdev->dack;
  263         u8 i;
  264 
  265         rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
  266         for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
  267                 rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
  268                 dack->msbk_d[1][0][i] = rtw89_phy_read32_mask(rtwdev,
  269                                                               R_DACK10S,
  270                                                               B_DACK10S);
  271                 rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
  272                 dack->msbk_d[1][1][i] = rtw89_phy_read32_mask(rtwdev,
  273                                                               R_DACK11S,
  274                                                               B_DACK11S);
  275         }
  276         dack->biask_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10,
  277                                                     B_DACK_BIAS10);
  278         dack->biask_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11,
  279                                                     B_DACK_BIAS11);
  280         dack->dadck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10,
  281                                                     B_DACK_DADCK10);
  282         dack->dadck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11,
  283                                                     B_DACK_DADCK11);
  284 }
  285 
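       /* Write the saved MSBK/bias/DADCK values of one path and DAC index
        * back to hardware, packing four MSBK bytes into each 32-bit word.
        */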
  286 static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
  287                                  enum rtw89_rf_path path, u8 index)
  288 {
  289         struct rtw89_dack_info *dack = &rtwdev->dack;
  290         u32 idx_offset, path_offset;
  291         u32 val32, offset, addr;
  292         u8 i;
  293 
  294         idx_offset = (index == 0 ? 0 : 0x14);
  295         path_offset = (path == RF_PATH_A ? 0 : 0x28);
  296         offset = idx_offset + path_offset;
  297 
  298         rtw89_rfk_parser(rtwdev, &rtw8852c_dack_reload_defs_tbl);
  299 
  300         /* msbk_d: 15/14/13/12 */
  301         val32 = 0x0;
  302         for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
  303                 val32 |= dack->msbk_d[path][index][i + 12] << (i * 8);
  304         addr = 0xc200 + offset;
  305         rtw89_phy_write32(rtwdev, addr, val32);
  306         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
  307                     rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
  308 
  309         /* msbk_d: 11/10/9/8 */
  310         val32 = 0x0;
  311         for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
  312                 val32 |= dack->msbk_d[path][index][i + 8] << (i * 8);
  313         addr = 0xc204 + offset;
  314         rtw89_phy_write32(rtwdev, addr, val32);
  315         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
  316                     rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
  317 
  318         /* msbk_d: 7/6/5/4 */
  319         val32 = 0x0;
  320         for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
  321                 val32 |= dack->msbk_d[path][index][i + 4] << (i * 8);
  322         addr = 0xc208 + offset;
  323         rtw89_phy_write32(rtwdev, addr, val32);
  324         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
  325                     rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
  326 
  327         /* msbk_d: 3/2/1/0 */
  328         val32 = 0x0;
  329         for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
  330                 val32 |= dack->msbk_d[path][index][i] << (i * 8);
  331         addr = 0xc20c + offset;
  332         rtw89_phy_write32(rtwdev, addr, val32);
  333         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
  334                     rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
  335 
  336         /* dadak_d/biask_d */
  337         val32 = (dack->biask_d[path][index] << 22) |
  338                 (dack->dadck_d[path][index] << 14);
  339         addr = 0xc210 + offset;
  340         rtw89_phy_write32(rtwdev, addr, val32);
  341         rtw89_phy_write32_set(rtwdev, addr, BIT(1));
  342 }
  343 
  344 static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
  345 {
  346         u8 i;
  347 
  348         for (i = 0; i < 2; i++)
  349                 _dack_reload_by_path(rtwdev, path, i);
  350 }
  351 
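       /* Trigger ADC DC offset calibration on both paths and poll for the
        * done bit, flagging a timeout if it never rises.
        */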
  352 static void _addck(struct rtw89_dev *rtwdev)
  353 {
  354         struct rtw89_dack_info *dack = &rtwdev->dack;
  355         u32 val;
  356         int ret;
  357 
  358         /* S0 */
  359         rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1);
  360         rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1);
  361         rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0);
  362         fsleep(1);
  363         rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);
  364 
  365         ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
  366                                        1, 10000, false, rtwdev, 0xc0fc, BIT(0));
  367         if (ret) {
  368                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
  369                 dack->addck_timeout[0] = true;
  370         }
  371 
  372         rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0);
  373 
  374         /* S1 */
  375         rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x1);
  376         rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x1);
  377         rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x0);
  378         udelay(1);
  379         rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);
  380 
  381         ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
  382                                        1, 10000, false, rtwdev, 0xc1fc, BIT(0));
  383         if (ret) {
  384                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
   385                 dack->addck_timeout[1] = true;
  386         }
  387         rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x0);
  388 }
  389 
  390 static void _dack_reset(struct rtw89_dev *rtwdev, u8 path)
  391 {
  392         rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
  393                                  &rtw8852c_dack_reset_defs_a_tbl,
  394                                  &rtw8852c_dack_reset_defs_b_tbl);
  395 }
  396 
  397 enum adc_ck {
  398         ADC_NA = 0,
  399         ADC_480M = 1,
  400         ADC_960M = 2,
  401         ADC_1920M = 3,
  402 };
  403 
  404 enum dac_ck {
  405         DAC_40M = 0,
  406         DAC_80M = 1,
  407         DAC_120M = 2,
  408         DAC_160M = 3,
  409         DAC_240M = 4,
  410         DAC_320M = 5,
  411         DAC_480M = 6,
  412         DAC_960M = 7,
  413 };
  414 
  415 enum rf_mode {
  416         RF_SHUT_DOWN = 0x0,
  417         RF_STANDBY = 0x1,
  418         RF_TX = 0x2,
  419         RF_RX = 0x3,
  420         RF_TXIQK = 0x4,
  421         RF_DPK = 0x5,
  422         RF_RXK1 = 0x6,
  423         RF_RXK2 = 0x7,
  424 };
  425 
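       /* Force the per-path DAC/ADC clock to a fixed rate, or release the
        * override when @force is false.
        */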
  426 static void rtw8852c_txck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
  427                                 enum dac_ck ck)
  428 {
  429         rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);
  430 
  431         if (!force)
  432                 return;
  433 
  434         rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
  435         rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
  436 }
  437 
  438 static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
  439                                 enum adc_ck ck)
  440 {
  441         rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);
  442 
  443         if (!force)
  444                 return;
  445 
  446         rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
  447         rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);
  448 }
  449 
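       /* Return true once all four DACK done flags of the chosen path read
        * back as set.
        */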
  450 static bool _check_dack_done(struct rtw89_dev *rtwdev, bool s0)
  451 {
  452         if (s0) {
  453                 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
  454                     rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
  455                     rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
  456                     rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
  457                         return false;
  458         } else {
  459                 if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
  460                     rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 ||
  461                     rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 ||
  462                     rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0)
  463                         return false;
  464         }
  465 
  466         return true;
  467 }
  468 
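       /* Run DAC calibration on one path: force a 160 MHz DAC clock, start
        * the engine, poll for completion, then back up and reload the result.
        */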
  469 static void _dack_s0(struct rtw89_dev *rtwdev)
  470 {
  471         struct rtw89_dack_info *dack = &rtwdev->dack;
  472         bool done;
  473         int ret;
  474 
  475         rtw8852c_txck_force(rtwdev, RF_PATH_A, true, DAC_160M);
  476         rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s0_tbl);
  477 
  478         _dack_reset(rtwdev, RF_PATH_A);
  479 
  480         rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
  481         ret = read_poll_timeout_atomic(_check_dack_done, done, done,
  482                                        1, 10000, false, rtwdev, true);
  483         if (ret) {
  484                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
  485                 dack->msbk_timeout[0] = true;
  486         }
  487         rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0);
  488         rtw8852c_txck_force(rtwdev, RF_PATH_A, false, DAC_960M);
  489         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
  490 
  491         _dack_backup_s0(rtwdev);
  492         _dack_reload(rtwdev, RF_PATH_A);
  493         rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
  494 }
  495 
  496 static void _dack_s1(struct rtw89_dev *rtwdev)
  497 {
  498         struct rtw89_dack_info *dack = &rtwdev->dack;
  499         bool done;
  500         int ret;
  501 
  502         rtw8852c_txck_force(rtwdev, RF_PATH_B, true, DAC_160M);
  503         rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s1_tbl);
  504 
  505         _dack_reset(rtwdev, RF_PATH_B);
  506 
  507         rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1);
  508         ret = read_poll_timeout_atomic(_check_dack_done, done, done,
  509                                        1, 10000, false, rtwdev, false);
  510         if (ret) {
  511                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n");
   512                 dack->msbk_timeout[1] = true;
  513         }
  514         rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0);
  515         rtw8852c_txck_force(rtwdev, RF_PATH_B, false, DAC_960M);
  516         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
  517 
  518         _dack_backup_s1(rtwdev);
  519         _dack_reload(rtwdev, RF_PATH_B);
  520         rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
  521 }
  522 
  523 static void _dack(struct rtw89_dev *rtwdev)
  524 {
  525         _dack_s0(rtwdev);
  526         _dack_s1(rtwdev);
  527 }
  528 
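       /* Run DRCK: start the calibration, wait for the done bit, then copy
        * the measured result into the manual value field.
        */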
  529 static void _drck(struct rtw89_dev *rtwdev)
  530 {
  531         u32 val;
  532         int ret;
  533 
  534         rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);
  535         ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
  536                                        1, 10000, false, rtwdev, 0xc0c8, BIT(3));
  537         if (ret)
   538                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");
  539 
  540         rtw89_rfk_parser(rtwdev, &rtw8852c_drck_defs_tbl);
  541 
  542         val = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, B_DRCK_RES);
  543         rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
  544         rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, val);
  545         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
  546                     rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
  547 }
  548 
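       /* Full DACK flow: save the RF mode, run DRCK and ADDCK, reload the
        * results, run per-path DACK between BTC notifications, then restore
        * the RF state.
        */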
  549 static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
  550 {
  551         struct rtw89_dack_info *dack = &rtwdev->dack;
  552         u32 rf0_0, rf1_0;
  553         u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);
  554 
  555         dack->dack_done = false;
  556         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
  557         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
  558         rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
  559         rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
  560         _drck(rtwdev);
  561 
  562         rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
  563         rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
  564         rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
  565         rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
  566         rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
  567         _addck(rtwdev);
  568         rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
  569 
  570         _addck_backup(rtwdev);
  571         _addck_reload(rtwdev);
  572         rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
  573         rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
  574         rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
  575         _dack(rtwdev);
  576         rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
  577 
  578         _dack_dump(rtwdev);
  579         dack->dack_done = true;
  580         rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
  581         rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
  582         rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
  583         rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
  584         dack->dack_cnt++;
  585         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
  586 }
  587 
  588 #define RTW8852C_NCTL_VER 0xd
  589 #define RTW8852C_IQK_VER 0x2a
  590 #define RTW8852C_IQK_SS 2
  591 #define RTW8852C_IQK_THR_REK 8
  592 #define RTW8852C_IQK_CFIR_GROUP_NR 4
  593 
  594 enum rtw8852c_iqk_type {
  595         ID_TXAGC,
  596         ID_G_FLOK_COARSE,
  597         ID_A_FLOK_COARSE,
  598         ID_G_FLOK_FINE,
  599         ID_A_FLOK_FINE,
  600         ID_FLOK_VBUFFER,
  601         ID_TXK,
  602         ID_RXAGC,
  603         ID_RXK,
  604         ID_NBTXK,
  605         ID_NBRXK,
  606 };
  607 
   608 static void rtw8852c_disable_rxagc(struct rtw89_dev *rtwdev, u8 path, u8 en_rxagc)
   609 {
   610         if (path == RF_PATH_A)
   611                 rtw89_phy_write32_mask(rtwdev, R_P0_AGC_CTL, B_P0_AGC_EN, en_rxagc);
   612         else
   613                 rtw89_phy_write32_mask(rtwdev, R_P1_AGC_CTL, B_P1_AGC_EN, en_rxagc);
  614 }
  615 
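       /* Configure the ADC clock and channel filter bandwidth that RX IQK
        * uses for the current channel width.
        */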
  616 static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
  617 {
  618         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
  619 
  620         if (path == RF_PATH_A)
  621                 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101);
  622         else
  623                 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0202);
  624 
  625         switch (iqk_info->iqk_bw[path]) {
  626         case RTW89_CHANNEL_WIDTH_20:
  627         case RTW89_CHANNEL_WIDTH_40:
  628                 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
  629                 rtw8852c_rxck_force(rtwdev, path, true, ADC_480M);
  630                 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x0);
  631                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x3);
  632                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xf);
  633                 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
  634                 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
  635                 break;
  636         case RTW89_CHANNEL_WIDTH_80:
  637                 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
  638                 rtw8852c_rxck_force(rtwdev, path, true, ADC_960M);
  639                 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x1);
  640                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x2);
  641                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xd);
  642                 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
  643                 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
   644                 break;
  645         case RTW89_CHANNEL_WIDTH_160:
  646                 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
  647                 rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
  648                 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x2);
  649                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1);
  650                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb);
  651                 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
  652                 rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
  653                 break;
  654         default:
  655                 break;
  656         }
  657 
  658         rtw89_rfk_parser(rtwdev, &rtw8852c_iqk_rxk_cfg_defs_tbl);
  659 
  660         if (path == RF_PATH_A)
  661                 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1101);
  662         else
  663                 rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x2202);
  664 }
  665 
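       /* Wait up to ~8 ms for the NCTL ready byte (0xbff8) to become 0x55
        * after a one-shot command and log the report register; the current
        * implementation never reports failure.
        */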
  666 static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
  667 {
  668         u32 tmp;
  669         u32 val;
  670         int ret;
  671 
  672         ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
  673                                        1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
  674         if (ret)
  675                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");
  676 
  677         rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
  678         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
  679         tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
  680         rtw89_debug(rtwdev, RTW89_DBG_RFK,
  681                     "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);
  682 
  683         return false;
  684 }
  685 
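       /* Issue a single NCTL one-shot calibration command for the given
        * type and path, then wait for it to complete.
        */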
  686 static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
  687                           enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
  688 {
  689         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
  690         u32 addr_rfc_ctl = R_UPD_CLK + (path << 13);
  691         u32 iqk_cmd;
  692         bool fail;
  693 
  694         switch (ktype) {
  695         case ID_TXAGC:
  696                 iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
  697                 break;
  698         case ID_A_FLOK_COARSE:
  699                 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
  700                 iqk_cmd = 0x008 | (1 << (4 + path));
  701                 break;
  702         case ID_G_FLOK_COARSE:
  703                 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
  704                 iqk_cmd = 0x108 | (1 << (4 + path));
  705                 break;
  706         case ID_A_FLOK_FINE:
  707                 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
  708                 iqk_cmd = 0x508 | (1 << (4 + path));
  709                 break;
  710         case ID_G_FLOK_FINE:
  711                 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
  712                 iqk_cmd = 0x208 | (1 << (4 + path));
  713                 break;
  714         case ID_FLOK_VBUFFER:
  715                 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
  716                 iqk_cmd = 0x308 | (1 << (4 + path));
  717                 break;
  718         case ID_TXK:
  719                 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
  720                 iqk_cmd = 0x008 | (1 << (4 + path)) | ((0x8 + iqk_info->iqk_bw[path]) << 8);
  721                 break;
  722         case ID_RXAGC:
  723                 iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
  724                 break;
  725         case ID_RXK:
  726                 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
  727                 iqk_cmd = 0x008 | (1 << (4 + path)) | ((0xc + iqk_info->iqk_bw[path]) << 8);
  728                 break;
  729         case ID_NBTXK:
  730                 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
  731                 iqk_cmd = 0x408 | (1 << (4 + path));
  732                 break;
  733         case ID_NBRXK:
  734                 rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
  735                 iqk_cmd = 0x608 | (1 << (4 + path));
  736                 break;
  737         default:
  738                 return false;
  739         }
  740 
  741         rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
  742         fsleep(15);
  743         fail = _iqk_check_cal(rtwdev, path, ktype);
  744         rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
  745 
  746         return fail;
  747 }
  748 
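       /* Wideband RX IQK: for each gain group of the current band, program
        * the RX gain/attenuation and run a one-shot RXK per CFIR LUT group.
        */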
  749 static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
  750                            enum rtw89_phy_idx phy_idx, u8 path)
  751 {
  752         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
  753         bool fail;
  754         u32 tmp;
  755         u32 bkrf0;
  756         u8 gp;
  757 
  758         bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
  759         if (path == RF_PATH_B) {
  760                 rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
  761                 tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
  762                 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
  763                 tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
  764                 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
  765         }
  766 
  767         switch (iqk_info->iqk_band[path]) {
  768         case RTW89_BAND_2G:
  769         default:
  770                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
  771                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
  772                 rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
  773                 break;
  774         case RTW89_BAND_5G:
  775                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
  776                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
  777                 rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
  778                 break;
  779         case RTW89_BAND_6G:
  780                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
  781                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
  782                 rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
  783                 break;
  784         }
  785 
  786         fsleep(10);
  787 
  788         for (gp = 0; gp < RXK_GROUP_NR; gp++) {
  789                 switch (iqk_info->iqk_band[path]) {
  790                 case RTW89_BAND_2G:
  791                 default:
  792                         rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
  793                                        _rxk_g_idxrxgain[gp]);
  794                         rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF,
  795                                        _rxk_g_idxattc2[gp]);
  796                         break;
  797                 case RTW89_BAND_5G:
  798                         rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
  799                                        _rxk_a_idxrxgain[gp]);
  800                         rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
  801                                        _rxk_a_idxattc2[gp]);
  802                         break;
  803                 case RTW89_BAND_6G:
  804                         rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
  805                                        _rxk_a6_idxrxgain[gp]);
  806                         rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
  807                                        _rxk_a6_idxattc2[gp]);
  808                         break;
  809                 }
  810                 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
  811                                        B_CFIR_LUT_SEL, 0x1);
  812                 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
  813                                        B_CFIR_LUT_SET, 0x0);
  814                 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
  815                                        B_CFIR_LUT_GP_V1, gp);
  816                 fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
  817         }
  818 
  819         if (path == RF_PATH_B)
  820                 rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);
  821         rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);
  822 
  823         if (fail) {
  824                 iqk_info->nb_rxcfir[path] = 0x40000002;
  825                 iqk_info->is_wb_rxiqk[path] = false;
  826         } else {
  827                 iqk_info->nb_rxcfir[path] = 0x40000000;
  828                 iqk_info->is_wb_rxiqk[path] = true;
  829         }
  830 
  831         return false;
  832 }
  833 
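       /* Narrow-band RX IQK at a single fixed gain group; record the RX
        * CFIR value to be used afterwards.
        */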
  834 static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
  835                        enum rtw89_phy_idx phy_idx, u8 path)
  836 {
  837         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
  838         bool fail;
  839         u32 tmp;
  840         u32 bkrf0;
  841         u8 gp = 0x2;
  842 
  843         bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
  844         if (path == RF_PATH_B) {
  845                 rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
  846                 tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
  847                 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
  848                 tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
  849                 rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
  850         }
  851 
  852         switch (iqk_info->iqk_band[path]) {
  853         case RTW89_BAND_2G:
  854         default:
  855                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
  856                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
  857                 rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
  858                 break;
  859         case RTW89_BAND_5G:
  860                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
  861                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
  862                 rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
  863                 break;
  864         case RTW89_BAND_6G:
  865                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
  866                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
  867                 rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
  868                 break;
  869         }
  870 
  871         fsleep(10);
  872 
  873         switch (iqk_info->iqk_band[path]) {
  874         case RTW89_BAND_2G:
  875         default:
  876                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_g_idxrxgain[gp]);
  877                 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF, _rxk_g_idxattc2[gp]);
  878                 break;
  879         case RTW89_BAND_5G:
  880                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a_idxrxgain[gp]);
  881                 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a_idxattc2[gp]);
  882                 break;
  883         case RTW89_BAND_6G:
  884                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a6_idxrxgain[gp]);
  885                 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a6_idxattc2[gp]);
  886                 break;
  887         }
  888 
  889         rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
  890         rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0);
  891         rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp);
  892         fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
  893 
  894         if (path == RF_PATH_B)
  895                 rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);
  896 
  897         rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);
  898 
  899         if (fail)
  900                 iqk_info->nb_rxcfir[path] =
  901                         rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
  902                                               MASKDWORD) | 0x2;
  903         else
  904                 iqk_info->nb_rxcfir[path] = 0x40000002;
  905 
  906         iqk_info->is_wb_rxiqk[path] = false;
  907         return fail;
  908 }
  909 
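       /* Wideband TX IQK: program the per-band TX gain tables for each
        * group and run a one-shot TXK per CFIR LUT group.
        */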
  910 static bool _txk_group_sel(struct rtw89_dev *rtwdev,
  911                            enum rtw89_phy_idx phy_idx, u8 path)
  912 {
  913         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
  914         bool fail;
  915         u8 gp;
  916 
  917         for (gp = 0; gp < TXK_GROUP_NR; gp++) {
  918                 switch (iqk_info->iqk_band[path]) {
  919                 case RTW89_BAND_2G:
  920                         rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
  921                                        _txk_g_power_range[gp]);
  922                         rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
  923                                        _txk_g_track_range[gp]);
  924                         rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
  925                                        _txk_g_gain_bb[gp]);
  926                         rtw89_phy_write32_mask(rtwdev,
  927                                                R_KIP_IQP + (path << 8),
  928                                                MASKDWORD, _txk_g_itqt[gp]);
  929                         break;
  930                 case RTW89_BAND_5G:
  931                         rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
  932                                        _txk_a_power_range[gp]);
  933                         rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
  934                                        _txk_a_track_range[gp]);
  935                         rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
  936                                        _txk_a_gain_bb[gp]);
  937                         rtw89_phy_write32_mask(rtwdev,
  938                                                R_KIP_IQP + (path << 8),
  939                                                MASKDWORD, _txk_a_itqt[gp]);
  940                         break;
  941                 case RTW89_BAND_6G:
  942                         rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
  943                                        _txk_a6_power_range[gp]);
  944                         rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
  945                                        _txk_a6_track_range[gp]);
  946                         rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
  947                                        _txk_a6_gain_bb[gp]);
  948                         rtw89_phy_write32_mask(rtwdev,
  949                                                R_KIP_IQP + (path << 8),
  950                                                MASKDWORD, _txk_a6_itqt[gp]);
  951                         break;
  952                 default:
  953                         break;
  954                 }
  955                 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
  956                                        B_CFIR_LUT_SEL, 0x1);
  957                 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
  958                                        B_CFIR_LUT_SET, 0x1);
  959                 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
  960                                        B_CFIR_LUT_G2, 0x0);
  961                 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
  962                                        B_CFIR_LUT_GP, gp + 1);
  963                 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
  964                 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
  965                 fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
  966         }
  967 
  968         if (fail) {
  969                 iqk_info->nb_txcfir[path] = 0x40000002;
  970                 iqk_info->is_wb_txiqk[path] = false;
  971         } else {
  972                 iqk_info->nb_txcfir[path] = 0x40000000;
  973                 iqk_info->is_wb_txiqk[path] = true;
  974         }
  975 
  976         return fail;
  977 }
  978 
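       /* Narrow-band TX IQK at a single fixed gain group; keep the
        * read-back TX CFIR when the one-shot succeeds.
        */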
  979 static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
  980                        enum rtw89_phy_idx phy_idx, u8 path)
  981 {
  982         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
  983         bool fail;
  984         u8 gp = 0x2;
  985 
  986         switch (iqk_info->iqk_band[path]) {
  987         case RTW89_BAND_2G:
  988                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_g_power_range[gp]);
  989                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_g_track_range[gp]);
  990                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_g_gain_bb[gp]);
  991                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
  992                                        MASKDWORD, _txk_g_itqt[gp]);
  993                 break;
  994         case RTW89_BAND_5G:
  995                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a_power_range[gp]);
  996                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a_track_range[gp]);
  997                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a_gain_bb[gp]);
  998                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
  999                                        MASKDWORD, _txk_a_itqt[gp]);
 1000                 break;
 1001         case RTW89_BAND_6G:
 1002                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a6_power_range[gp]);
 1003                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a6_track_range[gp]);
 1004                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a6_gain_bb[gp]);
 1005                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1006                                        MASKDWORD, _txk_a6_itqt[gp]);
  1007                 break;
 1008         default:
 1009                 break;
 1010         }
 1011 
 1012         rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
 1013         rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1);
 1014         rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0);
 1015         rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp + 1);
 1016         rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
 1017         rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
 1018         fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
 1019 
 1020         if (!fail)
 1021                 iqk_info->nb_txcfir[path] =
 1022                         rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
 1023                                               MASKDWORD) | 0x2;
 1024         else
 1025                 iqk_info->nb_txcfir[path] = 0x40000002;
 1026 
 1027         iqk_info->is_wb_txiqk[path] = false;
 1028 
 1029         return fail;
 1030 }
 1031 
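       /* Validate the LOK result: read the core and vbuffer I/Q DAC codes
        * and fail if either lies outside the expected range.
        */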
 1032 static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
 1033 {
 1034         struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
 1035         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
 1036         u8 idx = mcc_info->table_idx;
  1037         bool is_fail1, is_fail2;
 1038         u32 val;
 1039         u32 core_i;
 1040         u32 core_q;
 1041         u32 vbuff_i;
 1042         u32 vbuff_q;
 1043 
 1044         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
  1045         val = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
 1046         core_i = FIELD_GET(RR_TXMO_COI, val);
 1047         core_q = FIELD_GET(RR_TXMO_COQ, val);
 1048 
 1049         if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
 1050                 is_fail1 = true;
 1051         else
 1052                 is_fail1 = false;
 1053 
 1054         iqk_info->lok_idac[idx][path] = val;
 1055 
 1056         val = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK);
 1057         vbuff_i = FIELD_GET(RR_LOKVB_COI, val);
 1058         vbuff_q = FIELD_GET(RR_LOKVB_COQ, val);
 1059 
 1060         if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d)
 1061                 is_fail2 = true;
 1062         else
 1063                 is_fail2 = false;
 1064 
 1065         iqk_info->lok_vbuf[idx][path] = val;
 1066 
 1067         return is_fail1 || is_fail2;
 1068 }
 1069 
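       /* LOK sequence: coarse FLOK, a vbuffer step, fine FLOK and a final
        * vbuffer step at larger TX gain, then check the resulting codes.
        */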
 1070 static bool _iqk_lok(struct rtw89_dev *rtwdev,
 1071                      enum rtw89_phy_idx phy_idx, u8 path)
 1072 {
 1073         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
 1074         u8 tmp_id = 0x0;
 1075         bool fail = false;
 1076         bool tmp = false;
 1077 
  1078         /* Step 0: Init RF gain & tone idx = 8.25 MHz */
 1079         rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, IQK_DF4_TXT_8_25MHZ);
 1080 
  1081         /* Step 1 START: _lok_coarse_fine_wi_swap */
 1082         switch (iqk_info->iqk_band[path]) {
 1083         case RTW89_BAND_2G:
 1084                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
 1085                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1086                                        B_KIP_IQP_IQSW, 0x9);
 1087                 tmp_id = ID_G_FLOK_COARSE;
 1088                 break;
 1089         case RTW89_BAND_5G:
 1090                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
 1091                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1092                                        B_KIP_IQP_IQSW, 0x9);
 1093                 tmp_id = ID_A_FLOK_COARSE;
 1094                 break;
 1095         case RTW89_BAND_6G:
 1096                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
 1097                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1098                                        B_KIP_IQP_IQSW, 0x9);
 1099                 tmp_id = ID_A_FLOK_COARSE;
 1100                 break;
 1101         default:
 1102                 break;
 1103         }
 1104         tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
 1105         iqk_info->lok_cor_fail[0][path] = tmp;
 1106 
 1107         /* Step 2 */
 1108         switch (iqk_info->iqk_band[path]) {
 1109         case RTW89_BAND_2G:
 1110                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
 1111                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1112                                        B_KIP_IQP_IQSW, 0x1b);
 1113                 break;
 1114         case RTW89_BAND_5G:
 1115                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
 1116                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1117                                        B_KIP_IQP_IQSW, 0x1b);
 1118                 break;
 1119         case RTW89_BAND_6G:
 1120                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
 1121                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1122                                        B_KIP_IQP_IQSW, 0x1b);
 1123                 break;
 1124         default:
 1125                 break;
 1126         }
 1127         tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);
 1128 
 1129         /* Step 3 */
 1130         switch (iqk_info->iqk_band[path]) {
 1131         case RTW89_BAND_2G:
 1132                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
 1133                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1134                                        B_KIP_IQP_IQSW, 0x9);
 1135                 tmp_id = ID_G_FLOK_FINE;
 1136                 break;
 1137         case RTW89_BAND_5G:
 1138                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
 1139                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1140                                        B_KIP_IQP_IQSW, 0x9);
 1141                 tmp_id = ID_A_FLOK_FINE;
 1142                 break;
 1143         case RTW89_BAND_6G:
 1144                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
 1145                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1146                                        B_KIP_IQP_IQSW, 0x9);
 1147                 tmp_id = ID_A_FLOK_FINE;
 1148                 break;
 1149         default:
 1150                 break;
 1151         }
 1152         tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
 1153         iqk_info->lok_fin_fail[0][path] = tmp;
 1154 
 1155         /* Step 4 large rf gain */
 1156         switch (iqk_info->iqk_band[path]) {
 1157         case RTW89_BAND_2G:
 1158         default:
 1159                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
 1160                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1161                                        B_KIP_IQP_IQSW, 0x1b);
 1162                 break;
 1163         case RTW89_BAND_5G:
 1164                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
 1165                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1166                                        B_KIP_IQP_IQSW, 0x1b);
 1167                 break;
 1168         case RTW89_BAND_6G:
 1169                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
 1170                 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1171                                        B_KIP_IQP_IQSW, 0x1b);
 1172                 break;
 1173         }
 1174         tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);
 1175         fail = _lok_finetune_check(rtwdev, path);
 1176 
 1177         return fail;
 1178 }
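      /*
       * LOK above is a fixed four-step sequence per path:
       *   1) coarse LOK at the small TX gain setting (RR_TXIG_TG = 0x6,
       *      B_KIP_IQP_IQSW = 0x9), result latched in lok_cor_fail[];
       *   2) an ID_FLOK_VBUFFER one-shot at the large gain setting
       *      (TG = 0x12, IQSW = 0x1b);
       *   3) fine LOK at the small gain setting again (ID_G_FLOK_FINE on
       *      2 GHz, ID_A_FLOK_FINE on 5/6 GHz), result in lok_fin_fail[];
       *   4) a second VBUFFER one-shot at the large gain setting, after
       *      which _lok_finetune_check() decides the return value.
       * In steps 2 and 4 the 2G/5G/6G switch arms currently program
       * identical values; the per-band split presumably exists so they
       * can be tuned independently.
       */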
 1179 
 1180 static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
 1181 {
 1182         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
 1183 
 1184         switch (iqk_info->iqk_band[path]) {
 1185         case RTW89_BAND_2G:
 1186         default:
 1187                 rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
 1188                 rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
 1189                 rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
 1190                 rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
 1191                 rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
 1192                 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
 1193                 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
 1194                                0x403e0 | iqk_info->syn1to2);
 1195                 fsleep(10);
 1196                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
 1197                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
 1198                 break;
 1199         case RTW89_BAND_5G:
 1200                 rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
 1201                 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
 1202                 rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
 1203                 rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
 1204                 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
 1205                 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
 1206                                0x403e0 | iqk_info->syn1to2);
 1207                 fsleep(10);
 1208                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
 1209                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
 1210                 break;
 1211         case RTW89_BAND_6G:
 1212                 rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
 1213                 rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
 1214                 rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
 1215                 rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
 1216                 rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
 1217                 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
 1218                                0x403e0 | iqk_info->syn1to2);
 1219                 fsleep(10);
 1220                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
 1221                 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
 1222                 break;
 1223         }
 1224 }
 1225 
 1226 static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
 1227                           u8 path)
 1228 {
 1229         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
 1230         u32 tmp;
 1231         bool flag;
 1232 
 1233         iqk_info->thermal[path] =
 1234                 ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
 1235         iqk_info->thermal_rek_en = false;
 1236         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %d\n", path,
 1237                     iqk_info->thermal[path]);
 1238         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail = %d\n", path,
 1239                     iqk_info->lok_cor_fail[0][path]);
 1240         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail = %d\n", path,
 1241                     iqk_info->lok_fin_fail[0][path]);
 1242         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
 1243                     iqk_info->iqk_tx_fail[0][path]);
 1244         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail = %d\n", path,
 1245                     iqk_info->iqk_rx_fail[0][path]);
 1246 
 1247         flag = iqk_info->lok_cor_fail[0][path];
 1248         rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag);
 1249         flag = iqk_info->lok_fin_fail[0][path];
 1250         rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag);
 1251         flag = iqk_info->iqk_tx_fail[0][path];
 1252         rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag);
 1253         flag = iqk_info->iqk_rx_fail[0][path];
 1254         rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag);
 1255 
 1256         tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
 1257         iqk_info->bp_iqkenable[path] = tmp;
 1258         tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
 1259         iqk_info->bp_txkresult[path] = tmp;
 1260         tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
 1261         iqk_info->bp_rxkresult[path] = tmp;
 1262 
 1263         rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
 1264                                iqk_info->iqk_times);
 1265 
 1266         tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4));
 1267         if (tmp != 0x0)
 1268                 iqk_info->iqk_fail_cnt++;
 1269         rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4),
 1270                                iqk_info->iqk_fail_cnt);
 1271 }
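      /*
       * Every flag mask above is shifted by (path * 4), so R_IQKINF packs
       * one 4-bit status nibble per RF path: one flag each for coarse LOK,
       * fine LOK, TX IQK and RX IQK failure (the exact bit order follows
       * the B_IQKINF_* definitions).  R_IQKINF2 carries the global run
       * counter (B_IQKINF2_KCNT) plus a per-path failure counter using the
       * same (path * 4) stride, and the raw IQK-enable/TX-CFIR/RX-CFIR
       * words are snapshotted into the bp_* fields for later restore.
       */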
 1272 
 1273 static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
 1274 {
 1275         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
 1276 
 1277         _iqk_txk_setting(rtwdev, path);
 1278         iqk_info->lok_fail[path] = _iqk_lok(rtwdev, phy_idx, path);
 1279 
 1280         if (iqk_info->is_nbiqk)
 1281                 iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
 1282         else
 1283                 iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);
 1284 
 1285         _iqk_rxk_setting(rtwdev, path);
 1286         if (iqk_info->is_nbiqk)
 1287                 iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
 1288         else
 1289                 iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);
 1290 
 1291         _iqk_info_iqk(rtwdev, phy_idx, path);
 1292 }
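      /*
       * Per-path calibration order: TX-side RF setup, LOK, TX IQK, then
       * RX-side setup and RX IQK.  is_nbiqk selects the narrow-band
       * one-shot variants (_iqk_nbtxk/_iqk_nbrxk) over the full gain-group
       * sweeps (_txk_group_sel/_rxk_group_sel); the fail flags land in
       * iqk_tx_fail/iqk_rx_fail and are summarised by _iqk_info_iqk().
       */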
 1293 
 1294 static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
 1295                              enum rtw89_phy_idx phy, u8 path)
 1296 {
 1297         const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 1298         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
 1299 
 1300         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
 1301 
 1302         iqk_info->iqk_band[path] = chan->band_type;
 1303         iqk_info->iqk_bw[path] = chan->band_width;
 1304         iqk_info->iqk_ch[path] = chan->channel;
 1305 
 1306         rtw89_debug(rtwdev, RTW89_DBG_RFK,
 1307                     "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
 1308                     iqk_info->iqk_band[path]);
 1309         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
 1310                     path, iqk_info->iqk_bw[path]);
 1311         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
 1312                     path, iqk_info->iqk_ch[path]);
 1313         rtw89_debug(rtwdev, RTW89_DBG_RFK,
 1314                     "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
 1315                     rtwdev->dbcc_en ? "on" : "off",
 1316                     iqk_info->iqk_band[path] == 0 ? "2G" :
 1317                     iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
 1318                     iqk_info->iqk_ch[path],
 1319                     iqk_info->iqk_bw[path] == 0 ? "20M" :
 1320                     iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
 1321         if (!rtwdev->dbcc_en)
 1322                 iqk_info->syn1to2 = 0x1;
 1323         else
 1324                 iqk_info->syn1to2 = 0x3;
 1325 
 1326         rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852C_IQK_VER);
 1327         rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16),
 1328                                iqk_info->iqk_band[path]);
 1329         rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16),
 1330                                iqk_info->iqk_bw[path]);
 1331         rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16),
 1332                                iqk_info->iqk_ch[path]);
 1333 
 1334         rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_NCTLV, RTW8852C_NCTL_VER);
 1335 }
 1336 
 1337 static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
 1338                            u8 path)
 1339 {
 1340         _iqk_by_path(rtwdev, phy_idx, path);
 1341 }
 1342 
 1343 static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
 1344 {
 1345         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
 1346         bool fail;
 1347 
 1348         rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
 1349                                iqk_info->nb_txcfir[path]);
 1350         rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
 1351                                iqk_info->nb_rxcfir[path]);
 1352         rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
 1353                                0x00001219 + (path << 4));
 1354         fsleep(200);
 1355         fail = _iqk_check_cal(rtwdev, path, 0x12);
 1356         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail = %x\n", fail);
 1357 
 1358         rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
 1359         rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
 1360         rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
 1361 
 1362         rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
 1363         rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
 1364         rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
 1365 }
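      /*
       * Restore flow: the saved narrow-band CFIR words are written back to
       * R_TXIQC/R_RXIQC, an NCTL command of 0x00001219 + (path << 4) is
       * issued (0x1219 for path A, 0x1229 for path B), and after a 200 us
       * wait _iqk_check_cal(..., 0x12) verifies it completed.  The NCTL
       * report/config registers are then returned to their idle values,
       * the LOK LUT write-enable is cleared, the RF section is put back
       * into RX mode and its reset (RR_RSV1_RST) is released.
       */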
 1366 
 1367 static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
 1368                                enum rtw89_phy_idx phy_idx, u8 path)
 1369 {
 1370         rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
 1371                                  &rtw8852c_iqk_afebb_restore_defs_a_tbl,
 1372                                  &rtw8852c_iqk_afebb_restore_defs_b_tbl);
 1373 
 1374         rtw8852c_disable_rxagc(rtwdev, path, 0x1);
 1375 }
 1376 
 1377 static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
 1378 {
 1379         struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
 1380         u8 idx = 0;
 1381 
 1382         idx = mcc_info->table_idx;
 1383         rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
 1384         rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);
 1385         rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
 1386         rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
 1387         rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
 1388 }
 1389 
 1390 static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
 1391                                enum rtw89_phy_idx phy_idx, u8 path)
 1392 {
 1393         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);
 1394 
 1395         /* 01_BB_AFE_for DPK_S0_20210820 */
 1396         rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
 1397         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
 1398         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
 1399         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
 1400         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
 1401 
 1402         /* disable rxagc */
 1403         rtw8852c_disable_rxagc(rtwdev, path, 0x0);
 1404         rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), MASKDWORD, 0xf801fffd);
 1405         rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_DIS, 0x1);
 1406         rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DAC_VAL, 0x1);
 1407 
 1408         rtw8852c_txck_force(rtwdev, path, true, DAC_960M);
 1409         rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_GDIS, 0x1);
 1410 
 1411         rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
 1412         rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_ACK_VAL, 0x2);
 1413 
 1414         rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
 1415         rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
 1416         rtw89_phy_write32_mask(rtwdev, R_P0_NRBW | (path << 13), B_P0_NRBW_DBG, 0x1);
 1417         rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
 1418         rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
 1419         rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
 1420         rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
 1421         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
 1422         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);
 1423 }
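      /*
       * BB/AFE bring-up for IQK: the per-path ADC FIFO control bits are
       * held in reset (A0/A2 set, A1/A3 cleared) while RX AGC is disabled,
       * the update-clock register is forced into its debug configuration,
       * the DAC is forced to 960 MHz and the ADC to 1920 MHz, and the
       * channel-filter bandwidth is widened; only then are A1/A3 set
       * again.  Note the per-path strides differ: R_UPD_CLK and R_P0_NRBW
       * use (path << 13) while R_P0_CFCH_BW0/1 use (path << 8).  The DPK
       * variant _dpk_bb_afe_setting() below follows the same pattern.
       */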
 1424 
 1425 static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
 1426 {
 1427         u32 rf_reg5, rck_val = 0;
 1428         u32 val;
 1429         int ret;
 1430 
 1431         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);
 1432 
 1433         rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
 1434 
 1435         rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
 1436         rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
 1437 
 1438         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
 1439                     rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
 1440 
 1441         /* RCK trigger */
 1442         rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);
 1443 
 1444         ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
 1445                                        false, rtwdev, path, 0x1c, BIT(3));
 1446         if (ret)
 1447                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");
 1448 
 1449         rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
 1450         rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
 1451 
 1452         rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
 1453 
 1454         rtw89_debug(rtwdev, RTW89_DBG_RFK,
 1455                     "[RCK] RF 0x1b / 0x1c = 0x%x / 0x%x\n",
 1456                     rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
 1457                     rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK));
 1458 }
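      /*
       * RCK is a simple trigger-and-poll sequence: RR_RSV1 is saved and
       * its reset bit cleared, the path is switched to RX, writing 0x00240
       * to RR_RCKC starts the calibration, and RF register 0x1c bit 3 is
       * polled (2 us step, 20 us budget) for completion.  The calibrated
       * RR_RCKC_CA code is then written back over the whole RR_RCKC
       * register and RR_RSV1 is restored; a timeout only logs, the
       * read-back value is used either way.
       */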
 1459 
 1460 static void _iqk_init(struct rtw89_dev *rtwdev)
 1461 {
 1462         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
 1463         u8 ch, path;
 1464 
 1465         rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
 1466         if (iqk_info->is_iqk_init)
 1467                 return;
 1468 
 1469         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
 1470         iqk_info->is_iqk_init = true;
 1471         iqk_info->is_nbiqk = false;
 1472         iqk_info->iqk_fft_en = false;
 1473         iqk_info->iqk_sram_en = false;
 1474         iqk_info->iqk_cfir_en = false;
 1475         iqk_info->iqk_xym_en = false;
 1476         iqk_info->thermal_rek_en = false;
 1477         iqk_info->iqk_times = 0x0;
 1478 
 1479         for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
 1480                 iqk_info->iqk_channel[ch] = 0x0;
 1481                 for (path = 0; path < RTW8852C_IQK_SS; path++) {
 1482                         iqk_info->lok_cor_fail[ch][path] = false;
 1483                         iqk_info->lok_fin_fail[ch][path] = false;
 1484                         iqk_info->iqk_tx_fail[ch][path] = false;
 1485                         iqk_info->iqk_rx_fail[ch][path] = false;
 1486                         iqk_info->iqk_mcc_ch[ch][path] = 0x0;
 1487                         iqk_info->iqk_table_idx[path] = 0x0;
 1488                 }
 1489         }
 1490 }
 1491 
 1492 static void _doiqk(struct rtw89_dev *rtwdev, bool force,
 1493                    enum rtw89_phy_idx phy_idx, u8 path)
 1494 {
 1495         struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
 1496         u32 backup_bb_val[BACKUP_BB_REGS_NR];
 1497         u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR];
 1498         u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
 1499 
 1500         rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
 1501 
 1502         rtw89_debug(rtwdev, RTW89_DBG_RFK,
 1503                     "[IQK]==========IQK start!!!!!==========\n");
 1504         iqk_info->iqk_times++;
 1505         iqk_info->kcount = 0;
 1506         iqk_info->version = RTW8852C_IQK_VER;
 1507 
 1508         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
 1509         _iqk_get_ch_info(rtwdev, phy_idx, path);
 1510         _rfk_backup_bb_reg(rtwdev, backup_bb_val);
 1511         _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
 1512         _iqk_macbb_setting(rtwdev, phy_idx, path);
 1513         _iqk_preset(rtwdev, path);
 1514         _iqk_start_iqk(rtwdev, phy_idx, path);
 1515         _iqk_restore(rtwdev, path);
 1516         _iqk_afebb_restore(rtwdev, phy_idx, path);
 1517         _rfk_restore_bb_reg(rtwdev, backup_bb_val);
 1518         _rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
 1519         rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
 1520 }
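      /*
       * Top-level per-path flow: notify BTC that an IQK one-shot starts,
       * save the BB/RF registers listed near the top of the file, snapshot
       * the current channel, set up the MAC/BB/AFE debug environment,
       * preset the KIP block, run the per-path IQK, then undo everything
       * in reverse order and notify BTC again.  The 'force' argument is
       * accepted but not consulted in this body.
       */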
 1521 
 1522 static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
 1523 {
 1524         switch (_kpath(rtwdev, phy_idx)) {
 1525         case RF_A:
 1526                 _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
 1527                 break;
 1528         case RF_B:
 1529                 _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
 1530                 break;
 1531         case RF_AB:
 1532                 _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
 1533                 _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
 1534                 break;
 1535         default:
 1536                 break;
 1537         }
 1538 }
 1539 
 1540 static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path)
 1541 {
 1542         int ret;
 1543         u32 val;
 1544 
 1545         rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
 1546         rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
 1547 
 1548         ret = read_poll_timeout_atomic(rtw89_read_rf, val, val,
 1549                                        2, 1000, false, rtwdev, path, 0x93, BIT(5));
 1550         if (ret)
 1551                 rtw89_warn(rtwdev, "[RX_DCK] S%d RXDCK timeout\n", path);
 1552         else
 1553                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] S%d RXDCK finish\n", path);
 1554 
 1555         rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
 1556 }
 1557 
 1558 static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
 1559                         bool is_afe)
 1560 {
 1561         u8 res;
 1562 
 1563         rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
 1564 
 1565         _rx_dck_toggle(rtwdev, path);
 1566         if (rtw89_read_rf(rtwdev, path, RR_DCKC, RR_DCKC_CHK) == 0)
 1567                 return;
 1568         res = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_DONE);
 1569         if (res > 1) {
 1570                 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, res);
 1571                 _rx_dck_toggle(rtwdev, path);
 1572                 rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, 0x1);
 1573         }
 1574 }
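      /*
       * RX DCK with one retry: RR_DCK1_CLR is cleared and RR_DCK_LV pulsed
       * low-then-high (the toggle polls RF register 0x93 bit 5 for up to
       * 1 ms).  If RR_DCKC_CHK reads zero the result stands; otherwise
       * RR_DCK_DONE is read and, when greater than 1, the RXBB IDAC is
       * temporarily forced to that value, the DCK re-run, and the IDAC
       * returned to 0x1.  The 'is_afe' argument is unused in this body.
       */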
 1575 
 1576 #define RTW8852C_RF_REL_VERSION 34
 1577 #define RTW8852C_DPK_VER 0x10
 1578 #define RTW8852C_DPK_TH_AVG_NUM 4
 1579 #define RTW8852C_DPK_RF_PATH 2
 1580 #define RTW8852C_DPK_KIP_REG_NUM 5
 1581 #define RTW8852C_DPK_RXSRAM_DBG 0
 1582 
 1583 enum rtw8852c_dpk_id {
 1584         LBK_RXIQK       = 0x06,
 1585         SYNC            = 0x10,
 1586         MDPK_IDL        = 0x11,
 1587         MDPK_MPA        = 0x12,
 1588         GAIN_LOSS       = 0x13,
 1589         GAIN_CAL        = 0x14,
 1590         DPK_RXAGC       = 0x15,
 1591         KIP_PRESET      = 0x16,
 1592         KIP_RESTORE     = 0x17,
 1593         DPK_TXAGC       = 0x19,
 1594         D_KIP_PRESET    = 0x28,
 1595         D_TXAGC         = 0x29,
 1596         D_RXAGC         = 0x2a,
 1597         D_SYNC          = 0x2b,
 1598         D_GAIN_LOSS     = 0x2c,
 1599         D_MDPK_IDL      = 0x2d,
 1600         D_GAIN_NORM     = 0x2f,
 1601         D_KIP_THERMAL   = 0x30,
 1602         D_KIP_RESTORE   = 0x31
 1603 };
 1604 
 1605 #define DPK_TXAGC_LOWER 0x2e
 1606 #define DPK_TXAGC_UPPER 0x3f
 1607 #define DPK_TXAGC_INVAL 0xff
 1608 
 1609 enum dpk_agc_step {
 1610         DPK_AGC_STEP_SYNC_DGAIN,
 1611         DPK_AGC_STEP_GAIN_LOSS_IDX,
 1612         DPK_AGC_STEP_GL_GT_CRITERION,
 1613         DPK_AGC_STEP_GL_LT_CRITERION,
 1614         DPK_AGC_STEP_SET_TX_GAIN,
 1615 };
 1616 
 1617 static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
 1618                              enum rtw89_rf_path path, bool is_bybb)
 1619 {
 1620         if (is_bybb)
 1621                 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
 1622         else
 1623                 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
 1624 }
 1625 
 1626 static void _dpk_onoff(struct rtw89_dev *rtwdev,
 1627                        enum rtw89_rf_path path, bool off);
 1628 
 1629 static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
 1630                           u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
 1631 {
 1632         u8 i;
 1633 
 1634         for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) {
 1635                 reg_bkup[path][i] =
 1636                         rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD);
 1637 
 1638                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
 1639                             reg[i] + (path << 8), reg_bkup[path][i]);
 1640         }
 1641 }
 1642 
 1643 static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
 1644                             u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
 1645 {
 1646         u8 i;
 1647 
 1648         for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) {
 1649                 rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8),
 1650                                        MASKDWORD, reg_bkup[path][i]);
 1651                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
 1652                             reg[i] + (path << 8), reg_bkup[path][i]);
 1653         }
 1654 }
 1655 
 1656 static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 1657                         enum rtw89_rf_path path, enum rtw8852c_dpk_id id)
 1658 {
 1659         u16 dpk_cmd;
 1660         u32 val;
 1661         int ret;
 1662 
 1663         dpk_cmd = (u16)((id << 8) | (0x19 + path * 0x12));
 1664 
 1665         rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
 1666 
 1667         ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
 1668                                        10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);
 1669         mdelay(10);
 1670         rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
 1671 
 1672         rtw89_debug(rtwdev, RTW89_DBG_RFK,
 1673                     "[DPK] one-shot for %s = 0x%x (ret=%d)\n",
 1674                     id == 0x06 ? "LBK_RXIQK" :
 1675                     id == 0x10 ? "SYNC" :
 1676                     id == 0x11 ? "MDPK_IDL" :
 1677                     id == 0x12 ? "MDPK_MPA" :
 1678                     id == 0x13 ? "GAIN_LOSS" : "PWR_CAL",
 1679                     dpk_cmd, ret);
 1680 
 1681         if (ret) {
 1682                 rtw89_debug(rtwdev, RTW89_DBG_RFK,
 1683                             "[DPK] one-shot over 20ms!!!!\n");
 1684                 return 1;
 1685         }
 1686 
 1687         return 0;
 1688 }
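      /*
       * dpk_cmd packs the stage id into the high byte and a per-path
       * trigger code (0x19 + path * 0x12, i.e. 0x19 for RF_PATH_A and
       * 0x2b for RF_PATH_B) into the low byte; for example D_KIP_PRESET
       * (0x28) on path B becomes 0x282b.  Completion is detected by
       * polling byte 0 of 0xbff8 for 0x55 (10 us step, 20 ms budget); the
       * fixed 10 ms delay and the clear of R_NCTL_N1 happen whether or not
       * the poll succeeded, and the caller only sees a pass/timeout flag.
       * The debug string names ids up to GAIN_LOSS and reports every other
       * stage as "PWR_CAL".
       */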
 1689 
 1690 static void _dpk_information(struct rtw89_dev *rtwdev,
 1691                              enum rtw89_phy_idx phy,
 1692                              enum rtw89_rf_path path)
 1693 {
 1694         const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 1695         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 1696 
 1697         u8 kidx = dpk->cur_idx[path];
 1698 
 1699         dpk->bp[path][kidx].band = chan->band_type;
 1700         dpk->bp[path][kidx].ch = chan->channel;
 1701         dpk->bp[path][kidx].bw = chan->band_width;
 1702 
 1703         rtw89_debug(rtwdev, RTW89_DBG_RFK,
 1704                     "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
 1705                     path, dpk->cur_idx[path], phy,
 1706                     rtwdev->is_tssi_mode[path] ? "on" : "off",
 1707                     rtwdev->dbcc_en ? "on" : "off",
 1708                     dpk->bp[path][kidx].band == 0 ? "2G" :
 1709                     dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
 1710                     dpk->bp[path][kidx].ch,
 1711                     dpk->bp[path][kidx].bw == 0 ? "20M" :
 1712                     dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
 1713 }
 1714 
 1715 static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
 1716                                 enum rtw89_phy_idx phy,
 1717                                 enum rtw89_rf_path path, u8 kpath)
 1718 {
 1719         /* 1. Keep ADC FIFO in reset */
 1720         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
 1721         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
 1722         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
 1723         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
 1724 
 1725         /* 2. BB for IQK debug mode */
 1726         rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd);
 1727 
 1728         /* 3. Set DAC clock */
 1729         rtw8852c_txck_force(rtwdev, path, true, DAC_960M);
 1730 
 1731         /* 4. Set ADC clock */
 1732         rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
 1733         rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1);
 1734         rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb);
 1735         rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
 1736                                B_P0_NRBW_DBG, 0x1);
 1737         rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
 1738         rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13);
 1739         rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001);
 1740         rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041);
 1741 
 1742         /* 5. ADDA FIFO reset */
 1743         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
 1744         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);
 1745 
 1746         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE setting\n", path);
 1747 }
 1748 
 1749 static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, u8 path)
 1750 {
 1751         rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
 1752                                B_P0_NRBW_DBG, 0x0);
 1753         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
 1754         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
 1755         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
 1756         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
 1757         rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0x00000000);
 1758         rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, 0x00);
 1759         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x0);
 1760         rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x0);
 1761 
 1762         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE restore\n", path);
 1763 }
 1764 
 1765 static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
 1766                             enum rtw89_rf_path path, bool is_pause)
 1767 {
 1768         rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
 1769                                B_P0_TSSI_TRK_EN, is_pause);
 1770 
 1771         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
 1772                     is_pause ? "pause" : "resume");
 1773 }
 1774 
 1775 static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, u8 path, bool ctrl_by_kip)
 1776 {
 1777         rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_IQK_RFC_ON, ctrl_by_kip);
 1778         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RFC is controlled by %s\n",
 1779                     ctrl_by_kip ? "KIP" : "BB");
 1780 }
 1781 
 1782 static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, u8 path, bool force)
 1783 {
 1784         rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_ON, force);
 1785         rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + (path << 13), B_TXPWRB_RDY, force);
 1786 
 1787         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d txpwr_bb_force %s\n",
 1788                     path, force ? "on" : "off");
 1789 }
 1790 
 1791 static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 1792                              enum rtw89_rf_path path)
 1793 {
 1794         _dpk_one_shot(rtwdev, phy, path, D_KIP_RESTORE);
 1795         _dpk_kip_control_rfc(rtwdev, path, false);
 1796         _dpk_txpwr_bb_force(rtwdev, path, false);
 1797         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
 1798 }
 1799 
 1800 static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
 1801                            enum rtw89_phy_idx phy,
 1802                            enum rtw89_rf_path path)
 1803 {
 1804 #define RX_TONE_IDX 0x00250025 /* Q.2 9.25MHz */
 1805         u8 cur_rxbb;
 1806         u32 rf_11, reg_81cc;
 1807 
 1808         rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
 1809         rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
 1810 
 1811         _dpk_kip_control_rfc(rtwdev, path, false);
 1812 
 1813         cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
 1814         rf_11 = rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK);
 1815         reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8),
 1816                                          B_KIP_IQP_SW);
 1817 
 1818         rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
 1819         rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x3);
 1820         rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0xd);
 1821         rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, 0x1f);
 1822 
 1823         rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x12);
 1824         rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, 0x3);
 1825 
 1826         _dpk_kip_control_rfc(rtwdev, path, true);
 1827 
 1828         rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, RX_TONE_IDX);
 1829 
 1830         _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
 1831 
 1832         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
 1833                     rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD));
 1834 
 1835         _dpk_kip_control_rfc(rtwdev, path, false);
 1836 
 1837         rtw89_write_rf(rtwdev, path, RR_TXIG, RFREG_MASK, rf_11);
 1838         rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, cur_rxbb);
 1839         rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, reg_81cc);
 1840 
 1841         rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
 1842         rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
 1843         rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
 1844 
 1845         _dpk_kip_control_rfc(rtwdev, path, true);
 1846 }
 1847 
 1848 static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
 1849                             enum rtw89_rf_path path, u8 kidx)
 1850 {
 1851         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 1852 
 1853         if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
 1854                 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
 1855                                0x50121 | BIT(rtwdev->dbcc_en));
 1856                 rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
 1857                 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x2);
 1858                 rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4);
 1859                 rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
 1860                 rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
 1861 
 1862                 rtw89_debug(rtwdev, RTW89_DBG_RFK,
 1863                             "[DPK] RF 0x0/0x83/0x9e/0x1a/0xdf/0x1001a = 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x\n",
 1864                             rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
 1865                             rtw89_read_rf(rtwdev, path, RR_RXBB, RFREG_MASK),
 1866                             rtw89_read_rf(rtwdev, path, RR_TIA, RFREG_MASK),
 1867                             rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK),
 1868                             rtw89_read_rf(rtwdev, path, RR_LUTDBG, RFREG_MASK),
 1869                             rtw89_read_rf(rtwdev, path, 0x1001a, RFREG_MASK));
 1870         } else {
 1871                 rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
 1872                                0x50101 | BIT(rtwdev->dbcc_en));
 1873                 rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
 1874 
 1875                 if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161) {
 1876                         rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8);
 1877                         rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
 1878                 } else {
 1879                         rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
 1880                 }
 1881 
 1882                 rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0);
 1883                 rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3);
 1884                 rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
 1885                 rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
 1886 
 1887                 if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160)
 1888                         rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_EBW, 0x0);
 1889         }
 1890 }
 1891 
 1892 static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
 1893 {
 1894         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 1895 
 1896         if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160) {
 1897                 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x3);
 1898                 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0x0180ff30);
 1899         } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) {
 1900                 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0);
 1901                 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xffe0fa00);
 1902         } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) {
 1903                 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
 1904                 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xff4009e0);
 1905         } else {
 1906                 rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
 1907                 rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xf9f007d0);
 1908         }
 1909         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
 1910                     dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160 ? "160M" :
 1911                     dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
 1912                     dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
 1913 }
 1914 
 1915 static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
 1916 {
 1917 #define DPK_SYNC_TH_DC_I 200
 1918 #define DPK_SYNC_TH_DC_Q 200
 1919 #define DPK_SYNC_TH_CORR 170
 1920         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 1921         u16 dc_i, dc_q;
 1922         u8 corr_val, corr_idx, rxbb;
 1923         u8 rxbb_ov;
 1924 
 1925         rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
 1926 
 1927         corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
 1928         corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);
 1929 
 1930         dpk->corr_idx[path][kidx] = corr_idx;
 1931         dpk->corr_val[path][kidx] = corr_val;
 1932 
 1933         rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
 1934 
 1935         dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
 1936         dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
 1937 
 1938         dc_i = abs(sign_extend32(dc_i, 11));
 1939         dc_q = abs(sign_extend32(dc_q, 11));
 1940 
 1941         rtw89_debug(rtwdev, RTW89_DBG_RFK,
 1942                     "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n",
 1943                     path, corr_idx, corr_val, dc_i, dc_q);
 1944 
 1945         dpk->dc_i[path][kidx] = dc_i;
 1946         dpk->dc_q[path][kidx] = dc_q;
 1947 
 1948         rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x8);
 1949         rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB);
 1950 
 1951         rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x31);
 1952         rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV);
 1953 
 1954         rtw89_debug(rtwdev, RTW89_DBG_RFK,
 1955                     "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n",
 1956                     path, rxbb,
 1957                     rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE),
 1958                     rxbb_ov);
 1959 
 1960         if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
 1961             corr_val < DPK_SYNC_TH_CORR)
 1962                 return true;
 1963         else
 1964                 return false;
 1965 }
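      /*
       * The SYNC report is read through two report-select windows: 0x0
       * for the correlator index/value and 0x9 for the DC estimate.  The
       * DC terms are 12-bit two's-complement fields, hence
       * sign_extend32(..., 11) followed by abs().  The function returns
       * true (sync failed) when either DC magnitude exceeds 200 or the
       * correlation value drops below 170; the RXBB and RXBB-overflow
       * windows (0x8 and 0x31) are read only for the debug print.
       */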
 1966 
 1967 static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
 1968 {
 1969         u16 dgain = 0x0;
 1970 
 1971         rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
 1972 
 1973         dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
 1974 
 1975         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain, dgain);
 1976 
 1977         return dgain;
 1978 }
 1979 
 1980 static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
 1981 {
 1982         u8 result;
 1983 
 1984         rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
 1985         rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
 1986 
 1987         result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
 1988 
 1989         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp GL = %d\n", result);
 1990 
 1991         return result;
 1992 }
 1993 
 1994 static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
 1995 {
 1996         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 1997 
 1998         rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0x10);
 1999         dpk->cur_k_set =
 2000                 rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 0xE0000000) - 1;
 2001 }
 2002 
 2003 static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2004                                enum rtw89_rf_path path, u8 dbm, bool set_from_bb)
 2005 {
 2006         if (set_from_bb) {
 2007                 dbm = clamp_t(u8, dbm, 7, 24);
 2008                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set S%d txagc to %ddBm\n", path, dbm);
 2009                 rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_VAL, dbm << 2);
 2010         }
 2011         _dpk_one_shot(rtwdev, phy, path, D_TXAGC);
 2012         _dpk_kset_query(rtwdev, path);
 2013 }
 2014 
 2015 static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2016                         enum rtw89_rf_path path, u8 kidx)
 2017 {
 2018         _dpk_one_shot(rtwdev, phy, path, D_GAIN_LOSS);
 2019         _dpk_kip_set_txagc(rtwdev, phy, path, 0xff, false);
 2020 
 2021         rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, 0x0);
 2022         rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, 0x0);
 2023 
 2024         return _dpk_gainloss_read(rtwdev);
 2025 }
 2026 
 2027 static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
 2028 {
 2029         u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
 2030         u8 i;
 2031 
 2032         rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
 2033         rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
 2034         rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);
 2035 
 2036         if (is_check) {
 2037                 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
 2038                 val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
 2039                 val1_i = abs(sign_extend32(val1_i, 11));
 2040                 val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
 2041                 val1_q = abs(sign_extend32(val1_q, 11));
 2042 
 2043                 rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
 2044                 val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
 2045                 val2_i = abs(sign_extend32(val2_i, 11));
 2046                 val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
 2047                 val2_q = abs(sign_extend32(val2_q, 11));
 2048 
 2049                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
 2050                             phy_div(val1_i * val1_i + val1_q * val1_q,
 2051                                     val2_i * val2_i + val2_q * val2_q));
 2052         } else {
 2053                 for (i = 0; i < 32; i++) {
 2054                         rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
 2055                         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
 2056                                     rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
 2057                 }
 2058         }
 2059 
 2060         if (val1_i * val1_i + val1_q * val1_q >= (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
 2061                 return true;
 2062         else
 2063                 return false;
 2064 }
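      /*
       * In check mode the PA-scan entries at indices 0x00 and 0x1f are
       * read as 12-bit signed I/Q pairs and the function returns true when
       * |s[0x00]|^2 >= (8/5) * |s[0x1f]|^2, i.e. when the first sample
       * carries noticeably more power than the last.  In dump mode
       * (is_check == false) the 32 entries are only logged; the
       * accumulators stay zero, so the trailing comparison is degenerate
       * and only the check-mode result is used by _dpk_agc().
       */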
 2065 
 2066 static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2067                                enum rtw89_rf_path path, u8 kidx)
 2068 {
 2069         _dpk_one_shot(rtwdev, phy, path, D_RXAGC);
 2070 
 2071         return _dpk_sync_check(rtwdev, path, kidx);
 2072 }
 2073 
 2074 static void _dpk_read_rxsram(struct rtw89_dev *rtwdev)
 2075 {
 2076         u32 addr;
 2077 
 2078         rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_pre_defs_tbl);
 2079 
 2080         for (addr = 0; addr < 0x200; addr++) {
 2081                 rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 | addr);
 2082 
 2083                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RXSRAM[%03d] = 0x%07x\n", addr,
 2084                             rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
 2085         }
 2086 
 2087         rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_post_defs_tbl);
 2088 }
 2089 
 2090 static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
 2091 {
 2092         rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
 2093         rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000002);
 2094 
 2095         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC\n");
 2096 }
 2097 
 2098 static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2099                    enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only)
 2100 {
 2101         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 2102         u8 step = DPK_AGC_STEP_SYNC_DGAIN;
 2103         u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0;
 2104         u8 tmp_rxbb;
 2105         u8 goout = 0, agc_cnt = 0;
 2106         u16 dgain = 0;
 2107         bool is_fail = false;
 2108         int limit = 200;
 2109 
 2110         do {
 2111                 switch (step) {
 2112                 case DPK_AGC_STEP_SYNC_DGAIN:
 2113                         is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx);
 2114 
 2115                         if (RTW8852C_DPK_RXSRAM_DBG)
 2116                                 _dpk_read_rxsram(rtwdev);
 2117 
 2118                         if (is_fail) {
 2119                                 goout = 1;
 2120                                 break;
 2121                         }
 2122 
 2123                         dgain = _dpk_dgain_read(rtwdev);
 2124 
 2125                         if (dgain > 0x5fc || dgain < 0x556) {
 2126                                 _dpk_one_shot(rtwdev, phy, path, D_SYNC);
 2127                                 dgain = _dpk_dgain_read(rtwdev);
 2128                         }
 2129 
 2130                         if (agc_cnt == 0) {
 2131                                 if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
 2132                                         _dpk_bypass_rxiqc(rtwdev, path);
 2133                                 else
 2134                                         _dpk_lbk_rxiqk(rtwdev, phy, path);
 2135                         }
 2136                         step = DPK_AGC_STEP_GAIN_LOSS_IDX;
 2137                         break;
 2138 
 2139                 case DPK_AGC_STEP_GAIN_LOSS_IDX:
 2140                         tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx);
 2141 
 2142                         if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
 2143                             tmp_gl_idx >= 7)
 2144                                 step = DPK_AGC_STEP_GL_GT_CRITERION;
 2145                         else if (tmp_gl_idx == 0)
 2146                                 step = DPK_AGC_STEP_GL_LT_CRITERION;
 2147                         else
 2148                                 step = DPK_AGC_STEP_SET_TX_GAIN;
 2149                         break;
 2150 
 2151                 case DPK_AGC_STEP_GL_GT_CRITERION:
 2152                         if (tmp_dbm <= 7) {
 2153                                 goout = 1;
 2154                                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@lower bound!!\n");
 2155                         } else {
 2156                                 tmp_dbm = max_t(u8, tmp_dbm - 3, 7);
 2157                                 _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
 2158                         }
 2159                         step = DPK_AGC_STEP_SYNC_DGAIN;
 2160                         agc_cnt++;
 2161                         break;
 2162 
 2163                 case DPK_AGC_STEP_GL_LT_CRITERION:
 2164                         if (tmp_dbm >= 24) {
 2165                                 goout = 1;
 2166                                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@upper bound!!\n");
 2167                         } else {
 2168                                 tmp_dbm = min_t(u8, tmp_dbm + 2, 24);
 2169                                 _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
 2170                         }
 2171                         step = DPK_AGC_STEP_SYNC_DGAIN;
 2172                         agc_cnt++;
 2173                         break;
 2174 
 2175                 case DPK_AGC_STEP_SET_TX_GAIN:
 2176                         _dpk_kip_control_rfc(rtwdev, path, false);
 2177                         tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
 2178                         if (tmp_rxbb + tmp_gl_idx > 0x1f)
 2179                                 tmp_rxbb = 0x1f;
 2180                         else
 2181                                 tmp_rxbb = tmp_rxbb + tmp_gl_idx;
 2182 
 2183                         rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb);
 2184                         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust RXBB (%+d) = 0x%x\n",
 2185                                     tmp_gl_idx, tmp_rxbb);
 2186                         _dpk_kip_control_rfc(rtwdev, path, true);
 2187                         goout = 1;
 2188                         break;
 2189                 default:
 2190                         goout = 1;
 2191                         break;
 2192                 }
 2193         } while (!goout && agc_cnt < 6 && --limit > 0);
 2194 
 2195         if (limit <= 0)
 2196                 rtw89_warn(rtwdev, "[DPK] exceed loop limit\n");
 2197 
 2198         return is_fail;
 2199 }
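      /*
       * AGC state machine, in outline:
       *  - SYNC_DGAIN: run RXAGC plus the sync check, re-issue D_SYNC if
       *    the digital gain falls outside [0x556, 0x5fc], and on the first
       *    pass set up the RX IQ path (bypass the RX IQC on 2 GHz,
       *    otherwise run the loopback RX IQK).
       *  - GAIN_LOSS_IDX: an index >= 7, or 0 together with a positive PAS
       *    check, means too much gain -> lower TXAGC; a bare 0 means too
       *    little -> raise TXAGC; anything else is folded into RXBB.
       *  - GL_GT/LT_CRITERION: step the BB TX power down 3 dB or up 2 dB,
       *    clamped to the 7..24 dBm window, then re-sync.
       *  - SET_TX_GAIN: add the gain-loss index to RR_MOD_M_RXBB (capped
       *    at 0x1f) and leave the loop.
       * The loop gives up after 6 AGC rounds or 200 iterations and returns
       * the last sync-check result as the failure flag.
       */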
 2200 
 2201 static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
 2202 {
 2203         static const struct rtw89_rfk_tbl *order_tbls[] = {
 2204                 &rtw8852c_dpk_mdpd_order0_defs_tbl,
 2205                 &rtw8852c_dpk_mdpd_order1_defs_tbl,
 2206                 &rtw8852c_dpk_mdpd_order2_defs_tbl,
 2207                 &rtw8852c_dpk_mdpd_order3_defs_tbl,
 2208         };
 2209 
 2210         if (order >= ARRAY_SIZE(order_tbls)) {
 2211                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Wrong MDPD order!!(0x%x)\n", order);
 2212                 return;
 2213         }
 2214 
 2215         rtw89_rfk_parser(rtwdev, order_tbls[order]);
 2216 
 2217         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n",
 2218                     order == 0x0 ? "(5,3,1)" :
 2219                     order == 0x1 ? "(5,3,0)" :
 2220                     order == 0x2 ? "(5,0,0)" : "(7,3,1)");
 2221 }
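      /*
       * 'order' selects one of four MDPD coefficient tables; per the debug
       * string the encodings correspond to tap configurations (5,3,1),
       * (5,3,0), (5,0,0) and (7,3,1) for orders 0..3.  An out-of-range
       * order is logged and otherwise ignored.
       */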
 2222 
 2223 static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2224                          enum rtw89_rf_path path, u8 kidx)
 2225 {
 2226         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 2227         u8 cnt;
 2228         u8 ov_flag;
 2229         u32 dpk_sync;
 2230 
 2231         rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, 0x1);
 2232 
 2233         if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T2) == 0x1)
 2234                 _dpk_set_mdpd_para(rtwdev, 0x2);
 2235         else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T1) == 0x1)
 2236                 _dpk_set_mdpd_para(rtwdev, 0x1);
 2237         else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T0) == 0x1)
 2238                 _dpk_set_mdpd_para(rtwdev, 0x0);
 2239         else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_5 ||
 2240                  dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_10 ||
 2241                  dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_20)
 2242                 _dpk_set_mdpd_para(rtwdev, 0x2);
 2243         else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ||
 2244                  dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
 2245                 _dpk_set_mdpd_para(rtwdev, 0x1);
 2246         else
 2247                 _dpk_set_mdpd_para(rtwdev, 0x0);
 2248 
 2249         rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, 0x0);
 2250         fsleep(1000);
 2251 
 2252         _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
 2253         rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
 2254         dpk_sync = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
 2255         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] dpk_sync = 0x%x\n", dpk_sync);
 2256 
 2257         rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
 2258         ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
 2259         for (cnt = 0; cnt < 5 && ov_flag == 0x1; cnt++) {
 2260                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ReK due to MDPK ov!!!\n");
 2261                 _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
 2262                 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
 2263                 ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
 2264         }
 2265 
 2266         if (ov_flag) {
 2267                 _dpk_set_mdpd_para(rtwdev, 0x2);
 2268                 _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
 2269         }
 2270 }
 2271 
 2272 static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2273                               enum rtw89_rf_path path)
 2274 {
 2275         const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 2276         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 2277         bool is_reload = false;
 2278         u8 idx, cur_band, cur_ch;
 2279 
 2280         cur_band = chan->band_type;
 2281         cur_ch = chan->channel;
 2282 
 2283         for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
 2284                 if (cur_band != dpk->bp[path][idx].band ||
 2285                     cur_ch != dpk->bp[path][idx].ch)
 2286                         continue;
 2287 
 2288                 rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
 2289                                        B_COEF_SEL_MDPD, idx);
 2290                 dpk->cur_idx[path] = idx;
 2291                 is_reload = true;
 2292                 rtw89_debug(rtwdev, RTW89_DBG_RFK,
 2293                             "[DPK] reload S%d[%d] success\n", path, idx);
 2294         }
 2295 
 2296         return is_reload;
 2297 }
 2298 
 2299 static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on)
 2300 {
 2301         rtw89_rfk_parser(rtwdev, turn_on ? &rtw8852c_dpk_kip_pwr_clk_on_defs_tbl :
 2302                                            &rtw8852c_dpk_kip_pwr_clk_off_defs_tbl);
 2303 }
 2304 
 2305 static void _dpk_kip_preset_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2306                                   enum rtw89_rf_path path, u8 kidx)
 2307 {
 2308         rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
 2309                                rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
 2310 
 2311         if (rtwdev->hal.cv == CHIP_CAV)
 2312                 rtw89_phy_write32_mask(rtwdev,
 2313                                        R_DPD_CH0A + (path << 8) + (kidx << 2),
 2314                                        B_DPD_SEL, 0x01);
 2315         else
 2316                 rtw89_phy_write32_mask(rtwdev,
 2317                                        R_DPD_CH0A + (path << 8) + (kidx << 2),
 2318                                        B_DPD_SEL, 0x0c);
 2319 
 2320         _dpk_kip_control_rfc(rtwdev, path, true);
 2321         rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);
 2322 
 2323         _dpk_one_shot(rtwdev, phy, path, D_KIP_PRESET);
 2324 }
 2325 
 2326 static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
 2327 {
 2328 #define _DPK_PARA_TXAGC GENMASK(15, 10)
 2329 #define _DPK_PARA_THER GENMASK(31, 26)
 2330         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 2331         u32 para;
 2332 
 2333         para = rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
 2334                                      MASKDWORD);
 2335 
 2336         dpk->bp[path][kidx].txagc_dpk = FIELD_GET(_DPK_PARA_TXAGC, para);
 2337         dpk->bp[path][kidx].ther_dpk = FIELD_GET(_DPK_PARA_THER, para);
 2338 
 2339         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n",
 2340                     dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, dpk->bp[path][kidx].txagc_dpk);
 2341 }
 2342 
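      /*
       * Gain normalization: either trigger the hardware one-shot or program
       * a fixed gain-scale value (0x5b), then store the resulting gain-scale
       * field (bits 6:0) in the per-path backup.
       */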
 2343 static void _dpk_gain_normalize_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2344                                       enum rtw89_rf_path path, u8 kidx, bool is_execute)
 2345 {
 2346         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 2347 
 2348         if (is_execute) {
 2349                 rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_AG, 0x200);
 2350                 rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_EN, 0x3);
 2351 
 2352                 _dpk_one_shot(rtwdev, phy, path, D_GAIN_NORM);
 2353         } else {
 2354                 rtw89_phy_write32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
 2355                                        0x0000007F, 0x5b);
 2356         }
 2357         dpk->bp[path][kidx].gs =
 2358                 rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
 2359                                       0x0000007F);
 2360 }
 2361 
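      /*
       * Translate the MDPD order reported in B_LDL_NORM_OP into the encoding
       * expected by B_DPD_ORDER; 0xff flags an unexpected value.
       */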
 2362 static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
 2363 {
 2364         u32 val32 = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
 2365         u8 val;
 2366 
 2367         switch (val32) {
 2368         case 0:
 2369                 val = 0x6;
 2370                 break;
 2371         case 1:
 2372                 val = 0x2;
 2373                 break;
 2374         case 2:
 2375                 val = 0x0;
 2376                 break;
 2377         case 3:
 2378                 val = 0x7;
 2379                 break;
 2380         default:
 2381                 val = 0xff;
 2382                 break;
 2383         }
 2384 
 2385         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);
 2386 
 2387         return val;
 2388 }
 2389 
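      /*
       * Commit the calibration: pulse B_LOAD_COEF_MDPD to latch the
       * coefficients, program the converted MDPD order, mark the path as
       * calibrated, enable the model bit for the current k-set and apply the
       * non-executing gain normalization.
       */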
 2390 static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2391                     enum rtw89_rf_path path, u8 kidx)
 2392 {
 2393         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 2394 
 2395         rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
 2396         rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);
 2397         rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
 2398                                B_DPD_ORDER, _dpk_order_convert(rtwdev));
 2399 
 2400         dpk->bp[path][kidx].mdpd_en = BIT(dpk->cur_k_set);
 2401         dpk->bp[path][kidx].path_ok = true;
 2402 
 2403         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] path_ok = 0x%x\n",
 2404                     path, kidx, dpk->bp[path][kidx].mdpd_en);
 2405 
 2406         rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
 2407                                B_DPD_MEN, dpk->bp[path][kidx].mdpd_en);
 2408 
 2409         _dpk_gain_normalize_8852c(rtwdev, phy, path, kidx, false);
 2410 }
 2411 
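      /*
       * Per-path DPK body: configure the RF and KIP blocks, force the BB TX
       * power with an initial target of init_xdbm dBm and run the AGC
       * search.  On success the IDL/MPA stage, parameter read-back and final
       * enable follow; in all cases RF control and RX mode are restored
       * before returning (true means the calibration needs checking).
       */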
 2412 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2413                       enum rtw89_rf_path path, u8 gain)
 2414 {
 2415         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 2416         u8 kidx = dpk->cur_idx[path];
 2417         u8 init_xdbm = 15;
 2418         bool is_fail;
 2419 
 2420         rtw89_debug(rtwdev, RTW89_DBG_RFK,
 2421                     "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
 2422         _dpk_kip_control_rfc(rtwdev, path, false);
 2423         _rf_direct_cntrl(rtwdev, path, false);
 2424         rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd);
 2425         _dpk_rf_setting(rtwdev, gain, path, kidx);
 2426         _set_rx_dck(rtwdev, phy, path, false);
 2427         _dpk_kip_pwr_clk_onoff(rtwdev, true);
 2428         _dpk_kip_preset_8852c(rtwdev, phy, path, kidx);
 2429         _dpk_txpwr_bb_force(rtwdev, path, true);
 2430         _dpk_kip_set_txagc(rtwdev, phy, path, init_xdbm, true);
 2431         _dpk_tpg_sel(rtwdev, path, kidx);
 2432 
 2433         is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, false);
 2434         if (is_fail)
 2435                 goto _error;
 2436 
 2437         _dpk_idl_mpa(rtwdev, phy, path, kidx);
 2438         _dpk_para_query(rtwdev, path, kidx);
 2439         _dpk_on(rtwdev, phy, path, kidx);
 2440 
 2441 _error:
 2442         _dpk_kip_control_rfc(rtwdev, path, false);
 2443         rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX);
 2444         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d]_K%d %s\n", path, kidx,
 2445                     dpk->cur_k_set, is_fail ? "needs check" : "succeeded");
 2446 
 2447         return is_fail;
 2448 }
 2449 
 2450 static void _dpk_init(struct rtw89_dev *rtwdev, u8 path)
 2451 {
 2452         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 2453         u8 kidx = dpk->cur_idx[path];
 2454 
 2455         dpk->bp[path][kidx].path_ok = false;
 2456 }
 2457 
 2458 static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_bybb)
 2459 {
 2460         if (is_bybb)
 2461                 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
 2462         else
 2463                 rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
 2464 }
 2465 
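      /*
       * Top-level DPK flow for the paths selected by kpath: optionally reuse
       * a stored result (or switch to the spare coefficient slot), back up
       * the KIP and RF registers and pause TSSI, run _dpk_main() per path,
       * then restore the saved state and resume TSSI tracking.
       */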
 2466 static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
 2467                             enum rtw89_phy_idx phy, u8 kpath)
 2468 {
 2469         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 2470         static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8};
 2471         u32 backup_rf_val[RTW8852C_DPK_RF_PATH][BACKUP_RF_REGS_NR];
 2472         u32 kip_bkup[RTW8852C_DPK_RF_PATH][RTW8852C_DPK_KIP_REG_NUM] = {};
 2473         u8 path;
 2474         bool is_fail = true, reloaded[RTW8852C_DPK_RF_PATH] = {false};
 2475 
 2476         if (dpk->is_dpk_reload_en) {
 2477                 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
 2478                         if (!(kpath & BIT(path)))
 2479                                 continue;
 2480 
 2481                         reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
 2482                         if (!reloaded[path] && dpk->bp[path][0].ch != 0)
 2483                                 dpk->cur_idx[path] = !dpk->cur_idx[path];
 2484                         else
 2485                                 _dpk_onoff(rtwdev, path, false);
 2486                 }
 2487         } else {
 2488                 for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
 2489                         dpk->cur_idx[path] = 0;
 2490         }
 2491 
 2492         for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
 2493                 rtw89_debug(rtwdev, RTW89_DBG_RFK,
 2494                             "[DPK] ========= S%d[%d] DPK Init =========\n",
 2495                             path, dpk->cur_idx[path]);
 2496                 _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
 2497                 _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
 2498                 _dpk_information(rtwdev, phy, path);
 2499                 _dpk_init(rtwdev, path);
 2500                 if (rtwdev->is_tssi_mode[path])
 2501                         _dpk_tssi_pause(rtwdev, path, true);
 2502         }
 2503 
 2504         for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
 2505                 rtw89_debug(rtwdev, RTW89_DBG_RFK,
 2506                             "[DPK] ========= S%d[%d] DPK Start =========\n",
 2507                             path, dpk->cur_idx[path]);
 2508                 rtw8852c_disable_rxagc(rtwdev, path, 0x0);
 2509                 _dpk_drf_direct_cntrl(rtwdev, path, false);
 2510                 _dpk_bb_afe_setting(rtwdev, phy, path, kpath);
 2511                 is_fail = _dpk_main(rtwdev, phy, path, 1);
 2512                 _dpk_onoff(rtwdev, path, is_fail);
 2513         }
 2514 
 2515         for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
 2516                 rtw89_debug(rtwdev, RTW89_DBG_RFK,
 2517                             "[DPK] ========= S%d[%d] DPK Restore =========\n",
 2518                             path, dpk->cur_idx[path]);
 2519                 _dpk_kip_restore(rtwdev, phy, path);
 2520                 _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
 2521                 _rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
 2522                 _dpk_bb_afe_restore(rtwdev, path);
 2523                 rtw8852c_disable_rxagc(rtwdev, path, 0x1);
 2524                 if (rtwdev->is_tssi_mode[path])
 2525                         _dpk_tssi_pause(rtwdev, path, false);
 2526         }
 2527 
 2528         _dpk_kip_pwr_clk_onoff(rtwdev, false);
 2529 }
 2530 
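      /*
       * DPK is skipped entirely on CAV silicon outside the 2 GHz band, and
       * on any band where the FEM info reports an external PA.
       */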
 2531 static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
 2532 {
 2533         struct rtw89_fem_info *fem = &rtwdev->fem;
 2534         const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 2535         u8 band = chan->band_type;
 2536 
 2537         if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) {
 2538                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to CAV & not 2G!!\n");
 2539                 return true;
 2540         } else if (fem->epa_2g && band == RTW89_BAND_2G) {
 2541                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK since 2G_ext_PA exists!!\n");
 2542                 return true;
 2543         } else if (fem->epa_5g && band == RTW89_BAND_5G) {
 2544                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK since 5G_ext_PA exists!!\n");
 2545                 return true;
 2546         } else if (fem->epa_6g && band == RTW89_BAND_6G) {
 2547                 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK since 6G_ext_PA exists!!\n");
 2548                 return true;
 2549         }
 2550 
 2551         return false;
 2552 }
 2553 
 2554 static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
 2555 {
 2556         u8 path, kpath;
 2557 
 2558         kpath = _kpath(rtwdev, phy);
 2559 
 2560         for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
 2561                 if (kpath & BIT(path))
 2562                         _dpk_onoff(rtwdev, path, true);
 2563         }
 2564 }
 2565 
 2566 static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
 2567 {
 2568         rtw89_debug(rtwdev, RTW89_DBG_RFK,
 2569                     "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
 2570                     RTW8852C_DPK_VER, rtwdev->hal.cv,
 2571                     RTW8852C_RF_REL_VERSION);
 2572 
 2573         if (_dpk_bypass_check(rtwdev, phy))
 2574                 _dpk_force_bypass(rtwdev, phy);
 2575         else
 2576                 _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
 2577 
 2578         if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1)
 2579                 rtw8852c_rx_dck(rtwdev, phy, false);
 2580 }
 2581 
 2582 static void _dpk_onoff(struct rtw89_dev *rtwdev,
 2583                        enum rtw89_rf_path path, bool off)
 2584 {
 2585         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 2586         u8 val, kidx = dpk->cur_idx[path];
 2587 
 2588         val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok ?
 2589               dpk->bp[path][kidx].mdpd_en : 0;
 2590 
 2591         rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
 2592                                B_DPD_MEN, val);
 2593 
 2594         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
 2595                     kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
 2596 }
 2597 
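      /*
       * Periodic tracking: cur_ther ends up holding the EWMA thermal average
       * (the KIP-reported thermal read just before it is overwritten), and
       * the difference from the calibration-time thermal is halved.  On CAV
       * silicon, when the DPK write engine is idle (B_DPK_WR_ST == 0) and a
       * TX AGC value is present, the power scaling factor is rewritten to
       * 0x78 minus that delta to compensate for thermal drift.
       */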
 2598 static void _dpk_track(struct rtw89_dev *rtwdev)
 2599 {
 2600         struct rtw89_dpk_info *dpk = &rtwdev->dpk;
 2601         u8 path, kidx;
 2602         u8 txagc_rf = 0;
 2603         s8 txagc_bb = 0, txagc_bb_tp = 0, txagc_ofst = 0;
 2604         u8 cur_ther;
 2605         s8 delta_ther = 0;
 2606         s16 pwsf_tssi_ofst;
 2607 
 2608         for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
 2609                 kidx = dpk->cur_idx[path];
 2610                 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
 2611                             "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
 2612                             path, kidx, dpk->bp[path][kidx].ch);
 2613 
 2614                 txagc_rf =
 2615                         rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 0x0000003f);
 2616                 txagc_bb =
 2617                         rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), MASKBYTE2);
 2618                 txagc_bb_tp =
 2619                         rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), B_TXAGC_BTP);
 2620 
 2621                 /* report from KIP */
 2622                 rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xf);
 2623                 cur_ther =
 2624                         rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TH);
 2625                 txagc_ofst =
 2626                         rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_OF);
 2627                 pwsf_tssi_ofst =
 2628                         rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TSSI);
 2629                 pwsf_tssi_ofst = sign_extend32(pwsf_tssi_ofst, 12);
 2630 
 2631                 cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
 2632 
 2633                 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
 2634                             "[DPK_TRK] thermal now = %d\n", cur_ther);
 2635 
 2636                 if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
 2637                         delta_ther = dpk->bp[path][kidx].ther_dpk - cur_ther;
 2638 
 2639                 delta_ther /= 2;
 2640 
 2641                 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
 2642                             "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n",
 2643                             delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk);
 2644                 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
 2645                             "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n",
 2646                             txagc_rf - dpk->bp[path][kidx].txagc_dpk, txagc_rf,
 2647                             dpk->bp[path][kidx].txagc_dpk);
 2648                 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
 2649                             "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n",
 2650                             txagc_ofst, pwsf_tssi_ofst);
 2651                 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
 2652                             "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
 2653                             txagc_bb_tp, txagc_bb);
 2654 
 2655                 if (rtw89_phy_read32_mask(rtwdev, R_DPK_WR, B_DPK_WR_ST) == 0x0 &&
 2656                     txagc_rf != 0 && rtwdev->hal.cv == CHIP_CAV) {
 2657                         rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
 2658                                     "[DPK_TRK] New pwsf = 0x%x\n", 0x78 - delta_ther);
 2659 
 2660                         rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
 2661                                                0x07FC0000, 0x78 - delta_ther);
 2662                 }
 2663         }
 2664 }
 2665 
 2666 static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2667                           enum rtw89_rf_path path)
 2668 {
 2669         const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 2670         enum rtw89_band band = chan->band_type;
 2671 
 2672         rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl);
 2673 
 2674         if (path == RF_PATH_A)
 2675                 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
 2676                                          &rtw8852c_tssi_sys_defs_2g_a_tbl,
 2677                                          &rtw8852c_tssi_sys_defs_5g_a_tbl);
 2678         else
 2679                 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
 2680                                          &rtw8852c_tssi_sys_defs_2g_b_tbl,
 2681                                          &rtw8852c_tssi_sys_defs_5g_b_tbl);
 2682 }
 2683 
 2684 static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2685                                     enum rtw89_rf_path path)
 2686 {
 2687         rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
 2688                                  &rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl,
 2689                                  &rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl);
 2690 }
 2691 
 2692 static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
 2693                                           enum rtw89_phy_idx phy,
 2694                                           enum rtw89_rf_path path)
 2695 {
 2696         rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
 2697                                  &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
 2698                                  &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
 2699 }
 2700 
 2701 static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2702                           enum rtw89_rf_path path)
 2703 {
 2704         const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 2705         enum rtw89_band band = chan->band_type;
 2706 
 2707         if (path == RF_PATH_A) {
 2708                 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_a_tbl);
 2709                 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
 2710                                          &rtw8852c_tssi_dck_defs_2g_a_tbl,
 2711                                          &rtw8852c_tssi_dck_defs_5g_a_tbl);
 2712         } else {
 2713                 rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_b_tbl);
 2714                 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
 2715                                          &rtw8852c_tssi_dck_defs_2g_b_tbl,
 2716                                          &rtw8852c_tssi_dck_defs_5g_b_tbl);
 2717         }
 2718 }
 2719 
 2720 static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2721                                    enum rtw89_rf_path path)
 2722 {
 2723         rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
 2724                                  &rtw8852c_tssi_set_bbgain_split_a_tbl,
 2725                                  &rtw8852c_tssi_set_bbgain_split_b_tbl);
 2726 }
 2727 
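      /*
       * Build the 64-entry thermal offset table for the path from the
       * delta-swing tables selected by sub-band: entries 0..31 take negated
       * "down" deltas, entries 63..32 take "up" deltas, both saturating at
       * the last table element.  RTW8852C_TSSI_GET_VAL() packs four
       * consecutive s8 entries little-endian into one u32, e.g.
       * {0x01, 0x02, 0x03, 0x04} -> 0x04030201, before the word is written
       * to the per-path offset RAM.  Without a valid thermal reading (0xff)
       * a mid-scale code (32) and an all-zero table are programmed instead.
       */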
 2728 static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2729                                  enum rtw89_rf_path path)
 2730 {
 2731 #define RTW8852C_TSSI_GET_VAL(ptr, idx)                 \
 2732 ({                                                      \
 2733         s8 *__ptr = (ptr);                              \
 2734         u8 __idx = (idx), __i, __v;                     \
 2735         u32 __val = 0;                                  \
 2736         for (__i = 0; __i < 4; __i++) {                 \
 2737                 __v = (__ptr[__idx + __i]);             \
 2738                 __val |= (__v << (8 * __i));            \
 2739         }                                               \
 2740         __val;                                          \
 2741 })
 2742         struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
 2743         const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 2744         u8 ch = chan->channel;
 2745         u8 subband = chan->subband_type;
 2746         const s8 *thm_up_a = NULL;
 2747         const s8 *thm_down_a = NULL;
 2748         const s8 *thm_up_b = NULL;
 2749         const s8 *thm_down_b = NULL;
 2750         u8 thermal = 0xff;
 2751         s8 thm_ofst[64] = {0};
 2752         u32 tmp = 0;
 2753         u8 i, j;
 2754 
 2755         switch (subband) {
 2756         default:
 2757         case RTW89_CH_2G:
 2758                 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
 2759                 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
 2760                 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
 2761                 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
 2762                 break;
 2763         case RTW89_CH_5G_BAND_1:
 2764                 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
 2765                 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
 2766                 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
 2767                 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
 2768                 break;
 2769         case RTW89_CH_5G_BAND_3:
 2770                 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
 2771                 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
 2772                 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
 2773                 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
 2774                 break;
 2775         case RTW89_CH_5G_BAND_4:
 2776                 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
 2777                 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
 2778                 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
 2779                 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
 2780                 break;
 2781         case RTW89_CH_6G_BAND_IDX0:
 2782         case RTW89_CH_6G_BAND_IDX1:
 2783                 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
 2784                 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
 2785                 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
 2786                 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
 2787                 break;
 2788         case RTW89_CH_6G_BAND_IDX2:
 2789         case RTW89_CH_6G_BAND_IDX3:
 2790                 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
 2791                 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
 2792                 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
 2793                 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
 2794                 break;
 2795         case RTW89_CH_6G_BAND_IDX4:
 2796         case RTW89_CH_6G_BAND_IDX5:
 2797                 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
 2798                 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
 2799                 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
 2800                 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
 2801                 break;
 2802         case RTW89_CH_6G_BAND_IDX6:
 2803         case RTW89_CH_6G_BAND_IDX7:
 2804                 thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
 2805                 thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
 2806                 thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
 2807                 thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
 2808                 break;
 2809         }
 2810 
 2811         if (path == RF_PATH_A) {
 2812                 thermal = tssi_info->thermal[RF_PATH_A];
 2813 
 2814                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 2815                             "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);
 2816 
 2817                 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
 2818                 rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);
 2819 
 2820                 if (thermal == 0xff) {
 2821                         rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
 2822                         rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);
 2823 
 2824                         for (i = 0; i < 64; i += 4) {
 2825                                 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
 2826 
 2827                                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 2828                                             "[TSSI] write 0x%x val=0x%08x\n",
 2829                                             0x5c00 + i, 0x0);
 2830                         }
 2831 
 2832                 } else {
 2833                         rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
 2834                         rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
 2835                                                thermal);
 2836 
 2837                         i = 0;
 2838                         for (j = 0; j < 32; j++)
 2839                                 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
 2840                                               -thm_down_a[i++] :
 2841                                               -thm_down_a[DELTA_SWINGIDX_SIZE - 1];
 2842 
 2843                         i = 1;
 2844                         for (j = 63; j >= 32; j--)
 2845                                 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
 2846                                               thm_up_a[i++] :
 2847                                               thm_up_a[DELTA_SWINGIDX_SIZE - 1];
 2848 
 2849                         for (i = 0; i < 64; i += 4) {
 2850                                 tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
 2851                                 rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);
 2852 
 2853                                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 2854                                             "[TSSI] write 0x%x val=0x%08x\n",
 2855                                             0x5c00 + i, tmp);
 2856                         }
 2857                 }
 2858                 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
 2859                 rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
 2860 
 2861         } else {
 2862                 thermal = tssi_info->thermal[RF_PATH_B];
 2863 
 2864                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 2865                             "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);
 2866 
 2867                 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
 2868                 rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);
 2869 
 2870                 if (thermal == 0xff) {
 2871                         rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
 2872                         rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);
 2873 
 2874                         for (i = 0; i < 64; i += 4) {
 2875                                 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
 2876 
 2877                                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 2878                                             "[TSSI] write 0x%x val=0x%08x\n",
 2879                                             0x7c00 + i, 0x0);
 2880                         }
 2881 
 2882                 } else {
 2883                         rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
 2884                         rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
 2885                                                thermal);
 2886 
 2887                         i = 0;
 2888                         for (j = 0; j < 32; j++)
 2889                                 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
 2890                                               -thm_down_b[i++] :
 2891                                               -thm_down_b[DELTA_SWINGIDX_SIZE - 1];
 2892 
 2893                         i = 1;
 2894                         for (j = 63; j >= 32; j--)
 2895                                 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
 2896                                               thm_up_b[i++] :
 2897                                               thm_up_b[DELTA_SWINGIDX_SIZE - 1];
 2898 
 2899                         for (i = 0; i < 64; i += 4) {
 2900                                 tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
 2901                                 rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);
 2902 
 2903                                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 2904                                             "[TSSI] write 0x%x val=0x%08x\n",
 2905                                             0x7c00 + i, tmp);
 2906                         }
 2907                 }
 2908                 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
 2909                 rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
 2910         }
 2911 #undef RTW8852C_TSSI_GET_VAL
 2912 }
 2913 
 2914 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2915                                 enum rtw89_rf_path path)
 2916 {
 2917         const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 2918         enum rtw89_band band = chan->band_type;
 2919 
 2920         if (path == RF_PATH_A) {
 2921                 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
 2922                                          &rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl,
 2923                                          &rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl);
 2924         } else {
 2925                 rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
 2926                                          &rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl,
 2927                                          &rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl);
 2928         }
 2929 }
 2930 
 2931 static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2932                                     enum rtw89_rf_path path)
 2933 {
 2934         const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 2935         enum rtw89_band band = chan->band_type;
 2936         const struct rtw89_rfk_tbl *tbl;
 2937 
 2938         if (path == RF_PATH_A) {
 2939                 if (band == RTW89_BAND_2G)
 2940                         tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl;
 2941                 else if (band == RTW89_BAND_6G)
 2942                         tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl;
 2943                 else
 2944                         tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl;
 2945         } else {
 2946                 if (band == RTW89_BAND_2G)
 2947                         tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl;
 2948                 else if (band == RTW89_BAND_6G)
 2949                         tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl;
 2950                 else
 2951                         tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl;
 2952         }
 2953 
 2954         rtw89_rfk_parser(rtwdev, tbl);
 2955 }
 2956 
 2957 static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2958                             enum rtw89_rf_path path)
 2959 {
 2960         rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
 2961                                  &rtw8852c_tssi_slope_defs_a_tbl,
 2962                                  &rtw8852c_tssi_slope_defs_b_tbl);
 2963 }
 2964 
 2965 static void _tssi_run_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2966                             enum rtw89_rf_path path)
 2967 {
 2968         rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
 2969                                  &rtw8852c_tssi_run_slope_defs_a_tbl,
 2970                                  &rtw8852c_tssi_run_slope_defs_b_tbl);
 2971 }
 2972 
 2973 static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 2974                             enum rtw89_rf_path path)
 2975 {
 2976         rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
 2977                                  &rtw8852c_tssi_track_defs_a_tbl,
 2978                                  &rtw8852c_tssi_track_defs_b_tbl);
 2979 }
 2980 
 2981 static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
 2982                                           enum rtw89_phy_idx phy,
 2983                                           enum rtw89_rf_path path)
 2984 {
 2985         rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
 2986                                  &rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl,
 2987                                  &rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl);
 2988 }
 2989 
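      /*
       * Enable TSSI on the paths owned by this PHY (both paths unless DBCC
       * splits them between PHY0 and PHY1), record the base thermal reading
       * and mark each path as running in TSSI mode.
       */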
 2990 static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
 2991 {
 2992         struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
 2993         u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
 2994 
 2995         if (rtwdev->dbcc_en) {
 2996                 if (phy == RTW89_PHY_0) {
 2997                         path = RF_PATH_A;
 2998                         path_max = RF_PATH_B;
 2999                 } else if (phy == RTW89_PHY_1) {
 3000                         path = RF_PATH_B;
 3001                         path_max = RF_PATH_NUM_8852C;
 3002                 }
 3003         }
 3004 
 3005         for (i = path; i < path_max; i++) {
 3006                 _tssi_set_track(rtwdev, phy, i);
 3007                 _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
 3008 
 3009                 rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
 3010                                          &rtw8852c_tssi_enable_defs_a_tbl,
 3011                                          &rtw8852c_tssi_enable_defs_b_tbl);
 3012 
 3013                 tssi_info->base_thermal[i] =
 3014                         ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
 3015                 rtwdev->is_tssi_mode[i] = true;
 3016         }
 3017 }
 3018 
 3019 static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
 3020 {
 3021         u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
 3022 
 3023         if (rtwdev->dbcc_en) {
 3024                 if (phy == RTW89_PHY_0) {
 3025                         path = RF_PATH_A;
 3026                         path_max = RF_PATH_B;
 3027                 } else if (phy == RTW89_PHY_1) {
 3028                         path = RF_PATH_B;
 3029                         path_max = RF_PATH_NUM_8852C;
 3030                 }
 3031         }
 3032 
 3033         for (i = path; i < path_max; i++) {
 3034                 if (i == RF_PATH_A) {
 3035                         rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_a_tbl);
 3036                         rtwdev->is_tssi_mode[RF_PATH_A] = false;
 3037                 } else if (i == RF_PATH_B) {
 3038                         rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_b_tbl);
 3039                         rtwdev->is_tssi_mode[RF_PATH_B] = false;
 3040                 }
 3041         }
 3042 }
 3043 
 3044 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
 3045 {
 3046         switch (ch) {
 3047         case 1 ... 2:
 3048                 return 0;
 3049         case 3 ... 5:
 3050                 return 1;
 3051         case 6 ... 8:
 3052                 return 2;
 3053         case 9 ... 11:
 3054                 return 3;
 3055         case 12 ... 13:
 3056                 return 4;
 3057         case 14:
 3058                 return 5;
 3059         }
 3060 
 3061         return 0;
 3062 }
 3063 
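      /*
       * Channel-to-group helpers: bit 31 marks a channel that falls between
       * two calibration groups, in which case the DE value is taken as the
       * average of the group in the low bits and the next one.
       */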
 3064 #define TSSI_EXTRA_GROUP_BIT (BIT(31))
 3065 #define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
 3066 #define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
 3067 #define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
 3068 #define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
 3069 
 3070 static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
 3071 {
 3072         switch (ch) {
 3073         case 1 ... 2:
 3074                 return 0;
 3075         case 3 ... 5:
 3076                 return 1;
 3077         case 6 ... 8:
 3078                 return 2;
 3079         case 9 ... 11:
 3080                 return 3;
 3081         case 12 ... 14:
 3082                 return 4;
 3083         case 36 ... 40:
 3084                 return 5;
 3085         case 41 ... 43:
 3086                 return TSSI_EXTRA_GROUP(5);
 3087         case 44 ... 48:
 3088                 return 6;
 3089         case 49 ... 51:
 3090                 return TSSI_EXTRA_GROUP(6);
 3091         case 52 ... 56:
 3092                 return 7;
 3093         case 57 ... 59:
 3094                 return TSSI_EXTRA_GROUP(7);
 3095         case 60 ... 64:
 3096                 return 8;
 3097         case 100 ... 104:
 3098                 return 9;
 3099         case 105 ... 107:
 3100                 return TSSI_EXTRA_GROUP(9);
 3101         case 108 ... 112:
 3102                 return 10;
 3103         case 113 ... 115:
 3104                 return TSSI_EXTRA_GROUP(10);
 3105         case 116 ... 120:
 3106                 return 11;
 3107         case 121 ... 123:
 3108                 return TSSI_EXTRA_GROUP(11);
 3109         case 124 ... 128:
 3110                 return 12;
 3111         case 129 ... 131:
 3112                 return TSSI_EXTRA_GROUP(12);
 3113         case 132 ... 136:
 3114                 return 13;
 3115         case 137 ... 139:
 3116                 return TSSI_EXTRA_GROUP(13);
 3117         case 140 ... 144:
 3118                 return 14;
 3119         case 149 ... 153:
 3120                 return 15;
 3121         case 154 ... 156:
 3122                 return TSSI_EXTRA_GROUP(15);
 3123         case 157 ... 161:
 3124                 return 16;
 3125         case 162 ... 164:
 3126                 return TSSI_EXTRA_GROUP(16);
 3127         case 165 ... 169:
 3128                 return 17;
 3129         case 170 ... 172:
 3130                 return TSSI_EXTRA_GROUP(17);
 3131         case 173 ... 177:
 3132                 return 18;
 3133         }
 3134 
 3135         return 0;
 3136 }
 3137 
 3138 static u32 _tssi_get_6g_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
 3139 {
 3140         switch (ch) {
 3141         case 1 ... 5:
 3142                 return 0;
 3143         case 6 ... 8:
 3144                 return TSSI_EXTRA_GROUP(0);
 3145         case 9 ... 13:
 3146                 return 1;
 3147         case 14 ... 16:
 3148                 return TSSI_EXTRA_GROUP(1);
 3149         case 17 ... 21:
 3150                 return 2;
 3151         case 22 ... 24:
 3152                 return TSSI_EXTRA_GROUP(2);
 3153         case 25 ... 29:
 3154                 return 3;
 3155         case 33 ... 37:
 3156                 return 4;
 3157         case 38 ... 40:
 3158                 return TSSI_EXTRA_GROUP(4);
 3159         case 41 ... 45:
 3160                 return 5;
 3161         case 46 ... 48:
 3162                 return TSSI_EXTRA_GROUP(5);
 3163         case 49 ... 53:
 3164                 return 6;
 3165         case 54 ... 56:
 3166                 return TSSI_EXTRA_GROUP(6);
 3167         case 57 ... 61:
 3168                 return 7;
 3169         case 65 ... 69:
 3170                 return 8;
 3171         case 70 ... 72:
 3172                 return TSSI_EXTRA_GROUP(8);
 3173         case 73 ... 77:
 3174                 return 9;
 3175         case 78 ... 80:
 3176                 return TSSI_EXTRA_GROUP(9);
 3177         case 81 ... 85:
 3178                 return 10;
 3179         case 86 ... 88:
 3180                 return TSSI_EXTRA_GROUP(10);
 3181         case 89 ... 93:
 3182                 return 11;
 3183         case 97 ... 101:
 3184                 return 12;
 3185         case 102 ... 104:
 3186                 return TSSI_EXTRA_GROUP(12);
 3187         case 105 ... 109:
 3188                 return 13;
 3189         case 110 ... 112:
 3190                 return TSSI_EXTRA_GROUP(13);
 3191         case 113 ... 117:
 3192                 return 14;
 3193         case 118 ... 120:
 3194                 return TSSI_EXTRA_GROUP(14);
 3195         case 121 ... 125:
 3196                 return 15;
 3197         case 129 ... 133:
 3198                 return 16;
 3199         case 134 ... 136:
 3200                 return TSSI_EXTRA_GROUP(16);
 3201         case 137 ... 141:
 3202                 return 17;
 3203         case 142 ... 144:
 3204                 return TSSI_EXTRA_GROUP(17);
 3205         case 145 ... 149:
 3206                 return 18;
 3207         case 150 ... 152:
 3208                 return TSSI_EXTRA_GROUP(18);
 3209         case 153 ... 157:
 3210                 return 19;
 3211         case 161 ... 165:
 3212                 return 20;
 3213         case 166 ... 168:
 3214                 return TSSI_EXTRA_GROUP(20);
 3215         case 169 ... 173:
 3216                 return 21;
 3217         case 174 ... 176:
 3218                 return TSSI_EXTRA_GROUP(21);
 3219         case 177 ... 181:
 3220                 return 22;
 3221         case 182 ... 184:
 3222                 return TSSI_EXTRA_GROUP(22);
 3223         case 185 ... 189:
 3224                 return 23;
 3225         case 193 ... 197:
 3226                 return 24;
 3227         case 198 ... 200:
 3228                 return TSSI_EXTRA_GROUP(24);
 3229         case 201 ... 205:
 3230                 return 25;
 3231         case 206 ... 208:
 3232                 return TSSI_EXTRA_GROUP(25);
 3233         case 209 ... 213:
 3234                 return 26;
 3235         case 214 ... 216:
 3236                 return TSSI_EXTRA_GROUP(26);
 3237         case 217 ... 221:
 3238                 return 27;
 3239         case 225 ... 229:
 3240                 return 28;
 3241         case 230 ... 232:
 3242                 return TSSI_EXTRA_GROUP(28);
 3243         case 233 ... 237:
 3244                 return 29;
 3245         case 238 ... 240:
 3246                 return TSSI_EXTRA_GROUP(29);
 3247         case 241 ... 245:
 3248                 return 30;
 3249         case 246 ... 248:
 3250                 return TSSI_EXTRA_GROUP(30);
 3251         case 249 ... 253:
 3252                 return 31;
 3253         }
 3254 
 3255         return 0;
 3256 }
 3257 
 3258 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
 3259 {
 3260         switch (ch) {
 3261         case 1 ... 8:
 3262                 return 0;
 3263         case 9 ... 14:
 3264                 return 1;
 3265         case 36 ... 48:
 3266                 return 2;
 3267         case 49 ... 51:
 3268                 return TSSI_EXTRA_GROUP(2);
 3269         case 52 ... 64:
 3270                 return 3;
 3271         case 100 ... 112:
 3272                 return 4;
 3273         case 113 ... 115:
 3274                 return TSSI_EXTRA_GROUP(4);
 3275         case 116 ... 128:
 3276                 return 5;
 3277         case 132 ... 144:
 3278                 return 6;
 3279         case 149 ... 177:
 3280                 return 7;
 3281         }
 3282 
 3283         return 0;
 3284 }
 3285 
 3286 static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch)
 3287 {
 3288         switch (ch) {
 3289         case 1 ... 13:
 3290                 return 0;
 3291         case 14 ... 16:
 3292                 return TSSI_EXTRA_GROUP(0);
 3293         case 17 ... 29:
 3294                 return 1;
 3295         case 33 ... 45:
 3296                 return 2;
 3297         case 46 ... 48:
 3298                 return TSSI_EXTRA_GROUP(2);
 3299         case 49 ... 61:
 3300                 return 3;
 3301         case 65 ... 77:
 3302                 return 4;
 3303         case 78 ... 80:
 3304                 return TSSI_EXTRA_GROUP(4);
 3305         case 81 ... 93:
 3306                 return 5;
 3307         case 97 ... 109:
 3308                 return 6;
 3309         case 110 ... 112:
 3310                 return TSSI_EXTRA_GROUP(6);
 3311         case 113 ... 125:
 3312                 return 7;
 3313         case 129 ... 141:
 3314                 return 8;
 3315         case 142 ... 144:
 3316                 return TSSI_EXTRA_GROUP(8);
 3317         case 145 ... 157:
 3318                 return 9;
 3319         case 161 ... 173:
 3320                 return 10;
 3321         case 174 ... 176:
 3322                 return TSSI_EXTRA_GROUP(10);
 3323         case 177 ... 189:
 3324                 return 11;
 3325         case 193 ... 205:
 3326                 return 12;
 3327         case 206 ... 208:
 3328                 return TSSI_EXTRA_GROUP(12);
 3329         case 209 ... 221:
 3330                 return 13;
 3331         case 225 ... 237:
 3332                 return 14;
 3333         case 238 ... 240:
 3334                 return TSSI_EXTRA_GROUP(14);
 3335         case 241 ... 253:
 3336                 return 15;
 3337         }
 3338 
 3339         return 0;
 3340 }
 3341 
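      /*
       * Look up the per-group OFDM/MCS TSSI DE for the current channel:
       * 2 GHz and 5 GHz use the regular group table, 6 GHz its own table,
       * and "extra" groups average the two neighbouring entries.
       */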
 3342 static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 3343                             enum rtw89_rf_path path)
 3344 {
 3345         struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
 3346         const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 3347         enum rtw89_band band = chan->band_type;
 3348         u8 ch = chan->channel;
 3349         u32 gidx, gidx_1st, gidx_2nd;
 3350         s8 de_1st;
 3351         s8 de_2nd;
 3352         s8 val;
 3353 
 3354         if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
 3355                 gidx = _tssi_get_ofdm_group(rtwdev, ch);
 3356 
 3357                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3358                             "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
 3359                             path, gidx);
 3360 
 3361                 if (IS_TSSI_EXTRA_GROUP(gidx)) {
 3362                         gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
 3363                         gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
 3364                         de_1st = tssi_info->tssi_mcs[path][gidx_1st];
 3365                         de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
 3366                         val = (de_1st + de_2nd) / 2;
 3367 
 3368                         rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3369                                     "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
 3370                                     path, val, de_1st, de_2nd);
 3371                 } else {
 3372                         val = tssi_info->tssi_mcs[path][gidx];
 3373 
 3374                         rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3375                                     "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
 3376                 }
 3377         } else {
 3378                 gidx = _tssi_get_6g_ofdm_group(rtwdev, ch);
 3379 
 3380                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3381                             "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
 3382                             path, gidx);
 3383 
 3384                 if (IS_TSSI_EXTRA_GROUP(gidx)) {
 3385                         gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
 3386                         gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
 3387                         de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
 3388                         de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
 3389                         val = (de_1st + de_2nd) / 2;
 3390 
 3391                         rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3392                                     "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
 3393                                     path, val, de_1st, de_2nd);
 3394                 } else {
 3395                         val = tssi_info->tssi_6g_mcs[path][gidx];
 3396 
 3397                         rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3398                                     "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
 3399                 }
 3400         }
 3401 
 3402         return val;
 3403 }
 3404 
 3405 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
 3406                                  enum rtw89_phy_idx phy,
 3407                                  enum rtw89_rf_path path)
 3408 {
 3409         struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
 3410         const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 3411         enum rtw89_band band = chan->band_type;
 3412         u8 ch = chan->channel;
 3413         u32 tgidx, tgidx_1st, tgidx_2nd;
 3414         s8 tde_1st = 0;
 3415         s8 tde_2nd = 0;
 3416         s8 val;
 3417 
 3418         if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
 3419                 tgidx = _tssi_get_trim_group(rtwdev, ch);
 3420 
 3421                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3422                             "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
 3423                             path, tgidx);
 3424 
 3425                 if (IS_TSSI_EXTRA_GROUP(tgidx)) {
 3426                         tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
 3427                         tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
 3428                         tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
 3429                         tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
 3430                         val = (tde_1st + tde_2nd) / 2;
 3431 
 3432                         rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3433                                     "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
 3434                                     path, val, tde_1st, tde_2nd);
 3435                 } else {
 3436                         val = tssi_info->tssi_trim[path][tgidx];
 3437 
 3438                         rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3439                                     "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
 3440                                     path, val);
 3441                 }
 3442         } else {
 3443                 tgidx = _tssi_get_6g_trim_group(rtwdev, ch);
 3444 
 3445                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3446                             "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
 3447                             path, tgidx);
 3448 
 3449                 if (IS_TSSI_EXTRA_GROUP(tgidx)) {
 3450                         tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
 3451                         tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
 3452                         tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
 3453                         tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
 3454                         val = (tde_1st + tde_2nd) / 2;
 3455 
 3456                         rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3457                                     "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
 3458                                     path, val, tde_1st, tde_2nd);
 3459                 } else {
 3460                         val = tssi_info->tssi_trim_6g[path][tgidx];
 3461 
 3462                         rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3463                                     "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
 3464                                     path, val);
 3465                 }
 3466         }
 3467 
 3468         return val;
 3469 }
 3470 
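      /*
       * Program the efuse-derived TSSI DE values, plus the per-group trim,
       * into bits [21:12] of the per-path CCK and MCS DE registers for the
       * current channel.
       */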
 3471 static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
 3472                                   enum rtw89_phy_idx phy)
 3473 {
 3474         struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
 3475         const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 3476         u8 ch = chan->channel;
 3477         u8 gidx;
 3478         s8 ofdm_de;
 3479         s8 trim_de;
 3480         s32 val;
 3481         u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
 3482 
 3483         rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
 3484                     phy, ch);
 3485 
 3486         if (rtwdev->dbcc_en) {
 3487                 if (phy == RTW89_PHY_0) {
 3488                         path = RF_PATH_A;
 3489                         path_max = RF_PATH_B;
 3490                 } else if (phy == RTW89_PHY_1) {
 3491                         path = RF_PATH_B;
 3492                         path_max = RF_PATH_NUM_8852C;
 3493                 }
 3494         }
 3495 
 3496         for (i = path; i < path_max; i++) {
 3497                 gidx = _tssi_get_cck_group(rtwdev, ch);
 3498                 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
 3499                 val = tssi_info->tssi_cck[i][gidx] + trim_de;
 3500 
 3501                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3502                             "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
 3503                             i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);
 3504 
 3505                 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
 3506                 rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);
 3507 
 3508                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3509                             "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
 3510                             _tssi_de_cck_long[i],
 3511                             rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
 3512                                                   _TSSI_DE_MASK));
 3513 
 3514                 ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
 3515                 trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
 3516                 val = ofdm_de + trim_de;
 3517 
 3518                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3519                             "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
 3520                             i, ofdm_de, trim_de);
 3521 
 3522                 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
 3523                 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
 3524                 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
 3525                 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
 3526                 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
 3527                 rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);
 3528 
 3529                 rtw89_debug(rtwdev, RTW89_DBG_TSSI,
 3530                             "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
 3531                             _tssi_de_mcs_20m[i],
 3532                             rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
 3533                                                   _TSSI_DE_MASK));
 3534         }
 3535 }
 3536 
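      /*
       * Toggle continuous TSSI tracking for one path via BIT(30)/BIT(31) of
       * the per-path 0x5818/0x7818 and 0x5820/0x7820 registers (clearing the
       * bits presumably un-gates tracking); when enabling, the efuse DE
       * values are re-applied on the PHY that owns the path.
       */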
 3537 static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
 3538                                   enum rtw89_rf_path path)
 3539 {
 3540         static const u32 tssi_trk[2] = {0x5818, 0x7818};
 3541         static const u32 tssi_en[2] = {0x5820, 0x7820};
 3542 
 3543         if (en) {
 3544                 rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0);
 3545                 rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0);
 3546                 if (rtwdev->dbcc_en && path == RF_PATH_B)
 3547                         _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1);
 3548                 else
 3549                         _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0);
 3550         } else {
 3551                 rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1);
 3552                 rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1);
 3553         }
 3554 }
 3555 
 3556 void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
 3557 {
 3558         if (!rtwdev->dbcc_en) {
 3559                 rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
 3560                 rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
 3561         } else {
 3562                 if (phy_idx == RTW89_PHY_0)
 3563                         rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
 3564                 else
 3565                         rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
 3566         }
 3567 }
 3568 
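      /*
       * Program the bandwidth field of the RF channel-config register (the
       * "dav" copy uses RR_CFGCH, the other RR_CFGCH_V1) together with the
       * matching BB channel-filter settings for the requested bandwidth.
       */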
 3569 static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
 3570                         enum rtw89_bandwidth bw, bool is_dav)
 3571 {
 3572         u32 rf_reg18;
 3573         u32 reg_reg18_addr;
 3574 
 3575         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
 3576         if (is_dav)
 3577                 reg_reg18_addr = RR_CFGCH;
 3578         else
 3579                 reg_reg18_addr = RR_CFGCH_V1;
 3580 
 3581         rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
 3582         rf_reg18 &= ~RR_CFGCH_BW;
 3583 
 3584         switch (bw) {
 3585         case RTW89_CHANNEL_WIDTH_5:
 3586         case RTW89_CHANNEL_WIDTH_10:
 3587         case RTW89_CHANNEL_WIDTH_20:
 3588                 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
 3589                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
 3590                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
 3591                 break;
 3592         case RTW89_CHANNEL_WIDTH_40:
 3593                 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
 3594                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
 3595                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
 3596                 break;
 3597         case RTW89_CHANNEL_WIDTH_80:
 3598                 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
 3599                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x2);
 3600                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xd);
 3601                 break;
 3602         case RTW89_CHANNEL_WIDTH_160:
 3603                 rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_160M);
 3604                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
 3605                 rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
 3606                 break;
 3607         default:
 3608                 break;
 3609         }
 3610 
 3611         rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
 3612 }
 3613 
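/*
 * Apply the bandwidth setting to both CFGCH instances of every path
 * selected by _kpath(). On CAV silicon without DBCC, path B additionally
 * copies path A's CFGCH value while its RR_RSV1 reset bit is held low,
 * presumably to keep the two paths in step.
 */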
 3614 static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 3615                      enum rtw89_bandwidth bw)
 3616 {
 3617         bool is_dav;
 3618         u8 kpath, path;
 3619         u32 tmp = 0;
 3620 
 3621         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
 3622         kpath = _kpath(rtwdev, phy);
 3623 
 3624         for (path = 0; path < 2; path++) {
 3625                 if (!(kpath & BIT(path)))
 3626                         continue;
 3627 
 3628                 is_dav = true;
 3629                 _bw_setting(rtwdev, path, bw, is_dav);
 3630                 is_dav = false;
 3631                 _bw_setting(rtwdev, path, bw, is_dav);
 3632                 if (rtwdev->dbcc_en)
 3633                         continue;
 3634 
 3635                 if (path == RF_PATH_B && rtwdev->hal.cv == CHIP_CAV) {
 3636                         rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
 3637                         tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
 3638                         rtw89_write_rf(rtwdev, RF_PATH_B, RR_APK, RR_APK_MOD, 0x3);
 3639                         rtw89_write_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK, tmp);
 3640                         fsleep(100);
 3641                         rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
 3642                 }
 3643         }
 3644 }
 3645 
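/*
 * Write the band and central-channel fields of the RF CFGCH register
 * (address 0x18, or 0x10018 for the second instance), then delay
 * 100 us before returning.
 */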
 3646 static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
 3647                         u8 central_ch, enum rtw89_band band, bool is_dav)
 3648 {
 3649         u32 rf_reg18;
 3650         u32 reg_reg18_addr;
 3651 
 3652         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
 3653         if (is_dav)
 3654                 reg_reg18_addr = 0x18;
 3655         else
 3656                 reg_reg18_addr = 0x10018;
 3657 
 3658         rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
 3659         rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BAND0 | RR_CFGCH_CH);
 3660         rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);
 3661 
 3662         switch (band) {
 3663         case RTW89_BAND_2G:
 3664                 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_2G);
 3665                 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_2G);
 3666                 break;
 3667         case RTW89_BAND_5G:
 3668                 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G);
 3669                 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
 3670                 break;
 3671         case RTW89_BAND_6G:
 3672                 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_6G);
 3673                 rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_6G);
 3674                 break;
 3675         default:
 3676                 break;
 3677         }
 3678         rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
 3679         fsleep(100);
 3680 }
 3681 
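/*
 * Reject central channels outside the supported 2/5/6 GHz ranges, then
 * program both CFGCH instances on every path selected by _kpath().
 */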
 3682 static void _ctrl_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 3683                      u8 central_ch, enum rtw89_band band)
 3684 {
 3685         u8 kpath, path;
 3686 
 3687         rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
 3688         if (band != RTW89_BAND_6G) {
 3689                 if ((central_ch > 14 && central_ch < 36) ||
 3690                     (central_ch > 64 && central_ch < 100) ||
 3691                     (central_ch > 144 && central_ch < 149) || central_ch > 177)
 3692                         return;
 3693         } else {
 3694                 if (central_ch > 253 || central_ch == 2)
 3695                         return;
 3696         }
 3697 
 3698         kpath = _kpath(rtwdev, phy);
 3699 
 3700         for (path = 0; path < 2; path++) {
 3701                 if (kpath & BIT(path)) {
 3702                         _ch_setting(rtwdev, path, central_ch, band, true);
 3703                         _ch_setting(rtwdev, path, central_ch, band, false);
 3704                 }
 3705         }
 3706 }
 3707 
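/*
 * Select the RX baseband filter bandwidth per path through the RF LUT
 * write interface; wider channels use smaller RR_LUTWD0_LB codes.
 */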
 3708 static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 3709                      enum rtw89_bandwidth bw)
 3710 {
 3711         u8 kpath;
 3712         u8 path;
 3713         u32 val;
 3714 
 3715         kpath = _kpath(rtwdev, phy);
 3716         for (path = 0; path < 2; path++) {
 3717                 if (!(kpath & BIT(path)))
 3718                         continue;
 3719 
 3720                 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
 3721                 rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0xa);
 3722                 switch (bw) {
 3723                 case RTW89_CHANNEL_WIDTH_20:
 3724                         val = 0x1b;
 3725                         break;
 3726                 case RTW89_CHANNEL_WIDTH_40:
 3727                         val = 0x13;
 3728                         break;
 3729                 case RTW89_CHANNEL_WIDTH_80:
 3730                         val = 0xb;
 3731                         break;
 3732                 case RTW89_CHANNEL_WIDTH_160:
 3733                 default:
 3734                         val = 0x3;
 3735                         break;
 3736                 }
 3737                 rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, val);
 3738                 rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
 3739         }
 3740 }
 3741 
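/*
 * Snapshot the averaged thermal reading of each path; rtw8852c_lck_track()
 * compares against these values to decide when LCK must be re-run.
 */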
 3742 static void _lck_keep_thermal(struct rtw89_dev *rtwdev)
 3743 {
 3744         struct rtw89_lck_info *lck = &rtwdev->lck;
 3745         int path;
 3746 
 3747         for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
 3748                 lck->thermal[path] =
 3749                         ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
 3750                 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
 3751                             "[LCK] path=%d thermal=0x%x", path, lck->thermal[path]);
 3752         }
 3753 }
 3754 
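/*
 * Re-run LCK by re-writing the saved CFGCH value with RR_LCK_TRGSEL
 * toggled around it (both paths under DBCC, path A only otherwise),
 * then refresh the stored thermal baseline.
 */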
 3755 static void _lck(struct rtw89_dev *rtwdev)
 3756 {
 3757         u32 tmp18[2];
 3758         int path = rtwdev->dbcc_en ? 2 : 1;
 3759         int i;
 3760 
 3761         rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "[LCK] DO LCK\n");
 3762 
 3763         tmp18[0] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
 3764         tmp18[1] = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
 3765 
 3766         for (i = 0; i < path; i++) {
 3767                 rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
 3768                 rtw89_write_rf(rtwdev, i, RR_CFGCH, RFREG_MASK, tmp18[i]);
 3769                 rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
 3770         }
 3771 
 3772         _lck_keep_thermal(rtwdev);
 3773 }
 3774 
 3775 #define RTW8852C_LCK_TH 8
 3776 
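/*
 * Periodic LCK tracking: if any path's averaged thermal reading has
 * drifted by RTW8852C_LCK_TH or more since the last calibration, run
 * LCK once and return.
 */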
 3777 void rtw8852c_lck_track(struct rtw89_dev *rtwdev)
 3778 {
 3779         struct rtw89_lck_info *lck = &rtwdev->lck;
 3780         u8 cur_thermal;
 3781         int delta;
 3782         int path;
 3783 
 3784         for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
 3785                 cur_thermal =
 3786                         ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
 3787                 delta = abs((int)cur_thermal - lck->thermal[path]);
 3788 
 3789                 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
 3790                             "[LCK] path=%d current thermal=0x%x delta=0x%x\n",
 3791                             path, cur_thermal, delta);
 3792 
 3793                 if (delta >= RTW8852C_LCK_TH) {
 3794                         _lck(rtwdev);
 3795                         return;
 3796                 }
 3797         }
 3798 }
 3799 
 3800 void rtw8852c_lck_init(struct rtw89_dev *rtwdev)
 3801 {
 3802         _lck_keep_thermal(rtwdev);
 3803 }
 3804 
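/*
 * Combined switch: set the channel, then the RF bandwidth, then the
 * RX baseband filter bandwidth.
 */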
 3805 static
 3806 void rtw8852c_ctrl_bw_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
 3807                          u8 central_ch, enum rtw89_band band,
 3808                          enum rtw89_bandwidth bw)
 3809 {
 3810         _ctrl_ch(rtwdev, phy, central_ch, band);
 3811         _ctrl_bw(rtwdev, phy, bw);
 3812         _rxbb_bw(rtwdev, phy, bw);
 3813 }
 3814 
 3815 void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
 3816                              const struct rtw89_chan *chan,
 3817                              enum rtw89_phy_idx phy_idx)
 3818 {
 3819         rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, chan->channel,
 3820                             chan->band_type,
 3821                             chan->band_width);
 3822 }
 3823 
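/*
 * Record the current channel and band in the MCC info table: starting
 * from the current index, take the first empty slot; if every
 * RTW89_IQK_CHS_NR entry is occupied, the index wraps back to where it
 * started and that slot is overwritten.
 */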
 3824 void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
 3825 {
 3826         const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 3827         struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
 3828         u8 idx = mcc_info->table_idx;
 3829         int i;
 3830 
 3831         for (i = 0; i < RTW89_IQK_CHS_NR; i++) {
 3832                 if (mcc_info->ch[idx] == 0)
 3833                         break;
 3834                 if (++idx >= RTW89_IQK_CHS_NR)
 3835                         idx = 0;
 3836         }
 3837 
 3838         mcc_info->table_idx = idx;
 3839         mcc_info->ch[idx] = chan->channel;
 3840         mcc_info->band[idx] = chan->band_type;
 3841 }
 3842 
 3843 void rtw8852c_rck(struct rtw89_dev *rtwdev)
 3844 {
 3845         u8 path;
 3846 
 3847         for (path = 0; path < 2; path++)
 3848                 _rck(rtwdev, path);
 3849 }
 3850 
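/* DAC calibration, bracketed by BTC RFK start/stop notifications on PHY 0. */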
 3851 void rtw8852c_dack(struct rtw89_dev *rtwdev)
 3852 {
 3853         u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
 3854 
 3855         rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
 3856         _dac_cal(rtwdev, false);
 3857         rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
 3858 }
 3859 
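/*
 * Full IQ calibration: notify BTC, pause scheduled TX, wait for the
 * selected paths to reach RX mode, run IQK, then resume TX and notify
 * BTC that the calibration has stopped.
 */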
 3860 void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
 3861 {
 3862         u32 tx_en;
 3863         u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
 3864 
 3865         rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
 3866         rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
 3867         _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
 3868 
 3869         _iqk_init(rtwdev);
 3870         _iqk(rtwdev, phy_idx, false);
 3871 
 3872         rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
 3873         rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
 3874 }
 3875 
 3876 #define RXDCK_VER_8852C 0xe
 3877 
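/*
 * RX DC offset calibration for every path selected by _kpath(). RR_RSV1
 * is saved and restored around the calibration, the per-path TSSI
 * tracking bit is toggled on paths running in TSSI mode, and the
 * thermal reading at calibration time is recorded for
 * rtw8852c_rx_dck_track().
 */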
 3878 void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
 3879 {
 3880         struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
 3881         u8 path, kpath;
 3882         u32 rf_reg5;
 3883 
 3884         kpath = _kpath(rtwdev, phy);
 3885         rtw89_debug(rtwdev, RTW89_DBG_RFK,
 3886                     "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
 3887                     RXDCK_VER_8852C, rtwdev->hal.cv);
 3888 
 3889         for (path = 0; path < 2; path++) {
 3890                 rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
 3891                 if (!(kpath & BIT(path)))
 3892                         continue;
 3893 
 3894                 if (rtwdev->is_tssi_mode[path])
 3895                         rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
 3896                                                B_P0_TSSI_TRK_EN, 0x1);
 3897                 rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
 3898                 rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
 3899                 _set_rx_dck(rtwdev, phy, path, is_afe);
 3900                 rx_dck->thermal[path] = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
 3901                 rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
 3902 
 3903                 if (rtwdev->is_tssi_mode[path])
 3904                         rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
 3905                                                B_P0_TSSI_TRK_EN, 0x0);
 3906         }
 3907 }
 3908 
 3909 #define RTW8852C_RX_DCK_TH 8
 3910 
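/*
 * Re-run RX DCK on PHY 0 when any path's averaged thermal reading has
 * drifted by RTW8852C_RX_DCK_TH or more since the last calibration.
 */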
 3911 void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
 3912 {
 3913         struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
 3914         u8 cur_thermal;
 3915         int delta;
 3916         int path;
 3917 
 3918         for (path = 0; path < RF_PATH_NUM_8852C; path++) {
 3919                 cur_thermal =
 3920                         ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
 3921                 delta = abs((int)cur_thermal - rx_dck->thermal[path]);
 3922 
 3923                 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
 3924                             "[RX_DCK] path=%d current thermal=0x%x delta=0x%x\n",
 3925                             path, cur_thermal, delta);
 3926 
 3927                 if (delta >= RTW8852C_RX_DCK_TH) {
 3928                         rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
 3929                         return;
 3930                 }
 3931         }
 3932 }
 3933 
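/*
 * Digital pre-distortion calibration, using the same BTC notification
 * and scheduled-TX pause bracket as IQK; DPK is enabled and DPK reload
 * is disabled before the calibration runs.
 */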
 3934 void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
 3935 {
 3936         u32 tx_en;
 3937         u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
 3938 
 3939         rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
 3940         rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
 3941         _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
 3942 
 3943         rtwdev->dpk.is_dpk_enable = true;
 3944         rtwdev->dpk.is_dpk_reload_en = false;
 3945         _dpk(rtwdev, phy_idx, false);
 3946 
 3947         rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
 3948         rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
 3949 }
 3950 
 3951 void rtw8852c_dpk_track(struct rtw89_dev *rtwdev)
 3952 {
 3953         _dpk_track(rtwdev);
 3954 }
 3955 
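/*
 * Full TSSI bring-up for a PHY: disable TSSI, configure every path the
 * PHY owns (both paths, or a single path when DBCC splits them), then
 * re-enable TSSI and load the efuse-derived DE values.
 */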
 3956 void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
 3957 {
 3958         u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
 3959 
 3960         rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
 3961 
 3962         if (rtwdev->dbcc_en) {
 3963                 if (phy == RTW89_PHY_0) {
 3964                         path = RF_PATH_A;
 3965                         path_max = RF_PATH_B;
 3966                 } else if (phy == RTW89_PHY_1) {
 3967                         path = RF_PATH_B;
 3968                         path_max = RF_PATH_NUM_8852C;
 3969                 }
 3970         }
 3971 
 3972         _tssi_disable(rtwdev, phy);
 3973 
 3974         for (i = path; i < path_max; i++) {
 3975                 _tssi_set_sys(rtwdev, phy, i);
 3976                 _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
 3977                 _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
 3978                 _tssi_set_dck(rtwdev, phy, i);
 3979                 _tssi_set_bbgain_split(rtwdev, phy, i);
 3980                 _tssi_set_tmeter_tbl(rtwdev, phy, i);
 3981                 _tssi_slope_cal_org(rtwdev, phy, i);
 3982                 _tssi_set_aligk_default(rtwdev, phy, i);
 3983                 _tssi_set_slope(rtwdev, phy, i);
 3984                 _tssi_run_slope(rtwdev, phy, i);
 3985         }
 3986 
 3987         _tssi_enable(rtwdev, phy);
 3988         _tssi_set_efuse_to_de(rtwdev, phy);
 3989 }
 3990 
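/*
 * Lightweight TSSI reconfiguration used around scans; it only runs when
 * both paths are already in TSSI mode and skips the TX-power-control
 * and slope-setting steps of the full rtw8852c_tssi() sequence.
 */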
 3991 void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
 3992 {
 3993         u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
 3994 
 3995         rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
 3996                     __func__, phy);
 3997 
 3998         if (!rtwdev->is_tssi_mode[RF_PATH_A])
 3999                 return;
 4000         if (!rtwdev->is_tssi_mode[RF_PATH_B])
 4001                 return;
 4002 
 4003         if (rtwdev->dbcc_en) {
 4004                 if (phy == RTW89_PHY_0) {
 4005                         path = RF_PATH_A;
 4006                         path_max = RF_PATH_B;
 4007                 } else if (phy == RTW89_PHY_1) {
 4008                         path = RF_PATH_B;
 4009                         path_max = RF_PATH_NUM_8852C;
 4010                 }
 4011         }
 4012 
 4013         _tssi_disable(rtwdev, phy);
 4014 
 4015         for (i = path; i < path_max; i++) {
 4016                 _tssi_set_sys(rtwdev, phy, i);
 4017                 _tssi_set_dck(rtwdev, phy, i);
 4018                 _tssi_set_tmeter_tbl(rtwdev, phy, i);
 4019                 _tssi_slope_cal_org(rtwdev, phy, i);
 4020                 _tssi_set_aligk_default(rtwdev, phy, i);
 4021         }
 4022 
 4023         _tssi_enable(rtwdev, phy);
 4024         _tssi_set_efuse_to_de(rtwdev, phy);
 4025 }
 4026 
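/*
 * At scan start, latch the current TX AGC offset of each path when it
 * is neither 0 nor 0xc000, retrying up to six reads for a non-zero
 * value; at scan end, write the latched offsets back through the TSSI
 * tracking registers and pulse the per-path offset-enable bits.
 */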
 4027 static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev,
 4028                                         enum rtw89_phy_idx phy, bool enable)
 4029 {
 4030         struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
 4031         u8 i;
 4032 
 4033         if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
 4034                 return;
 4035 
 4036         if (enable) {
 4037                 /* SCAN_START */
 4038                 if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 &&
 4039                     rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) {
 4040                         for (i = 0; i < 6; i++) {
 4041                                 tssi_info->default_txagc_offset[RF_PATH_A] =
 4042                                         rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
 4043                                                               B_TXAGC_BB);
 4044                                 if (tssi_info->default_txagc_offset[RF_PATH_A])
 4045                                         break;
 4046                         }
 4047                 }
 4048 
 4049                 if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 &&
 4050                     rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) {
 4051                         for (i = 0; i < 6; i++) {
 4052                                 tssi_info->default_txagc_offset[RF_PATH_B] =
 4053                                         rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
 4054                                                               B_TXAGC_BB_S1);
 4055                                 if (tssi_info->default_txagc_offset[RF_PATH_B])
 4056                                         break;
 4057                         }
 4058                 }
 4059         } else {
 4060                 /* SCAN_END */
 4061                 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
 4062                                        tssi_info->default_txagc_offset[RF_PATH_A]);
 4063                 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
 4064                                        tssi_info->default_txagc_offset[RF_PATH_B]);
 4065 
 4066                 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
 4067                 rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
 4068 
 4069                 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
 4070                 rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
 4071         }
 4072 }
 4073 
 4074 void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev,
 4075                                bool scan_start, enum rtw89_phy_idx phy_idx)
 4076 {
 4077         if (scan_start)
 4078                 rtw8852c_tssi_default_txagc(rtwdev, phy_idx, true);
 4079         else
 4080                 rtw8852c_tssi_default_txagc(rtwdev, phy_idx, false);
 4081 }
