FreeBSD/Linux Kernel Cross Reference
sys/contrib/dev/rtw88/fw.c

    1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
    2 /* Copyright(c) 2018-2019  Realtek Corporation
    3  */
    4 
    5 #include <linux/iopoll.h>
    6 
    7 #include "main.h"
    8 #include "coex.h"
    9 #include "fw.h"
   10 #include "tx.h"
   11 #include "reg.h"
   12 #include "sec.h"
   13 #include "debug.h"
   14 #include "util.h"
   15 #include "wow.h"
   16 #include "ps.h"
   17 
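/*
 * Dispatch extended C2H commands (C2H_HALMAC class). The first payload
 * byte selects the sub-command: TX CCX reports, hardware-scan status
 * reports, and channel-switch indications from a firmware-offloaded scan.
 */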
   18 static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
   19                                       struct sk_buff *skb)
   20 {
   21         struct rtw_c2h_cmd *c2h;
   22         u8 sub_cmd_id;
   23 
   24         c2h = get_c2h_from_skb(skb);
   25         sub_cmd_id = c2h->payload[0];
   26 
   27         switch (sub_cmd_id) {
   28         case C2H_CCX_RPT:
   29                 rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT);
   30                 break;
   31         case C2H_SCAN_STATUS_RPT:
   32                 rtw_hw_scan_status_report(rtwdev, skb);
   33                 break;
   34         case C2H_CHAN_SWITCH:
   35                 rtw_hw_scan_chan_switch(rtwdev, skb);
   36                 break;
   37         default:
   38                 break;
   39         }
   40 }
   41 
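/*
 * Map the current bit rate (in units of 100 kbps, as returned by
 * cfg80211_calculate_bitrate()) to a maximum A-MSDU length for rate
 * control: 1 effectively disables aggregation at low legacy rates,
 * 0 leaves the length unlimited.
 */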
   42 static u16 get_max_amsdu_len(u32 bit_rate)
   43 {
   44         /* lower than ofdm, do not aggregate */
   45         if (bit_rate < 550)
   46                 return 1;
   47 
   48         /* lower than 20M 2ss mcs8, make it small */
   49         if (bit_rate < 1800)
   50                 return 1200;
   51 
   52         /* lower than 40M 2ss mcs9, make it medium */
   53         if (bit_rate < 4000)
   54                 return 2600;
   55 
   56         /* not yet 80M 2ss mcs8/9, make it twice regular packet size */
   57         if (bit_rate < 7000)
   58                 return 3500;
   59 
   60         /* unlimited */
   61         return 0;
   62 }
   63 
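/*
 * rtw_fw_iter_ra_data carries the RA report payload to the per-station
 * iterator below, which rebuilds the matching station's txrate info
 * (legacy, HT or VHT, short GI, bandwidth), recomputes the bit rate and
 * updates the station's max A-MSDU length accordingly.
 */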
   64 struct rtw_fw_iter_ra_data {
   65         struct rtw_dev *rtwdev;
   66         u8 *payload;
   67 };
   68 
   69 static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta)
   70 {
   71         struct rtw_fw_iter_ra_data *ra_data = data;
   72         struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
   73         u8 mac_id, rate, sgi, bw;
   74         u8 mcs, nss;
   75         u32 bit_rate;
   76 
   77         mac_id = GET_RA_REPORT_MACID(ra_data->payload);
   78         if (si->mac_id != mac_id)
   79                 return;
   80 
   81         si->ra_report.txrate.flags = 0;
   82 
   83         rate = GET_RA_REPORT_RATE(ra_data->payload);
   84         sgi = GET_RA_REPORT_SGI(ra_data->payload);
   85         bw = GET_RA_REPORT_BW(ra_data->payload);
   86 
   87         if (rate < DESC_RATEMCS0) {
   88                 si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate);
   89                 goto legacy;
   90         }
   91 
   92         rtw_desc_to_mcsrate(rate, &mcs, &nss);
   93         if (rate >= DESC_RATEVHT1SS_MCS0)
   94                 si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
   95         else if (rate >= DESC_RATEMCS0)
   96                 si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS;
   97 
   98         if (rate >= DESC_RATEMCS0) {
   99                 si->ra_report.txrate.mcs = mcs;
  100                 si->ra_report.txrate.nss = nss;
  101         }
  102 
  103         if (sgi)
  104                 si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
  105 
  106         if (bw == RTW_CHANNEL_WIDTH_80)
  107                 si->ra_report.txrate.bw = RATE_INFO_BW_80;
  108         else if (bw == RTW_CHANNEL_WIDTH_40)
  109                 si->ra_report.txrate.bw = RATE_INFO_BW_40;
  110         else
  111                 si->ra_report.txrate.bw = RATE_INFO_BW_20;
  112 
  113 legacy:
  114         bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate);
  115 
  116         si->ra_report.desc_rate = rate;
  117         si->ra_report.bit_rate = bit_rate;
  118 
  119         sta->max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
  120 }
  121 
  122 static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
  123                                     u8 length)
  124 {
  125         struct rtw_fw_iter_ra_data ra_data;
  126 
  127         if (WARN(length < 7, "invalid ra report c2h length\n"))
  128                 return;
  129 
  130         rtwdev->dm_info.tx_rate = GET_RA_REPORT_RATE(payload);
  131         ra_data.rtwdev = rtwdev;
  132         ra_data.payload = payload;
  133         rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
  134 }
  135 
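/*
 * Beacon-filter notifications from the firmware are fanned out to every
 * vif: signal-change events become mac80211 CQM RSSI high/low events,
 * while connection-loss, beacon-loss and reconnected states are tracked
 * so LPS can be left when beacons disappear.
 */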
  136 struct rtw_beacon_filter_iter_data {
  137         struct rtw_dev *rtwdev;
  138         u8 *payload;
  139 };
  140 
  141 static void rtw_fw_bcn_filter_notify_vif_iter(void *data, u8 *mac,
  142                                               struct ieee80211_vif *vif)
  143 {
  144         struct rtw_beacon_filter_iter_data *iter_data = data;
  145         struct rtw_dev *rtwdev = iter_data->rtwdev;
  146         u8 *payload = iter_data->payload;
  147         u8 type = GET_BCN_FILTER_NOTIFY_TYPE(payload);
  148         u8 event = GET_BCN_FILTER_NOTIFY_EVENT(payload);
  149         s8 sig = (s8)GET_BCN_FILTER_NOTIFY_RSSI(payload);
  150 
  151         switch (type) {
  152         case BCN_FILTER_NOTIFY_SIGNAL_CHANGE:
  153                 event = event ? NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
  154                         NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
  155                 ieee80211_cqm_rssi_notify(vif, event, sig, GFP_KERNEL);
  156                 break;
  157         case BCN_FILTER_CONNECTION_LOSS:
  158                 ieee80211_connection_loss(vif);
  159                 break;
  160         case BCN_FILTER_CONNECTED:
  161                 rtwdev->beacon_loss = false;
  162                 break;
  163         case BCN_FILTER_NOTIFY_BEACON_LOSS:
  164                 rtwdev->beacon_loss = true;
  165                 rtw_leave_lps(rtwdev);
  166                 break;
  167         }
  168 }
  169 
  170 static void rtw_fw_bcn_filter_notify(struct rtw_dev *rtwdev, u8 *payload,
  171                                      u8 length)
  172 {
  173         struct rtw_beacon_filter_iter_data dev_iter_data;
  174 
  175         dev_iter_data.rtwdev = rtwdev;
  176         dev_iter_data.payload = payload;
  177         rtw_iterate_vifs(rtwdev, rtw_fw_bcn_filter_notify_vif_iter,
  178                          &dev_iter_data);
  179 }
  180 
  181 static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload,
  182                                u8 length)
  183 {
  184         struct rtw_dm_info *dm_info = &rtwdev->dm_info;
  185 
  186         dm_info->scan_density = payload[0];
  187 
  188         rtw_dbg(rtwdev, RTW_DBG_FW, "scan.density = %x\n",
  189                 dm_info->scan_density);
  190 }
  191 
  192 static void rtw_fw_adaptivity_result(struct rtw_dev *rtwdev, u8 *payload,
  193                                      u8 length)
  194 {
  195         struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
  196         struct rtw_c2h_adaptivity *result = (struct rtw_c2h_adaptivity *)payload;
  197 
  198         rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
  199                 "Adaptivity: density %x igi %x l2h_th_init %x l2h %x h2l %x option %x\n",
  200                 result->density, result->igi, result->l2h_th_init, result->l2h,
  201                 result->h2l, result->option);
  202 
  203         rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "Reg Setting: L2H %x H2L %x\n",
  204                 rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
  205                                 edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask),
  206                 rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
  207                                 edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask));
  208 
  209         rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "EDCCA Flag %s\n",
  210                 rtw_read32_mask(rtwdev, REG_EDCCA_REPORT, BIT_EDCCA_FLAG) ?
  211                 "Set" : "Unset");
  212 }
  213 
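/*
 * Process-context C2H handler, run for skbs queued by the irqsafe handler
 * below. The packet offset stashed in skb->cb locates the C2H header;
 * rtwdev->mutex is held while dispatching, and commands are dropped once
 * the device is no longer running.
 */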
  214 void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
  215 {
  216         struct rtw_c2h_cmd *c2h;
  217         u32 pkt_offset;
  218         u8 len;
  219 
  220         pkt_offset = *((u32 *)skb->cb);
  221         c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
  222         len = skb->len - pkt_offset - 2;
  223 
  224         mutex_lock(&rtwdev->mutex);
  225 
  226         if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
  227                 goto unlock;
  228 
  229         switch (c2h->id) {
  230         case C2H_CCX_TX_RPT:
  231                 rtw_tx_report_handle(rtwdev, skb, C2H_CCX_TX_RPT);
  232                 break;
  233         case C2H_BT_INFO:
  234                 rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
  235                 break;
  236         case C2H_BT_HID_INFO:
  237                 rtw_coex_bt_hid_info_notify(rtwdev, c2h->payload, len);
  238                 break;
  239         case C2H_WLAN_INFO:
  240                 rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
  241                 break;
  242         case C2H_BCN_FILTER_NOTIFY:
  243                 rtw_fw_bcn_filter_notify(rtwdev, c2h->payload, len);
  244                 break;
  245         case C2H_HALMAC:
  246                 rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
  247                 break;
  248         case C2H_RA_RPT:
  249                 rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
  250                 break;
  251         default:
  252                 rtw_dbg(rtwdev, RTW_DBG_FW, "C2H 0x%x isn't handled\n", c2h->id);
  253                 break;
  254         }
  255 
  256 unlock:
  257         mutex_unlock(&rtwdev->mutex);
  258 }
  259 
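/*
 * C2H entry point from the RX path; runs in atomic context. A few commands
 * (BT MP info, RF-on completion, scan result, adaptivity report) are
 * handled immediately, the rest keep the packet offset in skb->cb and are
 * queued on c2h_queue for rtw_fw_c2h_cmd_handle() to process from the
 * c2h work.
 */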
  260 void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
  261                                struct sk_buff *skb)
  262 {
  263         struct rtw_c2h_cmd *c2h;
  264         u8 len;
  265 
  266         c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
  267         len = skb->len - pkt_offset - 2;
  268         *((u32 *)skb->cb) = pkt_offset;
  269 
  270         rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
  271                 c2h->id, c2h->seq, len);
  272 
  273         switch (c2h->id) {
  274         case C2H_BT_MP_INFO:
  275                 rtw_coex_info_response(rtwdev, skb);
  276                 break;
  277         case C2H_WLAN_RFON:
  278                 complete(&rtwdev->lps_leave_check);
  279                 dev_kfree_skb_any(skb);
  280                 break;
  281         case C2H_SCAN_RESULT:
  282                 complete(&rtwdev->fw_scan_density);
  283                 rtw_fw_scan_result(rtwdev, c2h->payload, len);
  284                 dev_kfree_skb_any(skb);
  285                 break;
  286         case C2H_ADAPTIVITY:
  287                 rtw_fw_adaptivity_result(rtwdev, c2h->payload, len);
  288                 dev_kfree_skb_any(skb);
  289                 break;
  290         default:
  291                 /* pass offset for further operation */
  292                 *((u32 *)skb->cb) = pkt_offset;
  293                 skb_queue_tail(&rtwdev->c2h_queue, skb);
  294                 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
  295                 break;
  296         }
  297 }
  298 EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe);
  299 
  300 void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev)
  301 {
  302         if (rtw_read8(rtwdev, REG_MCU_TST_CFG) == VAL_FW_TRIGGER)
  303                 rtw_fw_recovery(rtwdev);
  304         else
  305                 rtw_warn(rtwdev, "unhandled firmware c2h interrupt\n");
  306 }
  307 EXPORT_SYMBOL(rtw_fw_c2h_cmd_isr);
  308 
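/*
 * Send an 8-byte H2C command through the HMEBOX mailbox registers. The
 * four boxes are used round-robin; REG_HMETFR is polled until the firmware
 * has consumed the selected box, then the low 4 bytes go to the box
 * register and the high 4 bytes to its extension register.
 */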
  309 static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
  310                                     u8 *h2c)
  311 {
  312         u8 box;
  313         u8 box_state;
  314         u32 box_reg, box_ex_reg;
  315         int idx;
  316         int ret;
  317 
  318         rtw_dbg(rtwdev, RTW_DBG_FW,
  319                 "send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
  320                 h2c[3], h2c[2], h2c[1], h2c[0],
  321                 h2c[7], h2c[6], h2c[5], h2c[4]);
  322 
  323         spin_lock(&rtwdev->h2c.lock);
  324 
  325         box = rtwdev->h2c.last_box_num;
  326         switch (box) {
  327         case 0:
  328                 box_reg = REG_HMEBOX0;
  329                 box_ex_reg = REG_HMEBOX0_EX;
  330                 break;
  331         case 1:
  332                 box_reg = REG_HMEBOX1;
  333                 box_ex_reg = REG_HMEBOX1_EX;
  334                 break;
  335         case 2:
  336                 box_reg = REG_HMEBOX2;
  337                 box_ex_reg = REG_HMEBOX2_EX;
  338                 break;
  339         case 3:
  340                 box_reg = REG_HMEBOX3;
  341                 box_ex_reg = REG_HMEBOX3_EX;
  342                 break;
  343         default:
  344                 WARN(1, "invalid h2c mail box number\n");
  345                 goto out;
  346         }
  347 
  348         ret = read_poll_timeout_atomic(rtw_read8, box_state,
  349                                        !((box_state >> box) & 0x1), 100, 3000,
  350                                        false, rtwdev, REG_HMETFR);
  351 
  352         if (ret) {
  353                 rtw_err(rtwdev, "failed to send h2c command\n");
  354                 goto out;
  355         }
  356 
  357         for (idx = 0; idx < 4; idx++)
  358                 rtw_write8(rtwdev, box_reg + idx, h2c[idx]);
  359         for (idx = 0; idx < 4; idx++)
  360                 rtw_write8(rtwdev, box_ex_reg + idx, h2c[idx + 4]);
  361 
  362         if (++rtwdev->h2c.last_box_num >= 4)
  363                 rtwdev->h2c.last_box_num = 0;
  364 
  365 out:
  366         spin_unlock(&rtwdev->h2c.lock);
  367 }
  368 
  369 void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c)
  370 {
  371         rtw_fw_send_h2c_command(rtwdev, h2c);
  372 }
  373 
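/*
 * Larger offload H2C packets bypass the mailboxes and are sent over the
 * HCI data path with a rolling sequence number, serialized by the same
 * h2c lock.
 */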
  374 static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt)
  375 {
  376         int ret;
  377 
  378         spin_lock(&rtwdev->h2c.lock);
  379 
  380         FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq);
  381         ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE);
  382         if (ret)
  383                 rtw_err(rtwdev, "failed to send h2c packet\n");
  384         rtwdev->h2c.seq++;
  385 
  386         spin_unlock(&rtwdev->h2c.lock);
  387 }
  388 
  389 void
  390 rtw_fw_send_general_info(struct rtw_dev *rtwdev)
  391 {
  392         struct rtw_fifo_conf *fifo = &rtwdev->fifo;
  393         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  394         u16 total_size = H2C_PKT_HDR_SIZE + 4;
  395 
  396         if (rtw_chip_wcpu_11n(rtwdev))
  397                 return;
  398 
  399         rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);
  400 
  401         SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
  402 
  403         GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt,
  404                                         fifo->rsvd_fw_txbuf_addr -
  405                                         fifo->rsvd_boundary);
  406 
  407         rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
  408 }
  409 
  410 void
  411 rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
  412 {
  413         struct rtw_hal *hal = &rtwdev->hal;
  414         struct rtw_efuse *efuse = &rtwdev->efuse;
  415         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  416         u16 total_size = H2C_PKT_HDR_SIZE + 8;
  417         u8 fw_rf_type = 0;
  418 
  419         if (rtw_chip_wcpu_11n(rtwdev))
  420                 return;
  421 
  422         if (hal->rf_type == RF_1T1R)
  423                 fw_rf_type = FW_RF_1T1R;
  424         else if (hal->rf_type == RF_2T2R)
  425                 fw_rf_type = FW_RF_2T2R;
  426 
  427         rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO);
  428 
  429         SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
  430         PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option);
  431         PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type);
  432         PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version);
  433         PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx);
  434         PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx);
  435 
  436         rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
  437 }
  438 
  439 void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
  440 {
  441         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  442         u16 total_size = H2C_PKT_HDR_SIZE + 1;
  443 
  444         rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK);
  445         SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
  446         IQK_SET_CLEAR(h2c_pkt, para->clear);
  447         IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk);
  448 
  449         rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
  450 }
  451 EXPORT_SYMBOL(rtw_fw_do_iqk);
  452 
  453 void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start)
  454 {
  455         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  456 
  457         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WIFI_CALIBRATION);
  458 
  459         RFK_SET_INFORM_START(h2c_pkt, start);
  460 
  461         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  462 }
  463 EXPORT_SYMBOL(rtw_fw_inform_rfk_status);
  464 
  465 void rtw_fw_query_bt_info(struct rtw_dev *rtwdev)
  466 {
  467         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  468 
  469         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO);
  470 
  471         SET_QUERY_BT_INFO(h2c_pkt, true);
  472 
  473         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  474 }
  475 
  476 void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw)
  477 {
  478         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  479 
  480         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO);
  481 
  482         SET_WL_CH_INFO_LINK(h2c_pkt, link);
  483         SET_WL_CH_INFO_CHNL(h2c_pkt, ch);
  484         SET_WL_CH_INFO_BW(h2c_pkt, bw);
  485 
  486         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  487 }
  488 
  489 void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
  490                              struct rtw_coex_info_req *req)
  491 {
  492         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  493 
  494         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO);
  495 
  496         SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq);
  497         SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code);
  498         SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1);
  499         SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2);
  500         SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3);
  501 
  502         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  503 }
  504 
  505 void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl)
  506 {
  507         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  508         u8 index = 0 - bt_pwr_dec_lvl;
  509 
  510         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER);
  511 
  512         SET_BT_TX_POWER_INDEX(h2c_pkt, index);
  513 
  514         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  515 }
  516 
  517 void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable)
  518 {
  519         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  520 
  521         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION);
  522 
  523         SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable);
  524 
  525         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  526 }
  527 
  528 void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
  529                            u8 para1, u8 para2, u8 para3, u8 para4, u8 para5)
  530 {
  531         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  532 
  533         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE);
  534 
  535         SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1);
  536         SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2);
  537         SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3);
  538         SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4);
  539         SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5);
  540 
  541         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  542 }
  543 
  544 void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data)
  545 {
  546         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  547 
  548         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_HID_INFO);
  549 
  550         SET_COEX_QUERY_HID_INFO_SUBID(h2c_pkt, sub_id);
  551         SET_COEX_QUERY_HID_INFO_DATA1(h2c_pkt, data);
  552 
  553         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  554 }
  555 
  556 void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
  557 {
  558         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  559 
  560         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL);
  561 
  562         SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code);
  563 
  564         SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data);
  565         SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1));
  566         SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2));
  567         SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3));
  568         SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4));
  569 
  570         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  571 }
  572 
  573 void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
  574 {
  575         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  576         u8 rssi = ewma_rssi_read(&si->avg_rssi);
  577         bool stbc_en = si->stbc_en ? true : false;
  578 
  579         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR);
  580 
  581         SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id);
  582         SET_RSSI_INFO_RSSI(h2c_pkt, rssi);
  583         SET_RSSI_INFO_STBC(h2c_pkt, stbc_en);
  584 
  585         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  586 }
  587 
  588 void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
  589                          bool reset_ra_mask)
  590 {
  591         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  592         bool disable_pt = true;
  593 
  594         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);
  595 
  596         SET_RA_INFO_MACID(h2c_pkt, si->mac_id);
  597         SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id);
  598         SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
  599         SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
  600         SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
  601         SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en);
  602         SET_RA_INFO_NO_UPDATE(h2c_pkt, !reset_ra_mask);
  603         SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
  604         SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
  605         SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
  606         SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8);
  607         SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16);
  608         SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);
  609 
  610         si->init_ra_lv = 0;
  611 
  612         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  613 }
  614 
  615 void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
  616 {
  617         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  618 
  619         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT);
  620         MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect);
  621         MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id);
  622 
  623         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  624 }
  625 
  626 void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev)
  627 {
  628         struct rtw_traffic_stats *stats = &rtwdev->stats;
  629         struct rtw_dm_info *dm_info = &rtwdev->dm_info;
  630         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  631 
  632         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_PHY_INFO);
  633         SET_WL_PHY_INFO_TX_TP(h2c_pkt, stats->tx_throughput);
  634         SET_WL_PHY_INFO_RX_TP(h2c_pkt, stats->rx_throughput);
  635         SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, dm_info->tx_rate);
  636         SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, dm_info->curr_rx_rate);
  637         SET_WL_PHY_INFO_RX_EVM(h2c_pkt, dm_info->rx_evm_dbm[RF_PATH_A]);
  638         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  639 }
  640 
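/*
 * Configure the firmware beacon-filter/CQM offload for a vif (requires
 * FW_FEATURE_BCN_FILTER). On disconnect only the P1 disable command is
 * sent; when connected, P0 carries the BSSID and P1 carries the enable
 * flag, offload mode, the CQM RSSI threshold (offset by 100 into an
 * unsigned 0..100 range), beacon-loss count, MAC id, hysteresis and
 * beacon interval.
 */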
  641 void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
  642                                  struct ieee80211_vif *vif)
  643 {
  644         struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
  645         struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
  646         static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100;
  647         struct rtw_sta_info *si =
  648                 sta ? (struct rtw_sta_info *)sta->drv_priv : NULL;
  649         s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset;
  650         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  651 
  652         if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER))
  653                 return;
  654 
  655         if (!connect) {
  656                 SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
  657                 SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
  658                 rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  659 
  660                 return;
  661         }
  662 
  663         if (!si)
  664                 return;
  665 
  666         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0);
  667         ether_addr_copy(&h2c_pkt[1], bss_conf->bssid);
  668         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  669 
  670         memset(h2c_pkt, 0, sizeof(h2c_pkt));
  671         threshold = clamp_t(s32, threshold, rssi_min, rssi_max);
  672         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
  673         SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
  674         SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt,
  675                                                BCN_FILTER_OFFLOAD_MODE_DEFAULT);
  676         SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, (u8)threshold);
  677         SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT);
  678         SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id);
  679         SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, bss_conf->cqm_rssi_hyst);
  680         SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int);
  681         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  682 }
  683 
  684 void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
  685 {
  686         struct rtw_lps_conf *conf = &rtwdev->lps_conf;
  687         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  688 
  689         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE);
  690 
  691         SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode);
  692         SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm);
  693         SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps);
  694         SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval);
  695         SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id);
  696         SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state);
  697 
  698         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  699 }
  700 
  701 void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable)
  702 {
  703         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  704         struct rtw_fw_wow_keep_alive_para mode = {
  705                 .adopt = true,
  706                 .pkt_type = KEEP_ALIVE_NULL_PKT,
  707                 .period = 5,
  708         };
  709 
  710         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_KEEP_ALIVE);
  711         SET_KEEP_ALIVE_ENABLE(h2c_pkt, enable);
  712         SET_KEEP_ALIVE_ADOPT(h2c_pkt, mode.adopt);
  713         SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, mode.pkt_type);
  714         SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, mode.period);
  715 
  716         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  717 }
  718 
  719 void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable)
  720 {
  721         struct rtw_wow_param *rtw_wow = &rtwdev->wow;
  722         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  723         struct rtw_fw_wow_disconnect_para mode = {
  724                 .adopt = true,
  725                 .period = 30,
  726                 .retry_count = 5,
  727         };
  728 
  729         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_DISCONNECT_DECISION);
  730 
  731         if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
  732                 SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, enable);
  733                 SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, mode.adopt);
  734                 SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, mode.period);
  735                 SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, mode.retry_count);
  736         }
  737 
  738         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  739 }
  740 
  741 void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
  742 {
  743         struct rtw_wow_param *rtw_wow = &rtwdev->wow;
  744         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  745 
  746         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WOWLAN);
  747 
  748         SET_WOWLAN_FUNC_ENABLE(h2c_pkt, enable);
  749         if (rtw_wow_mgd_linked(rtwdev)) {
  750                 if (test_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
  751                         SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, enable);
  752                 if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
  753                         SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, enable);
  754                 if (test_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags))
  755                         SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, enable);
  756                 if (rtw_wow->pattern_cnt)
  757                         SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, enable);
  758         }
  759 
  760         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  761 }
  762 
  763 void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
  764                                      u8 pairwise_key_enc,
  765                                      u8 group_key_enc)
  766 {
  767         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  768 
  769         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_AOAC_GLOBAL_INFO);
  770 
  771         SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, pairwise_key_enc);
  772         SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, group_key_enc);
  773 
  774         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  775 }
  776 
  777 void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
  778 {
  779         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  780 
  781         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_REMOTE_WAKE_CTRL);
  782 
  783         SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, enable);
  784 
  785         if (rtw_wow_no_link(rtwdev))
  786                 SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, enable);
  787 
  788         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  789 }
  790 
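/*
 * Return the reserved-page offset at which a packet of the given type was
 * placed when the reserved pages were last built; callers treat a location
 * of 0 as "not found".
 */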
  791 static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
  792                                      enum rtw_rsvd_packet_type type)
  793 {
  794         struct rtw_rsvd_page *rsvd_pkt;
  795         u8 location = 0;
  796 
  797         list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
  798                 if (type == rsvd_pkt->type)
  799                         location = rsvd_pkt->page;
  800         }
  801 
  802         return location;
  803 }
  804 
  805 void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable)
  806 {
  807         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  808         u8 loc_nlo;
  809 
  810         loc_nlo = rtw_get_rsvd_page_location(rtwdev, RSVD_NLO_INFO);
  811 
  812         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_NLO_INFO);
  813 
  814         SET_NLO_FUN_EN(h2c_pkt, enable);
  815         if (enable) {
  816                 if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE)
  817                         SET_NLO_PS_32K(h2c_pkt, enable);
  818                 SET_NLO_IGNORE_SECURITY(h2c_pkt, enable);
  819                 SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo);
  820         }
  821 
  822         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  823 }
  824 
  825 void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
  826 {
  827         struct rtw_lps_conf *conf = &rtwdev->lps_conf;
  828         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  829         u8 loc_pg, loc_dpk;
  830 
  831         loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO);
  832         loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK);
  833 
  834         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO);
  835 
  836         LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
  837         LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
  838         LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
  839         LPS_PG_PATTERN_CAM_EN(h2c_pkt, conf->pattern_cam_backup);
  840 
  841         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  842 }
  843 
  844 static u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
  845                                                struct cfg80211_ssid *ssid)
  846 {
  847         struct rtw_rsvd_page *rsvd_pkt;
  848         u8 location = 0;
  849 
  850         list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
  851                 if (rsvd_pkt->type != RSVD_PROBE_REQ)
  852                         continue;
  853                 if ((!ssid && !rsvd_pkt->ssid) ||
  854                     rtw_ssid_equal(rsvd_pkt->ssid, ssid))
  855                         location = rsvd_pkt->page;
  856         }
  857 
  858         return location;
  859 }
  860 
  861 static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
  862                                             struct cfg80211_ssid *ssid)
  863 {
  864         struct rtw_rsvd_page *rsvd_pkt;
  865         u16 size = 0;
  866 
  867         list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
  868                 if (rsvd_pkt->type != RSVD_PROBE_REQ)
  869                         continue;
  870                 if ((!ssid && !rsvd_pkt->ssid) ||
  871                     rtw_ssid_equal(rsvd_pkt->ssid, ssid))
  872                         size = rsvd_pkt->probe_req_size;
  873         }
  874 
  875         return size;
  876 }
  877 
  878 void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
  879 {
  880         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
  881         u8 location = 0;
  882 
  883         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE);
  884 
  885         location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP);
  886         *(h2c_pkt + 1) = location;
  887         rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", location);
  888 
  889         location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL);
  890         *(h2c_pkt + 2) = location;
  891         rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location);
  892 
  893         location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL);
  894         *(h2c_pkt + 3) = location;
  895         rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location);
  896 
  897         location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL);
  898         *(h2c_pkt + 4) = location;
  899         rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", location);
  900 
  901         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
  902 }
  903 
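/*
 * Build the NLO (network list offload) info page for wowlan net-detect:
 * a header with the match-set count and a pattern-check signature for the
 * firmware, per-SSID lengths and the reserved-page locations of the
 * corresponding probe requests, followed by the SSIDs themselves padded
 * to IEEE80211_MAX_SSID_LEN.
 */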
  904 static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
  905 {
  906         struct rtw_dev *rtwdev = hw->priv;
  907         struct rtw_chip_info *chip = rtwdev->chip;
  908         struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
  909         struct rtw_nlo_info_hdr *nlo_hdr;
  910         struct cfg80211_ssid *ssid;
  911         struct sk_buff *skb;
  912         u8 *pos, loc;
  913         u32 size;
  914         int i;
  915 
  916         if (!pno_req->inited || !pno_req->match_set_cnt)
  917                 return NULL;
  918 
  919         size = sizeof(struct rtw_nlo_info_hdr) + pno_req->match_set_cnt *
  920                       IEEE80211_MAX_SSID_LEN + chip->tx_pkt_desc_sz;
  921 
  922         skb = alloc_skb(size, GFP_KERNEL);
  923         if (!skb)
  924                 return NULL;
  925 
  926         skb_reserve(skb, chip->tx_pkt_desc_sz);
  927 
  928         nlo_hdr = skb_put_zero(skb, sizeof(struct rtw_nlo_info_hdr));
  929 
  930         nlo_hdr->nlo_count = pno_req->match_set_cnt;
  931         nlo_hdr->hidden_ap_count = pno_req->match_set_cnt;
  932 
  933         /* pattern check for firmware */
  934         memset(nlo_hdr->pattern_check, 0xA5, FW_NLO_INFO_CHECK_SIZE);
  935 
  936         for (i = 0; i < pno_req->match_set_cnt; i++)
  937                 nlo_hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len;
  938 
  939         for (i = 0; i < pno_req->match_set_cnt; i++) {
  940                 ssid = &pno_req->match_sets[i].ssid;
  941                 loc  = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
  942                 if (!loc) {
  943                         rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
  944                         kfree_skb(skb);
  945                         return NULL;
  946                 }
  947                 nlo_hdr->location[i] = loc;
  948         }
  949 
  950         for (i = 0; i < pno_req->match_set_cnt; i++) {
  951                 pos = skb_put_zero(skb, IEEE80211_MAX_SSID_LEN);
  952                 memcpy(pos, pno_req->match_sets[i].ssid.ssid,
  953                        pno_req->match_sets[i].ssid.ssid_len);
  954         }
  955 
  956         return skb;
  957 }
  958 
  959 static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
  960 {
  961         struct rtw_dev *rtwdev = hw->priv;
  962         struct rtw_chip_info *chip = rtwdev->chip;
  963         struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
  964         struct ieee80211_channel *channels = pno_req->channels;
  965         struct sk_buff *skb;
  966         int count =  pno_req->channel_cnt;
  967         u8 *pos;
  968         int i = 0;
  969 
  970         skb = alloc_skb(4 * count + chip->tx_pkt_desc_sz, GFP_KERNEL);
  971         if (!skb)
  972                 return NULL;
  973 
  974         skb_reserve(skb, chip->tx_pkt_desc_sz);
  975 
  976         for (i = 0; i < count; i++) {
  977                 pos = skb_put_zero(skb, 4);
  978 
  979                 CHSW_INFO_SET_CH(pos, channels[i].hw_value);
  980 
  981                 if (channels[i].flags & IEEE80211_CHAN_RADAR)
  982                         CHSW_INFO_SET_ACTION_ID(pos, 0);
  983                 else
  984                         CHSW_INFO_SET_ACTION_ID(pos, 1);
  985                 CHSW_INFO_SET_TIMEOUT(pos, 1);
  986                 CHSW_INFO_SET_PRI_CH_IDX(pos, 1);
  987                 CHSW_INFO_SET_BW(pos, 0);
  988         }
  989 
  990         return skb;
  991 }
  992 
  993 static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
  994 {
  995         struct rtw_dev *rtwdev = hw->priv;
  996         struct rtw_chip_info *chip = rtwdev->chip;
  997         struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
  998         struct rtw_lps_pg_dpk_hdr *dpk_hdr;
  999         struct sk_buff *skb;
 1000         u32 size;
 1001 
 1002         size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr);
 1003         skb = alloc_skb(size, GFP_KERNEL);
 1004         if (!skb)
 1005                 return NULL;
 1006 
 1007         skb_reserve(skb, chip->tx_pkt_desc_sz);
 1008         dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr));
 1009         dpk_hdr->dpk_ch = dpk_info->dpk_ch;
 1010         dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0];
 1011         memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 2);
 1012         memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4);
 1013         memcpy(dpk_hdr->coef, dpk_info->coef, 160);
 1014 
 1015         return skb;
 1016 }
 1017 
 1018 static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
 1019 {
 1020         struct rtw_dev *rtwdev = hw->priv;
 1021         struct rtw_chip_info *chip = rtwdev->chip;
 1022         struct rtw_lps_conf *conf = &rtwdev->lps_conf;
 1023         struct rtw_lps_pg_info_hdr *pg_info_hdr;
 1024         struct rtw_wow_param *rtw_wow = &rtwdev->wow;
 1025         struct sk_buff *skb;
 1026         u32 size;
 1027 
 1028         size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr);
 1029         skb = alloc_skb(size, GFP_KERNEL);
 1030         if (!skb)
 1031                 return NULL;
 1032 
 1033         skb_reserve(skb, chip->tx_pkt_desc_sz);
 1034         pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr));
 1035         pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num;
 1036         pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
 1037         pg_info_hdr->sec_cam_count =
 1038                 rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
 1039         pg_info_hdr->pattern_count = rtw_wow->pattern_cnt;
 1040 
 1041         conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
 1042         conf->pattern_cam_backup = rtw_wow->pattern_cnt != 0;
 1043 
 1044         return skb;
 1045 }
 1046 
 1047 static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
 1048                                              struct rtw_rsvd_page *rsvd_pkt)
 1049 {
 1050         struct ieee80211_vif *vif;
 1051         struct rtw_vif *rtwvif;
 1052         struct sk_buff *skb_new;
 1053         struct cfg80211_ssid *ssid;
 1054         u16 tim_offset = 0;
 1055 
 1056         if (rsvd_pkt->type == RSVD_DUMMY) {
 1057                 skb_new = alloc_skb(1, GFP_KERNEL);
 1058                 if (!skb_new)
 1059                         return NULL;
 1060 
 1061                 skb_put(skb_new, 1);
 1062                 return skb_new;
 1063         }
 1064 
 1065         rtwvif = rsvd_pkt->rtwvif;
 1066         if (!rtwvif)
 1067                 return NULL;
 1068 
 1069         vif = rtwvif_to_vif(rtwvif);
 1070 
 1071         switch (rsvd_pkt->type) {
 1072         case RSVD_BEACON:
 1073                 skb_new = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL, 0);
 1074                 rsvd_pkt->tim_offset = tim_offset;
 1075                 break;
 1076         case RSVD_PS_POLL:
 1077                 skb_new = ieee80211_pspoll_get(hw, vif);
 1078                 break;
 1079         case RSVD_PROBE_RESP:
 1080                 skb_new = ieee80211_proberesp_get(hw, vif);
 1081                 break;
 1082         case RSVD_NULL:
 1083                 skb_new = ieee80211_nullfunc_get(hw, vif, false);
 1084                 break;
 1085         case RSVD_QOS_NULL:
 1086                 skb_new = ieee80211_nullfunc_get(hw, vif, true);
 1087                 break;
 1088         case RSVD_LPS_PG_DPK:
 1089                 skb_new = rtw_lps_pg_dpk_get(hw);
 1090                 break;
 1091         case RSVD_LPS_PG_INFO:
 1092                 skb_new = rtw_lps_pg_info_get(hw);
 1093                 break;
 1094         case RSVD_PROBE_REQ:
 1095                 ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid;
 1096                 if (ssid)
 1097                         skb_new = ieee80211_probereq_get(hw, vif->addr,
 1098                                                          ssid->ssid,
 1099                                                          ssid->ssid_len, 0);
 1100                 else
 1101                         skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
 1102                 if (skb_new)
 1103                         rsvd_pkt->probe_req_size = (u16)skb_new->len;
 1104                 break;
 1105         case RSVD_NLO_INFO:
 1106                 skb_new = rtw_nlo_info_get(hw);
 1107                 break;
 1108         case RSVD_CH_INFO:
 1109                 skb_new = rtw_cs_channel_info_get(hw);
 1110                 break;
 1111         default:
 1112                 return NULL;
 1113         }
 1114 
 1115         if (!skb_new)
 1116                 return NULL;
 1117 
 1118         return skb_new;
 1119 }
 1120 
 1121 static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
 1122                                     enum rtw_rsvd_packet_type type)
 1123 {
 1124         struct rtw_tx_pkt_info pkt_info = {0};
 1125         struct rtw_chip_info *chip = rtwdev->chip;
 1126         u8 *pkt_desc;
 1127 
 1128         rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type);
 1129         pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
 1130         memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
 1131         rtw_tx_fill_tx_desc(&pkt_info, skb);
 1132 }
 1133 
 1134 static inline u8 rtw_len_to_page(unsigned int len, u8 page_size)
 1135 {
 1136         return DIV_ROUND_UP(len, page_size);
 1137 }
 1138 
 1139 static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
 1140                                       u8 page_margin, u32 page, u8 *buf,
 1141                                       struct rtw_rsvd_page *rsvd_pkt)
 1142 {
 1143         struct sk_buff *skb = rsvd_pkt->skb;
 1144 
 1145         if (page >= 1)
 1146                 memcpy(buf + page_margin + page_size * (page - 1),
 1147                        skb->data, skb->len);
 1148         else
 1149                 memcpy(buf, skb->data, skb->len);
 1150 }
 1151 
 1152 static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
 1153                                                  enum rtw_rsvd_packet_type type,
 1154                                                  bool txdesc)
 1155 {
 1156         struct rtw_rsvd_page *rsvd_pkt = NULL;
 1157 
 1158         rsvd_pkt = kzalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
 1159 
 1160         if (!rsvd_pkt)
 1161                 return NULL;
 1162 
 1163         INIT_LIST_HEAD(&rsvd_pkt->vif_list);
 1164         INIT_LIST_HEAD(&rsvd_pkt->build_list);
 1165         rsvd_pkt->type = type;
 1166         rsvd_pkt->add_txdesc = txdesc;
 1167 
 1168         return rsvd_pkt;
 1169 }
 1170 
 1171 static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev,
 1172                                  struct rtw_vif *rtwvif,
 1173                                  struct rtw_rsvd_page *rsvd_pkt)
 1174 {
 1175         lockdep_assert_held(&rtwdev->mutex);
 1176 
 1177         list_add_tail(&rsvd_pkt->vif_list, &rtwvif->rsvd_page_list);
 1178 }
 1179 
 1180 static void rtw_add_rsvd_page(struct rtw_dev *rtwdev,
 1181                               struct rtw_vif *rtwvif,
 1182                               enum rtw_rsvd_packet_type type,
 1183                               bool txdesc)
 1184 {
 1185         struct rtw_rsvd_page *rsvd_pkt;
 1186 
 1187         rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc);
 1188         if (!rsvd_pkt) {
 1189                 rtw_err(rtwdev, "failed to alloc rsvd page %d\n", type);
 1190                 return;
 1191         }
 1192 
 1193         rsvd_pkt->rtwvif = rtwvif;
 1194         rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
 1195 }
 1196 
 1197 static void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
 1198                                         struct rtw_vif *rtwvif,
 1199                                         struct cfg80211_ssid *ssid)
 1200 {
 1201         struct rtw_rsvd_page *rsvd_pkt;
 1202 
 1203         rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true);
 1204         if (!rsvd_pkt) {
 1205                 rtw_err(rtwdev, "failed to alloc probe req rsvd page\n");
 1206                 return;
 1207         }
 1208 
 1209         rsvd_pkt->rtwvif = rtwvif;
 1210         rsvd_pkt->ssid = ssid;
 1211         rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
 1212 }
 1213 
 1214 void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
 1215                           struct rtw_vif *rtwvif)
 1216 {
 1217         struct rtw_rsvd_page *rsvd_pkt, *tmp;
 1218 
 1219         lockdep_assert_held(&rtwdev->mutex);
 1220 
 1221         /* remove all of the rsvd pages for vif */
 1222         list_for_each_entry_safe(rsvd_pkt, tmp, &rtwvif->rsvd_page_list,
 1223                                  vif_list) {
 1224                 list_del(&rsvd_pkt->vif_list);
 1225                 if (!list_empty(&rsvd_pkt->build_list))
 1226                         list_del(&rsvd_pkt->build_list);
 1227                 kfree(rsvd_pkt);
 1228         }
 1229 }
 1230 
 1231 void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev,
 1232                            struct rtw_vif *rtwvif)
 1233 {
 1234         struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
 1235 
 1236         if (vif->type != NL80211_IFTYPE_AP &&
 1237             vif->type != NL80211_IFTYPE_ADHOC &&
 1238             vif->type != NL80211_IFTYPE_MESH_POINT) {
 1239                 rtw_warn(rtwdev, "Cannot add beacon rsvd page for %d\n",
 1240                          vif->type);
 1241                 return;
 1242         }
 1243 
 1244         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_BEACON, false);
 1245 }
 1246 
 1247 void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
 1248                            struct rtw_vif *rtwvif)
 1249 {
 1250         struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
 1251         struct rtw_wow_param *rtw_wow = &rtwdev->wow;
 1252         struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
 1253         struct cfg80211_ssid *ssid;
 1254         int i;
 1255 
 1256         if (vif->type != NL80211_IFTYPE_STATION) {
 1257                 rtw_warn(rtwdev, "Cannot add PNO rsvd page for %d\n",
 1258                          vif->type);
 1259                 return;
 1260         }
 1261 
 1262         for (i = 0 ; i < rtw_pno_req->match_set_cnt; i++) {
 1263                 ssid = &rtw_pno_req->match_sets[i].ssid;
 1264                 rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, ssid);
 1265         }
 1266 
 1267         rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, NULL);
 1268         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NLO_INFO, false);
 1269         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_CH_INFO, true);
 1270 }
 1271 
 1272 void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
 1273                            struct rtw_vif *rtwvif)
 1274 {
 1275         struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
 1276 
 1277         if (vif->type != NL80211_IFTYPE_STATION) {
 1278                 rtw_warn(rtwdev, "Cannot add sta rsvd page for %d\n",
 1279                          vif->type);
 1280                 return;
 1281         }
 1282 
 1283         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_PS_POLL, true);
 1284         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_QOS_NULL, true);
 1285         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NULL, true);
 1286         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_DPK, true);
 1287         rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_INFO, true);
 1288 }
 1289 
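/*
 * Download a reserved-page buffer through the beacon queue: mark BCN_VALID,
 * enable software beacon download (BIT_ENSWBCN) and clear BIT_EN_BCNQ_DL,
 * write the buffer via the HCI reserved-page path, then poll BCN_VALID to
 * confirm the hardware accepted it before restoring the saved register
 * bytes and the reserved-page boundary.
 */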
 1290 int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
 1291                                 u8 *buf, u32 size)
 1292 {
 1293         u8 bckp[2];
 1294         u8 val;
 1295         u16 rsvd_pg_head;
 1296         u32 bcn_valid_addr;
 1297         u32 bcn_valid_mask;
 1298         int ret;
 1299 
 1300         lockdep_assert_held(&rtwdev->mutex);
 1301 
 1302         if (!size)
 1303                 return -EINVAL;
 1304 
 1305         if (rtw_chip_wcpu_11n(rtwdev)) {
 1306                 rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID);
 1307         } else {
 1308                 pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
 1309                 pg_addr |= BIT_BCN_VALID_V1;
 1310                 rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr);
 1311         }
 1312 
 1313         val = rtw_read8(rtwdev, REG_CR + 1);
 1314         bckp[0] = val;
 1315         val |= BIT_ENSWBCN >> 8;
 1316         rtw_write8(rtwdev, REG_CR + 1, val);
 1317 
 1318         val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
 1319         bckp[1] = val;
 1320         val &= ~(BIT_EN_BCNQ_DL >> 16);
 1321         rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
 1322 
 1323         ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
 1324         if (ret) {
 1325                 rtw_err(rtwdev, "failed to write data to rsvd page\n");
 1326                 goto restore;
 1327         }
 1328 
 1329         if (rtw_chip_wcpu_11n(rtwdev)) {
 1330                 bcn_valid_addr = REG_DWBCN0_CTRL;
 1331                 bcn_valid_mask = BIT_BCN_VALID;
 1332         } else {
 1333                 bcn_valid_addr = REG_FIFOPAGE_CTRL_2;
 1334                 bcn_valid_mask = BIT_BCN_VALID_V1;
 1335         }
 1336 
 1337         if (!check_hw_ready(rtwdev, bcn_valid_addr, bcn_valid_mask, 1)) {
 1338                 rtw_err(rtwdev, "error beacon valid\n");
 1339                 ret = -EBUSY;
 1340         }
 1341 
 1342 restore:
 1343         rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
 1344         rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
 1345                     rsvd_pg_head | BIT_BCN_VALID_V1);
 1346         rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
 1347         rtw_write8(rtwdev, REG_CR + 1, bckp[0]);
 1348 
 1349         return ret;
 1350 }
 1351 
 1352 static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
 1353 {
 1354         u32 pg_size;
 1355         u32 pg_num = 0;
 1356         u16 pg_addr = 0;
 1357 
 1358         pg_size = rtwdev->chip->page_size;
 1359         pg_num = size / pg_size + ((size & (pg_size - 1)) ? 1 : 0);
 1360         if (pg_num > rtwdev->fifo.rsvd_drv_pg_num)
 1361                 return -ENOMEM;
 1362 
 1363         pg_addr = rtwdev->fifo.rsvd_drv_addr;
 1364 
 1365         return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
 1366 }
 1367 
 1368 static void __rtw_build_rsvd_page_reset(struct rtw_dev *rtwdev)
 1369 {
 1370         struct rtw_rsvd_page *rsvd_pkt, *tmp;
 1371 
 1372         list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
 1373                                  build_list) {
 1374                 list_del_init(&rsvd_pkt->build_list);
 1375 
 1376                 /* Don't free except for the dummy rsvd page,
 1377                  * others will be freed when removing vif
 1378                  */
 1379                 if (rsvd_pkt->type == RSVD_DUMMY)
 1380                         kfree(rsvd_pkt);
 1381         }
 1382 }
 1383 
 1384 static void rtw_build_rsvd_page_iter(void *data, u8 *mac,
 1385                                      struct ieee80211_vif *vif)
 1386 {
 1387         struct rtw_dev *rtwdev = data;
 1388         struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
 1389         struct rtw_rsvd_page *rsvd_pkt;
 1390 
 1391         list_for_each_entry(rsvd_pkt, &rtwvif->rsvd_page_list, vif_list) {
 1392                 if (rsvd_pkt->type == RSVD_BEACON)
 1393                         list_add(&rsvd_pkt->build_list,
 1394                                  &rtwdev->rsvd_page_list);
 1395                 else
 1396                         list_add_tail(&rsvd_pkt->build_list,
 1397                                       &rtwdev->rsvd_page_list);
 1398         }
 1399 }
 1400 
 1401 static int  __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev)
 1402 {
 1403         struct rtw_rsvd_page *rsvd_pkt;
 1404 
 1405         __rtw_build_rsvd_page_reset(rtwdev);
 1406 
 1407         /* gather rsvd page from vifs */
 1408         rtw_iterate_vifs_atomic(rtwdev, rtw_build_rsvd_page_iter, rtwdev);
 1409 
 1410         rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
 1411                                             struct rtw_rsvd_page, build_list);
 1412         if (!rsvd_pkt) {
 1413                 WARN(1, "Should not have an empty reserved page\n");
 1414                 return -EINVAL;
 1415         }
 1416 
 1417         /* the first rsvd should be beacon, otherwise add a dummy one */
 1418         if (rsvd_pkt->type != RSVD_BEACON) {
 1419                 struct rtw_rsvd_page *dummy_pkt;
 1420 
 1421                 dummy_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_DUMMY, false);
 1422                 if (!dummy_pkt) {
 1423                         rtw_err(rtwdev, "failed to alloc dummy rsvd page\n");
 1424                         return -ENOMEM;
 1425                 }
 1426 
 1427                 list_add(&dummy_pkt->build_list, &rtwdev->rsvd_page_list);
 1428         }
 1429 
 1430         return 0;
 1431 }
 1432 
 1433 static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size)
 1434 {
 1435         struct ieee80211_hw *hw = rtwdev->hw;
 1436         struct rtw_chip_info *chip = rtwdev->chip;
 1437         struct sk_buff *iter;
 1438         struct rtw_rsvd_page *rsvd_pkt;
 1439         u32 page = 0;
 1440         u8 total_page = 0;
 1441         u8 page_size, page_margin, tx_desc_sz;
 1442         u8 *buf;
 1443         int ret;
 1444 
 1445         page_size = chip->page_size;
 1446         tx_desc_sz = chip->tx_pkt_desc_sz;
 1447         page_margin = page_size - tx_desc_sz;
 1448 
 1449         ret = __rtw_build_rsvd_page_from_vifs(rtwdev);
 1450         if (ret) {
 1451                 rtw_err(rtwdev,
 1452                         "failed to build rsvd page from vifs, ret %d\n", ret);
 1453                 return NULL;
 1454         }
 1455 
 1456         list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
 1457                 iter = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
 1458                 if (!iter) {
 1459                         rtw_err(rtwdev, "failed to build rsvd packet\n");
 1460                         goto release_skb;
 1461                 }
 1462 
 1463                 /* Fill the tx_desc for the rsvd pkt that requires one;
 1464                  * iter->len is then increased by tx_desc_sz.
 1465                  */
 1466                 if (rsvd_pkt->add_txdesc)
 1467                         rtw_fill_rsvd_page_desc(rtwdev, iter, rsvd_pkt->type);
 1468 
 1469                 rsvd_pkt->skb = iter;
 1470                 rsvd_pkt->page = total_page;
 1471 
 1472                 /* The reserved page is downloaded via the TX path, which
 1473                  * generates a tx_desc header describing the length of the
 1474                  * buffer. If the page count for the first rsvd_pkt (usually
 1475                  * a beacon; by default the firmware treats the first page
 1476                  * as the beacon content) did not include the size of that
 1477                  * tx_desc, the resulting buffer would be smaller than the
 1478                  * actual size of the whole rsvd_page.
 1479                  */
 1480                 if (total_page == 0) {
 1481                         if (rsvd_pkt->type != RSVD_BEACON &&
 1482                             rsvd_pkt->type != RSVD_DUMMY) {
 1483                                 rtw_err(rtwdev, "first page should be a beacon\n");
 1484                                 goto release_skb;
 1485                         }
 1486                         total_page += rtw_len_to_page(iter->len + tx_desc_sz,
 1487                                                       page_size);
 1488                 } else {
 1489                         total_page += rtw_len_to_page(iter->len, page_size);
 1490                 }
 1491         }
 1492 
 1493         if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
 1494                 rtw_err(rtwdev, "rsvd page over size: %d\n", total_page);
 1495                 goto release_skb;
 1496         }
 1497 
 1498         *size = (total_page - 1) * page_size + page_margin;
 1499         buf = kzalloc(*size, GFP_KERNEL);
 1500         if (!buf)
 1501                 goto release_skb;
 1502 
 1503         /* Copy the content of each rsvd_pkt into the buffer, aligned to
 1504          * page boundaries.
 1505          *
 1506          * Note that the first rsvd_pkt is a beacon regardless of vif->type,
 1507          * and it does not need a tx_desc of its own because the TX path
 1508          * generates one for it when it is sent.
 1509          */
 1510         list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
 1511                 rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
 1512                                           page, buf, rsvd_pkt);
 1513                 if (page == 0)
 1514                         page += rtw_len_to_page(rsvd_pkt->skb->len +
 1515                                                 tx_desc_sz, page_size);
 1516                 else
 1517                         page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
 1518 
 1519                 kfree_skb(rsvd_pkt->skb);
 1520                 rsvd_pkt->skb = NULL;
 1521         }
 1522 
 1523         return buf;
 1524 
 1525 release_skb:
 1526         list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
 1527                 kfree_skb(rsvd_pkt->skb);
 1528                 rsvd_pkt->skb = NULL;
 1529         }
 1530 
 1531         return NULL;
 1532 }
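
/*
 * [Editorial sketch, not part of the upstream fw.c]  Worked example of the
 * page accounting in rtw_build_rsvd_page() above: the first packet is
 * counted with the tx_desc that the TX path will prepend, while the final
 * buffer is one tx_desc shorter than total_page * page_size because that
 * descriptor is not stored in the buffer.  The 128/40-byte sizes and the
 * len_to_page() helper are assumptions for illustration; len_to_page() is
 * assumed to behave like the driver's rtw_len_to_page() (round up to pages).
 */
#if 0	/* self-contained illustration */
#include <stdio.h>

static unsigned int len_to_page(unsigned int len, unsigned int page_size)
{
	return (len + page_size - 1) / page_size;
}

int main(void)
{
	unsigned int page_size = 128, tx_desc_sz = 40;	/* example values only */
	unsigned int page_margin = page_size - tx_desc_sz;
	unsigned int total_page = 0;

	/* first packet (beacon), counted together with its future tx_desc */
	total_page += len_to_page(200 + tx_desc_sz, page_size);	/* 2 pages */
	/* a second reserved packet */
	total_page += len_to_page(100, page_size);			/* 1 page  */

	/* buffer sizing rule from rtw_build_rsvd_page() */
	printf("pages=%u buf=%u\n", total_page,
	       (total_page - 1) * page_size + page_margin);	/* pages=3 buf=344 */
	return 0;
}
#endif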
 1533 
 1534 static int rtw_download_beacon(struct rtw_dev *rtwdev)
 1535 {
 1536         struct ieee80211_hw *hw = rtwdev->hw;
 1537         struct rtw_rsvd_page *rsvd_pkt;
 1538         struct sk_buff *skb;
 1539         int ret = 0;
 1540 
 1541         rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
 1542                                             struct rtw_rsvd_page, build_list);
 1543         if (!rsvd_pkt) {
 1544                 rtw_err(rtwdev, "failed to get rsvd page from build list\n");
 1545                 return -ENOENT;
 1546         }
 1547 
 1548         if (rsvd_pkt->type != RSVD_BEACON &&
 1549             rsvd_pkt->type != RSVD_DUMMY) {
 1550                 rtw_err(rtwdev, "invalid rsvd page type %d, should be beacon or dummy\n",
 1551                         rsvd_pkt->type);
 1552                 return -EINVAL;
 1553         }
 1554 
 1555         skb = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
 1556         if (!skb) {
 1557                 rtw_err(rtwdev, "failed to get beacon skb\n");
 1558                 return -ENOMEM;
 1559         }
 1560 
 1561         ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
 1562         if (ret)
 1563                 rtw_err(rtwdev, "failed to download drv rsvd page\n");
 1564 
 1565         dev_kfree_skb(skb);
 1566 
 1567         return ret;
 1568 }
 1569 
 1570 int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev)
 1571 {
 1572         u8 *buf;
 1573         u32 size;
 1574         int ret;
 1575 
 1576         buf = rtw_build_rsvd_page(rtwdev, &size);
 1577         if (!buf) {
 1578                 rtw_err(rtwdev, "failed to build rsvd page pkt\n");
 1579                 return -ENOMEM;
 1580         }
 1581 
 1582         ret = rtw_download_drv_rsvd_page(rtwdev, buf, size);
 1583         if (ret) {
 1584                 rtw_err(rtwdev, "failed to download drv rsvd page\n");
 1585                 goto free;
 1586         }
 1587 
 1588         /* Finally, download *only* the beacon again, because the previous
 1589          * tx_desc described the entire rsvd page. Downloading the beacon
 1590          * by itself replaces that TX descriptor header, leaving a correct
 1591          * tx_desc for the beacon within the rsvd page.
 1592          */
 1593         ret = rtw_download_beacon(rtwdev);
 1594         if (ret) {
 1595                 rtw_err(rtwdev, "failed to download beacon\n");
 1596                 goto free;
 1597         }
 1598 
 1599 free:
 1600         kfree(buf);
 1601 
 1602         return ret;
 1603 }
 1604 
 1605 void rtw_fw_update_beacon_work(struct work_struct *work)
 1606 {
 1607         struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
 1608                                               update_beacon_work);
 1609 
 1610         mutex_lock(&rtwdev->mutex);
 1611         rtw_fw_download_rsvd_page(rtwdev);
 1612         mutex_unlock(&rtwdev->mutex);
 1613 }
 1614 
 1615 static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
 1616                                   u32 *buf, u32 residue, u16 start_pg)
 1617 {
 1618         u32 i;
 1619         u16 idx = 0;
 1620         u16 ctl;
 1621 
 1622         ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
 1623         /* disable rx clock gate */
 1624         rtw_write32_set(rtwdev, REG_RCR, BIT_DISGCLK);
 1625 
 1626         do {
 1627                 rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);
 1628 
 1629                 for (i = FIFO_DUMP_ADDR + residue;
 1630                      i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) {
 1631                         buf[idx++] = rtw_read32(rtwdev, i);
 1632                         size -= 4;
 1633                         if (size == 0)
 1634                                 goto out;
 1635                 }
 1636 
 1637                 residue = 0;
 1638                 start_pg++;
 1639         } while (size);
 1640 
 1641 out:
 1642         rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
 1643         /* restore rx clock gate */
 1644         rtw_write32_clr(rtwdev, REG_RCR, BIT_DISGCLK);
 1645 }
 1646 
 1647 static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
 1648                              u32 offset, u32 size, u32 *buf)
 1649 {
 1650         struct rtw_chip_info *chip = rtwdev->chip;
 1651         u32 start_pg, residue;
 1652 
 1653         if (sel >= RTW_FW_FIFO_MAX) {
 1654                 rtw_dbg(rtwdev, RTW_DBG_FW, "wrong fw fifo sel\n");
 1655                 return;
 1656         }
 1657         if (sel == RTW_FW_FIFO_SEL_RSVD_PAGE)
 1658                 offset += rtwdev->fifo.rsvd_boundary << TX_PAGE_SIZE_SHIFT;
 1659         residue = offset & (FIFO_PAGE_SIZE - 1);
 1660         start_pg = (offset >> FIFO_PAGE_SIZE_SHIFT) + chip->fw_fifo_addr[sel];
 1661 
 1662         rtw_fw_read_fifo_page(rtwdev, offset, size, buf, residue, start_pg);
 1663 }
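
/*
 * [Editorial sketch, not part of the upstream fw.c]  rtw_fw_read_fifo()
 * splits a byte offset into a page index and an in-page residue: the first
 * loop iteration in rtw_fw_read_fifo_page() starts reading at
 * FIFO_DUMP_ADDR + residue, and every later page starts at residue 0.
 * The 4096-byte page size (shift of 12) and the 0x780 base page are
 * assumptions chosen only for the arithmetic below.
 */
#if 0	/* self-contained illustration */
#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096, page_shift = 12, fifo_base = 0x780;
	unsigned int offset = 0x1234;

	unsigned int residue = offset & (page_size - 1);		/* 0x234 */
	unsigned int start_pg = (offset >> page_shift) + fifo_base;	/* 0x781 */

	printf("start_pg=%#x residue=%#x\n", start_pg, residue);
	return 0;
}
#endif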
 1664 
 1665 static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev,
 1666                                    enum rtw_fw_fifo_sel sel,
 1667                                    u32 start_addr, u32 size)
 1668 {
 1669         switch (sel) {
 1670         case RTW_FW_FIFO_SEL_TX:
 1671         case RTW_FW_FIFO_SEL_RX:
 1672                 if ((start_addr + size) > rtwdev->chip->fw_fifo_addr[sel])
 1673                         return false;
 1674                 fallthrough;
 1675         default:
 1676                 return true;
 1677         }
 1678 }
 1679 
 1680 int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
 1681                      u32 *buffer)
 1682 {
 1683         if (!rtwdev->chip->fw_fifo_addr[0]) {
 1684                 rtw_dbg(rtwdev, RTW_DBG_FW, "chip does not support fw fifo dump\n");
 1685                 return -ENOTSUPP;
 1686         }
 1687 
 1688         if (size == 0 || !buffer)
 1689                 return -EINVAL;
 1690 
 1691         if (size & 0x3) {
 1692                 rtw_dbg(rtwdev, RTW_DBG_FW, "size is not 4-byte aligned\n");
 1693                 return -EINVAL;
 1694         }
 1695 
 1696         if (!rtw_fw_dump_check_size(rtwdev, fifo_sel, addr, size)) {
 1697                 rtw_dbg(rtwdev, RTW_DBG_FW, "fw fifo dump size overflow\n");
 1698                 return -EINVAL;
 1699         }
 1700 
 1701         rtw_fw_read_fifo(rtwdev, fifo_sel, addr, size, buffer);
 1702 
 1703         return 0;
 1704 }
 1705 
 1706 static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
 1707                                 u8 location)
 1708 {
 1709         struct rtw_chip_info *chip = rtwdev->chip;
 1710         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
 1711         u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;
 1712 
 1713         rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_UPDATE_PKT);
 1714 
 1715         SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
 1716         UPDATE_PKT_SET_PKT_ID(h2c_pkt, pkt_id);
 1717         UPDATE_PKT_SET_LOCATION(h2c_pkt, location);
 1718 
 1719         /* include txdesc size */
 1720         size += chip->tx_pkt_desc_sz;
 1721         UPDATE_PKT_SET_SIZE(h2c_pkt, size);
 1722 
 1723         rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
 1724 }
 1725 
 1726 void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
 1727                                  struct cfg80211_ssid *ssid)
 1728 {
 1729         u8 loc;
 1730         u16 size;
 1731 
 1732         loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
 1733         if (!loc) {
 1734                 rtw_err(rtwdev, "failed to get probe_req rsvd loc\n");
 1735                 return;
 1736         }
 1737 
 1738         size = rtw_get_rsvd_page_probe_req_size(rtwdev, ssid);
 1739         if (!size) {
 1740                 rtw_err(rtwdev, "failed to get probe_req rsvd size\n");
 1741                 return;
 1742         }
 1743 
 1744         __rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, size, loc);
 1745 }
 1746 
 1747 void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
 1748 {
 1749         struct rtw_pno_request *rtw_pno_req = &rtwdev->wow.pno_req;
 1750         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
 1751         u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_CH_SWITCH_LEN;
 1752         u8 loc_ch_info;
 1753         const struct rtw_ch_switch_option cs_option = {
 1754                 .dest_ch_en = 1,
 1755                 .dest_ch = 1,
 1756                 .periodic_option = 2,
 1757                 .normal_period = 5,
 1758                 .normal_period_sel = 0,
 1759                 .normal_cycle = 10,
 1760                 .slow_period = 1,
 1761                 .slow_period_sel = 1,
 1762         };
 1763 
 1764         rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_CH_SWITCH);
 1765         SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
 1766 
 1767         CH_SWITCH_SET_START(h2c_pkt, enable);
 1768         CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, cs_option.dest_ch_en);
 1769         CH_SWITCH_SET_DEST_CH(h2c_pkt, cs_option.dest_ch);
 1770         CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, cs_option.normal_period);
 1771         CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, cs_option.normal_period_sel);
 1772         CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, cs_option.slow_period);
 1773         CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, cs_option.slow_period_sel);
 1774         CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, cs_option.normal_cycle);
 1775         CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, cs_option.periodic_option);
 1776 
 1777         CH_SWITCH_SET_CH_NUM(h2c_pkt, rtw_pno_req->channel_cnt);
 1778         CH_SWITCH_SET_INFO_SIZE(h2c_pkt, rtw_pno_req->channel_cnt * 4);
 1779 
 1780         loc_ch_info = rtw_get_rsvd_page_location(rtwdev, RSVD_CH_INFO);
 1781         CH_SWITCH_SET_INFO_LOC(h2c_pkt, loc_ch_info);
 1782 
 1783         rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
 1784 }
 1785 
 1786 void rtw_fw_adaptivity(struct rtw_dev *rtwdev)
 1787 {
 1788         struct rtw_dm_info *dm_info = &rtwdev->dm_info;
 1789         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
 1790 
 1791         if (!rtw_edcca_enabled) {
 1792                 dm_info->edcca_mode = RTW_EDCCA_NORMAL;
 1793                 rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
 1794                         "EDCCA disabled by debugfs\n");
 1795         }
 1796 
 1797         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY);
 1798         SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode);
 1799         SET_ADAPTIVITY_OPTION(h2c_pkt, 1);
 1800         SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]);
 1801         SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini);
 1802         SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density);
 1803 
 1804         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
 1805 }
 1806 
 1807 void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
 1808 {
 1809         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
 1810 
 1811         SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SCAN);
 1812         SET_SCAN_START(h2c_pkt, start);
 1813 
 1814         rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
 1815 }
 1816 
 1817 static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
 1818                                    struct sk_buff_head *list, u8 *bands,
 1819                                    struct rtw_vif *rtwvif)
 1820 {
 1821         struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
 1822         struct rtw_chip_info *chip = rtwdev->chip;
 1823         struct sk_buff *new;
 1824         u8 idx;
 1825 
 1826         for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
 1827                 if (!(BIT(idx) & chip->band))
 1828                         continue;
 1829                 new = skb_copy(skb, GFP_KERNEL);
 1830                 if (!new)
 1831                         return -ENOMEM;
 1832                 skb_put_data(new, ies->ies[idx], ies->len[idx]);
 1833                 skb_put_data(new, ies->common_ies, ies->common_ie_len);
 1834                 skb_queue_tail(list, new);
 1835                 (*bands)++;
 1836         }
 1837 
 1838         return 0;
 1839 }
 1840 
 1841 static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
 1842                                          struct sk_buff_head *probe_req_list)
 1843 {
 1844         struct rtw_chip_info *chip = rtwdev->chip;
 1845         struct sk_buff *skb, *tmp;
 1846         u8 page_offset = 1, *buf, page_size = chip->page_size;
 1847         u8 pages = page_offset + num_probes * RTW_PROBE_PG_CNT;
 1848         u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
 1849         u16 buf_offset = page_size * page_offset;
 1850         u8 tx_desc_sz = chip->tx_pkt_desc_sz;
 1851         unsigned int pkt_len;
 1852         int ret;
 1853 
 1854         buf = kzalloc(page_size * pages, GFP_KERNEL);
 1855         if (!buf)
 1856                 return -ENOMEM;
 1857 
 1858         buf_offset -= tx_desc_sz;
 1859         skb_queue_walk_safe(probe_req_list, skb, tmp) {
 1860                 skb_unlink(skb, probe_req_list);
 1861                 rtw_fill_rsvd_page_desc(rtwdev, skb, RSVD_PROBE_REQ);
 1862                 if (skb->len > page_size * RTW_PROBE_PG_CNT) {
 1863 #if defined(__FreeBSD__)
 1864                         kfree_skb(skb);
 1865 #endif
 1866                         ret = -EINVAL;
 1867                         goto out;
 1868                 }
 1869 
 1870                 memcpy(buf + buf_offset, skb->data, skb->len);
 1871                 pkt_len = skb->len - tx_desc_sz;
 1872                 loc = pg_addr - rtwdev->fifo.rsvd_boundary + page_offset;
 1873                 __rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, pkt_len, loc);
 1874 
 1875                 buf_offset += RTW_PROBE_PG_CNT * page_size;
 1876                 page_offset += RTW_PROBE_PG_CNT;
 1877                 kfree_skb(skb);
 1878         }
 1879 
 1880         ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, buf_offset);
 1881         if (ret) {
 1882                 rtw_err(rtwdev, "Download probe request to firmware failed\n");
 1883                 goto out;
 1884         }
 1885 
 1886         rtwdev->scan_info.probe_pg_size = page_offset;
 1887 out:
 1888         kfree(buf);
 1889         skb_queue_walk_safe(probe_req_list, skb, tmp)
 1890                 kfree_skb(skb);
 1891 
 1892         return ret;
 1893 }
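
/*
 * [Editorial sketch, not part of the upstream fw.c]  Buffer layout used by
 * _rtw_hw_scan_update_probe_req() above: page 0 of the allocation is left
 * free, the first copy starts tx_desc_sz before the page-1 boundary (the
 * same margin idea as in rtw_build_rsvd_page()), and each probe request
 * then advances by a fixed number of pages.  The 128/40-byte sizes and
 * probe_pg_cnt = 2 are assumptions standing in for page_size, tx_desc_sz
 * and RTW_PROBE_PG_CNT.
 */
#if 0	/* self-contained illustration */
#include <stdio.h>

int main(void)
{
	unsigned int page_size = 128, tx_desc_sz = 40, probe_pg_cnt = 2;
	unsigned int page_offset = 1;			/* page 0 kept free */
	unsigned int buf_offset = page_size * page_offset - tx_desc_sz;
	unsigned int i;

	for (i = 0; i < 3; i++) {	/* three probe-request frames */
		printf("probe %u: buf_offset=%u page_offset=%u\n",
		       i, buf_offset, page_offset);
		buf_offset += probe_pg_cnt * page_size;
		page_offset += probe_pg_cnt;
	}
	printf("bytes handed to rtw_fw_write_data_rsvd_page(): %u\n", buf_offset);
	return 0;
}
#endif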
 1894 
 1895 static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
 1896                                         struct rtw_vif *rtwvif)
 1897 {
 1898         struct cfg80211_scan_request *req = rtwvif->scan_req;
 1899         struct sk_buff_head list;
 1900         struct sk_buff *skb, *tmp;
 1901         u8 num = req->n_ssids, i, bands = 0;
 1902         int ret;
 1903 
 1904         skb_queue_head_init(&list);
 1905         for (i = 0; i < num; i++) {
 1906                 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
 1907                                              req->ssids[i].ssid,
 1908                                              req->ssids[i].ssid_len,
 1909                                              req->ie_len);
 1910                 if (!skb) {
 1911                         ret = -ENOMEM;
 1912                         goto out;
 1913                 }
 1914                 ret = rtw_append_probe_req_ie(rtwdev, skb, &list, &bands,
 1915                                               rtwvif);
 1916                 if (ret)
 1917                         goto out;
 1918 
 1919                 kfree_skb(skb);
 1920         }
 1921 
 1922         return _rtw_hw_scan_update_probe_req(rtwdev, num * bands, &list);
 1923 
 1924 out:
 1925         skb_queue_walk_safe(&list, skb, tmp)
 1926                 kfree_skb(skb);
 1927 
 1928         return ret;
 1929 }
 1930 
 1931 static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info,
 1932                              struct rtw_chan_list *list, u8 *buf)
 1933 {
 1934         u8 *chan = &buf[list->size];
 1935         u8 info_size = RTW_CH_INFO_SIZE;
 1936 
 1937         if (list->size > list->buf_size)
 1938                 return -ENOMEM;
 1939 
 1940         CH_INFO_SET_CH(chan, info->channel);
 1941         CH_INFO_SET_PRI_CH_IDX(chan, info->pri_ch_idx);
 1942         CH_INFO_SET_BW(chan, info->bw);
 1943         CH_INFO_SET_TIMEOUT(chan, info->timeout);
 1944         CH_INFO_SET_ACTION_ID(chan, info->action_id);
 1945         CH_INFO_SET_EXTRA_INFO(chan, info->extra_info);
 1946         if (info->extra_info) {
 1947                 EXTRA_CH_INFO_SET_ID(chan, RTW_SCAN_EXTRA_ID_DFS);
 1948                 EXTRA_CH_INFO_SET_INFO(chan, RTW_SCAN_EXTRA_ACTION_SCAN);
 1949                 EXTRA_CH_INFO_SET_SIZE(chan, RTW_EX_CH_INFO_SIZE -
 1950                                        RTW_EX_CH_INFO_HDR_SIZE);
 1951                 EXTRA_CH_INFO_SET_DFS_EXT_TIME(chan, RTW_DFS_CHAN_TIME);
 1952                 info_size += RTW_EX_CH_INFO_SIZE;
 1953         }
 1954         list->size += info_size;
 1955         list->ch_num++;
 1956 
 1957         return 0;
 1958 }
 1959 
 1960 static int rtw_add_chan_list(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
 1961                              struct rtw_chan_list *list, u8 *buf)
 1962 {
 1963         struct cfg80211_scan_request *req = rtwvif->scan_req;
 1964         struct rtw_fifo_conf *fifo = &rtwdev->fifo;
 1965         struct ieee80211_channel *channel;
 1966         int i, ret = 0;
 1967 
 1968         for (i = 0; i < req->n_channels; i++) {
 1969                 struct rtw_chan_info ch_info = {0};
 1970 
 1971                 channel = req->channels[i];
 1972                 ch_info.channel = channel->hw_value;
 1973                 ch_info.bw = RTW_SCAN_WIDTH;
 1974                 ch_info.pri_ch_idx = RTW_PRI_CH_IDX;
 1975                 ch_info.timeout = req->duration_mandatory ?
 1976                                   req->duration : RTW_CHANNEL_TIME;
 1977 
 1978                 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) {
 1979                         ch_info.action_id = RTW_CHANNEL_RADAR;
 1980                         ch_info.extra_info = 1;
 1981                         /* Overwrite duration for passive scans if necessary */
 1982                         ch_info.timeout = ch_info.timeout > RTW_PASS_CHAN_TIME ?
 1983                                           ch_info.timeout : RTW_PASS_CHAN_TIME;
 1984                 } else {
 1985                         ch_info.action_id = RTW_CHANNEL_ACTIVE;
 1986                 }
 1987 
 1988                 ret = rtw_add_chan_info(rtwdev, &ch_info, list, buf);
 1989                 if (ret)
 1990                         return ret;
 1991         }
 1992 
 1993         if (list->size > fifo->rsvd_pg_num << TX_PAGE_SIZE_SHIFT) {
 1994                 rtw_err(rtwdev, "List exceeds rsvd page total size\n");
 1995                 return -EINVAL;
 1996         }
 1997 
 1998         list->addr = fifo->rsvd_h2c_info_addr + rtwdev->scan_info.probe_pg_size;
 1999         ret = rtw_fw_write_data_rsvd_page(rtwdev, list->addr, buf, list->size);
 2000         if (ret)
 2001                 rtw_err(rtwdev, "Download channel list failed\n");
 2002 
 2003         return ret;
 2004 }
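
/*
 * [Editorial sketch, not part of the upstream fw.c]  Channel-list sizing:
 * rtw_hw_scan_prehandle() allocates for the worst case in which every
 * channel carries the DFS extra info, while rtw_add_chan_info() only adds
 * RTW_EX_CH_INFO_SIZE for radar/no-IR channels, so list->size can end up
 * smaller than the allocation.  The 4-byte entry sizes below are
 * placeholders, not the real RTW_CH_INFO_SIZE / RTW_EX_CH_INFO_SIZE values.
 */
#if 0	/* self-contained illustration */
#include <stdio.h>

int main(void)
{
	unsigned int ch_info_sz = 4, ex_ch_info_sz = 4;	/* placeholder sizes */
	unsigned int n_channels = 3;

	/* worst-case allocation, as in rtw_hw_scan_prehandle() */
	unsigned int buf_size = n_channels * (ch_info_sz + ex_ch_info_sz);

	/* suppose only one of the three channels needs the extra info */
	unsigned int list_size = 2 * ch_info_sz + (ch_info_sz + ex_ch_info_sz);

	printf("buf_size=%u list_size=%u\n", buf_size, list_size);	/* 24 vs 16 */
	return 0;
}
#endif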
 2005 
 2006 static void rtw_fw_set_scan_offload(struct rtw_dev *rtwdev,
 2007                                     struct rtw_ch_switch_option *opt,
 2008                                     struct rtw_vif *rtwvif,
 2009                                     struct rtw_chan_list *list)
 2010 {
 2011         struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
 2012         struct cfg80211_scan_request *req = rtwvif->scan_req;
 2013         struct rtw_fifo_conf *fifo = &rtwdev->fifo;
 2014         /* reserve one dummy page at the beginning for tx descriptor */
 2015         u8 pkt_loc = fifo->rsvd_h2c_info_addr - fifo->rsvd_boundary + 1;
 2016         bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
 2017         u8 h2c_pkt[H2C_PKT_SIZE] = {0};
 2018 
 2019         rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_SCAN_OFFLOAD);
 2020         SET_PKT_H2C_TOTAL_LEN(h2c_pkt, H2C_PKT_CH_SWITCH_LEN);
 2021 
 2022         SCAN_OFFLOAD_SET_START(h2c_pkt, opt->switch_en);
 2023         SCAN_OFFLOAD_SET_BACK_OP_EN(h2c_pkt, opt->back_op_en);
 2024         SCAN_OFFLOAD_SET_RANDOM_SEQ_EN(h2c_pkt, random_seq);
 2025         SCAN_OFFLOAD_SET_NO_CCK_EN(h2c_pkt, req->no_cck);
 2026         SCAN_OFFLOAD_SET_CH_NUM(h2c_pkt, list->ch_num);
 2027         SCAN_OFFLOAD_SET_CH_INFO_SIZE(h2c_pkt, list->size);
 2028         SCAN_OFFLOAD_SET_CH_INFO_LOC(h2c_pkt, list->addr - fifo->rsvd_boundary);
 2029         SCAN_OFFLOAD_SET_OP_CH(h2c_pkt, scan_info->op_chan);
 2030         SCAN_OFFLOAD_SET_OP_PRI_CH_IDX(h2c_pkt, scan_info->op_pri_ch_idx);
 2031         SCAN_OFFLOAD_SET_OP_BW(h2c_pkt, scan_info->op_bw);
 2032         SCAN_OFFLOAD_SET_OP_PORT_ID(h2c_pkt, rtwvif->port);
 2033         SCAN_OFFLOAD_SET_OP_DWELL_TIME(h2c_pkt, req->duration_mandatory ?
 2034                                        req->duration : RTW_CHANNEL_TIME);
 2035         SCAN_OFFLOAD_SET_OP_GAP_TIME(h2c_pkt, RTW_OFF_CHAN_TIME);
 2036         SCAN_OFFLOAD_SET_SSID_NUM(h2c_pkt, req->n_ssids);
 2037         SCAN_OFFLOAD_SET_PKT_LOC(h2c_pkt, pkt_loc);
 2038 
 2039         rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
 2040 }
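
/*
 * [Editorial sketch, not part of the upstream fw.c]  The locations handed
 * to the firmware are page offsets relative to the reserved-page boundary;
 * the extra "+ 1" above skips the dummy tx-descriptor page mentioned in
 * the comment.  The page indices below are invented example values, not
 * real rtwdev->fifo contents.
 */
#if 0	/* self-contained illustration */
#include <stdio.h>

int main(void)
{
	unsigned int rsvd_boundary = 0x780, rsvd_h2c_info_addr = 0x790;
	unsigned int pkt_loc = rsvd_h2c_info_addr - rsvd_boundary + 1;

	printf("pkt_loc=%u\n", pkt_loc);	/* 0x10 + 1 = 17 */
	return 0;
}
#endif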
 2041 
 2042 void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
 2043                        struct ieee80211_scan_request *scan_req)
 2044 {
 2045         struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
 2046         struct cfg80211_scan_request *req = &scan_req->req;
 2047         u8 mac_addr[ETH_ALEN];
 2048 
 2049         rtwdev->scan_info.scanning_vif = vif;
 2050         rtwvif->scan_ies = &scan_req->ies;
 2051         rtwvif->scan_req = req;
 2052 
 2053         ieee80211_stop_queues(rtwdev->hw);
 2054         if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
 2055                 get_random_mask_addr(mac_addr, req->mac_addr,
 2056                                      req->mac_addr_mask);
 2057         else
 2058                 ether_addr_copy(mac_addr, vif->addr);
 2059 
 2060         rtw_core_scan_start(rtwdev, rtwvif, mac_addr, true);
 2061 
 2062         rtwdev->hal.rcr &= ~BIT_CBSSID_BCN;
 2063         rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
 2064 }
 2065 
 2066 void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
 2067                           bool aborted)
 2068 {
 2069         struct cfg80211_scan_info info = {
 2070                 .aborted = aborted,
 2071         };
 2072         struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
 2073         struct rtw_hal *hal = &rtwdev->hal;
 2074         struct rtw_vif *rtwvif;
 2075         u8 chan = scan_info->op_chan;
 2076 
 2077         if (!vif)
 2078                 return;
 2079 
 2080         rtwdev->hal.rcr |= BIT_CBSSID_BCN;
 2081         rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
 2082 
 2083         rtw_core_scan_complete(rtwdev, vif, true);
 2084 
 2085         rtwvif = (struct rtw_vif *)vif->drv_priv;
 2086         if (rtwvif->net_type == RTW_NET_MGD_LINKED) {
 2087                 hal->current_channel = chan;
 2088                 hal->current_band_type = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
 2089         }
 2090         ieee80211_wake_queues(rtwdev->hw);
 2091         ieee80211_scan_completed(rtwdev->hw, &info);
 2092 
 2093         rtwvif->scan_req = NULL;
 2094         rtwvif->scan_ies = NULL;
 2095         rtwdev->scan_info.scanning_vif = NULL;
 2096 }
 2097 
 2098 static int rtw_hw_scan_prehandle(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
 2099                                  struct rtw_chan_list *list)
 2100 {
 2101         struct cfg80211_scan_request *req = rtwvif->scan_req;
 2102         int size = req->n_channels * (RTW_CH_INFO_SIZE + RTW_EX_CH_INFO_SIZE);
 2103         u8 *buf;
 2104         int ret;
 2105 
 2106         buf = kmalloc(size, GFP_KERNEL);
 2107         if (!buf)
 2108                 return -ENOMEM;
 2109 
 2110         ret = rtw_hw_scan_update_probe_req(rtwdev, rtwvif);
 2111         if (ret) {
 2112                 rtw_err(rtwdev, "Update probe request failed\n");
 2113                 goto out;
 2114         }
 2115 
 2116         list->buf_size = size;
 2117         list->size = 0;
 2118         list->ch_num = 0;
 2119         ret = rtw_add_chan_list(rtwdev, rtwvif, list, buf);
 2120 out:
 2121         kfree(buf);
 2122 
 2123         return ret;
 2124 }
 2125 
 2126 int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
 2127                         bool enable)
 2128 {
 2129         struct rtw_vif *rtwvif = vif ? (struct rtw_vif *)vif->drv_priv : NULL;
 2130         struct rtw_ch_switch_option cs_option = {0};
 2131         struct rtw_chan_list chan_list = {0};
 2132         int ret = 0;
 2133 
 2134         if (!rtwvif)
 2135                 return -EINVAL;
 2136 
 2137         cs_option.switch_en = enable;
 2138         cs_option.back_op_en = rtwvif->net_type == RTW_NET_MGD_LINKED;
 2139         if (enable) {
 2140                 ret = rtw_hw_scan_prehandle(rtwdev, rtwvif, &chan_list);
 2141                 if (ret)
 2142                         goto out;
 2143         }
 2144         rtw_fw_set_scan_offload(rtwdev, &cs_option, rtwvif, &chan_list);
 2145 out:
 2146         return ret;
 2147 }
 2148 
 2149 void rtw_hw_scan_abort(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
 2150 {
 2151         if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD))
 2152                 return;
 2153 
 2154         rtw_hw_scan_offload(rtwdev, vif, false);
 2155         rtw_hw_scan_complete(rtwdev, vif, true);
 2156 }
 2157 
 2158 void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb)
 2159 {
 2160         struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
 2161         struct rtw_c2h_cmd *c2h;
 2162         bool aborted;
 2163         u8 rc;
 2164 
 2165         if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
 2166                 return;
 2167 
 2168         c2h = get_c2h_from_skb(skb);
 2169         rc = GET_SCAN_REPORT_RETURN_CODE(c2h->payload);
 2170         aborted = rc != RTW_SCAN_REPORT_SUCCESS;
 2171         rtw_hw_scan_complete(rtwdev, vif, aborted);
 2172 
 2173         if (aborted)
 2174                 rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, "HW scan aborted with code: %d\n", rc);
 2175 }
 2176 
 2177 void rtw_store_op_chan(struct rtw_dev *rtwdev)
 2178 {
 2179         struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
 2180         struct rtw_hal *hal = &rtwdev->hal;
 2181 
 2182         scan_info->op_chan = hal->current_channel;
 2183         scan_info->op_bw = hal->current_band_width;
 2184         scan_info->op_pri_ch_idx = hal->current_primary_channel_index;
 2185 }
 2186 
 2187 static bool rtw_is_op_chan(struct rtw_dev *rtwdev, u8 channel)
 2188 {
 2189         struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
 2190 
 2191         return channel == scan_info->op_chan;
 2192 }
 2193 
 2194 void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
 2195 {
 2196         struct rtw_hal *hal = &rtwdev->hal;
 2197         struct rtw_c2h_cmd *c2h;
 2198         enum rtw_scan_notify_id id;
 2199         u8 chan, status;
 2200 
 2201         if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
 2202                 return;
 2203 
 2204         c2h = get_c2h_from_skb(skb);
 2205         chan = GET_CHAN_SWITCH_CENTRAL_CH(c2h->payload);
 2206         id = GET_CHAN_SWITCH_ID(c2h->payload);
 2207         status = GET_CHAN_SWITCH_STATUS(c2h->payload);
 2208 
 2209         if (id == RTW_SCAN_NOTIFY_ID_POSTSWITCH) {
 2210                 if (rtw_is_op_chan(rtwdev, chan))
 2211                         ieee80211_wake_queues(rtwdev->hw);
 2212                 hal->current_channel = chan;
 2213                 hal->current_band_type = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
 2214         } else if (id == RTW_SCAN_NOTIFY_ID_PRESWITCH) {
 2215                 if (IS_CH_5G_BAND(chan)) {
 2216                         rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
 2217                 } else if (IS_CH_2G_BAND(chan)) {
 2218                         u8 chan_type;
 2219 
 2220                         if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
 2221                                 chan_type = COEX_SWITCH_TO_24G;
 2222                         else
 2223                                 chan_type = COEX_SWITCH_TO_24G_NOFORSCAN;
 2224                         rtw_coex_switchband_notify(rtwdev, chan_type);
 2225                 }
 2226                 if (rtw_is_op_chan(rtwdev, chan))
 2227                         ieee80211_stop_queues(rtwdev->hw);
 2228         }
 2229 
 2230         rtw_dbg(rtwdev, RTW_DBG_HW_SCAN,
 2231                 "Chan switch: %x, id: %x, status: %x\n", chan, id, status);
 2232 }
