FreeBSD/Linux Kernel Cross Reference
sys/contrib/dev/rtw89/pci.c

    1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
    2 /* Copyright(c) 2020  Realtek Corporation
    3  */
    4 
    5 #if defined(__FreeBSD__)
    6 #define LINUXKPI_PARAM_PREFIX   rtw89_pci_
    7 #endif
    8 
    9 #include <linux/pci.h>
   10 
   11 #include "mac.h"
   12 #include "pci.h"
   13 #include "reg.h"
   14 #include "ser.h"
   15 
   16 static bool rtw89_pci_disable_clkreq;
   17 static bool rtw89_pci_disable_aspm_l1;
   18 static bool rtw89_pci_disable_l1ss;
   19 module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
   20 module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
   21 module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
   22 MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
   23 MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
   24 MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
   25 
   26 static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev)
   27 {
   28         u32 val;
   29         int ret;
   30 
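               /* set B_AX_RST_BDRAM and poll until the hardware clears the
                * bit again, indicating the BD RAM reset has completed
                */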
   31         rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1,
   32                       rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM);
   33 
   34         ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
   35                                        1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
   36                                        rtwdev, R_AX_PCIE_INIT_CFG1);
   37 
   38         if (ret)
   39                 return -EBUSY;
   40 
   41         return 0;
   42 }
   43 
   44 static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
   45                                 struct rtw89_pci_dma_ring *bd_ring,
   46                                 u32 cur_idx, bool tx)
   47 {
   48         u32 cnt, cur_rp, wp, rp, len;
   49 
   50         rp = bd_ring->rp;
   51         wp = bd_ring->wp;
   52         len = bd_ring->len;
   53 
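               /* cur_idx holds the hardware index for this ring. For TX, the
                * hardware consumes BDs starting from rp, so the completed
                * count is the distance rp -> cur_rp; for RX, new BDs appear
                * from wp, so count the distance wp -> cur_rp. Both wrap at
                * the ring length.
                */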
   54         cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
   55         if (tx)
   56                 cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
   57         else
   58                 cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
   59 
   60         bd_ring->rp = cur_rp;
   61 
   62         return cnt;
   63 }
   64 
   65 static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
   66                                  struct rtw89_pci_tx_ring *tx_ring)
   67 {
   68         struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
   69         u32 addr_idx = bd_ring->addr.idx;
   70         u32 cnt, idx;
   71 
   72         idx = rtw89_read32(rtwdev, addr_idx);
   73         cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);
   74 
   75         return cnt;
   76 }
   77 
   78 static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
   79                                     struct rtw89_pci *rtwpci,
   80                                     u32 cnt, bool release_all)
   81 {
   82         struct rtw89_pci_tx_data *tx_data;
   83         struct sk_buff *skb;
   84         u32 qlen;
   85 
   86         while (cnt--) {
   87                 skb = skb_dequeue(&rtwpci->h2c_queue);
   88                 if (!skb) {
   89                         rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
   90                         return;
   91                 }
   92                 skb_queue_tail(&rtwpci->h2c_release_queue, skb);
   93         }
   94 
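               /* unless release_all is requested, keep the newest
                * RTW89_PCI_MULTITAG entries queued, since the hardware may
                * still be using them; older entries are unmapped and freed
                * below
                */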
   95         qlen = skb_queue_len(&rtwpci->h2c_release_queue);
   96         if (!release_all)
   97                qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;
   98 
   99         while (qlen--) {
  100                 skb = skb_dequeue(&rtwpci->h2c_release_queue);
  101                 if (!skb) {
  102                         rtw89_err(rtwdev, "failed to release fwcmd\n");
  103                         return;
  104                 }
  105                 tx_data = RTW89_PCI_TX_SKB_CB(skb);
  106                 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
  107                                  DMA_TO_DEVICE);
  108                 dev_kfree_skb_any(skb);
  109         }
  110 }
  111 
  112 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
  113                                        struct rtw89_pci *rtwpci)
  114 {
  115         struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
  116         u32 cnt;
  117 
  118         cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
  119         if (!cnt)
  120                 return;
  121         rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
  122 }
  123 
  124 static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
  125                                  struct rtw89_pci_rx_ring *rx_ring)
  126 {
  127         struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
  128         u32 addr_idx = bd_ring->addr.idx;
  129         u32 cnt, idx;
  130 
  131         idx = rtw89_read32(rtwdev, addr_idx);
  132         cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);
  133 
  134         return cnt;
  135 }
  136 
  137 static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
  138                                        struct sk_buff *skb)
  139 {
  140         struct rtw89_pci_rx_info *rx_info;
  141         dma_addr_t dma;
  142 
  143         rx_info = RTW89_PCI_RX_SKB_CB(skb);
  144         dma = rx_info->dma;
  145         dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
  146                                 DMA_FROM_DEVICE);
  147 }
  148 
  149 static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
  150                                           struct sk_buff *skb)
  151 {
  152         struct rtw89_pci_rx_info *rx_info;
  153         dma_addr_t dma;
  154 
  155         rx_info = RTW89_PCI_RX_SKB_CB(skb);
  156         dma = rx_info->dma;
  157         dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
  158                                    DMA_FROM_DEVICE);
  159 }
  160 
  161 static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
  162                                       struct sk_buff *skb)
  163 {
  164         struct rtw89_pci_rxbd_info *rxbd_info;
  165         struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
  166 
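               /* the hardware prepends a rtw89_pci_rxbd_info word to each RX
                * buffer: first/last-segment flags, the written length, and a
                * sequence tag
                */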
  167         rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
  168         rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
  169         rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
  170         rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
  171         rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
  172 
  173         return 0;
  174 }
  175 
  176 static bool
  177 rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
  178                       struct sk_buff *new,
  179                       const struct sk_buff *skb, u32 offset,
  180                       const struct rtw89_pci_rx_info *rx_info,
  181                       const struct rtw89_rx_desc_info *desc_info)
  182 {
  183         u32 copy_len = rx_info->len - offset;
  184 
  185         if (unlikely(skb_tailroom(new) < copy_len)) {
  186                 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
  187                             "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
  188                             rx_info->len, desc_info->pkt_size, offset, fs, ls);
  189                 rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
  190                                skb->data, rx_info->len);
  191                 /* length of a single segment skb is desc_info->pkt_size */
  192                 if (fs && ls) {
  193                         copy_len = desc_info->pkt_size;
  194                 } else {
  195                         rtw89_info(rtwdev, "drop rx data due to invalid length\n");
  196                         return false;
  197                 }
  198         }
  199 
  200         skb_put_data(new, skb->data + offset, copy_len);
  201 
  202         return true;
  203 }
  204 
  205 static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
  206                                        struct rtw89_pci_rx_ring *rx_ring)
  207 {
  208         struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
  209         struct rtw89_pci_rx_info *rx_info;
  210         struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
  211         struct sk_buff *new = rx_ring->diliver_skb;
  212         struct sk_buff *skb;
  213         u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
  214         u32 offset;
  215         u32 cnt = 1;
  216         bool fs, ls;
  217         int ret;
  218 
  219         skb = rx_ring->buf[bd_ring->wp];
  220         rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
  221 
  222         ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
  223         if (ret) {
  224                 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
  225                           bd_ring->wp, ret);
  226                 goto err_sync_device;
  227         }
  228 
  229         rx_info = RTW89_PCI_RX_SKB_CB(skb);
  230         fs = rx_info->fs;
  231         ls = rx_info->ls;
  232 
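               /* A packet may span multiple RX buffers. The first segment
                * (fs) carries the RX descriptor and allocates the target skb;
                * the last segment (ls) completes the packet and hands it to
                * rtw89_core_rx().
                */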
  233         if (fs) {
  234                 if (new) {
  235                         rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
  236                                     "skb should not be ready before first segment start\n");
  237                         goto err_sync_device;
  238                 }
  239                 if (desc_info->ready) {
  240                         rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
  241                         goto err_sync_device;
  242                 }
  243 
  244                 rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);
  245 
  246                 new = dev_alloc_skb(desc_info->pkt_size);
  247                 if (!new)
  248                         goto err_sync_device;
  249 
  250                 rx_ring->diliver_skb = new;
  251 
  252                 /* first segment has RX desc */
  253                 offset = desc_info->offset;
  254                 offset += desc_info->long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
  255                           sizeof(struct rtw89_rxdesc_short);
  256         } else {
  257                 offset = sizeof(struct rtw89_pci_rxbd_info);
  258                 if (!new) {
  259                         rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
  260                         goto err_sync_device;
  261                 }
  262         }
  263         if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
  264                 goto err_sync_device;
  265         rtw89_pci_sync_skb_for_device(rtwdev, skb);
  266         rtw89_pci_rxbd_increase(rx_ring, 1);
  267 
  268         if (!desc_info->ready) {
  269                 rtw89_warn(rtwdev, "no rx desc information\n");
  270                 goto err_free_resource;
  271         }
  272         if (ls) {
  273                 rtw89_core_rx(rtwdev, desc_info, new);
  274                 rx_ring->diliver_skb = NULL;
  275                 desc_info->ready = false;
  276         }
  277 
  278         return cnt;
  279 
  280 err_sync_device:
  281         rtw89_pci_sync_skb_for_device(rtwdev, skb);
  282         rtw89_pci_rxbd_increase(rx_ring, 1);
  283 err_free_resource:
  284         if (new)
  285                 dev_kfree_skb_any(new);
  286         rx_ring->diliver_skb = NULL;
  287         desc_info->ready = false;
  288 
  289         return cnt;
  290 }
  291 
  292 static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
  293                                    struct rtw89_pci_rx_ring *rx_ring,
  294                                    u32 cnt)
  295 {
  296         struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
  297         u32 rx_cnt;
  298 
  299         while (cnt && rtwdev->napi_budget_countdown > 0) {
  300                 rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
  301                 if (!rx_cnt) {
  302                         rtw89_err(rtwdev, "failed to deliver RXBD skb\n");
  303 
   304                         /* skip the remaining RXBD buffers */
  305                         rtw89_pci_rxbd_increase(rx_ring, cnt);
  306                         break;
  307                 }
  308 
  309                 cnt -= rx_cnt;
  310         }
  311 
  312         rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
  313 }
  314 
  315 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
  316                                   struct rtw89_pci *rtwpci, int budget)
  317 {
  318         struct rtw89_pci_rx_ring *rx_ring;
  319         int countdown = rtwdev->napi_budget_countdown;
  320         u32 cnt;
  321 
  322         rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
  323 
  324         cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
  325         if (!cnt)
  326                 return 0;
  327 
  328         cnt = min_t(u32, budget, cnt);
  329 
  330         rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);
  331 
   332         /* When flushing pending SKBs, the countdown may drop below zero. */
  333         if (rtwdev->napi_budget_countdown <= 0)
  334                 return budget;
  335 
  336         return budget - countdown;
  337 }
  338 
  339 static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
  340                                 struct rtw89_pci_tx_ring *tx_ring,
  341                                 struct sk_buff *skb, u8 tx_status)
  342 {
  343         struct ieee80211_tx_info *info;
  344 
  345         info = IEEE80211_SKB_CB(skb);
  346         ieee80211_tx_info_clear_status(info);
  347 
  348         if (info->flags & IEEE80211_TX_CTL_NO_ACK)
  349                 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
  350         if (tx_status == RTW89_TX_DONE) {
  351                 info->flags |= IEEE80211_TX_STAT_ACK;
  352                 tx_ring->tx_acked++;
  353         } else {
  354                 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
  355                         rtw89_debug(rtwdev, RTW89_DBG_FW,
  356                                     "failed to TX of status %x\n", tx_status);
  357                 switch (tx_status) {
  358                 case RTW89_TX_RETRY_LIMIT:
  359                         tx_ring->tx_retry_lmt++;
  360                         break;
  361                 case RTW89_TX_LIFE_TIME:
  362                         tx_ring->tx_life_time++;
  363                         break;
  364                 case RTW89_TX_MACID_DROP:
  365                         tx_ring->tx_mac_id_drop++;
  366                         break;
  367                 default:
  368                         rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
  369                         break;
  370                 }
  371         }
  372 
  373         ieee80211_tx_status_ni(rtwdev->hw, skb);
  374 }
  375 
  376 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
  377 {
  378         struct rtw89_pci_tx_wd *txwd;
  379         u32 cnt;
  380 
  381         cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
  382         while (cnt--) {
  383                 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
  384                 if (!txwd) {
  385                         rtw89_warn(rtwdev, "No busy txwd pages available\n");
  386                         break;
  387                 }
  388 
  389                 list_del_init(&txwd->list);
  390 
   391                 /* this txwd's skbs were already freed by RPP processing */
  392                 if (skb_queue_len(&txwd->queue) == 0)
  393                         rtw89_pci_enqueue_txwd(tx_ring, txwd);
  394         }
  395 }
  396 
  397 static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
  398                                         struct rtw89_pci_tx_ring *tx_ring)
  399 {
  400         struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
  401         struct rtw89_pci_tx_wd *txwd;
  402         int i;
  403 
  404         for (i = 0; i < wd_ring->page_num; i++) {
  405                 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
  406                 if (!txwd)
  407                         break;
  408 
  409                 list_del_init(&txwd->list);
  410         }
  411 }
  412 
  413 static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
  414                                        struct rtw89_pci_tx_ring *tx_ring,
  415                                        struct rtw89_pci_tx_wd *txwd, u16 seq,
  416                                        u8 tx_status)
  417 {
  418         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
  419         struct rtw89_pci_tx_data *tx_data;
  420         struct sk_buff *skb, *tmp;
  421         u8 txch = tx_ring->txch;
  422 
  423         if (!list_empty(&txwd->list)) {
  424                 rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
   425                 /* In low power mode, an RPP can be received before the TX BD
   426                  * is updated. In normal mode this should not happen, so warn.
   427                  */
  428                 if (!rtwpci->low_power && !list_empty(&txwd->list))
  429                         rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
  430                                    txch, seq);
  431         }
  432 
  433         skb_queue_walk_safe(&txwd->queue, skb, tmp) {
  434                 skb_unlink(skb, &txwd->queue);
  435 
  436                 tx_data = RTW89_PCI_TX_SKB_CB(skb);
  437                 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
  438                                  DMA_TO_DEVICE);
  439 
  440                 rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
  441         }
  442 
  443         if (list_empty(&txwd->list))
  444                 rtw89_pci_enqueue_txwd(tx_ring, txwd);
  445 }
  446 
  447 static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
  448                                   struct rtw89_pci_rpp_fmt *rpp)
  449 {
  450         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
  451         struct rtw89_pci_tx_ring *tx_ring;
  452         struct rtw89_pci_tx_wd_ring *wd_ring;
  453         struct rtw89_pci_tx_wd *txwd;
  454         u16 seq;
  455         u8 qsel, tx_status, txch;
  456 
  457         seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
  458         qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
  459         tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
  460         txch = rtw89_core_get_ch_dma(rtwdev, qsel);
  461 
  462         if (txch == RTW89_TXCH_CH12) {
  463                 rtw89_warn(rtwdev, "should no fwcmd release report\n");
  464                 return;
  465         }
  466 
  467         tx_ring = &rtwpci->tx_rings[txch];
  468         wd_ring = &tx_ring->wd_ring;
  469         txwd = &wd_ring->pages[seq];
  470 
  471         rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
  472 }
  473 
  474 static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
  475                                                struct rtw89_pci_tx_ring *tx_ring)
  476 {
  477         struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
  478         struct rtw89_pci_tx_wd *txwd;
  479         int i;
  480 
  481         for (i = 0; i < wd_ring->page_num; i++) {
  482                 txwd = &wd_ring->pages[i];
  483 
  484                 if (!list_empty(&txwd->list))
  485                         continue;
  486 
  487                 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
  488         }
  489 }
  490 
  491 static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
  492                                      struct rtw89_pci_rx_ring *rx_ring,
  493                                      u32 max_cnt)
  494 {
  495         struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
  496         struct rtw89_pci_rx_info *rx_info;
  497         struct rtw89_pci_rpp_fmt *rpp;
  498         struct rtw89_rx_desc_info desc_info = {};
  499         struct sk_buff *skb;
  500         u32 cnt = 0;
  501         u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
  502         u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
  503         u32 offset;
  504         int ret;
  505 
  506         skb = rx_ring->buf[bd_ring->wp];
  507         rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
  508 
  509         ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
  510         if (ret) {
  511                 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
  512                           bd_ring->wp, ret);
  513                 goto err_sync_device;
  514         }
  515 
  516         rx_info = RTW89_PCI_RX_SKB_CB(skb);
  517         if (!rx_info->fs || !rx_info->ls) {
  518                 rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
  519                 return cnt;
  520         }
  521 
  522         rtw89_core_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);
  523 
  524         /* first segment has RX desc */
  525         offset = desc_info.offset;
  526         offset += desc_info.long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
  527                                           sizeof(struct rtw89_rxdesc_short);
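               /* following the RX descriptor is an array of RPP entries;
                * each one reports the TX status of one submitted WD,
                * identified by its sequence number and queue selection
                */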
  528         for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
  529                 rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
  530                 rtw89_pci_release_rpp(rtwdev, rpp);
  531         }
  532 
  533         rtw89_pci_sync_skb_for_device(rtwdev, skb);
  534         rtw89_pci_rxbd_increase(rx_ring, 1);
  535         cnt++;
  536 
  537         return cnt;
  538 
  539 err_sync_device:
  540         rtw89_pci_sync_skb_for_device(rtwdev, skb);
  541         return 0;
  542 }
  543 
  544 static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
  545                                  struct rtw89_pci_rx_ring *rx_ring,
  546                                  u32 cnt)
  547 {
  548         struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
  549         u32 release_cnt;
  550 
  551         while (cnt) {
  552                 release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
  553                 if (!release_cnt) {
  554                         rtw89_err(rtwdev, "failed to release TX skbs\n");
  555 
   556                         /* skip the remaining RXBD buffers */
  557                         rtw89_pci_rxbd_increase(rx_ring, cnt);
  558                         break;
  559                 }
  560 
  561                 cnt -= release_cnt;
  562         }
  563 
  564         rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
  565 }
  566 
  567 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
  568                                   struct rtw89_pci *rtwpci, int budget)
  569 {
  570         struct rtw89_pci_rx_ring *rx_ring;
  571         u32 cnt;
  572         int work_done;
  573 
  574         rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
  575 
  576         spin_lock_bh(&rtwpci->trx_lock);
  577 
  578         cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
  579         if (cnt == 0)
  580                 goto out_unlock;
  581 
  582         rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
  583 
  584 out_unlock:
  585         spin_unlock_bh(&rtwpci->trx_lock);
  586 
  587         /* always release all RPQ */
  588         work_done = min_t(int, cnt, budget);
  589         rtwdev->napi_budget_countdown -= work_done;
  590 
  591         return work_done;
  592 }
  593 
  594 static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
  595                                       struct rtw89_pci *rtwpci)
  596 {
  597         struct rtw89_pci_rx_ring *rx_ring;
  598         struct rtw89_pci_dma_ring *bd_ring;
  599         u32 reg_idx;
  600         u16 hw_idx, hw_idx_next, host_idx;
  601         int i;
  602 
  603         for (i = 0; i < RTW89_RXCH_NUM; i++) {
  604                 rx_ring = &rtwpci->rx_rings[i];
  605                 bd_ring = &rx_ring->bd_ring;
  606 
  607                 reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
  608                 hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
  609                 host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
  610                 hw_idx_next = (hw_idx + 1) % bd_ring->len;
  611 
  612                 if (hw_idx_next == host_idx)
  613                         rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);
  614 
  615                 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
  616                             "%d RXD unavailable, idx=0x%08x, len=%d\n",
  617                             i, reg_idx, bd_ring->len);
  618         }
  619 }
  620 
  621 void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
  622                                struct rtw89_pci *rtwpci,
  623                                struct rtw89_pci_isrs *isrs)
  624 {
  625         isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
  626         isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
  627         isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];
  628 
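               /* the HISR registers are write-1-to-clear, so writing the
                * latched status back acknowledges exactly the interrupts we
                * observed
                */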
  629         rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
  630         rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
  631         rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
  632 }
  633 EXPORT_SYMBOL(rtw89_pci_recognize_intrs);
  634 
  635 void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
  636                                   struct rtw89_pci *rtwpci,
  637                                   struct rtw89_pci_isrs *isrs)
  638 {
  639         isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
  640         isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
  641                               rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
  642         isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
  643                         rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
  644         isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
  645                         rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;
  646 
  647         if (isrs->halt_c2h_isrs)
  648                 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
  649         if (isrs->isrs[0])
  650                 rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
  651         if (isrs->isrs[1])
  652                 rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
  653 }
  654 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);
  655 
  656 static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
  657 {
   658         /* the register is write-1-to-clear */
  659         rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
  660 }
  661 
  662 void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
  663 {
  664         rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
  665         rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
  666         rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
  667 }
  668 EXPORT_SYMBOL(rtw89_pci_enable_intr);
  669 
  670 void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
  671 {
  672         rtw89_write32(rtwdev, R_AX_HIMR0, 0);
  673         rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
  674         rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
  675 }
  676 EXPORT_SYMBOL(rtw89_pci_disable_intr);
  677 
  678 void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
  679 {
  680         rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
  681         rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
  682         rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
  683         rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
  684 }
  685 EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);
  686 
  687 void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
  688 {
  689         rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
  690 }
  691 EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);
  692 
  693 static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
  694 {
  695         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
  696         unsigned long flags;
  697 
  698         spin_lock_irqsave(&rtwpci->irq_lock, flags);
  699         rtw89_chip_disable_intr(rtwdev, rtwpci);
  700         rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
  701         rtw89_chip_enable_intr(rtwdev, rtwpci);
  702         spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
  703 }
  704 
  705 static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
  706 {
  707         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
  708         unsigned long flags;
  709 
  710         spin_lock_irqsave(&rtwpci->irq_lock, flags);
  711         rtw89_chip_disable_intr(rtwdev, rtwpci);
  712         rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
  713         rtw89_chip_enable_intr(rtwdev, rtwpci);
  714         spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
  715 }
  716 
  717 static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
  718 {
  719         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
  720         int budget = NAPI_POLL_WEIGHT;
  721 
   722         /* Prevent the RXQ from getting stuck by running out of budget. */
  723         rtwdev->napi_budget_countdown = budget;
  724 
  725         rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
  726         rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
  727 }
  728 
  729 static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
  730 {
  731         struct rtw89_dev *rtwdev = dev;
  732         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
  733         struct rtw89_pci_isrs isrs;
  734         unsigned long flags;
  735 
  736         spin_lock_irqsave(&rtwpci->irq_lock, flags);
  737         rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
  738         spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
  739 
  740         if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
  741                 rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);
  742 
  743         if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
  744                 rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
  745 
  746         if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN))
  747                 rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
  748 
  749         if (unlikely(rtwpci->under_recovery))
  750                 goto enable_intr;
  751 
  752         if (unlikely(rtwpci->low_power)) {
  753                 rtw89_pci_low_power_interrupt_handler(rtwdev);
  754                 goto enable_intr;
  755         }
  756 
  757         if (likely(rtwpci->running)) {
  758                 local_bh_disable();
  759                 napi_schedule(&rtwdev->napi);
  760                 local_bh_enable();
  761         }
  762 
  763         return IRQ_HANDLED;
  764 
  765 enable_intr:
  766         spin_lock_irqsave(&rtwpci->irq_lock, flags);
  767         if (likely(rtwpci->running))
  768                 rtw89_chip_enable_intr(rtwdev, rtwpci);
  769         spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
  770         return IRQ_HANDLED;
  771 }
  772 
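       /* hard-IRQ half: mask further chip interrupts and defer the real work
        * to rtw89_pci_interrupt_threadfn via IRQ_WAKE_THREAD
        */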
  773 static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
  774 {
  775         struct rtw89_dev *rtwdev = dev;
  776         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
  777         unsigned long flags;
  778         irqreturn_t irqret = IRQ_WAKE_THREAD;
  779 
  780         spin_lock_irqsave(&rtwpci->irq_lock, flags);
  781 
   782         /* An interrupt event already in flight can still trigger an
   783          * interrupt even after pci_stop() has turned off the IMR.
   784          */
  785         if (unlikely(!rtwpci->running)) {
  786                 irqret = IRQ_HANDLED;
  787                 goto exit;
  788         }
  789 
  790         rtw89_chip_disable_intr(rtwdev, rtwpci);
  791 exit:
  792         spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
  793 
  794         return irqret;
  795 }
  796 
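       /* Build per-channel DMA register names by token pasting; the variadic
        * v argument is an optional version suffix (e.g. _V1). The TYPE1
        * variant suffixes every register, while DEF_TXCHADDRS leaves the
        * TXBD NUM/IDX registers unsuffixed.
        */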
  797 #define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
  798         [RTW89_TXCH_##txch] = { \
  799                 .num = R_AX_##txch##_TXBD_NUM ##v, \
  800                 .idx = R_AX_##txch##_TXBD_IDX ##v, \
  801                 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
  802                 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
  803                 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
  804         }
  805 
  806 #define DEF_TXCHADDRS(info, txch, v...) \
  807         [RTW89_TXCH_##txch] = { \
  808                 .num = R_AX_##txch##_TXBD_NUM, \
  809                 .idx = R_AX_##txch##_TXBD_IDX, \
  810                 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
  811                 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
  812                 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
  813         }
  814 
  815 #define DEF_RXCHADDRS(info, rxch, v...) \
  816         [RTW89_RXCH_##rxch] = { \
  817                 .num = R_AX_##rxch##_RXBD_NUM ##v, \
  818                 .idx = R_AX_##rxch##_RXBD_IDX ##v, \
  819                 .desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
  820                 .desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
  821         }
  822 
  823 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
  824         .tx = {
  825                 DEF_TXCHADDRS(info, ACH0),
  826                 DEF_TXCHADDRS(info, ACH1),
  827                 DEF_TXCHADDRS(info, ACH2),
  828                 DEF_TXCHADDRS(info, ACH3),
  829                 DEF_TXCHADDRS(info, ACH4),
  830                 DEF_TXCHADDRS(info, ACH5),
  831                 DEF_TXCHADDRS(info, ACH6),
  832                 DEF_TXCHADDRS(info, ACH7),
  833                 DEF_TXCHADDRS(info, CH8),
  834                 DEF_TXCHADDRS(info, CH9),
  835                 DEF_TXCHADDRS_TYPE1(info, CH10),
  836                 DEF_TXCHADDRS_TYPE1(info, CH11),
  837                 DEF_TXCHADDRS(info, CH12),
  838         },
  839         .rx = {
  840                 DEF_RXCHADDRS(info, RXQ),
  841                 DEF_RXCHADDRS(info, RPQ),
  842         },
  843 };
  844 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);
  845 
  846 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
  847         .tx = {
  848                 DEF_TXCHADDRS(info, ACH0, _V1),
  849                 DEF_TXCHADDRS(info, ACH1, _V1),
  850                 DEF_TXCHADDRS(info, ACH2, _V1),
  851                 DEF_TXCHADDRS(info, ACH3, _V1),
  852                 DEF_TXCHADDRS(info, ACH4, _V1),
  853                 DEF_TXCHADDRS(info, ACH5, _V1),
  854                 DEF_TXCHADDRS(info, ACH6, _V1),
  855                 DEF_TXCHADDRS(info, ACH7, _V1),
  856                 DEF_TXCHADDRS(info, CH8, _V1),
  857                 DEF_TXCHADDRS(info, CH9, _V1),
  858                 DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
  859                 DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
  860                 DEF_TXCHADDRS(info, CH12, _V1),
  861         },
  862         .rx = {
  863                 DEF_RXCHADDRS(info, RXQ, _V1),
  864                 DEF_RXCHADDRS(info, RPQ, _V1),
  865         },
  866 };
  867 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);
  868 
  869 #undef DEF_TXCHADDRS_TYPE1
  870 #undef DEF_TXCHADDRS
  871 #undef DEF_RXCHADDRS
  872 
  873 static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
  874                                     enum rtw89_tx_channel txch,
  875                                     const struct rtw89_pci_ch_dma_addr **addr)
  876 {
  877         const struct rtw89_pci_info *info = rtwdev->pci_info;
  878 
  879         if (txch >= RTW89_TXCH_NUM)
  880                 return -EINVAL;
  881 
  882         *addr = &info->dma_addr_set->tx[txch];
  883 
  884         return 0;
  885 }
  886 
  887 static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
  888                                     enum rtw89_rx_channel rxch,
  889                                     const struct rtw89_pci_ch_dma_addr **addr)
  890 {
  891         const struct rtw89_pci_info *info = rtwdev->pci_info;
  892 
  893         if (rxch >= RTW89_RXCH_NUM)
  894                 return -EINVAL;
  895 
  896         *addr = &info->dma_addr_set->rx[rxch];
  897 
  898         return 0;
  899 }
  900 
  901 static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
  902 {
  903         struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;
  904 
   905         /* reserve 1 desc so we can tell whether the ring is full */
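               /* e.g. len=256: wp == rp means 255 BDs free; rp == wp + 1
                * (mod len) means the ring is full and 0 BDs are free
                */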
  906         if (bd_ring->rp > bd_ring->wp)
  907                 return bd_ring->rp - bd_ring->wp - 1;
  908 
  909         return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
  910 }
  911 
  912 static
  913 u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
  914 {
  915         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
  916         struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
  917         u32 cnt;
  918 
  919         spin_lock_bh(&rtwpci->trx_lock);
  920         rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
  921         cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
  922         spin_unlock_bh(&rtwpci->trx_lock);
  923 
  924         return cnt;
  925 }
  926 
  927 static
  928 u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
  929                                                    u8 txch)
  930 {
  931         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
  932         struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
  933         struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
  934         u32 cnt;
  935 
  936         spin_lock_bh(&rtwpci->trx_lock);
  937         cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
  938         cnt = min(cnt, wd_ring->curr_num);
  939         spin_unlock_bh(&rtwpci->trx_lock);
  940 
  941         return cnt;
  942 }
  943 
  944 static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
  945                                                      u8 txch)
  946 {
  947         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
  948         struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
  949         struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
  950         u32 bd_cnt, wd_cnt, min_cnt = 0;
  951         struct rtw89_pci_rx_ring *rx_ring;
  952         u32 cnt;
  953 
  954         rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
  955 
  956         spin_lock_bh(&rtwpci->trx_lock);
  957         bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
  958         wd_cnt = wd_ring->curr_num;
  959 
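               /* if either TXBDs or WD pages ran out, first try to reclaim
                * them by processing any release reports pending on the RPQ
                */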
  960         if (wd_cnt == 0 || bd_cnt == 0) {
  961                 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
  962                 if (cnt)
  963                         rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
  964                 else if (wd_cnt == 0)
  965                         goto out_unlock;
  966 
  967                 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
  968                 if (bd_cnt == 0)
  969                         rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
  970         }
  971 
  972         bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
  973         wd_cnt = wd_ring->curr_num;
  974         min_cnt = min(bd_cnt, wd_cnt);
  975         if (min_cnt == 0)
  976                 rtw89_debug(rtwdev, rtwpci->low_power ? RTW89_DBG_TXRX : RTW89_DBG_UNEXP,
  977                             "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
  978                             wd_cnt, bd_cnt);
  979 
  980 out_unlock:
  981         spin_unlock_bh(&rtwpci->trx_lock);
  982 
  983         return min_cnt;
  984 }
  985 
  986 static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
  987                                                    u8 txch)
  988 {
  989         if (rtwdev->hci.paused)
  990                 return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);
  991 
  992         if (txch == RTW89_TXCH_CH12)
  993                 return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);
  994 
  995         return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
  996 }
  997 
  998 static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
  999 {
 1000         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1001         struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
 1002         u32 host_idx, addr;
 1003 
 1004         spin_lock_bh(&rtwpci->trx_lock);
 1005 
 1006         addr = bd_ring->addr.idx;
 1007         host_idx = bd_ring->wp;
 1008         rtw89_write16(rtwdev, addr, host_idx);
 1009 
 1010         spin_unlock_bh(&rtwpci->trx_lock);
 1011 }
 1012 
 1013 static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
 1014                                         int n_txbd)
 1015 {
 1016         struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
 1017         u32 host_idx, len;
 1018 
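               /* advance the software write pointer by n_txbd, wrapping at
                * the ring length (i.e. (wp + n_txbd) % len)
                */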
 1019         len = bd_ring->len;
 1020         host_idx = bd_ring->wp + n_txbd;
 1021         host_idx = host_idx < len ? host_idx : host_idx - len;
 1022 
 1023         bd_ring->wp = host_idx;
 1024 }
 1025 
 1026 static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
 1027 {
 1028         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1029         struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
 1030 
 1031         if (rtwdev->hci.paused) {
 1032                 set_bit(txch, rtwpci->kick_map);
 1033                 return;
 1034         }
 1035 
 1036         __rtw89_pci_tx_kick_off(rtwdev, tx_ring);
 1037 }
 1038 
 1039 static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
 1040 {
 1041         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1042         struct rtw89_pci_tx_ring *tx_ring;
 1043         int txch;
 1044 
 1045         for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
 1046                 if (!test_and_clear_bit(txch, rtwpci->kick_map))
 1047                         continue;
 1048 
 1049                 tx_ring = &rtwpci->tx_rings[txch];
 1050                 __rtw89_pci_tx_kick_off(rtwdev, tx_ring);
 1051         }
 1052 }
 1053 
 1054 static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
 1055 {
 1056         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1057         struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
 1058         struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
 1059         u32 cur_idx, cur_rp;
 1060         u8 i;
 1061 
  1062         /* Because the time taken by the I/O is somewhat dynamic, it's hard
  1063          * to define a reasonable fixed total timeout for the
  1064          * read_poll_timeout* helpers. Instead, we bound the number of polls,
  1065          * so we just use a for loop with udelay here.
  1066          */
 1067         for (i = 0; i < 60; i++) {
 1068                 cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
 1069                 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
 1070                 if (cur_rp == bd_ring->wp)
 1071                         return;
 1072 
 1073                 udelay(1);
 1074         }
 1075 
 1076         if (!drop)
 1077                 rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch);
 1078 }
 1079 
 1080 static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
 1081                                         bool drop)
 1082 {
 1083         u8 i;
 1084 
 1085         for (i = 0; i < RTW89_TXCH_NUM; i++) {
  1086                 /* It may be unnecessary to flush the FWCMD queue. */
 1087                 if (i == RTW89_TXCH_CH12)
 1088                         continue;
 1089 
 1090                 if (txchs & BIT(i))
 1091                         __pci_flush_txch(rtwdev, i, drop);
 1092         }
 1093 }
 1094 
 1095 static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
 1096                                        bool drop)
 1097 {
 1098         __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
 1099 }
 1100 
 1101 u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
 1102                                void *txaddr_info_addr, u32 total_len,
 1103                                dma_addr_t dma, u8 *add_info_nr)
 1104 {
 1105         struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
 1106 
 1107         txaddr_info->length = cpu_to_le16(total_len);
 1108         txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
 1109                                           RTW89_PCI_ADDR_NUM(1));
 1110         txaddr_info->dma = cpu_to_le32(dma);
 1111 
 1112         *add_info_nr = 1;
 1113 
 1114         return sizeof(*txaddr_info);
 1115 }
 1116 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);
 1117 
 1118 u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
 1119                                   void *txaddr_info_addr, u32 total_len,
 1120                                   dma_addr_t dma, u8 *add_info_nr)
 1121 {
 1122         struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
 1123         u32 remain = total_len;
 1124         u32 len;
 1125         u16 length_option;
 1126         int n;
 1127 
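               /* split the buffer into at most RTW89_TXADDR_INFO_NR_V1
                * segments of up to TXADDR_INFO_LENTHG_V1_MAX bytes each; the
                * LS bit marks the final segment
                */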
 1128         for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
 1129                 len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
 1130                       TXADDR_INFO_LENTHG_V1_MAX : remain;
 1131                 remain -= len;
 1132 
 1133                 length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
 1134                                 FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
 1135                                 FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
 1136                 txaddr_info->length_opt = cpu_to_le16(length_option);
 1137                 txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
 1138                 txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));
 1139 
 1140                 dma += len;
 1141                 txaddr_info++;
 1142         }
 1143 
 1144         WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
 1145                   remain, total_len);
 1146 
 1147         *add_info_nr = n;
 1148 
 1149         return n * sizeof(*txaddr_info);
 1150 }
 1151 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
 1152 
 1153 static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
 1154                                  struct rtw89_pci_tx_ring *tx_ring,
 1155                                  struct rtw89_pci_tx_wd *txwd,
 1156                                  struct rtw89_core_tx_request *tx_req)
 1157 {
 1158         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1159         const struct rtw89_chip_info *chip = rtwdev->chip;
 1160         struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
 1161         struct rtw89_txwd_info *txwd_info;
 1162         struct rtw89_pci_tx_wp_info *txwp_info;
 1163         void *txaddr_info_addr;
 1164         struct pci_dev *pdev = rtwpci->pdev;
 1165         struct sk_buff *skb = tx_req->skb;
 1166         struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
 1167         bool en_wd_info = desc_info->en_wd_info;
 1168         u32 txwd_len;
 1169         u32 txwp_len;
 1170         u32 txaddr_info_len;
 1171         dma_addr_t dma;
 1172         int ret;
 1173 
 1174         dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
 1175         if (dma_mapping_error(&pdev->dev, dma)) {
 1176                 rtw89_err(rtwdev, "failed to map skb dma data\n");
 1177                 ret = -EBUSY;
 1178                 goto err;
 1179         }
 1180 
 1181         tx_data->dma = dma;
 1182 
 1183         txwp_len = sizeof(*txwp_info);
 1184         txwd_len = chip->txwd_body_size;
 1185         txwd_len += en_wd_info ? sizeof(*txwd_info) : 0;
 1186 
 1187 #if defined(__linux__)
 1188         txwp_info = txwd->vaddr + txwd_len;
 1189 #elif defined(__FreeBSD__)
 1190         txwp_info = (struct rtw89_pci_tx_wp_info *)((u8 *)txwd->vaddr + txwd_len);
 1191 #endif
 1192         txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
 1193         txwp_info->seq1 = 0;
 1194         txwp_info->seq2 = 0;
 1195         txwp_info->seq3 = 0;
 1196 
 1197         tx_ring->tx_cnt++;
 1198 #if defined(__linux__)
 1199         txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
 1200 #elif defined(__FreeBSD__)
 1201         txaddr_info_addr = (u8 *)txwd->vaddr + txwd_len + txwp_len;
 1202 #endif
 1203         txaddr_info_len =
 1204                 rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
 1205                                             dma, &desc_info->addr_info_nr);
 1206 
 1207         txwd->len = txwd_len + txwp_len + txaddr_info_len;
 1208 
 1209         rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
 1210 
 1211         skb_queue_tail(&txwd->queue, skb);
 1212 
 1213         return 0;
 1214 
 1215 err:
 1216         return ret;
 1217 }
 1218 
 1219 static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
 1220                                   struct rtw89_pci_tx_ring *tx_ring,
 1221                                   struct rtw89_pci_tx_bd_32 *txbd,
 1222                                   struct rtw89_core_tx_request *tx_req)
 1223 {
 1224         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1225         const struct rtw89_chip_info *chip = rtwdev->chip;
 1226         struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
 1227         void *txdesc;
 1228         int txdesc_size = chip->h2c_desc_size;
 1229         struct pci_dev *pdev = rtwpci->pdev;
 1230         struct sk_buff *skb = tx_req->skb;
 1231         struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
 1232         dma_addr_t dma;
 1233 
 1234         txdesc = skb_push(skb, txdesc_size);
 1235         memset(txdesc, 0, txdesc_size);
 1236         rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
 1237 
 1238         dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
 1239         if (dma_mapping_error(&pdev->dev, dma)) {
 1240                 rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
 1241                 return -EBUSY;
 1242         }
 1243 
 1244         tx_data->dma = dma;
 1245         txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
 1246         txbd->length = cpu_to_le16(skb->len);
 1247         txbd->dma = cpu_to_le32(tx_data->dma);
 1248         skb_queue_tail(&rtwpci->h2c_queue, skb);
 1249 
 1250         rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
 1251 
 1252         return 0;
 1253 }
 1254 
 1255 static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
 1256                                  struct rtw89_pci_tx_ring *tx_ring,
 1257                                  struct rtw89_pci_tx_bd_32 *txbd,
 1258                                  struct rtw89_core_tx_request *tx_req)
 1259 {
 1260         struct rtw89_pci_tx_wd *txwd;
 1261         int ret;
 1262 
 1263         /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
 1264          * buffer with WD BODY only. So here we don't need to check the free
 1265          * pages of the wd ring.
 1266          */
 1267         if (tx_ring->txch == RTW89_TXCH_CH12)
 1268                 return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);
 1269 
 1270         txwd = rtw89_pci_dequeue_txwd(tx_ring);
 1271         if (!txwd) {
 1272                 rtw89_err(rtwdev, "no available TXWD\n");
 1273                 ret = -ENOSPC;
 1274                 goto err;
 1275         }
 1276 
 1277         ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
 1278         if (ret) {
 1279                 rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
 1280                 goto err_enqueue_wd;
 1281         }
 1282 
 1283         list_add_tail(&txwd->list, &tx_ring->busy_pages);
 1284 
 1285         txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
 1286         txbd->length = cpu_to_le16(txwd->len);
 1287         txbd->dma = cpu_to_le32(txwd->paddr);
 1288 
 1289         rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
 1290 
 1291         return 0;
 1292 
 1293 err_enqueue_wd:
 1294         rtw89_pci_enqueue_txwd(tx_ring, txwd);
 1295 err:
 1296         return ret;
 1297 }
 1298 
 1299 static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
 1300                               u8 txch)
 1301 {
 1302         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1303         struct rtw89_pci_tx_ring *tx_ring;
 1304         struct rtw89_pci_tx_bd_32 *txbd;
 1305         u32 n_avail_txbd;
 1306         int ret = 0;
 1307 
 1308         /* check the tx type and dma channel for fw cmd queue */
 1309         if ((txch == RTW89_TXCH_CH12 ||
 1310              tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
 1311             (txch != RTW89_TXCH_CH12 ||
 1312              tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
 1313                 rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
 1314                 return -EINVAL;
 1315         }
 1316 
 1317         tx_ring = &rtwpci->tx_rings[txch];
 1318         spin_lock_bh(&rtwpci->trx_lock);
 1319 
 1320         n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
 1321         if (n_avail_txbd == 0) {
 1322                 rtw89_err(rtwdev, "no available TXBD\n");
 1323                 ret = -ENOSPC;
 1324                 goto err_unlock;
 1325         }
 1326 
 1327         txbd = rtw89_pci_get_next_txbd(tx_ring);
 1328         ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
 1329         if (ret) {
 1330                 rtw89_err(rtwdev, "failed to submit TXBD\n");
 1331                 goto err_unlock;
 1332         }
 1333 
 1334         spin_unlock_bh(&rtwpci->trx_lock);
 1335         return 0;
 1336 
 1337 err_unlock:
 1338         spin_unlock_bh(&rtwpci->trx_lock);
 1339         return ret;
 1340 }
 1341 
 1342 static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
 1343 {
 1344         struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
 1345         int ret;
 1346 
 1347         ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
 1348         if (ret) {
 1349                 rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
 1350                 return ret;
 1351         }
 1352 
 1353         return 0;
 1354 }
 1355 
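       /* Per-channel TX BD RAM allocation: start_idx is the first BD RAM
        * cell assigned to the channel, and max_num/min_num presumably bound
        * how many cells the channel may occupy; the values are written
        * verbatim into the *_BDRAM_CTRL register fields below.
        */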
 1356 static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
 1357         [RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
 1358         [RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
 1359         [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
 1360         [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
 1361         [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
 1362         [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
 1363         [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
 1364         [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
 1365         [RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
 1366         [RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
 1367         [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
 1368         [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
 1369         [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
 1370 };
 1371 
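/* Illustrative sketch, not driver code: the table above slices a 64-entry
 * descriptor RAM into contiguous per-channel regions (twelve channels of
 * 5 entries plus 4 for the fw-cmd channel: 12 * 5 + 4 = 64).  A sanity
 * check over that layout could look like this (hypothetical helper).
 */
#if 0
static void example_check_bd_ram_layout(void)
{
        u32 next = 0;
        int i;

        for (i = 0; i < RTW89_TXCH_NUM; i++) {
                WARN_ON(bd_ram_table[i].start_idx != next);
                next += bd_ram_table[i].max_num;
        }
        WARN_ON(next != 64);
}
#endif
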
 1372 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
 1373 {
 1374         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1375         struct rtw89_pci_tx_ring *tx_ring;
 1376         struct rtw89_pci_rx_ring *rx_ring;
 1377         struct rtw89_pci_dma_ring *bd_ring;
 1378         const struct rtw89_pci_bd_ram *bd_ram;
 1379         u32 addr_num;
 1380         u32 addr_bdram;
 1381         u32 addr_desa_l;
 1382         u32 val32;
 1383         int i;
 1384 
 1385         for (i = 0; i < RTW89_TXCH_NUM; i++) {
 1386                 tx_ring = &rtwpci->tx_rings[i];
 1387                 bd_ring = &tx_ring->bd_ring;
 1388                 bd_ram = &bd_ram_table[i];
 1389                 addr_num = bd_ring->addr.num;
 1390                 addr_bdram = bd_ring->addr.bdram;
 1391                 addr_desa_l = bd_ring->addr.desa_l;
 1392                 bd_ring->wp = 0;
 1393                 bd_ring->rp = 0;
 1394 
 1395                 val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
 1396                         FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
 1397                         FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);
 1398 
 1399                 rtw89_write16(rtwdev, addr_num, bd_ring->len);
 1400                 rtw89_write32(rtwdev, addr_bdram, val32);
 1401                 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
 1402         }
 1403 
 1404         for (i = 0; i < RTW89_RXCH_NUM; i++) {
 1405                 rx_ring = &rtwpci->rx_rings[i];
 1406                 bd_ring = &rx_ring->bd_ring;
 1407                 addr_num = bd_ring->addr.num;
 1408                 addr_desa_l = bd_ring->addr.desa_l;
 1409                 bd_ring->wp = 0;
 1410                 bd_ring->rp = 0;
 1411                 rx_ring->diliver_skb = NULL;
 1412                 rx_ring->diliver_desc.ready = false;
 1413 
 1414                 rtw89_write16(rtwdev, addr_num, bd_ring->len);
 1415                 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
 1416         }
 1417 }
 1418 
 1419 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
 1420                                       struct rtw89_pci_tx_ring *tx_ring)
 1421 {
 1422         rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
 1423         rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
 1424 }
 1425 
 1426 static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
 1427 {
 1428         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1429         int txch;
 1430 
 1431         rtw89_pci_reset_trx_rings(rtwdev);
 1432 
 1433         spin_lock_bh(&rtwpci->trx_lock);
 1434         for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
 1435                 if (txch == RTW89_TXCH_CH12) {
 1436                         rtw89_pci_release_fwcmd(rtwdev, rtwpci,
 1437                                                 skb_queue_len(&rtwpci->h2c_queue), true);
 1438                         continue;
 1439                 }
 1440                 rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
 1441         }
 1442         spin_unlock_bh(&rtwpci->trx_lock);
 1443 }
 1444 
 1445 static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
 1446 {
 1447         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1448         unsigned long flags;
 1449 
 1450         spin_lock_irqsave(&rtwpci->irq_lock, flags);
 1451         rtwpci->running = true;
 1452         rtw89_chip_enable_intr(rtwdev, rtwpci);
 1453         spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
 1454 }
 1455 
 1456 static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
 1457 {
 1458         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1459         unsigned long flags;
 1460 
 1461         spin_lock_irqsave(&rtwpci->irq_lock, flags);
 1462         rtwpci->running = false;
 1463         rtw89_chip_disable_intr(rtwdev, rtwpci);
 1464         spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
 1465 }
 1466 
 1467 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
 1468 {
 1469         rtw89_core_napi_start(rtwdev);
 1470         rtw89_pci_enable_intr_lock(rtwdev);
 1471 
 1472         return 0;
 1473 }
 1474 
 1475 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
 1476 {
 1477         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1478         struct pci_dev *pdev = rtwpci->pdev;
 1479 
 1480         rtw89_pci_disable_intr_lock(rtwdev);
 1481         synchronize_irq(pdev->irq);
 1482         rtw89_core_napi_stop(rtwdev);
 1483 }
 1484 
 1485 static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
 1486 {
 1487         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1488         struct pci_dev *pdev = rtwpci->pdev;
 1489 
 1490         if (pause) {
 1491                 rtw89_pci_disable_intr_lock(rtwdev);
 1492                 synchronize_irq(pdev->irq);
 1493                 if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
 1494                         napi_synchronize(&rtwdev->napi);
 1495         } else {
 1496                 rtw89_pci_enable_intr_lock(rtwdev);
 1497                 rtw89_pci_tx_kick_off_pending(rtwdev);
 1498         }
 1499 }
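
/* Expected usage of the pause hook (sketch only; the real caller lives in
 * the core layer).  Pausing disables interrupts, waits out an in-flight
 * IRQ handler, and drains NAPI before shared state is touched; unpausing
 * re-enables interrupts and kicks any TX rings that went pending:
 *
 *      rtw89_pci_ops_pause(rtwdev, true);
 *      ... reconfigure interrupt masks / BD index registers ...
 *      rtw89_pci_ops_pause(rtwdev, false);
 */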
 1500 
 1501 static
 1502 void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
 1503 {
 1504         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1505         const struct rtw89_pci_info *info = rtwdev->pci_info;
 1506         const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
 1507         const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
 1508         struct rtw89_pci_tx_ring *tx_ring;
 1509         struct rtw89_pci_rx_ring *rx_ring;
 1510         int i;
 1511 
 1512         if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
 1513                 return;
 1514 
 1515         for (i = 0; i < RTW89_TXCH_NUM; i++) {
 1516                 tx_ring = &rtwpci->tx_rings[i];
 1517                 tx_ring->bd_ring.addr.idx = low_power ?
 1518                                             bd_idx_addr->tx_bd_addrs[i] :
 1519                                             dma_addr_set->tx[i].idx;
 1520         }
 1521 
 1522         for (i = 0; i < RTW89_RXCH_NUM; i++) {
 1523                 rx_ring = &rtwpci->rx_rings[i];
 1524                 rx_ring->bd_ring.addr.idx = low_power ?
 1525                                             bd_idx_addr->rx_bd_addrs[i] :
 1526                                             dma_addr_set->rx[i].idx;
 1527         }
 1528 }
 1529 
 1530 static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
 1531 {
 1532         enum rtw89_pci_intr_mask_cfg cfg;
 1533 
 1534         WARN(!rtwdev->hci.paused, "HCI isn't paused\n");
 1535 
 1536         cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
 1537         rtw89_chip_config_intr_mask(rtwdev, cfg);
 1538         rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
 1539 }
 1540 
 1541 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
 1542 
 1543 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
 1544 {
 1545         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1546 #if defined(__linux__)
 1547         u32 val = readl(rtwpci->mmap + addr);
 1548 #elif defined(__FreeBSD__)
 1549         u32 val;
 1550 
 1551         val = bus_read_4((struct resource *)rtwpci->mmap, addr);
 1552         rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
 1553 #endif
 1554         int count;
 1555 
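        /* A CMAC register reads back as RTW89_R32_DEAD while its clock
         * domain is gated; force the CMAC clocks on via R_AX_CK_EN and
         * retry, giving up after MAC_REG_POOL_COUNT attempts.
         */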
 1556         for (count = 0; ; count++) {
 1557                 if (val != RTW89_R32_DEAD)
 1558                         return val;
 1559                 if (count >= MAC_REG_POOL_COUNT) {
 1560                         rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
 1561                         return RTW89_R32_DEAD;
 1562                 }
 1563                 rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
 1564 #if defined(__linux__)
 1565                 val = readl(rtwpci->mmap + addr);
 1566 #elif defined(__FreeBSD__)
 1567                 val = bus_read_4((struct resource *)rtwpci->mmap, addr);
 1568                 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
 1569 #endif
 1570         }
 1571 
 1572         return val;
 1573 }
 1574 
 1575 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
 1576 {
 1577         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1578         u32 addr32, val32, shift;
 1579 
 1580         if (!ACCESS_CMAC(addr))
 1581 #if defined(__linux__)
 1582                 return readb(rtwpci->mmap + addr);
 1583 #elif defined(__FreeBSD__)
 1584         {
 1585                 u8 val;
 1586 
 1587                 val = bus_read_1((struct resource *)rtwpci->mmap, addr);
 1588                 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val);
 1589                 return (val);
 1590         }
 1591 #endif
 1592 
 1593         addr32 = addr & ~0x3;
 1594         shift = (addr & 0x3) * 8;
 1595         val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
 1596         return val32 >> shift;
 1597 }
 1598 
 1599 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
 1600 {
 1601         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1602         u32 addr32, val32, shift;
 1603 
 1604         if (!ACCESS_CMAC(addr))
 1605 #if defined(__linux__)
 1606                 return readw(rtwpci->mmap + addr);
 1607 #elif defined(__FreeBSD__)
 1608         {
 1609                 u16 val;
 1610 
 1611                 val = bus_read_2((struct resource *)rtwpci->mmap, addr);
 1612                 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val);
 1613                 return (val);
 1614         }
 1615 #endif
 1616 
 1617         addr32 = addr & ~0x3;
 1618         shift = (addr & 0x3) * 8;
 1619         val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
 1620         return val32 >> shift;
 1621 }
 1622 
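/* Worked example of the sub-word CMAC access above: reading 8 bits at
 * address 0x123406 gives
 *
 *      addr32 = 0x123406 & ~0x3      = 0x123404   (containing dword)
 *      shift  = (0x123406 & 0x3) * 8 = 16         (bit offset in dword)
 *      result = (u8)(val32 >> 16)
 *
 * where truncation to the u8/u16 return type supplies the final mask.
 */
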
 1623 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
 1624 {
 1625         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1626 
 1627         if (!ACCESS_CMAC(addr))
 1628 #if defined(__linux__)
 1629                 return readl(rtwpci->mmap + addr);
 1630 #elif defined(__FreeBSD__)
 1631         {
 1632                 u32 val;
 1633 
 1634                 val = bus_read_4((struct resource *)rtwpci->mmap, addr);
 1635                 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
 1636                 return (val);
 1637         }
 1638 #endif
 1639 
 1640         return rtw89_pci_ops_read32_cmac(rtwdev, addr);
 1641 }
 1642 
 1643 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
 1644 {
 1645         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1646 
 1647 #if defined(__linux__)
 1648         writeb(data, rtwpci->mmap + addr);
 1649 #elif defined(__FreeBSD__)
 1650         rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, data);
 1651         return (bus_write_1((struct resource *)rtwpci->mmap, addr, data));
 1652 #endif
 1653 }
 1654 
 1655 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
 1656 {
 1657         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1658 
 1659 #if defined(__linux__)
 1660         writew(data, rtwpci->mmap + addr);
 1661 #elif defined(__FreeBSD__)
 1662         rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, data);
 1663         return (bus_write_2((struct resource *)rtwpci->mmap, addr, data));
 1664 #endif
 1665 }
 1666 
 1667 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
 1668 {
 1669         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1670 
 1671 #if defined(__linux__)
 1672         writel(data, rtwpci->mmap + addr);
 1673 #elif defined(__FreeBSD__)
 1674         rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, data);
 1675         return (bus_write_4((struct resource *)rtwpci->mmap, addr, data));
 1676 #endif
 1677 }
 1678 
 1679 static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
 1680 {
 1681         enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 1682         const struct rtw89_pci_info *info = rtwdev->pci_info;
 1683         u32 txhci_en = info->txhci_en_bit;
 1684         u32 rxhci_en = info->rxhci_en_bit;
 1685 
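        /* Ordering: the enable path releases the PCIe I/O stop bit before
         * switching the TX/RX HCI engines on, while the disable path halts
         * PCIe I/O first so in-flight transactions drain before the engines
         * go off (8852C drives the AXI-master stop bit instead).
         */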
 1686         if (enable) {
 1687                 if (chip_id != RTL8852C)
 1688                         rtw89_write32_clr(rtwdev, info->dma_stop1_reg,
 1689                                           B_AX_STOP_PCIEIO);
 1690                 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
 1691                                   txhci_en | rxhci_en);
 1692                 if (chip_id == RTL8852C)
 1693                         rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
 1694                                           B_AX_STOP_AXI_MST);
 1695         } else {
 1696                 if (chip_id != RTL8852C)
 1697                         rtw89_write32_set(rtwdev, info->dma_stop1_reg,
 1698                                           B_AX_STOP_PCIEIO);
 1699                 else
 1700                         rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
 1701                                           B_AX_STOP_AXI_MST);
 1702                 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
 1703                                   txhci_en | rxhci_en);
 1704                 if (chip_id == RTL8852C)
 1705                         rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
 1706                                           B_AX_STOP_AXI_MST);
 1707         }
 1708 }
 1709 
 1710 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
 1711 {
 1712         u16 val;
 1713 
 1714         rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);
 1715 
 1716         val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
 1717         switch (speed) {
 1718         case PCIE_PHY_GEN1:
 1719                 if (addr < 0x20)
 1720                         val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
 1721                 else
 1722                         val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
 1723                 break;
 1724         case PCIE_PHY_GEN2:
 1725                 if (addr < 0x20)
 1726                         val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
 1727                 else
 1728                         val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
 1729                 break;
 1730         default:
 1731                 rtw89_err(rtwdev, "[ERR]unsupported PHY speed %d!\n", speed);
 1732                 return -EINVAL;
 1733         }
 1734         rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
 1735         rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);
 1736 
 1737         return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
 1738                                  false, rtwdev, R_AX_MDIO_CFG);
 1739 }
 1740 
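/* The helper above implements the indirect MDIO handshake used by every
 * PHY register accessor below: write the 5-bit register address into
 * R_AX_MDIO_CFG, select the page for the requested PHY rate (addresses
 * >= 0x20 live on page 1), set the read or write flag bit, then poll
 * until the hardware clears that flag to signal completion.
 */
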
 1741 static int
 1742 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
 1743 {
 1744         int ret;
 1745 
 1746         ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
 1747         if (ret) {
 1748                 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
 1749                 return ret;
 1750         }
 1751         *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);
 1752 
 1753         return 0;
 1754 }
 1755 
 1756 static int
 1757 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
 1758 {
 1759         int ret;
 1760 
 1761         rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
 1762         ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
 1763         if (ret) {
 1764                 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
 1765                 return ret;
 1766         }
 1767 
 1768         return 0;
 1769 }
 1770 
 1771 static int
 1772 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
 1773 {
 1774         u32 shift;
 1775         int ret;
 1776         u16 val;
 1777 
 1778         ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
 1779         if (ret)
 1780                 return ret;
 1781 
 1782         shift = __ffs(mask);
 1783         val &= ~mask;
 1784         val |= ((data << shift) & mask);
 1785 
 1786         ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
 1787         if (ret)
 1788                 return ret;
 1789 
 1790         return 0;
 1791 }
 1792 
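/* Worked example of the masked field update above: with mask = 0x00f0
 * (so __ffs(mask) = 4) and data = 0x5, a current value of 0xabcd becomes
 *
 *      (0xabcd & ~0x00f0) | ((0x5 << 4) & 0x00f0) = 0xab0d | 0x0050 = 0xab5d
 */
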
 1793 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
 1794 {
 1795         int ret;
 1796         u16 val;
 1797 
 1798         ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
 1799         if (ret)
 1800                 return ret;
 1801         ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
 1802         if (ret)
 1803                 return ret;
 1804 
 1805         return 0;
 1806 }
 1807 
 1808 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
 1809 {
 1810         int ret;
 1811         u16 val;
 1812 
 1813         ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
 1814         if (ret)
 1815                 return ret;
 1816         ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
 1817         if (ret)
 1818                 return ret;
 1819 
 1820         return 0;
 1821 }
 1822 
 1823 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
 1824                                        u8 data)
 1825 {
 1826         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1827         struct pci_dev *pdev = rtwpci->pdev;
 1828 
 1829         return pci_write_config_byte(pdev, addr, data);
 1830 }
 1831 
 1832 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
 1833                                       u8 *value)
 1834 {
 1835         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 1836         struct pci_dev *pdev = rtwpci->pdev;
 1837 
 1838         return pci_read_config_byte(pdev, addr, value);
 1839 }
 1840 
 1841 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
 1842                                      u8 bit)
 1843 {
 1844         u8 value;
 1845         int ret;
 1846 
 1847         ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
 1848         if (ret)
 1849                 return ret;
 1850 
 1851         value |= bit;
 1852         ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
 1853 
 1854         return ret;
 1855 }
 1856 
 1857 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
 1858                                      u8 bit)
 1859 {
 1860         u8 value;
 1861         int ret;
 1862 
 1863         ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
 1864         if (ret)
 1865                 return ret;
 1866 
 1867         value &= ~bit;
 1868         ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
 1869 
 1870         return ret;
 1871 }
 1872 
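/* Typical pairing of the config-space helpers above (sketch only); the
 * refclk calibration below performs the same save/restore by hand on
 * RTW89_PCIE_L1_CTRL:
 *
 *      ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
 *                                      RTW89_PCIE_BIT_L1);
 *      ... critical section with L1 parked ...
 *      ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
 *                                      RTW89_PCIE_BIT_L1);
 */
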
 1873 static int
 1874 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
 1875 {
 1876         u16 val, tar;
 1877         int ret;
 1878 
 1879         /* Enable counter */
 1880         ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
 1881         if (ret)
 1882                 return ret;
 1883         ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
 1884                                  phy_rate);
 1885         if (ret)
 1886                 return ret;
 1887         ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
 1888                                  phy_rate);
 1889         if (ret)
 1890                 return ret;
 1891 
 1892         fsleep(300);
 1893 
 1894         ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
 1895         if (ret)
 1896                 return ret;
 1897         ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
 1898                                  phy_rate);
 1899         if (ret)
 1900                 return ret;
 1901 
 1902         tar = tar & 0x0FFF;
 1903         if (tar == 0 || tar == 0x0FFF) {
 1904                 rtw89_err(rtwdev, "[ERR]Get target failed.\n");
 1905                 return -EINVAL;
 1906         }
 1907 
 1908         *target = tar;
 1909 
 1910         return 0;
 1911 }
 1912 
 1913 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
 1914 {
 1915         enum rtw89_pcie_phy phy_rate;
 1916         u16 val16, mgn_set, div_set, tar;
 1917         u8 val8, bdr_ori;
 1918         bool l1_flag = false;
 1919         int ret = 0;
 1920 
 1921         if (rtwdev->chip->chip_id != RTL8852B)
 1922                 return 0;
 1923 
 1924         ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
 1925         if (ret) {
 1926                 rtw89_err(rtwdev, "[ERR]pci config read %X\n",
 1927                           RTW89_PCIE_PHY_RATE);
 1928                 return ret;
 1929         }
 1930 
 1931         if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
 1932                 phy_rate = PCIE_PHY_GEN1;
 1933         } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
 1934                 phy_rate = PCIE_PHY_GEN2;
 1935         } else {
 1936                 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not supported\n", val8);
 1937                 return -EOPNOTSUPP;
 1938         }
 1939         /* Disable L1BD */
 1940         ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
 1941         if (ret) {
 1942                 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
 1943                 return ret;
 1944         }
 1945 
 1946         if (bdr_ori & RTW89_PCIE_BIT_L1) {
 1947                 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
 1948                                                   bdr_ori & ~RTW89_PCIE_BIT_L1);
 1949                 if (ret) {
 1950                         rtw89_err(rtwdev, "[ERR]pci config write %X\n",
 1951                                   RTW89_PCIE_L1_CTRL);
 1952                         return ret;
 1953                 }
 1954                 l1_flag = true;
 1955         }
 1956 
 1957         ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
 1958         if (ret) {
 1959                 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
 1960                 goto end;
 1961         }
 1962 
 1963         if (val16 & B_AX_CALIB_EN) {
 1964                 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
 1965                                          val16 & ~B_AX_CALIB_EN, phy_rate);
 1966                 if (ret) {
 1967                         rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
 1968                         goto end;
 1969                 }
 1970         }
 1971 
 1972         if (!autook_en)
 1973                 goto end;
 1974         /* Set div */
 1975         ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
 1976         if (ret) {
 1977                 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
 1978                 goto end;
 1979         }
 1980 
 1981         /* Obtain div and margin */
 1982         ret = __get_target(rtwdev, &tar, phy_rate);
 1983         if (ret) {
 1984                 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
 1985                 goto end;
 1986         }
 1987 
 1988         mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
 1989 
 1990         if (mgn_set >= 128) {
 1991                 div_set = 0x0003;
 1992                 mgn_set = 0x000F;
 1993         } else if (mgn_set >= 64) {
 1994                 div_set = 0x0003;
 1995                 mgn_set >>= 3;
 1996         } else if (mgn_set >= 32) {
 1997                 div_set = 0x0002;
 1998                 mgn_set >>= 2;
 1999         } else if (mgn_set >= 16) {
 2000                 div_set = 0x0001;
 2001                 mgn_set >>= 1;
 2002         } else if (mgn_set == 0) {
 2003                 rtw89_err(rtwdev, "[ERR]cal mgn is 0, tar = %d\n", tar);
 2004                 goto end;
 2005         } else {
 2006                 div_set = 0x0000;
 2007         }
 2008 
 2009         ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
 2010         if (ret) {
 2011                 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
 2012                 goto end;
 2013         }
 2014 
 2015         val16 |= u16_encode_bits(div_set, B_AX_DIV);
 2016 
 2017         ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
 2018         if (ret) {
 2019                 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
 2020                 goto end;
 2021         }
 2022 
 2023         ret = __get_target(rtwdev, &tar, phy_rate);
 2024         if (ret) {
 2025                 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
 2026                 goto end;
 2027         }
 2028 
 2029         rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
 2030                     tar, div_set, mgn_set);
 2031         ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
 2032                                  (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
 2033         if (ret) {
 2034                 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
 2035                 goto end;
 2036         }
 2037 
 2038         /* Enable function */
 2039         ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
 2040         if (ret) {
 2041                 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
 2042                 goto end;
 2043         }
 2044 
 2045         /* CLK delay = 0 */
 2046         ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
 2047                                           PCIE_CLKDLY_HW_0);
 2048 
 2049 end:
 2050         /* Restore L1BD to its original setting */
 2051         if (l1_flag) {
 2052                 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
 2053                                                   bdr_ori);
 2054                 if (ret) {
 2055                         rtw89_err(rtwdev, "[ERR]pci config write %X\n",
 2056                                   RTW89_PCIE_L1_CTRL);
 2057                         return ret;
 2058                 }
 2059         }
 2060 
 2061         return ret;
 2062 }
 2063 
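/* Worked example of the margin/divider ladder above: if the first
 * __get_target() read yields mgn_set = 40, the ladder selects
 * div_set = 0x2 and scales the margin by the matching power of two
 * (40 >> 2 = 10); the RAC_SET_PPR_V1 write then packs the 12-bit target
 * and 4-bit margin as (tar & 0x0fff) | (mgn_set << 12).
 */
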
 2064 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
 2065 {
 2066         enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 2067         int ret;
 2068 
 2069         if (chip_id == RTL8852A) {
 2070                 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
 2071                                              PCIE_PHY_GEN1);
 2072                 if (ret)
 2073                         return ret;
 2074                 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
 2075                                              PCIE_PHY_GEN2);
 2076                 if (ret)
 2077                         return ret;
 2078         } else if (chip_id == RTL8852C) {
 2079                 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2,
 2080                                   B_AX_DEGLITCH);
 2081                 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2,
 2082                                   B_AX_DEGLITCH);
 2083         }
 2084 
 2085         return 0;
 2086 }
 2087 
 2088 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
 2089 {
 2090         if (rtwdev->chip->chip_id != RTL8852A)
 2091                 return;
 2092 
 2093         rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
 2094 }
 2095 
 2096 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
 2097 {
 2098         if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B)
 2099                 return;
 2100 
 2101         rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
 2102 }
 2103 
 2104 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
 2105 {
 2106         int ret;
 2107 
 2108         if (rtwdev->chip->chip_id != RTL8852A)
 2109                 return 0;
 2110 
 2111         ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
 2112                                      PCIE_PHY_GEN1);
 2113         if (ret)
 2114                 return ret;
 2115 
 2116         ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
 2117                                      PCIE_PHY_GEN2);
 2118         if (ret)
 2119                 return ret;
 2120 
 2121         return 0;
 2122 }
 2123 
 2124 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
 2125 {
 2126         if (rtwdev->chip->chip_id != RTL8852A)
 2127                 return;
 2128 
 2129         rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
 2130 }
 2131 
 2132 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
 2133 {
 2134         if (rtwdev->chip->chip_id == RTL8852A ||
 2135             rtwdev->chip->chip_id == RTL8852B) {
 2136                 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
 2137                                   B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
 2138                 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
 2139                                   B_AX_PCIE_DIS_WLSUS_AFT_PDN);
 2140         } else if (rtwdev->chip->chip_id == RTL8852C) {
 2141                 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
 2142                                   B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
 2143         }
 2144 }
 2145 
 2146 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
 2147 {
 2148         if (rtwdev->chip->chip_id != RTL8852B)
 2149                 return 0;
 2150 
 2151         return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
 2152                                        PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
 2153 }
 2154 
 2155 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
 2156 {
 2157         if (pwr_up)
 2158                 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
 2159         else
 2160                 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
 2161 }
 2162 
 2163 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev)
 2164 {
 2165         if (rtwdev->chip->chip_id != RTL8852C)
 2166                 return;
 2167 
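        /* Pulse the async-M3 clear bit: set it, then clear it again. */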
 2168         rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
 2169         rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
 2170 }
 2171 
 2172 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev)
 2173 {
 2174         if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
 2175                 return;
 2176 
 2177         rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT);
 2178 }
 2179 
 2180 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev)
 2181 {
 2182         if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
 2183                 return;
 2184 
 2185         rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2,
 2186                           B_AX_SYSON_DIS_PMCR_AX_WRMSK);
 2187         rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3);
 2188         rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2,
 2189                           B_AX_SYSON_DIS_PMCR_AX_WRMSK);
 2190 }
 2191 
 2192 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev)
 2193 {
 2194         if (rtwdev->chip->chip_id != RTL8852C)
 2195                 return;
 2196 
 2197         rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1);
 2198 }
 2199 
 2200 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev)
 2201 {
 2202         if (rtwdev->chip->chip_id != RTL8852C)
 2203                 return;
 2204 
 2205         rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN);
 2206 }
 2207 
 2208 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
 2209 {
 2210         if (rtwdev->chip->chip_id == RTL8852C)
 2211                 return;
 2212 
 2213         rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL,
 2214                           B_AX_SIC_EN_FORCE_CLKREQ);
 2215 }
 2216 
 2217 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev)
 2218 {
 2219         const struct rtw89_pci_info *info = rtwdev->pci_info;
 2220         u32 lbc;
 2221 
 2222         if (rtwdev->chip->chip_id == RTL8852C)
 2223                 return;
 2224 
 2225         lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
 2226         if (info->lbc_en == MAC_AX_PCIE_ENABLE) {
 2227                 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER);
 2228                 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
 2229                 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
 2230         } else {
 2231                 lbc &= ~B_AX_LBC_EN;
 2232         }
 2233         rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc);
 2234 }
 2235 
 2236 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev)
 2237 {
 2238         const struct rtw89_pci_info *info = rtwdev->pci_info;
 2239         u32 val32;
 2240 
 2241         if (rtwdev->chip->chip_id != RTL8852C)
 2242                 return;
 2243 
 2244         if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
 2245                 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK,
 2246                                    info->io_rcy_tmr);
 2247                 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32);
 2248                 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32);
 2249                 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32);
 2250 
 2251                 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
 2252                 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
 2253                 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
 2254         } else {
 2255                 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
 2256                 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
 2257                 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
 2258         }
 2259 
 2260         rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1);
 2261 }
 2262 
 2263 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
 2264 {
 2265         if (rtwdev->chip->chip_id == RTL8852C)
 2266                 return;
 2267 
 2268         rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL,
 2269                           B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG);
 2270 
 2271         if (rtwdev->chip->chip_id == RTL8852A)
 2272                 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL,
 2273                                   B_AX_EN_CHKDSC_NO_RX_STUCK);
 2274 }
 2275 
 2276 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev)
 2277 {
 2278         if (rtwdev->chip->chip_id == RTL8852C)
 2279                 return;
 2280 
 2281         rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
 2282                           B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
 2283 }
 2284 
 2285 static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev)
 2286 {
 2287         const struct rtw89_pci_info *info = rtwdev->pci_info;
 2288         enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 2289         u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
 2290                   B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
 2291                   B_AX_CLR_CH12_IDX;
 2292         u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg;
 2293         u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg;
 2294 
 2295         if (chip_id == RTL8852A || chip_id == RTL8852C)
 2296                 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
 2297                        B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
 2298         /* clear DMA indexes */
 2299         rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
 2300         if (chip_id == RTL8852A || chip_id == RTL8852C)
 2301                 rtw89_write32_set(rtwdev, txbd_rwptr_clr2,
 2302                                   B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
 2303         rtw89_write32_set(rtwdev, rxbd_rwptr_clr,
 2304                           B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
 2305 }
 2306 
 2307 static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
 2308 {
 2309         const struct rtw89_pci_info *info = rtwdev->pci_info;
 2310         u32 ret, check, dma_busy;
 2311         u32 dma_busy1 = info->dma_busy1_reg;
 2312         u32 dma_busy2 = info->dma_busy2_reg;
 2313 
 2314         check = B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY |
 2315                 B_AX_ACH3_BUSY | B_AX_ACH4_BUSY | B_AX_ACH5_BUSY |
 2316                 B_AX_ACH6_BUSY | B_AX_ACH7_BUSY | B_AX_CH8_BUSY |
 2317                 B_AX_CH9_BUSY | B_AX_CH12_BUSY;
 2318 
 2319         ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
 2320                                 10, 100, false, rtwdev, dma_busy1);
 2321         if (ret)
 2322                 return ret;
 2323 
 2324         check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
 2325 
 2326         ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
 2327                                 10, 100, false, rtwdev, dma_busy2);
 2328         if (ret)
 2329                 return ret;
 2330 
 2331         return 0;
 2332 }
 2333 
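/* read_poll_timeout(rtw89_read32, dma_busy, cond, 10, 100, false, ...)
 * above re-reads the busy register every 10 us for at most 100 us and
 * returns 0 as soon as the condition holds, or -ETIMEDOUT if any of the
 * checked DMA channels is still busy when the budget runs out.
 */
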
 2334 static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
 2335 {
 2336         const struct rtw89_pci_info *info = rtwdev->pci_info;
 2337         u32 ret, check, dma_busy;
 2338         u32 dma_busy3 = info->dma_busy3_reg;
 2339 
 2340         check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY;
 2341 
 2342         ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
 2343                                 10, 100, false, rtwdev, dma_busy3);
 2344         if (ret)
 2345                 return ret;
 2346 
 2347         return 0;
 2348 }
 2349 
 2350 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
 2351 {
 2352         u32 ret;
 2353 
 2354         ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev);
 2355         if (ret) {
 2356                 rtw89_err(rtwdev, "txdma ch busy\n");
 2357                 return ret;
 2358         }
 2359 
 2360         ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev);
 2361         if (ret) {
 2362                 rtw89_err(rtwdev, "rxdma ch busy\n");
 2363                 return ret;
 2364         }
 2365 
 2366         return 0;
 2367 }
 2368 
 2369 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
 2370 {
 2371         const struct rtw89_pci_info *info = rtwdev->pci_info;
 2372         enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode;
 2373         enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode;
 2374         enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode;
 2375         enum mac_ax_tag_mode tag_mode = info->tag_mode;
 2376         enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl;
 2377         enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl;
 2378         enum mac_ax_tx_burst tx_burst = info->tx_burst;
 2379         enum mac_ax_rx_burst rx_burst = info->rx_burst;
 2380         enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 2381         u8 cv = rtwdev->hal.cv;
 2382         u32 val32;
 2383 
 2384         if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
 2385                 if (chip_id == RTL8852A && cv == CHIP_CBV)
 2386                         rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
 2387         } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
 2388                 if (chip_id == RTL8852A || chip_id == RTL8852B)
 2389                         rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
 2390         }
 2391 
 2392         if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) {
 2393                 if (chip_id == RTL8852A && cv == CHIP_CBV)
 2394                         rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
 2395         } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) {
 2396                 if (chip_id == RTL8852A || chip_id == RTL8852B)
 2397                         rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
 2398         }
 2399 
 2400         if (rxbd_mode == MAC_AX_RXBD_PKT) {
 2401                 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
 2402         } else if (rxbd_mode == MAC_AX_RXBD_SEP) {
 2403                 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
 2404 
 2405                 if (chip_id == RTL8852A || chip_id == RTL8852B)
 2406                         rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2,
 2407                                            B_AX_PCIE_RX_APPLEN_MASK, 0);
 2408         }
 2409 
 2410         if (chip_id == RTL8852A || chip_id == RTL8852B) {
 2411                 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
 2412                 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
 2413         } else if (chip_id == RTL8852C) {
 2414                 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst);
 2415                 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
 2416         }
 2417 
 2418         if (chip_id == RTL8852A || chip_id == RTL8852B) {
 2419                 if (tag_mode == MAC_AX_TAG_SGL) {
 2420                         val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
 2421                                             ~B_AX_LATENCY_CONTROL;
 2422                         rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
 2423                 } else if (tag_mode == MAC_AX_TAG_MULTI) {
 2424                         val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) |
 2425                                             B_AX_LATENCY_CONTROL;
 2426                         rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
 2427                 }
 2428         }
 2429 
 2430         rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
 2431                            info->multi_tag_num);
 2432 
 2433         if (chip_id == RTL8852A || chip_id == RTL8852B) {
 2434                 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
 2435                                    wd_dma_idle_intvl);
 2436                 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
 2437                                    wd_dma_act_intvl);
 2438         } else if (chip_id == RTL8852C) {
 2439                 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK,
 2440                                    wd_dma_idle_intvl);
 2441                 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK,
 2442                                    wd_dma_act_intvl);
 2443         }
 2444 
 2445         if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
 2446                 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
 2447                                   B_AX_HOST_ADDR_INFO_8B_SEL);
 2448                 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
 2449         } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
 2450                 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
 2451                                   B_AX_HOST_ADDR_INFO_8B_SEL);
 2452                 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
 2453         }
 2454 
 2455         return 0;
 2456 }
 2457 
 2458 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
 2459 {
 2460         const struct rtw89_pci_info *info = rtwdev->pci_info;
 2461 
 2462         if (rtwdev->chip->chip_id == RTL8852A) {
 2463                 /* ltr sw trigger */
 2464                 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
 2465         }
 2466         info->ltr_set(rtwdev, false);
 2467         rtw89_pci_ctrl_dma_all(rtwdev, false);
 2468         rtw89_pci_clr_idx_all(rtwdev);
 2469 
 2470         return 0;
 2471 }
 2472 
 2473 static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
 2474 {
 2475         const struct rtw89_pci_info *info = rtwdev->pci_info;
 2476         int ret;
 2477 
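        /* Bring-up order below matters: apply the per-chip PHY and
         * power-save quirks first, then stop and drain all DMA, clear the
         * BD indexes and reset the BD RAM, and only then re-enable DMA
         * with just the fw-cmd channel (CH12) unstopped so firmware
         * download can proceed.
         */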
 2478         rtw89_pci_rxdma_prefth(rtwdev);
 2479         rtw89_pci_l1off_pwroff(rtwdev);
 2480         rtw89_pci_deglitch_setting(rtwdev);
 2481         ret = rtw89_pci_l2_rxen_lat(rtwdev);
 2482         if (ret) {
 2483                 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
 2484                 return ret;
 2485         }
 2486 
 2487         rtw89_pci_aphy_pwrcut(rtwdev);
 2488         rtw89_pci_hci_ldo(rtwdev);
 2489         rtw89_pci_dphy_delay(rtwdev);
 2490 
 2491         ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
 2492         if (ret) {
 2493                 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
 2494                 return ret;
 2495         }
 2496 
 2497         rtw89_pci_power_wake(rtwdev, true);
 2498         rtw89_pci_autoload_hang(rtwdev);
 2499         rtw89_pci_l12_vmain(rtwdev);
 2500         rtw89_pci_gen2_force_ib(rtwdev);
 2501         rtw89_pci_l1_ent_lat(rtwdev);
 2502         rtw89_pci_wd_exit_l1(rtwdev);
 2503         rtw89_pci_set_sic(rtwdev);
 2504         rtw89_pci_set_lbc(rtwdev);
 2505         rtw89_pci_set_io_rcy(rtwdev);
 2506         rtw89_pci_set_dbg(rtwdev);
 2507         rtw89_pci_set_keep_reg(rtwdev);
 2508 
 2509         rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_STOP_WPDMA);
 2510 
 2511         /* stop DMA activities */
 2512         rtw89_pci_ctrl_dma_all(rtwdev, false);
 2513 
 2514         ret = rtw89_pci_poll_dma_all_idle(rtwdev);
 2515         if (ret) {
 2516                 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
 2517                 return ret;
 2518         }
 2519 
 2520         rtw89_pci_clr_idx_all(rtwdev);
 2521         rtw89_pci_mode_op(rtwdev);
 2522 
 2523         /* fill TRX BD indexes */
 2524         rtw89_pci_ops_reset(rtwdev);
 2525 
 2526         ret = rtw89_pci_rst_bdram_pcie(rtwdev);
 2527         if (ret) {
 2528                 rtw89_warn(rtwdev, "reset bdram busy\n");
 2529                 return ret;
 2530         }
 2531 
 2532         /* enable FW CMD queue to download firmware */
 2533         rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL);
 2534         rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_STOP_CH12);
 2535         rtw89_write32_set(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL);
 2536 
 2537         /* start DMA activities */
 2538         rtw89_pci_ctrl_dma_all(rtwdev, true);
 2539 
 2540         return 0;
 2541 }
 2542 
 2543 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
 2544 {
 2545         u32 val;
 2546 
 2547         if (!en)
 2548                 return 0;
 2549 
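        /* Bail out if any of the LTR registers reads back an error value
         * before programming the thresholds and latencies below.
         */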
 2550         val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
 2551         if (rtw89_pci_ltr_is_err_reg_val(val))
 2552                 return -EINVAL;
 2553         val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
 2554         if (rtw89_pci_ltr_is_err_reg_val(val))
 2555                 return -EINVAL;
 2556         val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY);
 2557         if (rtw89_pci_ltr_is_err_reg_val(val))
 2558                 return -EINVAL;
 2559         val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY);
 2560         if (rtw89_pci_ltr_is_err_reg_val(val))
 2561                 return -EINVAL;
 2562 
 2563         rtw89_write32_clr(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN);
 2564         rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_EN);
 2565         rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
 2566                            PCI_LTR_SPC_500US);
 2567         rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
 2568                            PCI_LTR_IDLE_TIMER_800US);
 2569         rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
 2570         rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
 2571         rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x88e088e0);
 2572         rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
 2573 
 2574         return 0;
 2575 }
 2576 EXPORT_SYMBOL(rtw89_pci_ltr_set);
 2577 
 2578 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en)
 2579 {
 2580         u32 dec_ctrl;
 2581         u32 val32;
 2582 
 2583         val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
 2584         if (rtw89_pci_ltr_is_err_reg_val(val32))
 2585                 return -EINVAL;
 2586         val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
 2587         if (rtw89_pci_ltr_is_err_reg_val(val32))
 2588                 return -EINVAL;
 2589         dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL);
 2590         if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
 2591                 return -EINVAL;
 2592         val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3);
 2593         if (rtw89_pci_ltr_is_err_reg_val(val32))
 2594                 return -EINVAL;
 2595         val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0);
 2596         if (rtw89_pci_ltr_is_err_reg_val(val32))
 2597                 return -EINVAL;
 2598 
 2599         if (!en) {
 2600                 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN);
 2601                 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) |
 2602                             B_AX_LTR_REQ_DRV;
 2603         } else {
 2604                 dec_ctrl |= B_AX_LTR_HW_DEC_EN;
 2605         }
 2606 
 2607         dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK;
 2608         dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US);
 2609 
 2610         if (en)
 2611                 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0,
 2612                                   B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN);
 2613         rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
 2614                            PCI_LTR_IDLE_TIMER_3_2MS);
 2615         rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
 2616         rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
 2617         rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl);
 2618         rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003);
 2619         rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b);
 2620 
 2621         return 0;
 2622 }
 2623 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1);
 2624 
 2625 static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev)
 2626 {
 2627         const struct rtw89_pci_info *info = rtwdev->pci_info;
 2628         enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 2629         int ret;
 2630 
 2631         ret = info->ltr_set(rtwdev, true);
 2632         if (ret) {
 2633                 rtw89_err(rtwdev, "pci ltr set fail\n");
 2634                 return ret;
 2635         }
 2636         if (chip_id == RTL8852A) {
 2637                 /* ltr sw trigger */
 2638                 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
 2639         }
 2640         if (chip_id == RTL8852A || chip_id == RTL8852B) {
 2641                 /* ADDR info 8-byte mode */
 2642                 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
 2643                                   B_AX_HOST_ADDR_INFO_8B_SEL);
 2644                 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
 2645         }
 2646 
 2647         /* enable DMA for all queues */
 2648         rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL);
 2649         rtw89_write32_clr(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL);
 2650 
 2651         /* Release PCI IO */
 2652         rtw89_write32_clr(rtwdev, info->dma_stop1_reg,
 2653                           B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
 2654 
 2655         return 0;
 2656 }
 2657 
 2658 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev,
 2659                                   struct pci_dev *pdev)
 2660 {
 2661         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 2662         int ret;
 2663 
 2664         ret = pci_enable_device(pdev);
 2665         if (ret) {
 2666                 rtw89_err(rtwdev, "failed to enable pci device\n");
 2667                 return ret;
 2668         }
 2669 
 2670         pci_set_master(pdev);
 2671         pci_set_drvdata(pdev, rtwdev->hw);
 2672 
 2673         rtwpci->pdev = pdev;
 2674 
 2675         return 0;
 2676 }
 2677 
 2678 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
 2679                                      struct pci_dev *pdev)
 2680 {
 2681         pci_clear_master(pdev);
 2682         pci_disable_device(pdev);
 2683 }
 2684 
 2685 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
 2686                                    struct pci_dev *pdev)
 2687 {
 2688         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 2689         unsigned long resource_len;
 2690         u8 bar_id = 2;
 2691         int ret;
 2692 
 2693         ret = pci_request_regions(pdev, KBUILD_MODNAME);
 2694         if (ret) {
 2695                 rtw89_err(rtwdev, "failed to request pci regions\n");
 2696                 goto err;
 2697         }
 2698 
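        /* Restrict both the streaming and the coherent DMA masks to
         * 32 bits; the descriptor fields used in this file (e.g.
         * txbd->dma) only carry 32-bit addresses.
         */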
 2699         ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 2700         if (ret) {
 2701                 rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n");
 2702                 goto err_release_regions;
 2703         }
 2704 
 2705         ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 2706         if (ret) {
 2707                 rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
 2708                 goto err_release_regions;
 2709         }
 2710 
 2711 #if defined(__FreeBSD__)
 2712         linuxkpi_pcim_want_to_use_bus_functions(pdev);
 2713 #endif
 2714         resource_len = pci_resource_len(pdev, bar_id);
 2715         rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
 2716         if (!rtwpci->mmap) {
 2717                 rtw89_err(rtwdev, "failed to map pci io\n");
 2718                 ret = -EIO;
 2719                 goto err_release_regions;
 2720         }
 2721 
 2722         return 0;
 2723 
 2724 err_release_regions:
 2725         pci_release_regions(pdev);
 2726 err:
 2727         return ret;
 2728 }
 2729 
 2730 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev,
 2731                                     struct pci_dev *pdev)
 2732 {
 2733         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 2734 
 2735         if (rtwpci->mmap) {
 2736                 pci_iounmap(pdev, rtwpci->mmap);
 2737                 pci_release_regions(pdev);
 2738         }
 2739 }
 2740 
 2741 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev,
 2742                                       struct pci_dev *pdev,
 2743                                       struct rtw89_pci_tx_ring *tx_ring)
 2744 {
 2745         struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
 2746         u8 *head = wd_ring->head;
 2747         dma_addr_t dma = wd_ring->dma;
 2748         u32 page_size = wd_ring->page_size;
 2749         u32 page_num = wd_ring->page_num;
 2750         u32 ring_sz = page_size * page_num;
 2751 
 2752         dma_free_coherent(&pdev->dev, ring_sz, head, dma);
 2753         wd_ring->head = NULL;
 2754 }
 2755 
 2756 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
 2757                                    struct pci_dev *pdev,
 2758                                    struct rtw89_pci_tx_ring *tx_ring)
 2759 {
 2760         int ring_sz;
 2761         u8 *head;
 2762         dma_addr_t dma;
 2763 
 2764         head = tx_ring->bd_ring.head;
 2765         dma = tx_ring->bd_ring.dma;
 2766         ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
 2767         dma_free_coherent(&pdev->dev, ring_sz, head, dma);
 2768 
 2769         tx_ring->bd_ring.head = NULL;
 2770 }
 2771 
 2772 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
 2773                                     struct pci_dev *pdev)
 2774 {
 2775         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 2776         struct rtw89_pci_tx_ring *tx_ring;
 2777         int i;
 2778 
 2779         for (i = 0; i < RTW89_TXCH_NUM; i++) {
 2780                 tx_ring = &rtwpci->tx_rings[i];
 2781                 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
 2782                 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
 2783         }
 2784 }
 2785 
 2786 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
 2787                                    struct pci_dev *pdev,
 2788                                    struct rtw89_pci_rx_ring *rx_ring)
 2789 {
 2790         struct rtw89_pci_rx_info *rx_info;
 2791         struct sk_buff *skb;
 2792         dma_addr_t dma;
 2793         u32 buf_sz;
 2794         u8 *head;
 2795         int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
 2796         int i;
 2797 
 2798         buf_sz = rx_ring->buf_sz;
 2799         for (i = 0; i < rx_ring->bd_ring.len; i++) {
 2800                 skb = rx_ring->buf[i];
 2801                 if (!skb)
 2802                         continue;
 2803 
 2804                 rx_info = RTW89_PCI_RX_SKB_CB(skb);
 2805                 dma = rx_info->dma;
 2806                 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
 2807                 dev_kfree_skb(skb);
 2808                 rx_ring->buf[i] = NULL;
 2809         }
 2810 
 2811         head = rx_ring->bd_ring.head;
 2812         dma = rx_ring->bd_ring.dma;
 2813         dma_free_coherent(&pdev->dev, ring_sz, head, dma);
 2814 
 2815         rx_ring->bd_ring.head = NULL;
 2816 }
 2817 
 2818 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
 2819                                     struct pci_dev *pdev)
 2820 {
 2821         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 2822         struct rtw89_pci_rx_ring *rx_ring;
 2823         int i;
 2824 
 2825         for (i = 0; i < RTW89_RXCH_NUM; i++) {
 2826                 rx_ring = &rtwpci->rx_rings[i];
 2827                 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
 2828         }
 2829 }
 2830 
 2831 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
 2832                                      struct pci_dev *pdev)
 2833 {
 2834         rtw89_pci_free_rx_rings(rtwdev, pdev);
 2835         rtw89_pci_free_tx_rings(rtwdev, pdev);
 2836 }
 2837 
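      /* Attach one receive buffer to an RX buffer descriptor: DMA-map the
       * skb data for device writes, then record the buffer size and DMA
       * address in both the descriptor and the skb's control block.
       */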
 2838 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
 2839                                 struct rtw89_pci_rx_ring *rx_ring,
 2840                                 struct sk_buff *skb, int buf_sz, u32 idx)
 2841 {
 2842         struct rtw89_pci_rx_info *rx_info;
 2843         struct rtw89_pci_rx_bd_32 *rx_bd;
 2844         dma_addr_t dma;
 2845 
 2846         if (!skb)
 2847                 return -EINVAL;
 2848 
 2849         dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
 2850         if (dma_mapping_error(&pdev->dev, dma))
 2851                 return -EBUSY;
 2852 
 2853         rx_info = RTW89_PCI_RX_SKB_CB(skb);
 2854         rx_bd = RTW89_PCI_RX_BD(rx_ring, idx);
 2855 
 2856         memset(rx_bd, 0, sizeof(*rx_bd));
 2857         rx_bd->buf_size = cpu_to_le16(buf_sz);
 2858         rx_bd->dma = cpu_to_le32(dma);
 2859         rx_info->dma = dma;
 2860 
 2861         return 0;
 2862 }
 2863 
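      /* Allocate the txwd page pool for a TX channel: one coherent DMA
       * block carved into fixed-size pages, each tracked by a struct
       * rtw89_pci_tx_wd and initially queued on the ring's free list.
       */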
 2864 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev,
 2865                                       struct pci_dev *pdev,
 2866                                       struct rtw89_pci_tx_ring *tx_ring,
 2867                                       enum rtw89_tx_channel txch)
 2868 {
 2869         struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
 2870         struct rtw89_pci_tx_wd *txwd;
 2871         dma_addr_t dma;
 2872         dma_addr_t cur_paddr;
 2873         u8 *head;
 2874         u8 *cur_vaddr;
 2875         u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE;
 2876         u32 page_num = RTW89_PCI_TXWD_NUM_MAX;
 2877         u32 ring_sz = page_size * page_num;
 2878         u32 page_offset;
 2879         int i;
 2880 
 2881         /* The FWCMD queue does not use txwd pages */
 2882         if (txch == RTW89_TXCH_CH12)
 2883                 return 0;
 2884 
 2885         head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
 2886         if (!head)
 2887                 return -ENOMEM;
 2888 
 2889         INIT_LIST_HEAD(&wd_ring->free_pages);
 2890         wd_ring->head = head;
 2891         wd_ring->dma = dma;
 2892         wd_ring->page_size = page_size;
 2893         wd_ring->page_num = page_num;
 2894 
 2895         page_offset = 0;
 2896         for (i = 0; i < page_num; i++) {
 2897                 txwd = &wd_ring->pages[i];
 2898                 cur_paddr = dma + page_offset;
 2899                 cur_vaddr = head + page_offset;
 2900 
 2901                 skb_queue_head_init(&txwd->queue);
 2902                 INIT_LIST_HEAD(&txwd->list);
 2903                 txwd->paddr = cur_paddr;
 2904                 txwd->vaddr = cur_vaddr;
 2905                 txwd->len = page_size;
 2906                 txwd->seq = i;
 2907                 rtw89_pci_enqueue_txwd(tx_ring, txwd);
 2908 
 2909                 page_offset += page_size;
 2910         }
 2911 
 2912         return 0;
 2913 }
 2914 
 2915 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
 2916                                    struct pci_dev *pdev,
 2917                                    struct rtw89_pci_tx_ring *tx_ring,
 2918                                    u32 desc_size, u32 len,
 2919                                    enum rtw89_tx_channel txch)
 2920 {
 2921         const struct rtw89_pci_ch_dma_addr *txch_addr;
 2922         int ring_sz = desc_size * len;
 2923         u8 *head;
 2924         dma_addr_t dma;
 2925         int ret;
 2926 
 2927         ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
 2928         if (ret) {
 2929                 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch);
 2930                 goto err;
 2931         }
 2932 
 2933         ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
 2934         if (ret) {
 2935                 rtw89_err(rtwdev, "failed to get address of txch %d\n", txch);
 2936                 goto err_free_wd_ring;
 2937         }
 2938 
 2939         head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
 2940         if (!head) {
 2941                 ret = -ENOMEM;
 2942                 goto err_free_wd_ring;
 2943         }
 2944 
 2945         INIT_LIST_HEAD(&tx_ring->busy_pages);
 2946         tx_ring->bd_ring.head = head;
 2947         tx_ring->bd_ring.dma = dma;
 2948         tx_ring->bd_ring.len = len;
 2949         tx_ring->bd_ring.desc_size = desc_size;
 2950         tx_ring->bd_ring.addr = *txch_addr;
 2951         tx_ring->bd_ring.wp = 0;
 2952         tx_ring->bd_ring.rp = 0;
 2953         tx_ring->txch = txch;
 2954 
 2955         return 0;
 2956 
 2957 err_free_wd_ring:
 2958         rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
 2959 err:
 2960         return ret;
 2961 }
 2962 
 2963 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
 2964                                     struct pci_dev *pdev)
 2965 {
 2966         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 2967         struct rtw89_pci_tx_ring *tx_ring;
 2968         u32 desc_size;
 2969         u32 len;
 2970         u32 i, tx_allocated;
 2971         int ret;
 2972 
 2973         for (i = 0; i < RTW89_TXCH_NUM; i++) {
 2974                 tx_ring = &rtwpci->tx_rings[i];
 2975                 desc_size = sizeof(struct rtw89_pci_tx_bd_32);
 2976                 len = RTW89_PCI_TXBD_NUM_MAX;
 2977                 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
 2978                                               desc_size, len, i);
 2979                 if (ret) {
 2980 #if defined(__linux__)
 2981                         rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
 2982 #elif defined(__FreeBSD__)
 2983                         rtw89_err(rtwdev, "failed to alloc tx ring %d: ret=%d\n", i, ret);
 2984 #endif
 2985                         goto err_free;
 2986                 }
 2987         }
 2988 
 2989         return 0;
 2990 
 2991 err_free:
 2992         tx_allocated = i;
 2993         for (i = 0; i < tx_allocated; i++) {
 2994                 tx_ring = &rtwpci->tx_rings[i];
 2995                 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
 2996         }
 2997 
 2998         return ret;
 2999 }
 3000 
 3001 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
 3002                                    struct pci_dev *pdev,
 3003                                    struct rtw89_pci_rx_ring *rx_ring,
 3004                                    u32 desc_size, u32 len, u32 rxch)
 3005 {
 3006         const struct rtw89_pci_ch_dma_addr *rxch_addr;
 3007         struct sk_buff *skb;
 3008         u8 *head;
 3009         dma_addr_t dma;
 3010         int ring_sz = desc_size * len;
 3011         int buf_sz = RTW89_PCI_RX_BUF_SIZE;
 3012         int i, allocated;
 3013         int ret;
 3014 
 3015         ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
 3016         if (ret) {
 3017                 rtw89_err(rtwdev, "failed to get address of rxch %d\n", rxch);
 3018                 return ret;
 3019         }
 3020 
 3021         head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
 3022         if (!head) {
 3023                 ret = -ENOMEM;
 3024                 goto err;
 3025         }
 3026 
 3027         rx_ring->bd_ring.head = head;
 3028         rx_ring->bd_ring.dma = dma;
 3029         rx_ring->bd_ring.len = len;
 3030         rx_ring->bd_ring.desc_size = desc_size;
 3031         rx_ring->bd_ring.addr = *rxch_addr;
 3032         rx_ring->bd_ring.wp = 0;
 3033         rx_ring->bd_ring.rp = 0;
 3034         rx_ring->buf_sz = buf_sz;
 3035         rx_ring->diliver_skb = NULL;
 3036         rx_ring->diliver_desc.ready = false;
 3037 
 3038         for (i = 0; i < len; i++) {
 3039                 skb = dev_alloc_skb(buf_sz);
 3040                 if (!skb) {
 3041                         ret = -ENOMEM;
 3042                         goto err_free;
 3043                 }
 3044 
 3045                 memset(skb->data, 0, buf_sz);
 3046                 rx_ring->buf[i] = skb;
 3047                 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb,
 3048                                            buf_sz, i);
 3049                 if (ret) {
 3050 #if defined(__linux__)
 3051                         rtw89_err(rtwdev, "failed to init rx buf %d\n", i);
 3052 #elif defined(__FreeBSD__)
 3053                         rtw89_err(rtwdev, "failed to init rx buf %d ret=%d\n", i, ret);
 3054 #endif
 3055                         dev_kfree_skb_any(skb);
 3056                         rx_ring->buf[i] = NULL;
 3057                         goto err_free;
 3058                 }
 3059         }
 3060 
 3061         return 0;
 3062 
 3063 err_free:
 3064         allocated = i;
 3065         for (i = 0; i < allocated; i++) {
 3066                 skb = rx_ring->buf[i];
 3067                 if (!skb)
 3068                         continue;
 3069                 dma = *((dma_addr_t *)skb->cb);
 3070                 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
 3071                 dev_kfree_skb(skb);
 3072                 rx_ring->buf[i] = NULL;
 3073         }
 3074 
 3075         head = rx_ring->bd_ring.head;
 3076         dma = rx_ring->bd_ring.dma;
 3077         dma_free_coherent(&pdev->dev, ring_sz, head, dma);
 3078 
 3079         rx_ring->bd_ring.head = NULL;
 3080 err:
 3081         return ret;
 3082 }
 3083 
 3084 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
 3085                                     struct pci_dev *pdev)
 3086 {
 3087         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 3088         struct rtw89_pci_rx_ring *rx_ring;
 3089         u32 desc_size;
 3090         u32 len;
 3091         int i, rx_allocated;
 3092         int ret;
 3093 
 3094         for (i = 0; i < RTW89_RXCH_NUM; i++) {
 3095                 rx_ring = &rtwpci->rx_rings[i];
 3096                 desc_size = sizeof(struct rtw89_pci_rx_bd_32);
 3097                 len = RTW89_PCI_RXBD_NUM_MAX;
 3098                 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
 3099                                               desc_size, len, i);
 3100                 if (ret) {
 3101                         rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
 3102                         goto err_free;
 3103                 }
 3104         }
 3105 
 3106         return 0;
 3107 
 3108 err_free:
 3109         rx_allocated = i;
 3110         for (i = 0; i < rx_allocated; i++) {
 3111                 rx_ring = &rtwpci->rx_rings[i];
 3112                 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
 3113         }
 3114 
 3115         return ret;
 3116 }
 3117 
 3118 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev,
 3119                                      struct pci_dev *pdev)
 3120 {
 3121         int ret;
 3122 
 3123         ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev);
 3124         if (ret) {
 3125                 rtw89_err(rtwdev, "failed to alloc dma tx rings\n");
 3126                 goto err;
 3127         }
 3128 
 3129         ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev);
 3130         if (ret) {
 3131                 rtw89_err(rtwdev, "failed to alloc dma rx rings\n");
 3132                 goto err_free_tx_rings;
 3133         }
 3134 
 3135         return 0;
 3136 
 3137 err_free_tx_rings:
 3138         rtw89_pci_free_tx_rings(rtwdev, pdev);
 3139 err:
 3140         return ret;
 3141 }
 3142 
 3143 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev,
 3144                                struct rtw89_pci *rtwpci)
 3145 {
 3146         skb_queue_head_init(&rtwpci->h2c_queue);
 3147         skb_queue_head_init(&rtwpci->h2c_release_queue);
 3148 }
 3149 
 3150 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev,
 3151                                     struct pci_dev *pdev)
 3152 {
 3153         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 3154         int ret;
 3155 
 3156         ret = rtw89_pci_setup_mapping(rtwdev, pdev);
 3157         if (ret) {
 3158                 rtw89_err(rtwdev, "failed to setup pci mapping\n");
 3159                 goto err;
 3160         }
 3161 
 3162         ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev);
 3163         if (ret) {
 3164                 rtw89_err(rtwdev, "failed to alloc pci trx rings\n");
 3165                 goto err_pci_unmap;
 3166         }
 3167 
 3168         rtw89_pci_h2c_init(rtwdev, rtwpci);
 3169 
 3170         spin_lock_init(&rtwpci->irq_lock);
 3171         spin_lock_init(&rtwpci->trx_lock);
 3172 
 3173         return 0;
 3174 
 3175 err_pci_unmap:
 3176         rtw89_pci_clear_mapping(rtwdev, pdev);
 3177 err:
 3178         return ret;
 3179 }
 3180 
 3181 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
 3182                                      struct pci_dev *pdev)
 3183 {
 3184         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 3185 
 3186         rtw89_pci_free_trx_rings(rtwdev, pdev);
 3187         rtw89_pci_clear_mapping(rtwdev, pdev);
 3188         rtw89_pci_release_fwcmd(rtwdev, rtwpci,
 3189                                 skb_queue_len(&rtwpci->h2c_queue), true);
 3190 }
 3191 
 3192 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
 3193 {
 3194         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 3195 
 3196         rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;
 3197 
 3198         if (rtwpci->under_recovery) {
 3199                 rtwpci->intrs[0] = B_AX_HS0ISR_IND_INT_EN;
 3200                 rtwpci->intrs[1] = 0;
 3201         } else {
 3202                 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
 3203                                    B_AX_RXDMA_INT_EN |
 3204                                    B_AX_RXP1DMA_INT_EN |
 3205                                    B_AX_RPQDMA_INT_EN |
 3206                                    B_AX_RXDMA_STUCK_INT_EN |
 3207                                    B_AX_RDU_INT_EN |
 3208                                    B_AX_RPQBD_FULL_INT_EN |
 3209                                    B_AX_HS0ISR_IND_INT_EN;
 3210 
 3211                 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
 3212         }
 3213 }
 3214 EXPORT_SYMBOL(rtw89_pci_config_intr_mask);
 3215 
 3216 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev)
 3217 {
 3218         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 3219 
 3220         rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN;
 3221         rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
 3222         rtwpci->intrs[0] = 0;
 3223         rtwpci->intrs[1] = 0;
 3224 }
 3225 
 3226 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev)
 3227 {
 3228         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 3229 
 3230         rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN |
 3231                             B_AX_HS1ISR_IND_INT_EN |
 3232                             B_AX_HS0ISR_IND_INT_EN;
 3233         rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
 3234         rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
 3235                            B_AX_RXDMA_INT_EN |
 3236                            B_AX_RXP1DMA_INT_EN |
 3237                            B_AX_RPQDMA_INT_EN |
 3238                            B_AX_RXDMA_STUCK_INT_EN |
 3239                            B_AX_RDU_INT_EN |
 3240                            B_AX_RPQBD_FULL_INT_EN;
 3241         rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
 3242 }
 3243 
 3244 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev)
 3245 {
 3246         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 3247 
 3248         rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN |
 3249                             B_AX_HS0ISR_IND_INT_EN;
 3250         rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
 3251         rtwpci->intrs[0] = 0;
 3252         rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
 3253 }
 3254 
 3255 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev)
 3256 {
 3257         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 3258 
 3259         if (rtwpci->under_recovery)
 3260                 rtw89_pci_recovery_intr_mask_v1(rtwdev);
 3261         else if (rtwpci->low_power)
 3262                 rtw89_pci_low_power_intr_mask_v1(rtwdev);
 3263         else
 3264                 rtw89_pci_default_intr_mask_v1(rtwdev);
 3265 }
 3266 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1);
 3267 
 3268 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
 3269                                  struct pci_dev *pdev)
 3270 {
 3271         unsigned long flags = 0;
 3272         int ret;
 3273 
 3274         flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI;
 3275         ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
 3276         if (ret < 0) {
 3277                 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
 3278                 goto err;
 3279         }
 3280 
 3281         ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
 3282                                         rtw89_pci_interrupt_handler,
 3283                                         rtw89_pci_interrupt_threadfn,
 3284                                         IRQF_SHARED, KBUILD_MODNAME, rtwdev);
 3285         if (ret) {
 3286                 rtw89_err(rtwdev, "failed to request threaded irq\n");
 3287                 goto err_free_vector;
 3288         }
 3289 
 3290         rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET);
 3291 
 3292         return 0;
 3293 
 3294 err_free_vector:
 3295         pci_free_irq_vectors(pdev);
 3296 err:
 3297         return ret;
 3298 }
 3299 
 3300 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
 3301                                struct pci_dev *pdev)
 3302 {
 3303         devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
 3304         pci_free_irq_vectors(pdev);
 3305 }
 3306 
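      /* Decode a Gray-coded PHY field: each output bit is the XOR of the
       * corresponding input bit and the bit above it, with the top bit
       * passed through unchanged (e.g. for bit_num = 2, 0b11 -> 0b10).
       */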
 3307 static u16 gray_code_to_bin(u16 gray_code, u32 bit_num)
 3308 {
 3309         u16 bin = 0, gray_bit;
 3310         u32 bit_idx;
 3311 
 3312         for (bit_idx = 0; bit_idx < bit_num; bit_idx++) {
 3313                 gray_bit = (gray_code >> bit_idx) & 0x1;
 3314                 if (bit_num - bit_idx > 1)
 3315                         gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1;
 3316                 bin |= (gray_bit << bit_idx);
 3317         }
 3318 
 3319         return bin;
 3320 }
 3321 
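      /* RTL8852C only: unless ASPM control is already set to L1, select
       * the PCIe PHY register bank for the current link speed and, on
       * Gen2 links, recompute the "filter out" equalizer setting from a
       * Gray-coded calibration field before enabling PHY power saving.
       */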
 3322 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
 3323 {
 3324         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 3325         struct pci_dev *pdev = rtwpci->pdev;
 3326         u16 val16, filter_out_val;
 3327         u32 val, phy_offset;
 3328         int ret;
 3329 
 3330         if (rtwdev->chip->chip_id != RTL8852C)
 3331                 return 0;
 3332 
 3333         val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
 3334         if (val == B_AX_ASPM_CTRL_L1)
 3335                 return 0;
 3336 
 3337         ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
 3338         if (ret)
 3339                 return ret;
 3340 
 3341         val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val);
 3342         if (val == RTW89_PCIE_GEN1_SPEED) {
 3343                 phy_offset = R_RAC_DIRECT_OFFSET_G1;
 3344         } else if (val == RTW89_PCIE_GEN2_SPEED) {
 3345                 phy_offset = R_RAC_DIRECT_OFFSET_G2;
 3346                 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT);
 3347                 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT,
 3348                                   val16 | B_PCIE_BIT_PINOUT_DIS);
 3349                 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
 3350                                   val16 & ~B_PCIE_BIT_RD_SEL);
 3351 
 3352                 val16 = rtw89_read16_mask(rtwdev,
 3353                                           phy_offset + RAC_ANA1F * RAC_MULT,
 3354                                           FILTER_OUT_EQ_MASK);
 3355                 val16 = gray_code_to_bin(val16, hweight16(val16));
 3356                 filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
 3357                                               RAC_MULT);
 3358                 filter_out_val &= ~REG_FILTER_OUT_MASK;
 3359                 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);
 3360 
 3361                 rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
 3362                               filter_out_val);
 3363                 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
 3364                                   B_BAC_EQ_SEL);
 3365                 rtw89_write16_set(rtwdev,
 3366                                   R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
 3367                                   B_PCIE_BIT_PSAVE);
 3368         } else {
 3369                 return -EOPNOTSUPP;
 3370         }
 3371         rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
 3372                           B_PCIE_BIT_PSAVE);
 3373 
 3374         return 0;
 3375 }
 3376 
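      /* Enable/disable chip-side CLKREQ handling (the host-side
       * capability is checked by the caller): program a 30 us CLKREQ
       * delay, then flip the chip-specific enable bit.
       */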
 3377 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
 3378 {
 3379         enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 3380         int ret;
 3381 
 3382         if (rtw89_pci_disable_clkreq)
 3383                 return;
 3384 
 3385         ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
 3386                                           PCIE_CLKDLY_HW_30US);
 3387         if (ret)
 3388                 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
 3389 
 3390         if (chip_id == RTL8852A) {
 3391                 if (enable)
 3392                         ret = rtw89_pci_config_byte_set(rtwdev,
 3393                                                         RTW89_PCIE_L1_CTRL,
 3394                                                         RTW89_PCIE_BIT_CLK);
 3395                 else
 3396                         ret = rtw89_pci_config_byte_clr(rtwdev,
 3397                                                         RTW89_PCIE_L1_CTRL,
 3398                                                         RTW89_PCIE_BIT_CLK);
 3399                 if (ret)
 3400                         rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d\n",
 3401                                   enable ? "set" : "unset", ret);
 3402         } else if (chip_id == RTL8852C) {
 3403                 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL,
 3404                                   B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL);
 3405                 if (enable)
 3406                         rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL,
 3407                                           B_AX_CLK_REQ_N);
 3408                 else
 3409                         rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL,
 3410                                           B_AX_CLK_REQ_N);
 3411         }
 3412 }
 3413 
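      /* Enable/disable chip-side ASPM L1: program the L0s/L1 entry delays
       * (4 us / 16 us), then set or clear the chip-specific L1 control
       * bit.
       */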
 3414 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
 3415 {
 3416         enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 3417         u8 value = 0;
 3418         int ret;
 3419 
 3420         if (rtw89_pci_disable_aspm_l1)
 3421                 return;
 3422 
 3423         ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
 3424         if (ret)
 3425                 rtw89_err(rtwdev, "failed to read ASPM Delay\n");
 3426 
 3427         value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK);
 3428         value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
 3429                  FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);
 3430 
 3431         ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
 3432         if (ret)
 3433                 rtw89_err(rtwdev, "failed to write ASPM Delay\n");
 3434 
 3435         if (chip_id == RTL8852A || chip_id == RTL8852B) {
 3436                 if (enable)
 3437                         ret = rtw89_pci_config_byte_set(rtwdev,
 3438                                                         RTW89_PCIE_L1_CTRL,
 3439                                                         RTW89_PCIE_BIT_L1);
 3440                 else
 3441                         ret = rtw89_pci_config_byte_clr(rtwdev,
 3442                                                         RTW89_PCIE_L1_CTRL,
 3443                                                         RTW89_PCIE_BIT_L1);
 3444         } else if (chip_id == RTL8852C) {
 3445                 if (enable)
 3446                         rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
 3447                                           B_AX_ASPM_CTRL_L1);
 3448                 else
 3449                         rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
 3450                                           B_AX_ASPM_CTRL_L1);
 3451         }
 3452         if (ret)
 3453                 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d\n",
 3454                           enable ? "set" : "unset", ret);
 3455 }
 3456 
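      /* Recompute RX interrupt mitigation: under high TX or RX traffic
       * (and not while scanning), coalesce RX interrupts until half the
       * RX ring is filled or a 2048 us timer (32 units of 64 us) expires;
       * otherwise disable mitigation.
       */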
 3457 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
 3458 {
 3459         struct rtw89_traffic_stats *stats = &rtwdev->stats;
 3460         enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
 3461         enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
 3462         u32 val = 0;
 3463 
 3464         if (!rtwdev->scanning &&
 3465             (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH))
 3466                 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
 3467                       FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
 3468                       FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
 3469                       FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);
 3470 
 3471         rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val);
 3472 }
 3473 
 3474 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
 3475 {
 3476         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 3477         struct pci_dev *pdev = rtwpci->pdev;
 3478         u16 link_ctrl;
 3479         int ret;
 3480 
 3481         /* Although the standard PCIE configuration space provides a link
 3482          * control register, by Realtek's design the driver must check whether
 3483          * the host supports CLKREQ/ASPM before enabling the HW module.
 3484          *
 3485          * These features are implemented by two associated HW modules: one
 3486          * is responsible for accessing the PCIE configuration space to
 3487          * follow the host settings, while the other carries out the actual
 3488          * CLKREQ/ASPM mechanisms and is disabled by default. Some hosts do
 3489          * not support these features, and wrong settings (e.g. CLKREQ# not
 3490          * bi-directional) could lead to device loss if the HW misbehaves on
 3491          * the link.
 3492          *
 3493          * Hence the driver first checks that the PCIE configuration space
 3494          * is synced and enabled, and only then turns on the module that
 3495          * actually implements the mechanism.
 3496          */
 3497         ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
 3498         if (ret) {
 3499                 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
 3500                 return;
 3501         }
 3502 
 3503         if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
 3504                 rtw89_pci_clkreq_set(rtwdev, true);
 3505 
 3506         if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
 3507                 rtw89_pci_aspm_set(rtwdev, true);
 3508 }
 3509 
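      /* Enable/disable chip-side L1 substates. On RTL8852C the ASPM L1.1
       * and PCI-PM L1.1 config bits are cleared first, and the
       * B_AX_L1SUB_DISABLE bit is used with inverted polarity.
       */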
 3510 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
 3511 {
 3512         enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 3513         int ret;
 3514 
 3515         if (chip_id == RTL8852A || chip_id == RTL8852B) {
 3516                 if (enable)
 3517                         ret = rtw89_pci_config_byte_set(rtwdev,
 3518                                                         RTW89_PCIE_TIMER_CTRL,
 3519                                                         RTW89_PCIE_BIT_L1SUB);
 3520                 else
 3521                         ret = rtw89_pci_config_byte_clr(rtwdev,
 3522                                                         RTW89_PCIE_TIMER_CTRL,
 3523                                                         RTW89_PCIE_BIT_L1SUB);
 3524                 if (ret)
 3525                         rtw89_err(rtwdev, "failed to %s L1SS, ret=%d\n",
 3526                                   enable ? "set" : "unset", ret);
 3527         } else if (chip_id == RTL8852C) {
 3528                 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
 3529                                                 RTW89_PCIE_BIT_ASPM_L11 |
 3530                                                 RTW89_PCIE_BIT_PCI_L11);
 3531                 if (ret)
 3532                         rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d\n", ret);
 3533                 if (enable)
 3534                         rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
 3535                                           B_AX_L1SUB_DISABLE);
 3536                 else
 3537                         rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
 3538                                           B_AX_L1SUB_DISABLE);
 3539         }
 3540 }
 3541 
 3542 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
 3543 {
 3544         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 3545         struct pci_dev *pdev = rtwpci->pdev;
 3546         u32 l1ss_cap_ptr, l1ss_ctrl;
 3547 
 3548         if (rtw89_pci_disable_l1ss)
 3549                 return;
 3550 
 3551         l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
 3552         if (!l1ss_cap_ptr)
 3553                 return;
 3554 
 3555         pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);
 3556 
 3557         if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
 3558                 rtw89_pci_l1ss_set(rtwdev, true);
 3559 }
 3560 
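      /* Gate all PCIe DMA at once: pair the B_AX_STOP_PCIEIO stop bit
       * with the TX/RX HCI enable bits so both directions start or stop
       * together.
       */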
 3561 static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en)
 3562 {
 3563         const struct rtw89_pci_info *info = rtwdev->pci_info;
 3564         u32 val32;
 3565 
 3566         if (en == MAC_AX_FUNC_EN) {
 3567                 val32 = B_AX_STOP_PCIEIO;
 3568                 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, val32);
 3569 
 3570                 val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
 3571                 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
 3572         } else {
 3573                 val32 = B_AX_STOP_PCIEIO;
 3574                 rtw89_write32_set(rtwdev, info->dma_stop1_reg, val32);
 3575 
 3576                 val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
 3577                 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
 3578         }
 3579 }
 3580 
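      /* Poll, in 10 us steps for up to 1 ms, until the PCIe IO/TX/RX busy
       * flags clear; dump the busy register and fail if they do not.
       */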
 3581 static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
 3582 {
 3583         int ret = 0;
 3584         u32 sts;
 3585         u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
 3586 
 3587         ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
 3588                                        10, 1000, false, rtwdev,
 3589                                        R_AX_PCIE_DMA_BUSY1);
 3590         if (ret) {
 3591                 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
 3592                           rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
 3593                 return -EINVAL;
 3594         }
 3595         return ret;
 3596 }
 3597 
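      /* First half of level-1 recovery: stop all PCIe DMA and wait for it
       * to idle; if it stays busy, bounce the stuck TX/RX DMA engine(s)
       * via R_AX_HCI_FUNC_EN and poll once more.
       */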
 3598 static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
 3599 {
 3600         u32 val, dma_rst = 0;
 3601         int ret;
 3602 
 3603         rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_DIS);
 3604         ret = rtw89_pci_poll_io_idle(rtwdev);
 3605         if (ret) {
 3606                 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
 3607                 rtw89_debug(rtwdev, RTW89_DBG_HCI,
 3608                             "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
 3609                             R_AX_DBG_ERR_FLAG, val);
 3610                 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
 3611                         dma_rst |= B_AX_HCI_TXDMA_EN;
 3612                 if (val & B_AX_RX_STUCK)
 3613                         dma_rst |= B_AX_HCI_RXDMA_EN;
 3614                 val = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN);
 3615                 rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val & ~dma_rst);
 3616                 rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val | dma_rst);
 3617                 ret = rtw89_pci_poll_io_idle(rtwdev);
 3618                 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
 3619                 rtw89_debug(rtwdev, RTW89_DBG_HCI,
 3620                             "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
 3621                             R_AX_DBG_ERR_FLAG, val);
 3622         }
 3623 
 3624         return ret;
 3625 }
 3626 
 3627 static void rtw89_pci_ctrl_hci_dma_en(struct rtw89_dev *rtwdev, u8 en)
 3628 {
 3629         u32 val32;
 3630 
 3631         if (en == MAC_AX_FUNC_EN) {
 3632                 val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
 3633                 rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, val32);
 3634         } else {
 3635                 val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
 3636                 rtw89_write32_clr(rtwdev, R_AX_HCI_FUNC_EN, val32);
 3637         }
 3638 }
 3639 
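      /* Set the BD RAM reset bit and wait up to 100 us for the hardware
       * to clear it again.
       */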
 3640 static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev)
 3641 {
 3642         int ret = 0;
 3643         u32 val32, sts;
 3644 
 3645         val32 = B_AX_RST_BDRAM;
 3646         rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
 3647 
 3648         ret = read_poll_timeout_atomic(rtw89_read32, sts,
 3649                                        (sts & B_AX_RST_BDRAM) == 0x0, 1, 100,
 3650                                        true, rtwdev, R_AX_PCIE_INIT_CFG1);
 3651         return ret;
 3652 }
 3653 
 3654 static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev)
 3655 {
 3656         int ret;
 3657 
 3658         rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_DIS);
 3659         rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_EN);
 3660         rtw89_pci_clr_idx_all(rtwdev);
 3661 
 3662         ret = rtw89_pci_rst_bdram(rtwdev);
 3663         if (ret)
 3664                 return ret;
 3665 
 3666         rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_EN);
 3667         return ret;
 3668 }
 3669 
 3670 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
 3671                                           enum rtw89_lv1_rcvy_step step)
 3672 {
 3673         int ret;
 3674 
 3675         switch (step) {
 3676         case RTW89_LV1_RCVY_STEP_1:
 3677                 ret = rtw89_pci_lv1rst_stop_dma(rtwdev);
 3678                 if (ret)
 3679                         rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");
 3680 
 3681                 break;
 3682 
 3683         case RTW89_LV1_RCVY_STEP_2:
 3684                 ret = rtw89_pci_lv1rst_start_dma(rtwdev);
 3685                 if (ret)
 3686                         rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
 3687                 break;
 3688 
 3689         default:
 3690                 return -EINVAL;
 3691         }
 3692 
 3693         return ret;
 3694 }
 3695 
 3696 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
 3697 {
 3698         rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
 3699                    rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
 3700         rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
 3701                    rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
 3702         rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
 3703                    rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
 3704 }
 3705 
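      /* NAPI poll: drain the RPQ DMA first, then the RX queues, clearing
       * the matching ISR bits before each pass; interrupts are re-enabled
       * only once the budget is not exhausted and NAPI completes.
       */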
 3706 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
 3707 {
 3708         struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
 3709         struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
 3710         unsigned long flags;
 3711         int work_done;
 3712 
 3713         rtwdev->napi_budget_countdown = budget;
 3714 
 3715         rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT);
 3716         work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
 3717         if (work_done == budget)
 3718                 return budget;
 3719 
 3720         rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT);
 3721         work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
 3722         if (work_done < budget && napi_complete_done(napi, work_done)) {
 3723                 spin_lock_irqsave(&rtwpci->irq_lock, flags);
 3724                 if (likely(rtwpci->running))
 3725                         rtw89_chip_enable_intr(rtwdev, rtwpci);
 3726                 spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
 3727         }
 3728 
 3729         return work_done;
 3730 }
 3731 
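      /* PM suspend hook: set B_AX_R_DIS_PRST (guarded by the
       * B_AX_WLOCK_1C_BIT6 write lock) and apply per-chip PCIe link
       * settings; rtw89_pci_resume() below reverses them.
       */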
 3732 static int __maybe_unused rtw89_pci_suspend(struct device *dev)
 3733 {
 3734         struct ieee80211_hw *hw = dev_get_drvdata(dev);
 3735         struct rtw89_dev *rtwdev = hw->priv;
 3736         enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 3737 
 3738         rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
 3739         rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
 3740         rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
 3741         if (chip_id == RTL8852A || chip_id == RTL8852B) {
 3742                 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
 3743                                   B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
 3744                 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
 3745                                   B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
 3746         } else {
 3747                 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
 3748                                   B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
 3749         }
 3750 
 3751         return 0;
 3752 }
 3753 
 3754 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
 3755 {
 3756         if (rtwdev->chip->chip_id == RTL8852C)
 3757                 return;
 3758 
 3759         /* The hardware needs this register written twice for the setting to take effect */
 3760         rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
 3761                                     RTW89_PCIE_BIT_CFG_RST_MSTATE);
 3762         rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
 3763                                     RTW89_PCIE_BIT_CFG_RST_MSTATE);
 3764 }
 3765 
 3766 static int __maybe_unused rtw89_pci_resume(struct device *dev)
 3767 {
 3768         struct ieee80211_hw *hw = dev_get_drvdata(dev);
 3769         struct rtw89_dev *rtwdev = hw->priv;
 3770         enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 3771 
 3772         rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
 3773         rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
 3774         rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
 3775         if (chip_id == RTL8852A || chip_id == RTL8852B) {
 3776                 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
 3777                                   B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
 3778                 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
 3779                                   B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
 3780         } else {
 3781                 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
 3782                                   B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
 3783                 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
 3784                                   B_AX_SEL_REQ_ENTR_L1);
 3785         }
 3786         rtw89_pci_l2_hci_ldo(rtwdev);
 3787         rtw89_pci_filter_out(rtwdev);
 3788         rtw89_pci_link_cfg(rtwdev);
 3789         rtw89_pci_l1ss_cfg(rtwdev);
 3790 
 3791         return 0;
 3792 }
 3793 
 3794 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
 3795 EXPORT_SYMBOL(rtw89_pm_ops);
 3796 
 3797 static const struct rtw89_hci_ops rtw89_pci_ops = {
 3798         .tx_write       = rtw89_pci_ops_tx_write,
 3799         .tx_kick_off    = rtw89_pci_ops_tx_kick_off,
 3800         .flush_queues   = rtw89_pci_ops_flush_queues,
 3801         .reset          = rtw89_pci_ops_reset,
 3802         .start          = rtw89_pci_ops_start,
 3803         .stop           = rtw89_pci_ops_stop,
 3804         .pause          = rtw89_pci_ops_pause,
 3805         .switch_mode    = rtw89_pci_ops_switch_mode,
 3806         .recalc_int_mit = rtw89_pci_recalc_int_mit,
 3807 
 3808         .read8          = rtw89_pci_ops_read8,
 3809         .read16         = rtw89_pci_ops_read16,
 3810         .read32         = rtw89_pci_ops_read32,
 3811         .write8         = rtw89_pci_ops_write8,
 3812         .write16        = rtw89_pci_ops_write16,
 3813         .write32        = rtw89_pci_ops_write32,
 3814 
 3815         .mac_pre_init   = rtw89_pci_ops_mac_pre_init,
 3816         .mac_post_init  = rtw89_pci_ops_mac_post_init,
 3817         .deinit         = rtw89_pci_ops_deinit,
 3818 
 3819         .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
 3820         .mac_lv1_rcvy   = rtw89_pci_ops_mac_lv1_recovery,
 3821         .dump_err_status = rtw89_pci_ops_dump_err_status,
 3822         .napi_poll      = rtw89_pci_napi_poll,
 3823 
 3824         .recovery_start = rtw89_pci_ops_recovery_start,
 3825         .recovery_complete = rtw89_pci_ops_recovery_complete,
 3826 };
 3827 
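      /* Driver entry point: allocate the ieee80211 hw, initialise the
       * core, claim and map the PCI device, read chip information,
       * configure the PCIe link (filter-out/ASPM/L1SS), register the
       * core, set up NAPI, and finally request the IRQ. Error paths
       * unwind in reverse order.
       */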
 3828 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 3829 {
 3830         struct rtw89_dev *rtwdev;
 3831         const struct rtw89_driver_info *info;
 3832         const struct rtw89_pci_info *pci_info;
 3833         int ret;
 3834 
 3835         info = (const struct rtw89_driver_info *)id->driver_data;
 3836 
 3837         rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
 3838                                           sizeof(struct rtw89_pci),
 3839                                           info->chip);
 3840         if (!rtwdev) {
 3841                 dev_err(&pdev->dev, "failed to allocate hw\n");
 3842                 return -ENOMEM;
 3843         }
 3844 
 3845         pci_info = info->bus.pci;
 3846 
 3847         rtwdev->pci_info = info->bus.pci;
 3848         rtwdev->hci.ops = &rtw89_pci_ops;
 3849         rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
 3850         rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
 3851         rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
 3852 
 3853         SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
 3854 
 3855         ret = rtw89_core_init(rtwdev);
 3856         if (ret) {
 3857                 rtw89_err(rtwdev, "failed to initialise core\n");
 3858                 goto err_release_hw;
 3859         }
 3860 
 3861         ret = rtw89_pci_claim_device(rtwdev, pdev);
 3862         if (ret) {
 3863                 rtw89_err(rtwdev, "failed to claim pci device\n");
 3864                 goto err_core_deinit;
 3865         }
 3866 
 3867         ret = rtw89_pci_setup_resource(rtwdev, pdev);
 3868         if (ret) {
 3869                 rtw89_err(rtwdev, "failed to setup pci resource\n");
 3870                 goto err_declaim_pci;
 3871         }
 3872 
 3873         ret = rtw89_chip_info_setup(rtwdev);
 3874         if (ret) {
 3875                 rtw89_err(rtwdev, "failed to setup chip information\n");
 3876                 goto err_clear_resource;
 3877         }
 3878 
 3879         rtw89_pci_filter_out(rtwdev);
 3880         rtw89_pci_link_cfg(rtwdev);
 3881         rtw89_pci_l1ss_cfg(rtwdev);
 3882 
 3883         ret = rtw89_core_register(rtwdev);
 3884         if (ret) {
 3885                 rtw89_err(rtwdev, "failed to register core\n");
 3886                 goto err_clear_resource;
 3887         }
 3888 
 3889         rtw89_core_napi_init(rtwdev);
 3890 
 3891         ret = rtw89_pci_request_irq(rtwdev, pdev);
 3892         if (ret) {
 3893                 rtw89_err(rtwdev, "failed to request pci irq\n");
 3894                 goto err_unregister;
 3895         }
 3896 
 3897         return 0;
 3898 
 3899 err_unregister:
 3900         rtw89_core_napi_deinit(rtwdev);
 3901         rtw89_core_unregister(rtwdev);
 3902 err_clear_resource:
 3903         rtw89_pci_clear_resource(rtwdev, pdev);
 3904 err_declaim_pci:
 3905         rtw89_pci_declaim_device(rtwdev, pdev);
 3906 err_core_deinit:
 3907         rtw89_core_deinit(rtwdev);
 3908 err_release_hw:
 3909         rtw89_free_ieee80211_hw(rtwdev);
 3910 
 3911         return ret;
 3912 }
 3913 EXPORT_SYMBOL(rtw89_pci_probe);
 3914 
 3915 void rtw89_pci_remove(struct pci_dev *pdev)
 3916 {
 3917         struct ieee80211_hw *hw = pci_get_drvdata(pdev);
 3918         struct rtw89_dev *rtwdev;
 3919 
 3920         rtwdev = hw->priv;
 3921 
 3922         rtw89_pci_free_irq(rtwdev, pdev);
 3923         rtw89_core_napi_deinit(rtwdev);
 3924         rtw89_core_unregister(rtwdev);
 3925         rtw89_pci_clear_resource(rtwdev, pdev);
 3926         rtw89_pci_declaim_device(rtwdev, pdev);
 3927         rtw89_core_deinit(rtwdev);
 3928         rtw89_free_ieee80211_hw(rtwdev);
 3929 }
 3930 EXPORT_SYMBOL(rtw89_pci_remove);
 3931 
 3932 MODULE_AUTHOR("Realtek Corporation");
 3933 MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver");
 3934 MODULE_LICENSE("Dual BSD/GPL");
 3935 #if defined(__FreeBSD__)
 3936 MODULE_VERSION(rtw89_pci, 1);
 3937 MODULE_DEPEND(rtw89_pci, linuxkpi, 1, 1, 1);
 3938 MODULE_DEPEND(rtw89_pci, linuxkpi_wlan, 1, 1, 1);
 3939 #ifdef CONFIG_RTW89_DEBUGFS
 3940 MODULE_DEPEND(rtw89_pci, lindebugfs, 1, 1, 1);
 3941 #endif
 3942 #endif
