FreeBSD/Linux Kernel Cross Reference
sys/contrib/dev/iwlwifi/queue/tx.c


    1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
    2 /*
    3  * Copyright (C) 2020-2022 Intel Corporation
    4  */
    5 #ifdef CONFIG_INET
    6 #include <net/tso.h>
    7 #endif
    8 #include <linux/tcp.h>
    9 
   10 #include "iwl-debug.h"
   11 #include "iwl-io.h"
   12 #include "fw/api/commands.h"
   13 #include "fw/api/tx.h"
   14 #include "fw/api/datapath.h"
   15 #include "queue/tx.h"
   16 #include "iwl-fh.h"
   17 #include "iwl-scd.h"
   18 #include <linux/dmapool.h>
   19 #if defined(__FreeBSD__)
   20 #include <net/mac80211.h>
   21 #endif
   22 
   23 /*
    24  * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
   25  */
   26 static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
   27                                           struct iwl_txq *txq, u16 byte_cnt,
   28                                           int num_tbs)
   29 {
   30         int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
   31         u8 filled_tfd_size, num_fetch_chunks;
   32         u16 len = byte_cnt;
   33         __le16 bc_ent;
   34 
   35         if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
   36                 return;
   37 
   38         filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
   39                           num_tbs * sizeof(struct iwl_tfh_tb);
   40         /*
   41          * filled_tfd_size contains the number of filled bytes in the TFD.
   42          * Dividing it by 64 will give the number of chunks to fetch
    43          * to SRAM - 0 for one chunk, 1 for two, and so on.
    44          * If, for example, the TFD contains only 3 TBs, then 32 bytes
    45          * of the TFD are used, and only one chunk of 64 bytes should
    46          * be fetched.
   47          */
   48         num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
   49 
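               /*
                * bc_ent layout (from the shifts and masks below): the byte count
                * sits in the low 14 bits on AX210+ (12 bits before AX210), with
                * num_fetch_chunks packed into the remaining high bits.
                */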
   50         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
   51                 struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
   52 
   53                 /* Starting from AX210, the HW expects bytes */
   54                 WARN_ON(trans->txqs.bc_table_dword);
   55                 WARN_ON(len > 0x3FFF);
   56                 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
   57                 scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
   58         } else {
   59                 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
   60 
   61                 /* Before AX210, the HW expects DW */
   62                 WARN_ON(!trans->txqs.bc_table_dword);
   63                 len = DIV_ROUND_UP(len, 4);
   64                 WARN_ON(len > 0xFFF);
   65                 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
   66                 scd_bc_tbl->tfd_offset[idx] = bc_ent;
   67         }
   68 }
   69 
   70 /*
   71  * iwl_txq_inc_wr_ptr - Send new write index to hardware
   72  */
   73 void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
   74 {
   75         lockdep_assert_held(&txq->lock);
   76 
   77         IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
   78 
   79         /*
   80          * if not in power-save mode, uCode will never sleep when we're
   81          * trying to tx (during RFKILL, we're not trying to tx).
   82          */
   83         iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
   84 }
   85 
   86 static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
   87                                    struct iwl_tfh_tfd *tfd)
   88 {
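               /* only the low 5 bits of num_tbs encode the number of TBs */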
   89         return le16_to_cpu(tfd->num_tbs) & 0x1f;
   90 }
   91 
   92 void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
   93                             struct iwl_tfh_tfd *tfd)
   94 {
   95         int i, num_tbs;
   96 
   97         /* Sanity check on number of chunks */
   98         num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
   99 
  100         if (num_tbs > trans->txqs.tfd.max_tbs) {
  101                 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
  102                 return;
  103         }
  104 
  105         /* first TB is never freed - it's the bidirectional DMA data */
  106         for (i = 1; i < num_tbs; i++) {
  107                 if (meta->tbs & BIT(i))
  108                         dma_unmap_page(trans->dev,
  109                                        le64_to_cpu(tfd->tbs[i].addr),
  110                                        le16_to_cpu(tfd->tbs[i].tb_len),
  111                                        DMA_TO_DEVICE);
  112                 else
  113                         dma_unmap_single(trans->dev,
  114                                          le64_to_cpu(tfd->tbs[i].addr),
  115                                          le16_to_cpu(tfd->tbs[i].tb_len),
  116                                          DMA_TO_DEVICE);
  117         }
  118 
  119         tfd->num_tbs = 0;
  120 }
  121 
  122 void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
  123 {
  124         /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
  125          * idx is bounded by n_window
  126          */
  127         int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
  128         struct sk_buff *skb;
  129 
  130         lockdep_assert_held(&txq->lock);
  131 
  132         if (!txq->entries)
  133                 return;
  134 
  135         iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
  136                                iwl_txq_get_tfd(trans, txq, idx));
  137 
  138         skb = txq->entries[idx].skb;
  139 
   140         /* Can be called from an irqs-disabled context.
   141          * If skb is not NULL, it means that the whole queue is being
   142          * freed and that the queue is not empty - free the skb.
  143          */
  144         if (skb) {
  145                 iwl_op_mode_free_skb(trans->op_mode, skb);
  146                 txq->entries[idx].skb = NULL;
  147         }
  148 }
  149 
  150 int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
  151                         dma_addr_t addr, u16 len)
  152 {
  153         int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
  154         struct iwl_tfh_tb *tb;
  155 
  156         /*
  157          * Only WARN here so we know about the issue, but we mess up our
  158          * unmap path because not every place currently checks for errors
  159          * returned from this function - it can only return an error if
  160          * there's no more space, and so when we know there is enough we
  161          * don't always check ...
  162          */
  163         WARN(iwl_txq_crosses_4g_boundary(addr, len),
  164              "possible DMA problem with iova:0x%llx, len:%d\n",
  165              (unsigned long long)addr, len);
  166 
  167         if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
  168                 return -EINVAL;
  169         tb = &tfd->tbs[idx];
  170 
   171         /* Each TFD can point to a maximum of max_tbs Tx buffers */
  172         if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
  173                 IWL_ERR(trans, "Error can not send more than %d chunks\n",
  174                         trans->txqs.tfd.max_tbs);
  175                 return -EINVAL;
  176         }
  177 
  178         put_unaligned_le64(addr, &tb->addr);
  179         tb->tb_len = cpu_to_le16(len);
  180 
  181         tfd->num_tbs = cpu_to_le16(idx + 1);
  182 
  183         return idx;
  184 }
  185 
  186 static struct page *get_workaround_page(struct iwl_trans *trans,
  187                                         struct sk_buff *skb)
  188 {
  189         struct page **page_ptr;
  190         struct page *ret;
  191 
  192         page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
  193 
  194         ret = alloc_page(GFP_ATOMIC);
  195         if (!ret)
  196                 return NULL;
  197 
   198         /* set the chaining pointer to the previous page if there is one */
  199         *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
  200         *page_ptr = ret;
  201 
  202         return ret;
  203 }
  204 
  205 /*
  206  * Add a TB and if needed apply the FH HW bug workaround;
  207  * meta != NULL indicates that it's a page mapping and we
  208  * need to dma_unmap_page() and set the meta->tbs bit in
  209  * this case.
  210  */
  211 static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
  212                                        struct sk_buff *skb,
  213                                        struct iwl_tfh_tfd *tfd,
  214                                        dma_addr_t phys, void *virt,
  215                                        u16 len, struct iwl_cmd_meta *meta)
  216 {
  217         dma_addr_t oldphys = phys;
  218         struct page *page;
  219         int ret;
  220 
  221         if (unlikely(dma_mapping_error(trans->dev, phys)))
  222                 return -ENOMEM;
  223 
  224         if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
  225                 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
  226 
  227                 if (ret < 0)
  228                         goto unmap;
  229 
  230                 if (meta)
  231                         meta->tbs |= BIT(ret);
  232 
  233                 ret = 0;
  234                 goto trace;
  235         }
  236 
  237         /*
  238          * Work around a hardware bug. If (as expressed in the
  239          * condition above) the TB ends on a 32-bit boundary,
  240          * then the next TB may be accessed with the wrong
  241          * address.
  242          * To work around it, copy the data elsewhere and make
  243          * a new mapping for it so the device will not fail.
  244          */
  245 
  246         if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
  247                 ret = -ENOBUFS;
  248                 goto unmap;
  249         }
  250 
  251         page = get_workaround_page(trans, skb);
  252         if (!page) {
  253                 ret = -ENOMEM;
  254                 goto unmap;
  255         }
  256 
  257         memcpy(page_address(page), virt, len);
  258 
  259         phys = dma_map_single(trans->dev, page_address(page), len,
  260                               DMA_TO_DEVICE);
  261         if (unlikely(dma_mapping_error(trans->dev, phys)))
  262                 return -ENOMEM;
  263         ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
  264         if (ret < 0) {
  265                 /* unmap the new allocation as single */
  266                 oldphys = phys;
  267                 meta = NULL;
  268                 goto unmap;
  269         }
  270         IWL_WARN(trans,
  271                  "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
  272                  len, (unsigned long long)oldphys, (unsigned long long)phys);
  273 
  274         ret = 0;
  275 unmap:
  276         if (meta)
  277                 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
  278         else
  279                 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
  280 trace:
  281         trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
  282 
  283         return ret;
  284 }
  285 
  286 #ifdef CONFIG_INET
  287 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
  288                                       struct sk_buff *skb)
  289 {
  290         struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
  291         struct page **page_ptr;
  292 
  293         page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
  294 
  295         if (WARN_ON(*page_ptr))
  296                 return NULL;
  297 
  298         if (!p->page)
  299                 goto alloc;
  300 
  301         /*
  302          * Check if there's enough room on this page
  303          *
  304          * Note that we put a page chaining pointer *last* in the
  305          * page - we need it somewhere, and if it's there then we
  306          * avoid DMA mapping the last bits of the page which may
  307          * trigger the 32-bit boundary hardware bug.
  308          *
   309          * (see also get_workaround_page() above)
  310          */
  311         if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
  312                            sizeof(void *))
  313                 goto out;
  314 
  315         /* We don't have enough room on this page, get a new one. */
  316         __free_page(p->page);
  317 
  318 alloc:
  319         p->page = alloc_page(GFP_ATOMIC);
  320         if (!p->page)
  321                 return NULL;
  322         p->pos = page_address(p->page);
  323         /* set the chaining pointer to NULL */
  324         *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
  325 out:
  326         *page_ptr = p->page;
  327         get_page(p->page);
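               /*
                * The skb now holds a reference to this page (stored via *page_ptr
                * above); it is dropped when iwl_txq_free_tso_page() walks the chain.
                */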
  328         return p;
  329 }
  330 #endif
  331 
  332 static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
  333                                     struct sk_buff *skb,
  334                                     struct iwl_tfh_tfd *tfd, int start_len,
  335                                     u8 hdr_len,
  336                                     struct iwl_device_tx_cmd *dev_cmd)
  337 {
  338 #ifdef CONFIG_INET
  339         struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
  340         struct ieee80211_hdr *hdr = (void *)skb->data;
  341         unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
  342         unsigned int mss = skb_shinfo(skb)->gso_size;
  343         u16 length, amsdu_pad;
  344         u8 *start_hdr;
  345         struct iwl_tso_hdr_page *hdr_page;
  346         struct tso_t tso;
  347 
  348         trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
  349                              &dev_cmd->hdr, start_len, 0);
  350 
  351         ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
  352         snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
  353         total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
  354         amsdu_pad = 0;
  355 
  356         /* total amount of header we may need for this A-MSDU */
  357         hdr_room = DIV_ROUND_UP(total_len, mss) *
  358                 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
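               /*
                * i.e. worst case one subframe per MSS, each needing up to 3 bytes
                * of A-MSDU padding plus an 802.3 header and the SNAP/IP/TCP headers.
                */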
  359 
   360         /* Our device supports at most 9 segments, so it will fit in one page */
  361         hdr_page = get_page_hdr(trans, hdr_room, skb);
  362         if (!hdr_page)
  363                 return -ENOMEM;
  364 
  365         start_hdr = hdr_page->pos;
  366 
  367         /*
   368          * Pull the ieee80211 header so we can use the TSO core;
  369          * we will restore it for the tx_status flow.
  370          */
  371         skb_pull(skb, hdr_len);
  372 
  373         /*
  374          * Remove the length of all the headers that we don't actually
  375          * have in the MPDU by themselves, but that we duplicate into
  376          * all the different MSDUs inside the A-MSDU.
  377          */
  378         le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
  379 
  380         tso_start(skb, &tso);
  381 
  382         while (total_len) {
  383                 /* this is the data left for this subframe */
  384                 unsigned int data_left = min_t(unsigned int, mss, total_len);
  385                 unsigned int tb_len;
  386                 dma_addr_t tb_phys;
  387                 u8 *subf_hdrs_start = hdr_page->pos;
  388 
  389                 total_len -= data_left;
  390 
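                       /*
                        * Pad the start of this subframe to a 4-byte boundary (amsdu_pad
                        * was derived from the previous subframe's length), then compute
                        * the padding the next subframe will need.
                        */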
  391                 memset(hdr_page->pos, 0, amsdu_pad);
  392                 hdr_page->pos += amsdu_pad;
  393                 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
  394                                   data_left)) & 0x3;
  395                 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
  396                 hdr_page->pos += ETH_ALEN;
  397                 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
  398                 hdr_page->pos += ETH_ALEN;
  399 
  400                 length = snap_ip_tcp_hdrlen + data_left;
  401                 *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
  402                 hdr_page->pos += sizeof(length);
  403 
  404                 /*
   405                  * This will copy the SNAP as well, which will be considered
   406                  * part of the MAC header.
  407                  */
  408                 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
  409 
  410                 hdr_page->pos += snap_ip_tcp_hdrlen;
  411 
  412                 tb_len = hdr_page->pos - start_hdr;
  413                 tb_phys = dma_map_single(trans->dev, start_hdr,
  414                                          tb_len, DMA_TO_DEVICE);
  415                 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
  416                         goto out_err;
  417                 /*
  418                  * No need for _with_wa, this is from the TSO page and
   419                  * we leave some space at the end of it so we can't hit
  420                  * the buggy scenario.
  421                  */
  422                 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
  423                 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
  424                                         tb_phys, tb_len);
  425                 /* add this subframe's headers' length to the tx_cmd */
  426                 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
  427 
  428                 /* prepare the start_hdr for the next subframe */
  429                 start_hdr = hdr_page->pos;
  430 
  431                 /* put the payload */
  432                 while (data_left) {
  433                         int ret;
  434 
  435                         tb_len = min_t(unsigned int, tso.size, data_left);
  436                         tb_phys = dma_map_single(trans->dev, tso.data,
  437                                                  tb_len, DMA_TO_DEVICE);
  438                         ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
  439                                                           tb_phys, tso.data,
  440                                                           tb_len, NULL);
  441                         if (ret)
  442                                 goto out_err;
  443 
  444                         data_left -= tb_len;
  445                         tso_build_data(skb, &tso, tb_len);
  446                 }
  447         }
  448 
   449         /* re-add the WiFi header */
  450         skb_push(skb, hdr_len);
  451 
  452         return 0;
  453 
  454 out_err:
  455 #endif
  456         return -EINVAL;
  457 }
  458 
  459 static struct
  460 iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
  461                                          struct iwl_txq *txq,
  462                                          struct iwl_device_tx_cmd *dev_cmd,
  463                                          struct sk_buff *skb,
  464                                          struct iwl_cmd_meta *out_meta,
  465                                          int hdr_len,
  466                                          int tx_cmd_len)
  467 {
  468         int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
  469         struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
  470         dma_addr_t tb_phys;
  471         int len;
  472         void *tb1_addr;
  473 
  474         tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
  475 
  476         /*
  477          * No need for _with_wa, the first TB allocation is aligned up
  478          * to a 64-byte boundary and thus can't be at the end or cross
  479          * a page boundary (much less a 2^32 boundary).
  480          */
  481         iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
  482 
  483         /*
  484          * The second TB (tb1) points to the remainder of the TX command
  485          * and the 802.11 header - dword aligned size
  486          * (This calculation modifies the TX command, so do it before the
  487          * setup of the first TB)
  488          */
  489         len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
  490               IWL_FIRST_TB_SIZE;
  491 
  492         /* do not align A-MSDU to dword as the subframe header aligns it */
  493 
  494         /* map the data for TB1 */
  495         tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
  496         tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
  497         if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
  498                 goto out_err;
  499         /*
  500          * No need for _with_wa(), we ensure (via alignment) that the data
  501          * here can never cross or end at a page boundary.
  502          */
  503         iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
  504 
  505         if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
  506                                      hdr_len, dev_cmd))
  507                 goto out_err;
  508 
  509         /* building the A-MSDU might have changed this data, memcpy it now */
  510         memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
  511         return tfd;
  512 
  513 out_err:
  514         iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
  515         return NULL;
  516 }
  517 
  518 static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
  519                                      struct sk_buff *skb,
  520                                      struct iwl_tfh_tfd *tfd,
  521                                      struct iwl_cmd_meta *out_meta)
  522 {
  523         int i;
  524 
  525         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  526                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  527                 dma_addr_t tb_phys;
  528                 unsigned int fragsz = skb_frag_size(frag);
  529                 int ret;
  530 
  531                 if (!fragsz)
  532                         continue;
  533 
  534                 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
  535                                            fragsz, DMA_TO_DEVICE);
  536                 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
  537                                                   skb_frag_address(frag),
  538                                                   fragsz, out_meta);
  539                 if (ret)
  540                         return ret;
  541         }
  542 
  543         return 0;
  544 }
  545 
  546 static struct
  547 iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
  548                                    struct iwl_txq *txq,
  549                                    struct iwl_device_tx_cmd *dev_cmd,
  550                                    struct sk_buff *skb,
  551                                    struct iwl_cmd_meta *out_meta,
  552                                    int hdr_len,
  553                                    int tx_cmd_len,
  554                                    bool pad)
  555 {
  556         int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
  557         struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
  558         dma_addr_t tb_phys;
  559         int len, tb1_len, tb2_len;
  560         void *tb1_addr;
  561         struct sk_buff *frag;
  562 
  563         tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
  564 
  565         /* The first TB points to bi-directional DMA data */
  566         memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
  567 
  568         /*
  569          * No need for _with_wa, the first TB allocation is aligned up
  570          * to a 64-byte boundary and thus can't be at the end or cross
  571          * a page boundary (much less a 2^32 boundary).
  572          */
  573         iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
  574 
  575         /*
  576          * The second TB (tb1) points to the remainder of the TX command
  577          * and the 802.11 header - dword aligned size
  578          * (This calculation modifies the TX command, so do it before the
  579          * setup of the first TB)
  580          */
  581         len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
  582               IWL_FIRST_TB_SIZE;
  583 
  584         if (pad)
  585                 tb1_len = ALIGN(len, 4);
  586         else
  587                 tb1_len = len;
  588 
  589         /* map the data for TB1 */
  590         tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
  591         tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
  592         if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
  593                 goto out_err;
  594         /*
  595          * No need for _with_wa(), we ensure (via alignment) that the data
  596          * here can never cross or end at a page boundary.
  597          */
  598         iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
  599         trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
  600                              IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
  601 
  602         /* set up TFD's third entry to point to remainder of skb's head */
  603         tb2_len = skb_headlen(skb) - hdr_len;
  604 
  605         if (tb2_len > 0) {
  606                 int ret;
  607 
  608                 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
  609                                          tb2_len, DMA_TO_DEVICE);
  610                 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
  611                                                   skb->data + hdr_len, tb2_len,
  612                                                   NULL);
  613                 if (ret)
  614                         goto out_err;
  615         }
  616 
  617         if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
  618                 goto out_err;
  619 
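               /*
                * Also map any skbs chained on the frag_list: their linear data
                * first, then their own page fragments.
                */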
  620         skb_walk_frags(skb, frag) {
  621                 int ret;
  622 
  623                 tb_phys = dma_map_single(trans->dev, frag->data,
  624                                          skb_headlen(frag), DMA_TO_DEVICE);
  625                 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
  626                                                   frag->data,
  627                                                   skb_headlen(frag), NULL);
  628                 if (ret)
  629                         goto out_err;
  630                 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
  631                         goto out_err;
  632         }
  633 
  634         return tfd;
  635 
  636 out_err:
  637         iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
  638         return NULL;
  639 }
  640 
  641 static
  642 struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
  643                                            struct iwl_txq *txq,
  644                                            struct iwl_device_tx_cmd *dev_cmd,
  645                                            struct sk_buff *skb,
  646                                            struct iwl_cmd_meta *out_meta)
  647 {
  648         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  649         int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
  650         struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
  651         int len, hdr_len;
  652         bool amsdu;
  653 
  654         /* There must be data left over for TB1 or this code must be changed */
  655         BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
  656 
  657         memset(tfd, 0, sizeof(*tfd));
  658 
  659         if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
  660                 len = sizeof(struct iwl_tx_cmd_gen2);
  661         else
  662                 len = sizeof(struct iwl_tx_cmd_gen3);
  663 
  664         amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
  665                         (*ieee80211_get_qos_ctl(hdr) &
  666                          IEEE80211_QOS_CTL_A_MSDU_PRESENT);
  667 
  668         hdr_len = ieee80211_hdrlen(hdr->frame_control);
  669 
  670         /*
  671          * Only build A-MSDUs here if doing so by GSO, otherwise it may be
  672          * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
  673          * built in the higher layers already.
  674          */
  675         if (amsdu && skb_shinfo(skb)->gso_size)
  676                 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
  677                                                     out_meta, hdr_len, len);
  678         return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
  679                                       hdr_len, len, !amsdu);
  680 }
  681 
  682 int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
  683 {
  684         unsigned int max;
  685         unsigned int used;
  686 
  687         /*
  688          * To avoid ambiguity between empty and completely full queues, there
  689          * should always be less than max_tfd_queue_size elements in the queue.
  690          * If q->n_window is smaller than max_tfd_queue_size, there is no need
  691          * to reserve any queue entries for this purpose.
  692          */
  693         if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
  694                 max = q->n_window;
  695         else
  696                 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
  697 
  698         /*
  699          * max_tfd_queue_size is a power of 2, so the following is equivalent to
  700          * modulo by max_tfd_queue_size and is well defined.
  701          */
  702         used = (q->write_ptr - q->read_ptr) &
  703                 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
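               /*
                * Worked example: with max_tfd_queue_size == 256, write_ptr == 5 and
                * read_ptr == 250, used == (5 - 250) & 255 == 11 entries in flight.
                */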
  704 
  705         if (WARN_ON(used > max))
  706                 return 0;
  707 
  708         return max - used;
  709 }
  710 
  711 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
  712                     struct iwl_device_tx_cmd *dev_cmd, int txq_id)
  713 {
  714         struct iwl_cmd_meta *out_meta;
  715         struct iwl_txq *txq = trans->txqs.txq[txq_id];
  716         u16 cmd_len;
  717         int idx;
  718         void *tfd;
  719 
  720         if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
  721                       "queue %d out of range", txq_id))
  722                 return -EINVAL;
  723 
  724         if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
  725                       "TX on unused queue %d\n", txq_id))
  726                 return -EINVAL;
  727 
  728         if (skb_is_nonlinear(skb) &&
  729             skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
  730             __skb_linearize(skb))
  731                 return -ENOMEM;
  732 
  733         spin_lock(&txq->lock);
  734 
  735         if (iwl_txq_space(trans, txq) < txq->high_mark) {
  736                 iwl_txq_stop(trans, txq);
  737 
   738                 /* don't put the packet on the ring if there is no room */
  739                 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
  740                         struct iwl_device_tx_cmd **dev_cmd_ptr;
  741 
  742                         dev_cmd_ptr = (void *)((u8 *)skb->cb +
  743                                                trans->txqs.dev_cmd_offs);
  744 
  745                         *dev_cmd_ptr = dev_cmd;
  746                         __skb_queue_tail(&txq->overflow_q, skb);
  747                         spin_unlock(&txq->lock);
  748                         return 0;
  749                 }
  750         }
  751 
  752         idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
  753 
  754         /* Set up driver data for this TFD */
  755         txq->entries[idx].skb = skb;
  756         txq->entries[idx].cmd = dev_cmd;
  757 
  758         dev_cmd->hdr.sequence =
  759                 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
  760                             INDEX_TO_SEQ(idx)));
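               /*
                * The queue id and TFD index are packed into the command sequence
                * field, presumably so the TX status path can map a completion back
                * to this exact entry.
                */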
  761 
  762         /* Set up first empty entry in queue's array of Tx/cmd buffers */
  763         out_meta = &txq->entries[idx].meta;
  764         out_meta->flags = 0;
  765 
  766         tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
  767         if (!tfd) {
  768                 spin_unlock(&txq->lock);
  769                 return -1;
  770         }
  771 
  772         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
  773                 struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
  774                         (void *)dev_cmd->payload;
  775 
  776                 cmd_len = le16_to_cpu(tx_cmd_gen3->len);
  777         } else {
  778                 struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
  779                         (void *)dev_cmd->payload;
  780 
  781                 cmd_len = le16_to_cpu(tx_cmd_gen2->len);
  782         }
  783 
  784         /* Set up entry for this TFD in Tx byte-count array */
  785         iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
  786                                       iwl_txq_gen2_get_num_tbs(trans, tfd));
  787 
  788         /* start timer if queue currently empty */
  789         if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
  790                 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
  791 
  792         /* Tell device the write index *just past* this latest filled TFD */
  793         txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
  794         iwl_txq_inc_wr_ptr(trans, txq);
  795         /*
  796          * At this point the frame is "transmitted" successfully
  797          * and we will get a TX status notification eventually.
  798          */
  799         spin_unlock(&txq->lock);
  800         return 0;
  801 }
  802 
  803 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
  804 
  805 /*
   806  * iwl_txq_gen2_unmap -  Unmap any remaining DMA mappings and free skbs
  807  */
  808 void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
  809 {
  810         struct iwl_txq *txq = trans->txqs.txq[txq_id];
  811 
  812         spin_lock_bh(&txq->lock);
  813         while (txq->write_ptr != txq->read_ptr) {
  814                 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
  815                                    txq_id, txq->read_ptr);
  816 
  817                 if (txq_id != trans->txqs.cmd.q_id) {
  818                         int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
  819                         struct sk_buff *skb = txq->entries[idx].skb;
  820 
  821                         if (!WARN_ON_ONCE(!skb))
  822                                 iwl_txq_free_tso_page(trans, skb);
  823                 }
  824                 iwl_txq_gen2_free_tfd(trans, txq);
  825                 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
  826         }
  827 
  828         while (!skb_queue_empty(&txq->overflow_q)) {
  829                 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
  830 
  831                 iwl_op_mode_free_skb(trans->op_mode, skb);
  832         }
  833 
  834         spin_unlock_bh(&txq->lock);
  835 
  836         /* just in case - this queue may have been stopped */
  837         iwl_wake_queue(trans, txq);
  838 }
  839 
  840 static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
  841                                      struct iwl_txq *txq)
  842 {
  843         struct device *dev = trans->dev;
  844 
  845         /* De-alloc circular buffer of TFDs */
  846         if (txq->tfds) {
  847                 dma_free_coherent(dev,
  848                                   trans->txqs.tfd.size * txq->n_window,
  849                                   txq->tfds, txq->dma_addr);
  850                 dma_free_coherent(dev,
  851                                   sizeof(*txq->first_tb_bufs) * txq->n_window,
  852                                   txq->first_tb_bufs, txq->first_tb_dma);
  853         }
  854 
  855         kfree(txq->entries);
  856         if (txq->bc_tbl.addr)
  857                 dma_pool_free(trans->txqs.bc_pool,
  858                               txq->bc_tbl.addr, txq->bc_tbl.dma);
  859         kfree(txq);
  860 }
  861 
  862 /*
   863  * iwl_txq_gen2_free - Deallocate DMA queue.
   864  * @txq_id: Index of the transmit queue to deallocate.
   865  *
   866  * Empty the queue by removing and destroying all BDs.
   867  * Free all buffers.
   868  * 0-fill, but do not free the "txq" descriptor structure.
  869  */
  870 static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
  871 {
  872         struct iwl_txq *txq;
  873         int i;
  874 
  875         if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
  876                       "queue %d out of range", txq_id))
  877                 return;
  878 
  879         txq = trans->txqs.txq[txq_id];
  880 
  881         if (WARN_ON(!txq))
  882                 return;
  883 
  884         iwl_txq_gen2_unmap(trans, txq_id);
  885 
  886         /* De-alloc array of command/tx buffers */
  887         if (txq_id == trans->txqs.cmd.q_id)
  888                 for (i = 0; i < txq->n_window; i++) {
  889                         kfree_sensitive(txq->entries[i].cmd);
  890                         kfree_sensitive(txq->entries[i].free_buf);
  891                 }
  892         del_timer_sync(&txq->stuck_timer);
  893 
  894         iwl_txq_gen2_free_memory(trans, txq);
  895 
  896         trans->txqs.txq[txq_id] = NULL;
  897 
  898         clear_bit(txq_id, trans->txqs.queue_used);
  899 }
  900 
  901 /*
  902  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
  903  */
  904 static int iwl_queue_init(struct iwl_txq *q, int slots_num)
  905 {
  906         q->n_window = slots_num;
  907 
   908         /* slots_num must be a power of two, otherwise
  909          * iwl_txq_get_cmd_index is broken. */
  910         if (WARN_ON(!is_power_of_2(slots_num)))
  911                 return -EINVAL;
  912 
  913         q->low_mark = q->n_window / 4;
  914         if (q->low_mark < 4)
  915                 q->low_mark = 4;
  916 
  917         q->high_mark = q->n_window / 8;
  918         if (q->high_mark < 2)
  919                 q->high_mark = 2;
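               /*
                * high_mark is the stop threshold checked in iwl_txq_gen2_tx();
                * low_mark is presumably the matching wake threshold used by the
                * reclaim path (not shown in this file).
                */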
  920 
  921         q->write_ptr = 0;
  922         q->read_ptr = 0;
  923 
  924         return 0;
  925 }
  926 
  927 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
  928                  bool cmd_queue)
  929 {
  930         int ret;
  931         u32 tfd_queue_max_size =
  932                 trans->trans_cfg->base_params->max_tfd_queue_size;
  933 
  934         txq->need_update = false;
  935 
   936         /* max_tfd_queue_size must be a power of two, otherwise
  937          * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
  938         if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
  939                       "Max tfd queue size must be a power of two, but is %d",
  940                       tfd_queue_max_size))
  941                 return -EINVAL;
  942 
  943         /* Initialize queue's high/low-water marks, and head/tail indexes */
  944         ret = iwl_queue_init(txq, slots_num);
  945         if (ret)
  946                 return ret;
  947 
  948         spin_lock_init(&txq->lock);
  949 
  950 #ifdef CONFIG_LOCKDEP
  951         if (cmd_queue) {
  952                 static struct lock_class_key iwl_txq_cmd_queue_lock_class;
  953 
  954                 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
  955         }
  956 #endif
  957 
  958         __skb_queue_head_init(&txq->overflow_q);
  959 
  960         return 0;
  961 }
  962 
  963 void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
  964 {
  965         struct page **page_ptr;
  966         struct page *next;
  967 
  968         page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
  969         next = *page_ptr;
  970         *page_ptr = NULL;
  971 
  972         while (next) {
  973                 struct page *tmp = next;
  974 
  975                 next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
  976                                   sizeof(void *));
  977                 __free_page(tmp);
  978         }
  979 }
  980 
  981 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
  982 {
  983         u32 txq_id = txq->id;
  984         u32 status;
  985         bool active;
  986         u8 fifo;
  987 
  988         if (trans->trans_cfg->use_tfh) {
  989                 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
  990                         txq->read_ptr, txq->write_ptr);
  991 #if defined(__FreeBSD__)
  992                 /*
  993                  * Dump some more queue and timer information to rule
   994                  * out LinuxKPI issues and gather some extra data.
  995                  */
  996                 IWL_ERR(trans, "  need_update %d frozen %d ampdu %d "
  997                    "now %ju stuck_timer.expires %ju "
  998                    "frozen_expiry_remainder %ju wd_timeout %ju\n",
  999                     txq->need_update, txq->frozen, txq->ampdu,
 1000                     (uintmax_t)jiffies, (uintmax_t)txq->stuck_timer.expires,
 1001                     (uintmax_t)txq->frozen_expiry_remainder,
 1002                     (uintmax_t)txq->wd_timeout);
 1003 #endif
 1004                 /* TODO: access new SCD registers and dump them */
 1005                 return;
 1006         }
 1007 
 1008         status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
 1009         fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
 1010         active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
 1011 
 1012         IWL_ERR(trans,
 1013                 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
 1014                 txq_id, active ? "" : "in", fifo,
 1015                 jiffies_to_msecs(txq->wd_timeout),
 1016                 txq->read_ptr, txq->write_ptr,
 1017                 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
 1018                         (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
 1019                         iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
 1020                         (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
 1021                         iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
 1022 }
 1023 
 1024 static void iwl_txq_stuck_timer(struct timer_list *t)
 1025 {
 1026         struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
 1027         struct iwl_trans *trans = txq->trans;
 1028 
 1029         spin_lock(&txq->lock);
 1030         /* check if triggered erroneously */
 1031         if (txq->read_ptr == txq->write_ptr) {
 1032                 spin_unlock(&txq->lock);
 1033                 return;
 1034         }
 1035         spin_unlock(&txq->lock);
 1036 
 1037         iwl_txq_log_scd_error(trans, txq);
 1038 
 1039         iwl_force_nmi(trans);
 1040 }
 1041 
 1042 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
 1043                   bool cmd_queue)
 1044 {
 1045         size_t tfd_sz = trans->txqs.tfd.size *
 1046                 trans->trans_cfg->base_params->max_tfd_queue_size;
 1047         size_t tb0_buf_sz;
 1048         int i;
 1049 
 1050         if (WARN_ON(txq->entries || txq->tfds))
 1051                 return -EINVAL;
 1052 
 1053         if (trans->trans_cfg->use_tfh)
 1054                 tfd_sz = trans->txqs.tfd.size * slots_num;
 1055 
 1056         timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
 1057         txq->trans = trans;
 1058 
 1059         txq->n_window = slots_num;
 1060 
 1061         txq->entries = kcalloc(slots_num,
 1062                                sizeof(struct iwl_pcie_txq_entry),
 1063                                GFP_KERNEL);
 1064 
 1065         if (!txq->entries)
 1066                 goto error;
 1067 
 1068         if (cmd_queue)
 1069                 for (i = 0; i < slots_num; i++) {
 1070                         txq->entries[i].cmd =
 1071                                 kmalloc(sizeof(struct iwl_device_cmd),
 1072                                         GFP_KERNEL);
 1073                         if (!txq->entries[i].cmd)
 1074                                 goto error;
 1075                 }
 1076 
 1077         /* Circular buffer of transmit frame descriptors (TFDs),
 1078          * shared with device */
 1079         txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
 1080                                        &txq->dma_addr, GFP_KERNEL);
 1081         if (!txq->tfds)
 1082                 goto error;
 1083 
 1084         BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
 1085 
 1086         tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
 1087 
 1088         txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
 1089                                                 &txq->first_tb_dma,
 1090                                                 GFP_KERNEL);
 1091         if (!txq->first_tb_bufs)
 1092                 goto err_free_tfds;
 1093 
 1094         return 0;
 1095 err_free_tfds:
 1096         dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
 1097         txq->tfds = NULL;
 1098 error:
 1099         if (txq->entries && cmd_queue)
 1100                 for (i = 0; i < slots_num; i++)
 1101                         kfree(txq->entries[i].cmd);
 1102         kfree(txq->entries);
 1103         txq->entries = NULL;
 1104 
 1105         return -ENOMEM;
 1106 }
 1107 
 1108 static struct iwl_txq *
 1109 iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
 1110 {
 1111         size_t bc_tbl_size, bc_tbl_entries;
 1112         struct iwl_txq *txq;
 1113         int ret;
 1114 
 1115         WARN_ON(!trans->txqs.bc_tbl_size);
 1116 
 1117         bc_tbl_size = trans->txqs.bc_tbl_size;
 1118         bc_tbl_entries = bc_tbl_size / sizeof(u16);
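               /*
                * Each queue entry needs one 16-bit byte-count slot, so the requested
                * queue depth cannot exceed bc_tbl_entries.
                */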
 1119 
 1120         if (WARN_ON(size > bc_tbl_entries))
 1121                 return ERR_PTR(-EINVAL);
 1122 
 1123         txq = kzalloc(sizeof(*txq), GFP_KERNEL);
 1124         if (!txq)
 1125                 return ERR_PTR(-ENOMEM);
 1126 
 1127         txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
 1128                                           &txq->bc_tbl.dma);
 1129         if (!txq->bc_tbl.addr) {
 1130                 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
 1131                 kfree(txq);
 1132                 return ERR_PTR(-ENOMEM);
 1133         }
 1134 
 1135         ret = iwl_txq_alloc(trans, txq, size, false);
 1136         if (ret) {
 1137                 IWL_ERR(trans, "Tx queue alloc failed\n");
 1138                 goto error;
 1139         }
 1140         ret = iwl_txq_init(trans, txq, size, false);
 1141         if (ret) {
 1142                 IWL_ERR(trans, "Tx queue init failed\n");
 1143                 goto error;
 1144         }
 1145 
 1146         txq->wd_timeout = msecs_to_jiffies(timeout);
 1147 
 1148         return txq;
 1149 
 1150 error:
 1151         iwl_txq_gen2_free_memory(trans, txq);
 1152         return ERR_PTR(ret);
 1153 }
 1154 
 1155 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
 1156                                   struct iwl_host_cmd *hcmd)
 1157 {
 1158         struct iwl_tx_queue_cfg_rsp *rsp;
 1159         int ret, qid;
 1160         u32 wr_ptr;
 1161 
 1162         if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
 1163                     sizeof(*rsp))) {
 1164                 ret = -EINVAL;
 1165                 goto error_free_resp;
 1166         }
 1167 
 1168         rsp = (void *)hcmd->resp_pkt->data;
 1169         qid = le16_to_cpu(rsp->queue_number);
 1170         wr_ptr = le16_to_cpu(rsp->write_pointer);
 1171 
 1172         if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
 1173                 WARN_ONCE(1, "queue index %d unsupported", qid);
 1174                 ret = -EIO;
 1175                 goto error_free_resp;
 1176         }
 1177 
 1178         if (test_and_set_bit(qid, trans->txqs.queue_used)) {
 1179                 WARN_ONCE(1, "queue %d already used", qid);
 1180                 ret = -EIO;
 1181                 goto error_free_resp;
 1182         }
 1183 
 1184         if (WARN_ONCE(trans->txqs.txq[qid],
 1185                       "queue %d already allocated\n", qid)) {
 1186                 ret = -EIO;
 1187                 goto error_free_resp;
 1188         }
 1189 
 1190         txq->id = qid;
 1191         trans->txqs.txq[qid] = txq;
 1192         wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
 1193 
 1194         /* Place first TFD at index corresponding to start sequence number */
 1195         txq->read_ptr = wr_ptr;
 1196         txq->write_ptr = wr_ptr;
 1197 
 1198         IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
 1199 
 1200         iwl_free_resp(hcmd);
 1201         return qid;
 1202 
 1203 error_free_resp:
 1204         iwl_free_resp(hcmd);
 1205         iwl_txq_gen2_free_memory(trans, txq);
 1206         return ret;
 1207 }
 1208 
 1209 int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
 1210                       u8 tid, int size, unsigned int timeout)
 1211 {
 1212         struct iwl_txq *txq;
 1213         union {
 1214                 struct iwl_tx_queue_cfg_cmd old;
 1215                 struct iwl_scd_queue_cfg_cmd new;
 1216         } cmd;
 1217         struct iwl_host_cmd hcmd = {
 1218                 .flags = CMD_WANT_SKB,
 1219         };
 1220         int ret;
 1221 
 1222         if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
 1223             trans->hw_rev_step == SILICON_A_STEP)
 1224                 size = 4096;
 1225 
 1226         txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
 1227         if (IS_ERR(txq))
 1228                 return PTR_ERR(txq);
 1229 
 1230         if (trans->txqs.queue_alloc_cmd_ver == 0) {
 1231                 memset(&cmd.old, 0, sizeof(cmd.old));
 1232                 cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
 1233                 cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
 1234                 cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
 1235                 cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
 1236                 cmd.old.tid = tid;
 1237 
 1238                 if (hweight32(sta_mask) != 1) {
 1239                         ret = -EINVAL;
 1240                         goto error;
 1241                 }
 1242                 cmd.old.sta_id = ffs(sta_mask) - 1;
 1243 
 1244                 hcmd.id = SCD_QUEUE_CFG;
 1245                 hcmd.len[0] = sizeof(cmd.old);
 1246                 hcmd.data[0] = &cmd.old;
 1247         } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
 1248                 memset(&cmd.new, 0, sizeof(cmd.new));
 1249                 cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
 1250                 cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
 1251                 cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
 1252                 cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
 1253                 cmd.new.u.add.flags = cpu_to_le32(flags);
 1254                 cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
 1255                 cmd.new.u.add.tid = tid;
 1256 
 1257                 hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
 1258                 hcmd.len[0] = sizeof(cmd.new);
 1259                 hcmd.data[0] = &cmd.new;
 1260         } else {
 1261                 ret = -EOPNOTSUPP;
 1262                 goto error;
 1263         }
 1264 
 1265         ret = iwl_trans_send_cmd(trans, &hcmd);
 1266         if (ret)
 1267                 goto error;
 1268 
 1269         return iwl_txq_alloc_response(trans, txq, &hcmd);
 1270 
 1271 error:
 1272         iwl_txq_gen2_free_memory(trans, txq);
 1273         return ret;
 1274 }
 1275 
 1276 void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
 1277 {
 1278         if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
 1279                  "queue %d out of range", queue))
 1280                 return;
 1281 
 1282         /*
 1283          * Upon HW Rfkill - we stop the device, and then stop the queues
  1284          * in the op_mode. Just for the sake of simplicity of the op_mode,
 1285          * allow the op_mode to call txq_disable after it already called
 1286          * stop_device.
 1287          */
 1288         if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
 1289                 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
 1290                           "queue %d not used", queue);
 1291                 return;
 1292         }
 1293 
 1294         iwl_txq_gen2_free(trans, queue);
 1295 
 1296         IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
 1297 }
 1298 
 1299 void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
 1300 {
 1301         int i;
 1302 
 1303         memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
 1304 
 1305         /* Free all TX queues */
 1306         for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
 1307                 if (!trans->txqs.txq[i])
 1308                         continue;
 1309 
 1310                 iwl_txq_gen2_free(trans, i);
 1311         }
 1312 }
 1313 
 1314 int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
 1315 {
 1316         struct iwl_txq *queue;
 1317         int ret;
 1318 
 1319         /* alloc and init the tx queue */
 1320         if (!trans->txqs.txq[txq_id]) {
 1321                 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
 1322                 if (!queue) {
 1323                         IWL_ERR(trans, "Not enough memory for tx queue\n");
 1324                         return -ENOMEM;
 1325                 }
 1326                 trans->txqs.txq[txq_id] = queue;
 1327                 ret = iwl_txq_alloc(trans, queue, queue_size, true);
 1328                 if (ret) {
 1329                         IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
 1330                         goto error;
 1331                 }
 1332         } else {
 1333                 queue = trans->txqs.txq[txq_id];
 1334         }
 1335 
 1336         ret = iwl_txq_init(trans, queue, queue_size,
 1337                            (txq_id == trans->txqs.cmd.q_id));
 1338         if (ret) {
 1339                 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
 1340                 goto error;
 1341         }
 1342         trans->txqs.txq[txq_id]->id = txq_id;
 1343         set_bit(txq_id, trans->txqs.queue_used);
 1344 
 1345         return 0;
 1346 
 1347 error:
 1348         iwl_txq_gen2_tx_free(trans);
 1349         return ret;
 1350 }
 1351 
 1352 static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
 1353                                                       void *_tfd, u8 idx)
 1354 {
 1355         struct iwl_tfd *tfd;
 1356         struct iwl_tfd_tb *tb;
 1357         dma_addr_t addr;
 1358         dma_addr_t hi_len;
 1359 
 1360         if (trans->trans_cfg->use_tfh) {
 1361                 struct iwl_tfh_tfd *tfh_tfd = _tfd;
 1362                 struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
 1363 
 1364                 return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
 1365         }
 1366 
 1367         tfd = _tfd;
 1368         tb = &tfd->tbs[idx];
 1369         addr = get_unaligned_le32(&tb->lo);
 1370 
 1371         if (sizeof(dma_addr_t) <= sizeof(u32))
 1372                 return addr;
 1373 
 1374         hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
 1375 
 1376         /*
 1377          * shift by 16 twice to avoid warnings on 32-bit
 1378          * (where this code never runs anyway due to the
 1379          * if statement above)
 1380          */
 1381         return addr | ((hi_len << 16) << 16);
 1382 }
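
For pre-TFH devices, the helper above rebuilds a 36-bit DMA address from the
TFD's 32-bit lo word plus the low four bits of hi_n_len; the double 16-bit
shift exists only to silence compiler warnings on 32-bit builds. A standalone
illustration of that bit layout, using plain integer types instead of the
driver's structures (nothing below is driver code).

#include <assert.h>
#include <stdint.h>

/*
 * Rebuild a 36-bit DMA address from the legacy TFD fields: 'lo' holds
 * bits 0..31 and the low nibble of 'hi_n_len' holds bits 32..35 (the
 * remaining 12 bits of hi_n_len carry the buffer length).
 */
static uint64_t tfd_tb_addr(uint32_t lo, uint16_t hi_n_len)
{
        uint64_t hi = hi_n_len & 0xF;

        /* equivalent to the driver's ((hi << 16) << 16), i.e. << 32 */
        return (uint64_t)lo | (hi << 32);
}

int main(void)
{
        /* bits 32..35 = 0x9, low 32 bits = 0x12345678 */
        assert(tfd_tb_addr(0x12345678u, 0x0009) == 0x912345678ULL);
        return 0;
}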
 1383 
 1384 void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
 1385                             struct iwl_cmd_meta *meta,
 1386                             struct iwl_txq *txq, int index)
 1387 {
 1388         int i, num_tbs;
 1389         void *tfd = iwl_txq_get_tfd(trans, txq, index);
 1390 
 1391         /* Sanity check on number of chunks */
 1392         num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
 1393 
 1394         if (num_tbs > trans->txqs.tfd.max_tbs) {
 1395                 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
 1396                 /* @todo issue fatal error, this is quite a serious situation */
 1397                 return;
 1398         }
 1399 
 1400         /* first TB is never freed - it's the bidirectional DMA data */
 1401 
 1402         for (i = 1; i < num_tbs; i++) {
 1403                 if (meta->tbs & BIT(i))
 1404                         dma_unmap_page(trans->dev,
 1405                                        iwl_txq_gen1_tfd_tb_get_addr(trans,
 1406                                                                     tfd, i),
 1407                                        iwl_txq_gen1_tfd_tb_get_len(trans,
 1408                                                                    tfd, i),
 1409                                        DMA_TO_DEVICE);
 1410                 else
 1411                         dma_unmap_single(trans->dev,
 1412                                          iwl_txq_gen1_tfd_tb_get_addr(trans,
 1413                                                                       tfd, i),
 1414                                          iwl_txq_gen1_tfd_tb_get_len(trans,
 1415                                                                      tfd, i),
 1416                                          DMA_TO_DEVICE);
 1417         }
 1418 
 1419         meta->tbs = 0;
 1420 
 1421         if (trans->trans_cfg->use_tfh) {
 1422                 struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
 1423 
 1424                 tfd_fh->num_tbs = 0;
 1425         } else {
 1426                 struct iwl_tfd *tfd_fh = (void *)tfd;
 1427 
 1428                 tfd_fh->num_tbs = 0;
 1429         }
 1430 }
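
In the unmap path above, meta->tbs is a bitmap recording which TBs were mapped
with dma_map_page() rather than dma_map_single(), so each one is released with
the matching dma_unmap_* call; TB 0 is skipped because it always holds the
bidirectional DMA data. A small standalone illustration of that bitmap
convention, with plain integers standing in for the driver's types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Mirror of the "meta->tbs & BIT(i)" test used above. */
static bool tb_is_page_mapped(uint16_t tbs, unsigned int i)
{
        return tbs & BIT(i);
}

int main(void)
{
        uint16_t tbs = 0;
        unsigned int i;

        tbs |= BIT(2);  /* pretend TB 2 was mapped with dma_map_page() */

        for (i = 1; i < 4; i++)         /* TB 0 is never unmapped */
                printf("TB %u -> %s\n", i,
                       tb_is_page_mapped(tbs, i) ? "dma_unmap_page" :
                                                   "dma_unmap_single");
        return 0;
}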
 1431 
 1432 #define IWL_TX_CRC_SIZE 4
 1433 #define IWL_TX_DELIMITER_SIZE 4
 1434 
 1435 /*
 1436  * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 1437  */
 1438 void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
 1439                                       struct iwl_txq *txq, u16 byte_cnt,
 1440                                       int num_tbs)
 1441 {
 1442         struct iwlagn_scd_bc_tbl *scd_bc_tbl;
 1443         int write_ptr = txq->write_ptr;
 1444         int txq_id = txq->id;
 1445         u8 sec_ctl = 0;
 1446         u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 1447         __le16 bc_ent;
 1448         struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
 1449         struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 1450         u8 sta_id = tx_cmd->sta_id;
 1451 
 1452         scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
 1453 
 1454         sec_ctl = tx_cmd->sec_ctl;
 1455 
 1456         switch (sec_ctl & TX_CMD_SEC_MSK) {
 1457         case TX_CMD_SEC_CCM:
 1458                 len += IEEE80211_CCMP_MIC_LEN;
 1459                 break;
 1460         case TX_CMD_SEC_TKIP:
 1461                 len += IEEE80211_TKIP_ICV_LEN;
 1462                 break;
 1463         case TX_CMD_SEC_WEP:
 1464                 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
 1465                 break;
 1466         }
 1467         if (trans->txqs.bc_table_dword)
 1468                 len = DIV_ROUND_UP(len, 4);
 1469 
 1470         if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
 1471                 return;
 1472 
 1473         bc_ent = cpu_to_le16(len | (sta_id << 12));
 1474 
 1475         scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
 1476 
 1477         if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
 1478                 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
 1479                         bc_ent;
 1480 }
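
Both byte-count helpers (the update above and the invalidation below) pack a
12-bit length, in bytes or dwords depending on bc_table_dword, together with a
4-bit station id, and mirror the first TFD_QUEUE_SIZE_BC_DUP entries past the
end of the table so the scheduler can keep reading across the ring wrap. A
standalone sketch of that packing and duplication; the two constants are
stubbed locally purely for illustration.

#include <assert.h>
#include <stdint.h>

/* Local stand-ins for the driver's constants - illustration only. */
#define TFD_QUEUE_SIZE_MAX      256
#define TFD_QUEUE_SIZE_BC_DUP   64

/*
 * Pack a byte-count table entry: length in bits 0..11, station id in
 * bits 12..15 (the driver additionally converts with cpu_to_le16()).
 */
static uint16_t pack_bc_ent(uint16_t len, uint8_t sta_id)
{
        assert(len <= 0xFFF && sta_id <= 0xF);
        return len | (uint16_t)(sta_id << 12);
}

/*
 * Write the entry and duplicate the first TFD_QUEUE_SIZE_BC_DUP slots
 * past the end of the table, as the helpers above do.
 */
static void write_bc_ent(uint16_t *tfd_offset, int write_ptr, uint16_t bc_ent)
{
        tfd_offset[write_ptr] = bc_ent;
        if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
                tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

int main(void)
{
        uint16_t tbl[TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP] = { 0 };

        write_bc_ent(tbl, 3, pack_bc_ent(100, 5));
        assert(tbl[3] == tbl[TFD_QUEUE_SIZE_MAX + 3]);
        return 0;
}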
 1481 
 1482 void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
 1483                                      struct iwl_txq *txq)
 1484 {
 1485         struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
 1486         int txq_id = txq->id;
 1487         int read_ptr = txq->read_ptr;
 1488         u8 sta_id = 0;
 1489         __le16 bc_ent;
 1490         struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
 1491         struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 1492 
 1493         WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 1494 
 1495         if (txq_id != trans->txqs.cmd.q_id)
 1496                 sta_id = tx_cmd->sta_id;
 1497 
 1498         bc_ent = cpu_to_le16(1 | (sta_id << 12));
 1499 
 1500         scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
 1501 
 1502         if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
 1503                 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
 1504                         bc_ent;
 1505 }
 1506 
 1507 /*
 1508  * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->read_ptr]
 1509  * @trans - transport private data
 1510  * @txq - tx queue
 1512  *
 1513  * Does NOT advance any TFD circular buffer read/write indexes
 1514  * Does NOT free the TFD itself (which is within circular buffer)
 1515  */
 1516 void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 1517 {
 1518         /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
 1519          * idx is bounded by n_window
 1520          */
 1521         int rd_ptr = txq->read_ptr;
 1522         int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
 1523         struct sk_buff *skb;
 1524 
 1525         lockdep_assert_held(&txq->lock);
 1526 
 1527         if (!txq->entries)
 1528                 return;
 1529 
 1530         /* We have only txq->n_window entries in txq->entries, but we use
 1531          * up to TFD_QUEUE_SIZE_MAX TFDs
 1532          */
 1533         iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
 1534 
 1535         /* free SKB */
 1536         skb = txq->entries[idx].skb;
 1537 
 1538         /* Can be called from irqs-disabled context
 1539          * If skb is not NULL, it means that the whole queue is being
 1540          * freed and that the queue is not empty - free the skb
 1541          */
 1542         if (skb) {
 1543                 iwl_op_mode_free_skb(trans->op_mode, skb);
 1544                 txq->entries[idx].skb = NULL;
 1545         }
 1546 }
 1547 
 1548 void iwl_txq_progress(struct iwl_txq *txq)
 1549 {
 1550         lockdep_assert_held(&txq->lock);
 1551 
 1552         if (!txq->wd_timeout)
 1553                 return;
 1554 
 1555         /*
 1556          * station is asleep and we send data - that must
 1557          * be uAPSD or PS-Poll. Don't rearm the timer.
 1558          */
 1559         if (txq->frozen)
 1560                 return;
 1561 
 1562         /*
 1563          * if empty delete timer, otherwise move timer forward
 1564          * since we're making progress on this queue
 1565          */
 1566         if (txq->read_ptr == txq->write_ptr)
 1567                 del_timer(&txq->stuck_timer);
 1568         else
 1569                 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
 1570 }
 1571 
 1572 /* Frees buffers until index _not_ inclusive */
 1573 void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 1574                      struct sk_buff_head *skbs)
 1575 {
 1576         struct iwl_txq *txq = trans->txqs.txq[txq_id];
 1577         int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
 1578         int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
 1579         int last_to_free;
 1580 
 1581         /* This function is not meant to release the cmd queue */
 1582         if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
 1583                 return;
 1584 
 1585         spin_lock_bh(&txq->lock);
 1586 
 1587         if (!test_bit(txq_id, trans->txqs.queue_used)) {
 1588                 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
 1589                                     txq_id, ssn);
 1590                 goto out;
 1591         }
 1592 
 1593         if (read_ptr == tfd_num)
 1594                 goto out;
 1595 
 1596         IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
 1597                            txq_id, txq->read_ptr, tfd_num, ssn);
 1598 
 1599         /* Since we free until index _not_ inclusive, the one before index
 1600          * is the last we will free; that entry must still be in use. */
 1601         last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
 1602 
 1603         if (!iwl_txq_used(txq, last_to_free)) {
 1604                 IWL_ERR(trans,
 1605                         "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
 1606                         __func__, txq_id, last_to_free,
 1607                         trans->trans_cfg->base_params->max_tfd_queue_size,
 1608                         txq->write_ptr, txq->read_ptr);
 1609 
 1610                 iwl_op_mode_time_point(trans->op_mode,
 1611                                        IWL_FW_INI_TIME_POINT_FAKE_TX,
 1612                                        NULL);
 1613                 goto out;
 1614         }
 1615 
 1616         if (WARN_ON(!skb_queue_empty(skbs)))
 1617                 goto out;
 1618 
 1619         for (;
 1620              read_ptr != tfd_num;
 1621              txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
 1622              read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
 1623                 struct sk_buff *skb = txq->entries[read_ptr].skb;
 1624 
 1625                 if (WARN_ON_ONCE(!skb))
 1626                         continue;
 1627 
 1628                 iwl_txq_free_tso_page(trans, skb);
 1629 
 1630                 __skb_queue_tail(skbs, skb);
 1631 
 1632                 txq->entries[read_ptr].skb = NULL;
 1633 
 1634                 if (!trans->trans_cfg->use_tfh)
 1635                         iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
 1636 
 1637                 iwl_txq_free_tfd(trans, txq);
 1638         }
 1639 
 1640         iwl_txq_progress(txq);
 1641 
 1642         if (iwl_txq_space(trans, txq) > txq->low_mark &&
 1643             test_bit(txq_id, trans->txqs.queue_stopped)) {
 1644                 struct sk_buff_head overflow_skbs;
 1645 
 1646                 __skb_queue_head_init(&overflow_skbs);
 1647                 skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
 1648 
 1649                 /*
 1650                  * We are going to transmit from the overflow queue.
 1651                  * Remember this state so that wait_for_txq_empty will know we
 1652                  * are adding more packets to the TFD queue. It cannot rely on
 1653                  * the state of &txq->overflow_q, as we just emptied it, but
 1654                  * haven't TXed the content yet.
 1655                  */
 1656                 txq->overflow_tx = true;
 1657 
 1658                 /*
 1659                  * This is tricky: we are in the reclaim path, which is
 1660                  * not re-entrant, so no one else will access the txq
 1661                  * data from that path. We also stopped TX, so no new
 1662                  * frames can race with us. Bottom line: we can unlock
 1663                  * and re-lock later.
 1664                  */
 1665                 spin_unlock_bh(&txq->lock);
 1666 
 1667                 while (!skb_queue_empty(&overflow_skbs)) {
 1668                         struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
 1669                         struct iwl_device_tx_cmd *dev_cmd_ptr;
 1670 
 1671                         dev_cmd_ptr = *(void **)((u8 *)skb->cb +
 1672                                                  trans->txqs.dev_cmd_offs);
 1673 
 1674                         /*
 1675                          * Note that we can very well be overflowing again.
 1676                          * In that case, iwl_txq_space will be small again
 1677                          * and we won't wake mac80211's queue.
 1678                          */
 1679                         iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
 1680                 }
 1681 
 1682                 if (iwl_txq_space(trans, txq) > txq->low_mark)
 1683                         iwl_wake_queue(trans, txq);
 1684 
 1685                 spin_lock_bh(&txq->lock);
 1686                 txq->overflow_tx = false;
 1687         }
 1688 
 1689 out:
 1690         spin_unlock_bh(&txq->lock);
 1691 }
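
iwl_txq_reclaim() hands the reclaimed frames back through the skbs list, which
must be empty on entry, and leaves their disposal to the caller. A hedged
sketch of such a caller; the wrapper name is hypothetical, and the real
op-mode code reports TX status to mac80211 rather than simply freeing the
skbs.

#include <linux/skbuff.h>

#include "iwl-trans.h"
#include "queue/tx.h"

/* Hypothetical caller sketch - not the driver's actual op-mode code. */
static void example_reclaim_and_free(struct iwl_trans *trans, int txq_id,
                                     int ssn)
{
        struct sk_buff_head reclaimed;
        struct sk_buff *skb;

        __skb_queue_head_init(&reclaimed);      /* must be empty on entry */

        iwl_txq_reclaim(trans, txq_id, ssn, &reclaimed);

        /* A real op_mode would hand these back to mac80211 with TX status. */
        while ((skb = __skb_dequeue(&reclaimed)) != NULL)
                kfree_skb(skb);
}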
 1692 
 1693 /* Set wr_ptr of specific device and txq  */
 1694 void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
 1695 {
 1696         struct iwl_txq *txq = trans->txqs.txq[txq_id];
 1697 
 1698         spin_lock_bh(&txq->lock);
 1699 
 1700         txq->write_ptr = ptr;
 1701         txq->read_ptr = txq->write_ptr;
 1702 
 1703         spin_unlock_bh(&txq->lock);
 1704 }
 1705 
 1706 void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
 1707                                 bool freeze)
 1708 {
 1709         int queue;
 1710 
 1711         for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
 1712                 struct iwl_txq *txq = trans->txqs.txq[queue];
 1713                 unsigned long now;
 1714 
 1715                 spin_lock_bh(&txq->lock);
 1716 
 1717                 now = jiffies;
 1718 
 1719                 if (txq->frozen == freeze)
 1720                         goto next_queue;
 1721 
 1722                 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
 1723                                     freeze ? "Freezing" : "Waking", queue);
 1724 
 1725                 txq->frozen = freeze;
 1726 
 1727                 if (txq->read_ptr == txq->write_ptr)
 1728                         goto next_queue;
 1729 
 1730                 if (freeze) {
 1731                         if (unlikely(time_after(now,
 1732                                                 txq->stuck_timer.expires))) {
 1733                                 /*
 1734                                  * The timer should have fired, maybe it is
 1735                                  * spinning right now on the lock.
 1736                                  */
 1737                                 goto next_queue;
 1738                         }
 1739                         /* remember how long until the timer fires */
 1740                         txq->frozen_expiry_remainder =
 1741                                 txq->stuck_timer.expires - now;
 1742                         del_timer(&txq->stuck_timer);
 1743                         goto next_queue;
 1744                 }
 1745 
 1746                 /*
 1747                  * Wake a non-empty queue -> arm timer with the
 1748                  * remainder before it froze
 1749                  */
 1750                 mod_timer(&txq->stuck_timer,
 1751                           now + txq->frozen_expiry_remainder);
 1752 
 1753 next_queue:
 1754                 spin_unlock_bh(&txq->lock);
 1755         }
 1756 }
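
The freeze path above records how much time was left on the stuck-queue
watchdog, and the wake path re-arms the timer with exactly that remainder, so
a freeze/wake cycle neither shortens nor extends the effective timeout. A
standalone illustration of the arithmetic, with plain unsigned longs standing
in for jiffies.

#include <assert.h>

int main(void)
{
        unsigned long expires = 1000;   /* when the watchdog would fire */
        unsigned long now = 400;        /* time of the freeze */
        unsigned long remainder, new_expires;

        /* freeze: remember how long until the timer would have fired */
        remainder = expires - now;

        /* ... the queue stays frozen for a while ... */
        now = 900;

        /* wake: re-arm with the saved remainder */
        new_expires = now + remainder;

        assert(remainder == 600);
        assert(new_expires == 1500);
        return 0;
}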
 1757 
 1758 #define HOST_COMPLETE_TIMEOUT   (2 * HZ)
 1759 
 1760 static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
 1761                                         struct iwl_host_cmd *cmd)
 1762 {
 1763         const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
 1764         struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
 1765         int cmd_idx;
 1766         int ret;
 1767 
 1768         IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
 1769 
 1770         if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
 1771                                   &trans->status),
 1772                  "Command %s: a command is already active!\n", cmd_str))
 1773                 return -EIO;
 1774 
 1775         IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
 1776 
 1777         cmd_idx = trans->ops->send_cmd(trans, cmd);
 1778         if (cmd_idx < 0) {
 1779                 ret = cmd_idx;
 1780                 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 1781                 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
 1782                         cmd_str, ret);
 1783                 return ret;
 1784         }
 1785 
 1786         ret = wait_event_timeout(trans->wait_command_queue,
 1787                                  !test_bit(STATUS_SYNC_HCMD_ACTIVE,
 1788                                            &trans->status),
 1789                                  HOST_COMPLETE_TIMEOUT);
 1790         if (!ret) {
 1791                 IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
 1792                         cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
 1793 
 1794                 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
 1795                         txq->read_ptr, txq->write_ptr);
 1796 
 1797                 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 1798                 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
 1799                                cmd_str);
 1800                 ret = -ETIMEDOUT;
 1801 
 1802                 iwl_trans_sync_nmi(trans);
 1803                 goto cancel;
 1804         }
 1805 
 1806         if (test_bit(STATUS_FW_ERROR, &trans->status)) {
 1807                 if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
 1808                                         &trans->status)) {
 1809                         IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
 1810                         dump_stack();
 1811                 }
 1812                 ret = -EIO;
 1813                 goto cancel;
 1814         }
 1815 
 1816         if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
 1817             test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
 1818                 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
 1819                 ret = -ERFKILL;
 1820                 goto cancel;
 1821         }
 1822 
 1823         if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
 1824                 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
 1825                 ret = -EIO;
 1826                 goto cancel;
 1827         }
 1828 
 1829         return 0;
 1830 
 1831 cancel:
 1832         if (cmd->flags & CMD_WANT_SKB) {
 1833                 /*
 1834                  * Cancel the CMD_WANT_SKB flag for the cmd in the
 1835                  * TX cmd queue. Otherwise in case the cmd comes
 1836                  * in later, it will possibly set an invalid
 1837                  * address (cmd->meta.source).
 1838                  */
 1839                 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
 1840         }
 1841 
 1842         if (cmd->resp_pkt) {
 1843                 iwl_free_resp(cmd);
 1844                 cmd->resp_pkt = NULL;
 1845         }
 1846 
 1847         return ret;
 1848 }
 1849 
 1850 int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
 1851                             struct iwl_host_cmd *cmd)
 1852 {
 1853         /* Make sure the NIC is still alive in the bus */
 1854         if (test_bit(STATUS_TRANS_DEAD, &trans->status))
 1855                 return -ENODEV;
 1856 
 1857         if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
 1858             test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
 1859                 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
 1860                                   cmd->id);
 1861                 return -ERFKILL;
 1862         }
 1863 
 1864         if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
 1865                      !(cmd->flags & CMD_SEND_IN_D3))) {
 1866                 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
 1867                 return -EHOSTDOWN;
 1868         }
 1869 
 1870         if (cmd->flags & CMD_ASYNC) {
 1871                 int ret;
 1872 
 1873                 /* An asynchronous command cannot expect an SKB to be set. */
 1874                 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
 1875                         return -EINVAL;
 1876 
 1877                 ret = trans->ops->send_cmd(trans, cmd);
 1878                 if (ret < 0) {
 1879                         IWL_ERR(trans,
 1880                                 "Error sending %s: enqueue_hcmd failed: %d\n",
 1881                                 iwl_get_cmd_string(trans, cmd->id), ret);
 1882                         return ret;
 1883                 }
 1884                 return 0;
 1885         }
 1886 
 1887         return iwl_trans_txq_send_hcmd_sync(trans, cmd);
 1888 }
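
iwl_trans_txq_send_hcmd() is the common entry point: it rejects commands while
the NIC is dead, RF-killed, or in D3, sends CMD_ASYNC commands directly, and
otherwise runs the synchronous path above. A hedged sketch of a caller issuing
a synchronous command that wants the response packet back; the command id and
payload layout are hypothetical, while struct iwl_host_cmd, CMD_WANT_SKB,
iwl_trans_txq_send_hcmd() and iwl_free_resp() are the real interfaces used in
this file.

#include "iwl-trans.h"
#include "queue/tx.h"

/* Hypothetical command id and payload layout - for illustration only. */
#define MY_EXAMPLE_CMD_ID       0x88
struct my_example_cmd {
        __le32 value;
} __packed;

/* Hypothetical caller sketch - not part of the driver. */
static int example_send_sync_cmd(struct iwl_trans *trans, u32 value)
{
        struct my_example_cmd payload = {
                .value = cpu_to_le32(value),
        };
        struct iwl_host_cmd cmd = {
                .id = MY_EXAMPLE_CMD_ID,        /* hypothetical id */
                .flags = CMD_WANT_SKB,          /* hand the response back */
                .data = { &payload },
                .len = { sizeof(payload) },
        };
        int ret;

        ret = iwl_trans_txq_send_hcmd(trans, &cmd);
        if (ret)
                return ret;

        /* ... inspect cmd.resp_pkt here ... */

        iwl_free_resp(&cmd);                    /* caller owns the response */
        return 0;
}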
 1889 
