FreeBSD/Linux Kernel Cross Reference
sys/contrib/dev/iwlwifi/iwl-trans.h


    1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
    2 /*
    3  * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
    4  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
    5  * Copyright (C) 2016-2017 Intel Deutschland GmbH
    6  */
    7 #ifndef __iwl_trans_h__
    8 #define __iwl_trans_h__
    9 
   10 #include <linux/ieee80211.h>
   11 #include <linux/mm.h> /* for page_address */
   12 #include <linux/lockdep.h>
   13 #include <linux/kernel.h>
   14 
   15 #include "iwl-debug.h"
   16 #include "iwl-config.h"
   17 #include "fw/img.h"
   18 #include "iwl-op-mode.h"
   19 #include <linux/firmware.h>
   20 #include "fw/api/cmdhdr.h"
   21 #include "fw/api/txq.h"
   22 #include "fw/api/dbg-tlv.h"
   23 #include "iwl-dbg-tlv.h"
   24 #if defined(__FreeBSD__)
   25 #include <linux/skbuff.h>
   26 #include "iwl-modparams.h"
   27 #endif
   28 
   29 /**
   30  * DOC: Transport layer - what is it?
   31  *
   32  * The transport layer is the layer that deals with the HW directly. It provides
   33  * an abstraction of the underlying HW to the upper layer. The transport layer
   34  * doesn't provide any policy, algorithm or anything of this kind, but only
   35  * mechanisms to make the HW do something. It is not completely stateless but
   36  * close to it.
   37  * We will have an implementation for each different supported bus.
   38  */
   39 
   40 /**
   41  * DOC: Life cycle of the transport layer
   42  *
   43  * The transport layer has a very precise life cycle.
   44  *
   45  *      1) A helper function is called during the module initialization and
   46  *         registers the bus driver's ops with the transport's alloc function.
   47  *      2) The bus's probe function calls the transport layer's allocation
   48  *         function; this function is, of course, bus specific.
   49  *      3) The allocation function spawns the upper layer, which will
   50  *         register with mac80211.
   51  *
   52  *      4) At some point (i.e. mac80211's start call), the op_mode will call
   53  *         the following sequence:
   54  *         start_hw
   55  *         start_fw
   56  *
   57  *      5) Then when finished (or reset):
   58  *         stop_device
   59  *
   60  *      6) Eventually, the free function will be called.
   61  */
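/*
 * Illustrative sketch (editorial, not part of the header): the op_mode side
 * of steps 4) and 5) above, using the iwl_trans_* wrappers declared later in
 * this file.  Error handling is minimal and the firmware image is assumed to
 * have been loaded already; example_op_mode_start is a hypothetical name.
 */
#if 0	/* example only */
static int example_op_mode_start(struct iwl_trans *trans,
				 const struct fw_img *fw)
{
	int ret;

	ret = iwl_trans_start_hw(trans);		/* step 4: start_hw */
	if (ret)
		return ret;

	ret = iwl_trans_start_fw(trans, fw, false);	/* step 4: start_fw */
	if (ret)
		iwl_trans_stop_device(trans);		/* step 5 on failure */

	return ret;
}
#endif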
   62 
   63 #define IWL_TRANS_FW_DBG_DOMAIN(trans)  IWL_FW_INI_DOMAIN_ALWAYS_ON
   64 
   65 #define FH_RSCSR_FRAME_SIZE_MSK         0x00003FFF      /* bits 0-13 */
   66 #define FH_RSCSR_FRAME_INVALID          0x55550000
   67 #define FH_RSCSR_FRAME_ALIGN            0x40
   68 #define FH_RSCSR_RPA_EN                 BIT(25)
   69 #define FH_RSCSR_RADA_EN                BIT(26)
   70 #define FH_RSCSR_RXQ_POS                16
   71 #define FH_RSCSR_RXQ_MASK               0x3F0000
   72 
   73 struct iwl_rx_packet {
   74         /*
   75          * The first 4 bytes of the RX frame header contain both the RX frame
   76          * size and some flags.
   77          * Bit fields:
   78          * 31:    flag flush RB request
   79          * 30:    flag ignore TC (terminal counter) request
   80          * 29:    flag fast IRQ request
   81          * 28-27: Reserved
   82          * 26:    RADA enabled
   83          * 25:    Offload enabled
   84          * 24:    RPF enabled
   85          * 23:    RSS enabled
   86          * 22:    Checksum enabled
   87          * 21-16: RX queue
   88          * 15-14: Reserved
   89          * 13-00: RX frame size
   90          */
   91         __le32 len_n_flags;
   92         struct iwl_cmd_header hdr;
   93         u8 data[];
   94 } __packed;
   95 
   96 static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
   97 {
   98         return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
   99 }
  100 
  101 static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
  102 {
  103         return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
  104 }
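/*
 * Worked example (editorial): if the frame-size field (bits 13:0) of
 * len_n_flags holds 0x40, iwl_rx_packet_len() returns 64; assuming the
 * 4-byte struct iwl_cmd_header from fw/api/cmdhdr.h,
 * iwl_rx_packet_payload_len() then returns 60.
 */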
  105 
  106 /**
  107  * enum CMD_MODE - how to send the host commands?
  108  *
  109  * @CMD_ASYNC: Return right away and don't wait for the response
  110  * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
  111  *      the response. The caller needs to call iwl_free_resp when done.
  112  * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
  113  *      called after this command completes. Valid only with CMD_ASYNC.
  114  * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
  115  *      SUSPEND and RESUME commands. We are in D3 mode when we set
  116  *      trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
  117  */
  118 enum CMD_MODE {
  119         CMD_ASYNC               = BIT(0),
  120         CMD_WANT_SKB            = BIT(1),
  121         CMD_SEND_IN_RFKILL      = BIT(2),
  122         CMD_WANT_ASYNC_CALLBACK = BIT(3),
  123         CMD_SEND_IN_D3          = BIT(4),
  124 };
  125 
  126 #define DEF_CMD_PAYLOAD_SIZE 320
  127 
  128 /**
  129  * struct iwl_device_cmd
  130  *
  131  * For allocation of the command and tx queues, this establishes the overall
  132  * size of the largest command we send to uCode, except for commands that
  133  * aren't fully copied and use other TFD space.
  134  */
  135 struct iwl_device_cmd {
  136         union {
  137                 struct {
  138                         struct iwl_cmd_header hdr;      /* uCode API */
  139                         u8 payload[DEF_CMD_PAYLOAD_SIZE];
  140                 };
  141                 struct {
  142                         struct iwl_cmd_header_wide hdr_wide;
  143                         u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
  144                                         sizeof(struct iwl_cmd_header_wide) +
  145                                         sizeof(struct iwl_cmd_header)];
  146                 };
  147         };
  148 } __packed;
  149 
  150 /**
  151  * struct iwl_device_tx_cmd - buffer for TX command
  152  * @hdr: the header
  153  * @payload: the payload placeholder
  154  *
  155  * The actual structure is sized dynamically according to need.
  156  */
  157 struct iwl_device_tx_cmd {
  158         struct iwl_cmd_header hdr;
  159         u8 payload[];
  160 } __packed;
  161 
  162 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
  163 
  164 /*
  165  * number of transfer buffers (fragments) per transmit frame descriptor;
  166  * this is just the driver's idea, the hardware supports 20
  167  */
  168 #define IWL_MAX_CMD_TBS_PER_TFD 2
  169 
  170 /* We need 2 entries for the TX command and header, and another one might
  171  * be needed for potential data in the SKB's head. The remaining ones can
  172  * be used for frags.
  173  */
  174 #define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
  175 
  176 /**
  177  * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
  178  *
  179  * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
  180  *      ring. The transport layer doesn't map the command's buffer to DMA, but
  181  *      rather copies it to a previously allocated DMA buffer. This flag tells
  182  *      the transport layer not to copy the command, but to map the existing
  183  *      buffer (that is passed in) instead. This saves the memcpy and allows
  184  *      commands that are bigger than the fixed buffer to be submitted.
  185  *      Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
  186  * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
  187  *      chunk internally and free it again after the command completes. This
  188  *      can (currently) be used only once per command.
  189  *      Note that a TFD entry after a DUP one cannot be a normal copied one.
  190  */
  191 enum iwl_hcmd_dataflag {
  192         IWL_HCMD_DFL_NOCOPY     = BIT(0),
  193         IWL_HCMD_DFL_DUP        = BIT(1),
  194 };
  195 
  196 enum iwl_error_event_table_status {
  197         IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
  198         IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
  199         IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
  200         IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
  201         IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
  202         IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
  203         IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
  204 };
  205 
  206 /**
  207  * struct iwl_host_cmd - Host command to the uCode
  208  *
  209  * @data: array of chunks that composes the data of the host command
  210  * @resp_pkt: response packet, if %CMD_WANT_SKB was set
  211  * @_rx_page_order: (internally used to free response packet)
  212  * @_rx_page_addr: (internally used to free response packet);
  213  *      on FreeBSD, a struct page pointer (@_page) is used instead.
  214  * @flags: can be CMD_*
  215  * @len: array of the lengths of the chunks in data
  216  * @dataflags: IWL_HCMD_DFL_*
  217  * @id: command id of the host command, for wide commands encoding the
  218  *      version and group as well
  219  */
  220 struct iwl_host_cmd {
  221         const void *data[IWL_MAX_CMD_TBS_PER_TFD];
  222         struct iwl_rx_packet *resp_pkt;
  223 #if defined(__linux__)
  224         unsigned long _rx_page_addr;
  225 #elif defined(__FreeBSD__)
  226         struct page *_page;
  227 #endif
  228         u32 _rx_page_order;
  229 
  230         u32 flags;
  231         u32 id;
  232         u16 len[IWL_MAX_CMD_TBS_PER_TFD];
  233         u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
  234 };
  235 
  236 static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
  237 {
  238 #if defined(__linux__)
  239         free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
  240 #elif defined(__FreeBSD__)
  241         __free_pages(cmd->_page, cmd->_rx_page_order);
  242 #endif
  243 }
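/*
 * Illustrative sketch (editorial): sending a synchronous host command with
 * %CMD_WANT_SKB and two data chunks, the second one mapped with
 * %IWL_HCMD_DFL_NOCOPY instead of being copied into the command ring.
 * The command id 0x01 and the payload layout are hypothetical.
 */
#if 0	/* example only */
static int example_send_sync_cmd(struct iwl_trans *trans,
				 const void *big_buf, u16 big_len)
{
	u8 small_hdr[4] = {};
	struct iwl_host_cmd cmd = {
		.id = 0x01,				/* hypothetical id */
		.flags = CMD_WANT_SKB,
		.data = { small_hdr, big_buf, },
		.len = { sizeof(small_hdr), big_len, },
		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY, },
	};
	u32 len;
	int ret;

	ret = iwl_trans_send_cmd(trans, &cmd);
	if (ret)
		return ret;

	/* with CMD_WANT_SKB, the caller owns the response and must free it */
	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
	iwl_free_resp(&cmd);

	return len ? 0 : -EINVAL;
}
#endif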
  244 
  245 struct iwl_rx_cmd_buffer {
  246         struct page *_page;
  247         int _offset;
  248         bool _page_stolen;
  249         u32 _rx_page_order;
  250         unsigned int truesize;
  251 };
  252 
  253 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
  254 {
  255         return (void *)((unsigned long)page_address(r->_page) + r->_offset);
  256 }
  257 
  258 static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
  259 {
  260         return r->_offset;
  261 }
  262 
  263 static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
  264 {
  265         r->_page_stolen = true;
  266         get_page(r->_page);
  267         return r->_page;
  268 }
  269 
  270 static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
  271 {
  272         __free_pages(r->_page, r->_rx_page_order);
  273 }
  274 
  275 #define MAX_NO_RECLAIM_CMDS     6
  276 
  277 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
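/*
 * Worked example (editorial): IWL_MASK(0, 13) expands to
 * (1 << 13) | ((1 << 13) - (1 << 0)) == 0x2000 | 0x1fff == 0x3fff,
 * i.e. a mask covering bits 0..13 inclusive, the same value as
 * FH_RSCSR_FRAME_SIZE_MSK above.
 */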
  278 
  279 /*
  280  * Maximum number of HW queues the transport layer
  281  * currently supports
  282  */
  283 #define IWL_MAX_HW_QUEUES               32
  284 #define IWL_MAX_TVQM_QUEUES             512
  285 
  286 #define IWL_MAX_TID_COUNT       8
  287 #define IWL_MGMT_TID            15
  288 #define IWL_FRAME_LIMIT 64
  289 #define IWL_MAX_RX_HW_QUEUES    16
  290 #define IWL_9000_MAX_RX_HW_QUEUES       6
  291 
  292 /**
  293  * enum iwl_d3_status - WoWLAN image/device status
  294  * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
  295  * @IWL_D3_STATUS_RESET: device was reset while suspended
  296  */
  297 enum iwl_d3_status {
  298         IWL_D3_STATUS_ALIVE,
  299         IWL_D3_STATUS_RESET,
  300 };
  301 
  302 /**
  303  * enum iwl_trans_status: transport status flags
  304  * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
  305  * @STATUS_DEVICE_ENABLED: APM is enabled
  306  * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
  307  * @STATUS_INT_ENABLED: interrupts are enabled
  308  * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
  309  * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
  310  * @STATUS_FW_ERROR: the fw is in error state
  311  * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
  312  *      are sent
  313  * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
  314  * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
  315  * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
  316  *      e.g. for testing
  317  */
  318 enum iwl_trans_status {
  319         STATUS_SYNC_HCMD_ACTIVE,
  320         STATUS_DEVICE_ENABLED,
  321         STATUS_TPOWER_PMI,
  322         STATUS_INT_ENABLED,
  323         STATUS_RFKILL_HW,
  324         STATUS_RFKILL_OPMODE,
  325         STATUS_FW_ERROR,
  326         STATUS_TRANS_GOING_IDLE,
  327         STATUS_TRANS_IDLE,
  328         STATUS_TRANS_DEAD,
  329         STATUS_SUPPRESS_CMD_ERROR_ONCE,
  330 };
  331 
  332 static inline int
  333 iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
  334 {
  335         switch (rb_size) {
  336         case IWL_AMSDU_2K:
  337                 return get_order(2 * 1024);
  338         case IWL_AMSDU_4K:
  339                 return get_order(4 * 1024);
  340         case IWL_AMSDU_8K:
  341                 return get_order(8 * 1024);
  342         case IWL_AMSDU_12K:
  343                 return get_order(16 * 1024);
  344         default:
  345                 WARN_ON(1);
  346                 return -1;
  347         }
  348 }
  349 
  350 static inline int
  351 iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
  352 {
  353         switch (rb_size) {
  354         case IWL_AMSDU_2K:
  355                 return 2 * 1024;
  356         case IWL_AMSDU_4K:
  357                 return 4 * 1024;
  358         case IWL_AMSDU_8K:
  359                 return 8 * 1024;
  360         case IWL_AMSDU_12K:
  361                 return 16 * 1024;
  362         default:
  363                 WARN_ON(1);
  364                 return 0;
  365         }
  366 }
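/*
 * Worked example (editorial): for IWL_AMSDU_8K, iwl_trans_get_rb_size()
 * returns 8192 and, with 4 KiB pages, iwl_trans_get_rb_size_order()
 * returns get_order(8192) == 1, i.e. an allocation of two contiguous
 * pages.  Note that IWL_AMSDU_12K maps to a 16 KiB buffer, the next
 * power of two.
 */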
  367 
  368 struct iwl_hcmd_names {
  369         u8 cmd_id;
  370         const char *const cmd_name;
  371 };
  372 
  373 #define HCMD_NAME(x)    \
  374         { .cmd_id = x, .cmd_name = #x }
  375 
  376 struct iwl_hcmd_arr {
  377         const struct iwl_hcmd_names *arr;
  378         int size;
  379 };
  380 
  381 #define HCMD_ARR(x)     \
  382         { .arr = x, .size = ARRAY_SIZE(x) }
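/*
 * Illustrative sketch (editorial): how an op_mode can build its
 * command-name tables for debugging with HCMD_NAME()/HCMD_ARR().  The
 * command ids and array names below are hypothetical; entries should be
 * sorted by cmd_id (see iwl_cmd_groups_verify_sorted() below).
 */
#if 0	/* example only */
#define EXAMPLE_CMD_A 0x01	/* hypothetical */
#define EXAMPLE_CMD_B 0x02	/* hypothetical */

static const struct iwl_hcmd_names example_names[] = {
	HCMD_NAME(EXAMPLE_CMD_A),
	HCMD_NAME(EXAMPLE_CMD_B),
};

static const struct iwl_hcmd_arr example_command_groups[] = {
	[0] = HCMD_ARR(example_names),
};
#endif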
  383 
  384 /**
  385  * struct iwl_dump_sanitize_ops - dump sanitization operations
  386  * @frob_txf: Scrub the TX FIFO data
  387  * @frob_hcmd: Scrub a host command, the %hcmd pointer is to the header
  388  *      but that might be short or long (&struct iwl_cmd_header or
  389  *      &struct iwl_cmd_header_wide)
  390  * @frob_mem: Scrub memory data
  391  */
  392 struct iwl_dump_sanitize_ops {
  393         void (*frob_txf)(void *ctx, void *buf, size_t buflen);
  394         void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
  395         void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);
  396 };
  397 
  398 /**
  399  * struct iwl_trans_config - transport configuration
  400  *
  401  * @op_mode: pointer to the upper layer.
  402  * @cmd_queue: the index of the command queue.
  403  *      Must be set before start_fw.
  404  * @cmd_fifo: the fifo for host commands
  405  * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
  406  * @no_reclaim_cmds: Some devices erroneously don't set the
  407  *      SEQ_RX_FRAME bit on some notifications, this is the
  408  *      list of such notifications to filter. Max length is
  409  *      %MAX_NO_RECLAIM_CMDS.
  410  * @n_no_reclaim_cmds: # of commands in list
  411  * @rx_buf_size: RX buffer size needed for A-MSDUs
  412  *      if unset 4k will be the RX buffer size
  413  * @bc_table_dword: set to true if the BC table expects the byte count to be
  414  *      in DWORD (as opposed to bytes)
  415  * @scd_set_active: should the transport configure the SCD for HCMD queue
  416  * @command_groups: array of command groups, each member is an array of the
  417  *      commands in the group; for debugging only
  418  * @command_groups_size: number of command groups, to avoid illegal access
  419  * @cb_data_offs: offset inside skb->cb to store transport data at, must have
  420  *      space for at least two pointers
  421  * @fw_reset_handshake: firmware supports reset flow handshake
  422  * @queue_alloc_cmd_ver: queue allocation command version, set to 0
  423  *      for using the older SCD_QUEUE_CFG, set to the version of
  424  *      SCD_QUEUE_CONFIG_CMD otherwise.
  425  */
  426 struct iwl_trans_config {
  427         struct iwl_op_mode *op_mode;
  428 
  429         u8 cmd_queue;
  430         u8 cmd_fifo;
  431         unsigned int cmd_q_wdg_timeout;
  432         const u8 *no_reclaim_cmds;
  433         unsigned int n_no_reclaim_cmds;
  434 
  435         enum iwl_amsdu_size rx_buf_size;
  436         bool bc_table_dword;
  437         bool scd_set_active;
  438         const struct iwl_hcmd_arr *command_groups;
  439         int command_groups_size;
  440 
  441         u8 cb_data_offs;
  442         bool fw_reset_handshake;
  443         u8 queue_alloc_cmd_ver;
  444 };
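/*
 * Illustrative sketch (editorial): a minimal transport configuration as an
 * op_mode might fill it before starting the firmware, reusing
 * example_command_groups from the sketch above.  The queue and FIFO
 * numbers are hypothetical.
 */
#if 0	/* example only */
static void example_configure(struct iwl_trans *trans,
			      struct iwl_op_mode *op_mode)
{
	struct iwl_trans_config trans_cfg = {
		.op_mode = op_mode,
		.cmd_queue = 0,				/* hypothetical */
		.cmd_fifo = 7,				/* hypothetical */
		.rx_buf_size = IWL_AMSDU_4K,		/* the default size */
		.command_groups = example_command_groups,
		.command_groups_size = ARRAY_SIZE(example_command_groups),
		.queue_alloc_cmd_ver = 0,	/* use the older SCD_QUEUE_CFG */
	};

	iwl_trans_configure(trans, &trans_cfg);
}
#endif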
  445 
  446 struct iwl_trans_dump_data {
  447         u32 len;
  448         u8 data[];
  449 };
  450 
  451 struct iwl_trans;
  452 
  453 struct iwl_trans_txq_scd_cfg {
  454         u8 fifo;
  455         u8 sta_id;
  456         u8 tid;
  457         bool aggregate;
  458         int frame_limit;
  459 };
  460 
  461 /**
  462  * struct iwl_trans_rxq_dma_data - RX queue DMA data
  463  * @fr_bd_cb: DMA address of free BD cyclic buffer
  464  * @fr_bd_wid: Initial write index of the free BD cyclic buffer
  465  * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
  466  * @ur_bd_cb: DMA address of used BD cyclic buffer
  467  */
  468 struct iwl_trans_rxq_dma_data {
  469         u64 fr_bd_cb;
  470         u32 fr_bd_wid;
  471         u64 urbd_stts_wrptr;
  472         u64 ur_bd_cb;
  473 };
  474 
  475 /**
  476  * struct iwl_trans_ops - transport specific operations
  477  *
  478  * All the handlers MUST be implemented
  479  *
  480  * @start_hw: starts the HW. From that point on, the HW can send interrupts.
  481  *      May sleep.
  482  * @op_mode_leave: Turn off the HW RF kill indication if on
  483  *      May sleep
  484  * @start_fw: allocates and inits all the resources for the transport
  485  *      layer. Also kick a fw image.
  486  *      May sleep
  487  * @fw_alive: called when the fw sends alive notification. If the fw provides
  488  *      the SCD base address in SRAM, then provide it here, or 0 otherwise.
  489  *      May sleep
  490  * @stop_device: stops the whole device (embedded CPU put to reset) and stops
  491  *      the HW. From that point on, the HW will be stopped but will still issue
  492  *      an interrupt if the HW RF kill switch is triggered.
  493  *      This callback must do the right thing and not crash even if %start_hw()
  494  *      was called but not %start_fw(). May sleep.
  495  * @d3_suspend: put the device into the correct mode for WoWLAN during
  496  *      suspend. This is optional, if not implemented WoWLAN will not be
  497  *      supported. This callback may sleep.
  498  * @d3_resume: resume the device after WoWLAN, enabling the opmode to
  499  *      talk to the WoWLAN image to get its status. This is optional, if not
  500  *      implemented WoWLAN will not be supported. This callback may sleep.
  501  * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
  502  *      If RFkill is asserted in the middle of a SYNC host command, it must
  503  *      return -ERFKILL straight away.
  504  *      May sleep only if CMD_ASYNC is not set
  505  * @tx: send an skb. The transport relies on the op_mode to zero
  506  *      the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
  507  *      the CSUM will be taken care of (TCP CSUM and IP header in case of
  508  *      IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
  509  *      header if it is IPv4.
  510  *      Must be atomic
  511  * @reclaim: free packets up to ssn. Returns a list of freed packets.
  512  *      Must be atomic
  513  * @txq_enable: setup a queue. To setup an AC queue, use the
  514  *      iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
  515  *      this one. The op_mode must not configure the HCMD queue. The scheduler
  516  *      configuration may be %NULL, in which case the hardware will not be
  517  *      configured. If true is returned, the operation mode needs to increment
  518  *      the sequence number of the packets routed to this queue because of a
  519  *      hardware scheduler bug. May sleep.
  520  * @txq_disable: de-configure a Tx queue that was set up to send A-MPDUs
  521  *      Must be atomic
  522  * @txq_set_shared_mode: change Tx queue shared/unshared marking
  523  * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
  524  * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
  525  * @freeze_txq_timer: prevents the timer of the queue from firing until the
  526  *      queue is set to awake. Must be atomic.
  527  * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
  528  *      that the transport needs to refcount the calls since this function
  529  *      will be called several times with block = true, and then the queues
  530  *      need to be unblocked only after the same number of calls with
  531  *      block = false.
  532  * @write8: write a u8 to a register at offset ofs from the BAR
  533  * @write32: write a u32 to a register at offset ofs from the BAR
  534  * @read32: read a u32 register at offset ofs from the BAR
  535  * @read_prph: read a DWORD from a periphery register
  536  * @write_prph: write a DWORD to a periphery register
  537  * @read_mem: read device's SRAM in DWORD
  538  * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
  539  *      will be zeroed.
  540  * @read_config32: read a u32 value from the device's config space at
  541  *      the given offset.
  542  * @configure: configure parameters required by the transport layer from
  543  *      the op_mode. May be called several times before start_fw, can't be
  544  *      called after that.
  545  * @set_pmi: set the power pmi state
  546  * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
  547  *      Sleeping is not allowed between grab_nic_access and
  548  *      release_nic_access.
  549  * @release_nic_access: let the NIC go to sleep. The "flags" parameter
  550  *      must be the same one that was sent before to the grab_nic_access.
  551  * @set_bits_mask: set SRAM register according to value and mask.
  552  * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
  553  *      TX'ed commands and similar. The buffer will be vfree'd by the caller.
  554  *      Note that the transport must fill in the proper file headers.
  555  * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
  556  *      of the trans debugfs
  557  * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
  558  *      context info.
  559  * @interrupts: disable/enable interrupts to transport
  560  */
  561 struct iwl_trans_ops {
  562 
  563         int (*start_hw)(struct iwl_trans *iwl_trans);
  564         void (*op_mode_leave)(struct iwl_trans *iwl_trans);
  565         int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
  566                         bool run_in_rfkill);
  567         void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
  568         void (*stop_device)(struct iwl_trans *trans);
  569 
  570         int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
  571         int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
  572                          bool test, bool reset);
  573 
  574         int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
  575 
  576         int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
  577                   struct iwl_device_tx_cmd *dev_cmd, int queue);
  578         void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
  579                         struct sk_buff_head *skbs);
  580 
  581         void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
  582 
  583         bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
  584                            const struct iwl_trans_txq_scd_cfg *cfg,
  585                            unsigned int queue_wdg_timeout);
  586         void (*txq_disable)(struct iwl_trans *trans, int queue,
  587                             bool configure_scd);
  588         /* 22000 functions */
  589         int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
  590                          u32 sta_mask, u8 tid,
  591                          int size, unsigned int queue_wdg_timeout);
  592         void (*txq_free)(struct iwl_trans *trans, int queue);
  593         int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
  594                             struct iwl_trans_rxq_dma_data *data);
  595 
  596         void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
  597                                     bool shared);
  598 
  599         int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
  600         int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
  601         void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
  602                                  bool freeze);
  603         void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
  604 
  605         void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
  606         void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
  607         u32 (*read32)(struct iwl_trans *trans, u32 ofs);
  608         u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
  609         void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
  610         int (*read_mem)(struct iwl_trans *trans, u32 addr,
  611                         void *buf, int dwords);
  612         int (*write_mem)(struct iwl_trans *trans, u32 addr,
  613                          const void *buf, int dwords);
  614         int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
  615         void (*configure)(struct iwl_trans *trans,
  616                           const struct iwl_trans_config *trans_cfg);
  617         void (*set_pmi)(struct iwl_trans *trans, bool state);
  618         int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership);
  619         bool (*grab_nic_access)(struct iwl_trans *trans);
  620         void (*release_nic_access)(struct iwl_trans *trans);
  621         void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
  622                               u32 value);
  623 
  624         struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
  625                                                  u32 dump_mask,
  626                                                  const struct iwl_dump_sanitize_ops *sanitize_ops,
  627                                                  void *sanitize_ctx);
  628         void (*debugfs_cleanup)(struct iwl_trans *trans);
  629         void (*sync_nmi)(struct iwl_trans *trans);
  630         int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
  631         int (*set_reduce_power)(struct iwl_trans *trans,
  632                                 const void *data, u32 len);
  633         void (*interrupts)(struct iwl_trans *trans, bool enable);
  634         int (*imr_dma_data)(struct iwl_trans *trans,
  635                             u32 dst_addr, u64 src_addr,
  636                             u32 byte_cnt);
  637 
  638 };
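/*
 * Illustrative sketch (editorial): a bus-specific transport fills an
 * iwl_trans_ops with its handlers.  The example_* names below are
 * hypothetical stand-ins for real implementations such as the PCIe ones.
 */
#if 0	/* example only */
static const struct iwl_trans_ops example_trans_ops = {
	.start_hw = example_start_hw,
	.op_mode_leave = example_op_mode_leave,
	.start_fw = example_start_fw,
	.fw_alive = example_fw_alive,
	.stop_device = example_stop_device,
	.send_cmd = example_send_cmd,
	.tx = example_tx_handler,
	.reclaim = example_reclaim,
	/* ... remaining handlers ... */
};
#endif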
  639 
  640 /**
  641  * enum iwl_trans_state - state of the transport layer
  642  *
  643  * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
  644  * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet
  645  * @IWL_TRANS_FW_ALIVE: FW has sent an alive response
  646  */
  647 enum iwl_trans_state {
  648         IWL_TRANS_NO_FW,
  649         IWL_TRANS_FW_STARTED,
  650         IWL_TRANS_FW_ALIVE,
  651 };
  652 
  653 /**
  654  * DOC: Platform power management
  655  *
  656  * In system-wide power management the entire platform goes into a low
  657  * power state (e.g. idle or suspend to RAM) at the same time and the
  658  * device is configured as a wakeup source for the entire platform.
  659  * This is usually triggered by userspace activity (e.g. the user
  660  * presses the suspend button or a power management daemon decides to
  661  * put the platform in low power mode).  The device's behavior in this
  662  * mode is dictated by the wake-on-WLAN configuration.
  663  *
  664  * The terms used for the device's behavior are as follows:
  665  *
  666  *      - D0: the device is fully powered and the host is awake;
  667  *      - D3: the device is in low power mode and only reacts to
  668  *              specific events (e.g. magic-packet received or scan
  669  *              results found);
  670  *
  671  * These terms reflect the power modes in the firmware and are not to
  672  * be confused with the physical device power state.
  673  */
  674 
  675 /**
  676  * enum iwl_plat_pm_mode - platform power management mode
  677  *
  678  * This enumeration describes the device's platform power management
  679  * behavior when in system-wide suspend (i.e. WoWLAN).
  680  *
  681  * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
  682  *      device.  In system-wide suspend mode, it means that all
  683  *      connections will be closed automatically by mac80211 before
  684  *      the platform is suspended.
  685  * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
  686  */
  687 enum iwl_plat_pm_mode {
  688         IWL_PLAT_PM_MODE_DISABLED,
  689         IWL_PLAT_PM_MODE_D3,
  690 };
  691 
  692 /**
  693  * enum iwl_ini_cfg_state
  694  * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
  695  * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
  696  * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
  697  *      are corrupted. The rest of the debug TLVs will still be used
  698  */
  699 enum iwl_ini_cfg_state {
  700         IWL_INI_CFG_STATE_NOT_LOADED,
  701         IWL_INI_CFG_STATE_LOADED,
  702         IWL_INI_CFG_STATE_CORRUPTED,
  703 };
  704 
  705 /* Max time to wait for nmi interrupt */
  706 #define IWL_TRANS_NMI_TIMEOUT (HZ / 4)
  707 
  708 /**
  709  * struct iwl_dram_data
  710  * @physical: physical (DMA) address of the block/page
  711  * @block: pointer to the allocated block/page
  712  * @size: size of the block/page
  713  */
  714 struct iwl_dram_data {
  715         dma_addr_t physical;
  716         void *block;
  717         int size;
  718 };
  719 
  720 /**
  721  * struct iwl_fw_mon - fw monitor per allocation id
  722  * @num_frags: number of fragments
  723  * @frags: an array of DRAM buffer fragments
  724  */
  725 struct iwl_fw_mon {
  726         u32 num_frags;
  727         struct iwl_dram_data *frags;
  728 };
  729 
  730 /**
  731  * struct iwl_self_init_dram - dram data used by self init process
  732  * @fw: lmac and umac dram data
  733  * @fw_cnt: total number of items in array
  734  * @paging: paging dram data
  735  * @paging_cnt: total number of items in array
  736  */
  737 struct iwl_self_init_dram {
  738         struct iwl_dram_data *fw;
  739         int fw_cnt;
  740         struct iwl_dram_data *paging;
  741         int paging_cnt;
  742 };
  743 
  744 /**
  745  * struct iwl_imr_data - imr dram data used during debug process
  746  * @imr_enable: imr enable status received from fw
  747  * @imr_size: imr dram size received from fw
  748  * @sram_addr: sram address from debug tlv
  749  * @sram_size: sram size from debug tlv
  750  * @imr2sram_remainbyte: bytes remaining after each DMA transfer
  751  * @imr_curr_addr: current dst address used during dma transfer
  752  * @imr_base_addr: imr address received from fw
  753  */
  754 struct iwl_imr_data {
  755         u32 imr_enable;
  756         u32 imr_size;
  757         u32 sram_addr;
  758         u32 sram_size;
  759         u32 imr2sram_remainbyte;
  760         u64 imr_curr_addr;
  761         __le64 imr_base_addr;
  762 };
  763 
  764 /**
  765  * struct iwl_trans_debug - transport debug related data
  766  *
  767  * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
  768  * @rec_on: true iff there is a fw debug recording currently active
  769  * @dest_tlv: points to the destination TLV for debug
  770  * @conf_tlv: array of pointers to configuration TLVs for debug
  771  * @trigger_tlv: array of pointers to triggers TLVs for debug
  772  * @lmac_error_event_table: addrs of lmacs error tables
  773  * @umac_error_event_table: addr of umac error table
  774  * @tcm_error_event_table: address(es) of TCM error table(s)
  775  * @rcm_error_event_table: address(es) of RCM error table(s)
  776  * @error_event_table_tlv_status: bitmap that indicates which error table
  777  *      pointers were received via TLV. Uses &enum iwl_error_event_table_status
  778  * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
  779  * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
  780  * @fw_mon_cfg: debug buffer allocation configuration
  781  * @fw_mon_ini: DRAM buffer fragments per allocation id
  782  * @fw_mon: DRAM buffer for firmware monitor
  783  * @hw_error: equals true if hw error interrupt was received from the FW
  784  * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
  785  * @active_regions: active regions
  786  * @debug_info_tlv_list: list of debug info TLVs
  787  * @time_point: array of debug time points
  788  * @periodic_trig_list: periodic triggers list
  789  * @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
  790  * @ucode_preset: preset based on ucode
  791  */
  792 struct iwl_trans_debug {
  793         u8 n_dest_reg;
  794         bool rec_on;
  795 
  796         const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
  797         const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
  798         struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;
  799 
  800         u32 lmac_error_event_table[2];
  801         u32 umac_error_event_table;
  802         u32 tcm_error_event_table[2];
  803         u32 rcm_error_event_table[2];
  804         unsigned int error_event_table_tlv_status;
  805 
  806         enum iwl_ini_cfg_state internal_ini_cfg;
  807         enum iwl_ini_cfg_state external_ini_cfg;
  808 
  809         struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
  810         struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];
  811 
  812         struct iwl_dram_data fw_mon;
  813 
  814         bool hw_error;
  815         enum iwl_fw_ini_buffer_location ini_dest;
  816 
  817         u64 unsupported_region_msk;
  818         struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
  819         struct list_head debug_info_tlv_list;
  820         struct iwl_dbg_tlv_time_point_data
  821                 time_point[IWL_FW_INI_TIME_POINT_NUM];
  822         struct list_head periodic_trig_list;
  823 
  824         u32 domains_bitmap;
  825         u32 ucode_preset;
  826         bool restart_required;
  827         u32 last_tp_resetfw;
  828         struct iwl_imr_data imr_data;
  829 };
  830 
  831 struct iwl_dma_ptr {
  832         dma_addr_t dma;
  833         void *addr;
  834         size_t size;
  835 };
  836 
  837 struct iwl_cmd_meta {
  838         /* only for SYNC commands, iff the reply skb is wanted */
  839         struct iwl_host_cmd *source;
  840         u32 flags;
  841         u32 tbs;
  842 };
  843 
  844 /*
  845  * The FH will write back to the first TB only, so we need to copy some data
  846  * into the buffer regardless of whether it should be mapped or not.
  847  * This indicates how big the first TB must be to include the scratch buffer
  848  * and the assigned PN.
  849  * Since PN location is 8 bytes at offset 12, it's 20 now.
  850  * If we make it bigger, then allocations will be bigger and copying slower, so
  851  * that's probably not useful.
  852  */
  853 #define IWL_FIRST_TB_SIZE       20
  854 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
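/*
 * Worked example (editorial): ALIGN(20, 64) rounds 20 up to the next
 * multiple of 64, so IWL_FIRST_TB_SIZE_ALIGN == 64 and each entry of the
 * first-TB buffer array below occupies 64 bytes.
 */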
  855 
  856 struct iwl_pcie_txq_entry {
  857         void *cmd;
  858         struct sk_buff *skb;
  859         /* buffer to free after command completes */
  860         const void *free_buf;
  861         struct iwl_cmd_meta meta;
  862 };
  863 
  864 struct iwl_pcie_first_tb_buf {
  865         u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
  866 };
  867 
  868 /**
  869  * struct iwl_txq - Tx Queue for DMA
  870  * @q: generic Rx/Tx queue descriptor
  871  * @tfds: transmit frame descriptors (DMA memory)
  872  * @first_tb_bufs: start of command headers, including scratch buffers, for
  873  *      the writeback -- this is DMA memory and an array holding one buffer
  874  *      for each command on the queue
  875  * @first_tb_dma: DMA address for the first_tb_bufs start
  876  * @entries: transmit entries (driver state)
  877  * @lock: queue lock
  878  * @stuck_timer: timer that fires if queue gets stuck
  879  * @trans: pointer back to transport (for timer)
  880  * @need_update: indicates need to update read/write index
  881  * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
  882  * @wd_timeout: queue watchdog timeout (jiffies) - per queue
  883  * @frozen: tx stuck queue timer is frozen
  884  * @frozen_expiry_remainder: remember how long until the timer fires
  885  * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
  886  * @write_ptr: first empty entry (index), the host write pointer (host_w)
  887  * @read_ptr: last used entry (index), the host read pointer (host_r)
  888  * @dma_addr:  physical addr for BD's
  889  * @n_window: safe queue window
  890  * @id: queue id
  891  * @low_mark: low watermark, resume queue if free space more than this
  892  * @high_mark: high watermark, stop queue if free space less than this
  893  *
  894  * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
  895  * descriptors) and required locking structures.
  896  *
  897  * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
  898  * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
  899  * there might be HW changes in the future). For the normal TX
  900  * queues, n_window, which is the size of the software queue data
  901  * is also 256; however, for the command queue, n_window is only
  902  * 32 since we don't need so many commands pending. Since the HW
  903  * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
  904  * This means that we end up with the following:
  905  *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
  906  *  SW entries:           | 0      | ... | 31          |
  907  * where N is a number between 0 and 7. This means that the SW
  908  * data is a window overlaid over the HW queue.
  909  */
  910 struct iwl_txq {
  911         void *tfds;
  912         struct iwl_pcie_first_tb_buf *first_tb_bufs;
  913         dma_addr_t first_tb_dma;
  914         struct iwl_pcie_txq_entry *entries;
  915         /* lock for syncing changes on the queue */
  916         spinlock_t lock;
  917         unsigned long frozen_expiry_remainder;
  918         struct timer_list stuck_timer;
  919         struct iwl_trans *trans;
  920         bool need_update;
  921         bool frozen;
  922         bool ampdu;
  923         int block;
  924         unsigned long wd_timeout;
  925         struct sk_buff_head overflow_q;
  926         struct iwl_dma_ptr bc_tbl;
  927 
  928         int write_ptr;
  929         int read_ptr;
  930         dma_addr_t dma_addr;
  931         int n_window;
  932         u32 id;
  933         int low_mark;
  934         int high_mark;
  935 
  936         bool overflow_tx;
  937 };
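/*
 * Worked example (editorial): with n_window == 32 on the command queue,
 * a HW index maps into the SW window by masking with (n_window - 1), so
 * HW entries 3, 35, 67, ... all land in SW entry 3, matching the window
 * picture in the comment above.
 */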
  938 
  939 /**
  940  * struct iwl_trans_txqs - transport tx queues data
  941  *
  942  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
  943  * @page_offs: offset from skb->cb to mac header page pointer
  944  * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
  945  * @queue_used: bit mask of used queues
  946  * @queue_stopped: bit mask of stopped queues
  947  * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
  948  * @queue_alloc_cmd_ver: queue allocation command version
  949  */
  950 struct iwl_trans_txqs {
  951         unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
  952         unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
  953         struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
  954         struct dma_pool *bc_pool;
  955         size_t bc_tbl_size;
  956         bool bc_table_dword;
  957         u8 page_offs;
  958         u8 dev_cmd_offs;
  959         struct iwl_tso_hdr_page __percpu *tso_hdr_page;
  960 
  961         struct {
  962                 u8 fifo;
  963                 u8 q_id;
  964                 unsigned int wdg_timeout;
  965         } cmd;
  966 
  967         struct {
  968                 u8 max_tbs;
  969                 u16 size;
  970                 u8 addr_size;
  971         } tfd;
  972 
  973         struct iwl_dma_ptr scd_bc_tbls;
  974 
  975         u8 queue_alloc_cmd_ver;
  976 };
  977 
  978 /**
  979  * struct iwl_trans - transport common data
  980  *
  981  * @csme_own: true if we couldn't get ownership on the device
  982  * @ops: pointer to iwl_trans_ops
  983  * @op_mode: pointer to the op_mode
  984  * @trans_cfg: the trans-specific configuration part
  985  * @cfg: pointer to the configuration
  986  * @drv: pointer to iwl_drv
  987  * @status: a bit-mask of transport status flags
  988  * @dev: pointer to struct device * that represents the device
  989  * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
  990  *      0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
  991  * @hw_rf_id: a u32 with the device RF ID
  992  * @hw_id: a u32 with the ID of the device / sub-device.
  993  *      Set during transport allocation.
  994  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
  995  * @hw_rev_step: The mac step of the HW
  996  * @pm_support: set to true in start_hw if link pm is supported
  997  * @ltr_enabled: set to true if the LTR is enabled
  998  * @wide_cmd_header: true when ucode supports wide command header format
  999  * @wait_command_queue: wait queue for sync commands
 1000  * @num_rx_queues: number of RX queues allocated by the transport;
 1001  *      the transport must set this before calling iwl_drv_start()
 1002  * @iml_len: the length of the image loader
 1003  * @iml: a pointer to the image loader itself
 1004  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 1005  *      The user should use iwl_trans_{alloc,free}_tx_cmd.
 1006  * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 1007  *      starting the firmware, used for tracing
 1008  * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 1009  *      start of the 802.11 header in the @rx_mpdu_cmd
 1010  * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
 1011  * @system_pm_mode: the system-wide power management mode in use.
 1012  *      This mode is set dynamically, depending on the WoWLAN values
 1013  *      configured from the userspace at runtime.
 1014  * @txqs: transport tx queues data.
 1015  */
 1016 struct iwl_trans {
 1017         bool csme_own;
 1018         const struct iwl_trans_ops *ops;
 1019         struct iwl_op_mode *op_mode;
 1020         const struct iwl_cfg_trans_params *trans_cfg;
 1021         const struct iwl_cfg *cfg;
 1022         struct iwl_drv *drv;
 1023         enum iwl_trans_state state;
 1024         unsigned long status;
 1025 
 1026         struct device *dev;
 1027         u32 max_skb_frags;
 1028         u32 hw_rev;
 1029         u32 hw_rev_step;
 1030         u32 hw_rf_id;
 1031         u32 hw_id;
 1032         char hw_id_str[52];
 1033         u32 sku_id[3];
 1034 
 1035         u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
 1036 
 1037         bool pm_support;
 1038         bool ltr_enabled;
 1039         u8 pnvm_loaded:1;
 1040         u8 reduce_power_loaded:1;
 1041 
 1042         const struct iwl_hcmd_arr *command_groups;
 1043         int command_groups_size;
 1044         bool wide_cmd_header;
 1045 
 1046         wait_queue_head_t wait_command_queue;
 1047         u8 num_rx_queues;
 1048 
 1049         size_t iml_len;
 1050         u8 *iml;
 1051 
 1052         /* The following fields are internal only */
 1053         struct kmem_cache *dev_cmd_pool;
 1054         char dev_cmd_pool_name[50];
 1055 
 1056         struct dentry *dbgfs_dir;
 1057 
 1058 #ifdef CONFIG_LOCKDEP
 1059         struct lockdep_map sync_cmd_lockdep_map;
 1060 #endif
 1061 
 1062         struct iwl_trans_debug dbg;
 1063         struct iwl_self_init_dram init_dram;
 1064 
 1065         enum iwl_plat_pm_mode system_pm_mode;
 1066 
 1067         const char *name;
 1068         struct iwl_trans_txqs txqs;
 1069 
 1070         /* pointer to trans specific struct */
 1071         /* Ensure that this pointer is always aligned to sizeof(void *) */
 1072         char trans_specific[] __aligned(sizeof(void *));
 1073 };
 1074 
 1075 const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
 1076 int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
 1077 
 1078 static inline void iwl_trans_configure(struct iwl_trans *trans,
 1079                                        const struct iwl_trans_config *trans_cfg)
 1080 {
 1081         trans->op_mode = trans_cfg->op_mode;
 1082 
 1083         trans->ops->configure(trans, trans_cfg);
 1084         WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
 1085 }
 1086 
 1087 static inline int iwl_trans_start_hw(struct iwl_trans *trans)
 1088 {
 1089         might_sleep();
 1090 
 1091         return trans->ops->start_hw(trans);
 1092 }
 1093 
 1094 static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
 1095 {
 1096         might_sleep();
 1097 
 1098         if (trans->ops->op_mode_leave)
 1099                 trans->ops->op_mode_leave(trans);
 1100 
 1101         trans->op_mode = NULL;
 1102 
 1103         trans->state = IWL_TRANS_NO_FW;
 1104 }
 1105 
 1106 static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 1107 {
 1108         might_sleep();
 1109 
 1110         trans->state = IWL_TRANS_FW_ALIVE;
 1111 
 1112         trans->ops->fw_alive(trans, scd_addr);
 1113 }
 1114 
 1115 static inline int iwl_trans_start_fw(struct iwl_trans *trans,
 1116                                      const struct fw_img *fw,
 1117                                      bool run_in_rfkill)
 1118 {
 1119         int ret;
 1120 
 1121         might_sleep();
 1122 
 1123         WARN_ON_ONCE(!trans->rx_mpdu_cmd);
 1124 
 1125         clear_bit(STATUS_FW_ERROR, &trans->status);
 1126         ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
 1127         if (ret == 0)
 1128                 trans->state = IWL_TRANS_FW_STARTED;
 1129 
 1130         return ret;
 1131 }
 1132 
 1133 static inline void iwl_trans_stop_device(struct iwl_trans *trans)
 1134 {
 1135         might_sleep();
 1136 
 1137         trans->ops->stop_device(trans);
 1138 
 1139         trans->state = IWL_TRANS_NO_FW;
 1140 }
 1141 
 1142 static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
 1143                                        bool reset)
 1144 {
 1145         might_sleep();
 1146         if (!trans->ops->d3_suspend)
 1147                 return 0;
 1148 
 1149         return trans->ops->d3_suspend(trans, test, reset);
 1150 }
 1151 
 1152 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
 1153                                       enum iwl_d3_status *status,
 1154                                       bool test, bool reset)
 1155 {
 1156         might_sleep();
 1157         if (!trans->ops->d3_resume)
 1158                 return 0;
 1159 
 1160         return trans->ops->d3_resume(trans, status, test, reset);
 1161 }
 1162 
 1163 static inline struct iwl_trans_dump_data *
 1164 iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
 1165                     const struct iwl_dump_sanitize_ops *sanitize_ops,
 1166                     void *sanitize_ctx)
 1167 {
 1168         if (!trans->ops->dump_data)
 1169                 return NULL;
 1170         return trans->ops->dump_data(trans, dump_mask,
 1171                                      sanitize_ops, sanitize_ctx);
 1172 }
 1173 
 1174 static inline struct iwl_device_tx_cmd *
 1175 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
 1176 {
 1177         return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
 1178 }
 1179 
 1180 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 1181 
 1182 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
 1183                                          struct iwl_device_tx_cmd *dev_cmd)
 1184 {
 1185         kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
 1186 }
 1187 
 1188 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
 1189                                struct iwl_device_tx_cmd *dev_cmd, int queue)
 1190 {
 1191         if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
 1192                 return -EIO;
 1193 
 1194         if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 1195                 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 1196                 return -EIO;
 1197         }
 1198 
 1199         return trans->ops->tx(trans, skb, dev_cmd, queue);
 1200 }
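/*
 * Illustrative sketch (editorial): the allocate/transmit/free pattern for
 * TX commands using the wrappers above.  Queue number 0 is hypothetical,
 * and filling dev_cmd->hdr and the payload is left out.
 */
#if 0	/* example only */
static int example_tx_one(struct iwl_trans *trans, struct sk_buff *skb)
{
	struct iwl_device_tx_cmd *dev_cmd;
	int ret;

	dev_cmd = iwl_trans_alloc_tx_cmd(trans);
	if (!dev_cmd)
		return -ENOMEM;

	/* ... fill dev_cmd->hdr and the TX command payload here ... */

	ret = iwl_trans_tx(trans, skb, dev_cmd, 0);
	if (ret)	/* on failure the command was not consumed */
		iwl_trans_free_tx_cmd(trans, dev_cmd);

	return ret;
}
#endif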
 1201 
 1202 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
 1203                                      int ssn, struct sk_buff_head *skbs)
 1204 {
 1205         if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 1206                 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 1207                 return;
 1208         }
 1209 
 1210         trans->ops->reclaim(trans, queue, ssn, skbs);
 1211 }
 1212 
 1213 static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
 1214                                         int ptr)
 1215 {
 1216         if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 1217                 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 1218                 return;
 1219         }
 1220 
 1221         trans->ops->set_q_ptrs(trans, queue, ptr);
 1222 }
 1223 
 1224 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
 1225                                          bool configure_scd)
 1226 {
 1227         trans->ops->txq_disable(trans, queue, configure_scd);
 1228 }
 1229 
 1230 static inline bool
 1231 iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
 1232                          const struct iwl_trans_txq_scd_cfg *cfg,
 1233                          unsigned int queue_wdg_timeout)
 1234 {
 1235         might_sleep();
 1236 
 1237         if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 1238                 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 1239                 return false;
 1240         }
 1241 
 1242         return trans->ops->txq_enable(trans, queue, ssn,
 1243                                       cfg, queue_wdg_timeout);
 1244 }
 1245 
 1246 static inline int
 1247 iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
 1248                            struct iwl_trans_rxq_dma_data *data)
 1249 {
 1250         if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
 1251                 return -ENOTSUPP;
 1252 
 1253         return trans->ops->rxq_dma_data(trans, queue, data);
 1254 }
 1255 
 1256 static inline void
 1257 iwl_trans_txq_free(struct iwl_trans *trans, int queue)
 1258 {
 1259         if (WARN_ON_ONCE(!trans->ops->txq_free))
 1260                 return;
 1261 
 1262         trans->ops->txq_free(trans, queue);
 1263 }
 1264 
 1265 static inline int
 1266 iwl_trans_txq_alloc(struct iwl_trans *trans,
 1267                     u32 flags, u32 sta_mask, u8 tid,
 1268                     int size, unsigned int wdg_timeout)
 1269 {
 1270         might_sleep();
 1271 
 1272         if (WARN_ON_ONCE(!trans->ops->txq_alloc))
 1273                 return -ENOTSUPP;
 1274 
 1275         if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 1276                 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 1277                 return -EIO;
 1278         }
 1279 
 1280         return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
 1281                                      size, wdg_timeout);
 1282 }
 1283 
 1284 static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
 1285                                                  int queue, bool shared_mode)
 1286 {
 1287         if (trans->ops->txq_set_shared_mode)
 1288                 trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
 1289 }
 1290 
 1291 static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
 1292                                         int fifo, int sta_id, int tid,
 1293                                         int frame_limit, u16 ssn,
 1294                                         unsigned int queue_wdg_timeout)
 1295 {
 1296         struct iwl_trans_txq_scd_cfg cfg = {
 1297                 .fifo = fifo,
 1298                 .sta_id = sta_id,
 1299                 .tid = tid,
 1300                 .frame_limit = frame_limit,
 1301                 .aggregate = sta_id >= 0,
 1302         };
 1303 
 1304         iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
 1305 }
 1306 
 1307 static inline
 1308 void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
 1309                              unsigned int queue_wdg_timeout)
 1310 {
 1311         struct iwl_trans_txq_scd_cfg cfg = {
 1312                 .fifo = fifo,
 1313                 .sta_id = -1,
 1314                 .tid = IWL_MAX_TID_COUNT,
 1315                 .frame_limit = IWL_FRAME_LIMIT,
 1316                 .aggregate = false,
 1317         };
 1318 
 1319         iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
 1320 }
 1321 
 1322 static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
 1323                                               unsigned long txqs,
 1324                                               bool freeze)
 1325 {
 1326         if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 1327                 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 1328                 return;
 1329         }
 1330 
 1331         if (trans->ops->freeze_txq_timer)
 1332                 trans->ops->freeze_txq_timer(trans, txqs, freeze);
 1333 }
 1334 
 1335 static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
 1336                                             bool block)
 1337 {
 1338         if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 1339                 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 1340                 return;
 1341         }
 1342 
 1343         if (trans->ops->block_txq_ptrs)
 1344                 trans->ops->block_txq_ptrs(trans, block);
 1345 }
 1346 
 1347 static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
 1348                                                  u32 txqs)
 1349 {
 1350         if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
 1351                 return -ENOTSUPP;
 1352 
 1353         /* No need to wait if the firmware is not alive; tell the caller */
 1354         if (trans->state != IWL_TRANS_FW_ALIVE) {
 1355                 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 1356                 return -EIO;
 1357         }
 1358 
 1359         return trans->ops->wait_tx_queues_empty(trans, txqs);
 1360 }
 1361 
 1362 static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
 1363 {
 1364         if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
 1365                 return -ENOTSUPP;
 1366 
 1367         if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
 1368                 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
 1369                 return -EIO;
 1370         }
 1371 
 1372         return trans->ops->wait_txq_empty(trans, queue);
 1373 }
 1374 
 1375 static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
 1376 {
 1377         trans->ops->write8(trans, ofs, val);
 1378 }
 1379 
 1380 static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
 1381 {
 1382         trans->ops->write32(trans, ofs, val);
 1383 }
 1384 
 1385 static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
 1386 {
 1387         return trans->ops->read32(trans, ofs);
 1388 }
 1389 
 1390 static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
 1391 {
 1392         return trans->ops->read_prph(trans, ofs);
 1393 }
 1394 
 1395 static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
 1396                                         u32 val)
 1397 {
 1398         trans->ops->write_prph(trans, ofs, val);
 1399 }
 1400 
 1401 static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
 1402                                      void *buf, int dwords)
 1403 {
 1404         return trans->ops->read_mem(trans, addr, buf, dwords);
 1405 }
 1406 
 1407 #define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)                   \
 1408         do {                                                                  \
 1409                 if (__builtin_constant_p(bufsize))                            \
 1410                         BUILD_BUG_ON((bufsize) % sizeof(u32));                \
 1411                 iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
 1412         } while (0)
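/*
 * Illustrative note: the transport reads whole dwords, so the macro
 * above rejects constant sizes that are not a multiple of sizeof(u32)
 * at build time.  A hypothetical caller sizes its buffer accordingly:
 */
static inline void example_read_block(struct iwl_trans *trans, u32 addr)
{
        u32 buf[4]; /* 16 bytes: a multiple of sizeof(u32) */

        /* a 15-byte buffer here would trip the BUILD_BUG_ON */
        iwl_trans_read_mem_bytes(trans, addr, buf, sizeof(buf));
}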
 1413 
 1414 static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
 1415                                           u32 dst_addr, u64 src_addr,
 1416                                           u32 byte_cnt)
 1417 {
 1418         if (trans->ops->imr_dma_data)
 1419                 return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
 1420         return 0;
 1421 }
 1422 
 1423 static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
 1424 {
 1425         u32 value;
 1426 
 1427         if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
 1428                 return 0xa5a5a5a5;
 1429 
 1430         return value;
 1431 }
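/*
 * Illustrative note: 0xa5a5a5a5 is a recognizable poison value, so a
 * caller that merely logs the result (e.g. this hypothetical debug
 * dump) can spot a failed read in its output without separate error
 * handling.
 */
static inline void example_dump_reg(struct iwl_trans *trans, u32 addr)
{
        IWL_ERR(trans, "mem 0x%08x = 0x%08x\n", addr,
                iwl_trans_read_mem32(trans, addr));
}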
 1432 
 1433 static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
 1434                                       const void *buf, int dwords)
 1435 {
 1436         return trans->ops->write_mem(trans, addr, buf, dwords);
 1437 }
 1438 
 1439 static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
 1440                                         u32 val)
 1441 {
 1442         return iwl_trans_write_mem(trans, addr, &val, 1);
 1443 }
 1444 
 1445 static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
 1446 {
 1447         if (trans->ops->set_pmi)
 1448                 trans->ops->set_pmi(trans, state);
 1449 }
 1450 
 1451 static inline int iwl_trans_sw_reset(struct iwl_trans *trans,
 1452                                      bool retake_ownership)
 1453 {
 1454         if (trans->ops->sw_reset)
 1455                 return trans->ops->sw_reset(trans, retake_ownership);
 1456         return 0;
 1457 }
 1458 
 1459 static inline void
 1460 iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
 1461 {
 1462         trans->ops->set_bits_mask(trans, reg, mask, value);
 1463 }
 1464 
 1465 #define iwl_trans_grab_nic_access(trans)                \
 1466         __cond_lock(nic_access,                         \
 1467                     likely((trans)->ops->grab_nic_access(trans)))
 1468 
 1469 static inline void __releases(nic_access)
 1470 iwl_trans_release_nic_access(struct iwl_trans *trans)
 1471 {
 1472         trans->ops->release_nic_access(trans);
 1473         __release(nic_access);
 1474 }
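/*
 * Illustrative sketch of the canonical access pattern: grab NIC access
 * (annotated for sparse via __cond_lock above), perform the register
 * I/O, then release.  The periphery register offset is hypothetical.
 */
static inline u32 example_read_under_nic_access(struct iwl_trans *trans)
{
        u32 val = 0;

        if (iwl_trans_grab_nic_access(trans)) {
                val = iwl_trans_read_prph(trans, 0xa0000 /* hypothetical */);
                iwl_trans_release_nic_access(trans);
        }

        return val;
}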
 1475 
 1476 static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
 1477 {
 1478         if (WARN_ON_ONCE(!trans->op_mode))
 1479                 return;
 1480 
 1481         /* prevent double restarts due to the same erroneous FW */
 1482         if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
 1483                 iwl_op_mode_nic_error(trans->op_mode, sync);
 1484                 trans->state = IWL_TRANS_NO_FW;
 1485         }
 1486 }
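/*
 * Illustrative note: test_and_set_bit() above makes error reporting
 * idempotent, so concurrent error paths may all report the same
 * firmware crash and only the first invocation restarts the op mode.
 */
static inline void example_report_fw_crash(struct iwl_trans *trans)
{
        iwl_trans_fw_error(trans, false); /* triggers the op-mode restart */
        iwl_trans_fw_error(trans, false); /* no-op: STATUS_FW_ERROR is set */
}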
 1487 
 1488 static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
 1489 {
 1490         return trans->state == IWL_TRANS_FW_ALIVE;
 1491 }
 1492 
 1493 static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
 1494 {
 1495         if (trans->ops->sync_nmi)
 1496                 trans->ops->sync_nmi(trans);
 1497 }
 1498 
 1499 void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
 1500                                   u32 sw_err_bit);
 1501 
 1502 static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
 1503                                      const void *data, u32 len)
 1504 {
 1505         if (trans->ops->set_pnvm) {
 1506                 int ret = trans->ops->set_pnvm(trans, data, len);
 1507 
 1508                 if (ret)
 1509                         return ret;
 1510         }
 1511 
 1512         trans->pnvm_loaded = true;
 1513 
 1514         return 0;
 1515 }
 1516 
 1517 static inline int iwl_trans_set_reduce_power(struct iwl_trans *trans,
 1518                                              const void *data, u32 len)
 1519 {
 1520         if (trans->ops->set_reduce_power) {
 1521                 int ret = trans->ops->set_reduce_power(trans, data, len);
 1522 
 1523                 if (ret)
 1524                         return ret;
 1525         }
 1526 
 1527         trans->reduce_power_loaded = true;
 1528         return 0;
 1529 }
 1530 
 1531 static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
 1532 {
 1533         return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
 1534                 trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
 1535 }
 1536 
 1537 static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
 1538 {
 1539         if (trans->ops->interrupts)
 1540                 trans->ops->interrupts(trans, enable);
 1541 }
 1542 
 1543 /*****************************************************
 1544  * transport helper functions
 1545  *****************************************************/
 1546 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
 1547                                   struct device *dev,
 1548                                   const struct iwl_trans_ops *ops,
 1549                                   const struct iwl_cfg_trans_params *cfg_trans);
 1550 int iwl_trans_init(struct iwl_trans *trans);
 1551 void iwl_trans_free(struct iwl_trans *trans);
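/*
 * Illustrative sketch of a bus driver probe path built on the helpers
 * above; the private-data struct and function names are hypothetical.
 * On success the transport is allocated and initialized; any failure
 * unwinds with iwl_trans_free().
 */
struct example_bus_priv {
        int irq;
};

static inline struct iwl_trans *
example_probe(struct device *dev, const struct iwl_trans_ops *ops,
              const struct iwl_cfg_trans_params *cfg_trans)
{
        struct iwl_trans *trans;

        trans = iwl_trans_alloc(sizeof(struct example_bus_priv), dev, ops,
                                cfg_trans);
        if (!trans)
                return NULL;

        if (iwl_trans_init(trans)) {
                iwl_trans_free(trans);
                return NULL;
        }

        return trans;
}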
 1552 
 1553 /*****************************************************
 1554  * driver (transport) register/unregister functions
 1555  *****************************************************/
 1556 int __must_check iwl_pci_register_driver(void);
 1557 void iwl_pci_unregister_driver(void);
 1558 
 1559 #endif /* __iwl_trans_h__ */
