FreeBSD/Linux Kernel Cross Reference
sys/contrib/dev/iwlwifi/mvm/fw.c


    1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
    2 /*
    3  * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
    4  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
    5  * Copyright (C) 2016-2017 Intel Deutschland GmbH
    6  */
    7 #include <net/mac80211.h>
    8 #include <linux/netdevice.h>
    9 #include <linux/dmi.h>
   10 
   11 #include "iwl-trans.h"
   12 #include "iwl-op-mode.h"
   13 #include "fw/img.h"
   14 #include "iwl-debug.h"
   15 #include "iwl-prph.h"
   16 #include "fw/acpi.h"
   17 #include "fw/pnvm.h"
   18 
   19 #include "mvm.h"
   20 #include "fw/dbg.h"
   21 #include "iwl-phy-db.h"
   22 #include "iwl-modparams.h"
   23 #include "iwl-nvm-parse.h"
   24 
   25 #define MVM_UCODE_ALIVE_TIMEOUT (HZ)
   26 #define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
   27 
   28 #define IWL_TAS_US_MCC 0x5553
   29 #define IWL_TAS_CANADA_MCC 0x4341
   30 
   31 struct iwl_mvm_alive_data {
   32         bool valid;
   33         u32 scd_base_addr;
   34 };
   35 
   36 static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
   37 {
   38         struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
   39                 .valid = cpu_to_le32(valid_tx_ant),
   40         };
   41 
   42         IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
   43         return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
   44                                     sizeof(tx_ant_cmd), &tx_ant_cmd);
   45 }
   46 
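/*
 * Configure receive-side scaling: hash TCP, UDP and payload flows for both
 * IPv4 and IPv6 and spread them over RX queues 1..N-1, keeping queue 0 as
 * the fallback queue.  A no-op when only a single RX queue is available.
 */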
   47 static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
   48 {
   49         int i;
   50         struct iwl_rss_config_cmd cmd = {
   51                 .flags = cpu_to_le32(IWL_RSS_ENABLE),
   52                 .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
   53                              BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
   54                              BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
   55                              BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
   56                              BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
   57                              BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
   58         };
   59 
   60         if (mvm->trans->num_rx_queues == 1)
   61                 return 0;
   62 
   63         /* Do not direct RSS traffic to Q 0 which is our fallback queue */
   64         for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
   65                 cmd.indirection_table[i] =
   66                         1 + (i % (mvm->trans->num_rx_queues - 1));
   67         netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
   68 
   69         return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
   70 }
   71 
   72 static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
   73 {
   74         struct iwl_dqa_enable_cmd dqa_cmd = {
   75                 .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
   76         };
   77         u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD);
   78         int ret;
   79 
   80         ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
   81         if (ret)
   82                 IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
   83         else
   84                 IWL_DEBUG_FW(mvm, "Working in DQA mode\n");
   85 
   86         return ret;
   87 }
   88 
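/*
 * Handle an MFUART assert dump notification: report the assert ID on the
 * first fragment and print every dword of the dump data at debug level.
 */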
   89 void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
   90                                    struct iwl_rx_cmd_buffer *rxb)
   91 {
   92         struct iwl_rx_packet *pkt = rxb_addr(rxb);
   93         struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
   94         __le32 *dump_data = mfu_dump_notif->data;
   95         int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
   96         int i;
   97 
   98         if (mfu_dump_notif->index_num == 0)
   99                 IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
  100                          le32_to_cpu(mfu_dump_notif->assert_id));
  101 
  102         for (i = 0; i < n_words; i++)
  103                 IWL_DEBUG_INFO(mvm,
  104                                "MFUART assert dump, dword %u: 0x%08x\n",
  105                                le16_to_cpu(mfu_dump_notif->index_num) *
  106                                n_words + i,
  107                                le32_to_cpu(dump_data[i]));
  108 }
  109 
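/*
 * Notification-wait callback for UCODE_ALIVE_NTFY.  Parses the v3/v4/v5/v6
 * alive layouts, records the LMAC/UMAC error event table pointers, the SKU
 * IDs (v5+) and the IMR debug info (v6), and marks the alive data valid
 * when the firmware reports IWL_ALIVE_STATUS_OK.
 */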
  110 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
  111                          struct iwl_rx_packet *pkt, void *data)
  112 {
  113         unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
  114         struct iwl_mvm *mvm =
  115                 container_of(notif_wait, struct iwl_mvm, notif_wait);
  116         struct iwl_mvm_alive_data *alive_data = data;
  117         struct iwl_umac_alive *umac;
  118         struct iwl_lmac_alive *lmac1;
  119         struct iwl_lmac_alive *lmac2 = NULL;
  120         u16 status;
  121         u32 lmac_error_event_table, umac_error_table;
  122         u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
  123                                               UCODE_ALIVE_NTFY, 0);
  124         u32 i;
  125 
  126         if (version == 6) {
  127                 struct iwl_alive_ntf_v6 *palive;
  128 
  129                 if (pkt_len < sizeof(*palive))
  130                         return false;
  131 
  132                 palive = (void *)pkt->data;
  133                 mvm->trans->dbg.imr_data.imr_enable =
  134                         le32_to_cpu(palive->imr.enabled);
  135                 mvm->trans->dbg.imr_data.imr_size =
  136                         le32_to_cpu(palive->imr.size);
  137                 mvm->trans->dbg.imr_data.imr2sram_remainbyte =
  138                         mvm->trans->dbg.imr_data.imr_size;
  139                 mvm->trans->dbg.imr_data.imr_base_addr =
  140                         palive->imr.base_addr;
  141                 mvm->trans->dbg.imr_data.imr_curr_addr =
  142                         le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr);
  143                 IWL_DEBUG_FW(mvm, "IMR Enabled: 0x0%x  size 0x0%x Address 0x%016llx\n",
  144                              mvm->trans->dbg.imr_data.imr_enable,
  145                              mvm->trans->dbg.imr_data.imr_size,
  146                              le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr));
  147 
  148                 if (!mvm->trans->dbg.imr_data.imr_enable) {
  149                         for (i = 0; i < ARRAY_SIZE(mvm->trans->dbg.active_regions); i++) {
  150                                 struct iwl_ucode_tlv *reg_tlv;
  151                                 struct iwl_fw_ini_region_tlv *reg;
  152 
  153                                 reg_tlv = mvm->trans->dbg.active_regions[i];
  154                                 if (!reg_tlv)
  155                                         continue;
  156 
  157                                 reg = (void *)reg_tlv->data;
  158                                 /*
  159                                  * We have only one DRAM IMR region, so we
  160                                  * can break as soon as we find the first
  161                                  * one.
  162                                  */
  163                                 if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
  164                                         mvm->trans->dbg.unsupported_region_msk |= BIT(i);
  165                                         break;
  166                                 }
  167                         }
  168                 }
  169         }
  170 
  171         if (version >= 5) {
  172                 struct iwl_alive_ntf_v5 *palive;
  173 
  174                 if (pkt_len < sizeof(*palive))
  175                         return false;
  176 
  177                 palive = (void *)pkt->data;
  178                 umac = &palive->umac_data;
  179                 lmac1 = &palive->lmac_data[0];
  180                 lmac2 = &palive->lmac_data[1];
  181                 status = le16_to_cpu(palive->status);
  182 
  183                 mvm->trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
  184                 mvm->trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
  185                 mvm->trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);
  186 
  187                 IWL_DEBUG_FW(mvm, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
  188                              mvm->trans->sku_id[0],
  189                              mvm->trans->sku_id[1],
  190                              mvm->trans->sku_id[2]);
  191         } else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) {
  192                 struct iwl_alive_ntf_v4 *palive;
  193 
  194                 if (pkt_len < sizeof(*palive))
  195                         return false;
  196 
  197                 palive = (void *)pkt->data;
  198                 umac = &palive->umac_data;
  199                 lmac1 = &palive->lmac_data[0];
  200                 lmac2 = &palive->lmac_data[1];
  201                 status = le16_to_cpu(palive->status);
  202         } else if (iwl_rx_packet_payload_len(pkt) ==
  203                    sizeof(struct iwl_alive_ntf_v3)) {
  204                 struct iwl_alive_ntf_v3 *palive3;
  205 
  206                 if (pkt_len < sizeof(*palive3))
  207                         return false;
  208 
  209                 palive3 = (void *)pkt->data;
  210                 umac = &palive3->umac_data;
  211                 lmac1 = &palive3->lmac_data;
  212                 status = le16_to_cpu(palive3->status);
  213         } else {
  214                 WARN(1, "unsupported alive notification (size %d)\n",
  215                      iwl_rx_packet_payload_len(pkt));
  216                 /* get timeout later */
  217                 return false;
  218         }
  219 
  220         lmac_error_event_table =
  221                 le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
  222         iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table);
  223 
  224         if (lmac2)
  225                 mvm->trans->dbg.lmac_error_event_table[1] =
  226                         le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);
  227 
  228         umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
  229                                                         ~FW_ADDR_CACHE_CONTROL;
  230 
  231         if (umac_error_table) {
  232                 if (umac_error_table >=
  233                     mvm->trans->cfg->min_umac_error_event_table) {
  234                         iwl_fw_umac_set_alive_err_table(mvm->trans,
  235                                                         umac_error_table);
  236                 } else {
  237                         IWL_ERR(mvm,
  238                                 "Not valid error log pointer 0x%08X for %s uCode\n",
  239                                 umac_error_table,
  240                                 (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
  241                                 "Init" : "RT");
  242                 }
  243         }
  244 
  245         alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
  246         alive_data->valid = status == IWL_ALIVE_STATUS_OK;
  247 
  248         IWL_DEBUG_FW(mvm,
  249                      "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
  250                      status, lmac1->ver_type, lmac1->ver_subtype);
  251 
  252         if (lmac2)
  253                 IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");
  254 
  255         IWL_DEBUG_FW(mvm,
  256                      "UMAC version: Major - 0x%x, Minor - 0x%x\n",
  257                      le32_to_cpu(umac->umac_major),
  258                      le32_to_cpu(umac->umac_minor));
  259 
  260         iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac);
  261 
  262         return true;
  263 }
  264 
  265 static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
  266                                    struct iwl_rx_packet *pkt, void *data)
  267 {
  268         WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
  269 
  270         return true;
  271 }
  272 
  273 static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
  274                                   struct iwl_rx_packet *pkt, void *data)
  275 {
  276         struct iwl_phy_db *phy_db = data;
  277 
  278         if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
  279                 WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
  280                 return true;
  281         }
  282 
  283         WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
  284 
  285         return false;
  286 }
  287 
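/*
 * Dump the power-down notification and secondary device state registers
 * (family 8000 and later); used when the ALIVE notification never arrives.
 */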
  288 static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
  289 {
  290         struct iwl_trans *trans = mvm->trans;
  291         enum iwl_device_family device_family = trans->trans_cfg->device_family;
  292 
  293         if (device_family < IWL_DEVICE_FAMILY_8000)
  294                 return;
  295 
  296         if (device_family <= IWL_DEVICE_FAMILY_9000)
  297                 IWL_ERR(mvm, "WFPM_ARC1_PD_NOTIFICATION: 0x%x\n",
  298                         iwl_read_umac_prph(trans, WFPM_ARC1_PD_NOTIFICATION));
  299         else
  300                 IWL_ERR(mvm, "WFPM_LMAC1_PD_NOTIFICATION: 0x%x\n",
  301                         iwl_read_umac_prph(trans, WFPM_LMAC1_PD_NOTIFICATION));
  302 
  303         IWL_ERR(mvm, "HPM_SECONDARY_DEVICE_STATE: 0x%x\n",
  304                 iwl_read_umac_prph(trans, HPM_SECONDARY_DEVICE_STATE));
  305 
  306 }
  307 
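/*
 * Start the requested ucode image and wait up to MVM_UCODE_ALIVE_TIMEOUT for
 * the ALIVE notification.  On failure the previous image selection is
 * restored and, on a timeout, firmware debug data is collected.  On success
 * the PNVM is loaded, the queue bookkeeping is reset and the BSS table is
 * flushed.
 */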
  308 static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
  309                                          enum iwl_ucode_type ucode_type)
  310 {
  311         struct iwl_notification_wait alive_wait;
  312         struct iwl_mvm_alive_data alive_data = {};
  313         const struct fw_img *fw;
  314         int ret;
  315         enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
  316         static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
  317         bool run_in_rfkill =
  318                 ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm);
  319 
  320         if (ucode_type == IWL_UCODE_REGULAR &&
  321             iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
  322             !(fw_has_capa(&mvm->fw->ucode_capa,
  323                           IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
  324                 fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
  325         else
  326                 fw = iwl_get_ucode_image(mvm->fw, ucode_type);
  327         if (WARN_ON(!fw))
  328                 return -EINVAL;
  329         iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
  330         clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
  331 
  332         iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
  333                                    alive_cmd, ARRAY_SIZE(alive_cmd),
  334                                    iwl_alive_fn, &alive_data);
  335 
  336         /*
   337          * We want to load the INIT firmware even in RFKILL.
  338          * For the unified firmware case, the ucode_type is not
  339          * INIT, but we still need to run it.
  340          */
  341         ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill);
  342         if (ret) {
  343                 iwl_fw_set_current_image(&mvm->fwrt, old_type);
  344                 iwl_remove_notification(&mvm->notif_wait, &alive_wait);
  345                 return ret;
  346         }
  347 
  348         /*
  349          * Some things may run in the background now, but we
  350          * just wait for the ALIVE notification here.
  351          */
  352         ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
  353                                     MVM_UCODE_ALIVE_TIMEOUT);
  354         if (ret) {
  355                 struct iwl_trans *trans = mvm->trans;
  356 
  357                 /* SecBoot info */
  358                 if (trans->trans_cfg->device_family >=
  359                                         IWL_DEVICE_FAMILY_22000) {
  360                         IWL_ERR(mvm,
  361                                 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
  362                                 iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
  363                                 iwl_read_umac_prph(trans,
  364                                                    UMAG_SB_CPU_2_STATUS));
  365                 } else if (trans->trans_cfg->device_family >=
  366                            IWL_DEVICE_FAMILY_8000) {
  367                         IWL_ERR(mvm,
  368                                 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
  369                                 iwl_read_prph(trans, SB_CPU_1_STATUS),
  370                                 iwl_read_prph(trans, SB_CPU_2_STATUS));
  371                 }
  372 
  373                 iwl_mvm_print_pd_notification(mvm);
  374 
  375                 /* LMAC/UMAC PC info */
  376                 if (trans->trans_cfg->device_family >=
  377                                         IWL_DEVICE_FAMILY_9000) {
  378                         IWL_ERR(mvm, "UMAC PC: 0x%x\n",
  379                                 iwl_read_umac_prph(trans,
  380                                                    UREG_UMAC_CURRENT_PC));
  381                         IWL_ERR(mvm, "LMAC PC: 0x%x\n",
  382                                 iwl_read_umac_prph(trans,
  383                                                    UREG_LMAC1_CURRENT_PC));
  384                         if (iwl_mvm_is_cdb_supported(mvm))
  385                                 IWL_ERR(mvm, "LMAC2 PC: 0x%x\n",
  386                                         iwl_read_umac_prph(trans,
  387                                                 UREG_LMAC2_CURRENT_PC));
  388                 }
  389 
  390                 if (ret == -ETIMEDOUT)
  391                         iwl_fw_dbg_error_collect(&mvm->fwrt,
  392                                                  FW_DBG_TRIGGER_ALIVE_TIMEOUT);
  393 
  394                 iwl_fw_set_current_image(&mvm->fwrt, old_type);
  395                 return ret;
  396         }
  397 
  398         if (!alive_data.valid) {
  399                 IWL_ERR(mvm, "Loaded ucode is not valid!\n");
  400                 iwl_fw_set_current_image(&mvm->fwrt, old_type);
  401                 return -EIO;
  402         }
  403 
  404         ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait);
  405         if (ret) {
  406                 IWL_ERR(mvm, "Timeout waiting for PNVM load!\n");
  407                 iwl_fw_set_current_image(&mvm->fwrt, old_type);
  408                 return ret;
  409         }
  410 
  411         iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
  412 
  413         /*
  414          * Note: all the queues are enabled as part of the interface
  415          * initialization, but in firmware restart scenarios they
  416          * could be stopped, so wake them up. In firmware restart,
  417          * mac80211 will have the queues stopped as well until the
  418          * reconfiguration completes. During normal startup, they
  419          * will be empty.
  420          */
  421 
  422         memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
  423         /*
  424          * Set a 'fake' TID for the command queue, since we use the
  425          * hweight() of the tid_bitmap as a refcount now. Not that
  426          * we ever even consider the command queue as one we might
  427          * want to reuse, but be safe nevertheless.
  428          */
  429         mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
  430                 BIT(IWL_MAX_TID_COUNT + 2);
  431 
  432         set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
  433 #ifdef CONFIG_IWLWIFI_DEBUGFS
  434         iwl_fw_set_dbg_rec_on(&mvm->fwrt);
  435 #endif
  436 
  437         /*
  438          * All the BSSes in the BSS table include the GP2 in the system
   439          * at the beacon Rx time, which is of course no longer relevant
   440          * since we are resetting the firmware.
   441          * Purge the whole BSS table.
  442          */
  443         cfg80211_bss_flush(mvm->hw->wiphy);
  444 
  445         return 0;
  446 }
  447 
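/*
 * Init flow for unified firmware: start the regular image, send the extended
 * init config, load/parse the NVM as needed and wait for INIT_COMPLETE_NOTIF.
 */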
  448 static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
  449 {
  450         struct iwl_notification_wait init_wait;
  451         struct iwl_nvm_access_complete_cmd nvm_complete = {};
  452         struct iwl_init_extended_cfg_cmd init_cfg = {
  453                 .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
  454         };
  455         static const u16 init_complete[] = {
  456                 INIT_COMPLETE_NOTIF,
  457         };
  458         int ret;
  459 
  460         if (mvm->trans->cfg->tx_with_siso_diversity)
  461                 init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));
  462 
  463         lockdep_assert_held(&mvm->mutex);
  464 
  465         mvm->rfkill_safe_init_done = false;
  466 
  467         iwl_init_notification_wait(&mvm->notif_wait,
  468                                    &init_wait,
  469                                    init_complete,
  470                                    ARRAY_SIZE(init_complete),
  471                                    iwl_wait_init_complete,
  472                                    NULL);
  473 
  474         iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
  475 
  476         /* Will also start the device */
  477         ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
  478         if (ret) {
  479                 IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
  480                 goto error;
  481         }
  482         iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
  483                                NULL);
  484 
  485         /* Send init config command to mark that we are sending NVM access
  486          * commands
  487          */
  488         ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
  489                                                 INIT_EXTENDED_CFG_CMD),
  490                                    CMD_SEND_IN_RFKILL,
  491                                    sizeof(init_cfg), &init_cfg);
  492         if (ret) {
  493                 IWL_ERR(mvm, "Failed to run init config command: %d\n",
  494                         ret);
  495                 goto error;
  496         }
  497 
  498         /* Load NVM to NIC if needed */
  499         if (mvm->nvm_file_name) {
  500                 ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
  501                                             mvm->nvm_sections);
  502                 if (ret)
  503                         goto error;
  504                 ret = iwl_mvm_load_nvm_to_nic(mvm);
  505                 if (ret)
  506                         goto error;
  507         }
  508 
  509         if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
  510                 ret = iwl_nvm_init(mvm);
  511                 if (ret) {
  512                         IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
  513                         goto error;
  514                 }
  515         }
  516 
  517         ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
  518                                                 NVM_ACCESS_COMPLETE),
  519                                    CMD_SEND_IN_RFKILL,
  520                                    sizeof(nvm_complete), &nvm_complete);
  521         if (ret) {
  522                 IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
  523                         ret);
  524                 goto error;
  525         }
  526 
  527         /* We wait for the INIT complete notification */
  528         ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
  529                                     MVM_UCODE_ALIVE_TIMEOUT);
  530         if (ret)
  531                 return ret;
  532 
  533         /* Read the NVM only at driver load time, no need to do this twice */
  534         if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
  535                 mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw);
  536                 if (IS_ERR(mvm->nvm_data)) {
  537                         ret = PTR_ERR(mvm->nvm_data);
  538                         mvm->nvm_data = NULL;
  539                         IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
  540                         return ret;
  541                 }
  542         }
  543 
  544         mvm->rfkill_safe_init_done = true;
  545 
  546         return 0;
  547 
  548 error:
  549         iwl_remove_notification(&mvm->notif_wait, &init_wait);
  550         return ret;
  551 }
  552 
  553 #ifdef CONFIG_ACPI
  554 static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
  555                                     struct iwl_phy_specific_cfg *phy_filters)
  556 {
  557         /*
  558          * TODO: read specific phy config from BIOS
  559          * ACPI table for this feature has not been defined yet,
  560          * so for now we use hardcoded values.
  561          */
  562 
  563         if (IWL_MVM_PHY_FILTER_CHAIN_A) {
  564                 phy_filters->filter_cfg_chain_a =
  565                         cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_A);
  566         }
  567         if (IWL_MVM_PHY_FILTER_CHAIN_B) {
  568                 phy_filters->filter_cfg_chain_b =
  569                         cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_B);
  570         }
  571         if (IWL_MVM_PHY_FILTER_CHAIN_C) {
  572                 phy_filters->filter_cfg_chain_c =
  573                         cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_C);
  574         }
  575         if (IWL_MVM_PHY_FILTER_CHAIN_D) {
  576                 phy_filters->filter_cfg_chain_d =
  577                         cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D);
  578         }
  579 }
  580 #else /* CONFIG_ACPI */
  581 
  582 static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
  583                                     struct iwl_phy_specific_cfg *phy_filters)
  584 {
  585 }
  586 #endif /* CONFIG_ACPI */
  587 
  588 #if defined(CONFIG_ACPI) && defined(CONFIG_EFI)
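/*
 * Send the SAR offset mapping (SGOM) table if it was loaded from BIOS and
 * the firmware implements version 2 of SAR_OFFSET_MAPPING_TABLE_CMD.
 */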
  589 static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
  590 {
  591         u8 cmd_ver;
  592         int ret;
  593         struct iwl_host_cmd cmd = {
  594                 .id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
  595                               SAR_OFFSET_MAPPING_TABLE_CMD),
  596                 .flags = 0,
  597                 .data[0] = &mvm->fwrt.sgom_table,
  598                 .len[0] =  sizeof(mvm->fwrt.sgom_table),
  599                 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
  600         };
  601 
  602         if (!mvm->fwrt.sgom_enabled) {
  603                 IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n");
  604                 return 0;
  605         }
  606 
  607         cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
  608                                         IWL_FW_CMD_VER_UNKNOWN);
  609 
  610         if (cmd_ver != 2) {
  611                 IWL_DEBUG_RADIO(mvm, "command version is unsupported. version = %d\n",
  612                                 cmd_ver);
  613                 return 0;
  614         }
  615 
  616         ret = iwl_mvm_send_cmd(mvm, &cmd);
  617         if (ret < 0)
  618                 IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret);
  619 
  620         return ret;
  621 }
  622 #else
  623 
  624 static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
  625 {
  626         return 0;
  627 }
  628 #endif
  629 
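/*
 * Send PHY_CONFIGURATION_CMD with the PHY config and the per-image
 * calibration triggers; command version 3 additionally carries the PHY
 * filter settings.  Skipped for unified firmware unless SISO diversity
 * is in use.
 */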
  630 static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
  631 {
  632         u32 cmd_id = PHY_CONFIGURATION_CMD;
  633         struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
  634         enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
  635         struct iwl_phy_specific_cfg phy_filters = {};
  636         u8 cmd_ver;
  637         size_t cmd_size;
  638 
  639         if (iwl_mvm_has_unified_ucode(mvm) &&
  640             !mvm->trans->cfg->tx_with_siso_diversity)
  641                 return 0;
  642 
  643         if (mvm->trans->cfg->tx_with_siso_diversity) {
  644                 /*
   645                  * TODO: currently we don't set the antenna but let the NIC
   646                  * decide which antenna to use. This should come from BIOS.
  647                  */
  648                 phy_cfg_cmd.phy_cfg =
  649                         cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED);
  650         }
  651 
  652         /* Set parameters */
  653         phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
  654 
   655         /* set extra PHY configuration flags from the device's cfg */
  656         phy_cfg_cmd.phy_cfg |=
  657                 cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags);
  658 
  659         phy_cfg_cmd.calib_control.event_trigger =
  660                 mvm->fw->default_calib[ucode_type].event_trigger;
  661         phy_cfg_cmd.calib_control.flow_trigger =
  662                 mvm->fw->default_calib[ucode_type].flow_trigger;
  663 
  664         cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
  665                                         IWL_FW_CMD_VER_UNKNOWN);
  666         if (cmd_ver == 3) {
  667                 iwl_mvm_phy_filter_init(mvm, &phy_filters);
  668                 memcpy(&phy_cfg_cmd.phy_specific_cfg, &phy_filters,
  669                        sizeof(struct iwl_phy_specific_cfg));
  670         }
  671 
  672         IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
  673                        phy_cfg_cmd.phy_cfg);
  674         cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
  675                                     sizeof(struct iwl_phy_cfg_cmd_v1);
  676         return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd);
  677 }
  678 
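/*
 * Non-unified INIT flow: load the INIT image, read the NVM, send the TX
 * antenna and PHY configuration and wait for the calibration results.
 * Unified firmware is handled by iwl_run_unified_mvm_ucode() instead.
 */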
  679 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
  680 {
  681         struct iwl_notification_wait calib_wait;
  682         static const u16 init_complete[] = {
  683                 INIT_COMPLETE_NOTIF,
  684                 CALIB_RES_NOTIF_PHY_DB
  685         };
  686         int ret;
  687 
  688         if (iwl_mvm_has_unified_ucode(mvm))
  689                 return iwl_run_unified_mvm_ucode(mvm);
  690 
  691         lockdep_assert_held(&mvm->mutex);
  692 
  693         mvm->rfkill_safe_init_done = false;
  694 
  695         iwl_init_notification_wait(&mvm->notif_wait,
  696                                    &calib_wait,
  697                                    init_complete,
  698                                    ARRAY_SIZE(init_complete),
  699                                    iwl_wait_phy_db_entry,
  700                                    mvm->phy_db);
  701 
  702         iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
  703 
  704         /* Will also start the device */
  705         ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
  706         if (ret) {
  707                 IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
  708                 goto remove_notif;
  709         }
  710 
  711         if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) {
  712                 ret = iwl_mvm_send_bt_init_conf(mvm);
  713                 if (ret)
  714                         goto remove_notif;
  715         }
  716 
  717         /* Read the NVM only at driver load time, no need to do this twice */
  718         if (!mvm->nvm_data) {
  719                 ret = iwl_nvm_init(mvm);
  720                 if (ret) {
  721                         IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
  722                         goto remove_notif;
  723                 }
  724         }
  725 
  726         /* In case we read the NVM from external file, load it to the NIC */
  727         if (mvm->nvm_file_name) {
  728                 ret = iwl_mvm_load_nvm_to_nic(mvm);
  729                 if (ret)
  730                         goto remove_notif;
  731         }
  732 
  733         WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
  734                   "Too old NVM version (0x%0x, required = 0x%0x)",
  735                   mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);
  736 
  737         /*
   738          * abort after reading the NVM in case RF kill is on; we will complete
   739          * the init sequence later when RF kill switches off
  740          */
  741         if (iwl_mvm_is_radio_hw_killed(mvm)) {
  742                 IWL_DEBUG_RF_KILL(mvm,
  743                                   "jump over all phy activities due to RF kill\n");
  744                 goto remove_notif;
  745         }
  746 
  747         mvm->rfkill_safe_init_done = true;
  748 
  749         /* Send TX valid antennas before triggering calibrations */
  750         ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
  751         if (ret)
  752                 goto remove_notif;
  753 
  754         ret = iwl_send_phy_cfg_cmd(mvm);
  755         if (ret) {
  756                 IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
  757                         ret);
  758                 goto remove_notif;
  759         }
  760 
  761         /*
  762          * Some things may run in the background now, but we
  763          * just wait for the calibration complete notification.
  764          */
  765         ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
  766                                     MVM_UCODE_CALIB_TIMEOUT);
  767         if (!ret)
  768                 goto out;
  769 
  770         if (iwl_mvm_is_radio_hw_killed(mvm)) {
  771                 IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
  772                 ret = 0;
  773         } else {
  774                 IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
  775                         ret);
  776         }
  777 
  778         goto out;
  779 
  780 remove_notif:
  781         iwl_remove_notification(&mvm->notif_wait, &calib_wait);
  782 out:
  783         mvm->rfkill_safe_init_done = false;
  784         if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
  785                 /* we want to debug INIT and we have no NVM - fake */
  786                 mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
  787                                         sizeof(struct ieee80211_channel) +
  788                                         sizeof(struct ieee80211_rate),
  789                                         GFP_KERNEL);
  790                 if (!mvm->nvm_data)
  791                         return -ENOMEM;
  792                 mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
  793                 mvm->nvm_data->bands[0].n_channels = 1;
  794                 mvm->nvm_data->bands[0].n_bitrates = 1;
  795                 mvm->nvm_data->bands[0].bitrates =
  796                         (void *)(mvm->nvm_data->channels + 1);
  797                 mvm->nvm_data->bands[0].bitrates->hw_value = 10;
  798         }
  799 
  800         return ret;
  801 }
  802 
  803 static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
  804 {
  805         struct iwl_ltr_config_cmd cmd = {
  806                 .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
  807         };
  808 
  809         if (!mvm->trans->ltr_enabled)
  810                 return 0;
  811 
  812         return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
  813                                     sizeof(cmd), &cmd);
  814 }
  815 
  816 #ifdef CONFIG_ACPI
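/*
 * Build REDUCE_TX_POWER_CMD from the SAR limits of profiles prof_a/prof_b,
 * choosing the per-chain layout by the command version the firmware
 * supports, pass the limits to the MEI layer and send the command.
 */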
  817 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
  818 {
  819         u32 cmd_id = REDUCE_TX_POWER_CMD;
  820         struct iwl_dev_tx_power_cmd cmd = {
  821                 .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
  822         };
  823         __le16 *per_chain;
  824         int ret;
  825         u16 len = 0;
  826         u32 n_subbands;
  827         u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
  828                                            IWL_FW_CMD_VER_UNKNOWN);
  829         if (cmd_ver == 7) {
  830                 len = sizeof(cmd.v7);
  831                 n_subbands = IWL_NUM_SUB_BANDS_V2;
  832                 per_chain = cmd.v7.per_chain[0][0];
  833                 cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
  834         } else if (cmd_ver == 6) {
  835                 len = sizeof(cmd.v6);
  836                 n_subbands = IWL_NUM_SUB_BANDS_V2;
  837                 per_chain = cmd.v6.per_chain[0][0];
  838         } else if (fw_has_api(&mvm->fw->ucode_capa,
  839                               IWL_UCODE_TLV_API_REDUCE_TX_POWER)) {
  840                 len = sizeof(cmd.v5);
  841                 n_subbands = IWL_NUM_SUB_BANDS_V1;
  842                 per_chain = cmd.v5.per_chain[0][0];
  843         } else if (fw_has_capa(&mvm->fw->ucode_capa,
  844                                IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) {
  845                 len = sizeof(cmd.v4);
  846                 n_subbands = IWL_NUM_SUB_BANDS_V1;
  847                 per_chain = cmd.v4.per_chain[0][0];
  848         } else {
  849                 len = sizeof(cmd.v3);
  850                 n_subbands = IWL_NUM_SUB_BANDS_V1;
  851                 per_chain = cmd.v3.per_chain[0][0];
  852         }
  853 
  854         /* all structs have the same common part, add it */
  855         len += sizeof(cmd.common);
  856 
  857         ret = iwl_sar_select_profile(&mvm->fwrt, per_chain,
  858                                      IWL_NUM_CHAIN_TABLES,
  859                                      n_subbands, prof_a, prof_b);
  860 
  861         /* return on error or if the profile is disabled (positive number) */
  862         if (ret)
  863                 return ret;
  864 
  865         iwl_mei_set_power_limit(per_chain);
  866 
  867         IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
  868         return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
  869 }
  870 
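/*
 * Query the firmware for the geo profile currently in use via
 * PER_CHAIN_LIMIT_OFFSET_CMD with the GET_CURRENT_TABLE op; returns the
 * profile index or a negative error.
 */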
  871 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
  872 {
  873         union iwl_geo_tx_power_profiles_cmd geo_tx_cmd;
  874         struct iwl_geo_tx_power_profiles_resp *resp;
  875         u16 len;
  876         int ret;
  877         struct iwl_host_cmd cmd = {
  878                 .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
  879                 .flags = CMD_WANT_SKB,
  880                 .data = { &geo_tx_cmd },
  881         };
  882         u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
  883                                            IWL_FW_CMD_VER_UNKNOWN);
  884 
  885         /* the ops field is at the same spot for all versions, so set in v1 */
  886         geo_tx_cmd.v1.ops =
  887                 cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
  888 
  889         if (cmd_ver == 5)
  890                 len = sizeof(geo_tx_cmd.v5);
  891         else if (cmd_ver == 4)
  892                 len = sizeof(geo_tx_cmd.v4);
  893         else if (cmd_ver == 3)
  894                 len = sizeof(geo_tx_cmd.v3);
  895         else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
  896                             IWL_UCODE_TLV_API_SAR_TABLE_VER))
  897                 len = sizeof(geo_tx_cmd.v2);
  898         else
  899                 len = sizeof(geo_tx_cmd.v1);
  900 
  901         if (!iwl_sar_geo_support(&mvm->fwrt))
  902                 return -EOPNOTSUPP;
  903 
  904         cmd.len[0] = len;
  905 
  906         ret = iwl_mvm_send_cmd(mvm, &cmd);
  907         if (ret) {
  908                 IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
  909                 return ret;
  910         }
  911 
  912         resp = (void *)cmd.resp_pkt->data;
  913         ret = le32_to_cpu(resp->profile_idx);
  914 
  915         if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES_REV3))
  916                 ret = -EIO;
  917 
  918         iwl_free_resp(&cmd);
  919         return ret;
  920 }
  921 
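/*
 * Program the WGDS geo offset tables into the firmware with
 * PER_CHAIN_LIMIT_OFFSET_CMD, sizing the command by its version.  Quietly
 * does nothing when no geo SAR data is available.
 */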
  922 static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
  923 {
  924         u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
  925         union iwl_geo_tx_power_profiles_cmd cmd;
  926         u16 len;
  927         u32 n_bands;
  928         u32 n_profiles;
  929         u32 sk = 0;
  930         int ret;
  931         u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
  932                                            IWL_FW_CMD_VER_UNKNOWN);
  933 
  934         BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
  935                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) ||
  936                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) !=
  937                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) ||
  938                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) !=
  939                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) ||
  940                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) !=
  941                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops));
  942 
  943         /* the ops field is at the same spot for all versions, so set in v1 */
  944         cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
  945 
  946         if (cmd_ver == 5) {
  947                 len = sizeof(cmd.v5);
  948                 n_bands = ARRAY_SIZE(cmd.v5.table[0]);
  949                 n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
  950         } else if (cmd_ver == 4) {
  951                 len = sizeof(cmd.v4);
  952                 n_bands = ARRAY_SIZE(cmd.v4.table[0]);
  953                 n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
  954         } else if (cmd_ver == 3) {
  955                 len = sizeof(cmd.v3);
  956                 n_bands = ARRAY_SIZE(cmd.v3.table[0]);
  957                 n_profiles = ACPI_NUM_GEO_PROFILES;
  958         } else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
  959                               IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
  960                 len = sizeof(cmd.v2);
  961                 n_bands = ARRAY_SIZE(cmd.v2.table[0]);
  962                 n_profiles = ACPI_NUM_GEO_PROFILES;
  963         } else {
  964                 len = sizeof(cmd.v1);
  965                 n_bands = ARRAY_SIZE(cmd.v1.table[0]);
  966                 n_profiles = ACPI_NUM_GEO_PROFILES;
  967         }
  968 
  969         BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
  970                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) ||
  971                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) !=
  972                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) ||
  973                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) !=
  974                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) ||
  975                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
  976                      offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
   977         /* the table is at the same position for all versions, so use v1 */
  978         ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0],
  979                                n_bands, n_profiles);
  980 
  981         /*
   982          * It is a valid scenario to not support SAR, or to be missing the WGDS table,
  983          * but in that case there is no need to send the command.
  984          */
  985         if (ret)
  986                 return 0;
  987 
  988         /* Only set to South Korea if the table revision is 1 */
  989         if (mvm->fwrt.geo_rev == 1)
  990                 sk = 1;
  991 
  992         /*
  993          * Set the table_revision to South Korea (1) or not (0).  The
  994          * element name is misleading, as it doesn't contain the table
  995          * revision number, but whether the South Korea variation
  996          * should be used.
  997          * This must be done after calling iwl_sar_geo_init().
  998          */
  999         if (cmd_ver == 5)
 1000                 cmd.v5.table_revision = cpu_to_le32(sk);
 1001         else if (cmd_ver == 4)
 1002                 cmd.v4.table_revision = cpu_to_le32(sk);
 1003         else if (cmd_ver == 3)
 1004                 cmd.v3.table_revision = cpu_to_le32(sk);
 1005         else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
 1006                             IWL_UCODE_TLV_API_SAR_TABLE_VER))
 1007                 cmd.v2.table_revision = cpu_to_le32(sk);
 1008 
 1009         return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
 1010 }
 1011 
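/*
 * Send the per-platform antenna gain (PPAG) table that was read from ACPI
 * at init time; a missing table is not an error and the command is skipped.
 */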
 1012 int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
 1013 {
 1014         union iwl_ppag_table_cmd cmd;
 1015         int ret, cmd_size;
 1016 
 1017         ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
 1018         /* Not supporting PPAG table is a valid scenario */
  1019         if (ret < 0)
 1020                 return 0;
 1021 
 1022         IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
 1023         ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
 1024                                                 PER_PLATFORM_ANT_GAIN_CMD),
 1025                                    0, cmd_size, &cmd);
 1026         if (ret < 0)
 1027                 IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
 1028                         ret);
 1029 
 1030         return ret;
 1031 }
 1032 
 1033 static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
 1034 {
 1035         /* no need to read the table, done in INIT stage */
 1036         if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt)))
 1037                 return 0;
 1038 
 1039         return iwl_mvm_ppag_send_cmd(mvm);
 1040 }
 1041 
 1042 static const struct dmi_system_id dmi_tas_approved_list[] = {
 1043         { .ident = "HP",
 1044           .matches = {
 1045                         DMI_MATCH(DMI_SYS_VENDOR, "HP"),
 1046                 },
 1047         },
 1048         { .ident = "SAMSUNG",
 1049           .matches = {
 1050                         DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
 1051                 },
 1052         },
  1053         { .ident = "LENOVO",
 1054           .matches = {
 1055                         DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"),
 1056                 },
 1057         },
 1058         { .ident = "DELL",
 1059           .matches = {
 1060                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 1061                 },
 1062         },
 1063 
 1064         /* keep last */
 1065         {}
 1066 };
 1067 
 1068 static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
 1069 {
 1070         int i;
 1071         u32 size = le32_to_cpu(*le_size);
 1072 
 1073         /* Verify that there is room for another country */
 1074         if (size >= IWL_TAS_BLOCK_LIST_MAX)
 1075                 return false;
 1076 
 1077         for (i = 0; i < size; i++) {
 1078                 if (list[i] == cpu_to_le32(mcc))
 1079                         return true;
 1080         }
 1081 
 1082         list[size++] = cpu_to_le32(mcc);
 1083         *le_size = cpu_to_le32(size);
 1084         return true;
 1085 }
 1086 
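/*
 * Time Averaged SAR (TAS): read the WTAS table from ACPI and, for system
 * vendors not on dmi_tas_approved_list, block TAS in the US and Canada MCCs
 * before sending TAS_CONFIG to the firmware.
 */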
 1087 static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
 1088 {
 1089         u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
 1090         int ret;
 1091         union iwl_tas_config_cmd cmd = {};
 1092         int cmd_size, fw_ver;
 1093 
 1094         BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
 1095                      APCI_WTAS_BLACK_LIST_MAX);
 1096 
 1097         if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
 1098                 IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
 1099                 return;
 1100         }
 1101 
 1102         fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
 1103                                        IWL_FW_CMD_VER_UNKNOWN);
 1104 
 1105         ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
 1106         if (ret < 0) {
 1107                 IWL_DEBUG_RADIO(mvm,
 1108                                 "TAS table invalid or unavailable. (%d)\n",
 1109                                 ret);
 1110                 return;
 1111         }
 1112 
 1113         if (ret == 0)
 1114                 return;
 1115 
 1116         if (!dmi_check_system(dmi_tas_approved_list)) {
 1117                 IWL_DEBUG_RADIO(mvm,
 1118                                 "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
 1119                                 dmi_get_system_info(DMI_SYS_VENDOR));
 1120                 if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
 1121                                                     &cmd.v4.block_list_size,
 1122                                                         IWL_TAS_US_MCC)) ||
 1123                     (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
 1124                                                     &cmd.v4.block_list_size,
 1125                                                         IWL_TAS_CANADA_MCC))) {
 1126                         IWL_DEBUG_RADIO(mvm,
 1127                                         "Unable to add US/Canada to TAS block list, disabling TAS\n");
 1128                         return;
 1129                 }
 1130         }
 1131 
 1132         /* v4 is the same size as v3, so no need to differentiate here */
 1133         cmd_size = fw_ver < 3 ?
 1134                 sizeof(struct iwl_tas_config_cmd_v2) :
 1135                 sizeof(struct iwl_tas_config_cmd_v3);
 1136 
 1137         ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
 1138         if (ret < 0)
 1139                 IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
 1140 }
 1141 
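/*
 * Evaluate the RFI enablement DSM function; return DSM_VALUE_RFI_ENABLE only
 * when the BIOS explicitly enables it, otherwise default to disabled.
 */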
 1142 static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
 1143 {
 1144         u8 value;
 1145         int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_RFI_FUNC_ENABLE,
 1146                                       &iwl_rfi_guid, &value);
 1147 
 1148         if (ret < 0) {
 1149                 IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);
 1150 
 1151         } else if (value >= DSM_VALUE_RFI_MAX) {
  1152                 IWL_DEBUG_RADIO(mvm, "DSM RFI got invalid value, value=%d\n",
 1153                                 value);
 1154 
 1155         } else if (value == DSM_VALUE_RFI_ENABLE) {
 1156                 IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
 1157                 return DSM_VALUE_RFI_ENABLE;
 1158         }
 1159 
 1160         IWL_DEBUG_RADIO(mvm, "DSM RFI is disabled\n");
 1161 
 1162         /* default behaviour is disabled */
 1163         return DSM_VALUE_RFI_DISABLE;
 1164 }
 1165 
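/*
 * Collect the LARI regulatory overrides (11ax, UNII-4, 6E, channel activation
 * and force-disable bitmaps) from the ACPI DSM functions and send
 * LARI_CONFIG_CHANGE, sized according to the supported command version.
 */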
 1166 static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
 1167 {
 1168         int ret;
 1169         u32 value;
 1170         struct iwl_lari_config_change_cmd_v6 cmd = {};
 1171 
 1172         cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
 1173 
 1174         ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_11AX_ENABLEMENT,
 1175                                    &iwl_guid, &value);
 1176         if (!ret)
 1177                 cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
 1178 
 1179         ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
 1180                                    DSM_FUNC_ENABLE_UNII4_CHAN,
 1181                                    &iwl_guid, &value);
 1182         if (!ret)
 1183                 cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
 1184 
 1185         ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
 1186                                    DSM_FUNC_ACTIVATE_CHANNEL,
 1187                                    &iwl_guid, &value);
 1188         if (!ret)
 1189                 cmd.chan_state_active_bitmap = cpu_to_le32(value);
 1190 
 1191         ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
 1192                                    DSM_FUNC_ENABLE_6E,
 1193                                    &iwl_guid, &value);
 1194         if (!ret)
 1195                 cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
 1196 
 1197         ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
 1198                                    DSM_FUNC_FORCE_DISABLE_CHANNELS,
 1199                                    &iwl_guid, &value);
 1200         if (!ret)
 1201                 cmd.force_disable_channels_bitmap = cpu_to_le32(value);
 1202 
 1203         if (cmd.config_bitmap ||
 1204             cmd.oem_uhb_allow_bitmap ||
 1205             cmd.oem_11ax_allow_bitmap ||
 1206             cmd.oem_unii4_allow_bitmap ||
 1207             cmd.chan_state_active_bitmap ||
 1208             cmd.force_disable_channels_bitmap) {
 1209                 size_t cmd_size;
 1210                 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
 1211                                                    WIDE_ID(REGULATORY_AND_NVM_GROUP,
 1212                                                            LARI_CONFIG_CHANGE),
 1213                                                    1);
 1214                 switch (cmd_ver) {
 1215                 case 6:
 1216                         cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
 1217                         break;
 1218                 case 5:
 1219                         cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
 1220                         break;
 1221                 case 4:
 1222                         cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
 1223                         break;
 1224                 case 3:
 1225                         cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
 1226                         break;
 1227                 case 2:
 1228                         cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
 1229                         break;
 1230                 default:
 1231                         cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
 1232                         break;
 1233                 }
 1234 
 1235                 IWL_DEBUG_RADIO(mvm,
 1236                                 "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
 1237                                 le32_to_cpu(cmd.config_bitmap),
 1238                                 le32_to_cpu(cmd.oem_11ax_allow_bitmap));
 1239                 IWL_DEBUG_RADIO(mvm,
 1240                                 "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
 1241                                 le32_to_cpu(cmd.oem_unii4_allow_bitmap),
 1242                                 le32_to_cpu(cmd.chan_state_active_bitmap),
 1243                                 cmd_ver);
 1244                 IWL_DEBUG_RADIO(mvm,
 1245                                 "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
 1246                                 le32_to_cpu(cmd.oem_uhb_allow_bitmap),
 1247                                 le32_to_cpu(cmd.force_disable_channels_bitmap));
 1248                 ret = iwl_mvm_send_cmd_pdu(mvm,
 1249                                            WIDE_ID(REGULATORY_AND_NVM_GROUP,
 1250                                                    LARI_CONFIG_CHANGE),
 1251                                            0, cmd_size, &cmd);
 1252                 if (ret < 0)
 1253                         IWL_DEBUG_RADIO(mvm,
 1254                                         "Failed to send LARI_CONFIG_CHANGE (%d)\n",
 1255                                         ret);
 1256         }
 1257 }
 1258 
 1259 void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
 1260 {
 1261         int ret;
 1262 
 1263         /* read PPAG table */
 1264         ret = iwl_acpi_get_ppag_table(&mvm->fwrt);
 1265         if (ret < 0) {
 1266                 IWL_DEBUG_RADIO(mvm,
 1267                                 "PPAG BIOS table invalid or unavailable. (%d)\n",
 1268                                 ret);
 1269         }
 1270 
 1271         /* read SAR tables */
 1272         ret = iwl_sar_get_wrds_table(&mvm->fwrt);
 1273         if (ret < 0) {
 1274                 IWL_DEBUG_RADIO(mvm,
 1275                                 "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
 1276                                 ret);
 1277                 /*
 1278                  * If not available, don't fail and don't bother with EWRD
 1279                  * and WGDS.
 1280                  */
 1281                 if (!iwl_sar_get_wgds_table(&mvm->fwrt)) {
 1282                         /*
 1283                          * If basic SAR is not available, we check for WGDS,
 1284                          * which should *not* be available either.  If it is
 1285                          * available, issue an error, because we can't use SAR
 1286                          * Geo without basic SAR.
 1287                          */
 1288                         IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
 1289                 }
 1290 
 1291         } else {
 1292                 ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
 1293                 /* if EWRD is not available, we can still use
 1294                  * WRDS, so don't fail */
 1295                 if (ret < 0)
 1296                         IWL_DEBUG_RADIO(mvm,
 1297                                         "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
 1298                                         ret);
 1299 
 1300                 /* read geo SAR table */
 1301                 if (iwl_sar_geo_support(&mvm->fwrt)) {
 1302                         ret = iwl_sar_get_wgds_table(&mvm->fwrt);
 1303                         /* we don't fail if the table is not available */
 1304                         if (ret < 0)
 1305                                 IWL_DEBUG_RADIO(mvm,
 1306                                                 "Geo SAR BIOS table invalid or unavailable. (%d)\n",
 1307                                                 ret);
 1308                 }
 1309         }
 1310 }
 1311 #else /* CONFIG_ACPI */
 1312 
 1313 inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
 1314                                       int prof_a, int prof_b)
 1315 {
 1316         return 1;
 1317 }
 1318 
 1319 inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 1320 {
 1321         return -ENOENT;
 1322 }
 1323 
 1324 static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
 1325 {
 1326         return 0;
 1327 }
 1328 
 1329 int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
 1330 {
 1331         return -ENOENT;
 1332 }
 1333 
 1334 static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
 1335 {
 1336         return 0;
 1337 }
 1338 
 1339 static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
 1340 {
 1341 }
 1342 
 1343 static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
 1344 {
 1345 }
 1346 
 1347 static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
 1348 {
 1349         return DSM_VALUE_RFI_DISABLE;
 1350 }
 1351 
 1352 void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
 1353 {
 1354 }
 1355 
 1356 #endif /* CONFIG_ACPI */
 1357 
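      /*
       * Send FW_ERROR_RECOVERY_CMD to the firmware. With
       * ERROR_RECOVERY_UPDATE_DB, the previously saved error log buffer is
       * attached as a second fragment and its size reported in buf_size;
       * the buffer is freed once the command has been handed off.
       */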
 1358 void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
 1359 {
 1360         u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
 1361         int ret;
 1362         u32 resp;
 1363 
 1364         struct iwl_fw_error_recovery_cmd recovery_cmd = {
 1365                 .flags = cpu_to_le32(flags),
 1366                 .buf_size = 0,
 1367         };
 1368         struct iwl_host_cmd host_cmd = {
 1369                 .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
 1370                 .flags = CMD_WANT_SKB,
 1371                 .data = {&recovery_cmd, },
 1372                 .len = {sizeof(recovery_cmd), },
 1373         };
 1374 
 1375         /* no error log was defined in TLV */
 1376         if (!error_log_size)
 1377                 return;
 1378 
 1379         if (flags & ERROR_RECOVERY_UPDATE_DB) {
 1380                 /* no buffer was allocated during the HW reset */
 1381                 if (!mvm->error_recovery_buf)
 1382                         return;
 1383 
 1384                 host_cmd.data[1] = mvm->error_recovery_buf;
 1385                 host_cmd.len[1] = error_log_size;
 1386                 host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
 1387                 recovery_cmd.buf_size = cpu_to_le32(error_log_size);
 1388         }
 1389 
 1390         ret = iwl_mvm_send_cmd(mvm, &host_cmd);
 1391         kfree(mvm->error_recovery_buf);
 1392         mvm->error_recovery_buf = NULL;
 1393 
 1394         if (ret) {
 1395                 IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret);
 1396                 return;
 1397         }
 1398 
 1399         /* the skb response is only relevant for ERROR_RECOVERY_UPDATE_DB */
 1400         if (flags & ERROR_RECOVERY_UPDATE_DB) {
 1401                 resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
 1402                 if (resp)
 1403                         IWL_ERR(mvm,
 1404                                 "Recovery cmd was sent but the blob was invalid (%d)\n",
 1405                                 resp);
 1406         }
 1407 }
 1408 
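      /* Default SAR setup: select profile 1 for both prof_a and prof_b. */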
 1409 static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
 1410 {
 1411         return iwl_mvm_sar_select_profile(mvm, 1, 1);
 1412 }
 1413 
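      /*
       * Bring the runtime firmware up. Unified images go straight through
       * iwl_run_unified_mvm_ucode(); otherwise the INIT ucode is run first,
       * the device is restarted, and the regular image is loaded and waited
       * on before paging is initialized.
       */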
 1414 static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
 1415 {
 1416         int ret;
 1417 
 1418         if (iwl_mvm_has_unified_ucode(mvm))
 1419                 return iwl_run_unified_mvm_ucode(mvm);
 1420 
 1421         ret = iwl_run_init_mvm_ucode(mvm);
 1422 
 1423         if (ret) {
 1424                 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
 1425 
 1426                 if (iwlmvm_mod_params.init_dbg)
 1427                         return 0;
 1428                 return ret;
 1429         }
 1430 
 1431         iwl_fw_dbg_stop_sync(&mvm->fwrt);
 1432         iwl_trans_stop_device(mvm->trans);
 1433         ret = iwl_trans_start_hw(mvm->trans);
 1434         if (ret)
 1435                 return ret;
 1436 
 1437         mvm->rfkill_safe_init_done = false;
 1438         ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
 1439         if (ret)
 1440                 return ret;
 1441 
 1442         mvm->rfkill_safe_init_done = true;
 1443 
 1444         iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
 1445                                NULL);
 1446 
 1447         return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
 1448 }
 1449 
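      /*
       * Full bring-up path: start the hardware, load the runtime firmware,
       * then do the post-alive configuration (antennas, PHY, BT coex, RX
       * queues/RSS, station mapping, PHY contexts, thermal, power, LARI,
       * MCC, scan config and the PPAG/SAR platform tables).
       */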
 1450 int iwl_mvm_up(struct iwl_mvm *mvm)
 1451 {
 1452         int ret, i;
 1453         struct ieee80211_channel *chan;
 1454         struct cfg80211_chan_def chandef;
 1455         struct ieee80211_supported_band *sband = NULL;
 1456 
 1457         lockdep_assert_held(&mvm->mutex);
 1458 
 1459         ret = iwl_trans_start_hw(mvm->trans);
 1460         if (ret)
 1461                 return ret;
 1462 
 1463         ret = iwl_mvm_load_rt_fw(mvm);
 1464         if (ret) {
 1465                 IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
 1466                 if (ret != -ERFKILL)
 1467                         iwl_fw_dbg_error_collect(&mvm->fwrt,
 1468                                                  FW_DBG_TRIGGER_DRIVER);
 1469                 goto error;
 1470         }
 1471 
 1472         iwl_get_shared_mem_conf(&mvm->fwrt);
 1473 
 1474         ret = iwl_mvm_sf_update(mvm, NULL, false);
 1475         if (ret)
 1476                 IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
 1477 
 1478         if (!iwl_trans_dbg_ini_valid(mvm->trans)) {
 1479                 mvm->fwrt.dump.conf = FW_DBG_INVALID;
 1480                 /* if we have a destination, assume EARLY START */
 1481                 if (mvm->fw->dbg.dest_tlv)
 1482                         mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
 1483                 iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
 1484         }
 1485 
 1486         ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
 1487         if (ret)
 1488                 goto error;
 1489 
 1490         if (!iwl_mvm_has_unified_ucode(mvm)) {
 1491                 /* Send phy db control command and then phy db calibration */
 1492                 ret = iwl_send_phy_db_data(mvm->phy_db);
 1493                 if (ret)
 1494                         goto error;
 1495         }
 1496 
 1497         ret = iwl_send_phy_cfg_cmd(mvm);
 1498         if (ret)
 1499                 goto error;
 1500 
 1501         ret = iwl_mvm_send_bt_init_conf(mvm);
 1502         if (ret)
 1503                 goto error;
 1504 
 1505         if (fw_has_capa(&mvm->fw->ucode_capa,
 1506                         IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
 1507                 ret = iwl_set_soc_latency(&mvm->fwrt);
 1508                 if (ret)
 1509                         goto error;
 1510         }
 1511 
 1512         /* Init RSS configuration */
 1513         ret = iwl_configure_rxq(&mvm->fwrt);
 1514         if (ret)
 1515                 goto error;
 1516 
 1517         if (iwl_mvm_has_new_rx_api(mvm)) {
 1518                 ret = iwl_send_rss_cfg_cmd(mvm);
 1519                 if (ret) {
 1520                         IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
 1521                                 ret);
 1522                         goto error;
 1523                 }
 1524         }
 1525 
 1526         /* init the fw <-> mac80211 STA mapping */
 1527         for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
 1528                 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
 1529 
 1530         mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
 1531 
 1532         /* reset quota debouncing buffer - 0xff will yield invalid data */
 1533         memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
 1534 
 1535         if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) {
 1536                 ret = iwl_mvm_send_dqa_cmd(mvm);
 1537                 if (ret)
 1538                         goto error;
 1539         }
 1540 
 1541         /*
 1542          * Add an auxiliary station for scanning.
 1543          * Newer versions of this command imply that the fw uses an
 1544          * internal aux station for all aux activities that don't
 1545          * require a dedicated data queue.
 1546          */
 1547         if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
 1548                 /*
 1549                  * In old versions the aux station uses a mac id like any
 1550                  * other station, not an lmac id.
 1551                  */
 1552                 ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
 1553                 if (ret)
 1554                         goto error;
 1555         }
 1556 
 1557         /* Add all the PHY contexts */
 1558         i = 0;
 1559         while (!sband && i < NUM_NL80211_BANDS)
 1560                 sband = mvm->hw->wiphy->bands[i++];
 1561 
 1562         if (WARN_ON_ONCE(!sband)) {
 1563                 ret = -ENODEV;
 1564                 goto error;
 1565         }
 1566 
 1567         chan = &sband->channels[0];
 1568 
 1569         cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
 1570         for (i = 0; i < NUM_PHY_CTX; i++) {
 1571                 /*
 1572                  * The channel used here isn't relevant as it's
 1573                  * going to be overwritten in the other flows.
 1574                  * For now use the first channel we have.
 1575                  */
 1576                 ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
 1577                                            &chandef, 1, 1);
 1578                 if (ret)
 1579                         goto error;
 1580         }
 1581 
 1582         if (iwl_mvm_is_tt_in_fw(mvm)) {
 1583                 /* in order to hand the responsibility for CT-kill and
 1584                  * TX backoff to the FW, we need to send an empty temperature
 1585                  * reporting cmd during init time
 1586                  */
 1587                 iwl_mvm_send_temp_report_ths_cmd(mvm);
 1588         } else {
 1589                 /* Initialize tx backoffs to the minimal possible */
 1590                 iwl_mvm_tt_tx_backoff(mvm, 0);
 1591         }
 1592 
 1593 #ifdef CONFIG_THERMAL
 1594         /* TODO: read the budget from BIOS / Platform NVM */
 1595 
 1596         /*
 1597          * In case there is no budget from the BIOS / Platform NVM, the
 1598          * default budget should be 2000 mW (cooling state 0).
 1599          */
 1600         if (iwl_mvm_is_ctdp_supported(mvm)) {
 1601                 ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
 1602                                            mvm->cooling_dev.cur_state);
 1603                 if (ret)
 1604                         goto error;
 1605         }
 1606 #endif
 1607 
 1608         if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2))
 1609                 WARN_ON(iwl_mvm_config_ltr(mvm));
 1610 
 1611         ret = iwl_mvm_power_update_device(mvm);
 1612         if (ret)
 1613                 goto error;
 1614 
 1615         iwl_mvm_lari_cfg(mvm);
 1616         /*
 1617          * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
 1618          * anyway, so don't init MCC.
 1619          */
 1620         if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
 1621                 ret = iwl_mvm_init_mcc(mvm);
 1622                 if (ret)
 1623                         goto error;
 1624         }
 1625 
 1626         if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
 1627                 mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
 1628                 mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
 1629                 ret = iwl_mvm_config_scan(mvm);
 1630                 if (ret)
 1631                         goto error;
 1632         }
 1633 
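              /*
               * On a HW restart, push the error recovery blob that was saved
               * before the reset back to the firmware via
               * ERROR_RECOVERY_UPDATE_DB.
               */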
 1634         if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
 1635                 iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB);
 1636 
 1637         if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
 1638                 IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");
 1639 
 1640         ret = iwl_mvm_ppag_init(mvm);
 1641         if (ret)
 1642                 goto error;
 1643 
 1644         ret = iwl_mvm_sar_init(mvm);
 1645         if (ret == 0)
 1646                 ret = iwl_mvm_sar_geo_init(mvm);
 1647         if (ret < 0)
 1648                 goto error;
 1649 
 1650         ret = iwl_mvm_sgom_init(mvm);
 1651         if (ret)
 1652                 goto error;
 1653 
 1654         iwl_mvm_tas_init(mvm);
 1655         iwl_mvm_leds_sync(mvm);
 1656 
 1657         iwl_mvm_ftm_initiator_smooth_config(mvm);
 1658 
 1659         if (fw_has_capa(&mvm->fw->ucode_capa,
 1660                         IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) {
 1661                 if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
 1662                         iwl_rfi_send_config_cmd(mvm, NULL);
 1663         }
 1664 
 1665         IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
 1666         return 0;
 1667  error:
 1668         if (!iwlmvm_mod_params.init_dbg || !ret)
 1669                 iwl_mvm_stop_device(mvm);
 1670         return ret;
 1671 }
 1672 
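      /*
       * Load the WoWLAN (D3) image and do the minimal configuration it
       * needs: TX antennas, PHY DB data, the PHY config command, and the
       * aux station when the ADD_STA command version is older than 12.
       */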
 1673 int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
 1674 {
 1675         int ret, i;
 1676 
 1677         lockdep_assert_held(&mvm->mutex);
 1678 
 1679         ret = iwl_trans_start_hw(mvm->trans);
 1680         if (ret)
 1681                 return ret;
 1682 
 1683         ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
 1684         if (ret) {
 1685                 IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
 1686                 goto error;
 1687         }
 1688 
 1689         ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
 1690         if (ret)
 1691                 goto error;
 1692 
 1693         /* Send phy db control command and then phy db calibration */
 1694         ret = iwl_send_phy_db_data(mvm->phy_db);
 1695         if (ret)
 1696                 goto error;
 1697 
 1698         ret = iwl_send_phy_cfg_cmd(mvm);
 1699         if (ret)
 1700                 goto error;
 1701 
 1702         /* init the fw <-> mac80211 STA mapping */
 1703         for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
 1704                 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
 1705 
 1706         if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
 1707                 /*
 1708                  * Add an auxiliary station for scanning.
 1709                  * Newer versions of this command imply that the fw uses an
 1710                  * internal aux station for all aux activities that don't
 1711                  * require a dedicated data queue.
 1712                  * In old versions the aux station uses a mac id like any
 1713                  * other station, not an lmac id.
 1714                  */
 1715                 ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
 1716                 if (ret)
 1717                         goto error;
 1718         }
 1719 
 1720         return 0;
 1721  error:
 1722         iwl_mvm_stop_device(mvm);
 1723         return ret;
 1724 }
 1725 
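      /*
       * Handle the MFUART load notification: log the installed and external
       * versions, status and duration, plus the image size when the payload
       * is large enough to carry it.
       */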
 1726 void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
 1727                              struct iwl_rx_cmd_buffer *rxb)
 1728 {
 1729         struct iwl_rx_packet *pkt = rxb_addr(rxb);
 1730         struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
 1731 
 1732         IWL_DEBUG_INFO(mvm,
 1733                        "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
 1734                        le32_to_cpu(mfuart_notif->installed_ver),
 1735                        le32_to_cpu(mfuart_notif->external_ver),
 1736                        le32_to_cpu(mfuart_notif->status),
 1737                        le32_to_cpu(mfuart_notif->duration));
 1738 
 1739         if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
 1740                 IWL_DEBUG_INFO(mvm,
 1741                                "MFUART: image size: 0x%08x\n",
 1742                                le32_to_cpu(mfuart_notif->image_size));
 1743 }
