FreeBSD/Linux Kernel Cross Reference
sys/contrib/dev/rtw89/ser.c

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include <linux/devcoredump.h>

#include "cam.h"
#include "chan.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "ps.h"
#include "reg.h"
#include "ser.h"
#include "util.h"

#define SER_RECFG_TIMEOUT 1000

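/* SER (system error recovery) state-machine events. M1/M3/M5 mark the
 * L1 recovery notifications reported by firmware; the driver answers
 * with M2/M4 (see hal_send_m2_event()/hal_send_m4_event() below).
 */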
enum ser_evt {
        SER_EV_NONE,
        SER_EV_STATE_IN,
        SER_EV_STATE_OUT,
        SER_EV_L1_RESET, /* M1 */
        SER_EV_DO_RECOVERY, /* M3 */
        SER_EV_MAC_RESET_DONE, /* M5 */
        SER_EV_L2_RESET,
        SER_EV_L2_RECFG_DONE,
        SER_EV_L2_RECFG_TIMEOUT,
        SER_EV_M3_TIMEOUT,
        SER_EV_FW_M5_TIMEOUT,
        SER_EV_L0_RESET,
        SER_EV_MAXX
};

enum ser_state {
        SER_IDLE_ST,
        SER_RESET_TRX_ST,
        SER_DO_HCI_ST,
        SER_L2_RESET_ST,
        SER_ST_MAX_ST
};

struct ser_msg {
        struct list_head list;
        u8 event;
};

struct state_ent {
        u8 state;
        char *name;
        void (*st_func)(struct rtw89_ser *ser, u8 event);
};

struct event_ent {
        u8 event;
        char *name;
};

static char *ser_ev_name(struct rtw89_ser *ser, u8 event)
{
        if (event < SER_EV_MAXX)
                return ser->ev_tbl[event].name;

        return "err_ev_name";
}

static char *ser_st_name(struct rtw89_ser *ser)
{
        if (ser->state < SER_ST_MAX_ST)
                return ser->st_tbl[ser->state].name;

        return "err_st_name";
}

#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \
struct ser_cd_ ## _name { \
        u32 type; \
        u32 type_size; \
        u64 padding; \
        u8 data[_size]; \
} __packed; \
static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \
{ \
        p->type = _type; \
        p->type_size = sizeof(p->data); \
        p->padding = 0x0123456789abcdef; \
}

enum rtw89_ser_cd_type {
        RTW89_SER_CD_FW_RSVD_PLE        = 0,
        RTW89_SER_CD_FW_BACKTRACE       = 1,
};

RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple,
                      RTW89_SER_CD_FW_RSVD_PLE,
                      RTW89_FW_RSVD_PLE_SIZE);

RTW89_DEF_SER_CD_TYPE(fw_backtrace,
                      RTW89_SER_CD_FW_BACKTRACE,
                      RTW89_FW_BACKTRACE_MAX_SIZE);

struct rtw89_ser_cd_buffer {
        struct ser_cd_fw_rsvd_ple fwple;
        struct ser_cd_fw_backtrace fwbt;
} __packed;

static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev)
{
        struct rtw89_ser_cd_buffer *buf;

        buf = vzalloc(sizeof(*buf));
        if (!buf)
                return NULL;

        ser_cd_fw_rsvd_ple_init(&buf->fwple);
        ser_cd_fw_backtrace_init(&buf->fwbt);

        return buf;
}

static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev,
                              struct rtw89_ser_cd_buffer *buf)
{
        rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n");

        /* After calling dev_coredump, buf's lifetime is supposed to be
         * handled by the device coredump framework. Note that a new dump
         * will be discarded if a previous one hasn't been released by
         * framework yet.
         */
        dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL);
}

static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev,
                              struct rtw89_ser_cd_buffer *buf, bool free_self)
{
        if (!free_self)
                return;

        rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by self\n");

        /* When some problems happen during filling data of core dump,
         * we won't send it to device coredump framework. Instead, we
         * free buf by ourselves.
         */
        vfree(buf);
}

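/* State-machine core: ser_state_run() dispatches an event to the current
 * state's handler (forcing the device out of LPS first), and
 * ser_state_goto() switches states, delivering SER_EV_STATE_OUT to the old
 * state and SER_EV_STATE_IN to the new one.
 */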
static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

        rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
                    ser_st_name(ser), ser_ev_name(ser, evt));

        mutex_lock(&rtwdev->mutex);
        rtw89_leave_lps(rtwdev);
        mutex_unlock(&rtwdev->mutex);

        ser->st_tbl[ser->state].st_func(ser, evt);
}

static void ser_state_goto(struct rtw89_ser *ser, u8 new_state)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

        if (ser->state == new_state || new_state >= SER_ST_MAX_ST)
                return;
        ser_state_run(ser, SER_EV_STATE_OUT);

        rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n",
                    ser_st_name(ser), ser->st_tbl[new_state].name);

        ser->state = new_state;
        ser_state_run(ser, SER_EV_STATE_IN);
}

static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser)
{
        struct ser_msg *msg;

        spin_lock_irq(&ser->msg_q_lock);
        msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list);
        if (msg)
                list_del(&msg->list);
        spin_unlock_irq(&ser->msg_q_lock);

        return msg;
}

static void rtw89_ser_hdl_work(struct work_struct *work)
{
        struct ser_msg *msg;
        struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
                                             ser_hdl_work);

        while ((msg = __rtw89_ser_dequeue_msg(ser))) {
                ser_state_run(ser, msg->event);
                kfree(msg);
        }
}

static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
        struct ser_msg *msg = NULL;

        if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
                return -EIO;

        msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
        if (!msg)
                return -ENOMEM;

        msg->event = event;

        spin_lock_irq(&ser->msg_q_lock);
        list_add(&msg->list, &ser->msg_q);
        spin_unlock_irq(&ser->msg_q_lock);

        ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
        return 0;
}

static void rtw89_ser_alarm_work(struct work_struct *work)
{
        struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
                                             ser_alarm_work.work);

        ser_send_msg(ser, ser->alarm_event);
        ser->alarm_event = SER_EV_NONE;
}

static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

        if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
                return;

        ser->alarm_event = event;
        ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work,
                                     msecs_to_jiffies(ms));
}

static void ser_del_alarm(struct rtw89_ser *ser)
{
        cancel_delayed_work(&ser->ser_alarm_work);
        ser->alarm_event = SER_EV_NONE;
}

/* driver function */
static void drv_stop_tx(struct rtw89_ser *ser)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

        ieee80211_stop_queues(rtwdev->hw);
        set_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_stop_rx(struct rtw89_ser *ser)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

        clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
        set_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void drv_trx_reset(struct rtw89_ser *ser)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

        rtw89_hci_reset(rtwdev);
}

static void drv_resume_tx(struct rtw89_ser *ser)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

        if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags))
                return;

        ieee80211_wake_queues(rtwdev->hw);
        clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_resume_rx(struct rtw89_ser *ser)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

        if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags))
                return;

        set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
        clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
        rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
        rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
        rtwvif->trigger = false;
}

static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
{
        struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
        struct rtw89_dev *rtwdev = rtwvif->rtwdev;
        struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;

        if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
                rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
        if (sta->tdls)
                rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);

        INIT_LIST_HEAD(&rtwsta->ba_cam_list);
}

static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
        ieee80211_iterate_stations_atomic(rtwdev->hw,
                                          ser_sta_deinit_cam_iter,
                                          rtwvif);

        rtw89_cam_deinit(rtwdev, rtwvif);

        bitmap_zero(rtwdev->cam_info.ba_cam_map, RTW89_MAX_BA_CAM_NUM);
}

static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
        struct rtw89_vif *rtwvif;

        rtw89_cam_reset_keys(rtwdev);
        rtw89_for_each_rtwvif(rtwdev, rtwvif)
                ser_deinit_cam(rtwdev, rtwvif);

        rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
        rtw89_for_each_rtwvif(rtwdev, rtwvif)
                ser_reset_vif(rtwdev, rtwvif);
}

/* hal function */
static int hal_enable_dma(struct rtw89_ser *ser)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
        int ret;

        if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags))
                return 0;

        if (!rtwdev->hci.ops->mac_lv1_rcvy)
                return -EIO;

        ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
        if (!ret)
                clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

        return ret;
}

static int hal_stop_dma(struct rtw89_ser *ser)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
        int ret;

        if (!rtwdev->hci.ops->mac_lv1_rcvy)
                return -EIO;

        ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
        if (!ret)
                set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

        return ret;
}

static void hal_send_m2_event(struct rtw89_ser *ser)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

        rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN);
}

static void hal_send_m4_event(struct rtw89_ser *ser)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

        rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
}

/* state handler */
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

        switch (evt) {
        case SER_EV_STATE_IN:
                rtw89_hci_recovery_complete(rtwdev);
                break;
        case SER_EV_L1_RESET:
                ser_state_goto(ser, SER_RESET_TRX_ST);
                break;
        case SER_EV_L2_RESET:
                ser_state_goto(ser, SER_L2_RESET_ST);
                break;
        case SER_EV_STATE_OUT:
                rtw89_hci_recovery_start(rtwdev);
                break;
        default:
                break;
        }
}

static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
        switch (evt) {
        case SER_EV_STATE_IN:
                drv_stop_tx(ser);

                if (hal_stop_dma(ser)) {
                        ser_state_goto(ser, SER_L2_RESET_ST);
                        break;
                }

                drv_stop_rx(ser);
                drv_trx_reset(ser);

                /* wait m3 */
                hal_send_m2_event(ser);

                /* set alarm to prevent FW response timeout */
                ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT);
                break;

        case SER_EV_DO_RECOVERY:
                ser_state_goto(ser, SER_DO_HCI_ST);
                break;

        case SER_EV_M3_TIMEOUT:
                ser_state_goto(ser, SER_L2_RESET_ST);
                break;

        case SER_EV_STATE_OUT:
                ser_del_alarm(ser);
                hal_enable_dma(ser);
                drv_resume_rx(ser);
                drv_resume_tx(ser);
                break;

        default:
                break;
        }
}

static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
{
        switch (evt) {
        case SER_EV_STATE_IN:
                /* wait m5 */
                hal_send_m4_event(ser);

                /* prevent FW response timeout */
                ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT);
                break;

        case SER_EV_FW_M5_TIMEOUT:
                ser_state_goto(ser, SER_L2_RESET_ST);
                break;

        case SER_EV_MAC_RESET_DONE:
                ser_state_goto(ser, SER_IDLE_ST);
                break;

        case SER_EV_STATE_OUT:
                ser_del_alarm(ser);
                break;

        default:
                break;
        }
}

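/* Dump MAC internal memory by programming the page base address into
 * R_AX_FILTER_MODEL_ADDR and reading 32-bit words back through the
 * indirect access window starting at R_AX_INDIR_ACCESS_ENTRY.
 */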
static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
                             u8 sel, u32 start_addr, u32 len)
{
        u32 *ptr = (u32 *)buf;
        u32 base_addr, start_page, residue;
        u32 cnt = 0;
        u32 i;

        start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
        residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
        base_addr = rtw89_mac_mem_base_addrs[sel];
        base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;

        while (cnt < len) {
                rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr);

                for (i = R_AX_INDIR_ACCESS_ENTRY + residue;
                     i < R_AX_INDIR_ACCESS_ENTRY + MAC_MEM_DUMP_PAGE_SIZE;
                     i += 4, ptr++) {
                        *ptr = rtw89_read32(rtwdev, i);
                        cnt += 4;
                        if (cnt >= len)
                                break;
                }

                residue = 0;
                base_addr += MAC_MEM_DUMP_PAGE_SIZE;
        }
}

static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf)
{
        u32 start_addr = rtwdev->chip->rsvd_ple_ofst;

        rtw89_debug(rtwdev, RTW89_DBG_SER,
                    "dump mem for fw rsvd payload engine (start addr: 0x%x)\n",
                    start_addr);
        ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr,
                         RTW89_FW_RSVD_PLE_SIZE);
}

struct __fw_backtrace_entry {
        u32 wcpu_addr;
        u32 size;
        u32 key;
} __packed;

struct __fw_backtrace_info {
        u32 ra;
        u32 sp;
} __packed;

#if defined(__linux__)
static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
#elif defined(__FreeBSD__)
rtw89_static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
#endif
              sizeof(struct __fw_backtrace_info));

static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
                                       const struct __fw_backtrace_entry *ent)
{
        struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
        u32 fwbt_addr = ent->wcpu_addr - RTW89_WCPU_BASE_ADDR;
        u32 fwbt_size = ent->size;
        u32 fwbt_key = ent->key;
        u32 i;

        if (fwbt_addr == 0) {
                rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n",
                           fwbt_addr);
                return -EINVAL;
        }

        if (fwbt_key != RTW89_FW_BACKTRACE_KEY) {
                rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n",
                           fwbt_key);
                return -EINVAL;
        }

        if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) ||
            fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) {
                rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n",
                           fwbt_size);
                return -EINVAL;
        }

        rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n");
        rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, fwbt_addr);

        for (i = R_AX_INDIR_ACCESS_ENTRY;
             i < R_AX_INDIR_ACCESS_ENTRY + fwbt_size;
             i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) {
                *ptr = (struct __fw_backtrace_info){
                        .ra = rtw89_read32(rtwdev, i),
                        .sp = rtw89_read32(rtwdev, i + 4),
                };
                rtw89_debug(rtwdev, RTW89_DBG_SER,
                            "next sp: 0x%x, next ra: 0x%x\n",
                            ptr->sp, ptr->ra);
        }

        rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n");
        return 0;
}

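/* L2 (full) reset: capture a devcoredump of the firmware reserved PLE
 * area and firmware backtrace, then tear down MAC bindings and stop the
 * core before asking mac80211 to restart the hardware.
 */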
static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
        struct rtw89_ser_cd_buffer *buf;
        struct __fw_backtrace_entry fwbt_ent;
        int ret = 0;

        buf = rtw89_ser_cd_prep(rtwdev);
        if (!buf) {
                ret = -ENOMEM;
                goto bottom;
        }

        rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data);

        fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data;
        ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent);
        if (ret)
                goto bottom;

        rtw89_ser_cd_send(rtwdev, buf);

bottom:
        rtw89_ser_cd_free(rtwdev, buf, !!ret);

        ser_reset_mac_binding(rtwdev);
        rtw89_core_stop(rtwdev);
        rtw89_entity_init(rtwdev);
        INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}

static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
        struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

        switch (evt) {
        case SER_EV_STATE_IN:
                mutex_lock(&rtwdev->mutex);
                ser_l2_reset_st_pre_hdl(ser);
                mutex_unlock(&rtwdev->mutex);

                ieee80211_restart_hw(rtwdev->hw);
                ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
                break;

        case SER_EV_L2_RECFG_TIMEOUT:
                rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n");
                fallthrough;
        case SER_EV_L2_RECFG_DONE:
                ser_state_goto(ser, SER_IDLE_ST);
                clear_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags);
                break;

        case SER_EV_STATE_OUT:
                ser_del_alarm(ser);
                break;

        default:
                break;
        }
}

static const struct event_ent ser_ev_tbl[] = {
        {SER_EV_NONE, "SER_EV_NONE"},
        {SER_EV_STATE_IN, "SER_EV_STATE_IN"},
        {SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
        {SER_EV_L1_RESET, "SER_EV_L1_RESET"},
        {SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"},
        {SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"},
        {SER_EV_L2_RESET, "SER_EV_L2_RESET"},
        {SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"},
        {SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"},
        {SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"},
        {SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"},
        {SER_EV_L0_RESET, "SER_EV_L0_RESET"},
        {SER_EV_MAXX, "SER_EV_MAX"}
};

static const struct state_ent ser_st_tbl[] = {
        {SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
        {SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
        {SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
        {SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl}
};

int rtw89_ser_init(struct rtw89_dev *rtwdev)
{
        struct rtw89_ser *ser = &rtwdev->ser;

        memset(ser, 0, sizeof(*ser));
        INIT_LIST_HEAD(&ser->msg_q);
        ser->state = SER_IDLE_ST;
        ser->st_tbl = ser_st_tbl;
        ser->ev_tbl = ser_ev_tbl;

        bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS);
        spin_lock_init(&ser->msg_q_lock);
        INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work);
        INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work);
        return 0;
}

int rtw89_ser_deinit(struct rtw89_dev *rtwdev)
{
        struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser;

        set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
        cancel_delayed_work_sync(&ser->ser_alarm_work);
        cancel_work_sync(&ser->ser_hdl_work);
        clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
        return 0;
}

void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev)
{
        ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE);
}

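/* Entry point from the MAC error reporting path: translate the reported
 * MAC_AX_ERR_* code into an SER event and queue it to the state machine.
 */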
int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
        u8 event = SER_EV_NONE;

        rtw89_info(rtwdev, "SER catches error: 0x%x\n", err);

        switch (err) {
        case MAC_AX_ERR_L1_ERR_DMAC:
        case MAC_AX_ERR_L0_PROMOTE_TO_L1:
                event = SER_EV_L1_RESET; /* M1 */
                break;
        case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE:
                event = SER_EV_DO_RECOVERY; /* M3 */
                break;
        case MAC_AX_ERR_L1_RESET_RECOVERY_DONE:
                event = SER_EV_MAC_RESET_DONE; /* M5 */
                break;
        case MAC_AX_ERR_L0_ERR_CMAC0:
        case MAC_AX_ERR_L0_ERR_CMAC1:
        case MAC_AX_ERR_L0_RESET_DONE:
                event = SER_EV_L0_RESET;
                break;
        default:
                if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 ||
                    (err >= MAC_AX_ERR_L2_ERR_AH_DMA &&
                     err <= MAC_AX_GET_ERR_MAX))
                        event = SER_EV_L2_RESET;
                break;
        }

        if (event == SER_EV_NONE) {
                rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err);
                return -EINVAL;
        }

        ser_send_msg(&rtwdev->ser, event);
        return 0;
}
EXPORT_SYMBOL(rtw89_ser_notify);





This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.