return true;
}
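+/* Notification-wait handler for INIT_COMPLETE_NOTIF; returning true ends
+ * the wait in iwl_wait_notification().
+ */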
+static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
+
+ return true;
+}
+
static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
return false;
}
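+/* Configure FW paging once the ucode is alive.  This is the paging setup
+ * previously done inline in iwl_mvm_load_ucode_wait_alive(), split out so
+ * the unified-image flow can call it as well.
+ */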
+static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
+{
+ const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
+ int ret;
+
+ /*
+ * Configure and operate the FW paging mechanism.
+ * The driver configures the paging flow only once.
+ * The CPU2 paging image is included in the IWL_UCODE_INIT image.
+ */
+ if (!fw->paging_mem_size)
+ return 0;
+
+ /*
+ * When DMA is not enabled, the driver needs to copy/write
+ * the downloaded/uploaded page to/from SMEM.
+ * This gets the location where the pages are stored.
+ */
+ if (!is_device_dma_capable(mvm->trans->dev)) {
+ ret = iwl_trans_get_paging_item(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "failed to get FW paging item\n");
+ return ret;
+ }
+ }
+
+ ret = iwl_save_fw_paging(mvm, fw);
+ if (ret) {
+ IWL_ERR(mvm, "failed to save the FW paging image\n");
+ return ret;
+ }
+
+ ret = iwl_send_paging_cmd(mvm, fw);
+ if (ret) {
+ IWL_ERR(mvm, "failed to send the paging cmd\n");
+ iwl_free_fw_paging(mvm);
+ return ret;
+ }
+
+ return 0;
+}
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type ucode_type)
{
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
- /*
- * configure and operate fw paging mechanism.
- * driver configures the paging flow only once, CPU2 paging image
- * included in the IWL_UCODE_INIT image.
- */
- if (fw->paging_mem_size) {
- /*
- * When dma is not enabled, the driver needs to copy / write
- * the downloaded / uploaded page to / from the smem.
- * This gets the location of the place were the pages are
- * stored.
- */
- if (!is_device_dma_capable(mvm->trans->dev)) {
- ret = iwl_trans_get_paging_item(mvm);
- if (ret) {
- IWL_ERR(mvm, "failed to get FW paging item\n");
- return ret;
- }
- }
-
- ret = iwl_save_fw_paging(mvm, fw);
- if (ret) {
- IWL_ERR(mvm, "failed to save the FW paging image\n");
- return ret;
- }
-
- ret = iwl_send_paging_cmd(mvm, fw);
- if (ret) {
- IWL_ERR(mvm, "failed to send the paging cmd\n");
- iwl_free_fw_paging(mvm);
- return ret;
- }
- }
-
/*
* Note: all the queues are enabled as part of the interface
* initialization, but in firmware restart scenarios they
return ret;
}
+int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+ struct iwl_notification_wait init_wait;
+ struct iwl_nvm_access_complete_cmd nvm_complete = {};
+ static const u16 init_complete[] = {
+ INIT_COMPLETE_NOTIF,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
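+ /* Register the INIT_COMPLETE_NOTIF waiter before starting the
+ * firmware so the notification cannot be missed.
+ */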
+ iwl_init_notification_wait(&mvm->notif_wait,
+ &init_wait,
+ init_complete,
+ ARRAY_SIZE(init_complete),
+ iwl_wait_init_complete,
+ NULL);
+
+ /* Will also start the device */
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+ goto error;
+ }
+
+ /* TODO: remove when integrating context info */
+ ret = iwl_mvm_init_paging(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to init paging: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* Read the NVM only at driver load time, no need to do this twice */
+ if (read_nvm) {
+ /* Read nvm */
+ ret = iwl_nvm_init(mvm, true);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+ goto error;
+ }
+ }
+
+ /* In case we read the NVM from an external file, load it to the NIC */
+ if (mvm->nvm_file_name)
+ iwl_mvm_load_nvm_to_nic(mvm);
+
+ ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
+ if (WARN_ON(ret))
+ goto error;
+
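+ /* Let the firmware know NVM access is done; the INIT complete
+ * notification registered above is then waited on below.
+ */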
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ NVM_ACCESS_COMPLETE), 0,
+ sizeof(nvm_complete), &nvm_complete);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* We wait for the INIT complete notification */
+ return iwl_wait_notification(&mvm->notif_wait, &init_wait,
+ MVM_UCODE_ALIVE_TIMEOUT);
+
+error:
+ iwl_remove_notification(&mvm->notif_wait, &init_wait);
+ return ret;
+}
+
static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
return ret;
}
-int iwl_mvm_up(struct iwl_mvm *mvm)
+static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
- int ret, i;
- struct ieee80211_channel *chan;
- struct cfg80211_chan_def chandef;
-
- lockdep_assert_held(&mvm->mutex);
+ int ret;
- ret = iwl_trans_start_hw(mvm->trans);
- if (ret)
- return ret;
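+ /* Devices using the new TX API run a single unified image, so the
+ * separate INIT ucode flow below does not apply to them.
+ */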
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_run_unified_mvm_ucode(mvm, false);
- /*
- * If we haven't completed the run of the init ucode during
- * module loading, load init ucode now
- * (for example, if we were in RFKILL)
- */
ret = iwl_run_init_mvm_ucode(mvm, false);
if (iwlmvm_mod_params.init_dbg)
/* this can't happen */
if (WARN_ON(ret > 0))
ret = -ERFKILL;
- goto error;
+ return ret;
}
/*
_iwl_trans_stop_device(mvm->trans, false);
ret = _iwl_trans_start_hw(mvm->trans, false);
if (ret)
- goto error;
+ return ret;
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_init_paging(mvm);
+}
+
+int iwl_mvm_up(struct iwl_mvm *mvm)
+{
+ int ret, i;
+ struct ieee80211_channel *chan;
+ struct cfg80211_chan_def chandef;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
goto error;
goto error;
/* Send phy db control command and then phy db calibration*/
- ret = iwl_send_phy_db_data(mvm->phy_db);
- if (ret)
- goto error;
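+ /* The PHY DB is populated by the INIT firmware; with the unified
+ * image there is no separate INIT run, so skip sending it.
+ */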
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ ret = iwl_send_phy_db_data(mvm->phy_db);
+ if (ret)
+ goto error;
- ret = iwl_send_phy_cfg_cmd(mvm);
- if (ret)
- goto error;
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+ }
/* Init RSS configuration */
if (iwl_mvm_has_new_rx_api(mvm)) {
HCMD_NAME(STORED_BEACON_NTF),
};
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
+ HCMD_NAME(NVM_ACCESS_COMPLETE),
+};
+
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
+ [REGULATORY_AND_NVM_GROUP] =
+ HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
};
/* this forward declaration can avoid to export the function */
mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
}
mvm->sf_state = SF_UNINIT;
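+ /* A unified image has no separate INIT ucode, so the first image
+ * loaded is the regular one.
+ */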
- mvm->cur_ucode = IWL_UCODE_INIT;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ mvm->cur_ucode = IWL_UCODE_REGULAR;
+ else
+ mvm->cur_ucode = IWL_UCODE_INIT;
mvm->drop_bcn_ap_mode = true;
mutex_init(&mvm->mutex);
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
- err = iwl_run_init_mvm_ucode(mvm, true);
+ if (iwl_mvm_has_new_tx_api(mvm))
+ err = iwl_run_unified_mvm_ucode(mvm, true);
+ else
+ err = iwl_run_init_mvm_ucode(mvm, true);
if (!err || !iwlmvm_mod_params.init_dbg)
iwl_mvm_stop_device(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
err = iwl_mvm_mac_setup_register(mvm);
if (err)
goto out_free;
+ mvm->hw_registered = true;
min_backoff = calc_min_backoff(trans, cfg);
iwl_mvm_thermal_initialize(mvm, min_backoff);
out_unregister:
ieee80211_unregister_hw(mvm->hw);
+ mvm->hw_registered = false;
iwl_mvm_leds_exit(mvm);
iwl_mvm_thermal_exit(mvm);
out_free:
reprobe->dev = mvm->trans->dev;
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
- } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
+ } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
+ mvm->hw_registered) {
/* don't let the transport/FW power down */
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);