mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-09 10:11:52 -04:00
wifi: iwlwifi: pass full FW info to transport
The code currently passes only the specific image that should be loaded, but then has to pass the IML (image loader) out of band, which is confusing. Pass the full FW data together with the desired image type, and use the IML from that. This also cleans up the code in the various sub-drivers a bit, as they no longer have to look up and check for the image.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Link: https://patch.msgid.link/20250503224231.eac4006e81c5.Iebadc56bb2762e5f4d71f66bb2609d74b33daf11@changeid
This commit is contained in: (branch list unavailable in mirror)
Committed by: Miri Korenblit
Parent commit: 9babfb5f1f
Commit: a94d018996
@@ -3,6 +3,7 @@
|
||||
*
|
||||
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2015 Intel Deutschland GmbH
|
||||
* Copyright (C) 2025 Intel Corporation
|
||||
*****************************************************************************/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
@@ -293,15 +294,10 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
|
||||
{
|
||||
struct iwl_notification_wait alive_wait;
|
||||
struct iwl_alive_data alive_data;
|
||||
const struct fw_img *fw;
|
||||
int ret;
|
||||
enum iwl_ucode_type old_type;
|
||||
static const u16 alive_cmd[] = { REPLY_ALIVE };
|
||||
|
||||
fw = iwl_get_ucode_image(priv->fw, ucode_type);
|
||||
if (WARN_ON(!fw))
|
||||
return -EINVAL;
|
||||
|
||||
old_type = priv->cur_ucode;
|
||||
priv->cur_ucode = ucode_type;
|
||||
priv->ucode_loaded = false;
|
||||
@@ -310,7 +306,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
|
||||
alive_cmd, ARRAY_SIZE(alive_cmd),
|
||||
iwl_alive_fn, &alive_data);
|
||||
|
||||
ret = iwl_trans_start_fw(priv->trans, fw, false);
|
||||
ret = iwl_trans_start_fw(priv->trans, priv->fw, ucode_type, false);
|
||||
if (ret) {
|
||||
priv->cur_ucode = old_type;
|
||||
iwl_remove_notification(&priv->notif_wait, &alive_wait);
|
||||
|
||||
@@ -324,7 +324,8 @@ struct iwl_context_info_gen3 {
|
||||
} __packed; /* IPC_CONTEXT_INFO_S */
|
||||
|
||||
int iwl_pcie_ctxt_info_gen3_alloc(struct iwl_trans *trans,
|
||||
const struct fw_img *fw);
|
||||
const struct iwl_fw *fw,
|
||||
const struct fw_img *img);
|
||||
void iwl_pcie_ctxt_info_gen3_kick(struct iwl_trans *trans);
|
||||
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive);
|
||||
|
||||
|
||||
@@ -181,7 +181,7 @@ struct iwl_context_info {
|
||||
__le32 reserved3[16];
|
||||
} __packed;
|
||||
|
||||
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
|
||||
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *img);
|
||||
void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
|
||||
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
|
||||
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
|
||||
|
||||
@@ -590,21 +590,28 @@ void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_trans_fw_alive);
|
||||
|
||||
int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
|
||||
bool run_in_rfkill)
|
||||
int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw,
|
||||
enum iwl_ucode_type ucode_type, bool run_in_rfkill)
|
||||
{
|
||||
const struct fw_img *img;
|
||||
int ret;
|
||||
|
||||
might_sleep();
|
||||
|
||||
WARN_ON_ONCE(!trans->rx_mpdu_cmd);
|
||||
|
||||
img = iwl_get_ucode_image(fw, ucode_type);
|
||||
if (!img)
|
||||
return -EINVAL;
|
||||
|
||||
clear_bit(STATUS_FW_ERROR, &trans->status);
|
||||
|
||||
if (trans->trans_cfg->gen2)
|
||||
ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill);
|
||||
ret = iwl_trans_pcie_gen2_start_fw(trans, fw, img,
|
||||
run_in_rfkill);
|
||||
else
|
||||
ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill);
|
||||
ret = iwl_trans_pcie_start_fw(trans, fw, img,
|
||||
run_in_rfkill);
|
||||
|
||||
if (ret == 0)
|
||||
trans->state = IWL_TRANS_FW_STARTED;
|
||||
|
||||
@@ -838,8 +838,6 @@ struct iwl_txq {
|
||||
* @wide_cmd_header: true when ucode supports wide command header format
|
||||
* @num_rx_queues: number of RX queues allocated by the transport;
|
||||
* the transport must set this before calling iwl_drv_start()
|
||||
* @iml_len: the length of the image loader
|
||||
* @iml: a pointer to the image loader itself
|
||||
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
|
||||
* The user should use iwl_trans_{alloc,free}_tx_cmd.
|
||||
* @dev_cmd_pool_name: name for the TX command allocation pool
|
||||
@@ -914,9 +912,6 @@ struct iwl_trans {
|
||||
|
||||
u8 num_rx_queues;
|
||||
|
||||
size_t iml_len;
|
||||
u8 *iml;
|
||||
|
||||
/* The following fields are internal only */
|
||||
struct kmem_cache *dev_cmd_pool;
|
||||
char dev_cmd_pool_name[50];
|
||||
@@ -961,8 +956,8 @@ void iwl_trans_op_mode_leave(struct iwl_trans *trans);
|
||||
|
||||
void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr);
|
||||
|
||||
int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
|
||||
bool run_in_rfkill);
|
||||
int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw,
|
||||
enum iwl_ucode_type ucode_type, bool run_in_rfkill);
|
||||
|
||||
void iwl_trans_stop_device(struct iwl_trans *trans);
|
||||
|
||||
|
||||
@@ -227,8 +227,6 @@ static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld)
|
||||
|
||||
static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
|
||||
{
|
||||
const struct fw_img *fw =
|
||||
iwl_get_ucode_image(mld->fw, IWL_UCODE_REGULAR);
|
||||
static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
|
||||
struct iwl_notification_wait alive_wait;
|
||||
bool alive_valid = false;
|
||||
@@ -242,7 +240,7 @@ static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
|
||||
|
||||
iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
|
||||
|
||||
ret = iwl_trans_start_fw(mld->trans, fw, true);
|
||||
ret = iwl_trans_start_fw(mld->trans, mld->fw, IWL_UCODE_REGULAR, true);
|
||||
if (ret) {
|
||||
iwl_remove_notification(&mld->notif_wait, &alive_wait);
|
||||
return ret;
|
||||
|
||||
@@ -348,8 +348,6 @@ iwl_mld_configure_trans(struct iwl_op_mode *op_mode)
|
||||
|
||||
trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
|
||||
trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
|
||||
trans->iml = mld->fw->iml;
|
||||
trans->iml_len = mld->fw->iml_len;
|
||||
trans->wide_cmd_header = true;
|
||||
|
||||
iwl_trans_configure(trans, &trans_cfg);
|
||||
|
||||
@@ -315,7 +315,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
|
||||
{
|
||||
struct iwl_notification_wait alive_wait;
|
||||
struct iwl_mvm_alive_data alive_data = {};
|
||||
const struct fw_img *fw;
|
||||
int ret;
|
||||
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
|
||||
static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
|
||||
@@ -328,11 +327,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
|
||||
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
|
||||
!(fw_has_capa(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
|
||||
fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
|
||||
else
|
||||
fw = iwl_get_ucode_image(mvm->fw, ucode_type);
|
||||
if (WARN_ON(!fw))
|
||||
return -EINVAL;
|
||||
ucode_type = IWL_UCODE_REGULAR_USNIFFER;
|
||||
iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
|
||||
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
|
||||
|
||||
@@ -345,7 +340,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
|
||||
* For the unified firmware case, the ucode_type is not
|
||||
* INIT, but we still need to run it.
|
||||
*/
|
||||
ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill);
|
||||
ret = iwl_trans_start_fw(mvm->trans, mvm->fw, ucode_type,
|
||||
run_in_rfkill);
|
||||
if (ret) {
|
||||
iwl_fw_set_current_image(&mvm->fwrt, old_type);
|
||||
iwl_remove_notification(&mvm->notif_wait, &alive_wait);
|
||||
|
||||
@@ -1482,9 +1482,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv;
|
||||
trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg;
|
||||
|
||||
trans->iml = mvm->fw->iml;
|
||||
trans->iml_len = mvm->fw->iml_len;
|
||||
|
||||
/* set up notification wait support */
|
||||
iwl_notification_wait_init(&mvm->notif_wait);
|
||||
|
||||
|
||||
@@ -98,7 +98,8 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
|
||||
}
|
||||
|
||||
int iwl_pcie_ctxt_info_gen3_alloc(struct iwl_trans *trans,
|
||||
const struct fw_img *fw)
|
||||
const struct iwl_fw *fw,
|
||||
const struct fw_img *img)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_context_info_gen3 *ctxt_info_gen3;
|
||||
@@ -187,7 +188,7 @@ int iwl_pcie_ctxt_info_gen3_alloc(struct iwl_trans *trans,
|
||||
prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step);
|
||||
|
||||
/* allocate ucode sections in dram and set addresses */
|
||||
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram.common);
|
||||
ret = iwl_pcie_init_fw_sec(trans, img, &prph_scratch->dram.common);
|
||||
if (ret)
|
||||
goto err_free_prph_scratch;
|
||||
|
||||
@@ -261,7 +262,8 @@ int iwl_pcie_ctxt_info_gen3_alloc(struct iwl_trans *trans,
|
||||
trans_pcie->prph_scratch = prph_scratch;
|
||||
|
||||
/* Allocate IML */
|
||||
trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
|
||||
trans_pcie->iml_len = fw->iml_len;
|
||||
trans_pcie->iml = dma_alloc_coherent(trans->dev, fw->iml_len,
|
||||
&trans_pcie->iml_dma_addr,
|
||||
GFP_KERNEL);
|
||||
if (!trans_pcie->iml) {
|
||||
@@ -269,7 +271,7 @@ int iwl_pcie_ctxt_info_gen3_alloc(struct iwl_trans *trans,
|
||||
goto err_free_ctxt_info;
|
||||
}
|
||||
|
||||
memcpy(trans_pcie->iml, trans->iml, trans->iml_len);
|
||||
memcpy(trans_pcie->iml, fw->iml, fw->iml_len);
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -298,11 +300,9 @@ void iwl_pcie_ctxt_info_gen3_kick(struct iwl_trans *trans)
|
||||
iwl_enable_fw_load_int_ctx_info(trans, trans->do_top_reset);
|
||||
|
||||
/* kick FW self load */
|
||||
iwl_write64(trans, CSR_CTXT_INFO_ADDR,
|
||||
trans_pcie->ctxt_info_dma_addr);
|
||||
iwl_write64(trans, CSR_IML_DATA_ADDR,
|
||||
trans_pcie->iml_dma_addr);
|
||||
iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
|
||||
iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr);
|
||||
iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr);
|
||||
iwl_write32(trans, CSR_IML_SIZE_ADDR, trans_pcie->iml_len);
|
||||
|
||||
iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
|
||||
CSR_AUTO_FUNC_BOOT_ENA);
|
||||
@@ -313,9 +313,11 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
if (trans_pcie->iml) {
|
||||
dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
|
||||
dma_free_coherent(trans->dev, trans_pcie->iml_len,
|
||||
trans_pcie->iml,
|
||||
trans_pcie->iml_dma_addr);
|
||||
trans_pcie->iml_dma_addr = 0;
|
||||
trans_pcie->iml_len = 0;
|
||||
trans_pcie->iml = NULL;
|
||||
}
|
||||
|
||||
|
||||
@@ -161,7 +161,7 @@ int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
|
||||
}
|
||||
|
||||
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
|
||||
const struct fw_img *fw)
|
||||
const struct fw_img *img)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_context_info *ctxt_info;
|
||||
@@ -223,7 +223,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
|
||||
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
|
||||
|
||||
/* allocate ucode sections in dram and set addresses */
|
||||
ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
|
||||
ret = iwl_pcie_init_fw_sec(trans, img, &ctxt_info->dram);
|
||||
if (ret) {
|
||||
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
|
||||
ctxt_info, trans_pcie->ctxt_info_dma_addr);
|
||||
|
||||
@@ -353,6 +353,7 @@ struct iwl_pcie_txqs {
|
||||
* @prph_scratch_dma_addr: dma addr of prph scratch
|
||||
* @ctxt_info_dma_addr: dma addr of context information
|
||||
* @iml: image loader image virtual address
|
||||
* @iml_len: image loader image size
|
||||
* @iml_dma_addr: image loader image DMA address
|
||||
* @trans: pointer to the generic transport area
|
||||
* @scd_base_addr: scheduler sram base address in SRAM
|
||||
@@ -438,6 +439,7 @@ struct iwl_trans_pcie {
|
||||
struct iwl_prph_info *prph_info;
|
||||
struct iwl_prph_scratch *prph_scratch;
|
||||
void *iml;
|
||||
size_t iml_len;
|
||||
dma_addr_t ctxt_info_dma_addr;
|
||||
dma_addr_t prph_info_dma_addr;
|
||||
dma_addr_t prph_scratch_dma_addr;
|
||||
@@ -1135,7 +1137,9 @@ void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
|
||||
/* transport gen 1 exported functions */
|
||||
void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr);
|
||||
int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
|
||||
const struct fw_img *fw, bool run_in_rfkill);
|
||||
const struct iwl_fw *fw,
|
||||
const struct fw_img *img,
|
||||
bool run_in_rfkill);
|
||||
void iwl_trans_pcie_stop_device(struct iwl_trans *trans);
|
||||
|
||||
/* common functions that are used by gen2 transport */
|
||||
@@ -1158,7 +1162,9 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
|
||||
|
||||
/* transport gen 2 exported functions */
|
||||
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
|
||||
const struct fw_img *fw, bool run_in_rfkill);
|
||||
const struct iwl_fw *fw,
|
||||
const struct fw_img *img,
|
||||
bool run_in_rfkill);
|
||||
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans);
|
||||
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
|
||||
struct iwl_host_cmd *cmd);
|
||||
|
||||
@@ -484,7 +484,9 @@ static void iwl_pcie_spin_for_iml(struct iwl_trans *trans)
|
||||
}
|
||||
|
||||
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
|
||||
const struct fw_img *fw, bool run_in_rfkill)
|
||||
const struct iwl_fw *fw,
|
||||
const struct fw_img *img,
|
||||
bool run_in_rfkill)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
bool hw_rfkill, keep_ram_busy;
|
||||
@@ -553,14 +555,14 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
|
||||
|
||||
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
|
||||
if (!top_reset_done) {
|
||||
ret = iwl_pcie_ctxt_info_gen3_alloc(trans, fw);
|
||||
ret = iwl_pcie_ctxt_info_gen3_alloc(trans, fw, img);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
iwl_pcie_ctxt_info_gen3_kick(trans);
|
||||
} else {
|
||||
ret = iwl_pcie_ctxt_info_init(trans, fw);
|
||||
ret = iwl_pcie_ctxt_info_init(trans, img);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -1337,7 +1337,9 @@ void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
|
||||
}
|
||||
|
||||
int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
|
||||
const struct fw_img *fw, bool run_in_rfkill)
|
||||
const struct iwl_fw *fw,
|
||||
const struct fw_img *img,
|
||||
bool run_in_rfkill)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
bool hw_rfkill;
|
||||
@@ -1409,9 +1411,9 @@ int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
|
||||
|
||||
/* Load the given image to the HW */
|
||||
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
|
||||
ret = iwl_pcie_load_given_ucode_8000(trans, fw);
|
||||
ret = iwl_pcie_load_given_ucode_8000(trans, img);
|
||||
else
|
||||
ret = iwl_pcie_load_given_ucode(trans, fw);
|
||||
ret = iwl_pcie_load_given_ucode(trans, img);
|
||||
|
||||
/* re-check RF-Kill state since we may have missed the interrupt */
|
||||
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
|
||||
|
||||
Reference in New Issue
Block a user