Merge tag 'ath-next-20250922' of git://git.kernel.org/pub/scm/linux/kernel/git/ath/ath

Jeff Johnson says:
==================
ath.git patches for v6.18

Highlights for some specific drivers include:

ath10k:
Fix connection after GTK rekeying

ath12k:
Fix issues in REO RX queue updates
Handle inactivity STA kickout event

And of course there is the usual set of cleanups and bug fixes across
the entire family of "ath" drivers.
==================

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Committed by Johannes Berg on 2025-09-23 09:43:20 +02:00
29 changed files with 669 additions and 283 deletions

View File

@@ -27,7 +27,7 @@ static int ath10k_leds_set_brightness_blocking(struct led_classdev *led_cdev,
goto out;
ar->leds.gpio_state_pin = (brightness != LED_OFF) ^ led->active_low;
ath10k_wmi_gpio_output(ar, led->gpio, ar->leds.gpio_state_pin);
ath10k_wmi_gpio_output(ar, ar->hw_params.led_pin, ar->leds.gpio_state_pin);
out:
mutex_unlock(&ar->conf_mutex);
@@ -64,7 +64,6 @@ int ath10k_leds_register(struct ath10k *ar)
snprintf(ar->leds.label, sizeof(ar->leds.label), "ath10k-%s",
wiphy_name(ar->hw->wiphy));
ar->leds.wifi_led.active_low = 1;
ar->leds.wifi_led.gpio = ar->hw_params.led_pin;
ar->leds.wifi_led.name = ar->leds.label;
ar->leds.wifi_led.default_state = LEDS_GPIO_DEFSTATE_KEEP;

View File

@@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/random.h>
#include "hif.h"
#include "core.h"
@@ -290,8 +291,15 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
if (cmd == DISABLE_KEY) {
arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE];
arg.key_data = NULL;
if (flags & WMI_KEY_GROUP) {
/* Not all hardware handles the group-key deletion operation
* correctly. Replace the key with a junk value to invalidate it.
*/
get_random_bytes(key->key, key->keylen);
} else {
arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE];
arg.key_data = NULL;
}
}
return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
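For readers outside the driver, a minimal hedged sketch of the workaround above (the helper name is made up): on DISABLE_KEY for a group key, the key material is overwritten with random bytes so a stale hardware key entry can no longer match or decrypt.

#include <linux/random.h>
#include <linux/types.h>

/* Hypothetical sketch: invalidate a group key that the hardware
 * cannot reliably delete by replacing it with unusable junk.
 */
static void invalidate_group_key(u8 *key_data, size_t key_len)
{
	/* get_random_bytes() fills the buffer with random octets */
	get_random_bytes(key_data, key_len);
}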

View File

@@ -13,7 +13,7 @@
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc/qcom_rproc.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/iommu.h>
#include "ce.h"
@@ -1559,19 +1559,11 @@ static void ath10k_modem_deinit(struct ath10k *ar)
static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
{
struct device *dev = ar->dev;
struct device_node *node;
struct resource r;
int ret;
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (node) {
ret = of_address_to_resource(node, 0, &r);
of_node_put(node);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
}
ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r);
if (!ret) {
ar->msa.paddr = r.start;
ar->msa.mem_size = resource_size(&r);
ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
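This hunk (and the matching ath11k hunks below) replaces the open-coded phandle walk with the of_reserved_mem_region_to_resource() helper. A hedged before/after sketch, assuming the helper's signature as used in these hunks:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>

/* Old pattern: resolve "memory-region" index 0 by hand. */
static int msa_region_legacy(struct device *dev, struct resource *r)
{
	struct device_node *node;
	int ret;

	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!node)
		return -ENOENT;

	ret = of_address_to_resource(node, 0, r);
	of_node_put(node);	/* drop the reference of_parse_phandle() took */
	return ret;
}

/* New pattern: one helper handles lookup, conversion and refcounting. */
static int msa_region(struct device *dev, struct resource *r)
{
	return of_reserved_mem_region_to_resource(dev->of_node, 0, r);
}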

View File

@@ -1764,33 +1764,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ;
unsigned long time_left, i;
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
WMI_SERVICE_READY_TIMEOUT_HZ);
if (!time_left) {
/* Sometimes the PCI HIF doesn't receive interrupt
* for the service ready message even if the buffer
* was completed. PCIe sniffer shows that it's
* because the corresponding CE ring doesn't fire
* it. Workaround here by polling CE rings once.
*/
ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
/* Sometimes the PCI HIF doesn't receive interrupt
* for the service ready message even if the buffer
* was completed. PCIe sniffer shows that it's
* because the corresponding CE ring doesn't fire
* it. Workaround here by polling CE rings. Since
* the message could arrive at any time, continue
* polling until timeout.
*/
do {
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(ar, i, 1);
/* The 100 ms granularity is a tradeoff considering scheduler
* overhead and response latency
*/
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
WMI_SERVICE_READY_TIMEOUT_HZ);
if (!time_left) {
ath10k_warn(ar, "polling timed out\n");
return -ETIMEDOUT;
}
msecs_to_jiffies(100));
if (time_left)
return 0;
} while (time_before(jiffies, timeout));
ath10k_warn(ar, "service ready completion received, continuing normally\n");
}
return 0;
ath10k_warn(ar, "failed to receive service ready completion\n");
return -ETIMEDOUT;
}
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
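The rewritten wait above bounds an otherwise indefinite poll with one overall deadline while waking every 100 ms to poke the CE rings. A self-contained sketch of that shape, with hypothetical poll_once()/done names:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define OVERALL_TIMEOUT_HZ	(5 * HZ)	/* illustrative overall budget */

static int wait_with_polling(struct completion *done,
			     void (*poll_once)(void *ctx), void *ctx)
{
	unsigned long deadline = jiffies + OVERALL_TIMEOUT_HZ;

	do {
		poll_once(ctx);	/* kick the rings in case an IRQ was lost */

		/* 100 ms slices trade scheduler overhead against
		 * response latency, as the comment above notes.
		 */
		if (wait_for_completion_timeout(done, msecs_to_jiffies(100)))
			return 0;
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}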

View File

@@ -9,8 +9,8 @@
#include <linux/property.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
#include "ahb.h"
#include "debug.h"
@@ -919,16 +919,10 @@ static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
struct device *dev = ab->dev;
struct device_node *node;
struct resource r;
int ret;
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!node)
return -ENOENT;
ret = of_address_to_resource(node, 0, &r);
of_node_put(node);
ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
@@ -937,12 +931,7 @@ static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
ab_ahb->fw.msa_paddr = r.start;
ab_ahb->fw.msa_size = resource_size(&r);
node = of_parse_phandle(dev->of_node, "memory-region", 1);
if (!node)
return -ENOENT;
ret = of_address_to_resource(node, 0, &r);
of_node_put(node);
ret = of_reserved_mem_region_to_resource(dev->of_node, 1, &r);
if (ret) {
dev_err(dev, "failed to resolve ce fixed region\n");
return ret;

View File

@@ -354,7 +354,8 @@ static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
if (ret) {
ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
ath11k_dbg(ab, ATH11K_DBG_CE, "failed to enqueue rx buf: %d\n",
ret);
dma_unmap_single(ab->dev, paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);

View File

@@ -2215,14 +2215,10 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
mutex_unlock(&ab->core_lock);
ath11k_dp_free(ab);
ath11k_hal_srng_deinit(ab);
ath11k_hal_srng_clear(ab);
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
ret = ath11k_hal_srng_init(ab);
if (ret)
return ret;
clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
ret = ath11k_core_qmi_firmware_ready(ab);

View File

@@ -4615,7 +4615,6 @@ static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
msdu_details[i].buf_addr_info.info0) == 0) {
msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
msdu_desc_info->info0 |= last;
;
break;
}
msdu_desc_info = &msdu_details[i].rx_msdu_info;

View File

@@ -1386,6 +1386,22 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_hal_srng_deinit);
void ath11k_hal_srng_clear(struct ath11k_base *ab)
{
/* No need to memset rdp and wrp memory since each individual
* segment would get cleared in ath11k_hal_srng_src_hw_init()
* and ath11k_hal_srng_dst_hw_init().
*/
memset(ab->hal.srng_list, 0,
sizeof(ab->hal.srng_list));
memset(ab->hal.shadow_reg_addr, 0,
sizeof(ab->hal.shadow_reg_addr));
ab->hal.avail_blk_resource = 0;
ab->hal.current_blk_index = 0;
ab->hal.num_shadow_reg_configured = 0;
}
EXPORT_SYMBOL(ath11k_hal_srng_clear);
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
{
struct hal_srng *srng;

View File

@@ -965,6 +965,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
struct hal_srng_params *params);
int ath11k_hal_srng_init(struct ath11k_base *ath11k);
void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
void ath11k_hal_srng_clear(struct ath11k_base *ab);
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
u32 **cfg, u32 *len);

View File

@@ -13,7 +13,7 @@
#include "debug.h"
#include "hif.h"
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/ioport.h>
#include <linux/firmware.h>
#include <linux/of_irq.h>
@@ -2040,23 +2040,14 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
{
struct device *dev = ab->dev;
struct device_node *hremote_node = NULL;
struct resource res;
struct resource res = {};
u32 host_ddr_sz;
int i, idx, ret;
for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
switch (ab->qmi.target_mem[i].type) {
case HOST_DDR_REGION_TYPE:
hremote_node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!hremote_node) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"fail to get hremote_node\n");
return -ENODEV;
}
ret = of_address_to_resource(hremote_node, 0, &res);
of_node_put(hremote_node);
ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"fail to get reg from hremote\n");
@@ -2095,7 +2086,7 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
}
if (ath11k_core_coldboot_cal_support(ab)) {
if (hremote_node) {
if (resource_size(&res)) {
ab->qmi.target_mem[idx].paddr =
res.start + host_ddr_sz;
ab->qmi.target_mem[idx].iaddr =
@@ -2557,7 +2548,7 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab)
GFP_KERNEL);
if (!m3_mem->vaddr) {
ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n",
fw->size);
m3_len);
ret = -ENOMEM;
goto out;
}

View File

@@ -392,7 +392,8 @@ static int ath12k_ce_rx_post_pipe(struct ath12k_ce_pipe *pipe)
ret = ath12k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
if (ret) {
ath12k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
ath12k_dbg(ab, ATH12K_DBG_CE, "failed to enqueue rx buf: %d\n",
ret);
dma_unmap_single(ab->dev, paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
@@ -478,7 +479,7 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
}
while ((skb = __skb_dequeue(&list))) {
ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n",
ath12k_dbg(ab, ATH12K_DBG_CE, "rx ce pipe %d len %d\n",
pipe->pipe_num, skb->len);
pipe->recv_cb(ab, skb);
}

View File

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_CORE_H
@@ -72,6 +72,9 @@
#define ATH12K_MAX_MLO_PEERS 256
#define ATH12K_MLO_PEER_ID_INVALID 0xFFFF
#define ATH12K_INVALID_RSSI_FULL -1
#define ATH12K_INVALID_RSSI_EMPTY -128
enum ath12k_bdf_search {
ATH12K_BDF_SEARCH_DEFAULT,
ATH12K_BDF_SEARCH_BUS_AND_BOARD,
@@ -560,6 +563,7 @@ struct ath12k_link_sta {
u32 bw_prev;
u32 peer_nss;
s8 rssi_beacon;
s8 chain_signal[IEEE80211_MAX_CHAINS];
/* For now the assoc link will be considered primary */
bool is_assoc_link;
@@ -730,6 +734,7 @@ struct ath12k {
u32 txpower_scale;
u32 power_scale;
u32 chan_tx_pwr;
u32 rts_threshold;
u32 num_stations;
u32 max_num_stations;

View File

@@ -26,6 +26,7 @@ enum ath12k_debug_mask {
ATH12K_DBG_DP_TX = 0x00002000,
ATH12K_DBG_DP_RX = 0x00004000,
ATH12K_DBG_WOW = 0x00008000,
ATH12K_DBG_CE = 0x00010000,
ATH12K_DBG_ANY = 0xffffffff,
};

View File

@@ -1745,7 +1745,9 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
INIT_LIST_HEAD(&dp->reo_cmd_list);
INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
INIT_LIST_HEAD(&dp->reo_cmd_update_rx_queue_list);
spin_lock_init(&dp->reo_cmd_lock);
spin_lock_init(&dp->reo_rxq_flush_lock);
dp->reo_cmd_cache_flush_count = 0;
dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);

View File

@@ -184,7 +184,7 @@ struct ath12k_pdev_dp {
#define DP_REO_REINJECT_RING_SIZE 32
#define DP_RX_RELEASE_RING_SIZE 1024
#define DP_REO_EXCEPTION_RING_SIZE 128
#define DP_REO_CMD_RING_SIZE 128
#define DP_REO_CMD_RING_SIZE 256
#define DP_REO_STATUS_RING_SIZE 2048
#define DP_RXDMA_BUF_RING_SIZE 4096
#define DP_RX_MAC_BUF_RING_SIZE 2048
@@ -389,15 +389,19 @@ struct ath12k_dp {
struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
struct list_head reo_cmd_list;
struct list_head reo_cmd_update_rx_queue_list;
struct list_head reo_cmd_cache_flush_list;
u32 reo_cmd_cache_flush_count;
/* protects access to below fields,
* - reo_cmd_list
* - reo_cmd_update_rx_queue_list
* - reo_cmd_cache_flush_list
* - reo_cmd_cache_flush_count
*/
spinlock_t reo_rxq_flush_lock;
struct list_head reo_cmd_list;
/* protects access to below fields,
* - reo_cmd_list
*/
spinlock_t reo_cmd_lock;
struct ath12k_hp_update_timer reo_cmd_timer;
struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
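A hedged sketch of the split locking discipline the comments above describe (the struct and function names here are illustrative): reo_rxq_flush_lock covers the rx-queue-update and cache-flush state, while reo_cmd_lock now covers only reo_cmd_list.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct dp_locking_sketch {
	struct list_head reo_cmd_update_rx_queue_list;
	struct list_head reo_cmd_cache_flush_list;
	u32 reo_cmd_cache_flush_count;
	spinlock_t reo_rxq_flush_lock;	/* guards the three fields above */

	struct list_head reo_cmd_list;
	spinlock_t reo_cmd_lock;	/* guards reo_cmd_list only */
};

static void enqueue_rxq_update(struct dp_locking_sketch *dp,
			       struct list_head *elem)
{
	/* BH-disabling lock, matching use from NAPI context */
	spin_lock_bh(&dp->reo_rxq_flush_lock);
	list_add_tail(elem, &dp->reo_cmd_update_rx_queue_list);
	spin_unlock_bh(&dp->reo_rxq_flush_lock);
}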

View File

@@ -1440,6 +1440,34 @@ static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap)
*errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
}
static void
ath12k_parse_cmn_usr_info(const struct hal_phyrx_common_user_info *cmn_usr_info,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
u32 known, data, cp_setting, ltf_size;
known = __le32_to_cpu(eht->known);
known |= IEEE80211_RADIOTAP_EHT_KNOWN_GI |
IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF;
eht->known = cpu_to_le32(known);
cp_setting = le32_get_bits(cmn_usr_info->info0,
HAL_RX_CMN_USR_INFO0_CP_SETTING);
ltf_size = le32_get_bits(cmn_usr_info->info0,
HAL_RX_CMN_USR_INFO0_LTF_SIZE);
data = __le32_to_cpu(eht->data[0]);
data |= u32_encode_bits(cp_setting, IEEE80211_RADIOTAP_EHT_DATA0_GI);
data |= u32_encode_bits(ltf_size, IEEE80211_RADIOTAP_EHT_DATA0_LTF);
eht->data[0] = cpu_to_le32(data);
if (!ppdu_info->ltf_size)
ppdu_info->ltf_size = ltf_size;
if (!ppdu_info->gi)
ppdu_info->gi = cp_setting;
}
static void
ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon,
const struct hal_rx_msdu_end *msdu_end)
@@ -1627,25 +1655,22 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
const struct hal_rx_phyrx_rssi_legacy_info *rssi = tlv_data;
info[0] = __le32_to_cpu(rssi->info0);
info[1] = __le32_to_cpu(rssi->info1);
info[2] = __le32_to_cpu(rssi->info2);
/* TODO: Please note that the combined RSSI will not be accurate
* in the MU case. RSSI in MU needs to be retrieved from
* PHYRX_OTHER_RECEIVE_INFO TLV.
*/
ppdu_info->rssi_comb =
u32_get_bits(info[1],
HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB);
u32_get_bits(info[2],
HAL_RX_RSSI_LEGACY_INFO_INFO2_RSSI_COMB_PPDU);
ppdu_info->bw = u32_get_bits(info[0],
HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW);
HAL_RX_RSSI_LEGACY_INFO_INFO0_RX_BW);
break;
}
case HAL_PHYRX_OTHER_RECEIVE_INFO: {
const struct hal_phyrx_common_user_info *cmn_usr_info = tlv_data;
ppdu_info->gi = le32_get_bits(cmn_usr_info->info0,
HAL_RX_PHY_CMN_USER_INFO0_GI);
case HAL_PHYRX_COMMON_USER_INFO: {
ath12k_parse_cmn_usr_info(tlv_data, ppdu_info);
break;
}
case HAL_RX_PPDU_START_USER_INFO:
@@ -2154,8 +2179,12 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
spin_unlock_bh(&ar->data_lock);
rxs->flag |= RX_FLAG_MACTIME_START;
rxs->signal = ppduinfo->rssi_comb + noise_floor;
rxs->nss = ppduinfo->nss + 1;
if (test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
ar->ab->wmi_ab.svc_map))
rxs->signal = ppduinfo->rssi_comb;
else
rxs->signal = ppduinfo->rssi_comb + noise_floor;
if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) {
rxs->flag |= RX_FLAG_AMPDU_DETAILS;
@@ -2244,6 +2273,7 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
struct sk_buff *msdu,
const struct hal_rx_mon_ppdu_info *ppduinfo,
struct ieee80211_rx_status *status,
u8 decap)
{
@@ -2257,7 +2287,6 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
struct ieee80211_sta *pubsta = NULL;
struct ath12k_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ath12k_dp_rx_info rx_info;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol_tkip = rxcb->is_eapol;
@@ -2271,8 +2300,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
}
spin_lock_bh(&ar->ab->base_lock);
rx_info.addr2_present = false;
peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, &rx_info);
peer = ath12k_peer_find_by_id(ar->ab, ppduinfo->peer_id);
if (peer && peer->sta) {
pubsta = peer->sta;
if (pubsta->valid_links) {
@@ -2365,7 +2393,7 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
decap = mon_mpdu->decap_format;
ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs);
ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs, decap);
ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, ppduinfo, rxs, decap);
mon_skb = skb_next;
} while (mon_skb);
rxs->flag = 0;

View File

@@ -21,6 +21,9 @@
#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
struct ath12k_dp_rx_tid_rxq *rx_tid);
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
@@ -581,49 +584,71 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
return 0;
}
static void ath12k_dp_init_rx_tid_rxq(struct ath12k_dp_rx_tid_rxq *rx_tid_rxq,
struct ath12k_dp_rx_tid *rx_tid)
{
rx_tid_rxq->tid = rx_tid->tid;
rx_tid_rxq->active = rx_tid->active;
rx_tid_rxq->qbuf = rx_tid->qbuf;
}
static void ath12k_dp_rx_tid_cleanup(struct ath12k_base *ab,
struct ath12k_reoq_buf *tid_qbuf)
{
if (tid_qbuf->vaddr) {
dma_unmap_single(ab->dev, tid_qbuf->paddr_aligned,
tid_qbuf->size, DMA_BIDIRECTIONAL);
kfree(tid_qbuf->vaddr);
tid_qbuf->vaddr = NULL;
}
}
void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
struct dp_reo_update_rx_queue_elem *cmd_queue, *tmp_queue;
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
list_del(&cmd->list);
dma_unmap_single(ab->dev, cmd->data.qbuf.paddr_aligned,
cmd->data.qbuf.size, DMA_BIDIRECTIONAL);
kfree(cmd->data.qbuf.vaddr);
kfree(cmd);
spin_lock_bh(&dp->reo_rxq_flush_lock);
list_for_each_entry_safe(cmd_queue, tmp_queue, &dp->reo_cmd_update_rx_queue_list,
list) {
list_del(&cmd_queue->list);
ath12k_dp_rx_tid_cleanup(ab, &cmd_queue->rx_tid.qbuf);
kfree(cmd_queue);
}
list_for_each_entry_safe(cmd_cache, tmp_cache,
&dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
dma_unmap_single(ab->dev, cmd_cache->data.qbuf.paddr_aligned,
cmd_cache->data.qbuf.size, DMA_BIDIRECTIONAL);
kfree(cmd_cache->data.qbuf.vaddr);
ath12k_dp_rx_tid_cleanup(ab, &cmd_cache->data.qbuf);
kfree(cmd_cache);
}
spin_unlock_bh(&dp->reo_rxq_flush_lock);
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
list_del(&cmd->list);
ath12k_dp_rx_tid_cleanup(ab, &cmd->data.qbuf);
kfree(cmd);
}
spin_unlock_bh(&dp->reo_cmd_lock);
}
static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
enum hal_reo_cmd_status status)
{
struct ath12k_dp_rx_tid *rx_tid = ctx;
struct ath12k_dp_rx_tid_rxq *rx_tid = ctx;
if (status != HAL_REO_CMD_SUCCESS)
ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
dma_unmap_single(dp->ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->qbuf.vaddr);
rx_tid->qbuf.vaddr = NULL;
ath12k_dp_rx_tid_cleanup(dp->ab, &rx_tid->qbuf);
}
static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab,
struct ath12k_dp_rx_tid_rxq *rx_tid,
enum hal_reo_cmd_type type,
struct ath12k_hal_reo_cmd *cmd,
void (*cb)(struct ath12k_dp *dp, void *ctx,
@@ -668,51 +693,95 @@ static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_ti
return 0;
}
static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
struct ath12k_dp_rx_tid *rx_tid)
static int ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
struct ath12k_dp_rx_tid_rxq *rx_tid)
{
struct ath12k_hal_reo_cmd cmd = {};
unsigned long tot_desc_sz, desc_sz;
int ret;
tot_desc_sz = rx_tid->qbuf.size;
desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
while (tot_desc_sz > desc_sz) {
tot_desc_sz -= desc_sz;
cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned + tot_desc_sz);
cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE, &cmd,
NULL);
if (ret)
ath12k_warn(ab,
"failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
rx_tid->tid, ret);
}
memset(&cmd, 0, sizeof(cmd));
cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
/* HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS - all pending MPDUs
* in the bitmap will be forwarded/flushed to REO output rings
*/
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS |
HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS;
/* For all QoS TIDs (except NON_QOS), the driver allocates a maximum
* window size of 1024. In such cases, the driver can issue a single
* 1KB descriptor flush command instead of sending multiple 128-byte
* flush commands for each QoS TID, improving efficiency.
*/
if (rx_tid->tid != HAL_DESC_REO_NON_QOS_TID)
cmd.flag |= HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC;
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE,
&cmd, ath12k_dp_reo_cmd_free);
if (ret) {
ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
rx_tid->tid, ret);
dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->qbuf.vaddr);
rx_tid->qbuf.vaddr = NULL;
return ret;
}
static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
if (peer_id & ATH12K_PEER_ML_ID_VALID) {
peer_id &= ~ATH12K_PEER_ML_ID_VALID;
ml_peer = true;
}
if (ml_peer)
qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
else
qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
u32_encode_bits(tid, DP_REO_QREF_NUM);
}
static void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *dp)
{
struct ath12k_base *ab = dp->ab;
struct dp_reo_update_rx_queue_elem *elem, *tmp;
spin_lock_bh(&dp->reo_rxq_flush_lock);
list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_update_rx_queue_list, list) {
if (elem->rx_tid.active)
continue;
if (ath12k_dp_rx_tid_delete_handler(ab, &elem->rx_tid))
break;
ath12k_peer_rx_tid_qref_reset(ab,
elem->is_ml_peer ? elem->ml_peer_id :
elem->peer_id,
elem->rx_tid.tid);
if (ab->hw_params->reoq_lut_support)
ath12k_hal_reo_shared_qaddr_cache_clear(ab);
list_del(&elem->list);
kfree(elem);
}
spin_unlock_bh(&dp->reo_rxq_flush_lock);
}
static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
enum hal_reo_cmd_status status)
{
struct ath12k_base *ab = dp->ab;
struct ath12k_dp_rx_tid *rx_tid = ctx;
struct ath12k_dp_rx_tid_rxq *rx_tid = ctx;
struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
if (status == HAL_REO_CMD_DRAIN) {
@@ -724,6 +793,13 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
return;
}
/* Retry the HAL_REO_CMD_UPDATE_RX_QUEUE command for entries
* in the pending queue list whose TID is marked inactive
*/
spin_lock_bh(&dp->ab->base_lock);
ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
spin_unlock_bh(&dp->ab->base_lock);
elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
if (!elem)
goto free_desc;
@@ -731,7 +807,7 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
elem->ts = jiffies;
memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
spin_lock_bh(&dp->reo_cmd_lock);
spin_lock_bh(&dp->reo_rxq_flush_lock);
list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
dp->reo_cmd_cache_flush_count++;
@@ -741,32 +817,44 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
time_after(jiffies, elem->ts +
msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
/* The reo_cmd_cache_flush_list is used in only two contexts,
* one is in this function called from napi and the
* other in ath12k_dp_free during core destroy.
* If the cache flush command succeeds, delete the element from
* the cache list. ath12k_dp_rx_reo_cmd_list_cleanup
* will be called during core destroy.
*/
if (ath12k_dp_reo_cache_flush(ab, &elem->data))
break;
list_del(&elem->list);
dp->reo_cmd_cache_flush_count--;
/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
* within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
* is used in only two contexts, one is in this function called
* from napi and the other in ath12k_dp_free during core destroy.
* Before dp_free, the irqs would be disabled and would wait to
* synchronize, so there wouldn't be any race against add or
* delete on this list; hence unlock-lock is safe here.
*/
spin_unlock_bh(&dp->reo_cmd_lock);
ath12k_dp_reo_cache_flush(ab, &elem->data);
kfree(elem);
spin_lock_bh(&dp->reo_cmd_lock);
}
}
spin_unlock_bh(&dp->reo_cmd_lock);
spin_unlock_bh(&dp->reo_rxq_flush_lock);
return;
free_desc:
dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->qbuf.vaddr);
rx_tid->qbuf.vaddr = NULL;
ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
}
static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
struct ath12k_dp_rx_tid_rxq *rx_tid)
{
struct ath12k_hal_reo_cmd cmd = {};
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
/* Flush cache failures have been observed; to avoid them, set the vld bit during delete */
cmd.upd1 |= HAL_REO_CMD_UPD1_VLD;
return ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
ath12k_dp_rx_tid_del_func);
}
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
@@ -799,64 +887,38 @@ static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u
ath12k_hal_reo_shared_qaddr_cache_clear(ab);
}
static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
static void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8 tid)
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
bool ml_peer = false;
struct dp_reo_update_rx_queue_elem *elem;
struct ath12k_dp_rx_tid_rxq *rx_tid;
if (!ab->hw_params->reoq_lut_support)
return;
if (peer_id & ATH12K_PEER_ML_ID_VALID) {
peer_id &= ~ATH12K_PEER_ML_ID_VALID;
ml_peer = true;
spin_lock_bh(&dp->reo_rxq_flush_lock);
list_for_each_entry(elem, &dp->reo_cmd_update_rx_queue_list, list) {
if (elem->peer_id == peer_id) {
rx_tid = &elem->rx_tid;
if (rx_tid->tid == tid) {
rx_tid->active = false;
break;
}
}
}
if (ml_peer)
qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
else
qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
u32_encode_bits(tid, DP_REO_QREF_NUM);
spin_unlock_bh(&dp->reo_rxq_flush_lock);
}
void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
struct ath12k_peer *peer, u8 tid)
{
struct ath12k_hal_reo_cmd cmd = {};
struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
int ret;
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
if (!rx_tid->active)
return;
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
ath12k_dp_rx_tid_del_func);
if (ret) {
ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
dma_unmap_single(ar->ab->dev, rx_tid->qbuf.paddr_aligned,
rx_tid->qbuf.size, DMA_BIDIRECTIONAL);
kfree(rx_tid->qbuf.vaddr);
rx_tid->qbuf.vaddr = NULL;
}
if (peer->mlo)
ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
else
ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
rx_tid->active = false;
ath12k_dp_mark_tid_as_inactive(dp, peer->peer_id, tid);
ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
}
int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
@@ -941,9 +1003,12 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
{
struct ath12k_hal_reo_cmd cmd = {};
int ret;
struct ath12k_dp_rx_tid_rxq rx_tid_rxq;
cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid);
cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
cmd.ba_window_size = ba_win_sz;
@@ -953,12 +1018,12 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
}
ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
ret = ath12k_dp_reo_cmd_send(ar->ab, &rx_tid_rxq,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
NULL);
if (ret) {
ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
rx_tid->tid, ret);
rx_tid_rxq.tid, ret);
return ret;
}
@@ -1018,6 +1083,29 @@ static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab,
return 0;
}
static int ath12k_dp_prepare_reo_update_elem(struct ath12k_dp *dp,
struct ath12k_peer *peer,
struct ath12k_dp_rx_tid *rx_tid)
{
struct dp_reo_update_rx_queue_elem *elem;
elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
if (!elem)
return -ENOMEM;
elem->peer_id = peer->peer_id;
elem->is_ml_peer = peer->mlo;
elem->ml_peer_id = peer->ml_id;
ath12k_dp_init_rx_tid_rxq(&elem->rx_tid, rx_tid);
spin_lock_bh(&dp->reo_rxq_flush_lock);
list_add_tail(&elem->list, &dp->reo_cmd_update_rx_queue_list);
spin_unlock_bh(&dp->reo_rxq_flush_lock);
return 0;
}
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
u8 tid, u32 ba_win_sz, u16 ssn,
enum hal_pn_type pn_type)
@@ -1098,6 +1186,19 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
return ret;
}
/* Pre-allocate the update_rxq_list entry for the corresponding tid.
* This will be used during tid delete. The reason we do not allocate
* during tid delete is that, if the allocation fails then, we may not
* be able to free the tid vaddr/paddr and may leak memory
*/
ret = ath12k_dp_prepare_reo_update_elem(dp, peer, rx_tid);
if (ret) {
ath12k_warn(ab, "failed to alloc update_rxq_list for rx tid %u\n", tid);
ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
spin_unlock_bh(&ab->base_lock);
return ret;
}
paddr_aligned = rx_tid->qbuf.paddr_aligned;
if (ab->hw_params->reoq_lut_support) {
/* Update the REO queue LUT at the corresponding peer id
@@ -1207,6 +1308,7 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
struct ath12k_hal_reo_cmd cmd = {};
struct ath12k_peer *peer;
struct ath12k_dp_rx_tid *rx_tid;
struct ath12k_dp_rx_tid_rxq rx_tid_rxq;
u8 tid;
int ret = 0;
@@ -1253,9 +1355,11 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
rx_tid = &peer->rx_tid[tid];
if (!rx_tid->active)
continue;
cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid);
cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
ret = ath12k_dp_reo_cmd_send(ab, &rx_tid_rxq,
HAL_REO_CMD_UPDATE_RX_QUEUE,
&cmd, NULL);
if (ret) {
@@ -2533,6 +2637,8 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
channel_num = meta_data;
center_freq = meta_data >> 16;
rx_status->band = NUM_NL80211_BANDS;
if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
center_freq <= ATH12K_MAX_6GHZ_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
@@ -2541,21 +2647,33 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
rx_status->band = NL80211_BAND_5GHZ;
} else {
}
if (unlikely(rx_status->band == NUM_NL80211_BANDS ||
!ath12k_ar_to_hw(ar)->wiphy->bands[rx_status->band])) {
ath12k_warn(ar->ab, "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
rx_status->band, channel_num, center_freq, ar->pdev_idx);
spin_lock_bh(&ar->data_lock);
channel = ar->rx_channel;
if (channel) {
rx_status->band = channel->band;
channel_num =
ieee80211_frequency_to_channel(channel->center_freq);
rx_status->freq = ieee80211_channel_to_frequency(channel_num,
rx_status->band);
} else {
ath12k_err(ar->ab, "unable to determine channel, band for rx packet");
}
spin_unlock_bh(&ar->data_lock);
goto h_rate;
}
if (rx_status->band != NL80211_BAND_6GHZ)
rx_status->freq = ieee80211_channel_to_frequency(channel_num,
rx_status->band);
h_rate:
ath12k_dp_rx_h_rate(ar, rx_info);
}

View File

@@ -31,15 +31,29 @@ struct ath12k_dp_rx_tid {
struct ath12k_base *ab;
};
struct ath12k_dp_rx_tid_rxq {
u8 tid;
bool active;
struct ath12k_reoq_buf qbuf;
};
struct ath12k_dp_rx_reo_cache_flush_elem {
struct list_head list;
struct ath12k_dp_rx_tid data;
struct ath12k_dp_rx_tid_rxq data;
unsigned long ts;
};
struct dp_reo_update_rx_queue_elem {
struct list_head list;
struct ath12k_dp_rx_tid_rxq rx_tid;
int peer_id;
bool is_ml_peer;
u16 ml_peer_id;
};
struct ath12k_dp_rx_reo_cmd {
struct list_head list;
struct ath12k_dp_rx_tid data;
struct ath12k_dp_rx_tid_rxq data;
int cmd_num;
void (*handler)(struct ath12k_dp *dp, void *ctx,
enum hal_reo_cmd_status status);

View File

@@ -832,6 +832,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
#define HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC BIT(9)
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)

View File

@@ -1225,6 +1225,7 @@ struct hal_reo_flush_queue {
#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE BIT(12)
#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE BIT(13)
#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL BIT(14)
#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_QUEUE_1K_DESC BIT(15)
struct hal_reo_flush_cache {
struct hal_reo_cmd_hdr cmd;

View File

@@ -89,6 +89,9 @@ static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL);
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC)
desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_QUEUE_1K_DESC);
return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
}

View File

@@ -483,15 +483,16 @@ enum hal_rx_ul_reception_type {
HAL_RECEPTION_TYPE_FRAMELESS
};
#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RECEPTION GENMASK(3, 0)
#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW GENMASK(7, 5)
#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
#define HAL_RX_RSSI_LEGACY_INFO_INFO0_RECEPTION GENMASK(3, 0)
#define HAL_RX_RSSI_LEGACY_INFO_INFO0_RX_BW GENMASK(7, 5)
#define HAL_RX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
#define HAL_RX_RSSI_LEGACY_INFO_INFO2_RSSI_COMB_PPDU GENMASK(7, 0)
struct hal_rx_phyrx_rssi_legacy_info {
__le32 info0;
__le32 rsvd0[39];
__le32 info1;
__le32 rsvd1;
__le32 info2;
} __packed;
#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16)
@@ -695,7 +696,8 @@ struct hal_rx_resp_req_info {
#define HAL_RX_MPDU_ERR_MPDU_LEN BIT(6)
#define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
#define HAL_RX_PHY_CMN_USER_INFO0_GI GENMASK(17, 16)
#define HAL_RX_CMN_USR_INFO0_CP_SETTING GENMASK(17, 16)
#define HAL_RX_CMN_USR_INFO0_LTF_SIZE GENMASK(19, 18)
struct hal_phyrx_common_user_info {
__le32 rsvd[2];

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <net/mac80211.h>
@@ -1822,22 +1822,16 @@ void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb)
skb);
}
static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
void ath12k_mac_handle_beacon_miss(struct ath12k *ar,
struct ath12k_link_vif *arvif)
{
u32 *vdev_id = data;
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif = &ahvif->deflink;
struct ieee80211_hw *hw;
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
if (!arvif->is_created || arvif->vdev_id != *vdev_id)
return;
if (!arvif->is_up)
if (!(arvif->is_created && arvif->is_up))
return;
ieee80211_beacon_loss(vif);
hw = ath12k_ar_to_hw(arvif->ar);
/* Firmware doesn't report beacon loss events repeatedly. If AP probe
* (done by mac80211) succeeds but beacons do not resume then it
@@ -1848,14 +1842,6 @@ static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
ATH12K_CONNECTION_LOSS_HZ);
}
void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id)
{
ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
IEEE80211_IFACE_ITER_NORMAL,
ath12k_mac_handle_beacon_miss_iter,
&vdev_id);
}
static void ath12k_mac_vif_sta_connection_loss_work(struct work_struct *work)
{
struct ath12k_link_vif *arvif = container_of(work, struct ath12k_link_vif,
@@ -9860,6 +9846,7 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
param_value = hw->wiphy->rts_threshold;
ar->rts_threshold = param_value;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
@@ -11240,8 +11227,8 @@ void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
struct ieee80211_channel *chan, *temp_chan;
u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
bool is_psd_power = false, is_tpe_present = false;
s8 max_tx_power[ATH12K_NUM_PWR_LEVELS],
psd_power, tx_power, eirp_power;
s8 max_tx_power[ATH12K_NUM_PWR_LEVELS], psd_power, tx_power;
s8 eirp_power = 0;
struct ath12k_vif *ahvif = arvif->ahvif;
u16 start_freq, center_freq;
u8 reg_6ghz_power_mode;
@@ -11447,8 +11434,10 @@ static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
tpc_info->num_pwr_levels = max(local_psd->count,
reg_psd->count);
if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;
tpc_info->num_pwr_levels =
min3(tpc_info->num_pwr_levels,
IEEE80211_TPE_PSD_ENTRIES_320MHZ,
ATH12K_NUM_PWR_LEVELS);
for (i = 0; i < tpc_info->num_pwr_levels; i++) {
tpc_info->tpe[i] = min(local_psd->power[i],
@@ -11463,8 +11452,10 @@ static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
tpc_info->num_pwr_levels = max(local_non_psd->count,
reg_non_psd->count);
if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;
tpc_info->num_pwr_levels =
min3(tpc_info->num_pwr_levels,
IEEE80211_TPE_EIRP_ENTRIES_320MHZ,
ATH12K_NUM_PWR_LEVELS);
for (i = 0; i < tpc_info->num_pwr_levels; i++) {
tpc_info->tpe[i] = min(local_non_psd->power[i],
@@ -11687,16 +11678,32 @@ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
int radio_idx, u32 value)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct wiphy *wiphy = hw->wiphy;
struct ath12k *ar;
int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret = 0, i;
int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
int ret = 0, ret_err, i;
lockdep_assert_wiphy(hw->wiphy);
/* Currently we set the RTS threshold value for all the vifs across
* all radios of the single wiphy.
* TODO: Once support for a vif-specific RTS threshold in mac80211 is
* available, ath12k can make use of it.
*/
if (radio_idx >= wiphy->n_radio || radio_idx < -1)
return -EINVAL;
if (radio_idx != -1) {
/* Update RTS threshold in specified radio */
ar = ath12k_ah_to_ar(ah, radio_idx);
ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
if (ret) {
ath12k_warn(ar->ab,
"failed to set RTS config for all vdevs of pdev %d",
ar->pdev->pdev_id);
return ret;
}
ar->rts_threshold = value;
return 0;
}
/* The radio index passed is -1, so set the RTS threshold for all radios. */
for_each_ar(ah, ar, i) {
ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
if (ret) {
@@ -11705,6 +11712,25 @@ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
break;
}
}
if (!ret) {
/* Setting the new RTS threshold succeeded on the vdevs of all radios,
* so update the RTS threshold value for all radios
*/
for_each_ar(ah, ar, i)
ar->rts_threshold = value;
return 0;
}
/* RTS threshold config failed, revert to the previous RTS threshold */
for (i = i - 1; i >= 0; i--) {
ar = ath12k_ah_to_ar(ah, i);
ret_err = ath12k_set_vdev_param_to_all_vifs(ar, param_id,
ar->rts_threshold);
if (ret_err)
ath12k_warn(ar->ab,
"failed to restore RTS threshold for all vdevs of pdev %d",
ar->pdev->pdev_id);
}
return ret;
}
@@ -12610,6 +12636,27 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
static void ath12k_mac_put_chain_rssi(struct station_info *sinfo,
struct ath12k_link_sta *arsta)
{
s8 rssi;
int i;
for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
sinfo->chains &= ~BIT(i);
rssi = arsta->chain_signal[i];
if (rssi != ATH12K_DEFAULT_NOISE_FLOOR &&
rssi != ATH12K_INVALID_RSSI_FULL &&
rssi != ATH12K_INVALID_RSSI_EMPTY &&
rssi != 0) {
sinfo->chain_signal[i] = rssi;
sinfo->chains |= BIT(i);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
}
}
}
static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -12667,6 +12714,12 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
!(ath12k_mac_get_fw_stats(ar, &params)))
signal = arsta->rssi_beacon;
params.stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) &&
ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
!(ath12k_mac_get_fw_stats(ar, &params)))
ath12k_mac_put_chain_rssi(sinfo, arsta);
spin_lock_bh(&ar->data_lock);
noise_floor = ath12k_pdev_get_noise_floor(ar);
spin_unlock_bh(&ar->data_lock);

View File

@@ -168,7 +168,8 @@ int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable);
int ath12k_mac_rfkill_config(struct ath12k *ar);
int ath12k_mac_wait_tx_complete(struct ath12k *ar);
void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb);
void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id);
void ath12k_mac_handle_beacon_miss(struct ath12k *ar,
struct ath12k_link_vif *arvif);
int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval);

View File

@@ -3307,20 +3307,28 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
/* This is the number of CE configs */
req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
for (pipe_num = 0; pipe_num < req->tgt_cfg_len ; pipe_num++) {
req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
req->tgt_cfg[pipe_num].pipe_num =
__le32_to_cpu(ce_cfg[pipe_num].pipenum);
req->tgt_cfg[pipe_num].pipe_dir =
__le32_to_cpu(ce_cfg[pipe_num].pipedir);
req->tgt_cfg[pipe_num].nentries =
__le32_to_cpu(ce_cfg[pipe_num].nentries);
req->tgt_cfg[pipe_num].nbytes_max =
__le32_to_cpu(ce_cfg[pipe_num].nbytes_max);
req->tgt_cfg[pipe_num].flags =
__le32_to_cpu(ce_cfg[pipe_num].flags);
}
req->svc_cfg_valid = 1;
/* This is the number of Service/CE configs */
req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len;
for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
req->svc_cfg[pipe_num].service_id =
__le32_to_cpu(svc_cfg[pipe_num].service_id);
req->svc_cfg[pipe_num].pipe_dir =
__le32_to_cpu(svc_cfg[pipe_num].pipedir);
req->svc_cfg[pipe_num].pipe_num =
__le32_to_cpu(svc_cfg[pipe_num].pipenum);
}
/* set shadow v3 configuration */

View File

@@ -392,17 +392,17 @@ enum qmi_wlanfw_pipedir_enum_v01 {
};
struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 {
__le32 pipe_num;
__le32 pipe_dir;
__le32 nentries;
__le32 nbytes_max;
__le32 flags;
u32 pipe_num;
u32 pipe_dir;
u32 nentries;
u32 nbytes_max;
u32 flags;
};
struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 {
__le32 service_id;
__le32 pipe_dir;
__le32 pipe_num;
u32 service_id;
u32 pipe_dir;
u32 pipe_num;
};
struct qmi_wlanfw_shadow_reg_cfg_s_v01 {
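The qmi.c and qmi.h hunks above together move the __le32 annotations off the QMI request structures (the QMI encoder works on native-endian values) and convert explicitly while copying from the little-endian CE config tables. A hedged sketch of that discipline, with made-up struct names:

#include <asm/byteorder.h>
#include <linux/types.h>

/* Firmware-defined table: fixed little-endian, annotated for sparse. */
struct ce_pipe_cfg_le {
	__le32 pipenum;
	__le32 nentries;
};

/* QMI request: plain u32; the QMI encoder produces the wire format. */
struct ce_pipe_cfg_req {
	u32 pipe_num;
	u32 nentries;
};

static void fill_req(struct ce_pipe_cfg_req *req,
		     const struct ce_pipe_cfg_le *cfg)
{
	/* convert at the boundary so sparse can check both sides */
	req->pipe_num = __le32_to_cpu(cfg->pipenum);
	req->nentries = __le32_to_cpu(cfg->nentries);
}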

View File

@@ -30,6 +30,9 @@ struct ath12k_wmi_svc_ready_parse {
struct wmi_tlv_fw_stats_parse {
const struct wmi_stats_event *ev;
struct ath12k_fw_stats *stats;
const struct wmi_per_chain_rssi_stat_params *rssi;
int rssi_num;
bool chain_rssi_done;
};
struct ath12k_wmi_dma_ring_caps_parse {
@@ -185,6 +188,8 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_p2p_noa_event) },
[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
.min_len = sizeof(struct wmi_11d_new_cc_event) },
[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
.min_len = sizeof(struct wmi_per_chain_rssi_stat_params) },
};
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
@@ -6462,6 +6467,8 @@ static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buf
}
arg->mac_addr = ev->peer_macaddr.addr;
arg->reason = le32_to_cpu(ev->reason);
arg->rssi = le32_to_cpu(ev->rssi);
kfree(tb);
return 0;
@@ -7298,8 +7305,10 @@ static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_peer_sta_kickout_arg arg = {};
struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
struct ath12k_peer *peer;
unsigned int link_id;
struct ath12k *ar;
if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
@@ -7319,25 +7328,49 @@ static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff
goto exit;
}
ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
if (!ar) {
arvif = ath12k_mac_get_arvif_by_vdev_id(ab, peer->vdev_id);
if (!arvif) {
ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
peer->vdev_id);
goto exit;
}
sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
arg.mac_addr, NULL);
ar = arvif->ar;
if (peer->mlo) {
sta = ieee80211_find_sta_by_link_addrs(ath12k_ar_to_hw(ar),
arg.mac_addr,
NULL, &link_id);
if (peer->link_id != link_id) {
ath12k_warn(ab,
"Spurious quick kickout for MLO STA %pM with invalid link_id, peer: %d, sta: %d\n",
arg.mac_addr, peer->link_id, link_id);
goto exit;
}
} else {
sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
arg.mac_addr, NULL);
}
if (!sta) {
ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
arg.mac_addr);
ath12k_warn(ab, "Spurious quick kickout for %sSTA %pM\n",
peer->mlo ? "MLO " : "", arg.mac_addr);
goto exit;
}
ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
arg.mac_addr);
ath12k_dbg(ab, ATH12K_DBG_WMI,
"peer sta kickout event %pM reason: %d rssi: %d\n",
arg.mac_addr, arg.reason, arg.rssi);
ieee80211_report_low_ack(sta, 10);
switch (arg.reason) {
case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY:
if (arvif->ahvif->vif->type == NL80211_IFTYPE_STATION) {
ath12k_mac_handle_beacon_miss(ar, arvif);
break;
}
fallthrough;
default:
ieee80211_report_low_ack(sta, 10);
}
exit:
spin_unlock_bh(&ab->base_lock);
@@ -7346,6 +7379,7 @@ static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff
static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ath12k_link_vif *arvif;
struct wmi_roam_event roam_ev = {};
struct ath12k *ar;
u32 vdev_id;
@@ -7364,21 +7398,22 @@ static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
"wmi roam event vdev %u reason %d rssi %d\n",
vdev_id, roam_reason, roam_ev.rssi);
rcu_read_lock();
ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
guard(rcu)();
arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id);
if (!arvif) {
ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
rcu_read_unlock();
return;
}
ar = arvif->ar;
if (roam_reason >= WMI_ROAM_REASON_MAX)
ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
roam_reason, vdev_id);
switch (roam_reason) {
case WMI_ROAM_REASON_BEACON_MISS:
ath12k_mac_handle_beacon_miss(ar, vdev_id);
ath12k_mac_handle_beacon_miss(ar, arvif);
break;
case WMI_ROAM_REASON_BETTER_AP:
case WMI_ROAM_REASON_LOW_RSSI:
@@ -7388,8 +7423,6 @@ static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
roam_reason, vdev_id);
break;
}
rcu_read_unlock();
}
static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
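The roam-event hunk above also replaces the explicit rcu_read_lock()/rcu_read_unlock() pair with the scope-based guard(rcu)() from <linux/cleanup.h>, so early returns no longer need a matching unlock. A minimal sketch with a hypothetical handler:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/rcupdate.h>

static int handle_event_sketch(int vdev_id)
{
	guard(rcu)();	/* rcu_read_unlock() runs automatically at scope exit */

	if (vdev_id < 0)
		return -EINVAL;	/* no explicit unlock needed on this path */

	/* ... RCU-protected lookups would go here ... */

	return 0;
}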
@@ -8219,6 +8252,77 @@ static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
return ret;
}
static int ath12k_wmi_tlv_rssi_chain_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
const struct wmi_rssi_stat_params *stats_rssi = ptr;
struct wmi_tlv_fw_stats_parse *parse = data;
const struct wmi_stats_event *ev = parse->ev;
struct ath12k_fw_stats *stats = parse->stats;
struct ath12k_link_vif *arvif;
struct ath12k_link_sta *arsta;
struct ieee80211_sta *sta;
struct ath12k_sta *ahsta;
struct ath12k *ar;
int vdev_id;
int j;
if (!ev) {
ath12k_warn(ab, "failed to fetch update stats ev");
return -EPROTO;
}
if (tag != WMI_TAG_RSSI_STATS)
return -EPROTO;
if (!stats)
return -EINVAL;
stats->pdev_id = le32_to_cpu(ev->pdev_id);
vdev_id = le32_to_cpu(stats_rssi->vdev_id);
guard(rcu)();
ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
if (!ar) {
ath12k_warn(ab, "invalid pdev id %d in rssi chain parse\n",
stats->pdev_id);
return -EPROTO;
}
arvif = ath12k_mac_get_arvif(ar, vdev_id);
if (!arvif) {
ath12k_warn(ab, "not found vif for vdev id %d\n", vdev_id);
return -EPROTO;
}
ath12k_dbg(ab, ATH12K_DBG_WMI,
"stats bssid %pM vif %p\n",
arvif->bssid, arvif->ahvif->vif);
sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
arvif->bssid,
NULL);
if (!sta) {
ath12k_dbg(ab, ATH12K_DBG_WMI,
"not found station of bssid %pM for rssi chain\n",
arvif->bssid);
return -EPROTO;
}
ahsta = ath12k_sta_to_ahsta(sta);
arsta = &ahsta->deflink;
BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
ARRAY_SIZE(stats_rssi->rssi_avg_beacon));
for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++)
arsta->chain_signal[j] = le32_to_cpu(stats_rssi->rssi_avg_beacon[j]);
stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;
return 0;
}
static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
@@ -8233,6 +8337,22 @@ static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
case WMI_TAG_ARRAY_BYTE:
ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
break;
case WMI_TAG_PER_CHAIN_RSSI_STATS:
parse->rssi = ptr;
if (le32_to_cpu(parse->ev->stats_id) & WMI_REQUEST_RSSI_PER_CHAIN_STAT)
parse->rssi_num = le32_to_cpu(parse->rssi->num_per_chain_rssi);
break;
case WMI_TAG_ARRAY_STRUCT:
if (parse->rssi_num && !parse->chain_rssi_done) {
ret = ath12k_wmi_tlv_iter(ab, ptr, len,
ath12k_wmi_tlv_rssi_chain_parse,
parse);
if (ret)
return ret;
parse->chain_rssi_done = true;
}
break;
default:
break;
}
@@ -8346,6 +8466,12 @@ static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *sk
goto complete;
}
/* Handle WMI_REQUEST_RSSI_PER_CHAIN_STAT status update */
if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
complete(&ar->fw_stats_done);
goto complete;
}
/* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */
ath12k_wmi_fw_stats_process(ar, &stats);

View File

@@ -4548,12 +4548,27 @@ struct wmi_scan_event {
__le32 tsf_timestamp;
} __packed;
enum wmi_peer_sta_kickout_reason {
WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED = 0,
WMI_PEER_STA_KICKOUT_REASON_XRETRY = 1,
WMI_PEER_STA_KICKOUT_REASON_INACTIVITY = 2,
WMI_PEER_STA_KICKOUT_REASON_IBSS_DISCONNECT = 3,
WMI_PEER_STA_KICKOUT_REASON_TDLS_DISCONNECT = 4,
WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT = 5,
WMI_PEER_STA_KICKOUT_REASON_ROAMING_EVENT = 6,
WMI_PEER_STA_KICKOUT_REASON_PMF_ERROR = 7,
};
struct wmi_peer_sta_kickout_arg {
const u8 *mac_addr;
enum wmi_peer_sta_kickout_reason reason;
u32 rssi;
};
struct wmi_peer_sta_kickout_event {
struct ath12k_wmi_mac_addr_params peer_macaddr;
__le32 reason;
__le32 rssi;
} __packed;
#define WMI_ROAM_REASON_MASK GENMASK(3, 0)
@@ -5875,9 +5890,10 @@ struct wmi_stats_event {
} __packed;
enum wmi_stats_id {
WMI_REQUEST_PDEV_STAT = BIT(2),
WMI_REQUEST_VDEV_STAT = BIT(3),
WMI_REQUEST_BCN_STAT = BIT(11),
WMI_REQUEST_PDEV_STAT = BIT(2),
WMI_REQUEST_VDEV_STAT = BIT(3),
WMI_REQUEST_RSSI_PER_CHAIN_STAT = BIT(8),
WMI_REQUEST_BCN_STAT = BIT(11),
};
struct wmi_request_stats_cmd {
@@ -5888,6 +5904,17 @@ struct wmi_request_stats_cmd {
__le32 pdev_id;
} __packed;
struct wmi_rssi_stat_params {
__le32 vdev_id;
__le32 rssi_avg_beacon[WMI_MAX_CHAINS];
__le32 rssi_avg_data[WMI_MAX_CHAINS];
struct ath12k_wmi_mac_addr_params peer_macaddr;
} __packed;
struct wmi_per_chain_rssi_stat_params {
__le32 num_per_chain_rssi;
} __packed;
#define WLAN_MAX_AC 4
#define MAX_TX_RATE_VALUES 10