Merge tag 'ath-next-20250521' of git://git.kernel.org/pub/scm/linux/kernel/git/ath/ath

Jeff Johnson says:
==================
ath.git patches for v6.16

ath12k:
Add monitor mode support for WCN7850.
Enhance regulatory support including 6 GHz power modes.

In addition, perform the usual set of bug fixes and cleanups across
all supported drivers.
==================

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
This commit is contained in:
Johannes Berg
2025-05-22 13:09:53 +02:00
40 changed files with 2756 additions and 675 deletions

View File

@@ -21,6 +21,12 @@ properties:
reg:
maxItems: 1
firmware-name:
maxItems: 1
description:
If present, a board or platform specific string used to look up
use-case-specific firmware files for the device.
vddaon-supply:
description: VDD_AON supply regulator handle

View File

@@ -80,7 +80,7 @@ static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
static inline unsigned int
ath10k_set_ring_byte(unsigned int offset,
struct ath10k_hw_ce_regs_addr_map *addr_map)
const struct ath10k_hw_ce_regs_addr_map *addr_map)
{
return ((offset << addr_map->lsb) & addr_map->mask);
}
@@ -203,7 +203,7 @@ static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
@@ -217,7 +217,7 @@ static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
@@ -231,7 +231,7 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
@@ -313,7 +313,7 @@ static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
@@ -325,7 +325,7 @@ static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
@@ -337,7 +337,7 @@ static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
@@ -349,7 +349,7 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
@@ -360,7 +360,7 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
@@ -372,7 +372,7 @@ static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
@@ -384,7 +384,7 @@ static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
@@ -396,7 +396,7 @@ static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
const struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
u32 misc_ie_addr = ath10k_ce_read32(ar,
ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);
@@ -410,7 +410,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int mask)
{
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}
@@ -1230,7 +1230,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 ctrl_addr = ce_state->ctrl_addr;
/*

View File

@@ -212,40 +212,40 @@ const struct ath10k_hw_regs wcn3990_regs = {
.pcie_intr_fw_mask = 0x00100000,
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
.msb = 0x00000010,
.lsb = 0x00000010,
.mask = GENMASK(17, 17),
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
.msb = 0x00000012,
.lsb = 0x00000012,
.mask = GENMASK(18, 18),
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
static const struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
.msb = 0x00000000,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
static struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
static const struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
.addr = 0x00000018,
.src_ring = &wcn3990_src_ring,
.dst_ring = &wcn3990_dst_ring,
.dmax = &wcn3990_dmax,
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
static const struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
.mask = GENMASK(0, 0),
};
static struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
static const struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
.copy_complete = &wcn3990_host_ie_cc,
};
static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
static const struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
.dstr_lmask = 0x00000010,
.dstr_hmask = 0x00000008,
.srcr_lmask = 0x00000004,
@@ -255,7 +255,7 @@ static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
.addr = 0x00000030,
};
static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
static const struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
.axi_err = 0x00000100,
.dstr_add_err = 0x00000200,
.srcr_len_err = 0x00000100,
@@ -266,19 +266,19 @@ static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
.addr = 0x00000038,
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
.msb = 0x00000000,
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
.addr = 0x0000004c,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
@@ -286,18 +286,18 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
.wm_high = &wcn3990_src_wm_high,
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
.addr = 0x00000050,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
@@ -305,7 +305,7 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
.wm_high = &wcn3990_dst_wm_high,
};
static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
static const struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
.shift = 19,
.mask = 0x00080000,
.enable = 0x00000000,
@@ -344,25 +344,25 @@ const struct ath10k_hw_values wcn3990_values = {
.ce_desc_meta_data_lsb = 4,
};
static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
static const struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
.msb = 0x00000010,
.lsb = 0x00000010,
.mask = GENMASK(16, 16),
};
static struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
static const struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
.msb = 0x00000011,
.lsb = 0x00000011,
.mask = GENMASK(17, 17),
};
static struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
static const struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
static const struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
.addr = 0x00000010,
.hw_mask = 0x0007ffff,
.sw_mask = 0x0007ffff,
@@ -375,31 +375,31 @@ static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
.dmax = &qcax_dmax,
};
static struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
static const struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
.msb = 0x00000003,
.lsb = 0x00000003,
.mask = GENMASK(3, 3),
};
static struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
static const struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
.msb = 0x00000000,
.mask = GENMASK(0, 0),
.status_reset = 0x00000000,
.status = &qcax_cmd_halt_status,
};
static struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
static const struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
.msb = 0x00000000,
.lsb = 0x00000000,
.mask = GENMASK(0, 0),
};
static struct ath10k_hw_ce_host_ie qcax_host_ie = {
static const struct ath10k_hw_ce_host_ie qcax_host_ie = {
.copy_complete_reset = 0x00000000,
.copy_complete = &qcax_host_ie_cc,
};
static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
static const struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
.dstr_lmask = 0x00000010,
.dstr_hmask = 0x00000008,
.srcr_lmask = 0x00000004,
@@ -409,7 +409,7 @@ static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
.addr = 0x00000030,
};
static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
static const struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
.axi_err = 0x00000400,
.dstr_add_err = 0x00000200,
.srcr_len_err = 0x00000100,
@@ -420,19 +420,19 @@ static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
.addr = 0x00000038,
};
static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
.msb = 0x0000001f,
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
.addr = 0x0000004c,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
@@ -440,18 +440,18 @@ static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
.wm_high = &qcax_src_wm_high,
};
static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
.addr = 0x00000050,
.low_rst = 0x00000000,
.high_rst = 0x00000000,

View File

@@ -289,19 +289,22 @@ struct ath10k_hw_ce_ctrl1 {
u32 sw_wr_mask;
u32 reset_mask;
u32 reset;
struct ath10k_hw_ce_regs_addr_map *src_ring;
struct ath10k_hw_ce_regs_addr_map *dst_ring;
struct ath10k_hw_ce_regs_addr_map *dmax; };
const struct ath10k_hw_ce_regs_addr_map *src_ring;
const struct ath10k_hw_ce_regs_addr_map *dst_ring;
const struct ath10k_hw_ce_regs_addr_map *dmax;
};
struct ath10k_hw_ce_cmd_halt {
u32 status_reset;
u32 msb;
u32 mask;
struct ath10k_hw_ce_regs_addr_map *status; };
const struct ath10k_hw_ce_regs_addr_map *status;
};
struct ath10k_hw_ce_host_ie {
u32 copy_complete_reset;
struct ath10k_hw_ce_regs_addr_map *copy_complete; };
const struct ath10k_hw_ce_regs_addr_map *copy_complete;
};
struct ath10k_hw_ce_host_wm_regs {
u32 dstr_lmask;
@@ -328,8 +331,9 @@ struct ath10k_hw_ce_dst_src_wm_regs {
u32 addr;
u32 low_rst;
u32 high_rst;
struct ath10k_hw_ce_regs_addr_map *wm_low;
struct ath10k_hw_ce_regs_addr_map *wm_high; };
const struct ath10k_hw_ce_regs_addr_map *wm_low;
const struct ath10k_hw_ce_regs_addr_map *wm_high;
};
struct ath10k_hw_ce_ctrl1_upd {
u32 shift;
@@ -355,14 +359,14 @@ struct ath10k_hw_ce_regs {
u32 ce_rri_low;
u32 ce_rri_high;
u32 host_ie_addr;
struct ath10k_hw_ce_host_wm_regs *wm_regs;
struct ath10k_hw_ce_misc_regs *misc_regs;
struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
struct ath10k_hw_ce_cmd_halt *cmd_halt;
struct ath10k_hw_ce_host_ie *host_ie;
struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
struct ath10k_hw_ce_ctrl1_upd *upd;
const struct ath10k_hw_ce_host_wm_regs *wm_regs;
const struct ath10k_hw_ce_misc_regs *misc_regs;
const struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
const struct ath10k_hw_ce_cmd_halt *cmd_halt;
const struct ath10k_hw_ce_host_ie *host_ie;
const struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
const struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
const struct ath10k_hw_ce_ctrl1_upd *upd;
};
struct ath10k_hw_values {

View File

@@ -1844,7 +1844,7 @@ static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
ret = ath10k_sdio_diag_read32(ar, addr, &val);
if (ret) {
ath10k_warn(ar,
"unable to read hi_acs_flags for htt tx comple : %d\n", ret);
"unable to read hi_acs_flags for htt tx complete: %d\n", ret);
return ret;
}

View File

@@ -393,11 +393,10 @@ static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
goto err;
}
/* Make sure descriptor is read after the head pointer. */
dma_rmb();
*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
if (*nbytes == 0) {
ret = -EIO;
goto err;
}
*skb = pipe->dest_ring->skb[sw_index];
pipe->dest_ring->skb[sw_index] = NULL;
@@ -430,8 +429,8 @@ static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
if (unlikely(max_nbytes < nbytes)) {
ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
ath11k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;

View File

@@ -2637,7 +2637,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
struct ath11k *ar;
struct hal_reo_dest_ring *desc;
enum hal_reo_dest_ring_push_reason push_reason;
u32 cookie;
u32 cookie, info0, rx_msdu_info0, rx_mpdu_info0;
int i;
for (i = 0; i < MAX_RADIOS; i++)
@@ -2650,11 +2650,14 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
try_again:
ath11k_hal_srng_access_begin(ab, srng);
/* Make sure descriptor is read after the head pointer. */
dma_rmb();
while (likely(desc =
(struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
srng))) {
cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
desc->buf_addr_info.info1);
READ_ONCE(desc->buf_addr_info.info1));
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
cookie);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
@@ -2683,8 +2686,9 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
num_buffs_reaped[mac_id]++;
info0 = READ_ONCE(desc->info0);
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
desc->info0);
info0);
if (unlikely(push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
dev_kfree_skb_any(msdu);
@@ -2692,18 +2696,21 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
continue;
}
rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
rx_msdu_info0 = READ_ONCE(desc->rx_msdu_info.info0);
rx_mpdu_info0 = READ_ONCE(desc->rx_mpdu_info.info0);
rxcb->is_first_msdu = !!(rx_msdu_info0 &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
rxcb->is_last_msdu = !!(rx_msdu_info0 &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
rxcb->is_continuation = !!(rx_msdu_info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
desc->rx_mpdu_info.meta_data);
READ_ONCE(desc->rx_mpdu_info.meta_data));
rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
desc->rx_mpdu_info.info0);
rx_mpdu_info0);
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
desc->info0);
info0);
rxcb->mac_id = mac_id;
__skb_queue_tail(&msdu_list[mac_id], msdu);

View File

@@ -599,7 +599,7 @@ u32 ath11k_hal_ce_dst_status_get_length(void *buf)
struct hal_ce_srng_dst_status_desc *desc = buf;
u32 len;
len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, READ_ONCE(desc->flags));
desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
return len;
@@ -829,7 +829,7 @@ void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
srng->u.src_ring.cached_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
} else {
srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
/* Try to prefetch the next descriptor in the ring */
if (srng->flags & HAL_SRNG_FLAGS_CACHED)

View File

@@ -9972,12 +9972,17 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
struct ath11k_base *ab = ar->ab;
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
int n_limits;
int n_limits, n_combos;
bool p2p;
p2p = ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE);
combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
if (ab->hw_params.support_dual_stations)
n_combos = 2;
else
n_combos = 1;
combinations = kcalloc(n_combos, sizeof(*combinations), GFP_KERNEL);
if (!combinations)
return -ENOMEM;
@@ -9992,7 +9997,9 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
return -ENOMEM;
}
limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
limits[1].max = 16;
limits[1].types |= BIT(NL80211_IFTYPE_AP);
if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
@@ -10002,25 +10009,24 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
combinations[0].n_limits = n_limits;
combinations[0].beacon_int_infra_match = true;
combinations[0].beacon_int_min_gcd = 100;
combinations[0].max_interfaces = 16;
combinations[0].num_different_channels = 1;
combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_80P80) |
BIT(NL80211_CHAN_WIDTH_160);
if (ab->hw_params.support_dual_stations) {
limits[0].max = 2;
limits[1].max = 1;
combinations[0].max_interfaces = ab->hw_params.num_vdevs;
combinations[0].num_different_channels = 2;
} else {
limits[0].max = 1;
limits[1].max = 16;
combinations[0].max_interfaces = 16;
combinations[0].num_different_channels = 1;
combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_80P80) |
BIT(NL80211_CHAN_WIDTH_160);
combinations[1].limits = limits;
combinations[1].n_limits = n_limits;
combinations[1].beacon_int_infra_match = true;
combinations[1].beacon_int_min_gcd = 100;
combinations[1].max_interfaces = ab->hw_params.num_vdevs;
combinations[1].num_different_channels = 2;
}
if (p2p) {
@@ -10031,7 +10037,7 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
}
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = 1;
ar->hw->wiphy->n_iface_combinations = n_combos;
return 0;
}

View File

@@ -1993,6 +1993,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
chunk->prev_size == chunk->size)
continue;
if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"size/type mismatch (current %d %u) (prev %d %u), try later with small size\n",
chunk->size, chunk->type,
chunk->prev_size, chunk->prev_type);
ab->qmi.target_mem_delayed = true;
return 0;
}
/* cannot reuse the existing chunk */
dma_free_coherent(ab->dev, chunk->prev_size,
chunk->vaddr, chunk->paddr);

View File

@@ -1125,14 +1125,13 @@ static void ath12k_ahb_remove(struct platform_device *pdev)
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath12k_ahb_power_down(ab, false);
ath12k_qmi_deinit_service(ab);
goto qmi_fail;
}
ath12k_ahb_remove_prepare(ab);
ath12k_core_deinit(ab);
ath12k_core_hw_group_cleanup(ab->ag);
qmi_fail:
ath12k_core_deinit(ab);
ath12k_ahb_free_resources(ab);
}

View File

@@ -433,11 +433,10 @@ static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
goto err;
}
/* Make sure descriptor is read after the head pointer. */
dma_rmb();
*nbytes = ath12k_hal_ce_dst_status_get_length(desc);
if (*nbytes == 0) {
ret = -EIO;
goto err;
}
*skb = pipe->dest_ring->skb[sw_index];
pipe->dest_ring->skb[sw_index] = NULL;
@@ -470,8 +469,8 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
if (unlikely(max_nbytes < nbytes)) {
ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
ath12k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;

View File

@@ -697,7 +697,7 @@ static void ath12k_core_stop(struct ath12k_base *ab)
/* De-Init of components as needed */
}
static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
struct ath12k_base *ab = data;
const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
@@ -719,6 +719,28 @@ static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
return;
}
spin_lock_bh(&ab->base_lock);
switch (smbios->country_code_flag) {
case ATH12K_SMBIOS_CC_ISO:
ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
ab->new_alpha2[0], ab->new_alpha2[1]);
break;
case ATH12K_SMBIOS_CC_WW:
ab->new_alpha2[0] = '0';
ab->new_alpha2[1] = '0';
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
break;
default:
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
smbios->country_code_flag);
break;
}
spin_unlock_bh(&ab->base_lock);
if (!smbios->bdf_enabled) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
return;
@@ -758,7 +780,7 @@ static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
int ath12k_core_check_smbios(struct ath12k_base *ab)
{
ab->qmi.target.bdf_ext[0] = '\0';
dmi_walk(ath12k_core_check_bdfext, ab);
dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
if (ab->qmi.target.bdf_ext[0] == '\0')
return -ENODATA;
@@ -789,6 +811,8 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
goto err_qmi_deinit;
}
ath12k_debugfs_pdev_create(ab);
return 0;
err_qmi_deinit:
@@ -1812,9 +1836,9 @@ static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
of_node_put(next_rx_endpoint);
device_count++;
if (device_count > ATH12K_MAX_SOCS) {
if (device_count > ATH12K_MAX_DEVICES) {
ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
device_count, ATH12K_MAX_SOCS);
device_count, ATH12K_MAX_DEVICES);
of_node_put(next_wsi_dev);
return -EINVAL;
}
@@ -1990,7 +2014,7 @@ static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
}
}
static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
struct ath12k_base *ab;
int i;
@@ -2136,10 +2160,9 @@ int ath12k_core_init(struct ath12k_base *ab)
void ath12k_core_deinit(struct ath12k_base *ab)
{
ath12k_core_panic_notifier_unregister(ab);
ath12k_core_hw_group_cleanup(ab->ag);
ath12k_core_hw_group_destroy(ab->ag);
ath12k_core_hw_group_unassign(ab);
ath12k_core_panic_notifier_unregister(ab);
}
void ath12k_core_free(struct ath12k_base *ab)

View File

@@ -17,6 +17,7 @@
#include <linux/of_reserved_mem.h>
#include <linux/panic_notifier.h>
#include <linux/average.h>
#include <linux/of.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -63,8 +64,8 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
#define ATH12K_MAX_SOCS 3
#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_SOCS * MAX_RADIOS)
#define ATH12K_MAX_DEVICES 3
#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_DEVICES * MAX_RADIOS)
#define ATH12K_INVALID_GROUP_ID 0xFF
#define ATH12K_INVALID_DEVICE_ID 0xFF
@@ -175,9 +176,34 @@ struct ath12k_ext_irq_grp {
struct net_device *napi_ndev;
};
enum ath12k_smbios_cc_type {
/* disable country code setting from SMBIOS */
ATH12K_SMBIOS_CC_DISABLE = 0,
/* set country code by ANSI country name, based on ISO3166-1 alpha2 */
ATH12K_SMBIOS_CC_ISO = 1,
/* worldwide regdomain */
ATH12K_SMBIOS_CC_WW = 2,
};
struct ath12k_smbios_bdf {
struct dmi_header hdr;
u32 padding;
u8 features_disabled;
/* enum ath12k_smbios_cc_type */
u8 country_code_flag;
/* To set specific country, you need to set country code
* flag=ATH12K_SMBIOS_CC_ISO first, then if country is United
* States, then country code value = 0x5553 ("US",'U' = 0x55, 'S'=
* 0x53). To set country to INDONESIA, then country code value =
* 0x4944 ("IN", 'I'=0x49, 'D'=0x44). If country code flag =
* ATH12K_SMBIOS_CC_WW, then you can use worldwide regulatory
* setting.
*/
u16 cc_code;
u8 bdf_enabled;
u8 bdf_ext[];
} __packed;
@@ -317,6 +343,8 @@ struct ath12k_link_vif {
/* only used in station mode */
bool is_sta_assoc_link;
struct ath12k_reg_tpc_power_info reg_tpc_info;
};
struct ath12k_vif {
@@ -784,6 +812,8 @@ struct ath12k {
u8 ftm_msgref;
struct ath12k_fw_stats fw_stats;
unsigned long last_tx_power_update;
s8 max_allowed_tx_power;
};
struct ath12k_hw {
@@ -875,7 +905,7 @@ struct ath12k_board_data {
size_t len;
};
struct ath12k_soc_dp_tx_err_stats {
struct ath12k_device_dp_tx_err_stats {
/* TCL Ring Descriptor unavailable */
u32 desc_na[DP_TCL_NUM_RING_MAX];
/* Other failures during dp_tx due to mem allocation failure
@@ -884,13 +914,20 @@ struct ath12k_soc_dp_tx_err_stats {
atomic_t misc_fail;
};
struct ath12k_soc_dp_stats {
struct ath12k_device_dp_stats {
u32 err_ring_pkts;
u32 invalid_rbm;
u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
u32 hal_reo_error[DP_REO_DST_RING_MAX];
struct ath12k_soc_dp_tx_err_stats tx_err;
struct ath12k_device_dp_tx_err_stats tx_err;
u32 reo_rx[DP_REO_DST_RING_MAX][ATH12K_MAX_DEVICES];
u32 rx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX][ATH12K_MAX_DEVICES];
u32 tqm_rel_reason[MAX_TQM_RELEASE_REASON];
u32 fw_tx_status[MAX_FW_TX_STATUS];
u32 tx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX];
u32 tx_enqueued[DP_TCL_NUM_RING_MAX];
u32 tx_completed[DP_TCL_NUM_RING_MAX];
};
struct ath12k_reg_freq {
@@ -919,7 +956,7 @@ struct ath12k_hw_group {
u8 num_probed;
u8 num_started;
unsigned long flags;
struct ath12k_base *ab[ATH12K_MAX_SOCS];
struct ath12k_base *ab[ATH12K_MAX_DEVICES];
/* protects access to this struct */
struct mutex mutex;
@@ -933,7 +970,7 @@ struct ath12k_hw_group {
struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
u8 num_hw;
bool mlo_capable;
struct device_node *wsi_node[ATH12K_MAX_SOCS];
struct device_node *wsi_node[ATH12K_MAX_DEVICES];
struct ath12k_mlo_memory mlo_mem;
struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO];
bool hw_link_id_init_done;
@@ -1041,9 +1078,11 @@ struct ath12k_base {
*/
struct ieee80211_regdomain *new_regd[MAX_RADIOS];
struct ath12k_reg_info *reg_info[MAX_RADIOS];
/* Current DFS Regulatory */
enum ath12k_dfs_region dfs_region;
struct ath12k_soc_dp_stats soc_stats;
struct ath12k_device_dp_stats device_stats;
#ifdef CONFIG_ATH12K_DEBUGFS
struct dentry *debugfs_soc;
#endif
@@ -1072,8 +1111,6 @@ struct ath12k_base {
struct ath12k_dbring_cap *db_caps;
u32 num_db_cap;
struct timer_list mon_reap_timer;
struct completion htc_suspend;
u64 fw_soc_drop_count;
@@ -1246,6 +1283,7 @@ struct ath12k_fw_stats_pdev {
};
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag);
int ath12k_core_pre_init(struct ath12k_base *ab);
int ath12k_core_init(struct ath12k_base *ath12k);
void ath12k_core_deinit(struct ath12k_base *ath12k);
@@ -1342,8 +1380,16 @@ static inline void ath12k_core_create_firmware_path(struct ath12k_base *ab,
const char *filename,
void *buf, size_t buf_len)
{
snprintf(buf, buf_len, "%s/%s/%s", ATH12K_FW_DIR,
ab->hw_params->fw.dir, filename);
const char *fw_name = NULL;
of_property_read_string(ab->dev->of_node, "firmware-name", &fw_name);
if (fw_name && strncmp(filename, "board", 5))
snprintf(buf, buf_len, "%s/%s/%s/%s", ATH12K_FW_DIR,
ab->hw_params->fw.dir, fw_name, filename);
else
snprintf(buf, buf_len, "%s/%s/%s", ATH12K_FW_DIR,
ab->hw_params->fw.dir, filename);
}
static inline const char *ath12k_bus_str(enum ath12k_bus bus)

View File

@@ -33,6 +33,76 @@ static const struct file_operations fops_simulate_radar = {
.open = simple_open
};
static ssize_t ath12k_read_simulate_fw_crash(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
const char buf[] =
"To simulate firmware crash write one of the keywords to this file:\n"
"`assert` - send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n";
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
/* debugfs write handler for simulate_fw_crash.
 *
 * Accepts the single keyword "assert" and asks the firmware to crash
 * itself via WMI_FORCE_FW_HANG_CMDID. Returns @count on success,
 * -EINVAL on a partial write or unknown keyword, -ENETDOWN when no
 * radio is up, or the WMI command's error code.
 */
static ssize_t
ath12k_write_simulate_fw_crash(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath12k_base *ab = file->private_data;
struct ath12k_pdev *pdev;
struct ath12k *ar = NULL;
char buf[32] = {0};
int i, ret;
ssize_t rc;
/* filter partial writes and invalid commands */
if (*ppos != 0 || count >= sizeof(buf) || count == 0)
return -EINVAL;
rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
if (rc < 0)
return rc;
/* drop the possible '\n' from the end */
if (buf[*ppos - 1] == '\n')
buf[*ppos - 1] = '\0';
/* the WMI command needs a pdev; use the first one that is up */
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (ar)
break;
}
if (!ar)
return -ENETDOWN;
if (!strcmp(buf, "assert")) {
ath12k_info(ab, "simulating firmware assert crash\n");
ret = ath12k_wmi_force_fw_hang_cmd(ar,
ATH12K_WMI_FW_HANG_ASSERT_TYPE,
ATH12K_WMI_FW_HANG_DELAY);
} else {
return -EINVAL;
}
if (ret) {
ath12k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
return ret;
}
return count;
}
/* file_operations for the per-device simulate_fw_crash debugfs entry */
static const struct file_operations fops_simulate_fw_crash = {
.read = ath12k_read_simulate_fw_crash,
.write = ath12k_write_simulate_fw_crash,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath12k_write_tpc_stats_type(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -951,6 +1021,199 @@ void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
&ath12k_fops_link_stats);
}
/* debugfs read handler for device_dp_stats.
 *
 * Formats the accumulated device-level datapath statistics
 * (ab->device_stats) into a 4 KiB text buffer and copies the requested
 * slice to userspace. Returns the number of bytes copied or -ENOMEM if
 * the scratch buffer cannot be allocated.
 */
static ssize_t ath12k_debugfs_dump_device_dp_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath12k_base *ab = file->private_data;
struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
int len = 0, i, j, ret;
struct ath12k *ar;
const int size = 4096;
/* human-readable names indexed by the RXDMA error code enums */
static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
[HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR] = "Overflow",
[HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR] = "MPDU len",
[HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR] = "FCS",
[HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR] = "Decrypt",
[HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR] = "TKIP MIC",
[HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR] = "Unencrypt",
[HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR] = "MSDU len",
[HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR] = "MSDU limit",
[HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR] = "WiFi parse",
[HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR] = "AMSDU parse",
[HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR] = "SA timeout",
[HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR] = "DA timeout",
[HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR] = "Flow timeout",
[HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR] = "Flush req",
[HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR] = "AMSDU frag",
[HAL_REO_ENTR_RING_RXDMA_ECODE_MULTICAST_ECHO_ERR] = "Multicast echo",
[HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_MISMATCH_ERR] = "AMSDU mismatch",
[HAL_REO_ENTR_RING_RXDMA_ECODE_UNAUTH_WDS_ERR] = "Unauth WDS",
[HAL_REO_ENTR_RING_RXDMA_ECODE_GRPCAST_AMSDU_WDS_ERR] = "AMSDU or WDS"};
/* human-readable names indexed by the REO destination error code enums */
static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
[HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO] = "Desc addr zero",
[HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID] = "Desc inval",
[HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA] = "AMPDU in non BA",
[HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE] = "Non BA dup",
[HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE] = "BA dup",
[HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP] = "Frame 2k jump",
[HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP] = "BAR 2k jump",
[HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR] = "Frame OOR",
[HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR] = "BAR OOR",
[HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION] = "No BA session",
[HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN] = "Frame SN equal SSN",
[HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED] = "PN check fail",
[HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET] = "2k err",
[HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET] = "PN err",
[HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED] = "Desc blocked"};
/* human-readable names indexed by the WBM release source enums */
static const char *wbm_rel_src[HAL_WBM_REL_SRC_MODULE_MAX] = {
[HAL_WBM_REL_SRC_MODULE_TQM] = "TQM",
[HAL_WBM_REL_SRC_MODULE_RXDMA] = "Rxdma",
[HAL_WBM_REL_SRC_MODULE_REO] = "Reo",
[HAL_WBM_REL_SRC_MODULE_FW] = "FW",
[HAL_WBM_REL_SRC_MODULE_SW] = "SW"};
/* __free(kfree) releases the buffer automatically on every return path */
char *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* RX section */
len += scnprintf(buf + len, size - len, "DEVICE RX STATS:\n\n");
len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
device_stats->err_ring_pkts);
len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
device_stats->invalid_rbm);
len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
len += scnprintf(buf + len, size - len, "%s: %u\n",
rxdma_err[i], device_stats->rxdma_error[i]);
len += scnprintf(buf + len, size - len, "\nREO errors:\n");
for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
len += scnprintf(buf + len, size - len, "%s: %u\n",
reo_err[i], device_stats->reo_error[i]);
len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
for (i = 0; i < DP_REO_DST_RING_MAX; i++)
len += scnprintf(buf + len, size - len,
"ring%d: %u\n", i,
device_stats->hal_reo_error[i]);
/* TX section */
len += scnprintf(buf + len, size - len, "\nDEVICE TX STATS:\n");
len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
len += scnprintf(buf + len, size - len, "ring%d: %u\n",
i, device_stats->tx_err.desc_na[i]);
len += scnprintf(buf + len, size - len,
"\nMisc Transmit Failures: %d\n",
atomic_read(&device_stats->tx_err.misc_fail));
len += scnprintf(buf + len, size - len, "\ntx_wbm_rel_source:");
for (i = 0; i < HAL_WBM_REL_SRC_MODULE_MAX; i++)
len += scnprintf(buf + len, size - len, " %d:%u",
i, device_stats->tx_wbm_rel_source[i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len, "\ntqm_rel_reason:");
for (i = 0; i < MAX_TQM_RELEASE_REASON; i++)
len += scnprintf(buf + len, size - len, " %d:%u",
i, device_stats->tqm_rel_reason[i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len, "\nfw_tx_status:");
for (i = 0; i < MAX_FW_TX_STATUS; i++)
len += scnprintf(buf + len, size - len, " %d:%u",
i, device_stats->fw_tx_status[i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len, "\ntx_enqueued:");
for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
len += scnprintf(buf + len, size - len, " %d:%u", i,
device_stats->tx_enqueued[i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len, "\ntx_completed:");
for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
len += scnprintf(buf + len, size - len, " %d:%u",
i, device_stats->tx_completed[i]);
len += scnprintf(buf + len, size - len, "\n");
/* per-radio pending TX counters */
for (i = 0; i < ab->num_radios; i++) {
ar = ath12k_mac_get_ar_by_pdev_id(ab, DP_SW2HW_MACID(i));
if (ar) {
len += scnprintf(buf + len, size - len,
"\nradio%d tx_pending: %u\n", i,
atomic_read(&ar->dp.num_tx_pending));
}
}
/* per-ring, per-device RX counters */
len += scnprintf(buf + len, size - len, "\nREO Rx Received:\n");
for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
len += scnprintf(buf + len, size - len, "Ring%d:", i + 1);
for (j = 0; j < ATH12K_MAX_DEVICES; j++) {
len += scnprintf(buf + len, size - len,
"\t%d:%u", j,
device_stats->reo_rx[i][j]);
}
len += scnprintf(buf + len, size - len, "\n");
}
len += scnprintf(buf + len, size - len, "\nRx WBM REL SRC Errors:\n");
for (i = 0; i < HAL_WBM_REL_SRC_MODULE_MAX; i++) {
len += scnprintf(buf + len, size - len, "%s:", wbm_rel_src[i]);
for (j = 0; j < ATH12K_MAX_DEVICES; j++) {
len += scnprintf(buf + len,
size - len,
"\t%d:%u", j,
device_stats->rx_wbm_rel_source[i][j]);
}
len += scnprintf(buf + len, size - len, "\n");
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
return ret;
}
/* file_operations for the read-only device_dp_stats debugfs entry */
static const struct file_operations fops_device_dp_stats = {
.read = ath12k_debugfs_dump_device_dp_stats,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
/* Create the per-device debugfs entries (simulate_fw_crash,
 * device_dp_stats) under the device's debugfs root.
 * debugfs_create_file() failures are intentionally ignored, as usual
 * for debugfs.
 */
void ath12k_debugfs_pdev_create(struct ath12k_base *ab)
{
debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
&fops_simulate_fw_crash);
debugfs_create_file("device_dp_stats", 0400, ab->debugfs_soc, ab,
&fops_device_dp_stats);
}
void ath12k_debugfs_soc_create(struct ath12k_base *ab)
{
bool dput_needed;

View File

@@ -16,6 +16,7 @@ void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
struct ath12k_fw_stats *stats);
void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void ath12k_debugfs_pdev_create(struct ath12k_base *ab);
static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
{
@@ -144,6 +145,10 @@ static inline void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
}
static inline void ath12k_debugfs_pdev_create(struct ath12k_base *ab)
{
}
#endif /* CONFIG_ATH12K_DEBUGFS */
#endif /* _ATH12K_DEBUGFS_H_ */

View File

@@ -168,6 +168,8 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
grp_mask = &ab->hw_params->ring_mask->reo_status[0];
break;
case HAL_RXDMA_MONITOR_STATUS:
grp_mask = &ab->hw_params->ring_mask->rx_mon_status[0];
break;
case HAL_RXDMA_MONITOR_DST:
grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
break;
@@ -274,12 +276,17 @@ int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
break;
case HAL_RXDMA_BUF:
case HAL_RXDMA_MONITOR_BUF:
case HAL_RXDMA_MONITOR_STATUS:
params.low_threshold = num_entries >> 3;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.intr_batch_cntr_thres_entries = 0;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
break;
case HAL_RXDMA_MONITOR_STATUS:
params.low_threshold = num_entries >> 3;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.intr_batch_cntr_thres_entries = 1;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
break;
case HAL_TX_MONITOR_DST:
params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
@@ -354,7 +361,10 @@ u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
/* only valid if idx_lookup_override is not set in tcl_data_cmd */
bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
bank_config |= u32_encode_bits(1, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
else
bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
HAL_TX_BANK_CONFIG_ADDRX_EN) |
@@ -919,6 +929,25 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
goto done;
}
if (ab->hw_params->ring_mask->rx_mon_status[grp_id]) {
ring_mask = ab->hw_params->ring_mask->rx_mon_status[grp_id];
for (i = 0; i < ab->num_radios; i++) {
for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
int id = i * ab->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
ath12k_dp_mon_process_ring(ab, id, napi, budget,
0);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
}
}
}
if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
@@ -982,11 +1011,6 @@ void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
int i;
if (!ab->mon_reap_timer.function)
return;
timer_delete_sync(&ab->mon_reap_timer);
for (i = 0; i < ab->num_radios; i++)
ath12k_dp_rx_pdev_free(ab, i);
}
@@ -1024,27 +1048,6 @@ void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
ab->hal_rx_ops->rx_desc_get_desc_size();
}
/* Timer callback: periodically reap the monitor rings of every rxdma
 * engine and re-arm itself ATH12K_MON_TIMER_INTERVAL ms later.
 */
static void ath12k_dp_service_mon_ring(struct timer_list *t)
{
struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
int i;
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
ATH12K_DP_RX_MONITOR_MODE);
/* self re-arming: keep servicing until the timer is deleted */
mod_timer(&ab->mon_reap_timer, jiffies +
msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
}
/* Set up the monitor-ring reap timer. Only needed when the hardware
 * lacks rxdma1 (no full monitor engine); otherwise do nothing.
 * The timer is armed elsewhere, not here.
 */
static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
{
if (ab->hw_params->rxdma1_enable)
return;
timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
}
int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
{
struct ath12k *ar;
@@ -1055,8 +1058,6 @@ int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
if (ret)
goto out;
ath12k_dp_mon_reap_timer_init(ab);
/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
@@ -1107,11 +1108,8 @@ static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
{
switch (arvif->ahvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
/* TODO: Verify the search type and flags since ast hash
* is not part of peer mapv3
*/
arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
break;
case WMI_VDEV_TYPE_AP:
case WMI_VDEV_TYPE_IBSS:

View File

@@ -70,6 +70,16 @@ struct ath12k_pdev_mon_stats {
u32 dest_mpdu_drop;
u32 dup_mon_linkdesc_cnt;
u32 dup_mon_buf_cnt;
u32 dest_mon_stuck;
u32 dest_mon_not_reaped;
};
/* State of the current monitor status ring buffer while reaping.
 * NO_DMA is returned when the buffer's DMA-done TLV is not yet set;
 * REPLINISH marks a buffer slot that should be refilled.
 * MATCH/LAG/LEAD presumably describe status-vs-dest ring alignment --
 * not exercised in this file, confirm against callers.
 */
enum dp_mon_status_buf_state {
DP_MON_STATUS_MATCH,
DP_MON_STATUS_NO_DMA,
DP_MON_STATUS_LAG,
DP_MON_STATUS_LEAD,
DP_MON_STATUS_REPLINISH,
};
struct dp_link_desc_bank {
@@ -121,8 +131,11 @@ struct ath12k_mon_data {
u32 mon_last_buf_cookie;
u64 mon_last_linkdesc_paddr;
u16 chan_noise_floor;
u32 err_bitmap;
u8 decap_format;
struct ath12k_pdev_mon_stats rx_mon_stats;
enum dp_mon_status_buf_state buf_state;
/* lock for monitor data */
spinlock_t mon_lock;
struct sk_buff_head rx_status_q;
@@ -191,6 +204,14 @@ struct ath12k_pdev_dp {
#define DP_RX_BUFFER_SIZE_LITE 1024
#define DP_RX_BUFFER_ALIGN_SIZE 128
#define RX_MON_STATUS_BASE_BUF_SIZE 2048
#define RX_MON_STATUS_BUF_ALIGN 128
#define RX_MON_STATUS_BUF_RESERVATION 128
#define RX_MON_STATUS_BUF_SIZE (RX_MON_STATUS_BASE_BUF_SIZE - \
(RX_MON_STATUS_BUF_RESERVATION + \
RX_MON_STATUS_BUF_ALIGN + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
#define DP_RXDMA_BUF_COOKIE_PDEV_ID GENMASK(19, 18)
@@ -266,6 +287,9 @@ struct ath12k_pdev_dp {
/* Invalid TX Bank ID value */
#define DP_INVALID_BANK_ID -1
#define MAX_TQM_RELEASE_REASON 15
#define MAX_FW_TX_STATUS 7
struct ath12k_dp_tx_bank_profile {
u8 is_configured;
u32 num_users;
@@ -338,6 +362,7 @@ struct ath12k_link_stats {
struct ath12k_dp {
struct ath12k_base *ab;
u32 mon_dest_ring_stuck_cnt;
u8 num_bank_profiles;
/* protects the access and update of bank_profiles */
spinlock_t tx_bank_lock;
@@ -390,6 +415,7 @@ struct ath12k_dp {
struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
struct dp_rxdma_mon_ring tx_mon_buf_ring;
struct dp_rxdma_mon_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
struct ath12k_reo_q_addr_lut reoq_lut;
struct ath12k_reo_q_addr_lut ml_reoq_lut;
};
@@ -1330,6 +1356,8 @@ struct htt_t2h_version_conf_msg {
#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0)
#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16)
#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0)
#define HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID GENMASK(15, 0)
#define HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL GENMASK(31, 16)
#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16)
#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16

View File

@@ -1416,6 +1416,40 @@ ath12k_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_
}
}
/* Translate the RX_MSDU_END info word's error bits into the driver's
 * HAL_RX_MPDU_ERR_* bitmap, OR-ing the result into @errmap.
 */
static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap)
{
	u32 err = 0;

	if (info & RX_MSDU_END_INFO13_FCS_ERR)
		err |= HAL_RX_MPDU_ERR_FCS;
	if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
		err |= HAL_RX_MPDU_ERR_DECRYPT;
	if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
		err |= HAL_RX_MPDU_ERR_TKIP_MIC;
	if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
		err |= HAL_RX_MPDU_ERR_AMSDU_ERR;
	if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
		err |= HAL_RX_MPDU_ERR_OVERFLOW;
	if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
		err |= HAL_RX_MPDU_ERR_MSDU_LEN;
	if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
		err |= HAL_RX_MPDU_ERR_MPDU_LEN;

	*errmap |= err;
}
static void
ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon,
const struct hal_rx_msdu_end *msdu_end)
{
ath12k_dp_mon_parse_rx_msdu_end_err(__le32_to_cpu(msdu_end->info2),
&pmon->err_bitmap);
pmon->decap_format = le32_get_bits(msdu_end->info1,
RX_MSDU_END_INFO11_DECAP_FORMAT);
}
static enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
struct ath12k_mon_data *pmon,
@@ -1655,6 +1689,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
case HAL_MON_BUF_ADDR:
return HAL_RX_MON_STATUS_BUF_ADDR;
case HAL_RX_MSDU_END:
ath12k_dp_mon_parse_status_msdu_end(pmon, tlv_data);
return HAL_RX_MON_STATUS_MSDU_END;
case HAL_RX_MPDU_END:
return HAL_RX_MON_STATUS_MPDU_END;
@@ -1716,6 +1751,124 @@ ath12k_dp_mon_fill_rx_stats_info(struct ath12k *ar,
}
}
/* Allocate and DMA-map one monitor status buffer, registering it in the
 * ring's IDR. On success returns the skb with its DMA address stored in
 * the skb's rxcb and the IDR id written to *buf_id; on any failure the
 * partial work is undone and NULL is returned.
 */
static struct sk_buff
*ath12k_dp_rx_alloc_mon_status_buf(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *rx_ring,
int *buf_id)
{
struct sk_buff *skb;
dma_addr_t paddr;
skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE);
if (!skb)
goto fail_alloc_skb;
/* hardware requires the data pointer 128-byte aligned */
if (!IS_ALIGNED((unsigned long)skb->data,
RX_MON_STATUS_BUF_ALIGN)) {
skb_pull(skb, PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) -
skb->data);
}
paddr = dma_map_single(ab->dev, skb->data,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ab->dev, paddr)))
goto fail_free_skb;
spin_lock_bh(&rx_ring->idr_lock);
*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
rx_ring->bufs_max, GFP_ATOMIC);
spin_unlock_bh(&rx_ring->idr_lock);
if (*buf_id < 0)
goto fail_dma_unmap;
ATH12K_SKB_RXCB(skb)->paddr = paddr;
return skb;
fail_dma_unmap:
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
fail_free_skb:
dev_kfree_skb_any(skb);
fail_alloc_skb:
return NULL;
}
/* Peek at the next (HP + 1) status ring entry without consuming it and
 * check whether its buffer's DMA has completed (DONE TLV written by HW).
 * Returns DP_MON_STATUS_REPLINISH when the next buffer is done, else
 * DP_MON_STATUS_NO_DMA. Caller holds the srng lock.
 */
static enum dp_mon_status_buf_state
ath12k_dp_rx_mon_buf_done(struct ath12k_base *ab, struct hal_srng *srng,
struct dp_rxdma_mon_ring *rx_ring)
{
struct ath12k_skb_rxcb *rxcb;
struct hal_tlv_64_hdr *tlv;
struct sk_buff *skb;
void *status_desc;
dma_addr_t paddr;
u32 cookie;
int buf_id;
u8 rbm;
status_desc = ath12k_hal_srng_src_next_peek(ab, srng);
if (!status_desc)
return DP_MON_STATUS_NO_DMA;
ath12k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);
buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
spin_lock_bh(&rx_ring->idr_lock);
skb = idr_find(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
if (!skb)
return DP_MON_STATUS_NO_DMA;
rxcb = ATH12K_SKB_RXCB(skb);
/* make the HW-written TLV header visible to the CPU */
dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
tlv = (struct hal_tlv_64_hdr *)skb->data;
if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) != HAL_RX_STATUS_BUFFER_DONE)
return DP_MON_STATUS_NO_DMA;
return DP_MON_STATUS_REPLINISH;
}
/* Compare an MSDU's PPDU id against the tracked *ppdu_id. When the MSDU
 * is ahead (normally, or because the id counter wrapped past its
 * maximum), advance *ppdu_id to it and return the new id; otherwise
 * return 0.
 */
static u32 ath12k_dp_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id)
{
	/* MSDU is ahead within the non-wrap window: catch up. */
	if (*ppdu_id < msdu_ppdu_id &&
	    msdu_ppdu_id - *ppdu_id < DP_NOT_PPDU_ID_WRAP_AROUND) {
		*ppdu_id = msdu_ppdu_id;
		return msdu_ppdu_id;
	}

	/* PPDU ID exceeded its maximum and restarted from 0. */
	if (*ppdu_id > msdu_ppdu_id &&
	    *ppdu_id - msdu_ppdu_id > DP_NOT_PPDU_ID_WRAP_AROUND) {
		*ppdu_id = msdu_ppdu_id;
		return msdu_ppdu_id;
	}

	return 0;
}
/* Extract the next link descriptor's buffer address info from an MSDU
 * link descriptor: fill @paddr, @sw_cookie and @rbm, and hand back a
 * pointer to the embedded buffer-address struct via @pp_buf_addr_info.
 */
static
void ath12k_dp_mon_next_link_desc_get(struct hal_rx_msdu_link *msdu_link,
				      dma_addr_t *paddr, u32 *sw_cookie, u8 *rbm,
				      struct ath12k_buffer_addr **pp_buf_addr_info)
{
	*pp_buf_addr_info = &msdu_link->buf_addr_info;

	ath12k_hal_rx_buf_addr_info_get(*pp_buf_addr_info, paddr,
					sw_cookie, rbm);
}
static void
ath12k_dp_mon_fill_rx_rate(struct ath12k *ar,
struct hal_rx_mon_ppdu_info *ppdu_info,
@@ -1795,6 +1948,24 @@ ath12k_dp_mon_fill_rx_rate(struct ath12k *ar,
}
}
static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar,
struct sk_buff *head_msdu,
struct sk_buff *tail_msdu)
{
u32 rx_pkt_offset, l2_hdr_offset, total_offset;
rx_pkt_offset = ar->ab->hal.hal_desc_sz;
l2_hdr_offset =
ath12k_dp_rx_h_l3pad(ar->ab, (struct hal_rx_desc *)tail_msdu->data);
if (ar->ab->hw_params->rxdma1_enable)
total_offset = ATH12K_MON_RX_PKT_OFFSET;
else
total_offset = rx_pkt_offset + l2_hdr_offset;
skb_pull(head_msdu, total_offset);
}
static struct sk_buff *
ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
struct dp_mon_mpdu *mon_mpdu,
@@ -1803,7 +1974,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
{
struct ath12k_base *ab = ar->ab;
struct sk_buff *msdu, *mpdu_buf, *prev_buf, *head_frag_list;
struct sk_buff *head_msdu;
struct sk_buff *head_msdu, *tail_msdu;
struct hal_rx_desc *rx_desc;
u8 *hdr_desc, *dest, decap_format = mon_mpdu->decap_format;
struct ieee80211_hdr_3addr *wh;
@@ -1813,8 +1984,9 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
mpdu_buf = NULL;
head_msdu = mon_mpdu->head;
tail_msdu = mon_mpdu->tail;
if (!head_msdu)
if (!head_msdu || !tail_msdu)
goto err_merge_fail;
ath12k_dp_mon_fill_rx_stats_info(ar, ppdu_info, rxs);
@@ -1842,14 +2014,14 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
ath12k_dp_mon_fill_rx_rate(ar, ppdu_info, rxs);
if (decap_format == DP_RX_DECAP_TYPE_RAW) {
skb_pull(head_msdu, ATH12K_MON_RX_PKT_OFFSET);
ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
prev_buf = head_msdu;
msdu = head_msdu->next;
head_frag_list = NULL;
while (msdu) {
skb_pull(msdu, ATH12K_MON_RX_PKT_OFFSET);
ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
if (!head_frag_list)
head_frag_list = msdu;
@@ -1884,7 +2056,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
msdu = head_msdu;
while (msdu) {
skb_pull(msdu, ATH12K_MON_RX_PKT_OFFSET);
ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
@@ -2223,43 +2395,30 @@ static int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
return 0;
}
static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap)
/* Hardware fill buffer with 128 bytes aligned. So need to reap it
* with 128 bytes aligned.
*/
#define RXDMA_DATA_DMA_BLOCK_SIZE 128
static void
ath12k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
bool *is_frag, u32 *total_len,
u32 *frag_len, u32 *msdu_cnt)
{
if (info & RX_MSDU_END_INFO13_FCS_ERR)
*errmap |= HAL_RX_MPDU_ERR_FCS;
if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
*is_frag = true;
*frag_len = (RX_MON_STATUS_BASE_BUF_SIZE -
sizeof(struct hal_rx_desc)) &
~(RXDMA_DATA_DMA_BLOCK_SIZE - 1);
*total_len += *frag_len;
} else {
if (*is_frag)
*frag_len = info->msdu_len - *total_len;
else
*frag_len = info->msdu_len;
if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
*errmap |= HAL_RX_MPDU_ERR_DECRYPT;
if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
*errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
*errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
*errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
*errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
*errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
}
static int
ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon,
const struct hal_rx_msdu_end *msdu_end)
{
struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
ath12k_dp_mon_parse_rx_msdu_end_err(__le32_to_cpu(msdu_end->info2),
&mon_mpdu->err_bitmap);
mon_mpdu->decap_format = le32_get_bits(msdu_end->info1,
RX_MSDU_END_INFO11_DECAP_FORMAT);
return 0;
*msdu_cnt -= 1;
}
}
static int
@@ -2335,7 +2494,9 @@ ath12k_dp_mon_parse_rx_dest_tlv(struct ath12k *ar,
pmon->mon_mpdu = NULL;
break;
case HAL_RX_MON_STATUS_MSDU_END:
return ath12k_dp_mon_parse_status_msdu_end(pmon, tlv_data);
pmon->mon_mpdu->decap_format = pmon->decap_format;
pmon->mon_mpdu->err_bitmap = pmon->err_bitmap;
break;
default:
break;
}
@@ -2370,7 +2531,7 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon,
hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar, pmon, tlv);
if (ar->monitor_started &&
if (ar->monitor_started && ar->ab->hw_params->rxdma1_enable &&
ath12k_dp_mon_parse_rx_dest_tlv(ar, pmon, hal_status, tlv->value))
return HAL_RX_MON_STATUS_PPDU_DONE;
@@ -2495,6 +2656,94 @@ int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
return -ENOMEM;
}
/* Refill the monitor status ring with up to @req_entries freshly
 * allocated, DMA-mapped buffers (a req_entries of 0 means "top up when
 * the ring is more than 3/4 empty"). Each buffer is registered in the
 * ring's IDR and its id encoded into the descriptor cookie. Returns the
 * number of entries actually replenished. Takes the srng lock.
 */
int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *rx_ring,
int req_entries)
{
enum hal_rx_buf_return_buf_manager mgr =
ab->hw_params->hal_params->rx_buf_rbm;
int num_free, num_remain, buf_id;
struct ath12k_buffer_addr *desc;
struct hal_srng *srng;
struct sk_buff *skb;
dma_addr_t paddr;
u32 cookie;
req_entries = min(req_entries, rx_ring->bufs_max);
srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
req_entries = num_free;
req_entries = min(num_free, req_entries);
num_remain = req_entries;
while (num_remain > 0) {
skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE);
if (!skb)
break;
/* hardware requires the data pointer 128-byte aligned */
if (!IS_ALIGNED((unsigned long)skb->data,
RX_MON_STATUS_BUF_ALIGN)) {
skb_pull(skb,
PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) -
skb->data);
}
paddr = dma_map_single(ab->dev, skb->data,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
if (dma_mapping_error(ab->dev, paddr))
goto fail_free_skb;
spin_lock_bh(&rx_ring->idr_lock);
buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
rx_ring->bufs_max * 3, GFP_ATOMIC);
spin_unlock_bh(&rx_ring->idr_lock);
if (buf_id < 0)
goto fail_dma_unmap;
cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
if (!desc)
goto fail_buf_unassign;
ATH12K_SKB_RXCB(skb)->paddr = paddr;
num_remain--;
ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
}
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return req_entries - num_remain;
/* unwind in reverse acquisition order on mid-loop failure */
fail_buf_unassign:
spin_lock_bh(&rx_ring->idr_lock);
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
fail_free_skb:
dev_kfree_skb_any(skb);
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return req_entries - num_remain;
}
static struct dp_mon_tx_ppdu_info *
ath12k_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon,
unsigned int ppdu_id,
@@ -3641,6 +3890,487 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget,
return num_buffs_reaped;
}
/* Reap completed buffers from the monitor status ring of @mac_id, up to
 * *budget entries (decremented as they are consumed). Buffers whose DMA
 * has completed are unmapped and appended to @skb_list; each consumed
 * ring slot is immediately refilled with a fresh buffer. Returns the
 * number of ring entries processed.
 */
static int ath12k_dp_rx_reap_mon_status_ring(struct ath12k_base *ab, int mac_id,
int *budget, struct sk_buff_head *skb_list)
{
const struct ath12k_hw_hal_params *hal_params;
int buf_id, srng_id, num_buffs_reaped = 0;
enum dp_mon_status_buf_state reap_status;
struct dp_rxdma_mon_ring *rx_ring;
struct ath12k_mon_data *pmon;
struct ath12k_skb_rxcb *rxcb;
struct hal_tlv_64_hdr *tlv;
void *rx_mon_status_desc;
struct hal_srng *srng;
struct ath12k_dp *dp;
struct sk_buff *skb;
struct ath12k *ar;
dma_addr_t paddr;
u32 cookie;
u8 rbm;
ar = ab->pdevs[ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id)].ar;
dp = &ab->dp;
pmon = &ar->dp.mon_data;
srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
while (*budget) {
*budget -= 1;
/* peek only: the entry is consumed at the bottom of the loop */
rx_mon_status_desc = ath12k_hal_srng_src_peek(ab, srng);
if (!rx_mon_status_desc) {
pmon->buf_state = DP_MON_STATUS_REPLINISH;
break;
}
ath12k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
&cookie, &rbm);
if (paddr) {
buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
spin_lock_bh(&rx_ring->idr_lock);
skb = idr_find(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
if (!skb) {
ath12k_warn(ab, "rx monitor status with invalid buf_id %d\n",
buf_id);
pmon->buf_state = DP_MON_STATUS_REPLINISH;
goto move_next;
}
rxcb = ATH12K_SKB_RXCB(skb);
dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
tlv = (struct hal_tlv_64_hdr *)skb->data;
if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) !=
HAL_RX_STATUS_BUFFER_DONE) {
pmon->buf_state = DP_MON_STATUS_NO_DMA;
ath12k_warn(ab,
"mon status DONE not set %llx, buf_id %d\n",
le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG),
buf_id);
/* RxDMA status done bit might not be set even
* though tp is moved by HW.
*/
/* If done status is missing:
* 1. As per MAC team's suggestion,
* when HP + 1 entry is peeked and if DMA
* is not done and if HP + 2 entry's DMA done
* is set. skip HP + 1 entry and
* start processing in next interrupt.
* 2. If HP + 2 entry's DMA done is not set,
* poll onto HP + 1 entry DMA done to be set.
* Check status for same buffer for next time
* dp_rx_mon_status_srng_process
*/
reap_status = ath12k_dp_rx_mon_buf_done(ab, srng,
rx_ring);
if (reap_status == DP_MON_STATUS_NO_DMA)
continue;
/* next entry is done: skip this stale buffer */
spin_lock_bh(&rx_ring->idr_lock);
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
dma_unmap_single(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
pmon->buf_state = DP_MON_STATUS_REPLINISH;
goto move_next;
}
/* buffer complete: detach it and queue for processing */
spin_lock_bh(&rx_ring->idr_lock);
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
dma_unmap_single(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
if (ath12k_dp_pkt_set_pktlen(skb, RX_MON_STATUS_BUF_SIZE)) {
dev_kfree_skb_any(skb);
goto move_next;
}
__skb_queue_tail(skb_list, skb);
} else {
pmon->buf_state = DP_MON_STATUS_REPLINISH;
}
move_next:
/* refill the slot just consumed before advancing the ring */
skb = ath12k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
&buf_id);
if (!skb) {
ath12k_warn(ab, "failed to alloc buffer for status ring\n");
hal_params = ab->hw_params->hal_params;
ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
hal_params->rx_buf_rbm);
num_buffs_reaped++;
break;
}
rxcb = ATH12K_SKB_RXCB(skb);
cookie = u32_encode_bits(mac_id, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
cookie,
ab->hw_params->hal_params->rx_buf_rbm);
ath12k_hal_srng_src_get_next_entry(ab, srng);
num_buffs_reaped++;
}
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
return num_buffs_reaped;
}
/* Pop the MSDUs belonging to one MPDU from a monitor destination ring
 * entry and chain them into a singly linked skb list.
 *
 * Walks the chain of RX MSDU link descriptors referenced by @ring_entry,
 * unmaps and collects each MSDU buffer, and hands the consumed RX
 * descriptors back through @used_list for later replenishment.
 *
 * @ar: driver pdev context
 * @mac_id: MAC/pdev id (not referenced in this function body)
 * @ring_entry: REO entrance ring descriptor to process
 * @head_msdu: out param, first skb of the popped chain
 * @tail_msdu: out param, last skb of the popped chain
 * @used_list: list the consumed ath12k_rx_desc_info entries are added to
 * @npackets: set to 1 when the whole MPDU (msdu_cnt == 0) has been reaped
 * @ppdu_id: in/out PPDU id used to detect a PPDU boundary mismatch
 *
 * Returns the number of rx buffers consumed.
 */
static u32
ath12k_dp_rx_mon_mpdu_pop(struct ath12k *ar, int mac_id,
			  void *ring_entry, struct sk_buff **head_msdu,
			  struct sk_buff **tail_msdu,
			  struct list_head *used_list,
			  u32 *npackets, u32 *ppdu_id)
{
	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
	struct ath12k_buffer_addr *p_buf_addr_info, *p_last_buf_addr_info;
	u32 msdu_ppdu_id = 0, msdu_cnt = 0, total_len = 0, frag_len = 0;
	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
	bool is_frag, is_first_msdu, drop_mpdu = false;
	struct hal_reo_entrance_ring *ent_desc =
		(struct hal_reo_entrance_ring *)ring_entry;
	u32 rx_bufs_used = 0, i = 0, desc_bank = 0;
	struct hal_rx_desc *rx_desc, *tail_rx_desc;
	struct hal_rx_msdu_link *msdu_link_desc;
	struct sk_buff *msdu = NULL, *last = NULL;
	struct ath12k_rx_desc_info *desc_info;
	struct ath12k_buffer_addr buf_info;
	struct hal_rx_msdu_list msdu_list;
	struct ath12k_skb_rxcb *rxcb;
	u16 num_msdus = 0;
	dma_addr_t paddr;
	u8 rbm;

	/* Fetch the first link descriptor address/cookie and the MSDU count
	 * for this MPDU from the entrance ring entry.
	 */
	ath12k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
					    &sw_cookie,
					    &p_last_buf_addr_info, &rbm,
					    &msdu_cnt);

	spin_lock_bh(&pmon->mon_lock);

	/* For RXDMA errors that leave the payload unusable, mark the whole
	 * MPDU for dropping and account it in the monitor stats.
	 */
	if (le32_get_bits(ent_desc->info1,
			  HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON) ==
			  HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
		u8 rxdma_err = le32_get_bits(ent_desc->info1,
					     HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE);
		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
			drop_mpdu = true;
			pmon->rx_mon_stats.dest_mpdu_drop++;
		}
	}

	is_frag = false;
	is_first_msdu = true;
	rx_pkt_offset = sizeof(struct hal_rx_desc);

	/* Walk the link descriptor chain until it ends or all MSDUs of the
	 * MPDU have been consumed.
	 */
	do {
		/* Guard against re-processing the same link descriptor */
		if (pmon->mon_last_linkdesc_paddr == paddr) {
			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
			spin_unlock_bh(&pmon->mon_lock);
			return rx_bufs_used;
		}

		desc_bank = u32_get_bits(sw_cookie, DP_LINK_DESC_BANK_MASK);
		msdu_link_desc =
			ar->ab->dp.link_desc_banks[desc_bank].vaddr +
			(paddr - ar->ab->dp.link_desc_banks[desc_bank].paddr);

		ath12k_hal_rx_msdu_list_get(ar, msdu_link_desc, &msdu_list,
					    &num_msdus);
		/* The last MSDU's rx_desc carries the l3 pad info used for
		 * all MSDUs of this link descriptor.
		 */
		desc_info = ath12k_dp_get_rx_desc(ar->ab,
						  msdu_list.sw_cookie[num_msdus - 1]);
		tail_rx_desc = (struct hal_rx_desc *)(desc_info->skb)->data;

		for (i = 0; i < num_msdus; i++) {
			u32 l2_hdr_offset;

			/* Skip buffers already seen in a previous pass */
			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
				ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
					   "i %d last_cookie %d is same\n",
					   i, pmon->mon_last_buf_cookie);
				drop_mpdu = true;
				pmon->rx_mon_stats.dup_mon_buf_cnt++;
				continue;
			}

			desc_info =
				ath12k_dp_get_rx_desc(ar->ab, msdu_list.sw_cookie[i]);
			msdu = desc_info->skb;
			if (!msdu) {
				ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
					   "msdu_pop: invalid msdu (%d/%d)\n",
					   i + 1, num_msdus);
				goto next_msdu;
			}
			rxcb = ATH12K_SKB_RXCB(msdu);
			/* Sanity check the DMA address recorded at map time
			 * against the one reported by the hardware.
			 */
			if (rxcb->paddr != msdu_list.paddr[i]) {
				ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
					   "i %d paddr %lx != %lx\n",
					   i, (unsigned long)rxcb->paddr,
					   (unsigned long)msdu_list.paddr[i]);
				drop_mpdu = true;
				continue;
			}
			if (!rxcb->unmapped) {
				dma_unmap_single(ar->ab->dev, rxcb->paddr,
						 msdu->len +
						 skb_tailroom(msdu),
						 DMA_FROM_DEVICE);
				rxcb->unmapped = 1;
			}
			if (drop_mpdu) {
				ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
					   "i %d drop msdu %p *ppdu_id %x\n",
					   i, msdu, *ppdu_id);
				dev_kfree_skb_any(msdu);
				msdu = NULL;
				goto next_msdu;
			}
			rx_desc = (struct hal_rx_desc *)msdu->data;
			l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab, tail_rx_desc);
			if (is_first_msdu) {
				/* First MSDU must carry a valid MPDU start
				 * TLV; otherwise drop the whole MPDU.
				 */
				if (!ath12k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
					drop_mpdu = true;
					dev_kfree_skb_any(msdu);
					msdu = NULL;
					pmon->mon_last_linkdesc_paddr = paddr;
					goto next_msdu;
				}
				msdu_ppdu_id =
					ath12k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
				/* Bail out if this MPDU belongs to a
				 * different PPDU than the status ring says.
				 */
				if (ath12k_dp_mon_comp_ppduid(msdu_ppdu_id,
							      ppdu_id)) {
					spin_unlock_bh(&pmon->mon_lock);
					return rx_bufs_used;
				}
				pmon->mon_last_linkdesc_paddr = paddr;
				is_first_msdu = false;
			}
			ath12k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
						  &is_frag, &total_len,
						  &frag_len, &msdu_cnt);
			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;

			if (ath12k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
				dev_kfree_skb_any(msdu);
				goto next_msdu;
			}

			/* Chain the MSDU onto the head/tail list */
			if (!(*head_msdu))
				*head_msdu = msdu;
			else if (last)
				last->next = msdu;

			last = msdu;
next_msdu:
			/* Buffer is consumed either way: remember its cookie
			 * and return the descriptor for replenishment.
			 */
			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
			rx_bufs_used++;
			desc_info->skb = NULL;
			list_add_tail(&desc_info->list, used_list);
		}

		ath12k_hal_rx_buf_addr_info_set(&buf_info, paddr, sw_cookie, rbm);

		/* Advance to the next link descriptor and return the current
		 * one to the WBM idle list.
		 */
		ath12k_dp_mon_next_link_desc_get(msdu_link_desc, &paddr,
						 &sw_cookie, &rbm,
						 &p_buf_addr_info);

		ath12k_dp_rx_link_desc_return(ar->ab, &buf_info,
					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);

		p_last_buf_addr_info = p_buf_addr_info;

	} while (paddr && msdu_cnt);

	spin_unlock_bh(&pmon->mon_lock);

	if (last)
		last->next = NULL;

	*tail_msdu = msdu;

	/* msdu_cnt == 0 means the complete MPDU was reaped */
	if (msdu_cnt == 0)
		*npackets = 1;

	return rx_bufs_used;
}
/* The destination ring processing is considered stuck if the destination
 * ring does not move while the status ring advances by 16 PPDUs. As a
 * workaround, the destination ring processing skips the current PPDU.
 */
#define MON_DEST_RING_STUCK_MAX_CNT 16
/* Process the monitor destination ring for @mac_id after a completed PPDU
 * was seen on the status ring: pop MPDUs, deliver them through the monitor
 * rx path and replenish the consumed rx buffers.
 *
 * @ar: driver pdev context
 * @mac_id: MAC/pdev id whose destination ring is processed
 * @quota: not referenced in this function body
 * @napi: NAPI context used for delivery
 */
static void ath12k_dp_rx_mon_dest_process(struct ath12k *ar, int mac_id,
					  u32 quota, struct napi_struct *napi)
{
	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
	struct ath12k_pdev_mon_stats *rx_mon_stats;
	u32 ppdu_id, rx_bufs_used = 0, ring_id;
	u32 mpdu_rx_bufs_used, npackets = 0;
	struct ath12k_dp *dp = &ar->ab->dp;
	struct ath12k_base *ab = ar->ab;
	void *ring_entry, *mon_dst_srng;
	struct dp_mon_mpdu *tmp_mpdu;
	LIST_HEAD(rx_desc_used_list);
	struct hal_srng *srng;

	ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
	srng = &ab->hal.srng_list[ring_id];

	mon_dst_srng = &ab->hal.srng_list[ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, mon_dst_srng);

	/* Status ring PPDU id acts as the reference for destination entries */
	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
	rx_mon_stats = &pmon->rx_mon_stats;

	while ((ring_entry = ath12k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		struct sk_buff *head_msdu, *tail_msdu;

		head_msdu = NULL;
		tail_msdu = NULL;

		mpdu_rx_bufs_used = ath12k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
							      &head_msdu, &tail_msdu,
							      &rx_desc_used_list,
							      &npackets, &ppdu_id);

		rx_bufs_used += mpdu_rx_bufs_used;

		/* Track whether the destination ring is making progress */
		if (mpdu_rx_bufs_used) {
			dp->mon_dest_ring_stuck_cnt = 0;
		} else {
			dp->mon_dest_ring_stuck_cnt++;
			rx_mon_stats->dest_mon_not_reaped++;
		}

		/* Stuck-ring workaround (see MON_DEST_RING_STUCK_MAX_CNT):
		 * resync the status PPDU id to the destination PPDU id and
		 * skip this destination ring PPDU.
		 */
		if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
			rx_mon_stats->dest_mon_stuck++;
			ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
				   "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
				   pmon->mon_ppdu_info.ppdu_id, ppdu_id,
				   dp->mon_dest_ring_stuck_cnt,
				   rx_mon_stats->dest_mon_not_reaped,
				   rx_mon_stats->dest_mon_stuck);
			spin_lock_bh(&pmon->mon_lock);
			pmon->mon_ppdu_info.ppdu_id = ppdu_id;
			spin_unlock_bh(&pmon->mon_lock);
			continue;
		}

		/* A PPDU id mismatch means a new PPDU started on the
		 * destination ring; restart status processing.
		 */
		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
			spin_lock_bh(&pmon->mon_lock);
			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
			spin_unlock_bh(&pmon->mon_lock);
			ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
				   "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
				   ppdu_id, pmon->mon_ppdu_info.ppdu_id,
				   rx_mon_stats->dest_mon_not_reaped,
				   rx_mon_stats->dest_mon_stuck);
			break;
		}

		/* Deliver the popped MPDU to the monitor rx path */
		if (head_msdu && tail_msdu) {
			tmp_mpdu = kzalloc(sizeof(*tmp_mpdu), GFP_ATOMIC);
			if (!tmp_mpdu)
				break;

			tmp_mpdu->head = head_msdu;
			tmp_mpdu->tail = tail_msdu;
			tmp_mpdu->err_bitmap = pmon->err_bitmap;
			tmp_mpdu->decap_format = pmon->decap_format;
			ath12k_dp_mon_rx_deliver(ar, tmp_mpdu,
						 &pmon->mon_ppdu_info, napi);
			rx_mon_stats->dest_mpdu_done++;
			kfree(tmp_mpdu);
		}

		ring_entry = ath12k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
	}
	ath12k_hal_srng_access_end(ar->ab, mon_dst_srng);

	spin_unlock_bh(&srng->lock);

	/* Give the consumed rx descriptors back to the refill ring */
	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		ath12k_dp_rx_bufs_replenish(ar->ab,
					    &dp->rx_refill_buf_ring,
					    &rx_desc_used_list,
					    rx_bufs_used);
	}
}
static int
__ath12k_dp_mon_process_ring(struct ath12k *ar, int mac_id,
struct napi_struct *napi, int *budget)
{
struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
struct ath12k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
enum hal_rx_mon_status hal_status;
struct sk_buff_head skb_list;
int num_buffs_reaped;
struct sk_buff *skb;
__skb_queue_head_init(&skb_list);
num_buffs_reaped = ath12k_dp_rx_reap_mon_status_ring(ar->ab, mac_id,
budget, &skb_list);
if (!num_buffs_reaped)
goto exit;
while ((skb = __skb_dequeue(&skb_list))) {
memset(ppdu_info, 0, sizeof(*ppdu_info));
ppdu_info->peer_id = HAL_INVALID_PEERID;
hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
if (ar->monitor_started &&
pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
hal_status == HAL_TLV_STATUS_PPDU_DONE) {
rx_mon_stats->status_ppdu_done++;
pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
ath12k_dp_rx_mon_dest_process(ar, mac_id, *budget, napi);
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
}
dev_kfree_skb_any(skb);
}
exit:
return num_buffs_reaped;
}
int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode)
@@ -3651,6 +4381,10 @@ int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
if (ab->hw_params->rxdma1_enable) {
if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
num_buffs_reaped = ath12k_dp_mon_srng_process(ar, &budget, napi);
} else {
if (ar->monitor_started)
num_buffs_reaped =
__ath12k_dp_mon_process_ring(ar, mac_id, napi, &budget);
}
return num_buffs_reaped;

View File

@@ -85,6 +85,9 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *buf_ring,
int req_entries);
int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *rx_ring,
int req_entries);
int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode);

View File

@@ -194,6 +194,22 @@ static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}
u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
struct hal_rx_desc *rx_desc)
{
return ab->hal_rx_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}
bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
struct hal_rx_desc *rx_desc)
{
u32 tlv_tag;
tlv_tag = ab->hal_rx_ops->rx_desc_get_mpdu_start_tag(rx_desc);
return tlv_tag == HAL_RX_MPDU_START;
}
static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
@@ -414,9 +430,17 @@ static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
int i;
ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
if (ab->hw_params->rxdma1_enable)
return 0;
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
ath12k_dp_rxdma_mon_buf_ring_free(ab,
&dp->rx_mon_status_refill_ring[i]);
return 0;
}
@@ -430,7 +454,12 @@ static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
ath12k_hal_srng_get_entrysize(ab, ringtype);
rx_ring->bufs_max = num_entries;
ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
if (ringtype == HAL_RXDMA_MONITOR_STATUS)
ath12k_dp_mon_status_bufs_replenish(ab, rx_ring,
num_entries);
else
ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
return 0;
}
@@ -451,7 +480,8 @@ static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
int ret;
struct dp_rxdma_mon_ring *mon_ring;
int ret, i;
ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
if (ret) {
@@ -464,9 +494,19 @@ static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
&dp->rxdma_mon_buf_ring,
HAL_RXDMA_MONITOR_BUF);
if (ret) {
if (ret)
ath12k_warn(ab,
"failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
mon_ring = &dp->rx_mon_status_refill_ring[i];
ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring,
HAL_RXDMA_MONITOR_STATUS);
if (ret) {
ath12k_warn(ab,
"failed to setup HAL_RXDMA_MONITOR_STATUS\n");
return ret;
}
}
@@ -819,15 +859,10 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
rx_tid->active = false;
}
/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
* to struct hal_wbm_release_ring, I couldn't figure out the logic behind
* that.
*/
static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
struct hal_reo_dest_ring *ring,
enum hal_wbm_rel_bm_act action)
int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
struct ath12k_buffer_addr *buf_addr_info,
enum hal_wbm_rel_bm_act action)
{
struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
struct hal_wbm_release_ring *desc;
struct ath12k_dp *dp = &ab->dp;
struct hal_srng *srng;
@@ -845,7 +880,7 @@ static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
goto exit;
}
ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);
ath12k_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action);
exit:
ath12k_hal_srng_access_end(ab, srng);
@@ -858,14 +893,17 @@ static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
bool rel_link_desc)
{
struct ath12k_buffer_addr *buf_addr_info;
struct ath12k_base *ab = rx_tid->ab;
lockdep_assert_held(&ab->base_lock);
if (rx_tid->dst_ring_desc) {
if (rel_link_desc)
ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
if (rel_link_desc) {
buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info;
ath12k_dp_rx_link_desc_return(ab, buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
kfree(rx_tid->dst_ring_desc);
rx_tid->dst_ring_desc = NULL;
}
@@ -1802,8 +1840,12 @@ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
peer_mac_h16, mac_addr);
ast_hash = le32_get_bits(resp->peer_map_ev.info2,
HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
peer_id);
hw_peer_id);
break;
case HTT_T2H_MSG_TYPE_PEER_UNMAP:
case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
@@ -2592,7 +2634,7 @@ static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab,
if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN)))
return true;
ab->soc_stats.invalid_rbm++;
ab->device_stats.invalid_rbm++;
WARN_ON_ONCE(1);
return false;
}
@@ -2755,9 +2797,9 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
struct ath12k_hw_group *ag = ab->ag;
struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
struct ath12k_hw_link *hw_links = ag->hw_links;
int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
struct ath12k_rx_desc_info *desc_info;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
@@ -2774,7 +2816,7 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
__skb_queue_head_init(&msdu_list);
for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
@@ -2835,13 +2877,14 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
DMA_FROM_DEVICE);
num_buffs_reaped[device_id]++;
ab->device_stats.reo_rx[ring_id][ab->device_id]++;
push_reason = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
dev_kfree_skb_any(msdu);
ab->soc_stats.hal_reo_error[ring_id]++;
ab->device_stats.hal_reo_error[ring_id]++;
continue;
}
@@ -2891,7 +2934,7 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
if (!total_msdu_reaped)
goto exit;
for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
@@ -3483,7 +3526,7 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
goto out_unlock;
}
} else {
ath12k_dp_rx_link_desc_return(ab, ring_desc,
ath12k_dp_rx_link_desc_return(ab, &ring_desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
@@ -3596,7 +3639,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
dev_kfree_skb_any(msdu);
ath12k_dp_rx_link_desc_return(ar->ab, desc,
ath12k_dp_rx_link_desc_return(ar->ab, &desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
exit:
@@ -3608,9 +3651,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
int budget)
{
struct ath12k_hw_group *ag = ab->ag;
struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
struct dp_link_desc_bank *link_desc_banks;
enum hal_rx_buf_return_buf_manager rbm;
struct hal_rx_msdu_link *link_desc_va;
@@ -3632,7 +3675,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
tot_n_bufs_reaped = 0;
quota = budget;
for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
reo_except = &ab->dp.reo_except_ring;
@@ -3646,7 +3689,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
while (budget &&
(reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
drop = false;
ab->soc_stats.err_ring_pkts++;
ab->device_stats.err_ring_pkts++;
ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
&desc_bank);
@@ -3673,9 +3716,10 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
if (rbm != partner_ab->dp.idle_link_rbm &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
ab->soc_stats.invalid_rbm++;
ab->device_stats.invalid_rbm++;
ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
ath12k_dp_rx_link_desc_return(partner_ab,
&reo_desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_REL_MSDU);
continue;
}
@@ -3693,7 +3737,8 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
drop = true;
/* Return the link desc back to wbm idle list */
ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
ath12k_dp_rx_link_desc_return(partner_ab,
&reo_desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
@@ -3720,7 +3765,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
spin_unlock_bh(&srng->lock);
for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
@@ -3836,7 +3881,7 @@ static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
bool drop = false;
ar->ab->soc_stats.reo_error[rxcb->err_code]++;
ar->ab->device_stats.reo_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
@@ -3909,7 +3954,7 @@ static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
bool drop = false;
u32 err_bitmap;
ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
ar->ab->device_stats.rxdma_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
@@ -3968,7 +4013,7 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct napi_struct *napi, int budget)
{
struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_dp *dp = &ab->dp;
@@ -3979,9 +4024,10 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct sk_buff_head msdu_list, scatter_msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
int total_num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
struct ath12k_hw_link *hw_links = ag->hw_links;
struct ath12k_base *partner_ab;
u8 hw_link_id, device_id;
@@ -3991,7 +4037,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
__skb_queue_head_init(&msdu_list);
__skb_queue_head_init(&scatter_msdu_list);
for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
@@ -4115,7 +4161,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
if (!total_num_buffs_reaped)
goto done;
for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
@@ -4155,6 +4201,12 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
dev_kfree_skb_any(msdu);
continue;
}
if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) {
device_id = ar->ab->device_id;
device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++;
}
ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
}
rcu_read_unlock();
@@ -4244,6 +4296,7 @@ void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
void ath12k_dp_rx_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct dp_srng *srng;
int i;
ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
@@ -4251,6 +4304,10 @@ void ath12k_dp_rx_free(struct ath12k_base *ab)
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
if (ab->hw_params->rx_mac_buf_ring)
ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
if (!ab->hw_params->rxdma1_enable) {
srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
ath12k_dp_srng_cleanup(ab, srng);
}
}
for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
@@ -4399,6 +4456,19 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
ret);
return ret;
}
} else {
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id =
dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i,
HAL_RXDMA_MONITOR_STATUS);
if (ret) {
ath12k_warn(ab,
"failed to configure mon_status_refill_ring%d %d\n",
i, ret);
return ret;
}
}
}
ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
@@ -4413,6 +4483,7 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
int ath12k_dp_rx_alloc(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct dp_srng *srng;
int i, ret;
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
@@ -4460,6 +4531,23 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
} else {
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr);
spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock);
}
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
ret = ath12k_dp_srng_setup(ab, srng,
HAL_RXDMA_MONITOR_STATUS, 0, i,
DP_RXDMA_MON_STATUS_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to setup mon status ring %d\n",
i);
return ret;
}
}
}
ret = ath12k_dp_rxdma_buf_setup(ab);
@@ -4530,17 +4618,15 @@ int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
return ret;
}
/* if rxdma1_enable is false, no need to setup
* rxdma_mon_desc_ring.
*/
pmon->mon_last_linkdesc_paddr = 0;
pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
spin_lock_init(&pmon->mon_lock);
if (!ar->ab->hw_params->rxdma1_enable)
return 0;
pmon->mon_last_linkdesc_paddr = 0;
pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list);
pmon->mon_mpdu = NULL;
spin_lock_init(&pmon->mon_lock);
return 0;
}

View File

@@ -165,5 +165,13 @@ void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_d
struct ath12k_dp_rx_info *rx_info);
int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype);
u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
struct hal_rx_desc *rx_desc);
bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
struct hal_rx_desc *rx_desc);
int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
struct ath12k_buffer_addr *buf_addr_info,
enum hal_wbm_rel_bm_act action);
bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
struct hal_rx_desc *rx_desc);
#endif /* ATH12K_DP_RX_H */

View File

@@ -350,7 +350,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
default:
/* TODO: Take care of other encap modes as well */
ret = -EINVAL;
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
atomic_inc(&ab->device_stats.tx_err.misc_fail);
goto fail_remove_tx_buf;
}
@@ -373,7 +373,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
map:
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, ti.paddr)) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
atomic_inc(&ab->device_stats.tx_err.misc_fail);
ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
ret = -ENOMEM;
goto fail_remove_tx_buf;
@@ -448,7 +448,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
* desc because the desc is directly enqueued onto hw queue.
*/
ath12k_hal_srng_access_end(ab, tcl_ring);
ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
ab->device_stats.tx_err.desc_na[ti.ring_id]++;
spin_unlock_bh(&tcl_ring->lock);
ret = -ENOMEM;
@@ -477,6 +477,8 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
arvif->link_stats.tx_enqueued++;
spin_unlock_bh(&arvif->link_stats_lock);
ab->device_stats.tx_enqueued[ti.ring_id]++;
ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
ath12k_hal_srng_access_end(ab, tcl_ring);
@@ -557,6 +559,7 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
info = IEEE80211_SKB_CB(msdu);
ar = skb_cb->ar;
ab->device_stats.tx_completed[tx_ring->tcl_data_ring_id]++;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
@@ -614,6 +617,7 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, void *desc,
wbm_status = le32_get_bits(status_desc->info0,
HTT_TX_WBM_COMP_INFO0_STATUS);
ab->device_stats.fw_tx_status[wbm_status]++;
switch (wbm_status) {
case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
@@ -760,7 +764,8 @@ static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status
static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
struct ath12k_tx_desc_params *desc_params,
struct hal_tx_status *ts)
struct hal_tx_status *ts,
int ring)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_hw *ah = ar->ah;
@@ -777,6 +782,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
}
skb_cb = ATH12K_SKB_CB(msdu);
ab->device_stats.tx_completed[ring]++;
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->paddr_ext_desc) {
@@ -907,6 +913,8 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
struct hal_wbm_release_ring *desc;
u8 pdev_id;
u64 desc_va;
enum hal_wbm_rel_src_module buf_rel_source;
enum hal_wbm_tqm_rel_reason rel_status;
spin_lock_bh(&status_ring->lock);
@@ -963,6 +971,15 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
desc_params.skb = tx_desc->skb;
desc_params.skb_ext_desc = tx_desc->skb_ext_desc;
/* Find the HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE value */
buf_rel_source = le32_get_bits(tx_status->info0,
HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE);
ab->device_stats.tx_wbm_rel_source[buf_rel_source]++;
rel_status = le32_get_bits(tx_status->info0,
HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
ab->device_stats.tqm_rel_reason[rel_status]++;
/* Release descriptor as soon as extracting necessary info
* to reduce contention
*/
@@ -979,7 +996,8 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
ath12k_dp_tx_complete_msdu(ar, &desc_params, &ts);
ath12k_dp_tx_complete_msdu(ar, &desc_params, &ts,
tx_ring->tcl_data_ring_id);
}
}
@@ -1511,6 +1529,44 @@ int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
return ret;
}
}
return 0;
}
if (!reset) {
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = ab->dp.rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
i,
HAL_RXDMA_BUF,
DP_RXDMA_REFILL_RING_SIZE,
&tlv_filter);
if (ret) {
ath12k_err(ab,
"failed to setup filter for mon rx buf %d\n",
ret);
return ret;
}
}
}
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = ab->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
if (!reset) {
tlv_filter.rx_filter =
HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
}
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
i,
HAL_RXDMA_MONITOR_STATUS,
RX_MON_STATUS_BUF_SIZE,
&tlv_filter);
if (ret) {
ath12k_err(ab,
"failed to setup filter for mon status buf %d\n",
ret);
return ret;
}
}
return 0;

View File

@@ -154,7 +154,14 @@ static const struct hal_srng_config hw_srng_config_template[] = {
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
},
[HAL_RXDMA_MONITOR_STATUS] = { 0, },
[HAL_RXDMA_MONITOR_STATUS] = {
.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
.max_rings = 1,
.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
.mac_type = ATH12K_HAL_SRNG_PMAC,
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
},
[HAL_RXDMA_MONITOR_DESC] = { 0, },
[HAL_RXDMA_DIR_BUF] = {
.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
@@ -1943,7 +1950,7 @@ u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc
{
u32 len;
len = le32_get_bits(desc->flags, HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
len = le32_get_bits(READ_ONCE(desc->flags), HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
desc->flags &= ~cpu_to_le32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
return len;
@@ -2035,6 +2042,24 @@ int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng,
return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}
void *ath12k_hal_srng_src_next_peek(struct ath12k_base *ab,
struct hal_srng *srng)
{
void *desc;
u32 next_hp;
lockdep_assert_held(&srng->lock);
next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
if (next_hp == srng->u.src_ring.cached_tp)
return NULL;
desc = srng->ring_base_vaddr + next_hp;
return desc;
}
void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
struct hal_srng *srng)
{
@@ -2068,6 +2093,17 @@ void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
return desc;
}
void *ath12k_hal_srng_src_peek(struct ath12k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
srng->u.src_ring.cached_tp)
return NULL;
return srng->ring_base_vaddr + srng->u.src_ring.hp;
}
void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab,
struct hal_srng *srng)
{
@@ -2113,7 +2149,7 @@ void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng)
srng->u.src_ring.cached_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
else
srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
}
/* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin()

View File

@@ -498,6 +498,7 @@ enum hal_srng_ring_id {
HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0 = HAL_SRNG_RING_ID_PMAC1_ID_START,
HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
@@ -1143,6 +1144,7 @@ void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng,
struct hal_srng_params *params);
void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab,
struct hal_srng *srng);
void *ath12k_hal_srng_src_peek(struct ath12k_base *ab, struct hal_srng *srng);
void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng);
int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng,
bool sync_hw_ptr);
@@ -1150,6 +1152,8 @@ void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab,
struct hal_srng *srng);
void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab,
struct hal_srng *srng);
void *ath12k_hal_srng_src_next_peek(struct ath12k_base *ab,
struct hal_srng *srng);
void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
struct hal_srng *srng);
int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng,

View File

@@ -707,7 +707,7 @@ enum hal_rx_msdu_desc_reo_dest_ind {
#define RX_MSDU_DESC_INFO0_DECAP_FORMAT GENMASK(30, 29)
#define HAL_RX_MSDU_PKT_LENGTH_GET(val) \
(u32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH))
(le32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH))
struct rx_msdu_desc {
__le32 info0;
@@ -1008,6 +1008,10 @@ enum hal_reo_entr_rxdma_ecode {
HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR,
HAL_REO_ENTR_RING_RXDMA_ECODE_MULTICAST_ECHO_ERR,
HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_MISMATCH_ERR,
HAL_REO_ENTR_RING_RXDMA_ECODE_UNAUTH_WDS_ERR,
HAL_REO_ENTR_RING_RXDMA_ECODE_GRPCAST_AMSDU_WDS_ERR,
HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
};
@@ -1809,6 +1813,7 @@ enum hal_wbm_rel_src_module {
HAL_WBM_REL_SRC_MODULE_REO,
HAL_WBM_REL_SRC_MODULE_FW,
HAL_WBM_REL_SRC_MODULE_SW,
HAL_WBM_REL_SRC_MODULE_MAX,
};
enum hal_wbm_rel_desc_type {

View File

@@ -326,7 +326,7 @@ int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
err_code = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_ERROR_CODE);
ab->soc_stats.reo_error[err_code]++;
ab->device_stats.reo_error[err_code]++;
if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
@@ -381,7 +381,7 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
val = le32_get_bits(wbm_desc->buf_addr_info.info1,
BUFFER_ADDR_INFO1_RET_BUF_MGR);
if (val != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
ab->device_stats.invalid_rbm++;
return -EINVAL;
}
@@ -393,7 +393,7 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
val = le32_get_bits(wbm_cc_desc->info0,
HAL_WBM_RELEASE_RX_CC_INFO0_RBM);
if (val != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
ab->device_stats.invalid_rbm++;
return -EINVAL;
}
@@ -446,17 +446,97 @@ void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
*cookie = le32_get_bits(buff_addr->info1, BUFFER_ADDR_INFO1_SW_COOKIE);
}
void ath12k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
u32 *sw_cookie,
struct ath12k_buffer_addr **pp_buf_addr,
u8 *rbm, u32 *msdu_cnt)
{
struct hal_reo_entrance_ring *reo_ent_ring =
(struct hal_reo_entrance_ring *)rx_desc;
struct ath12k_buffer_addr *buf_addr_info;
struct rx_mpdu_desc *rx_mpdu_desc_info_details;
rx_mpdu_desc_info_details =
(struct rx_mpdu_desc *)&reo_ent_ring->rx_mpdu_info;
*msdu_cnt = le32_get_bits(rx_mpdu_desc_info_details->info0,
RX_MPDU_DESC_INFO0_MSDU_COUNT);
buf_addr_info = (struct ath12k_buffer_addr *)&reo_ent_ring->buf_addr_info;
*paddr = (((u64)le32_get_bits(buf_addr_info->info1,
BUFFER_ADDR_INFO1_ADDR)) << 32) |
le32_get_bits(buf_addr_info->info0,
BUFFER_ADDR_INFO0_ADDR);
*sw_cookie = le32_get_bits(buf_addr_info->info1,
BUFFER_ADDR_INFO1_SW_COOKIE);
*rbm = le32_get_bits(buf_addr_info->info1,
BUFFER_ADDR_INFO1_RET_BUF_MGR);
*pp_buf_addr = (void *)buf_addr_info;
}
void ath12k_hal_rx_msdu_list_get(struct ath12k *ar,
struct hal_rx_msdu_link *link_desc,
struct hal_rx_msdu_list *msdu_list,
u16 *num_msdus)
{
struct hal_rx_msdu_details *msdu_details = NULL;
struct rx_msdu_desc *msdu_desc_info = NULL;
u32 last = 0, first = 0;
u8 tmp = 0;
int i;
last = u32_encode_bits(last, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
first = u32_encode_bits(first, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
msdu_details = &link_desc->msdu_link[0];
for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
if (!i && le32_get_bits(msdu_details[i].buf_addr_info.info0,
BUFFER_ADDR_INFO0_ADDR) == 0)
break;
if (le32_get_bits(msdu_details[i].buf_addr_info.info0,
BUFFER_ADDR_INFO0_ADDR) == 0) {
msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
msdu_desc_info->info0 |= cpu_to_le32(last);
break;
}
msdu_desc_info = &msdu_details[i].rx_msdu_info;
if (!i)
msdu_desc_info->info0 |= cpu_to_le32(first);
else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
msdu_desc_info->info0 |= cpu_to_le32(last);
msdu_list->msdu_info[i].msdu_flags = le32_to_cpu(msdu_desc_info->info0);
msdu_list->msdu_info[i].msdu_len =
HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
msdu_list->sw_cookie[i] =
le32_get_bits(msdu_details[i].buf_addr_info.info1,
BUFFER_ADDR_INFO1_SW_COOKIE);
tmp = le32_get_bits(msdu_details[i].buf_addr_info.info1,
BUFFER_ADDR_INFO1_RET_BUF_MGR);
msdu_list->paddr[i] =
((u64)(le32_get_bits(msdu_details[i].buf_addr_info.info1,
BUFFER_ADDR_INFO1_ADDR)) << 32) |
le32_get_bits(msdu_details[i].buf_addr_info.info0,
BUFFER_ADDR_INFO0_ADDR);
msdu_list->rbm[i] = tmp;
}
*num_msdus = i;
}
void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
struct hal_wbm_release_ring *dst_desc,
struct hal_wbm_release_ring *src_desc,
struct hal_wbm_release_ring *desc,
struct ath12k_buffer_addr *buf_addr_info,
enum hal_wbm_rel_bm_act action)
{
dst_desc->buf_addr_info = src_desc->buf_addr_info;
dst_desc->info0 |= le32_encode_bits(HAL_WBM_REL_SRC_MODULE_SW,
HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE) |
le32_encode_bits(action, HAL_WBM_RELEASE_INFO0_BM_ACTION) |
le32_encode_bits(HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
HAL_WBM_RELEASE_INFO0_DESC_TYPE);
desc->buf_addr_info = *buf_addr_info;
desc->info0 |= le32_encode_bits(HAL_WBM_REL_SRC_MODULE_SW,
HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE) |
le32_encode_bits(action, HAL_WBM_RELEASE_INFO0_BM_ACTION) |
le32_encode_bits(HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
HAL_WBM_RELEASE_INFO0_DESC_TYPE);
}
void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,

View File

@@ -538,6 +538,7 @@ struct hal_rx_msdu_desc_info {
#define HAL_RX_NUM_MSDU_DESC 6
struct hal_rx_msdu_list {
struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC];
u64 paddr[HAL_RX_NUM_MSDU_DESC];
u32 sw_cookie[HAL_RX_NUM_MSDU_DESC];
u8 rbm[HAL_RX_NUM_MSDU_DESC];
};
@@ -1141,8 +1142,8 @@ void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_ms
u32 *msdu_cookies,
enum hal_rx_buf_return_buf_manager *rbm);
void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
struct hal_wbm_release_ring *dst_desc,
struct hal_wbm_release_ring *src_desc,
struct hal_wbm_release_ring *desc,
struct ath12k_buffer_addr *buf_addr_info,
enum hal_wbm_rel_bm_act action);
void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo,
dma_addr_t paddr, u32 cookie, u8 manager);
@@ -1157,5 +1158,12 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
struct ath12k_buffer_addr *buff_addr,
dma_addr_t *paddr, u32 *cookie);
void ath12k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, u32 *sw_cookie,
struct ath12k_buffer_addr **pp_buf_addr,
u8 *rbm, u32 *msdu_cnt);
void ath12k_hal_rx_msdu_list_get(struct ath12k *ar,
struct hal_rx_msdu_link *link_desc,
struct hal_rx_msdu_list *msdu_list,
u16 *num_msdus);
#endif

View File

@@ -118,6 +118,10 @@ static const struct ath12k_hw_ops wcn7850_ops = {
#define ATH12K_TX_MON_RING_MASK_0 0x1
#define ATH12K_TX_MON_RING_MASK_1 0x2
#define ATH12K_RX_MON_STATUS_RING_MASK_0 0x1
#define ATH12K_RX_MON_STATUS_RING_MASK_1 0x2
#define ATH12K_RX_MON_STATUS_RING_MASK_2 0x4
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config ath12k_target_ce_config_wlan_qcn9274[] = {
/* CE0: host->target HTC control and raw streams */
@@ -836,6 +840,12 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
},
.rx_mon_dest = {
},
.rx_mon_status = {
0, 0, 0, 0,
ATH12K_RX_MON_STATUS_RING_MASK_0,
ATH12K_RX_MON_STATUS_RING_MASK_1,
ATH12K_RX_MON_STATUS_RING_MASK_2,
},
.rx = {
0, 0, 0,
ATH12K_RX_RING_MASK_0,
@@ -1370,7 +1380,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.supports_monitor = true,
.idle_ps = true,
.download_calib = false,

View File

@@ -135,6 +135,7 @@ enum hal_encrypt_type;
struct ath12k_hw_ring_mask {
u8 tx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
u8 rx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX];
u8 rx_mon_status[ATH12K_EXT_IRQ_GRP_NUM_MAX];
u8 rx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
u8 rx_err[ATH12K_EXT_IRQ_GRP_NUM_MAX];
u8 rx_wbm_rel[ATH12K_EXT_IRQ_GRP_NUM_MAX];

View File

@@ -229,7 +229,8 @@ ath12k_phymodes[NUM_NL80211_BANDS][ATH12K_CHAN_WIDTH_NUM] = {
const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default = {
.rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE |
HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO,
.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
@@ -580,22 +581,16 @@ static int ath12k_mac_vif_link_chan(struct ieee80211_vif *vif, u8 link_id,
return 0;
}
static struct ath12k_link_vif *ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif)
static struct ath12k_link_vif *
ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif,
struct ieee80211_bss_conf *link_conf)
{
struct ieee80211_bss_conf *link_conf, *tx_bss_conf;
struct ieee80211_bss_conf *tx_bss_conf;
struct ath12k *ar = arvif->ar;
struct ath12k_vif *tx_ahvif;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
link_conf = ath12k_mac_get_link_bss_conf(arvif);
if (!link_conf) {
ath12k_warn(ar->ab,
"unable to access bss link conf for link %u required to retrieve transmitting link conf\n",
arvif->link_id);
return NULL;
}
tx_bss_conf = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
link_conf->tx_bss_conf);
if (tx_bss_conf) {
@@ -1623,7 +1618,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
return -ENOLINK;
}
tx_arvif = ath12k_mac_get_tx_arvif(arvif);
tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
if (tx_arvif) {
if (tx_arvif != arvif && arvif->is_up)
return 0;
@@ -1693,6 +1688,7 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
{
struct ath12k_wmi_vdev_up_params params = {};
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_bss_conf *link_conf;
struct ath12k_link_vif *tx_arvif;
struct ath12k *ar = arvif->ar;
int ret;
@@ -1725,7 +1721,15 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
tx_arvif = ath12k_mac_get_tx_arvif(arvif);
link_conf = ath12k_mac_get_link_bss_conf(arvif);
if (!link_conf) {
ath12k_warn(ar->ab,
"unable to access bss link conf for link %u required to retrieve transmitting link conf\n",
arvif->link_id);
return;
}
tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
if (tx_arvif) {
params.tx_bssid = tx_arvif->bssid;
params.nontx_profile_idx = info->bssid_index;
@@ -2935,6 +2939,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
const struct ieee80211_sta_eht_cap *eht_cap;
const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_link_sta *link_sta;
struct ieee80211_bss_conf *link_conf;
u32 *rx_mcs, *tx_mcs;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -2946,6 +2951,12 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
return;
}
link_conf = ath12k_mac_get_link_bss_conf(arvif);
if (!link_conf) {
ath12k_warn(ar->ab, "unable to access link_conf in peer assoc eht set\n");
return;
}
eht_cap = &link_sta->eht_cap;
he_cap = &link_sta->he_cap;
if (!he_cap->has_he || !eht_cap->has_eht)
@@ -3017,6 +3028,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
}
arg->punct_bitmap = ~arvif->punct_bitmap;
arg->eht_disable_mcs15 = link_conf->eht_disable_mcs15;
}
static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta,
@@ -3047,6 +3059,7 @@ static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta,
ml->ml_peer_id = ahsta->ml_peer_id;
ml->ieee_link_id = arsta->link_id;
ml->num_partner_links = 0;
ml->eml_cap = sta->eml_cap;
links = ahsta->links_map;
rcu_read_lock();
@@ -3480,25 +3493,18 @@ static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
if (arvif)
return arvif;
if (!vif->valid_links) {
/* Use deflink for Non-ML VIFs and mark the link id as 0
*/
link_id = 0;
/* If this is the first link arvif being created for an ML VIF
* use the preallocated deflink memory except for scan arvifs
*/
if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
arvif = &ahvif->deflink;
if (vif->type == NL80211_IFTYPE_STATION)
arvif->is_sta_assoc_link = true;
} else {
/* If this is the first link arvif being created for an ML VIF
* use the preallocated deflink memory except for scan arvifs
*/
if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
arvif = &ahvif->deflink;
if (vif->type == NL80211_IFTYPE_STATION)
arvif->is_sta_assoc_link = true;
} else {
arvif = (struct ath12k_link_vif *)
kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL);
if (!arvif)
return NULL;
}
arvif = kzalloc(sizeof(*arvif), GFP_KERNEL);
if (!arvif)
return NULL;
}
ath12k_mac_init_arvif(ahvif, arvif, link_id);
@@ -3746,6 +3752,18 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
psmode, arvif->vdev_id, ret);
}
static bool ath12k_mac_supports_station_tpc(struct ath12k *ar,
struct ath12k_vif *ahvif,
const struct cfg80211_chan_def *chandef)
{
return ath12k_wmi_supports_6ghz_cc_ext(ar) &&
test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map) &&
ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE &&
chandef->chan &&
chandef->chan->band == NL80211_BAND_6GHZ;
}
static void ath12k_mac_bss_info_changed(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ieee80211_bss_conf *info,
@@ -4647,6 +4665,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
spin_lock_bh(&ar->data_lock);
ar->scan.state = ATH12K_SCAN_IDLE;
spin_unlock_bh(&ar->data_lock);
goto exit;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac scan started");
@@ -5779,12 +5798,15 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state new_state)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_bss_conf *link_conf;
struct ath12k *ar = arvif->ar;
struct ath12k_reg_info *reg_info;
struct ath12k_base *ab = ar->ab;
int ret = 0;
lockdep_assert_wiphy(hw->wiphy);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n",
ath12k_dbg(ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n",
arsta->link_id, arsta->addr, old_state, new_state);
/* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: Remove the station
@@ -5794,7 +5816,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NOTEXIST)) {
ret = ath12k_mac_station_remove(ar, arvif, arsta);
if (ret) {
ath12k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
ath12k_warn(ab, "Failed to remove station: %pM for VDEV: %d\n",
arsta->addr, arvif->vdev_id);
goto exit;
}
@@ -5805,7 +5827,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NONE) {
ret = ath12k_mac_station_add(ar, arvif, arsta);
if (ret)
ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
ath12k_warn(ab, "Failed to add station: %pM for VDEV: %d\n",
arsta->addr, arvif->vdev_id);
/* IEEE80211_STA_AUTH -> IEEE80211_STA_ASSOC: Send station assoc command for
@@ -5818,7 +5840,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC)) {
ret = ath12k_mac_station_assoc(ar, arvif, arsta, false);
if (ret)
ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
ath12k_warn(ab, "Failed to associate station: %pM\n",
arsta->addr);
/* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as
@@ -5827,9 +5849,21 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
ret = ath12k_mac_station_authorize(ar, arvif, arsta);
if (ret)
ath12k_warn(ar->ab, "Failed to authorize station: %pM\n",
if (ret) {
ath12k_warn(ab, "Failed to authorize station: %pM\n",
arsta->addr);
goto exit;
}
if (ath12k_wmi_supports_6ghz_cc_ext(ar) &&
arvif->ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
link_conf = ath12k_mac_get_link_bss_conf(arvif);
reg_info = ab->reg_info[ar->pdev_idx];
ath12k_dbg(ab, ATH12K_DBG_MAC, "connection done, update reg rules\n");
ath12k_hw_to_ah(hw)->regd_updated = false;
ath12k_reg_handle_chan_list(ab, reg_info, arvif->ahvif->vdev_type,
link_conf->power_type);
}
/* IEEE80211_STA_AUTHORIZED -> IEEE80211_STA_ASSOC: station may be in removal,
* deauthorize it.
@@ -5848,7 +5882,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC)) {
ret = ath12k_mac_station_disassoc(ar, arvif, arsta);
if (ret)
ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n",
ath12k_warn(ab, "Failed to disassociate station: %pM\n",
arsta->addr);
}
@@ -7466,6 +7500,7 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ath12k_peer *peer;
unsigned long links_map;
bool is_mcast = false;
bool is_dvlan = false;
struct ethhdr *eth;
bool is_prb_rsp;
u16 mcbc_gsn;
@@ -7531,7 +7566,14 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_AP && vif->p2p)
ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
if (!vif->valid_links || !is_mcast ||
/* Checking if it is a DVLAN frame */
if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
!(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
!(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
ieee80211_has_protected(hdr->frame_control))
is_dvlan = true;
if (!vif->valid_links || !is_mcast || is_dvlan ||
test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) {
ret = ath12k_dp_tx(ar, arvif, skb, false, 0, is_mcast);
if (unlikely(ret)) {
@@ -7990,7 +8032,7 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
return -ENOLINK;
}
tx_arvif = ath12k_mac_get_tx_arvif(arvif);
tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
if (!tx_arvif)
return 0;
@@ -8319,50 +8361,9 @@ void ath12k_mac_11d_scan_stop_all(struct ath12k_base *ab)
}
}
int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
static void ath12k_mac_determine_vdev_type(struct ieee80211_vif *vif,
struct ath12k_vif *ahvif)
{
struct ath12k_hw *ah = ar->ah;
struct ath12k_base *ab = ar->ab;
struct ieee80211_hw *hw = ah->hw;
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
struct ath12k_wmi_peer_create_arg peer_param = {0};
struct ieee80211_bss_conf *link_conf;
u32 param_id, param_value;
u16 nss;
int i;
int ret, vdev_id;
u8 link_id;
lockdep_assert_wiphy(hw->wiphy);
/* In NO_VIRTUAL_MONITOR, its necessary to restrict only one monitor
* interface in each radio
*/
if (vif->type == NL80211_IFTYPE_MONITOR && ar->monitor_vdev_created)
return -EINVAL;
/* If no link is active and scan vdev is requested
* use a default link conf for scan address purpose.
*/
if (arvif->link_id == ATH12K_DEFAULT_SCAN_LINK && vif->valid_links)
link_id = ffs(vif->valid_links) - 1;
else
link_id = arvif->link_id;
link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
if (!link_conf) {
ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n",
vif->addr, arvif->link_id);
return -ENOLINK;
}
memcpy(arvif->bssid, link_conf->addr, ETH_ALEN);
arvif->ar = ar;
vdev_id = __ffs64(ab->free_vdev_map);
arvif->vdev_id = vdev_id;
ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
switch (vif->type) {
@@ -8386,7 +8387,6 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
break;
case NL80211_IFTYPE_MONITOR:
ahvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
ar->monitor_vdev_id = vdev_id;
break;
case NL80211_IFTYPE_P2P_DEVICE:
ahvif->vdev_type = WMI_VDEV_TYPE_STA;
@@ -8396,6 +8396,53 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
WARN_ON(1);
break;
}
}
int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
{
struct ath12k_hw *ah = ar->ah;
struct ath12k_base *ab = ar->ab;
struct ieee80211_hw *hw = ah->hw;
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
struct ath12k_wmi_peer_create_arg peer_param = {0};
struct ieee80211_bss_conf *link_conf = NULL;
u32 param_id, param_value;
u16 nss;
int i;
int ret, vdev_id;
u8 link_id;
lockdep_assert_wiphy(hw->wiphy);
/* In NO_VIRTUAL_MONITOR, its necessary to restrict only one monitor
* interface in each radio
*/
if (vif->type == NL80211_IFTYPE_MONITOR && ar->monitor_vdev_created)
return -EINVAL;
link_id = arvif->link_id;
if (link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
if (!link_conf) {
ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n",
vif->addr, arvif->link_id);
return -ENOLINK;
}
}
if (link_conf)
memcpy(arvif->bssid, link_conf->addr, ETH_ALEN);
else
memcpy(arvif->bssid, vif->addr, ETH_ALEN);
arvif->ar = ar;
vdev_id = __ffs64(ab->free_vdev_map);
arvif->vdev_id = vdev_id;
if (vif->type == NL80211_IFTYPE_MONITOR)
ar->monitor_vdev_id = vdev_id;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev create id %d type %d subtype %d map %llx\n",
arvif->vdev_id, ahvif->vdev_type, ahvif->vdev_subtype,
@@ -8513,7 +8560,11 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
break;
}
arvif->txpower = link_conf->txpower;
if (link_conf)
arvif->txpower = link_conf->txpower;
else
arvif->txpower = NL80211_TX_POWER_AUTOMATIC;
ret = ath12k_mac_txpower_recalc(ar);
if (ret)
goto err_peer_del;
@@ -8751,7 +8802,10 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_reg_info *reg_info;
struct ath12k_link_vif *arvif;
struct ath12k_base *ab;
struct ath12k *ar;
int i;
lockdep_assert_wiphy(hw->wiphy);
@@ -8770,6 +8824,22 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
ath12k_mac_determine_vdev_type(vif, ahvif);
for_each_ar(ah, ar, i) {
if (!ath12k_wmi_supports_6ghz_cc_ext(ar))
continue;
ab = ar->ab;
reg_info = ab->reg_info[ar->pdev_idx];
ath12k_dbg(ab, ATH12K_DBG_MAC, "interface added to change reg rules\n");
ah->regd_updated = false;
ath12k_reg_handle_chan_list(ab, reg_info, ahvif->vdev_type,
IEEE80211_REG_UNSET_AP);
break;
}
/* Defer vdev creation until assign_chanctx or hw_scan is initiated as driver
* will not know if this interface is an ML vif at this point.
*/
@@ -9326,6 +9396,15 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
return ret;
}
/* TODO: For now we only set TPC power here. However when
* channel changes, say CSA, it should be updated again.
*/
if (ath12k_mac_supports_station_tpc(ar, ahvif, chandef)) {
ath12k_mac_fill_reg_tpc_info(ar, arvif, ctx);
ath12k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id,
&arvif->reg_tpc_info);
}
ar->num_started_vdevs++;
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
ahvif->vif->addr, arvif->vdev_id);
@@ -9581,7 +9660,7 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
tx_arvif = ath12k_mac_get_tx_arvif(arvif);
tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
if (tx_arvif) {
params.tx_bssid = tx_arvif->bssid;
params.nontx_profile_idx = link_conf->bssid_index;
@@ -9718,6 +9797,391 @@ static int ath12k_start_vdev_delay(struct ath12k *ar,
return 0;
}
static u8 ath12k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def)
{
if (chan_def->chan->flags & IEEE80211_CHAN_PSD) {
switch (chan_def->width) {
case NL80211_CHAN_WIDTH_20:
return 1;
case NL80211_CHAN_WIDTH_40:
return 2;
case NL80211_CHAN_WIDTH_80:
return 4;
case NL80211_CHAN_WIDTH_160:
return 8;
case NL80211_CHAN_WIDTH_320:
return 16;
default:
return 1;
}
} else {
switch (chan_def->width) {
case NL80211_CHAN_WIDTH_20:
return 1;
case NL80211_CHAN_WIDTH_40:
return 2;
case NL80211_CHAN_WIDTH_80:
return 3;
case NL80211_CHAN_WIDTH_160:
return 4;
case NL80211_CHAN_WIDTH_320:
return 5;
default:
return 1;
}
}
}
static u16 ath12k_mac_get_6ghz_start_frequency(struct cfg80211_chan_def *chan_def)
{
u16 diff_seq;
/* It is to get the lowest channel number's center frequency of the chan.
* For example,
* bandwidth=40 MHz, center frequency is 5965, lowest channel is 1
* with center frequency 5955, its diff is 5965 - 5955 = 10.
* bandwidth=80 MHz, center frequency is 5985, lowest channel is 1
* with center frequency 5955, its diff is 5985 - 5955 = 30.
* bandwidth=160 MHz, center frequency is 6025, lowest channel is 1
* with center frequency 5955, its diff is 6025 - 5955 = 70.
* bandwidth=320 MHz, center frequency is 6105, lowest channel is 1
* with center frequency 5955, its diff is 6105 - 5955 = 70.
*/
switch (chan_def->width) {
case NL80211_CHAN_WIDTH_320:
diff_seq = 150;
break;
case NL80211_CHAN_WIDTH_160:
diff_seq = 70;
break;
case NL80211_CHAN_WIDTH_80:
diff_seq = 30;
break;
case NL80211_CHAN_WIDTH_40:
diff_seq = 10;
break;
default:
diff_seq = 0;
}
return chan_def->center_freq1 - diff_seq;
}
static u16 ath12k_mac_get_seg_freq(struct cfg80211_chan_def *chan_def,
u16 start_seq, u8 seq)
{
u16 seg_seq;
/* It is to get the center frequency of the specific bandwidth.
* start_seq means the lowest channel number's center frequency.
* seq 0/1/2/3 means 20 MHz/40 MHz/80 MHz/160 MHz.
* For example,
* lowest channel is 1, its center frequency 5955,
* center frequency is 5955 when bandwidth=20 MHz, its diff is 5955 - 5955 = 0.
* lowest channel is 1, its center frequency 5955,
* center frequency is 5965 when bandwidth=40 MHz, its diff is 5965 - 5955 = 10.
* lowest channel is 1, its center frequency 5955,
* center frequency is 5985 when bandwidth=80 MHz, its diff is 5985 - 5955 = 30.
* lowest channel is 1, its center frequency 5955,
* center frequency is 6025 when bandwidth=160 MHz, its diff is 6025 - 5955 = 70.
*/
seg_seq = 10 * (BIT(seq) - 1);
return seg_seq + start_seq;
}
static void ath12k_mac_get_psd_channel(struct ath12k *ar,
u16 step_freq,
u16 *start_freq,
u16 *center_freq,
u8 i,
struct ieee80211_channel **temp_chan,
s8 *tx_power)
{
/* It is to get the center frequency for each 20 MHz.
* For example, if the chan is 160 MHz and center frequency is 6025,
* then it include 8 channels, they are 1/5/9/13/17/21/25/29,
* channel number 1's center frequency is 5955, it is parameter start_freq.
* parameter i is the step of the 8 channels. i is 0~7 for the 8 channels.
* the channel 1/5/9/13/17/21/25/29 maps i=0/1/2/3/4/5/6/7,
* and maps its center frequency is 5955/5975/5995/6015/6035/6055/6075/6095,
* the gap is 20 for each channel, parameter step_freq means the gap.
* after get the center frequency of each channel, it is easy to find the
* struct ieee80211_channel of it and get the max_reg_power.
*/
*center_freq = *start_freq + i * step_freq;
*temp_chan = ieee80211_get_channel(ar->ah->hw->wiphy, *center_freq);
*tx_power = (*temp_chan)->max_reg_power;
}
static void ath12k_mac_get_eirp_power(struct ath12k *ar,
u16 *start_freq,
u16 *center_freq,
u8 i,
struct ieee80211_channel **temp_chan,
struct cfg80211_chan_def *def,
s8 *tx_power)
{
/* It is to get the center frequency for 20 MHz/40 MHz/80 MHz/
* 160 MHz bandwidth, and then plus 10 to the center frequency,
* it is the center frequency of a channel number.
* For example, when configured channel number is 1.
* center frequency is 5965 when bandwidth=40 MHz, after plus 10, it is 5975,
* then it is channel number 5.
* center frequency is 5985 when bandwidth=80 MHz, after plus 10, it is 5995,
* then it is channel number 9.
* center frequency is 6025 when bandwidth=160 MHz, after plus 10, it is 6035,
* then it is channel number 17.
* after get the center frequency of each channel, it is easy to find the
* struct ieee80211_channel of it and get the max_reg_power.
*/
*center_freq = ath12k_mac_get_seg_freq(def, *start_freq, i);
/* For the 20 MHz, its center frequency is same with same channel */
if (i != 0)
*center_freq += 10;
*temp_chan = ieee80211_get_channel(ar->ah->hw->wiphy, *center_freq);
*tx_power = (*temp_chan)->max_reg_power;
}
void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info;
struct ieee80211_bss_conf *bss_conf = ath12k_mac_get_link_bss_conf(arvif);
struct ieee80211_channel *chan, *temp_chan;
u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
bool is_psd_power = false, is_tpe_present = false;
s8 max_tx_power[ATH12K_NUM_PWR_LEVELS],
psd_power, tx_power, eirp_power;
u16 start_freq, center_freq;
chan = ctx->def.chan;
start_freq = ath12k_mac_get_6ghz_start_frequency(&ctx->def);
pwr_reduction = bss_conf->pwr_reduction;
if (arvif->reg_tpc_info.num_pwr_levels) {
is_tpe_present = true;
num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels;
} else {
num_pwr_levels = ath12k_mac_get_num_pwr_levels(&ctx->def);
}
for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) {
/* STA received TPE IE*/
if (is_tpe_present) {
/* local power is PSD power*/
if (chan->flags & IEEE80211_CHAN_PSD) {
/* Connecting AP is psd power */
if (reg_tpc_info->is_psd_power) {
is_psd_power = true;
ath12k_mac_get_psd_channel(ar, 20,
&start_freq,
&center_freq,
pwr_lvl_idx,
&temp_chan,
&tx_power);
psd_power = temp_chan->psd;
eirp_power = tx_power;
max_tx_power[pwr_lvl_idx] =
min_t(s8,
psd_power,
reg_tpc_info->tpe[pwr_lvl_idx]);
/* Connecting AP is not psd power */
} else {
ath12k_mac_get_eirp_power(ar,
&start_freq,
&center_freq,
pwr_lvl_idx,
&temp_chan,
&ctx->def,
&tx_power);
psd_power = temp_chan->psd;
/* convert psd power to EIRP power based
* on channel width
*/
tx_power =
min_t(s8, tx_power,
psd_power + 13 + pwr_lvl_idx * 3);
max_tx_power[pwr_lvl_idx] =
min_t(s8,
tx_power,
reg_tpc_info->tpe[pwr_lvl_idx]);
}
/* local power is not PSD power */
} else {
/* Connecting AP is psd power */
if (reg_tpc_info->is_psd_power) {
is_psd_power = true;
ath12k_mac_get_psd_channel(ar, 20,
&start_freq,
&center_freq,
pwr_lvl_idx,
&temp_chan,
&tx_power);
eirp_power = tx_power;
max_tx_power[pwr_lvl_idx] =
reg_tpc_info->tpe[pwr_lvl_idx];
/* Connecting AP is not psd power */
} else {
ath12k_mac_get_eirp_power(ar,
&start_freq,
&center_freq,
pwr_lvl_idx,
&temp_chan,
&ctx->def,
&tx_power);
max_tx_power[pwr_lvl_idx] =
min_t(s8,
tx_power,
reg_tpc_info->tpe[pwr_lvl_idx]);
}
}
/* STA not received TPE IE */
} else {
/* local power is PSD power*/
if (chan->flags & IEEE80211_CHAN_PSD) {
is_psd_power = true;
ath12k_mac_get_psd_channel(ar, 20,
&start_freq,
&center_freq,
pwr_lvl_idx,
&temp_chan,
&tx_power);
psd_power = temp_chan->psd;
eirp_power = tx_power;
max_tx_power[pwr_lvl_idx] = psd_power;
} else {
ath12k_mac_get_eirp_power(ar,
&start_freq,
&center_freq,
pwr_lvl_idx,
&temp_chan,
&ctx->def,
&tx_power);
max_tx_power[pwr_lvl_idx] = tx_power;
}
}
if (is_psd_power) {
/* If AP local power constraint is present */
if (pwr_reduction)
eirp_power = eirp_power - pwr_reduction;
/* If firmware updated max tx power is non zero, then take
* the min of firmware updated ap tx power
* and max power derived from above mentioned parameters.
*/
ath12k_dbg(ab, ATH12K_DBG_MAC,
"eirp power : %d firmware report power : %d\n",
eirp_power, ar->max_allowed_tx_power);
/* Firmware reports lower max_allowed_tx_power during vdev
* start response. In case of 6 GHz, firmware is not aware
* of EIRP power unless driver sets EIRP power through WMI
* TPC command. So radio which does not support idle power
* save can set maximum calculated EIRP power directly to
* firmware through TPC command without min comparison with
* vdev start response's max_allowed_tx_power.
*/
if (ar->max_allowed_tx_power && ab->hw_params->idle_ps)
eirp_power = min_t(s8,
eirp_power,
ar->max_allowed_tx_power);
} else {
/* If AP local power constraint is present */
if (pwr_reduction)
max_tx_power[pwr_lvl_idx] =
max_tx_power[pwr_lvl_idx] - pwr_reduction;
/* If firmware updated max tx power is non zero, then take
* the min of firmware updated ap tx power
* and max power derived from above mentioned parameters.
*/
if (ar->max_allowed_tx_power && ab->hw_params->idle_ps)
max_tx_power[pwr_lvl_idx] =
min_t(s8,
max_tx_power[pwr_lvl_idx],
ar->max_allowed_tx_power);
}
reg_tpc_info->chan_power_info[pwr_lvl_idx].chan_cfreq = center_freq;
reg_tpc_info->chan_power_info[pwr_lvl_idx].tx_power =
max_tx_power[pwr_lvl_idx];
}
reg_tpc_info->num_pwr_levels = num_pwr_levels;
reg_tpc_info->is_psd_power = is_psd_power;
reg_tpc_info->eirp_power = eirp_power;
reg_tpc_info->ap_power_type =
ath12k_reg_ap_pwr_convert(bss_conf->power_type);
}
/* ath12k_mac_parse_tx_pwr_env() - parse the Transmit Power Envelope (TPE)
 * data already parsed by mac80211 for this BSS and cache the applicable
 * power limits in arvif->reg_tpc_info.
 *
 * Both the local (AP-advertised) and regulatory-client TPE sets for the
 * current 6 GHz client type are considered; for each power level the
 * minimum of the two is taken. TPE IE power values are encoded in 0.5 dBm
 * steps, hence the division by two when storing dBm values.
 */
static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
					struct ath12k_link_vif *arvif)
{
	struct ieee80211_bss_conf *bss_conf = ath12k_mac_get_link_bss_conf(arvif);
	struct ath12k_reg_tpc_power_info *tpc_info = &arvif->reg_tpc_info;
	struct ieee80211_parsed_tpe_eirp *local_non_psd, *reg_non_psd;
	struct ieee80211_parsed_tpe_psd *local_psd, *reg_psd;
	struct ieee80211_parsed_tpe *tpe = &bss_conf->tpe;
	enum wmi_reg_6g_client_type client_type;
	struct ath12k_reg_info *reg_info;
	struct ath12k_base *ab = ar->ab;
	bool psd_valid, non_psd_valid;
	int i;

	reg_info = ab->reg_info[ar->pdev_idx];
	client_type = reg_info->client_type;

	local_psd = &tpe->psd_local[client_type];
	reg_psd = &tpe->psd_reg_client[client_type];
	local_non_psd = &tpe->max_local[client_type];
	reg_non_psd = &tpe->max_reg_client[client_type];

	/* The valid fields are booleans, so use logical OR rather than the
	 * original bitwise OR (same result, clearer intent).
	 */
	psd_valid = local_psd->valid || reg_psd->valid;
	non_psd_valid = local_non_psd->valid || reg_non_psd->valid;

	if (!psd_valid && !non_psd_valid) {
		ath12k_warn(ab,
			    "no transmit power envelope match client power type %d\n",
			    client_type);
		return;
	}

	if (psd_valid) {
		tpc_info->is_psd_power = true;

		tpc_info->num_pwr_levels = max(local_psd->count,
					       reg_psd->count);
		if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
			tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;

		for (i = 0; i < tpc_info->num_pwr_levels; i++) {
			/* TPE IE carries power in 0.5 dBm units */
			tpc_info->tpe[i] = min(local_psd->power[i],
					       reg_psd->power[i]) / 2;
			ath12k_dbg(ab, ATH12K_DBG_MAC,
				   "TPE PSD power[%d] : %d\n",
				   i, tpc_info->tpe[i]);
		}
	} else {
		tpc_info->is_psd_power = false;
		tpc_info->eirp_power = 0;

		tpc_info->num_pwr_levels = max(local_non_psd->count,
					       reg_non_psd->count);
		if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
			tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;

		for (i = 0; i < tpc_info->num_pwr_levels; i++) {
			/* TPE IE carries power in 0.5 dBm units */
			tpc_info->tpe[i] = min(local_non_psd->power[i],
					       reg_non_psd->power[i]) / 2;
			ath12k_dbg(ab, ATH12K_DBG_MAC,
				   "non PSD power[%d] : %d\n",
				   i, tpc_info->tpe[i]);
		}
	}
}
static int
ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -9756,6 +10220,11 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
"mac chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
if (ath12k_wmi_supports_6ghz_cc_ext(ar) &&
ctx->def.chan->band == NL80211_BAND_6GHZ &&
ahvif->vdev_type == WMI_VDEV_TYPE_STA)
ath12k_mac_parse_tx_pwr_env(ar, arvif);
arvif->punct_bitmap = ctx->def.punctured;
/* for some targets bss peer must be created before vdev_start */
@@ -9860,6 +10329,11 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
reinit_completion(&ar->completed_11d_scan);
ar->state_11d = ATH12K_11D_PREPARING;
}
if (ar->scan.arvif == arvif && ar->scan.state == ATH12K_SCAN_RUNNING) {
ath12k_scan_abort(ar);
ar->scan.arvif = NULL;
}
}
static int
@@ -10889,8 +11363,20 @@ void ath12k_mac_update_freq_range(struct ath12k *ar,
if (!(freq_low && freq_high))
return;
ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low);
ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high);
if (ar->freq_range.start_freq || ar->freq_range.end_freq) {
ar->freq_range.start_freq = min(ar->freq_range.start_freq,
MHZ_TO_KHZ(freq_low));
ar->freq_range.end_freq = max(ar->freq_range.end_freq,
MHZ_TO_KHZ(freq_high));
} else {
ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low);
ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high);
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac pdev %u freq limit updated. New range %u->%u MHz\n",
ar->pdev->pdev_id, KHZ_TO_MHZ(ar->freq_range.start_freq),
KHZ_TO_MHZ(ar->freq_range.end_freq));
}
static void ath12k_mac_update_ch_list(struct ath12k *ar,
@@ -11374,6 +11860,7 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
for_each_ar(ah, ar, i) {
cancel_work_sync(&ar->regd_update_work);
ath12k_debugfs_unregister(ar);
ath12k_fw_stats_reset(ar);
}
ieee80211_unregister_hw(hw);
@@ -11651,6 +12138,18 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
goto err_unregister_hw;
}
if (ar->ab->hw_params->current_cc_support && ab->new_alpha2[0]) {
struct wmi_set_current_country_arg current_cc = {};
memcpy(&current_cc.alpha2, ab->new_alpha2, 2);
memcpy(&ar->alpha2, ab->new_alpha2, 2);
ret = ath12k_wmi_send_set_current_country_cmd(ar, &current_cc);
if (ret)
ath12k_warn(ar->ab,
"failed set cc code for mac register: %d\n",
ret);
}
ath12k_fw_stats_init(ar);
ath12k_debugfs_register(ar);
}
@@ -12025,6 +12524,7 @@ int ath12k_mac_allocate(struct ath12k_hw_group *ag)
if (!ab)
continue;
ath12k_debugfs_pdev_create(ab);
ath12k_mac_set_device_defaults(ab);
total_radio += ab->num_radios;
}

View File

@@ -67,6 +67,46 @@ struct ath12k_mac_get_any_chanctx_conf_arg {
struct ieee80211_chanctx_conf *chanctx_conf;
};
/**
 * struct ath12k_chan_power_info - TPE power info per channel chunk
 * @chan_cfreq: center frequency (MHz) of the channel chunk this entry
 * describes; e.g. for 6 GHz channel 37 it is 6135 at 20 MHz width,
 * 6125 at 40 MHz, 6145 at 80 MHz and 6185 at 160 MHz
 * @tx_power: transmit power (dBm) allowed on that chunk
 */
struct ath12k_chan_power_info {
	u16 chan_cfreq;
	s8 tx_power;
};
/* ath12k supports channel widths up to 320 MHz, i.e. 16 x 20 MHz subchannels */
#define ATH12K_NUM_PWR_LEVELS  16

/**
 * struct ath12k_reg_tpc_power_info - regulatory TPC power info
 * @is_psd_power: true when the limits are PSD (power spectral density)
 * values rather than total EIRP values
 * @eirp_power: Maximum EIRP power (dBm); maintained only when
 * @is_psd_power is true
 * @ap_power_type: type of power (SP/LPI/VLP)
 * @num_pwr_levels: number of valid entries in the arrays below
 * @reg_max: Array of maximum TX power (dBm) per PSD value
 * @ap_constraint_power: AP constraint power (dBm)
 * @tpe: power values (dBm) derived from the TPE IE
 * @chan_power_info: per-chunk power info to send to firmware
 */
struct ath12k_reg_tpc_power_info {
	bool is_psd_power;
	u8 eirp_power;
	enum wmi_reg_6g_ap_type ap_power_type;
	u8 num_pwr_levels;
	u8 reg_max[ATH12K_NUM_PWR_LEVELS];
	u8 ap_constraint_power;
	s8 tpe[ATH12K_NUM_PWR_LEVELS];
	struct ath12k_chan_power_info chan_power_info[ATH12K_NUM_PWR_LEVELS];
};
extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
#define ATH12K_SCAN_11D_INTERVAL 600000
@@ -128,4 +168,7 @@ struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
int ath12k_mac_get_fw_stats(struct ath12k *ar, struct ath12k_fw_stats_req_params *param);
void ath12k_mac_update_freq_range(struct ath12k *ar,
u32 freq_low, u32 freq_high);
void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ieee80211_chanctx_conf *ctx);
#endif

View File

@@ -600,7 +600,8 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
ab->hw_params->ring_mask->rx_wbm_rel[i] ||
ab->hw_params->ring_mask->reo_status[i] ||
ab->hw_params->ring_mask->host2rxdma[i] ||
ab->hw_params->ring_mask->rx_mon_dest[i]) {
ab->hw_params->ring_mask->rx_mon_dest[i] ||
ab->hw_params->ring_mask->rx_mon_status[i]) {
num_irq = 1;
}
@@ -1733,8 +1734,6 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath12k_pci_power_down(ab, false);
ath12k_qmi_deinit_service(ab);
ath12k_core_hw_group_unassign(ab);
goto qmi_fail;
}
@@ -1742,9 +1741,10 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
cancel_work_sync(&ab->reset_work);
cancel_work_sync(&ab->dump_work);
ath12k_core_deinit(ab);
ath12k_core_hw_group_cleanup(ab->ag);
qmi_fail:
ath12k_core_deinit(ab);
ath12k_fw_unmap(ab);
ath12k_mhi_unregister(ab_pci);

View File

@@ -139,7 +139,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar, bool wait)
int num_channels = 0;
int i, ret, left;
if (wait && ar->state_11d != ATH12K_11D_IDLE) {
if (wait && ar->state_11d == ATH12K_11D_RUNNING) {
left = wait_for_completion_timeout(&ar->completed_11d_scan,
ATH12K_SCAN_TIMEOUT_HZ);
if (!left) {
@@ -265,8 +265,8 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
int ath12k_regd_update(struct ath12k *ar, bool init)
{
u32 phy_id, freq_low = 0, freq_high = 0, supported_bands, band;
struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
u32 phy_id, freq_low, freq_high, supported_bands;
struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
struct ieee80211_hw *hw = ah->hw;
struct ieee80211_regdomain *regd, *regd_copy = NULL;
@@ -276,45 +276,45 @@ int ath12k_regd_update(struct ath12k *ar, bool init)
ab = ar->ab;
supported_bands = ar->pdev->cap.supported_bands;
if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
band = NL80211_BAND_2GHZ;
} else if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && !ar->supports_6ghz) {
band = NL80211_BAND_5GHZ;
} else if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && ar->supports_6ghz) {
band = NL80211_BAND_6GHZ;
} else {
/* This condition is not expected.
*/
WARN_ON(1);
ret = -EINVAL;
goto err;
}
reg_cap = &ab->hal_reg_cap[ar->pdev_idx];
if (ab->hw_params->single_pdev_only && !ar->supports_6ghz) {
phy_id = ar->pdev->cap.band[band].phy_id;
reg_cap = &ab->hal_reg_cap[phy_id];
}
/* Possible that due to reg change, current limits for supported
* frequency changed. Update that
* frequency changed. Update it. As a first step, reset the
* previous values and then compute and set the new values.
*/
ar->freq_range.start_freq = 0;
ar->freq_range.end_freq = 0;
if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
if (ab->hw_params->single_pdev_only) {
phy_id = ar->pdev->cap.band[WMI_HOST_WLAN_2GHZ_CAP].phy_id;
reg_cap = &ab->hal_reg_cap[phy_id];
}
freq_low = max(reg_cap->low_2ghz_chan, ab->reg_freq_2ghz.start_freq);
freq_high = min(reg_cap->high_2ghz_chan, ab->reg_freq_2ghz.end_freq);
} else if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && !ar->supports_6ghz) {
freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_5ghz.start_freq);
freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_5ghz.end_freq);
} else if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && ar->supports_6ghz) {
freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_6ghz.start_freq);
freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_6ghz.end_freq);
ath12k_mac_update_freq_range(ar, freq_low, freq_high);
}
ath12k_mac_update_freq_range(ar, freq_low, freq_high);
if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && !ar->supports_6ghz) {
if (ab->hw_params->single_pdev_only) {
phy_id = ar->pdev->cap.band[WMI_HOST_WLAN_5GHZ_CAP].phy_id;
reg_cap = &ab->hal_reg_cap[phy_id];
}
ath12k_dbg(ab, ATH12K_DBG_REG, "pdev %u reg updated freq limits %u->%u MHz\n",
ar->pdev->pdev_id, freq_low, freq_high);
freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_5ghz.start_freq);
freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_5ghz.end_freq);
ath12k_mac_update_freq_range(ar, freq_low, freq_high);
}
if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && ar->supports_6ghz) {
freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_6ghz.start_freq);
freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_6ghz.end_freq);
ath12k_mac_update_freq_range(ar, freq_low, freq_high);
}
/* If one of the radios within ah has already updated the regd for
* the wiphy, then avoid setting regd again
@@ -454,129 +454,6 @@ static u32 ath12k_map_fw_phy_flags(u32 phy_flags)
return flags;
}
/* ath12k_reg_can_intersect() - check whether the frequency ranges of two
 * regulatory rules overlap.
 *
 * The ranges are treated as half-open intervals: rules that merely touch
 * at a boundary frequency are not considered to intersect.
 */
static bool
ath12k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
			 struct ieee80211_reg_rule *rule2)
{
	u32 start_freq1, end_freq1;
	u32 start_freq2, end_freq2;

	start_freq1 = rule1->freq_range.start_freq_khz;
	start_freq2 = rule2->freq_range.start_freq_khz;

	end_freq1 = rule1->freq_range.end_freq_khz;
	end_freq2 = rule2->freq_range.end_freq_khz;

	/* Overlap when either rule's start frequency falls strictly inside
	 * the other rule's range (the two clauses cover the s1 >= s2 and
	 * s2 > s1 cases respectively).
	 */
	if ((start_freq1 >= start_freq2 &&
	     start_freq1 < end_freq2) ||
	    (start_freq2 > start_freq1 &&
	     start_freq2 < end_freq1))
		return true;

	/* TODO: Should we restrict intersection feasibility
	 * based on min bandwidth of the intersected region also,
	 * say the intersected rule should have a min bandwidth
	 * of 20MHz?
	 */

	return false;
}
/* ath12k_reg_intersect_rules() - fill @new_rule with the intersection of
 * @rule1 and @rule2.
 *
 * The frequency range is clamped to the common sub-range; bandwidth,
 * antenna gain and EIRP take the more restrictive (minimum) value; the
 * flags of both rules are combined; the larger DFS CAC timeout wins.
 */
static void ath12k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
				       struct ieee80211_reg_rule *rule2,
				       struct ieee80211_reg_rule *new_rule)
{
	u32 start_freq1, end_freq1;
	u32 start_freq2, end_freq2;
	u32 freq_diff, max_bw;

	start_freq1 = rule1->freq_range.start_freq_khz;
	start_freq2 = rule2->freq_range.start_freq_khz;

	end_freq1 = rule1->freq_range.end_freq_khz;
	end_freq2 = rule2->freq_range.end_freq_khz;

	new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
						    start_freq2);
	new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);

	freq_diff = new_rule->freq_range.end_freq_khz -
			new_rule->freq_range.start_freq_khz;
	max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
		       rule2->freq_range.max_bandwidth_khz);
	/* The allowed bandwidth cannot exceed the width of the intersected range */
	new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);

	new_rule->power_rule.max_antenna_gain =
		min_t(u32, rule1->power_rule.max_antenna_gain,
		      rule2->power_rule.max_antenna_gain);

	new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
					      rule2->power_rule.max_eirp);

	/* Use the flags of both the rules */
	new_rule->flags = rule1->flags | rule2->flags;

	/* To be safe, let's use the max cac timeout of both rules */
	new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
				     rule2->dfs_cac_ms);
}
/* ath12k_regd_intersect() - build a new regdomain containing the
 * intersection of @default_regd and @curr_regd.
 *
 * A first pass counts the overlapping rule pairs so that exactly the
 * right amount of memory can be allocated; a second pass fills in the
 * intersected rules. The country code and DFS region are taken from
 * @curr_regd unchanged.
 *
 * Return: the newly allocated regdomain (allocated with GFP_ATOMIC since
 * this can run in atomic context), or NULL when no rules intersect or
 * the allocation fails. The caller owns and must free the result.
 */
static struct ieee80211_regdomain *
ath12k_regd_intersect(struct ieee80211_regdomain *default_regd,
		      struct ieee80211_regdomain *curr_regd)
{
	u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
	struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
	struct ieee80211_regdomain *new_regd = NULL;
	u8 i, j, k;

	num_old_regd_rules = default_regd->n_reg_rules;
	num_curr_regd_rules = curr_regd->n_reg_rules;
	num_new_regd_rules = 0;

	/* Find the number of intersecting rules to allocate new regd memory */
	for (i = 0; i < num_old_regd_rules; i++) {
		old_rule = default_regd->reg_rules + i;
		for (j = 0; j < num_curr_regd_rules; j++) {
			curr_rule = curr_regd->reg_rules + j;

			if (ath12k_reg_can_intersect(old_rule, curr_rule))
				num_new_regd_rules++;
		}
	}

	if (!num_new_regd_rules)
		return NULL;

	new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
			sizeof(struct ieee80211_reg_rule)),
			GFP_ATOMIC);

	if (!new_regd)
		return NULL;

	/* We set the new country and dfs region directly and only trim
	 * the freq, power, antenna gain by intersecting with the
	 * default regdomain. Also MAX of the dfs cac timeout is selected.
	 */
	new_regd->n_reg_rules = num_new_regd_rules;
	memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
	new_regd->dfs_region = curr_regd->dfs_region;
	new_rule = new_regd->reg_rules;

	/* Second pass: k indexes the output rule being written */
	for (i = 0, k = 0; i < num_old_regd_rules; i++) {
		old_rule = default_regd->reg_rules + i;
		for (j = 0; j < num_curr_regd_rules; j++) {
			curr_rule = curr_regd->reg_rules + j;

			if (ath12k_reg_can_intersect(old_rule, curr_rule))
				ath12k_reg_intersect_rules(old_rule, curr_rule,
							   (new_rule + k++));
		}
	}
	return new_regd;
}
static const char *
ath12k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
{
@@ -613,13 +490,14 @@ ath12k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
static void
ath12k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
u32 reg_flags)
s8 psd, u32 reg_flags)
{
reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
reg_rule->psd = psd;
reg_rule->flags = reg_flags;
}
@@ -641,7 +519,7 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
ath12k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
reg_rule->psd_eirp, flags);
ath12k_dbg(ab, ATH12K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -663,7 +541,7 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
ath12k_reg_update_rule(regd->reg_rules + i,
ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
reg_rule->psd_eirp, flags);
regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
@@ -688,7 +566,7 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
ath12k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
reg_rule->psd_eirp, flags);
ath12k_dbg(ab, ATH12K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -710,26 +588,67 @@ static void ath12k_reg_update_freq_range(struct ath12k_reg_freq *reg_freq,
reg_freq->end_freq = reg_rule->end_freq;
}
enum wmi_reg_6g_ap_type
ath12k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type)
{
switch (power_type) {
case IEEE80211_REG_LPI_AP:
return WMI_REG_INDOOR_AP;
case IEEE80211_REG_SP_AP:
return WMI_REG_STD_POWER_AP;
case IEEE80211_REG_VLP_AP:
return WMI_REG_VLP_AP;
default:
return WMI_REG_MAX_AP_TYPE;
}
}
struct ieee80211_regdomain *
ath12k_reg_build_regd(struct ath12k_base *ab,
struct ath12k_reg_info *reg_info, bool intersect)
struct ath12k_reg_info *reg_info,
enum wmi_vdev_type vdev_type,
enum ieee80211_ap_reg_power power_type)
{
struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
struct ath12k_reg_rule *reg_rule;
struct ieee80211_regdomain *new_regd = NULL;
struct ath12k_reg_rule *reg_rule, *reg_rule_6ghz;
u32 flags, reg_6ghz_number, max_bw_6ghz;
u8 i = 0, j = 0, k = 0;
u8 num_rules;
u16 max_bw;
u32 flags;
char alpha2[3];
num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules;
/* FIXME: Currently taking reg rules for 6G only from Indoor AP mode list.
* This can be updated to choose the combination dynamically based on AP
* type and client type, after complete 6G regulatory support is added.
*/
if (reg_info->is_ext_reg_event)
num_rules += reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP];
if (reg_info->is_ext_reg_event) {
if (vdev_type == WMI_VDEV_TYPE_STA) {
enum wmi_reg_6g_ap_type ap_type;
ap_type = ath12k_reg_ap_pwr_convert(power_type);
if (ap_type == WMI_REG_MAX_AP_TYPE)
ap_type = WMI_REG_INDOOR_AP;
reg_6ghz_number = reg_info->num_6g_reg_rules_cl
[ap_type][WMI_REG_DEFAULT_CLIENT];
if (reg_6ghz_number == 0) {
ap_type = WMI_REG_INDOOR_AP;
reg_6ghz_number = reg_info->num_6g_reg_rules_cl
[ap_type][WMI_REG_DEFAULT_CLIENT];
}
reg_rule_6ghz = reg_info->reg_rules_6g_client_ptr
[ap_type][WMI_REG_DEFAULT_CLIENT];
max_bw_6ghz = reg_info->max_bw_6g_client
[ap_type][WMI_REG_DEFAULT_CLIENT];
} else {
reg_6ghz_number = reg_info->num_6g_reg_rules_ap
[WMI_REG_INDOOR_AP];
reg_rule_6ghz =
reg_info->reg_rules_6g_ap_ptr[WMI_REG_INDOOR_AP];
max_bw_6ghz = reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP];
}
num_rules += reg_6ghz_number;
}
if (!num_rules)
goto ret;
@@ -738,20 +657,20 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
if (reg_info->dfs_region == ATH12K_DFS_REG_ETSI)
num_rules += 2;
tmp_regd = kzalloc(sizeof(*tmp_regd) +
new_regd = kzalloc(sizeof(*new_regd) +
(num_rules * sizeof(struct ieee80211_reg_rule)),
GFP_ATOMIC);
if (!tmp_regd)
if (!new_regd)
goto ret;
memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
memcpy(new_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
alpha2[2] = '\0';
tmp_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region);
new_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region);
ath12k_dbg(ab, ATH12K_DBG_REG,
"\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
alpha2, ath12k_reg_get_regdom_str(tmp_regd->dfs_region),
alpha2, ath12k_reg_get_regdom_str(new_regd->dfs_region),
reg_info->dfs_region, num_rules);
/* Reset start and end frequency for each band
@@ -788,13 +707,13 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
*/
flags = NL80211_RRF_AUTO_BW;
ath12k_reg_update_freq_range(&ab->reg_freq_5ghz, reg_rule);
} else if (reg_info->is_ext_reg_event &&
reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] &&
(k < reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP])) {
reg_rule = reg_info->reg_rules_6g_ap_ptr[WMI_REG_INDOOR_AP] + k++;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP]);
} else if (reg_info->is_ext_reg_event && reg_6ghz_number &&
(k < reg_6ghz_number)) {
reg_rule = reg_rule_6ghz + k++;
max_bw = min_t(u16, reg_rule->max_bw, max_bw_6ghz);
flags = NL80211_RRF_AUTO_BW;
if (reg_rule->psd_flag)
flags |= NL80211_RRF_PSD;
ath12k_reg_update_freq_range(&ab->reg_freq_6ghz, reg_rule);
} else {
break;
@@ -803,11 +722,11 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
flags |= ath12k_map_fw_reg_flags(reg_rule->flags);
flags |= ath12k_map_fw_phy_flags(reg_info->phybitmap);
ath12k_reg_update_rule(tmp_regd->reg_rules + i,
ath12k_reg_update_rule(new_regd->reg_rules + i,
reg_rule->start_freq,
reg_rule->end_freq, max_bw,
reg_rule->ant_gain, reg_rule->reg_power,
flags);
reg_rule->psd_eirp, flags);
/* Update dfs cac timeout if the dfs domain is ETSI and the
* new rule covers weather radar band.
@@ -818,7 +737,7 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
reg_info->dfs_region == ATH12K_DFS_REG_ETSI &&
(reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)){
ath12k_reg_update_weather_radar_band(ab, tmp_regd,
ath12k_reg_update_weather_radar_band(ab, new_regd,
reg_rule, &i,
flags, max_bw);
continue;
@@ -828,36 +747,19 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
ath12k_dbg(ab, ATH12K_DBG_REG, "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
tmp_regd->reg_rules[i].dfs_cac_ms,
new_regd->reg_rules[i].dfs_cac_ms,
flags, reg_rule->psd_flag, reg_rule->psd_eirp);
} else {
ath12k_dbg(ab, ATH12K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
tmp_regd->reg_rules[i].dfs_cac_ms,
new_regd->reg_rules[i].dfs_cac_ms,
flags);
}
}
tmp_regd->n_reg_rules = i;
if (intersect) {
default_regd = ab->default_regd[reg_info->phy_id];
/* Get a new regd by intersecting the received regd with
* our default regd.
*/
new_regd = ath12k_regd_intersect(default_regd, tmp_regd);
kfree(tmp_regd);
if (!new_regd) {
ath12k_warn(ab, "Unable to create intersected regdomain\n");
goto ret;
}
} else {
new_regd = tmp_regd;
}
new_regd->n_reg_rules = i;
ret:
return new_regd;
}
@@ -879,6 +781,105 @@ void ath12k_regd_update_work(struct work_struct *work)
}
}
void ath12k_reg_reset_reg_info(struct ath12k_reg_info *reg_info)
{
u8 i, j;
if (!reg_info)
return;
kfree(reg_info->reg_rules_2g_ptr);
kfree(reg_info->reg_rules_5g_ptr);
if (reg_info->is_ext_reg_event) {
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
kfree(reg_info->reg_rules_6g_ap_ptr[i]);
for (j = 0; j < WMI_REG_MAX_CLIENT_TYPE; j++)
kfree(reg_info->reg_rules_6g_client_ptr[i][j]);
}
}
}
/* ath12k_reg_validate_reg_info() - sanity-check regulatory info received
 * from firmware before it is used to build a regdomain.
 *
 * Return: ATH12K_REG_STATUS_VALID when the event should be processed,
 * ATH12K_REG_STATUS_DROP when it must be silently ignored, or
 * ATH12K_REG_STATUS_FALLBACK when the host cannot use it and the caller
 * should fall back to the previously applied regdomain.
 */
enum ath12k_reg_status ath12k_reg_validate_reg_info(struct ath12k_base *ab,
						    struct ath12k_reg_info *reg_info)
{
	int pdev_idx = reg_info->phy_id;

	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
		/* In case of failure to set the requested country,
		 * firmware retains the current regd. We print a failure info
		 * and return from here.
		 */
		ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
		return ATH12K_REG_STATUS_DROP;
	}

	if (pdev_idx >= ab->num_radios) {
		/* Process the event for phy0 only if single_pdev_only
		 * is true. If pdev_idx is valid but not 0, discard the
		 * event. Otherwise, it goes to fallback.
		 */
		if (ab->hw_params->single_pdev_only &&
		    pdev_idx < ab->hw_params->num_rxdma_per_pdev)
			return ATH12K_REG_STATUS_DROP;
		else
			return ATH12K_REG_STATUS_FALLBACK;
	}

	/* Avoid multiple overwrites to default regd, during core
	 * stop-start after mac registration.
	 */
	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
	    !memcmp(ab->default_regd[pdev_idx]->alpha2,
		    reg_info->alpha2, 2))
		return ATH12K_REG_STATUS_DROP;

	return ATH12K_REG_STATUS_VALID;
}
/* ath12k_reg_handle_chan_list() - build a regdomain from @reg_info and
 * either schedule it to be applied (after mac registration) or stash it
 * as the default regd to be applied during mac registration.
 *
 * Ownership of the built regd transfers to @ab (new_regd/default_regd
 * slots); any previously stored regd for this pdev is freed. The slot
 * update and dfs_region write are done under base_lock.
 *
 * Return: 0 on success, -EINVAL if the regdomain could not be built.
 */
int ath12k_reg_handle_chan_list(struct ath12k_base *ab,
				struct ath12k_reg_info *reg_info,
				enum wmi_vdev_type vdev_type,
				enum ieee80211_ap_reg_power power_type)
{
	struct ieee80211_regdomain *regd = NULL;
	int pdev_idx = reg_info->phy_id;
	struct ath12k *ar;

	regd = ath12k_reg_build_regd(ab, reg_info, vdev_type, power_type);
	if (!regd)
		return -EINVAL;

	spin_lock_bh(&ab->base_lock);
	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
		/* Once mac is registered, ar is valid and all CC events from
		 * firmware is considered to be received due to user requests
		 * currently.
		 * Free previously built regd before assigning the newly
		 * generated regd to ar. NULL pointer handling will be
		 * taken care by kfree itself.
		 */
		ar = ab->pdevs[pdev_idx].ar;
		kfree(ab->new_regd[pdev_idx]);
		ab->new_regd[pdev_idx] = regd;
		/* The actual regd update runs from the pdev's work item */
		queue_work(ab->workqueue, &ar->regd_update_work);
	} else {
		/* Multiple events for the same *ar is not expected. But we
		 * can still clear any previously stored default_regd if we
		 * are receiving this event for the same radio by mistake.
		 * NULL pointer handling will be taken care by kfree itself.
		 */
		kfree(ab->default_regd[pdev_idx]);

		/* This regd would be applied during mac registration */
		ab->default_regd[pdev_idx] = regd;
	}
	ab->dfs_region = reg_info->dfs_region;
	spin_unlock_bh(&ab->base_lock);

	return 0;
}
void ath12k_reg_init(struct ieee80211_hw *hw)
{
hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
@@ -891,6 +892,12 @@ void ath12k_reg_free(struct ath12k_base *ab)
int i;
mutex_lock(&ab->core_lock);
for (i = 0; i < MAX_RADIOS; i++) {
ath12k_reg_reset_reg_info(ab->reg_info[i]);
kfree(ab->reg_info[i]);
ab->reg_info[i] = NULL;
}
for (i = 0; i < ab->hw_params->max_radios; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);

View File

@@ -92,13 +92,29 @@ enum ath12k_reg_phy_bitmap {
ATH12K_REG_PHY_BITMAP_NO11BE = BIT(6),
};
enum ath12k_reg_status {
ATH12K_REG_STATUS_VALID,
ATH12K_REG_STATUS_DROP,
ATH12K_REG_STATUS_FALLBACK,
};
void ath12k_reg_init(struct ieee80211_hw *hw);
void ath12k_reg_free(struct ath12k_base *ab);
void ath12k_regd_update_work(struct work_struct *work);
struct ieee80211_regdomain *ath12k_reg_build_regd(struct ath12k_base *ab,
struct ath12k_reg_info *reg_info,
bool intersect);
enum wmi_vdev_type vdev_type,
enum ieee80211_ap_reg_power power_type);
int ath12k_regd_update(struct ath12k *ar, bool init);
int ath12k_reg_update_chan_list(struct ath12k *ar, bool wait);
void ath12k_reg_reset_reg_info(struct ath12k_reg_info *reg_info);
int ath12k_reg_handle_chan_list(struct ath12k_base *ab,
struct ath12k_reg_info *reg_info,
enum wmi_vdev_type vdev_type,
enum ieee80211_ap_reg_power power_type);
enum wmi_reg_6g_ap_type
ath12k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type);
enum ath12k_reg_status ath12k_reg_validate_reg_info(struct ath12k_base *ab,
struct ath12k_reg_info *reg_info);
#endif

View File

@@ -2187,9 +2187,10 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
u32 peer_legacy_rates_align;
u32 peer_ht_rates_align;
u32 peer_legacy_rates_align, eml_pad_delay, eml_trans_delay;
u32 peer_ht_rates_align, eml_trans_timeout;
int i, ret, len;
u16 eml_cap;
__le32 v;
peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
@@ -2361,6 +2362,24 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
eml_cap = arg->ml.eml_cap;
if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) {
/* Padding delay */
eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap);
ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay);
/* Transition delay */
eml_trans_delay = ieee80211_emlsr_trans_delay_in_us(eml_cap);
ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_trans_delay);
/* Transition timeout */
eml_trans_timeout = ieee80211_eml_trans_timeout_in_us(eml_cap);
ml_params->emlsr_trans_timeout_us =
cpu_to_le32(eml_trans_timeout);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer %pM emlsr padding delay %u, trans delay %u trans timeout %u",
arg->peer_mac, eml_pad_delay, eml_trans_delay,
eml_trans_timeout);
}
ptr += sizeof(*ml_params);
skip_ml_params:
@@ -2380,6 +2399,10 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ptr += sizeof(*eht_mcs);
}
/* Update MCS15 capability */
if (arg->eht_disable_mcs15)
cmd->peer_eht_ops = cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE);
tlv = ptr;
len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
/* fill ML Partner links */
@@ -2420,7 +2443,7 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
send:
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
"wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n",
cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
cmd->peer_listen_intval, cmd->peer_ht_caps,
@@ -2433,7 +2456,7 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
cmd->peer_eht_cap_phy[2]);
cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
if (ret) {
@@ -4733,6 +4756,7 @@ static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
return 0;
err:
kfree(svc_rdy_ext.mac_phy_caps);
ath12k_wmi_free_dbring_caps(ab);
return ret;
}
@@ -6101,6 +6125,7 @@ static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *s
pdev = &ab->pdevs[i];
ar = pdev->ar;
ar->state_11d = ATH12K_11D_IDLE;
ar->ah->regd_updated = false;
complete(&ar->completed_11d_scan);
}
@@ -6115,24 +6140,11 @@ static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
dev_kfree_skb(skb);
}
/* A two-character country code of "00" or "na" denotes the world
 * regulatory domain. Only the first two characters are examined.
 */
static bool ath12k_reg_is_world_alpha(char *alpha)
{
	bool is_world = alpha[0] == '0' && alpha[1] == '0';
	bool is_na = alpha[0] == 'n' && alpha[1] == 'a';

	return is_world || is_na;
}
static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ath12k_reg_info *reg_info = NULL;
struct ieee80211_regdomain *regd = NULL;
bool intersect = false;
int ret = 0, pdev_idx, i, j;
struct ath12k *ar;
struct ath12k_reg_info *reg_info;
u8 pdev_idx;
int ret;
reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
if (!reg_info) {
@@ -6141,86 +6153,52 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk
}
ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
if (ret) {
ath12k_warn(ab, "failed to extract regulatory info from received event\n");
goto fallback;
}
if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
/* In case of failure to set the requested ctry,
* fw retains the current regd. We print a failure info
* and return from here.
*/
ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
goto mem_free;
}
ret = ath12k_reg_validate_reg_info(ab, reg_info);
if (ret == ATH12K_REG_STATUS_FALLBACK) {
ath12k_warn(ab, "failed to validate reg info %d\n", ret);
/* firmware has successfully switches to new regd but host can not
* continue, so free reginfo and fallback to old regd
*/
goto mem_free;
} else if (ret == ATH12K_REG_STATUS_DROP) {
/* reg info is valid but we will not store it and
* not going to create new regd for it
*/
ret = ATH12K_REG_STATUS_VALID;
goto mem_free;
}
/* free old reg_info if it exist */
pdev_idx = reg_info->phy_id;
if (pdev_idx >= ab->num_radios) {
/* Process the event for phy0 only if single_pdev_only
* is true. If pdev_idx is valid but not 0, discard the
* event. Otherwise, it goes to fallback.
*/
if (ab->hw_params->single_pdev_only &&
pdev_idx < ab->hw_params->num_rxdma_per_pdev)
goto mem_free;
else
goto fallback;
if (ab->reg_info[pdev_idx]) {
ath12k_reg_reset_reg_info(ab->reg_info[pdev_idx]);
kfree(ab->reg_info[pdev_idx]);
}
/* Avoid multiple overwrites to default regd, during core
* stop-start after mac registration.
/* reg_info is valid, we store it for later use
* even below regd build failed
*/
if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
!memcmp(ab->default_regd[pdev_idx]->alpha2,
reg_info->alpha2, 2))
goto mem_free;
ab->reg_info[pdev_idx] = reg_info;
/* Intersect new rules with default regd if a new country setting was
* requested, i.e a default regd was already set during initialization
* and the regd coming from this event has a valid country info.
*/
if (ab->default_regd[pdev_idx] &&
!ath12k_reg_is_world_alpha((char *)
ab->default_regd[pdev_idx]->alpha2) &&
!ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
intersect = true;
regd = ath12k_reg_build_regd(ab, reg_info, intersect);
if (!regd) {
ath12k_warn(ab, "failed to build regd from reg_info\n");
ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC,
IEEE80211_REG_UNSET_AP);
if (ret) {
ath12k_warn(ab, "failed to handle chan list %d\n", ret);
goto fallback;
}
spin_lock(&ab->base_lock);
if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
/* Once mac is registered, ar is valid and all CC events from
* fw is considered to be received due to user requests
* currently.
* Free previously built regd before assigning the newly
* generated regd to ar. NULL pointer handling will be
* taken care by kfree itself.
*/
ar = ab->pdevs[pdev_idx].ar;
kfree(ab->new_regd[pdev_idx]);
ab->new_regd[pdev_idx] = regd;
queue_work(ab->workqueue, &ar->regd_update_work);
} else {
/* Multiple events for the same *ar is not expected. But we
* can still clear any previously stored default_regd if we
* are receiving this event for the same radio by mistake.
* NULL pointer handling will be taken care by kfree itself.
*/
kfree(ab->default_regd[pdev_idx]);
/* This regd would be applied during mac registration */
ab->default_regd[pdev_idx] = regd;
}
ab->dfs_region = reg_info->dfs_region;
spin_unlock(&ab->base_lock);
goto out;
goto mem_free;
mem_free:
ath12k_reg_reset_reg_info(reg_info);
kfree(reg_info);
if (ret == ATH12K_REG_STATUS_VALID)
return ret;
fallback:
/* Fallback to older reg (by sending previous country setting
@@ -6232,20 +6210,8 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk
*/
/* TODO: This is rare, but still should also be handled */
WARN_ON(1);
mem_free:
if (reg_info) {
kfree(reg_info->reg_rules_2g_ptr);
kfree(reg_info->reg_rules_5g_ptr);
if (reg_info->is_ext_reg_event) {
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
kfree(reg_info->reg_rules_6g_ap_ptr[i]);
for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
}
kfree(reg_info);
}
out:
return ret;
}
@@ -6401,13 +6367,14 @@ static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff
ar->last_wmi_vdev_start_status = 0;
status = le32_to_cpu(vdev_start_resp.status);
if (WARN_ON_ONCE(status)) {
ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
status, ath12k_wmi_vdev_resp_print(status));
ar->last_wmi_vdev_start_status = status;
}
ar->max_allowed_tx_power = (s8)le32_to_cpu(vdev_start_resp.max_allowed_tx_power);
complete(&ar->vdev_setup_done);
rcu_read_unlock();
@@ -9878,3 +9845,69 @@ int ath12k_wmi_mlo_teardown(struct ath12k *ar)
return 0;
}
/* Return true when the firmware advertises the extended regulatory
 * country-code event (WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT) and this
 * radio supports the 6 GHz band; both are required for 6 GHz regulatory
 * handling.
 */
bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar)
{
	bool cc_ext = test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
			       ar->ab->wmi_ab.svc_map);

	return cc_ext && ar->supports_6ghz;
}
/**
 * ath12k_wmi_send_vdev_set_tpc_power() - send TPC power limits to firmware
 * @ar: ath12k radio handle
 * @vdev_id: id of the vdev the power limits apply to
 * @param: regulatory TPC info (PSD/EIRP mode, 6 GHz AP power type and
 *         per-channel power levels)
 *
 * Builds a WMI_VDEV_SET_TPC_POWER_CMDID command: the fixed-parameter TLV
 * followed by an array TLV of wmi_vdev_ch_power_params entries, one per
 * power level in @param, and sends it to the firmware.
 *
 * Return: 0 on success, negative errno on failure (the skb is freed on
 * send failure).
 */
int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
				       u32 vdev_id,
				       struct ath12k_reg_tpc_power_info *param)
{
	struct wmi_vdev_set_tpc_power_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_ch_power_params *ch;
	int i, ret, len, array_len;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	u8 *ptr;

	array_len = sizeof(*ch) * param->num_pwr_levels;
	len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	/* Fixed-parameter TLV */
	cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->psd_power = cpu_to_le32(param->is_psd_power);
	cmd->eirp_power = cpu_to_le32(param->eirp_power);
	cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
		   vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);

	ptr += sizeof(*cmd);

	/* Variable-length array of per-channel power entries */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len);
	ptr += TLV_HDR_SIZE;

	ch = (struct wmi_vdev_ch_power_params *)ptr;
	for (i = 0; i < param->num_pwr_levels; i++, ch++) {
		ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO,
							sizeof(*ch));
		ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq);
		ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power);

		/* Log the host-endian source values, not the __le32 wire
		 * fields: passing ch->chan_cfreq/ch->tx_power to %d prints
		 * byte-swapped numbers on big-endian hosts and trips sparse
		 * endianness checks.
		 */
		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n",
			   param->chan_power_info[i].chan_cfreq,
			   param->chan_power_info[i].tx_power);
	}

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

View File

@@ -26,6 +26,7 @@ struct ath12k_base;
struct ath12k;
struct ath12k_link_vif;
struct ath12k_fw_stats;
struct ath12k_reg_tpc_power_info;
/* There is no signed version of __le32, so for a temporary solution come
* up with our own version. The idea is from fs/ntfs/endian.h.
@@ -386,6 +387,22 @@ enum wmi_tlv_cmd_id {
WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
WMI_VDEV_SET_ARP_STAT_CMDID,
WMI_VDEV_GET_ARP_STAT_CMDID,
WMI_VDEV_GET_TX_POWER_CMDID,
WMI_VDEV_LIMIT_OFFCHAN_CMDID,
WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID,
WMI_VDEV_CHAINMASK_CONFIG_CMDID,
WMI_VDEV_GET_BCN_RECEPTION_STATS_CMDID,
WMI_VDEV_GET_MWS_COEX_INFO_CMDID,
WMI_VDEV_DELETE_ALL_PEER_CMDID,
WMI_VDEV_BSS_MAX_IDLE_TIME_CMDID,
WMI_VDEV_AUDIO_SYNC_TRIGGER_CMDID,
WMI_VDEV_AUDIO_SYNC_QTIMER_CMDID,
WMI_VDEV_SET_PCL_CMDID,
WMI_VDEV_GET_BIG_DATA_CMDID,
WMI_VDEV_GET_BIG_DATA_P2_CMDID,
WMI_VDEV_SET_TPC_POWER_CMDID,
WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
WMI_PEER_DELETE_CMDID,
WMI_PEER_FLUSH_TIDS_CMDID,
@@ -1955,6 +1972,8 @@ enum wmi_tlv_tag {
WMI_TAG_TPC_STATS_REG_PWR_ALLOWED,
WMI_TAG_TPC_STATS_RATES_ARRAY,
WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT,
WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5,
WMI_TAG_VDEV_CH_POWER_INFO,
WMI_TAG_EHT_RATE_SET = 0x3C4,
WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
WMI_TAG_MLO_TX_SEND_PARAMS,
@@ -2201,6 +2220,8 @@ enum wmi_tlv_service {
WMI_MAX_EXT_SERVICE = 256,
WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT = 280,
WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281,
WMI_TLV_SERVICE_11BE = 289,
@@ -3755,6 +3776,7 @@ struct peer_assoc_mlo_params {
u32 ieee_link_id;
u8 num_partner_links;
struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
u16 eml_cap;
};
struct wmi_rate_set_arg {
@@ -3833,6 +3855,7 @@ struct ath12k_wmi_peer_assoc_arg {
u32 punct_bitmap;
bool is_assoc;
struct peer_assoc_mlo_params ml;
bool eht_disable_mcs15;
};
#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
@@ -4162,6 +4185,7 @@ struct wmi_vdev_start_resp_event {
};
__le32 cfgd_tx_streams;
__le32 cfgd_rx_streams;
__le32 max_allowed_tx_power;
} __packed;
/* VDEV start response status codes */
@@ -4507,6 +4531,7 @@ struct ath12k_wmi_target_cap_arg {
};
enum wmi_vdev_type {
WMI_VDEV_TYPE_UNSPEC = 0,
WMI_VDEV_TYPE_AP = 1,
WMI_VDEV_TYPE_STA = 2,
WMI_VDEV_TYPE_IBSS = 3,
@@ -5937,6 +5962,41 @@ struct wmi_tpc_stats_arg {
struct wmi_tpc_ctl_pwr_table_arg ctl_array;
};
/* Per-channel transmit-power entry carried in the TLV array that follows
 * the WMI_VDEV_SET_TPC_POWER_CMDID fixed parameters. Wire format: all
 * fields little-endian; layout is firmware ABI, do not reorder.
 */
struct wmi_vdev_ch_power_params {
	__le32 tlv_header;

	/* Channel center frequency (MHz) */
	__le32 chan_cfreq;

	/* Unit: dBm, either PSD/EIRP power for this frequency or
	 * incremental for non-PSD BW
	 */
	__le32 tx_power;
} __packed;
/* Fixed-parameter TLV of WMI_VDEV_SET_TPC_POWER_CMDID, conveying the
 * regulatory transmit-power limits for one vdev. Wire format: all fields
 * little-endian; layout is firmware ABI, do not reorder.
 */
struct wmi_vdev_set_tpc_power_cmd {
	__le32 tlv_header;

	/* Target vdev the power limits apply to */
	__le32 vdev_id;

	/* Value: 0 or 1, is PSD power or not */
	__le32 psd_power;

	/* Maximum EIRP power (dBm units), valid only if power is PSD */
	__le32 eirp_power;

	/* Type: WMI_6GHZ_REG_TYPE, used for halphy CTL lookup */
	__le32 power_type_6ghz;

	/* This fixed_param TLV is followed by the below TLVs:
	 * num_pwr_levels of wmi_vdev_ch_power_info
	 * For PSD power, it is the PSD/EIRP power of the frequency (20 MHz chunks).
	 * For non-PSD power, the power values are for 20, 40, and till
	 * BSS BW power levels.
	 * The num_pwr_levels will be checked by sw how many elements present
	 * in the variable-length array.
	 */
} __packed;
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -6131,5 +6191,9 @@ int ath12k_wmi_mlo_teardown(struct ath12k *ar);
void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
struct ath12k_fw_stats *fw_stats, u32 stats_id,
char *buf);
bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar);
int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
u32 vdev_id,
struct ath12k_reg_tpc_power_info *param);
#endif

View File

@@ -74,7 +74,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
void __iomem *mem;
struct ath_softc *sc;
struct ieee80211_hw *hw;
struct resource *res;
const struct platform_device_id *id = platform_get_device_id(pdev);
int irq;
int ret = 0;
@@ -86,16 +85,10 @@ static int ath_ahb_probe(struct platform_device *pdev)
return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "no memory resource found\n");
return -ENXIO;
}
mem = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (mem == NULL) {
mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem)) {
dev_err(&pdev->dev, "ioremap failed\n");
return -ENOMEM;
return PTR_ERR(mem);
}
irq = platform_get_irq(pdev, 0);

View File

@@ -290,6 +290,9 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
struct ath_common *common = ath9k_hw_common(priv->ah);
int slot;
if (!priv->cur_beacon_conf.enable_beacon)
return;
if (swba->beacon_pending != 0) {
priv->beacon.bmisscnt++;
if (priv->beacon.bmisscnt > BSTUCK_THRESHOLD) {

View File

@@ -366,8 +366,7 @@ static void carl9170_tx_shift_bm(struct ar9170 *ar,
if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
return;
if (!bitmap_empty(tid_info->bitmap, off))
off = find_first_bit(tid_info->bitmap, off);
off = min(off, find_first_bit(tid_info->bitmap, off));
tid_info->bsn += off;
tid_info->bsn &= 0x0fff;