Merge branch 'enable-multiple-irq-lines-support-in-airoha_eth-driver'

Lorenzo Bianconi says:

====================
Enable multiple IRQ lines support in airoha_eth driver

The EN7581 ethernet SoC supports 4 programmable IRQ lines, each composed
of 4 IRQ configuration registers used to map Tx/Rx queues. Enable
support for multiple IRQ lines.
====================

Link: https://patch.msgid.link/20250418-airoha-eth-multi-irq-v1-0-1ab0083ca3c1@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2025-04-23 17:03:54 -07:00
3 changed files with 283 additions and 94 deletions

View File

@@ -34,37 +34,40 @@ u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
return val;
}
static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
u32 clear, u32 set)
static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
int index, u32 clear, u32 set)
{
struct airoha_qdma *qdma = irq_bank->qdma;
int bank = irq_bank - &qdma->irq_banks[0];
unsigned long flags;
if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
return;
spin_lock_irqsave(&qdma->irq_lock, flags);
spin_lock_irqsave(&irq_bank->irq_lock, flags);
qdma->irqmask[index] &= ~clear;
qdma->irqmask[index] |= set;
airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
irq_bank->irqmask[index] &= ~clear;
irq_bank->irqmask[index] |= set;
airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
irq_bank->irqmask[index]);
/* Read irq_enable register in order to guarantee the update above
* completes in the spinlock critical section.
*/
airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));
spin_unlock_irqrestore(&qdma->irq_lock, flags);
spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
}
static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
u32 mask)
static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
int index, u32 mask)
{
airoha_qdma_set_irqmask(qdma, index, 0, mask);
airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
}
static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
u32 mask)
static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
int index, u32 mask)
{
airoha_qdma_set_irqmask(qdma, index, mask, 0);
airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
}
static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
@@ -739,9 +742,20 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
done += cur;
} while (cur && done < budget);
if (done < budget && napi_complete(napi))
airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
RX_DONE_INT_MASK);
if (done < budget && napi_complete(napi)) {
struct airoha_qdma *qdma = q->qdma;
int i, qid = q - &qdma->q_rx[0];
int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
: QDMA_INT_REG_IDX2;
for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
continue;
airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
BIT(qid % RX_DONE_HIGH_OFFSET));
}
}
return done;
}
@@ -944,7 +958,7 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
}
if (done < budget && napi_complete(napi))
airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(id));
return done;
@@ -1174,14 +1188,24 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
int i;
/* clear pending irqs */
for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
/* clear pending irqs */
airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
/* setup irqs */
airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
/* setup rx irqs */
airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
}
/* setup tx irqs */
airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
TX_COHERENT_HIGH_INT_MASK);
/* setup irq binding */
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
@@ -1226,30 +1250,39 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
struct airoha_qdma *qdma = dev_instance;
u32 intr[ARRAY_SIZE(qdma->irqmask)];
struct airoha_irq_bank *irq_bank = dev_instance;
struct airoha_qdma *qdma = irq_bank->qdma;
u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
int i;
for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
for (i = 0; i < ARRAY_SIZE(intr); i++) {
intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
intr[i] &= qdma->irqmask[i];
intr[i] &= irq_bank->irqmask[i];
airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
}
if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
return IRQ_NONE;
if (intr[1] & RX_DONE_INT_MASK) {
airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
RX_DONE_INT_MASK);
rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
if (rx_intr1) {
airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
rx_intr_mask |= rx_intr1;
}
for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
if (!qdma->q_rx[i].ndesc)
continue;
rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
if (rx_intr2) {
airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
rx_intr_mask |= (rx_intr2 << 16);
}
if (intr[1] & BIT(i))
napi_schedule(&qdma->q_rx[i].napi);
}
for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
if (!qdma->q_rx[i].ndesc)
continue;
if (rx_intr_mask & BIT(i))
napi_schedule(&qdma->q_rx[i].napi);
}
if (intr[0] & INT_TX_MASK) {
@@ -1257,7 +1290,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
if (!(intr[0] & TX_DONE_INT_MASK(i)))
continue;
airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(i));
napi_schedule(&qdma->q_tx_irq[i].napi);
}
@@ -1266,6 +1299,39 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
return IRQ_HANDLED;
}
static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
struct airoha_qdma *qdma)
{
struct airoha_eth *eth = qdma->eth;
int i, id = qdma - &eth->qdma[0];
for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
int err, irq_index = 4 * id + i;
const char *name;
spin_lock_init(&irq_bank->irq_lock);
irq_bank->qdma = qdma;
irq_bank->irq = platform_get_irq(pdev, irq_index);
if (irq_bank->irq < 0)
return irq_bank->irq;
name = devm_kasprintf(eth->dev, GFP_KERNEL,
KBUILD_MODNAME ".%d", irq_index);
if (!name)
return -ENOMEM;
err = devm_request_irq(eth->dev, irq_bank->irq,
airoha_irq_handler, IRQF_SHARED, name,
irq_bank);
if (err)
return err;
}
return 0;
}
static int airoha_qdma_init(struct platform_device *pdev,
struct airoha_eth *eth,
struct airoha_qdma *qdma)
@@ -1273,9 +1339,7 @@ static int airoha_qdma_init(struct platform_device *pdev,
int err, id = qdma - &eth->qdma[0];
const char *res;
spin_lock_init(&qdma->irq_lock);
qdma->eth = eth;
res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
if (!res)
return -ENOMEM;
@@ -1285,12 +1349,7 @@ static int airoha_qdma_init(struct platform_device *pdev,
return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
"failed to iomap qdma%d regs\n", id);
qdma->irq = platform_get_irq(pdev, 4 * id);
if (qdma->irq < 0)
return qdma->irq;
err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, qdma);
err = airoha_qdma_init_irq_banks(pdev, qdma);
if (err)
return err;
@@ -2784,7 +2843,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth,
dev->features |= dev->hw_features;
dev->vlan_features = dev->hw_features;
dev->dev.of_node = np;
dev->irq = qdma->irq;
dev->irq = qdma->irq_banks[0].irq;
SET_NETDEV_DEV(dev, eth->dev);
/* reserve hw queues for HTB offloading */

View File

@@ -17,6 +17,7 @@
#define AIROHA_MAX_NUM_GDM_PORTS 4
#define AIROHA_MAX_NUM_QDMA 2
#define AIROHA_MAX_NUM_IRQ_BANKS 4
#define AIROHA_MAX_DSA_PORTS 7
#define AIROHA_MAX_NUM_RSTS 3
#define AIROHA_MAX_NUM_XSI_RSTS 5
@@ -452,17 +453,34 @@ struct airoha_flow_table_entry {
unsigned long cookie;
};
struct airoha_qdma {
struct airoha_eth *eth;
void __iomem *regs;
/* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
#define RX_IRQ0_BANK_PIN_MASK 0x839f
#define RX_IRQ1_BANK_PIN_MASK 0x7fe00000
#define RX_IRQ2_BANK_PIN_MASK 0x20
#define RX_IRQ3_BANK_PIN_MASK 0x40
#define RX_IRQ_BANK_PIN_MASK(_n) \
(((_n) == 3) ? RX_IRQ3_BANK_PIN_MASK : \
((_n) == 2) ? RX_IRQ2_BANK_PIN_MASK : \
((_n) == 1) ? RX_IRQ1_BANK_PIN_MASK : \
RX_IRQ0_BANK_PIN_MASK)
struct airoha_irq_bank {
struct airoha_qdma *qdma;
/* protect concurrent irqmask accesses */
spinlock_t irq_lock;
u32 irqmask[QDMA_INT_REG_MAX];
int irq;
};
struct airoha_qdma {
struct airoha_eth *eth;
void __iomem *regs;
atomic_t users;
struct airoha_irq_bank irq_banks[AIROHA_MAX_NUM_IRQ_BANKS];
struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
struct airoha_queue q_tx[AIROHA_NUM_TX_RING];

View File

@@ -423,11 +423,12 @@
((_n) == 2) ? 0x0720 : \
((_n) == 1) ? 0x0024 : 0x0020)
#define REG_INT_ENABLE(_n) \
(((_n) == 4) ? 0x0750 : \
((_n) == 3) ? 0x0744 : \
((_n) == 2) ? 0x0740 : \
((_n) == 1) ? 0x002c : 0x0028)
#define REG_INT_ENABLE(_b, _n) \
(((_n) == 4) ? 0x0750 + ((_b) << 5) : \
((_n) == 3) ? 0x0744 + ((_b) << 5) : \
((_n) == 2) ? 0x0740 + ((_b) << 5) : \
((_n) == 1) ? 0x002c + ((_b) << 3) : \
0x0028 + ((_b) << 3))
/* QDMA_CSR_INT_ENABLE1 */
#define RX15_COHERENT_INT_MASK BIT(31)
@@ -462,6 +463,26 @@
#define IRQ0_FULL_INT_MASK BIT(1)
#define IRQ0_INT_MASK BIT(0)
#define RX_COHERENT_LOW_INT_MASK \
(RX15_COHERENT_INT_MASK | RX14_COHERENT_INT_MASK | \
RX13_COHERENT_INT_MASK | RX12_COHERENT_INT_MASK | \
RX11_COHERENT_INT_MASK | RX10_COHERENT_INT_MASK | \
RX9_COHERENT_INT_MASK | RX8_COHERENT_INT_MASK | \
RX7_COHERENT_INT_MASK | RX6_COHERENT_INT_MASK | \
RX5_COHERENT_INT_MASK | RX4_COHERENT_INT_MASK | \
RX3_COHERENT_INT_MASK | RX2_COHERENT_INT_MASK | \
RX1_COHERENT_INT_MASK | RX0_COHERENT_INT_MASK)
#define RX_COHERENT_LOW_OFFSET __ffs(RX_COHERENT_LOW_INT_MASK)
#define INT_RX0_MASK(_n) \
(((_n) << RX_COHERENT_LOW_OFFSET) & RX_COHERENT_LOW_INT_MASK)
#define TX_COHERENT_LOW_INT_MASK \
(TX7_COHERENT_INT_MASK | TX6_COHERENT_INT_MASK | \
TX5_COHERENT_INT_MASK | TX4_COHERENT_INT_MASK | \
TX3_COHERENT_INT_MASK | TX2_COHERENT_INT_MASK | \
TX1_COHERENT_INT_MASK | TX0_COHERENT_INT_MASK)
#define TX_DONE_INT_MASK(_n) \
((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
: IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
@@ -470,17 +491,6 @@
(IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
#define INT_IDX0_MASK \
(TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
RX15_COHERENT_INT_MASK | INT_TX_MASK)
/* QDMA_CSR_INT_ENABLE2 */
#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
@@ -515,19 +525,121 @@
#define RX1_DONE_INT_MASK BIT(1)
#define RX0_DONE_INT_MASK BIT(0)
#define RX_DONE_INT_MASK \
(RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
RX15_DONE_INT_MASK)
#define INT_IDX1_MASK \
(RX_DONE_INT_MASK | \
RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
RX15_NO_CPU_DSCP_INT_MASK)
#define RX_NO_CPU_DSCP_LOW_INT_MASK \
(RX15_NO_CPU_DSCP_INT_MASK | RX14_NO_CPU_DSCP_INT_MASK | \
RX13_NO_CPU_DSCP_INT_MASK | RX12_NO_CPU_DSCP_INT_MASK | \
RX11_NO_CPU_DSCP_INT_MASK | RX10_NO_CPU_DSCP_INT_MASK | \
RX9_NO_CPU_DSCP_INT_MASK | RX8_NO_CPU_DSCP_INT_MASK | \
RX7_NO_CPU_DSCP_INT_MASK | RX6_NO_CPU_DSCP_INT_MASK | \
RX5_NO_CPU_DSCP_INT_MASK | RX4_NO_CPU_DSCP_INT_MASK | \
RX3_NO_CPU_DSCP_INT_MASK | RX2_NO_CPU_DSCP_INT_MASK | \
RX1_NO_CPU_DSCP_INT_MASK | RX0_NO_CPU_DSCP_INT_MASK)
#define RX_DONE_LOW_INT_MASK \
(RX15_DONE_INT_MASK | RX14_DONE_INT_MASK | \
RX13_DONE_INT_MASK | RX12_DONE_INT_MASK | \
RX11_DONE_INT_MASK | RX10_DONE_INT_MASK | \
RX9_DONE_INT_MASK | RX8_DONE_INT_MASK | \
RX7_DONE_INT_MASK | RX6_DONE_INT_MASK | \
RX5_DONE_INT_MASK | RX4_DONE_INT_MASK | \
RX3_DONE_INT_MASK | RX2_DONE_INT_MASK | \
RX1_DONE_INT_MASK | RX0_DONE_INT_MASK)
#define RX_NO_CPU_DSCP_LOW_OFFSET __ffs(RX_NO_CPU_DSCP_LOW_INT_MASK)
#define INT_RX1_MASK(_n) \
((((_n) << RX_NO_CPU_DSCP_LOW_OFFSET) & RX_NO_CPU_DSCP_LOW_INT_MASK) | \
(RX_DONE_LOW_INT_MASK & (_n)))
/* QDMA_CSR_INT_ENABLE3 */
#define RX31_NO_CPU_DSCP_INT_MASK BIT(31)
#define RX30_NO_CPU_DSCP_INT_MASK BIT(30)
#define RX29_NO_CPU_DSCP_INT_MASK BIT(29)
#define RX28_NO_CPU_DSCP_INT_MASK BIT(28)
#define RX27_NO_CPU_DSCP_INT_MASK BIT(27)
#define RX26_NO_CPU_DSCP_INT_MASK BIT(26)
#define RX25_NO_CPU_DSCP_INT_MASK BIT(25)
#define RX24_NO_CPU_DSCP_INT_MASK BIT(24)
#define RX23_NO_CPU_DSCP_INT_MASK BIT(23)
#define RX22_NO_CPU_DSCP_INT_MASK BIT(22)
#define RX21_NO_CPU_DSCP_INT_MASK BIT(21)
#define RX20_NO_CPU_DSCP_INT_MASK BIT(20)
#define RX19_NO_CPU_DSCP_INT_MASK BIT(19)
#define RX18_NO_CPU_DSCP_INT_MASK BIT(18)
#define RX17_NO_CPU_DSCP_INT_MASK BIT(17)
#define RX16_NO_CPU_DSCP_INT_MASK BIT(16)
#define RX31_DONE_INT_MASK BIT(15)
#define RX30_DONE_INT_MASK BIT(14)
#define RX29_DONE_INT_MASK BIT(13)
#define RX28_DONE_INT_MASK BIT(12)
#define RX27_DONE_INT_MASK BIT(11)
#define RX26_DONE_INT_MASK BIT(10)
#define RX25_DONE_INT_MASK BIT(9)
#define RX24_DONE_INT_MASK BIT(8)
#define RX23_DONE_INT_MASK BIT(7)
#define RX22_DONE_INT_MASK BIT(6)
#define RX21_DONE_INT_MASK BIT(5)
#define RX20_DONE_INT_MASK BIT(4)
#define RX19_DONE_INT_MASK BIT(3)
#define RX18_DONE_INT_MASK BIT(2)
#define RX17_DONE_INT_MASK BIT(1)
#define RX16_DONE_INT_MASK BIT(0)
#define RX_NO_CPU_DSCP_HIGH_INT_MASK \
(RX31_NO_CPU_DSCP_INT_MASK | RX30_NO_CPU_DSCP_INT_MASK | \
RX29_NO_CPU_DSCP_INT_MASK | RX28_NO_CPU_DSCP_INT_MASK | \
RX27_NO_CPU_DSCP_INT_MASK | RX26_NO_CPU_DSCP_INT_MASK | \
RX25_NO_CPU_DSCP_INT_MASK | RX24_NO_CPU_DSCP_INT_MASK | \
RX23_NO_CPU_DSCP_INT_MASK | RX22_NO_CPU_DSCP_INT_MASK | \
RX21_NO_CPU_DSCP_INT_MASK | RX20_NO_CPU_DSCP_INT_MASK | \
RX19_NO_CPU_DSCP_INT_MASK | RX18_NO_CPU_DSCP_INT_MASK | \
RX17_NO_CPU_DSCP_INT_MASK | RX16_NO_CPU_DSCP_INT_MASK)
#define RX_DONE_HIGH_INT_MASK \
(RX31_DONE_INT_MASK | RX30_DONE_INT_MASK | \
RX29_DONE_INT_MASK | RX28_DONE_INT_MASK | \
RX27_DONE_INT_MASK | RX26_DONE_INT_MASK | \
RX25_DONE_INT_MASK | RX24_DONE_INT_MASK | \
RX23_DONE_INT_MASK | RX22_DONE_INT_MASK | \
RX21_DONE_INT_MASK | RX20_DONE_INT_MASK | \
RX19_DONE_INT_MASK | RX18_DONE_INT_MASK | \
RX17_DONE_INT_MASK | RX16_DONE_INT_MASK)
#define RX_DONE_INT_MASK (RX_DONE_HIGH_INT_MASK | RX_DONE_LOW_INT_MASK)
#define RX_DONE_HIGH_OFFSET fls(RX_DONE_HIGH_INT_MASK)
#define INT_RX2_MASK(_n) \
((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) | \
(((_n) >> RX_DONE_HIGH_OFFSET) & RX_DONE_HIGH_INT_MASK))
/* QDMA_CSR_INT_ENABLE4 */
#define RX31_COHERENT_INT_MASK BIT(31)
#define RX30_COHERENT_INT_MASK BIT(30)
#define RX29_COHERENT_INT_MASK BIT(29)
#define RX28_COHERENT_INT_MASK BIT(28)
#define RX27_COHERENT_INT_MASK BIT(27)
#define RX26_COHERENT_INT_MASK BIT(26)
#define RX25_COHERENT_INT_MASK BIT(25)
#define RX24_COHERENT_INT_MASK BIT(24)
#define RX23_COHERENT_INT_MASK BIT(23)
#define RX22_COHERENT_INT_MASK BIT(22)
#define RX21_COHERENT_INT_MASK BIT(21)
#define RX20_COHERENT_INT_MASK BIT(20)
#define RX19_COHERENT_INT_MASK BIT(19)
#define RX18_COHERENT_INT_MASK BIT(18)
#define RX17_COHERENT_INT_MASK BIT(17)
#define RX16_COHERENT_INT_MASK BIT(16)
#define RX_COHERENT_HIGH_INT_MASK \
(RX31_COHERENT_INT_MASK | RX30_COHERENT_INT_MASK | \
RX29_COHERENT_INT_MASK | RX28_COHERENT_INT_MASK | \
RX27_COHERENT_INT_MASK | RX26_COHERENT_INT_MASK | \
RX25_COHERENT_INT_MASK | RX24_COHERENT_INT_MASK | \
RX23_COHERENT_INT_MASK | RX22_COHERENT_INT_MASK | \
RX21_COHERENT_INT_MASK | RX20_COHERENT_INT_MASK | \
RX19_COHERENT_INT_MASK | RX18_COHERENT_INT_MASK | \
RX17_COHERENT_INT_MASK | RX16_COHERENT_INT_MASK)
#define INT_RX3_MASK(_n) (RX_COHERENT_HIGH_INT_MASK & (_n))
/* QDMA_CSR_INT_ENABLE5 */
#define TX31_COHERENT_INT_MASK BIT(31)
@@ -555,19 +667,19 @@
#define TX9_COHERENT_INT_MASK BIT(9)
#define TX8_COHERENT_INT_MASK BIT(8)
#define INT_IDX4_MASK \
(TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
#define TX_COHERENT_HIGH_INT_MASK \
(TX31_COHERENT_INT_MASK | TX30_COHERENT_INT_MASK | \
TX29_COHERENT_INT_MASK | TX28_COHERENT_INT_MASK | \
TX27_COHERENT_INT_MASK | TX26_COHERENT_INT_MASK | \
TX25_COHERENT_INT_MASK | TX24_COHERENT_INT_MASK | \
TX23_COHERENT_INT_MASK | TX22_COHERENT_INT_MASK | \
TX21_COHERENT_INT_MASK | TX20_COHERENT_INT_MASK | \
TX19_COHERENT_INT_MASK | TX18_COHERENT_INT_MASK | \
TX17_COHERENT_INT_MASK | TX16_COHERENT_INT_MASK | \
TX15_COHERENT_INT_MASK | TX14_COHERENT_INT_MASK | \
TX13_COHERENT_INT_MASK | TX12_COHERENT_INT_MASK | \
TX11_COHERENT_INT_MASK | TX10_COHERENT_INT_MASK | \
TX9_COHERENT_INT_MASK | TX8_COHERENT_INT_MASK)
#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)