Merge branch 'introduce-flowtable-hw-offloading-in-airoha_eth-driver'

Lorenzo Bianconi says:

====================
Introduce flowtable hw offloading in airoha_eth driver

Introduce netfilter flowtable integration in the airoha_eth driver to
offload 5-tuple flower rules learned by the PPE module if the user
accelerates them using an nft configuration similar to the one reported
below:

table inet filter {
	flowtable ft {
		hook ingress priority filter
		devices = { lan1, lan2, lan3, lan4, eth1 }
		flags offload;
	}
	chain forward {
		type filter hook forward priority filter; policy accept;
		meta l4proto { tcp, udp } flow add @ft
	}
}

The Packet Processor Engine (PPE) module available on the EN7581 SoC
populates the PPE table with 5-tuple flower rules learned from traffic
forwarded between the GDM ports connected to the Packet Switch Engine
(PSE) module. The airoha_eth driver configures and collects data from
the PPE module via a Network Processor Unit (NPU) RISC-V module
available on the EN7581 SoC.
Move the airoha_eth driver to a dedicated folder
(drivers/net/ethernet/airoha).
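
As a rough overview, the ethernet driver binds to the NPU and brings up
the PPE as in the condensed excerpt below, taken from
airoha_ppe_offload_setup() in airoha_ppe.c further down in this series
(the SRAM flush step is omitted here for brevity):

	static int airoha_ppe_offload_setup(struct airoha_eth *eth)
	{
		struct airoha_npu *npu = airoha_ppe_npu_get(eth);
		int err;

		if (IS_ERR(npu))
			return PTR_ERR(npu);

		/* let the NPU firmware initialize hw NAT (PPE) support */
		err = npu->ops.ppe_init(npu);
		if (err)
			goto error_npu_put;

		/* program PPE table base, aging, hash and MTU registers */
		airoha_ppe_hw_init(eth->ppe);

		/* publish the NPU handle to the datapath (RCU protected) */
		rcu_assign_pointer(eth->npu, npu);
		synchronize_rcu();

		return 0;

	error_npu_put:
		airoha_npu_put(npu);
		return err;
	}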

v7: https://lore.kernel.org/r/20250224-airoha-en7581-flowtable-offload-v7-0-b4a22ad8364e@kernel.org
v6: https://lore.kernel.org/r/20250221-airoha-en7581-flowtable-offload-v6-0-d593af0e9487@kernel.org
v5: https://lore.kernel.org/r/20250217-airoha-en7581-flowtable-offload-v5-0-28be901cb735@kernel.org
v4: https://lore.kernel.org/r/20250213-airoha-en7581-flowtable-offload-v4-0-b69ca16d74db@kernel.org
v3: https://lore.kernel.org/r/20250209-airoha-en7581-flowtable-offload-v3-0-dba60e755563@kernel.org
v2: https://lore.kernel.org/r/20250207-airoha-en7581-flowtable-offload-v2-0-3a2239692a67@kernel.org
v1: https://lore.kernel.org/r/20250205-airoha-en7581-flowtable-offload-v1-0-d362cfa97b01@kernel.org
====================

Link: https://patch.msgid.link/20250228-airoha-en7581-flowtable-offload-v8-0-01dc1653f46e@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Committed by Paolo Abeni on 2025-03-04 13:22:12 +01:00
17 changed files with 3423 additions and 1001 deletions


@@ -63,6 +63,14 @@ properties:
"#size-cells":
const: 0
airoha,npu:
$ref: /schemas/types.yaml#/definitions/phandle
description:
Phandle to the node used to configure the NPU module.
The Airoha Network Processor Unit (NPU) provides a configuration
interface to implement hardware flow offloading by programming the
Packet Processor Engine (PPE) flow table.
patternProperties:
"^ethernet@[1-4]$":
type: object
@@ -132,6 +140,8 @@ examples:
<GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
airoha,npu = <&npu>;
#address-cells = <1>;
#size-cells = <0>;


@@ -0,0 +1,84 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/airoha,en7581-npu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Airoha Network Processor Unit for EN7581 SoC
maintainers:
- Lorenzo Bianconi <lorenzo@kernel.org>
description:
The Airoha Network Processor Unit (NPU) provides a configuration interface
to implement wired and wireless hardware flow offloading by programming the
Packet Processor Engine (PPE) flow table.
properties:
compatible:
enum:
- airoha,en7581-npu
reg:
maxItems: 1
interrupts:
items:
- description: mbox host irq line
- description: watchdog0 irq line
- description: watchdog1 irq line
- description: watchdog2 irq line
- description: watchdog3 irq line
- description: watchdog4 irq line
- description: watchdog5 irq line
- description: watchdog6 irq line
- description: watchdog7 irq line
- description: wlan irq line0
- description: wlan irq line1
- description: wlan irq line2
- description: wlan irq line3
- description: wlan irq line4
- description: wlan irq line5
memory-region:
maxItems: 1
description:
Memory used to store NPU firmware binary.
required:
- compatible
- reg
- interrupts
- memory-region
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
npu@1e900000 {
compatible = "airoha,en7581-npu";
reg = <0 0x1e900000 0 0x313000>;
interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
memory-region = <&npu_binary>;
};
};


@@ -2586,6 +2586,11 @@ mt7531_setup_common(struct dsa_switch *ds)
/* Allow mirroring frames received on the local port (monitor port). */
mt7530_set(priv, MT753X_AGC, LOCAL_EN);
/* Enable Special Tag for rx frames */
if (priv->id == ID_EN7581)
mt7530_write(priv, MT753X_CPORT_SPTAG_CFG,
CPORT_SW2FE_STAG_EN | CPORT_FE2SW_STAG_EN);
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)


@@ -627,6 +627,10 @@ enum mt7531_xtal_fsel {
#define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16)
#define MT7531_EXT_P_MDIO_12 (2 << 16)
#define MT753X_CPORT_SPTAG_CFG 0x7c10
#define CPORT_SW2FE_STAG_EN BIT(1)
#define CPORT_FE2SW_STAG_EN BIT(0)
/* Registers for LED GPIO control (MT7530 only)
* All registers follow this pattern:
* [ 2: 0] port 0


@@ -20,6 +20,8 @@ source "drivers/net/ethernet/actions/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
source "drivers/net/ethernet/aeroflex/Kconfig"
source "drivers/net/ethernet/agere/Kconfig"
source "drivers/net/ethernet/airoha/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/alacritech/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"


@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
obj-$(CONFIG_NET_VENDOR_ADI) += adi/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
obj-$(CONFIG_NET_VENDOR_AIROHA) += airoha/
obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/


@@ -0,0 +1,27 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_AIROHA
bool "Airoha devices"
depends on ARCH_AIROHA || COMPILE_TEST
help
If you have an Airoha SoC with ethernet, say Y.
if NET_VENDOR_AIROHA
config NET_AIROHA_NPU
tristate "Airoha NPU support"
select WANT_DEV_COREDUMP
select REGMAP_MMIO
help
This driver supports the Airoha Network Processor Unit (NPU)
available on the Airoha SoC family.
config NET_AIROHA
tristate "Airoha SoC Gigabit Ethernet support"
depends on NET_DSA || !NET_DSA
select NET_AIROHA_NPU
select PAGE_POOL
help
This driver supports the gigabit ethernet MACs in the
Airoha SoC family.
endif #NET_VENDOR_AIROHA
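
For reference, a .config fragment that enables the new options on an
EN7581 platform might look like the following (NET_AIROHA pulls in
NET_AIROHA_NPU via select):

	CONFIG_NET_VENDOR_AIROHA=y
	CONFIG_NET_AIROHA=m
	CONFIG_NET_AIROHA_NPU=m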


@@ -0,0 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Makefile for the Airoha SoCs built-in ethernet MACs
#
obj-$(CONFIG_NET_AIROHA) += airoha-eth.o
airoha-eth-y := airoha_eth.o airoha_ppe.o
airoha-eth-$(CONFIG_DEBUG_FS) += airoha_ppe_debugfs.o
obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o


@@ -0,0 +1,551 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2024 AIROHA Inc
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
*/
#ifndef AIROHA_ETH_H
#define AIROHA_ETH_H
#include <linux/debugfs.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/reset.h>
#include <net/dsa.h>
#define AIROHA_MAX_NUM_GDM_PORTS 4
#define AIROHA_MAX_NUM_QDMA 2
#define AIROHA_MAX_DSA_PORTS 7
#define AIROHA_MAX_NUM_RSTS 3
#define AIROHA_MAX_NUM_XSI_RSTS 5
#define AIROHA_MAX_MTU 2000
#define AIROHA_MAX_PACKET_SIZE 2048
#define AIROHA_NUM_QOS_CHANNELS 4
#define AIROHA_NUM_QOS_QUEUES 8
#define AIROHA_NUM_TX_RING 32
#define AIROHA_NUM_RX_RING 32
#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
AIROHA_NUM_QOS_CHANNELS)
#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
#define AIROHA_FE_MC_MAX_VLAN_PORT 16
#define AIROHA_NUM_TX_IRQ 2
#define HW_DSCP_NUM 2048
#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
#define TX_DSCP_NUM 1024
#define RX_DSCP_NUM(_n) \
((_n) == 2 ? 128 : \
(_n) == 11 ? 128 : \
(_n) == 15 ? 128 : \
(_n) == 0 ? 1024 : 16)
#define PSE_RSV_PAGES 128
#define PSE_QUEUE_RSV_PAGES 64
#define QDMA_METER_IDX(_n) ((_n) & 0xff)
#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
#define PPE_NUM 2
#define PPE1_SRAM_NUM_ENTRIES (8 * 1024)
#define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES)
#define PPE_DRAM_NUM_ENTRIES (16 * 1024)
#define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES)
#define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1)
#define PPE_ENTRY_SIZE 80
#define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10))
#define MTK_HDR_LEN 4
#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
enum {
QDMA_INT_REG_IDX0,
QDMA_INT_REG_IDX1,
QDMA_INT_REG_IDX2,
QDMA_INT_REG_IDX3,
QDMA_INT_REG_IDX4,
QDMA_INT_REG_MAX
};
enum {
HSGMII_LAN_PCIE0_SRCPORT = 0x16,
HSGMII_LAN_PCIE1_SRCPORT,
HSGMII_LAN_ETH_SRCPORT,
HSGMII_LAN_USB_SRCPORT,
};
enum {
XSI_PCIE0_VIP_PORT_MASK = BIT(22),
XSI_PCIE1_VIP_PORT_MASK = BIT(23),
XSI_USB_VIP_PORT_MASK = BIT(25),
XSI_ETH_VIP_PORT_MASK = BIT(24),
};
enum {
DEV_STATE_INITIALIZED,
};
enum {
CDM_CRSN_QSEL_Q1 = 1,
CDM_CRSN_QSEL_Q5 = 5,
CDM_CRSN_QSEL_Q6 = 6,
CDM_CRSN_QSEL_Q15 = 15,
};
enum {
CRSN_08 = 0x8,
CRSN_21 = 0x15, /* KA */
CRSN_22 = 0x16, /* hit bind and force route to CPU */
CRSN_24 = 0x18,
CRSN_25 = 0x19,
};
enum {
FE_PSE_PORT_CDM1,
FE_PSE_PORT_GDM1,
FE_PSE_PORT_GDM2,
FE_PSE_PORT_GDM3,
FE_PSE_PORT_PPE1,
FE_PSE_PORT_CDM2,
FE_PSE_PORT_CDM3,
FE_PSE_PORT_CDM4,
FE_PSE_PORT_PPE2,
FE_PSE_PORT_GDM4,
FE_PSE_PORT_CDM5,
FE_PSE_PORT_DROP = 0xf,
};
enum tx_sched_mode {
TC_SCH_WRR8,
TC_SCH_SP,
TC_SCH_WRR7,
TC_SCH_WRR6,
TC_SCH_WRR5,
TC_SCH_WRR4,
TC_SCH_WRR3,
TC_SCH_WRR2,
};
enum trtcm_param_type {
TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
TRTCM_TOKEN_RATE_MODE,
TRTCM_BUCKETSIZE_SHIFT_MODE,
TRTCM_BUCKET_COUNTER_MODE,
};
enum trtcm_mode_type {
TRTCM_COMMIT_MODE,
TRTCM_PEAK_MODE,
};
enum trtcm_param {
TRTCM_TICK_SEL = BIT(0),
TRTCM_PKT_MODE = BIT(1),
TRTCM_METER_MODE = BIT(2),
};
#define MIN_TOKEN_SIZE 4096
#define MAX_TOKEN_SIZE_OFFSET 17
#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
struct airoha_queue_entry {
union {
void *buf;
struct sk_buff *skb;
};
dma_addr_t dma_addr;
u16 dma_len;
};
struct airoha_queue {
struct airoha_qdma *qdma;
/* protect concurrent queue accesses */
spinlock_t lock;
struct airoha_queue_entry *entry;
struct airoha_qdma_desc *desc;
u16 head;
u16 tail;
int queued;
int ndesc;
int free_thr;
int buf_size;
struct napi_struct napi;
struct page_pool *page_pool;
};
struct airoha_tx_irq_queue {
struct airoha_qdma *qdma;
struct napi_struct napi;
int size;
u32 *q;
};
struct airoha_hw_stats {
/* protect concurrent hw_stats accesses */
spinlock_t lock;
struct u64_stats_sync syncp;
/* get_stats64 */
u64 rx_ok_pkts;
u64 tx_ok_pkts;
u64 rx_ok_bytes;
u64 tx_ok_bytes;
u64 rx_multicast;
u64 rx_errors;
u64 rx_drops;
u64 tx_drops;
u64 rx_crc_error;
u64 rx_over_errors;
/* ethtool stats */
u64 tx_broadcast;
u64 tx_multicast;
u64 tx_len[7];
u64 rx_broadcast;
u64 rx_fragment;
u64 rx_jabber;
u64 rx_len[7];
};
enum {
PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
};
enum {
AIROHA_FOE_STATE_INVALID,
AIROHA_FOE_STATE_UNBIND,
AIROHA_FOE_STATE_BIND,
AIROHA_FOE_STATE_FIN
};
enum {
PPE_PKT_TYPE_IPV4_HNAPT = 0,
PPE_PKT_TYPE_IPV4_ROUTE = 1,
PPE_PKT_TYPE_BRIDGE = 2,
PPE_PKT_TYPE_IPV4_DSLITE = 3,
PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
PPE_PKT_TYPE_IPV6_6RD = 7,
};
#define AIROHA_FOE_MAC_SMAC_ID GENMASK(20, 16)
#define AIROHA_FOE_MAC_PPPOE_ID GENMASK(15, 0)
struct airoha_foe_mac_info_common {
u16 vlan1;
u16 etype;
u32 dest_mac_hi;
u16 vlan2;
u16 dest_mac_lo;
u32 src_mac_hi;
};
struct airoha_foe_mac_info {
struct airoha_foe_mac_info_common common;
u16 pppoe_id;
u16 src_mac_lo;
};
#define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24)
#define AIROHA_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
#define AIROHA_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
#define AIROHA_FOE_IB1_BIND_STATIC BIT(31)
#define AIROHA_FOE_IB1_BIND_UDP BIT(30)
#define AIROHA_FOE_IB1_BIND_STATE GENMASK(29, 28)
#define AIROHA_FOE_IB1_BIND_PACKET_TYPE GENMASK(27, 25)
#define AIROHA_FOE_IB1_BIND_TTL BIT(24)
#define AIROHA_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
#define AIROHA_FOE_IB1_BIND_PPPOE BIT(22)
#define AIROHA_FOE_IB1_BIND_VPM GENMASK(21, 20)
#define AIROHA_FOE_IB1_BIND_VLAN_LAYER GENMASK(19, 16)
#define AIROHA_FOE_IB1_BIND_KEEPALIVE BIT(15)
#define AIROHA_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
#define AIROHA_FOE_IB2_DSCP GENMASK(31, 24)
#define AIROHA_FOE_IB2_PORT_AG GENMASK(23, 13)
#define AIROHA_FOE_IB2_PCP BIT(12)
#define AIROHA_FOE_IB2_MULTICAST BIT(11)
#define AIROHA_FOE_IB2_FAST_PATH BIT(10)
#define AIROHA_FOE_IB2_PSE_QOS BIT(9)
#define AIROHA_FOE_IB2_PSE_PORT GENMASK(8, 5)
#define AIROHA_FOE_IB2_NBQ GENMASK(4, 0)
#define AIROHA_FOE_ACTDP GENMASK(31, 24)
#define AIROHA_FOE_SHAPER_ID GENMASK(23, 16)
#define AIROHA_FOE_CHANNEL GENMASK(15, 11)
#define AIROHA_FOE_QID GENMASK(10, 8)
#define AIROHA_FOE_DPI BIT(7)
#define AIROHA_FOE_TUNNEL BIT(6)
#define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0)
struct airoha_foe_bridge {
u32 dest_mac_hi;
u16 src_mac_hi;
u16 dest_mac_lo;
u32 src_mac_lo;
u32 ib2;
u32 rsv[5];
u32 data;
struct airoha_foe_mac_info l2;
};
struct airoha_foe_ipv4_tuple {
u32 src_ip;
u32 dest_ip;
union {
struct {
u16 dest_port;
u16 src_port;
};
struct {
u8 protocol;
u8 _pad[3]; /* fill with 0xa5a5a5 */
};
u32 ports;
};
};
struct airoha_foe_ipv4 {
struct airoha_foe_ipv4_tuple orig_tuple;
u32 ib2;
struct airoha_foe_ipv4_tuple new_tuple;
u32 rsv[2];
u32 data;
struct airoha_foe_mac_info l2;
};
struct airoha_foe_ipv4_dslite {
struct airoha_foe_ipv4_tuple ip4;
u32 ib2;
u8 flow_label[3];
u8 priority;
u32 rsv[4];
u32 data;
struct airoha_foe_mac_info l2;
};
struct airoha_foe_ipv6 {
u32 src_ip[4];
u32 dest_ip[4];
union {
struct {
u16 dest_port;
u16 src_port;
};
struct {
u8 protocol;
u8 pad[3];
};
u32 ports;
};
u32 data;
u32 ib2;
struct airoha_foe_mac_info_common l2;
};
struct airoha_foe_entry {
union {
struct {
u32 ib1;
union {
struct airoha_foe_bridge bridge;
struct airoha_foe_ipv4 ipv4;
struct airoha_foe_ipv4_dslite dslite;
struct airoha_foe_ipv6 ipv6;
DECLARE_FLEX_ARRAY(u32, d);
};
};
u8 data[PPE_ENTRY_SIZE];
};
};
struct airoha_flow_data {
struct ethhdr eth;
union {
struct {
__be32 src_addr;
__be32 dst_addr;
} v4;
struct {
struct in6_addr src_addr;
struct in6_addr dst_addr;
} v6;
};
__be16 src_port;
__be16 dst_port;
struct {
struct {
u16 id;
__be16 proto;
} hdr[2];
u8 num;
} vlan;
struct {
u16 sid;
u8 num;
} pppoe;
};
struct airoha_flow_table_entry {
struct hlist_node list;
struct airoha_foe_entry data;
u32 hash;
struct rhash_head node;
unsigned long cookie;
};
struct airoha_qdma {
struct airoha_eth *eth;
void __iomem *regs;
/* protect concurrent irqmask accesses */
spinlock_t irq_lock;
u32 irqmask[QDMA_INT_REG_MAX];
int irq;
atomic_t users;
struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
/* descriptor and packet buffers for qdma hw forward */
struct {
void *desc;
void *q;
} hfwd;
};
struct airoha_gdm_port {
struct airoha_qdma *qdma;
struct net_device *dev;
int id;
struct airoha_hw_stats stats;
DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
/* qos stats counters */
u64 cpu_tx_packets;
u64 fwd_tx_packets;
struct metadata_dst *dsa_meta[AIROHA_MAX_DSA_PORTS];
};
#define AIROHA_RXD4_PPE_CPU_REASON GENMASK(20, 16)
#define AIROHA_RXD4_FOE_ENTRY GENMASK(15, 0)
struct airoha_ppe {
struct airoha_eth *eth;
void *foe;
dma_addr_t foe_dma;
struct hlist_head *foe_flow;
u16 foe_check_time[PPE_NUM_ENTRIES];
struct dentry *debugfs_dir;
};
struct airoha_eth {
struct device *dev;
unsigned long state;
void __iomem *fe_regs;
struct airoha_npu __rcu *npu;
struct airoha_ppe *ppe;
struct rhashtable flow_table;
struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
struct net_device *napi_dev;
struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
};
u32 airoha_rr(void __iomem *base, u32 offset);
void airoha_wr(void __iomem *base, u32 offset, u32 val);
u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val);
#define airoha_fe_rr(eth, offset) \
airoha_rr((eth)->fe_regs, (offset))
#define airoha_fe_wr(eth, offset, val) \
airoha_wr((eth)->fe_regs, (offset), (val))
#define airoha_fe_rmw(eth, offset, mask, val) \
airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
#define airoha_fe_set(eth, offset, val) \
airoha_rmw((eth)->fe_regs, (offset), 0, (val))
#define airoha_fe_clear(eth, offset, val) \
airoha_rmw((eth)->fe_regs, (offset), (val), 0)
#define airoha_qdma_rr(qdma, offset) \
airoha_rr((qdma)->regs, (offset))
#define airoha_qdma_wr(qdma, offset, val) \
airoha_wr((qdma)->regs, (offset), (val))
#define airoha_qdma_rmw(qdma, offset, mask, val) \
airoha_rmw((qdma)->regs, (offset), (mask), (val))
#define airoha_qdma_set(qdma, offset, val) \
airoha_rmw((qdma)->regs, (offset), 0, (val))
#define airoha_qdma_clear(qdma, offset, val) \
airoha_rmw((qdma)->regs, (offset), (val), 0)
void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash);
int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv);
int airoha_ppe_init(struct airoha_eth *eth);
void airoha_ppe_deinit(struct airoha_eth *eth);
struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
u32 hash);
#ifdef CONFIG_DEBUG_FS
int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
#else
static inline int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
{
return 0;
}
#endif
#endif /* AIROHA_ETH_H */


@@ -0,0 +1,520 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2025 AIROHA Inc
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
*/
#include <linux/devcoredump.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/regmap.h>
#include "airoha_npu.h"
#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000
#define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE 0x10000
#define NPU_DUMP_SIZE 512
#define REG_NPU_LOCAL_SRAM 0x0
#define NPU_PC_BASE_ADDR 0x305000
#define REG_PC_DBG(_n) (0x305000 + ((_n) * 0x100))
#define NPU_CLUSTER_BASE_ADDR 0x306000
#define REG_CR_BOOT_TRIGGER (NPU_CLUSTER_BASE_ADDR + 0x000)
#define REG_CR_BOOT_CONFIG (NPU_CLUSTER_BASE_ADDR + 0x004)
#define REG_CR_BOOT_BASE(_n) (NPU_CLUSTER_BASE_ADDR + 0x020 + ((_n) << 2))
#define NPU_MBOX_BASE_ADDR 0x30c000
#define REG_CR_MBOX_INT_STATUS (NPU_MBOX_BASE_ADDR + 0x000)
#define MBOX_INT_STATUS_MASK BIT(8)
#define REG_CR_MBOX_INT_MASK(_n) (NPU_MBOX_BASE_ADDR + 0x004 + ((_n) << 2))
#define REG_CR_MBQ0_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x030 + ((_n) << 2))
#define REG_CR_MBQ8_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
#define REG_CR_NPU_MIB(_n) (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
#define NPU_TIMER_BASE_ADDR 0x310100
#define REG_WDT_TIMER_CTRL(_n) (NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
#define WDT_EN_MASK BIT(25)
#define WDT_INTR_MASK BIT(21)
enum {
NPU_OP_SET = 1,
NPU_OP_SET_NO_WAIT,
NPU_OP_GET,
NPU_OP_GET_NO_WAIT,
};
enum {
NPU_FUNC_WIFI,
NPU_FUNC_TUNNEL,
NPU_FUNC_NOTIFY,
NPU_FUNC_DBA,
NPU_FUNC_TR471,
NPU_FUNC_PPE,
};
enum {
NPU_MBOX_ERROR,
NPU_MBOX_SUCCESS,
};
enum {
PPE_FUNC_SET_WAIT,
PPE_FUNC_SET_WAIT_HWNAT_INIT,
PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
PPE_FUNC_SET_WAIT_API,
};
enum {
PPE2_SRAM_SET_ENTRY,
PPE_SRAM_SET_ENTRY,
PPE_SRAM_SET_VAL,
PPE_SRAM_RESET_VAL,
};
enum {
QDMA_WAN_ETHER = 1,
QDMA_WAN_PON_XDSL,
};
#define MBOX_MSG_FUNC_ID GENMASK(14, 11)
#define MBOX_MSG_STATIC_BUF BIT(5)
#define MBOX_MSG_STATUS GENMASK(4, 2)
#define MBOX_MSG_DONE BIT(1)
#define MBOX_MSG_WAIT_RSP BIT(0)
#define PPE_TYPE_L2B_IPV4 2
#define PPE_TYPE_L2B_IPV4_IPV6 3
struct ppe_mbox_data {
u32 func_type;
u32 func_id;
union {
struct {
u8 cds;
u8 xpon_hal_api;
u8 wan_xsi;
u8 ct_joyme4;
int ppe_type;
int wan_mode;
int wan_sel;
} init_info;
struct {
int func_id;
u32 size;
u32 data;
} set_info;
};
};
static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
void *p, int size)
{
u16 core = 0; /* FIXME */
u32 val, offset = core << 4;
dma_addr_t dma_addr;
void *addr;
int ret;
addr = kmemdup(p, size, GFP_ATOMIC);
if (!addr)
return -ENOMEM;
dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE);
ret = dma_mapping_error(npu->dev, dma_addr);
if (ret)
goto out;
spin_lock_bh(&npu->cores[core].lock);
regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(0) + offset, dma_addr);
regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(1) + offset, size);
regmap_read(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, &val);
regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, val + 1);
val = FIELD_PREP(MBOX_MSG_FUNC_ID, func_id) | MBOX_MSG_WAIT_RSP;
regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(3) + offset, val);
ret = regmap_read_poll_timeout_atomic(npu->regmap,
REG_CR_MBQ0_CTRL(3) + offset,
val, (val & MBOX_MSG_DONE),
100, 100 * MSEC_PER_SEC);
if (!ret && FIELD_GET(MBOX_MSG_STATUS, val) != NPU_MBOX_SUCCESS)
ret = -EINVAL;
spin_unlock_bh(&npu->cores[core].lock);
dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
out:
kfree(addr);
return ret;
}
static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
struct reserved_mem *rmem)
{
const struct firmware *fw;
void __iomem *addr;
int ret;
ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_RV32, dev);
if (ret)
return ret == -ENOENT ? -EPROBE_DEFER : ret;
if (fw->size > NPU_EN7581_FIRMWARE_RV32_MAX_SIZE) {
dev_err(dev, "%s: fw size too overlimit (%zu)\n",
NPU_EN7581_FIRMWARE_RV32, fw->size);
ret = -E2BIG;
goto out;
}
addr = devm_ioremap(dev, rmem->base, rmem->size);
if (!addr) {
ret = -ENOMEM;
goto out;
}
memcpy_toio(addr, fw->data, fw->size);
release_firmware(fw);
ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_DATA, dev);
if (ret)
return ret == -ENOENT ? -EPROBE_DEFER : ret;
if (fw->size > NPU_EN7581_FIRMWARE_DATA_MAX_SIZE) {
dev_err(dev, "%s: fw size too overlimit (%zu)\n",
NPU_EN7581_FIRMWARE_DATA, fw->size);
ret = -E2BIG;
goto out;
}
memcpy_toio(base + REG_NPU_LOCAL_SRAM, fw->data, fw->size);
out:
release_firmware(fw);
return ret;
}
static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance)
{
struct airoha_npu *npu = npu_instance;
/* clear mbox interrupt status */
regmap_write(npu->regmap, REG_CR_MBOX_INT_STATUS,
MBOX_INT_STATUS_MASK);
/* acknowledge npu */
regmap_update_bits(npu->regmap, REG_CR_MBQ8_CTRL(3),
MBOX_MSG_STATUS | MBOX_MSG_DONE, MBOX_MSG_DONE);
return IRQ_HANDLED;
}
static void airoha_npu_wdt_work(struct work_struct *work)
{
struct airoha_npu_core *core;
struct airoha_npu *npu;
void *dump;
u32 val[3];
int c;
core = container_of(work, struct airoha_npu_core, wdt_work);
npu = core->npu;
dump = vzalloc(NPU_DUMP_SIZE);
if (!dump)
return;
c = core - &npu->cores[0];
regmap_bulk_read(npu->regmap, REG_PC_DBG(c), val, ARRAY_SIZE(val));
snprintf(dump, NPU_DUMP_SIZE, "PC: %08x SP: %08x LR: %08x\n",
val[0], val[1], val[2]);
dev_coredumpv(npu->dev, dump, NPU_DUMP_SIZE, GFP_KERNEL);
}
static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance)
{
struct airoha_npu_core *core = core_instance;
struct airoha_npu *npu = core->npu;
int c = core - &npu->cores[0];
u32 val;
regmap_set_bits(npu->regmap, REG_WDT_TIMER_CTRL(c), WDT_INTR_MASK);
if (!regmap_read(npu->regmap, REG_WDT_TIMER_CTRL(c), &val) &&
FIELD_GET(WDT_EN_MASK, val))
schedule_work(&core->wdt_work);
return IRQ_HANDLED;
}
static int airoha_npu_ppe_init(struct airoha_npu *npu)
{
struct ppe_mbox_data ppe_data = {
.func_type = NPU_OP_SET,
.func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT,
.init_info = {
.ppe_type = PPE_TYPE_L2B_IPV4_IPV6,
.wan_mode = QDMA_WAN_ETHER,
},
};
return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
sizeof(struct ppe_mbox_data));
}
static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
{
struct ppe_mbox_data ppe_data = {
.func_type = NPU_OP_SET,
.func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
};
return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
sizeof(struct ppe_mbox_data));
}
static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
dma_addr_t foe_addr,
int sram_num_entries)
{
struct ppe_mbox_data ppe_data = {
.func_type = NPU_OP_SET,
.func_id = PPE_FUNC_SET_WAIT_API,
.set_info = {
.func_id = PPE_SRAM_RESET_VAL,
.data = foe_addr,
.size = sram_num_entries,
},
};
return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
sizeof(struct ppe_mbox_data));
}
static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
dma_addr_t foe_addr,
u32 entry_size, u32 hash, bool ppe2)
{
struct ppe_mbox_data ppe_data = {
.func_type = NPU_OP_SET,
.func_id = PPE_FUNC_SET_WAIT_API,
.set_info = {
.data = foe_addr,
.size = entry_size,
},
};
int err;
ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
: PPE_SRAM_SET_ENTRY;
err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
sizeof(struct ppe_mbox_data));
if (err)
return err;
ppe_data.set_info.func_id = PPE_SRAM_SET_VAL;
ppe_data.set_info.data = hash;
ppe_data.set_info.size = sizeof(u32);
return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
sizeof(struct ppe_mbox_data));
}
struct airoha_npu *airoha_npu_get(struct device *dev)
{
struct platform_device *pdev;
struct device_node *np;
struct airoha_npu *npu;
np = of_parse_phandle(dev->of_node, "airoha,npu", 0);
if (!np)
return ERR_PTR(-ENODEV);
pdev = of_find_device_by_node(np);
of_node_put(np);
if (!pdev) {
dev_err(dev, "cannot find device node %s\n", np->name);
return ERR_PTR(-ENODEV);
}
if (!try_module_get(THIS_MODULE)) {
dev_err(dev, "failed to get the device driver module\n");
npu = ERR_PTR(-ENODEV);
goto error_pdev_put;
}
npu = platform_get_drvdata(pdev);
if (!npu) {
npu = ERR_PTR(-ENODEV);
goto error_module_put;
}
if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
dev_err(&pdev->dev,
"failed to create device link to consumer %s\n",
dev_name(dev));
npu = ERR_PTR(-EINVAL);
goto error_module_put;
}
return npu;
error_module_put:
module_put(THIS_MODULE);
error_pdev_put:
platform_device_put(pdev);
return npu;
}
EXPORT_SYMBOL_GPL(airoha_npu_get);
void airoha_npu_put(struct airoha_npu *npu)
{
module_put(THIS_MODULE);
put_device(npu->dev);
}
EXPORT_SYMBOL_GPL(airoha_npu_put);
static const struct of_device_id of_airoha_npu_match[] = {
{ .compatible = "airoha,en7581-npu" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_airoha_npu_match);
static const struct regmap_config regmap_config = {
.name = "npu",
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.disable_locking = true,
};
static int airoha_npu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct reserved_mem *rmem;
struct airoha_npu *npu;
struct device_node *np;
void __iomem *base;
int i, irq, err;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
npu = devm_kzalloc(dev, sizeof(*npu), GFP_KERNEL);
if (!npu)
return -ENOMEM;
npu->dev = dev;
npu->ops.ppe_init = airoha_npu_ppe_init;
npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
if (IS_ERR(npu->regmap))
return PTR_ERR(npu->regmap);
np = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!np)
return -ENODEV;
rmem = of_reserved_mem_lookup(np);
of_node_put(np);
if (!rmem)
return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
err = devm_request_irq(dev, irq, airoha_npu_mbox_handler,
IRQF_SHARED, "airoha-npu-mbox", npu);
if (err)
return err;
for (i = 0; i < ARRAY_SIZE(npu->cores); i++) {
struct airoha_npu_core *core = &npu->cores[i];
spin_lock_init(&core->lock);
core->npu = npu;
irq = platform_get_irq(pdev, i + 1);
if (irq < 0)
return irq;
err = devm_request_irq(dev, irq, airoha_npu_wdt_handler,
IRQF_SHARED, "airoha-npu-wdt", core);
if (err)
return err;
INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
}
err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (err)
return err;
err = airoha_npu_run_firmware(dev, base, rmem);
if (err)
return dev_err_probe(dev, err, "failed to run npu firmware\n");
regmap_write(npu->regmap, REG_CR_NPU_MIB(10),
rmem->base + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
regmap_write(npu->regmap, REG_CR_NPU_MIB(11), 0x40000); /* SRAM 256K */
regmap_write(npu->regmap, REG_CR_NPU_MIB(12), 0);
regmap_write(npu->regmap, REG_CR_NPU_MIB(21), 1);
msleep(100);
/* setting booting address */
for (i = 0; i < NPU_NUM_CORES; i++)
regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), rmem->base);
usleep_range(1000, 2000);
/* enable NPU cores */
/* do not start core3 since it is used for WiFi offloading */
regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xf7);
regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
msleep(100);
platform_set_drvdata(pdev, npu);
return 0;
}
static void airoha_npu_remove(struct platform_device *pdev)
{
struct airoha_npu *npu = platform_get_drvdata(pdev);
int i;
for (i = 0; i < ARRAY_SIZE(npu->cores); i++)
cancel_work_sync(&npu->cores[i].wdt_work);
}
static struct platform_driver airoha_npu_driver = {
.probe = airoha_npu_probe,
.remove = airoha_npu_remove,
.driver = {
.name = "airoha-npu",
.of_match_table = of_airoha_npu_match,
},
};
module_platform_driver(airoha_npu_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_DESCRIPTION("Airoha Network Processor Unit driver");


@@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2025 AIROHA Inc
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
*/
#define NPU_NUM_CORES 8
struct airoha_npu {
struct device *dev;
struct regmap *regmap;
struct airoha_npu_core {
struct airoha_npu *npu;
/* protect concurrent npu memory accesses */
spinlock_t lock;
struct work_struct wdt_work;
} cores[NPU_NUM_CORES];
struct {
int (*ppe_init)(struct airoha_npu *npu);
int (*ppe_deinit)(struct airoha_npu *npu);
int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
dma_addr_t foe_addr,
int sram_num_entries);
int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
dma_addr_t foe_addr,
u32 entry_size, u32 hash,
bool ppe2);
} ops;
};
struct airoha_npu *airoha_npu_get(struct device *dev);
void airoha_npu_put(struct airoha_npu *npu);


@@ -0,0 +1,910 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2025 AIROHA Inc
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
*/
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/ipv6.h>
#include <net/pkt_cls.h>
#include "airoha_npu.h"
#include "airoha_regs.h"
#include "airoha_eth.h"
static DEFINE_MUTEX(flow_offload_mutex);
static DEFINE_SPINLOCK(ppe_lock);
static const struct rhashtable_params airoha_flow_table_params = {
.head_offset = offsetof(struct airoha_flow_table_entry, node),
.key_offset = offsetof(struct airoha_flow_table_entry, cookie),
.key_len = sizeof(unsigned long),
.automatic_shrinking = true,
};
static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
{
return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
}
static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
{
u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);
return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
}
static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
{
u32 sram_tb_size, sram_num_entries, dram_num_entries;
struct airoha_eth *eth = ppe->eth;
int i;
sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
for (i = 0; i < PPE_NUM; i++) {
int p;
airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
ppe->foe_dma + sram_tb_size);
airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
PPE_BIND_AGE0_DELTA_NON_L4 |
PPE_BIND_AGE0_DELTA_UDP,
FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
PPE_BIND_AGE1_DELTA_TCP_FIN |
PPE_BIND_AGE1_DELTA_TCP,
FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
PPE_SRAM_TABLE_EN_MASK |
PPE_SRAM_HASH1_EN_MASK |
PPE_DRAM_TABLE_EN_MASK |
PPE_SRAM_HASH0_MODE_MASK |
PPE_SRAM_HASH1_MODE_MASK |
PPE_DRAM_HASH0_MODE_MASK |
PPE_DRAM_HASH1_MODE_MASK,
FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));
airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
PPE_TB_CFG_SEARCH_MISS_MASK |
PPE_TB_ENTRY_SIZE_MASK,
FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
FP0_EGRESS_MTU_MASK |
FP1_EGRESS_MTU_MASK,
FIELD_PREP(FP0_EGRESS_MTU_MASK,
AIROHA_MAX_MTU) |
FIELD_PREP(FP1_EGRESS_MTU_MASK,
AIROHA_MAX_MTU));
}
if (airoha_ppe2_is_enabled(eth)) {
sram_num_entries =
PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES);
airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
PPE_SRAM_TB_NUM_ENTRY_MASK |
PPE_DRAM_TB_NUM_ENTRY_MASK,
FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
sram_num_entries) |
FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
dram_num_entries));
airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
PPE_SRAM_TB_NUM_ENTRY_MASK |
PPE_DRAM_TB_NUM_ENTRY_MASK,
FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
sram_num_entries) |
FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
dram_num_entries));
} else {
sram_num_entries =
PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES);
airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
PPE_SRAM_TB_NUM_ENTRY_MASK |
PPE_DRAM_TB_NUM_ENTRY_MASK,
FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
sram_num_entries) |
FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
dram_num_entries));
}
}
static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
{
void *dest = eth + act->mangle.offset;
const void *src = &act->mangle.val;
if (act->mangle.offset > 8)
return;
if (act->mangle.mask == 0xffff) {
src += 2;
dest += 2;
}
memcpy(dest, src, act->mangle.mask ? 2 : 4);
}
static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
struct airoha_flow_data *data)
{
u32 val = be32_to_cpu((__force __be32)act->mangle.val);
switch (act->mangle.offset) {
case 0:
if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
data->dst_port = cpu_to_be16(val);
else
data->src_port = cpu_to_be16(val >> 16);
break;
case 2:
data->dst_port = cpu_to_be16(val);
break;
default:
return -EINVAL;
}
return 0;
}
static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
struct airoha_flow_data *data)
{
__be32 *dest;
switch (act->mangle.offset) {
case offsetof(struct iphdr, saddr):
dest = &data->v4.src_addr;
break;
case offsetof(struct iphdr, daddr):
dest = &data->v4.dst_addr;
break;
default:
return -EINVAL;
}
memcpy(dest, &act->mangle.val, sizeof(u32));
return 0;
}
static int airoha_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
struct dsa_port *dp = dsa_port_from_netdev(*dev);
if (IS_ERR(dp))
return -ENODEV;
*dev = dsa_port_to_conduit(dp);
return dp->index;
#else
return -ENODEV;
#endif
}
static int airoha_ppe_foe_entry_prepare(struct airoha_foe_entry *hwe,
struct net_device *dev, int type,
struct airoha_flow_data *data,
int l4proto)
{
int dsa_port = airoha_get_dsa_port(&dev);
struct airoha_foe_mac_info_common *l2;
u32 qdata, ports_pad, val;
memset(hwe, 0, sizeof(*hwe));
val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
AIROHA_FOE_IB1_BIND_TTL;
hwe->ib1 = val;
val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
AIROHA_FOE_IB2_PSE_QOS;
if (dsa_port >= 0)
val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
if (dev) {
struct airoha_gdm_port *port = netdev_priv(dev);
u8 pse_port;
if (dsa_port >= 0)
pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
else
pse_port = 2; /* uplink relies on GDM2 loopback */
val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
}
if (is_multicast_ether_addr(data->eth.h_dest))
val |= AIROHA_FOE_IB2_MULTICAST;
ports_pad = 0xa5a5a500 | (l4proto & 0xff);
if (type == PPE_PKT_TYPE_IPV4_ROUTE)
hwe->ipv4.orig_tuple.ports = ports_pad;
if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
hwe->ipv6.ports = ports_pad;
qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
if (type == PPE_PKT_TYPE_BRIDGE) {
hwe->bridge.dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
hwe->bridge.dest_mac_lo =
get_unaligned_be16(data->eth.h_dest + 4);
hwe->bridge.src_mac_hi =
get_unaligned_be16(data->eth.h_source);
hwe->bridge.src_mac_lo =
get_unaligned_be32(data->eth.h_source + 2);
hwe->bridge.data = qdata;
hwe->bridge.ib2 = val;
l2 = &hwe->bridge.l2.common;
} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
hwe->ipv6.data = qdata;
hwe->ipv6.ib2 = val;
l2 = &hwe->ipv6.l2;
} else {
hwe->ipv4.data = qdata;
hwe->ipv4.ib2 = val;
l2 = &hwe->ipv4.l2.common;
}
l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
hwe->ipv4.l2.src_mac_lo =
get_unaligned_be16(data->eth.h_source + 4);
} else {
l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf);
}
if (data->vlan.num) {
l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
l2->vlan1 = data->vlan.hdr[0].id;
if (data->vlan.num == 2)
l2->vlan2 = data->vlan.hdr[1].id;
} else if (dsa_port >= 0) {
l2->etype = BIT(15) | BIT(dsa_port);
} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
l2->etype = ETH_P_IPV6;
} else {
l2->etype = ETH_P_IP;
}
return 0;
}
static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
struct airoha_flow_data *data,
bool egress)
{
int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
struct airoha_foe_ipv4_tuple *t;
switch (type) {
case PPE_PKT_TYPE_IPV4_HNAPT:
if (egress) {
t = &hwe->ipv4.new_tuple;
break;
}
fallthrough;
case PPE_PKT_TYPE_IPV4_DSLITE:
case PPE_PKT_TYPE_IPV4_ROUTE:
t = &hwe->ipv4.orig_tuple;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
t->src_ip = be32_to_cpu(data->v4.src_addr);
t->dest_ip = be32_to_cpu(data->v4.dst_addr);
if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
t->src_port = be16_to_cpu(data->src_port);
t->dest_port = be16_to_cpu(data->dst_port);
}
return 0;
}
static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
struct airoha_flow_data *data)
{
int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
u32 *src, *dest;
switch (type) {
case PPE_PKT_TYPE_IPV6_ROUTE_5T:
case PPE_PKT_TYPE_IPV6_6RD:
hwe->ipv6.src_port = be16_to_cpu(data->src_port);
hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
fallthrough;
case PPE_PKT_TYPE_IPV6_ROUTE_3T:
src = hwe->ipv6.src_ip;
dest = hwe->ipv6.dest_ip;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);
return 0;
}
static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
{
int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
u32 hash, hv1, hv2, hv3;
switch (type) {
case PPE_PKT_TYPE_IPV4_ROUTE:
case PPE_PKT_TYPE_IPV4_HNAPT:
hv1 = hwe->ipv4.orig_tuple.ports;
hv2 = hwe->ipv4.orig_tuple.dest_ip;
hv3 = hwe->ipv4.orig_tuple.src_ip;
break;
case PPE_PKT_TYPE_IPV6_ROUTE_3T:
case PPE_PKT_TYPE_IPV6_ROUTE_5T:
hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
hv1 ^= hwe->ipv6.ports;
hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
hv2 ^= hwe->ipv6.dest_ip[0];
hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
hv3 ^= hwe->ipv6.src_ip[0];
break;
case PPE_PKT_TYPE_IPV4_DSLITE:
case PPE_PKT_TYPE_IPV6_6RD:
default:
WARN_ON_ONCE(1);
return PPE_HASH_MASK;
}
hash = (hv1 & hv2) | ((~hv1) & hv3);
hash = (hash >> 24) | ((hash & 0xffffff) << 8);
hash ^= hv1 ^ hv2 ^ hv3;
hash ^= hash >> 16;
hash &= PPE_NUM_ENTRIES - 1;
return hash;
}
struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
u32 hash)
{
if (hash < PPE_SRAM_NUM_ENTRIES) {
u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
struct airoha_eth *eth = ppe->eth;
bool ppe2;
u32 val;
int i;
ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
hash >= PPE1_SRAM_NUM_ENTRIES;
airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
PPE_SRAM_CTRL_REQ_MASK);
if (read_poll_timeout_atomic(airoha_fe_rr, val,
val & PPE_SRAM_CTRL_ACK_MASK,
10, 100, false, eth,
REG_PPE_RAM_CTRL(ppe2)))
return NULL;
for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
hwe[i] = airoha_fe_rr(eth,
REG_PPE_RAM_ENTRY(ppe2, i));
}
return ppe->foe + hash * sizeof(struct airoha_foe_entry);
}
static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
struct airoha_foe_entry *hwe)
{
int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
int len;
if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
return false;
if (type > PPE_PKT_TYPE_IPV4_DSLITE)
len = offsetof(struct airoha_foe_entry, ipv6.data);
else
len = offsetof(struct airoha_foe_entry, ipv4.ib2);
return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
}
static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
struct airoha_foe_entry *e,
u32 hash)
{
struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
u32 ts = airoha_ppe_get_timestamp(ppe);
struct airoha_eth *eth = ppe->eth;
memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
wmb();
e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
hwe->ib1 = e->ib1;
if (hash < PPE_SRAM_NUM_ENTRIES) {
dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
bool ppe2 = airoha_ppe2_is_enabled(eth) &&
hash >= PPE1_SRAM_NUM_ENTRIES;
struct airoha_npu *npu;
int err = -ENODEV;
rcu_read_lock();
npu = rcu_dereference(eth->npu);
if (npu)
err = npu->ops.ppe_foe_commit_entry(npu, addr,
sizeof(*hwe), hash,
ppe2);
rcu_read_unlock();
return err;
}
return 0;
}
static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
{
struct airoha_flow_table_entry *e;
struct airoha_foe_entry *hwe;
struct hlist_node *n;
u32 index, state;
spin_lock_bh(&ppe_lock);
hwe = airoha_ppe_foe_get_entry(ppe, hash);
if (!hwe)
goto unlock;
state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
if (state == AIROHA_FOE_STATE_BIND)
goto unlock;
index = airoha_ppe_foe_get_entry_hash(hwe);
hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
if (airoha_ppe_foe_compare_entry(e, hwe)) {
airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
e->hash = hash;
break;
}
}
unlock:
spin_unlock_bh(&ppe_lock);
}
static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
struct airoha_flow_table_entry *e)
{
u32 hash = airoha_ppe_foe_get_entry_hash(&e->data);
e->hash = 0xffff;
spin_lock_bh(&ppe_lock);
hlist_add_head(&e->list, &ppe->foe_flow[hash]);
spin_unlock_bh(&ppe_lock);
return 0;
}
static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
struct airoha_flow_table_entry *e)
{
spin_lock_bh(&ppe_lock);
hlist_del_init(&e->list);
if (e->hash != 0xffff) {
e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
AIROHA_FOE_STATE_INVALID);
airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
e->hash = 0xffff;
}
spin_unlock_bh(&ppe_lock);
}
static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
struct flow_cls_offload *f)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct airoha_eth *eth = port->qdma->eth;
struct airoha_flow_table_entry *e;
struct airoha_flow_data data = {};
struct net_device *odev = NULL;
struct flow_action_entry *act;
struct airoha_foe_entry hwe;
int err, i, offload_type;
u16 addr_type = 0;
u8 l4proto = 0;
if (rhashtable_lookup(&eth->flow_table, &f->cookie,
airoha_flow_table_params))
return -EEXIST;
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
return -EOPNOTSUPP;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
if (flow_rule_has_control_flags(match.mask->flags,
f->common.extack))
return -EOPNOTSUPP;
} else {
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
l4proto = match.key->ip_proto;
} else {
return -EOPNOTSUPP;
}
switch (addr_type) {
case 0:
offload_type = PPE_PKT_TYPE_BRIDGE;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
flow_rule_match_eth_addrs(rule, &match);
memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
} else {
return -EOPNOTSUPP;
}
break;
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
break;
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
break;
default:
return -EOPNOTSUPP;
}
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_MANGLE:
if (offload_type == PPE_PKT_TYPE_BRIDGE)
return -EOPNOTSUPP;
if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
airoha_ppe_flow_mangle_eth(act, &data.eth);
break;
case FLOW_ACTION_REDIRECT:
odev = act->dev;
break;
case FLOW_ACTION_CSUM:
break;
case FLOW_ACTION_VLAN_PUSH:
if (data.vlan.num == 2 ||
act->vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
data.vlan.num++;
break;
case FLOW_ACTION_VLAN_POP:
break;
case FLOW_ACTION_PPPOE_PUSH:
break;
default:
return -EOPNOTSUPP;
}
}
if (!is_valid_ether_addr(data.eth.h_source) ||
!is_valid_ether_addr(data.eth.h_dest))
return -EINVAL;
err = airoha_ppe_foe_entry_prepare(&hwe, odev, offload_type,
&data, l4proto);
if (err)
return err;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports ports;
if (offload_type == PPE_PKT_TYPE_BRIDGE)
return -EOPNOTSUPP;
flow_rule_match_ports(rule, &ports);
data.src_port = ports.key->src;
data.dst_port = ports.key->dst;
} else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs addrs;
flow_rule_match_ipv4_addrs(rule, &addrs);
data.v4.src_addr = addrs.key->src;
data.v4.dst_addr = addrs.key->dst;
airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
struct flow_match_ipv6_addrs addrs;
flow_rule_match_ipv6_addrs(rule, &addrs);
data.v6.src_addr = addrs.key->src;
data.v6.dst_addr = addrs.key->dst;
airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
}
flow_action_for_each(i, act, &rule->action) {
if (act->id != FLOW_ACTION_MANGLE)
continue;
if (offload_type == PPE_PKT_TYPE_BRIDGE)
return -EOPNOTSUPP;
switch (act->mangle.htype) {
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
err = airoha_ppe_flow_mangle_ports(act, &data);
break;
case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
err = airoha_ppe_flow_mangle_ipv4(act, &data);
break;
case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
/* handled earlier */
break;
default:
return -EOPNOTSUPP;
}
if (err)
return err;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
if (err)
return err;
}
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e)
return -ENOMEM;
e->cookie = f->cookie;
memcpy(&e->data, &hwe, sizeof(e->data));
err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
if (err)
goto free_entry;
err = rhashtable_insert_fast(&eth->flow_table, &e->node,
airoha_flow_table_params);
if (err < 0)
goto remove_foe_entry;
return 0;
remove_foe_entry:
airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
free_entry:
kfree(e);
return err;
}
static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
struct flow_cls_offload *f)
{
struct airoha_eth *eth = port->qdma->eth;
struct airoha_flow_table_entry *e;
e = rhashtable_lookup(&eth->flow_table, &f->cookie,
airoha_flow_table_params);
if (!e)
return -ENOENT;
airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
rhashtable_remove_fast(&eth->flow_table, &e->node,
airoha_flow_table_params);
kfree(e);
return 0;
}
static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
struct flow_cls_offload *f)
{
switch (f->command) {
case FLOW_CLS_REPLACE:
return airoha_ppe_flow_offload_replace(port, f);
case FLOW_CLS_DESTROY:
return airoha_ppe_flow_offload_destroy(port, f);
default:
break;
}
return -EOPNOTSUPP;
}
static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
struct airoha_npu *npu)
{
int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
struct airoha_foe_entry *hwe = ppe->foe;
if (airoha_ppe2_is_enabled(ppe->eth))
sram_num_entries = sram_num_entries / 2;
for (i = 0; i < sram_num_entries; i++)
memset(&hwe[i], 0, sizeof(*hwe));
return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
PPE_SRAM_NUM_ENTRIES);
}
static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
{
struct airoha_npu *npu = airoha_npu_get(eth->dev);
if (IS_ERR(npu)) {
request_module("airoha-npu");
npu = airoha_npu_get(eth->dev);
}
return npu;
}
static int airoha_ppe_offload_setup(struct airoha_eth *eth)
{
struct airoha_npu *npu = airoha_ppe_npu_get(eth);
int err;
if (IS_ERR(npu))
return PTR_ERR(npu);
err = npu->ops.ppe_init(npu);
if (err)
goto error_npu_put;
airoha_ppe_hw_init(eth->ppe);
err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
if (err)
goto error_npu_put;
rcu_assign_pointer(eth->npu, npu);
synchronize_rcu();
return 0;
error_npu_put:
airoha_npu_put(npu);
return err;
}
int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
struct flow_cls_offload *cls = type_data;
struct net_device *dev = cb_priv;
struct airoha_gdm_port *port = netdev_priv(dev);
struct airoha_eth *eth = port->qdma->eth;
int err = 0;
if (!tc_can_offload(dev) || type != TC_SETUP_CLSFLOWER)
return -EOPNOTSUPP;
mutex_lock(&flow_offload_mutex);
if (!eth->npu)
err = airoha_ppe_offload_setup(eth);
if (!err)
err = airoha_ppe_flow_offload_cmd(port, cls);
mutex_unlock(&flow_offload_mutex);
return err;
}
void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
{
u16 now, diff;
if (hash > PPE_HASH_MASK)
return;
now = (u16)jiffies;
diff = now - ppe->foe_check_time[hash];
if (diff < HZ / 10)
return;
ppe->foe_check_time[hash] = now;
airoha_ppe_foe_insert_entry(ppe, hash);
}
int airoha_ppe_init(struct airoha_eth *eth)
{
struct airoha_ppe *ppe;
int foe_size, err;
ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
if (!ppe)
return -ENOMEM;
foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
GFP_KERNEL);
if (!ppe->foe)
return -ENOMEM;
ppe->eth = eth;
eth->ppe = ppe;
ppe->foe_flow = devm_kzalloc(eth->dev,
PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
GFP_KERNEL);
if (!ppe->foe_flow)
return -ENOMEM;
err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
if (err)
return err;
err = airoha_ppe_debugfs_init(ppe);
if (err)
rhashtable_destroy(&eth->flow_table);
return err;
}
void airoha_ppe_deinit(struct airoha_eth *eth)
{
struct airoha_npu *npu;
rcu_read_lock();
npu = rcu_dereference(eth->npu);
if (npu) {
npu->ops.ppe_deinit(npu);
airoha_npu_put(npu);
}
rcu_read_unlock();
rhashtable_destroy(&eth->flow_table);
debugfs_remove(eth->ppe->debugfs_dir);
}


@@ -0,0 +1,181 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2025 AIROHA Inc
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
*/
#include "airoha_eth.h"
static void airoha_debugfs_ppe_print_tuple(struct seq_file *m,
void *src_addr, void *dest_addr,
u16 *src_port, u16 *dest_port,
bool ipv6)
{
__be32 n_addr[IPV6_ADDR_WORDS];
if (ipv6) {
ipv6_addr_cpu_to_be32(n_addr, src_addr);
seq_printf(m, "%pI6", n_addr);
} else {
seq_printf(m, "%pI4h", src_addr);
}
if (src_port)
seq_printf(m, ":%d", *src_port);
seq_puts(m, "->");
if (ipv6) {
ipv6_addr_cpu_to_be32(n_addr, dest_addr);
seq_printf(m, "%pI6", n_addr);
} else {
seq_printf(m, "%pI4h", dest_addr);
}
if (dest_port)
seq_printf(m, ":%d", *dest_port);
}
static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
bool bind)
{
static const char *const ppe_type_str[] = {
[PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
[PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
[PPE_PKT_TYPE_BRIDGE] = "L2B",
[PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
[PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
[PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
[PPE_PKT_TYPE_IPV6_6RD] = "6RD",
};
static const char *const ppe_state_str[] = {
[AIROHA_FOE_STATE_INVALID] = "INV",
[AIROHA_FOE_STATE_UNBIND] = "UNB",
[AIROHA_FOE_STATE_BIND] = "BND",
[AIROHA_FOE_STATE_FIN] = "FIN",
};
struct airoha_ppe *ppe = m->private;
int i;
for (i = 0; i < PPE_NUM_ENTRIES; i++) {
const char *state_str, *type_str = "UNKNOWN";
void *src_addr = NULL, *dest_addr = NULL;
u16 *src_port = NULL, *dest_port = NULL;
struct airoha_foe_mac_info_common *l2;
unsigned char h_source[ETH_ALEN] = {};
unsigned char h_dest[ETH_ALEN];
struct airoha_foe_entry *hwe;
u32 type, state, ib2, data;
bool ipv6 = false;
hwe = airoha_ppe_foe_get_entry(ppe, i);
if (!hwe)
continue;
state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
if (!state)
continue;
if (bind && state != AIROHA_FOE_STATE_BIND)
continue;
state_str = ppe_state_str[state % ARRAY_SIZE(ppe_state_str)];
type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
if (type < ARRAY_SIZE(ppe_type_str) && ppe_type_str[type])
type_str = ppe_type_str[type];
seq_printf(m, "%05x %s %7s", i, state_str, type_str);
switch (type) {
case PPE_PKT_TYPE_IPV4_HNAPT:
case PPE_PKT_TYPE_IPV4_DSLITE:
src_port = &hwe->ipv4.orig_tuple.src_port;
dest_port = &hwe->ipv4.orig_tuple.dest_port;
fallthrough;
case PPE_PKT_TYPE_IPV4_ROUTE:
src_addr = &hwe->ipv4.orig_tuple.src_ip;
dest_addr = &hwe->ipv4.orig_tuple.dest_ip;
break;
case PPE_PKT_TYPE_IPV6_ROUTE_5T:
src_port = &hwe->ipv6.src_port;
dest_port = &hwe->ipv6.dest_port;
fallthrough;
case PPE_PKT_TYPE_IPV6_ROUTE_3T:
case PPE_PKT_TYPE_IPV6_6RD:
src_addr = &hwe->ipv6.src_ip;
dest_addr = &hwe->ipv6.dest_ip;
ipv6 = true;
break;
default:
break;
}
if (src_addr && dest_addr) {
seq_puts(m, " orig=");
airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
src_port, dest_port, ipv6);
}
switch (type) {
case PPE_PKT_TYPE_IPV4_HNAPT:
case PPE_PKT_TYPE_IPV4_DSLITE:
src_port = &hwe->ipv4.new_tuple.src_port;
dest_port = &hwe->ipv4.new_tuple.dest_port;
fallthrough;
case PPE_PKT_TYPE_IPV4_ROUTE:
src_addr = &hwe->ipv4.new_tuple.src_ip;
dest_addr = &hwe->ipv4.new_tuple.dest_ip;
seq_puts(m, " new=");
airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
src_port, dest_port,
ipv6);
break;
default:
break;
}
if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
data = hwe->ipv6.data;
ib2 = hwe->ipv6.ib2;
l2 = &hwe->ipv6.l2;
} else {
data = hwe->ipv4.data;
ib2 = hwe->ipv4.ib2;
l2 = &hwe->ipv4.l2.common;
*((__be16 *)&h_source[4]) =
cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
}
*((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
*((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
*((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x"
" vlan=%d,%d ib1=%08x ib2=%08x\n",
h_source, h_dest, l2->etype, data,
l2->vlan1, l2->vlan2, hwe->ib1, ib2);
}
return 0;
}
static int airoha_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
{
return airoha_ppe_debugfs_foe_show(m, private, false);
}
DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_all);
static int airoha_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
{
return airoha_ppe_debugfs_foe_show(m, private, true);
}
DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_bind);
int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
{
ppe->debugfs_dir = debugfs_create_dir("ppe", NULL);
debugfs_create_file("entries", 0444, ppe->debugfs_dir, ppe,
&airoha_ppe_debugfs_foe_all_fops);
debugfs_create_file("bind", 0444, ppe->debugfs_dir, ppe,
&airoha_ppe_debugfs_foe_bind_fops);
return 0;
}


@@ -0,0 +1,798 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2024 AIROHA Inc
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
*/
#ifndef AIROHA_REGS_H
#define AIROHA_REGS_H
#include <linux/types.h>
/* FE */
#define PSE_BASE 0x0100
#define CSR_IFC_BASE 0x0200
#define CDM1_BASE 0x0400
#define GDM1_BASE 0x0500
#define PPE1_BASE 0x0c00
#define PPE2_BASE 0x1c00
#define CDM2_BASE 0x1400
#define GDM2_BASE 0x1500
#define GDM3_BASE 0x1100
#define GDM4_BASE 0x2500
#define GDM_BASE(_n) \
((_n) == 4 ? GDM4_BASE : \
(_n) == 3 ? GDM3_BASE : \
(_n) == 2 ? GDM2_BASE : GDM1_BASE)
#define REG_FE_DMA_GLO_CFG 0x0000
#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
#define REG_FE_RST_GLO_CFG 0x0004
#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
#define FE_RST_CORE_MASK BIT(0)
#define REG_FE_FOE_TS 0x0010
#define REG_FE_WAN_PORT 0x0024
#define WAN1_EN_MASK BIT(16)
#define WAN1_MASK GENMASK(12, 8)
#define WAN0_MASK GENMASK(4, 0)
#define REG_FE_WAN_MAC_H 0x0030
#define REG_FE_LAN_MAC_H 0x0040
#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
#define REG_FE_CDM1_OQ_MAP0 0x0050
#define REG_FE_CDM1_OQ_MAP1 0x0054
#define REG_FE_CDM1_OQ_MAP2 0x0058
#define REG_FE_CDM1_OQ_MAP3 0x005c
#define REG_FE_PCE_CFG 0x0070
#define PCE_DPI_EN_MASK BIT(2)
#define PCE_KA_EN_MASK BIT(1)
#define PCE_MC_EN_MASK BIT(0)
#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
#define PSE_CFG_WR_EN_MASK BIT(8)
#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
#define PSE_FQ_CFG 0x008c
#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
#define REG_FE_PSE_BUF_SET 0x0090
#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
#define PSE_ALLRSV_MASK GENMASK(14, 0)
#define REG_PSE_SHARE_USED_THD 0x0094
#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
#define REG_GDM_MISC_CFG 0x0148
#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
#define GDM2_CHN_VLD_MODE_MASK BIT(5)
#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
#define FE_IFC_EN_MASK BIT(0)
#define REG_FE_VIP_PORT_EN 0x01f0
#define REG_FE_IFC_PORT_EN 0x01f4
#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
#define PATN_FCPU_EN_MASK BIT(7)
#define PATN_SWP_EN_MASK BIT(6)
#define PATN_DP_EN_MASK BIT(5)
#define PATN_SP_EN_MASK BIT(4)
#define PATN_TYPE_MASK GENMASK(3, 1)
#define PATN_EN_MASK BIT(0)
#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
#define PATN_DP_MASK GENMASK(31, 16)
#define PATN_SP_MASK GENMASK(15, 0)
#define REG_CDM1_VLAN_CTRL CDM1_BASE
#define CDM1_VLAN_MASK GENMASK(31, 16)
#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
#define GDM_DROP_CRC_ERR BIT(23)
#define GDM_IP4_CKSUM BIT(22)
#define GDM_TCP_CKSUM BIT(21)
#define GDM_UDP_CKSUM BIT(20)
#define GDM_STRIP_CRC BIT(16)
#define GDM_UCFQ_MASK GENMASK(15, 12)
#define GDM_BCFQ_MASK GENMASK(11, 8)
#define GDM_MCFQ_MASK GENMASK(7, 4)
#define GDM_OCFQ_MASK GENMASK(3, 0)
#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
#define GDM_INGRESS_FC_EN_MASK BIT(1)
#define GDM_STAG_EN_MASK BIT(0)
#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
#define GDM_LONG_LEN_MASK GENMASK(29, 16)
#define REG_GDM_LPBK_CFG(_n) (GDM_BASE(_n) + 0x1c)
#define LPBK_GAP_MASK GENMASK(31, 24)
#define LPBK_LEN_MASK GENMASK(23, 10)
#define LPBK_CHAN_MASK GENMASK(8, 4)
#define LPBK_MODE_MASK GENMASK(3, 1)
#define LPBK_EN_MASK BIT(0)
#define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24)
#define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28)
#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
#define FE_CPORT_PAD BIT(26)
#define FE_CPORT_PORT_XFC_MASK BIT(25)
#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
#define REG_PPE_GLO_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x200)
#define PPE_GLO_CFG_BUSY_MASK BIT(31)
#define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK BIT(9)
#define PPE_GLO_CFG_PSE_HASH_OFS_MASK BIT(6)
#define PPE_GLO_CFG_PPE_BSWAP_MASK BIT(5)
#define PPE_GLO_CFG_TTL_DROP_MASK BIT(4)
#define PPE_GLO_CFG_IP4_CS_DROP_MASK BIT(3)
#define PPE_GLO_CFG_IP4_L4_CS_DROP_MASK BIT(2)
#define PPE_GLO_CFG_EN_MASK BIT(0)
#define REG_PPE_PPE_FLOW_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x204)
#define PPE_FLOW_CFG_IP6_HASH_GRE_KEY_MASK BIT(20)
#define PPE_FLOW_CFG_IP4_HASH_GRE_KEY_MASK BIT(19)
#define PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL_MASK BIT(18)
#define PPE_FLOW_CFG_IP4_NAT_FRAG_MASK BIT(17)
#define PPE_FLOW_CFG_IP_PROTO_BLACKLIST_MASK BIT(16)
#define PPE_FLOW_CFG_IP4_DSLITE_MASK BIT(14)
#define PPE_FLOW_CFG_IP4_NAPT_MASK BIT(13)
#define PPE_FLOW_CFG_IP4_NAT_MASK BIT(12)
#define PPE_FLOW_CFG_IP6_6RD_MASK BIT(10)
#define PPE_FLOW_CFG_IP6_5T_ROUTE_MASK BIT(9)
#define PPE_FLOW_CFG_IP6_3T_ROUTE_MASK BIT(8)
#define PPE_FLOW_CFG_IP4_UDP_FRAG_MASK BIT(7)
#define PPE_FLOW_CFG_IP4_TCP_FRAG_MASK BIT(6)
#define REG_PPE_IP_PROTO_CHK(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x208)
#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(15, 0)
#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(31, 16)
#define REG_PPE_TB_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x21c)
#define PPE_SRAM_TB_NUM_ENTRY_MASK GENMASK(26, 24)
#define PPE_TB_CFG_KEEPALIVE_MASK GENMASK(13, 12)
#define PPE_TB_CFG_AGE_TCP_FIN_MASK BIT(11)
#define PPE_TB_CFG_AGE_UDP_MASK BIT(10)
#define PPE_TB_CFG_AGE_TCP_MASK BIT(9)
#define PPE_TB_CFG_AGE_UNBIND_MASK BIT(8)
#define PPE_TB_CFG_AGE_NON_L4_MASK BIT(7)
#define PPE_TB_CFG_AGE_PREBIND_MASK BIT(6)
#define PPE_TB_CFG_SEARCH_MISS_MASK GENMASK(5, 4)
#define PPE_TB_ENTRY_SIZE_MASK BIT(3)
#define PPE_DRAM_TB_NUM_ENTRY_MASK GENMASK(2, 0)
#define REG_PPE_TB_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x220)
#define REG_PPE_BIND_RATE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x228)
#define PPE_BIND_RATE_L2B_BIND_MASK GENMASK(31, 16)
#define PPE_BIND_RATE_BIND_MASK GENMASK(15, 0)
#define REG_PPE_BIND_LIMIT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x22c)
#define PPE_BIND_LIMIT0_HALF_MASK GENMASK(29, 16)
#define PPE_BIND_LIMIT0_QUARTER_MASK GENMASK(13, 0)
#define REG_PPE_BIND_LIMIT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x230)
#define PPE_BIND_LIMIT1_NON_L4_MASK GENMASK(23, 16)
#define PPE_BIND_LIMIT1_FULL_MASK GENMASK(13, 0)
#define REG_PPE_BND_AGE0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x23c)
#define PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
#define PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
#define REG_PPE_UNBIND_AGE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x238)
#define PPE_UNBIND_AGE_MIN_PACKETS_MASK GENMASK(31, 16)
#define PPE_UNBIND_AGE_DELTA_MASK GENMASK(7, 0)
#define REG_PPE_BND_AGE1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x240)
#define PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
#define PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
#define REG_PPE_HASH_SEED(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x244)
#define PPE_HASH_SEED 0x12345678
#define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248)
#define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c)
#define REG_PPE_TB_HASH_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x250)
#define PPE_DRAM_HASH1_MODE_MASK GENMASK(31, 28)
#define PPE_DRAM_HASH1_EN_MASK BIT(24)
#define PPE_DRAM_HASH0_MODE_MASK GENMASK(23, 20)
#define PPE_DRAM_TABLE_EN_MASK BIT(16)
#define PPE_SRAM_HASH1_MODE_MASK GENMASK(15, 12)
#define PPE_SRAM_HASH1_EN_MASK BIT(8)
#define PPE_SRAM_HASH0_MODE_MASK GENMASK(7, 4)
#define PPE_SRAM_TABLE_EN_MASK BIT(0)
#define REG_PPE_MTU_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x304)
#define REG_PPE_MTU(_m, _n) (REG_PPE_MTU_BASE(_m) + ((_n) << 2))
#define FP1_EGRESS_MTU_MASK GENMASK(29, 16)
#define FP0_EGRESS_MTU_MASK GENMASK(13, 0)
#define REG_PPE_RAM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x31c)
#define PPE_SRAM_CTRL_ACK_MASK BIT(31)
#define PPE_SRAM_CTRL_DUAL_SUCESS_MASK BIT(30)
#define PPE_SRAM_CTRL_ENTRY_MASK GENMASK(23, 8)
#define PPE_SRAM_WR_DUAL_DIRECTION_MASK BIT(2)
#define PPE_SRAM_CTRL_WR_MASK BIT(1)
#define PPE_SRAM_CTRL_REQ_MASK BIT(0)
#define REG_PPE_RAM_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
#define REG_PPE_RAM_ENTRY(_m, _n) (REG_PPE_RAM_BASE(_m) + ((_n) << 2))
#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
#define REG_GDM3_FWD_CFG GDM3_BASE
#define GDM3_PAD_EN_MASK BIT(28)
#define REG_GDM4_FWD_CFG GDM4_BASE
#define GDM4_PAD_EN_MASK BIT(28)
#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
#define REG_IP_FRAG_FP 0x2010
#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
#define REG_MC_VLAN_EN 0x2100
#define MC_VLAN_EN_MASK BIT(0)
#define REG_MC_VLAN_CFG 0x2104
#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
#define MC_VLAN_CFG_RW_MASK BIT(0)
#define REG_MC_VLAN_DATA 0x2108
#define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2))
#define SP_CPORT_PCIE1_MASK GENMASK(31, 28)
#define SP_CPORT_PCIE0_MASK GENMASK(27, 24)
#define SP_CPORT_USB_MASK GENMASK(7, 4)
#define SP_CPORT_ETH_MASK GENMASK(7, 4)
#define REG_SRC_PORT_FC_MAP6 0x2298
#define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24)
#define FC_ID_OF_SRC_PORT26_MASK GENMASK(20, 16)
#define FC_ID_OF_SRC_PORT25_MASK GENMASK(12, 8)
#define FC_ID_OF_SRC_PORT24_MASK GENMASK(4, 0)
#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
/* QDMA */
#define REG_QDMA_GLOBAL_CFG 0x0004
#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
#define GLOBAL_CFG_RESET_MASK BIT(23)
#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
#define REG_FWD_DSCP_BASE 0x0010
#define REG_FWD_BUF_BASE 0x0014
#define REG_HW_FWD_DSCP_CFG 0x0018
#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
#define REG_INT_STATUS(_n) \
(((_n) == 4) ? 0x0730 : \
((_n) == 3) ? 0x0724 : \
((_n) == 2) ? 0x0720 : \
((_n) == 1) ? 0x0024 : 0x0020)
#define REG_INT_ENABLE(_n) \
(((_n) == 4) ? 0x0750 : \
((_n) == 3) ? 0x0744 : \
((_n) == 2) ? 0x0740 : \
((_n) == 1) ? 0x002c : 0x0028)
/* QDMA_CSR_INT_ENABLE1 */
#define RX15_COHERENT_INT_MASK BIT(31)
#define RX14_COHERENT_INT_MASK BIT(30)
#define RX13_COHERENT_INT_MASK BIT(29)
#define RX12_COHERENT_INT_MASK BIT(28)
#define RX11_COHERENT_INT_MASK BIT(27)
#define RX10_COHERENT_INT_MASK BIT(26)
#define RX9_COHERENT_INT_MASK BIT(25)
#define RX8_COHERENT_INT_MASK BIT(24)
#define RX7_COHERENT_INT_MASK BIT(23)
#define RX6_COHERENT_INT_MASK BIT(22)
#define RX5_COHERENT_INT_MASK BIT(21)
#define RX4_COHERENT_INT_MASK BIT(20)
#define RX3_COHERENT_INT_MASK BIT(19)
#define RX2_COHERENT_INT_MASK BIT(18)
#define RX1_COHERENT_INT_MASK BIT(17)
#define RX0_COHERENT_INT_MASK BIT(16)
#define TX7_COHERENT_INT_MASK BIT(15)
#define TX6_COHERENT_INT_MASK BIT(14)
#define TX5_COHERENT_INT_MASK BIT(13)
#define TX4_COHERENT_INT_MASK BIT(12)
#define TX3_COHERENT_INT_MASK BIT(11)
#define TX2_COHERENT_INT_MASK BIT(10)
#define TX1_COHERENT_INT_MASK BIT(9)
#define TX0_COHERENT_INT_MASK BIT(8)
#define CNT_OVER_FLOW_INT_MASK BIT(7)
#define IRQ1_FULL_INT_MASK BIT(5)
#define IRQ1_INT_MASK BIT(4)
#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
#define IRQ0_FULL_INT_MASK BIT(1)
#define IRQ0_INT_MASK BIT(0)
#define TX_DONE_INT_MASK(_n) \
((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
: IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
#define INT_TX_MASK \
(IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
#define INT_IDX0_MASK \
(TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
RX15_COHERENT_INT_MASK | INT_TX_MASK)
/* QDMA_CSR_INT_ENABLE2 */
#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
#define RX15_DONE_INT_MASK BIT(15)
#define RX14_DONE_INT_MASK BIT(14)
#define RX13_DONE_INT_MASK BIT(13)
#define RX12_DONE_INT_MASK BIT(12)
#define RX11_DONE_INT_MASK BIT(11)
#define RX10_DONE_INT_MASK BIT(10)
#define RX9_DONE_INT_MASK BIT(9)
#define RX8_DONE_INT_MASK BIT(8)
#define RX7_DONE_INT_MASK BIT(7)
#define RX6_DONE_INT_MASK BIT(6)
#define RX5_DONE_INT_MASK BIT(5)
#define RX4_DONE_INT_MASK BIT(4)
#define RX3_DONE_INT_MASK BIT(3)
#define RX2_DONE_INT_MASK BIT(2)
#define RX1_DONE_INT_MASK BIT(1)
#define RX0_DONE_INT_MASK BIT(0)
#define RX_DONE_INT_MASK \
(RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
RX15_DONE_INT_MASK)
#define INT_IDX1_MASK \
(RX_DONE_INT_MASK | \
RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
RX15_NO_CPU_DSCP_INT_MASK)
/* QDMA_CSR_INT_ENABLE5 */
#define TX31_COHERENT_INT_MASK BIT(31)
#define TX30_COHERENT_INT_MASK BIT(30)
#define TX29_COHERENT_INT_MASK BIT(29)
#define TX28_COHERENT_INT_MASK BIT(28)
#define TX27_COHERENT_INT_MASK BIT(27)
#define TX26_COHERENT_INT_MASK BIT(26)
#define TX25_COHERENT_INT_MASK BIT(25)
#define TX24_COHERENT_INT_MASK BIT(24)
#define TX23_COHERENT_INT_MASK BIT(23)
#define TX22_COHERENT_INT_MASK BIT(22)
#define TX21_COHERENT_INT_MASK BIT(21)
#define TX20_COHERENT_INT_MASK BIT(20)
#define TX19_COHERENT_INT_MASK BIT(19)
#define TX18_COHERENT_INT_MASK BIT(18)
#define TX17_COHERENT_INT_MASK BIT(17)
#define TX16_COHERENT_INT_MASK BIT(16)
#define TX15_COHERENT_INT_MASK BIT(15)
#define TX14_COHERENT_INT_MASK BIT(14)
#define TX13_COHERENT_INT_MASK BIT(13)
#define TX12_COHERENT_INT_MASK BIT(12)
#define TX11_COHERENT_INT_MASK BIT(11)
#define TX10_COHERENT_INT_MASK BIT(10)
#define TX9_COHERENT_INT_MASK BIT(9)
#define TX8_COHERENT_INT_MASK BIT(8)
#define INT_IDX4_MASK \
(TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
#define TX_IRQ_THR_MASK GENMASK(27, 16)
#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
#define REG_TX_RING_BASE(_n) \
(((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
#define REG_TX_RING_BLOCKING(_n) \
(((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
#define REG_TX_CPU_IDX(_n) \
(((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
#define REG_TX_DMA_IDX(_n) \
(((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
#define IRQ_RING_IDX_MASK GENMASK(20, 16)
#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
#define REG_RX_RING_BASE(_n) \
(((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
#define REG_RX_RING_SIZE(_n) \
(((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
#define RX_RING_THR_MASK GENMASK(31, 16)
#define RX_RING_SIZE_MASK GENMASK(15, 0)
#define REG_RX_CPU_IDX(_n) \
(((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
#define REG_RX_DMA_IDX(_n) \
(((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
#define REG_RX_DELAY_INT_IDX(_n) \
(((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
#define RX_DELAY_INT_MASK GENMASK(15, 0)
#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
#define REG_INGRESS_TRTCM_CFG 0x0070
#define INGRESS_TRTCM_EN_MASK BIT(31)
#define INGRESS_TRTCM_MODE_MASK BIT(30)
#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))
#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
#define CNTR_EN_MASK BIT(31)
#define CNTR_ALL_CHAN_EN_MASK BIT(30)
#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
#define CNTR_SRC_MASK GENMASK(27, 24)
#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
#define CNTR_CHAN_MASK GENMASK(7, 3)
#define CNTR_QUEUE_MASK GENMASK(2, 0)
#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
#define REG_LMGR_INIT_CFG 0x1000
#define LMGR_INIT_START BIT(31)
#define LMGR_SRAM_MODE_MASK BIT(30)
#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
#define REG_FWD_DSCP_LOW_THR 0x1004
#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
#define REG_EGRESS_RATE_METER_CFG 0x100c
#define EGRESS_RATE_METER_EN_MASK BIT(31)
#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
#define REG_EGRESS_TRTCM_CFG 0x1010
#define EGRESS_TRTCM_EN_MASK BIT(31)
#define EGRESS_TRTCM_MODE_MASK BIT(30)
#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
#define TRTCM_PARAM_RW_MASK BIT(31)
#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
#define REG_TXWRR_MODE_CFG 0x1020
#define TWRR_WEIGHT_SCALE_MASK BIT(31)
#define TWRR_WEIGHT_BASE_MASK BIT(3)
#define REG_TXWRR_WEIGHT_CFG 0x1024
#define TWRR_RW_CMD_MASK BIT(31)
#define TWRR_RW_CMD_DONE BIT(30)
#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
#define TWRR_VALUE_MASK GENMASK(15, 0)
#define REG_PSE_BUF_USAGE_CFG 0x1028
#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
#define REG_GLB_TRTCM_CFG 0x1080
#define GLB_TRTCM_EN_MASK BIT(31)
#define GLB_TRTCM_MODE_MASK BIT(30)
#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
#define GLB_FAST_TICK_MASK GENMASK(15, 0)
#define REG_TXQ_CNGST_CFG 0x10a0
#define TXQ_CNGST_DROP_EN BIT(31)
#define TXQ_CNGST_DEI_DROP_EN BIT(30)
#define REG_SLA_TRTCM_CFG 0x1150
#define SLA_TRTCM_EN_MASK BIT(31)
#define SLA_TRTCM_MODE_MASK BIT(30)
#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
#define SLA_FAST_TICK_MASK GENMASK(15, 0)
/* CTRL */
#define QDMA_DESC_DONE_MASK BIT(31)
#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
#define QDMA_DESC_DEI_MASK BIT(25)
#define QDMA_DESC_NO_DROP_MASK BIT(24)
#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
/* DATA */
#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
/* TX MSG0 */
#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
/* TX MSG1 */
#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
/* RX MSG0 */
#define QDMA_ETH_RXMSG_SPTAG GENMASK(21, 14)
/* RX MSG1 */
#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
struct airoha_qdma_desc {
__le32 rsv;
__le32 ctrl;
__le32 addr;
__le32 data;
__le32 msg0;
__le32 msg1;
__le32 msg2;
__le32 msg3;
};
/* CTRL0 */
#define QDMA_FWD_DESC_CTX_MASK BIT(31)
#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
/* CTRL1 */
#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
/* CTRL2 */
#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
struct airoha_qdma_fwd_desc {
__le32 addr;
__le32 ctrl0;
__le32 ctrl1;
__le32 ctrl2;
__le32 msg0;
__le32 msg1;
__le32 rsv0;
__le32 rsv1;
};
#endif /* AIROHA_REGS_H */
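All four GDM blocks share the same register layout, so the per-port accessors above are just GDM_BASE(_n) plus a small fixed offset. A minimal userspace sketch, copying the relevant definitions verbatim from the header, prints the composed offsets for ports 1-4 to make the addressing scheme concrete; it performs no hardware access.

/* Print the per-port GDM register offsets composed by the macros above. */
#include <stdio.h>

#define GDM1_BASE 0x0500
#define GDM2_BASE 0x1500
#define GDM3_BASE 0x1100
#define GDM4_BASE 0x2500
#define GDM_BASE(_n) \
	((_n) == 4 ? GDM4_BASE : \
	 (_n) == 3 ? GDM3_BASE : \
	 (_n) == 2 ? GDM2_BASE : GDM1_BASE)
#define REG_GDM_FWD_CFG(_n)	GDM_BASE(_n)
#define REG_GDM_INGRESS_CFG(_n)	(GDM_BASE(_n) + 0x10)
#define REG_GDM_LEN_CFG(_n)	(GDM_BASE(_n) + 0x14)

int main(void)
{
	for (int n = 1; n <= 4; n++)
		printf("GDM%d: fwd=0x%04x ingress=0x%04x len=0x%04x\n",
		       n, REG_GDM_FWD_CFG(n), REG_GDM_INGRESS_CFG(n),
		       REG_GDM_LEN_CFG(n));
	return 0;
}

The same base-plus-offset convention is used for the PPE and QDMA blocks above, e.g. REG_PPE_TB_CFG(_n) selecting PPE1_BASE or PPE2_BASE depending on the PPE instance.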


@@ -7,14 +7,6 @@ config NET_VENDOR_MEDIATEK
if NET_VENDOR_MEDIATEK
config NET_AIROHA
tristate "Airoha SoC Gigabit Ethernet support"
depends on NET_DSA || !NET_DSA
select PAGE_POOL
help
This driver supports the gigabit ethernet MACs in the
Airoha SoC family.
config NET_MEDIATEK_SOC_WED
depends on ARCH_MEDIATEK || COMPILE_TEST
def_bool NET_MEDIATEK_SOC != n


@@ -11,4 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
obj-$(CONFIG_NET_AIROHA) += airoha_eth.o