Merge branch 'add-wed-support-for-mt7986-chipset'
author Paolo Abeni <pabeni@redhat.com>
Thu, 22 Sep 2022 13:13:26 +0000 (15:13 +0200)
committer Paolo Abeni <pabeni@redhat.com>
Thu, 22 Sep 2022 13:13:26 +0000 (15:13 +0200)
Lorenzo Bianconi says:

====================
Add WED support for MT7986 chipset

Similar to MT7622, introduce Wireless Ethernet Dispatch (WED) support
for the MT7986 chipset in order to offload traffic received from a
LAN/WAN device and destined to the WLAN NIC (MT7915E) to the hardware
packet engine.
====================

Link: https://lore.kernel.org/r/cover.1663668203.git.lorenzo@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
16 files changed:
Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml
Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7986-wed-pcie.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/net/mediatek,net.yaml
arch/arm64/boot/dts/mediatek/mt7986a.dtsi
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mediatek/mtk_ppe.c
drivers/net/ethernet/mediatek/mtk_ppe.h
drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
drivers/net/ethernet/mediatek/mtk_ppe_regs.h
drivers/net/ethernet/mediatek/mtk_wed.c
drivers/net/ethernet/mediatek/mtk_wed.h
drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
drivers/net/ethernet/mediatek/mtk_wed_regs.h
include/linux/soc/mediatek/mtk_wed.h

index 787d6673f952bf170355da56c45639888db93e4d..84fb0a146b6e1b1186f2a5618854cb63e9c4dff6 100644 (file)
@@ -20,6 +20,7 @@ properties:
     items:
       - enum:
           - mediatek,mt7622-wed
+          - mediatek,mt7986-wed
       - const: syscon
 
   reg:
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7986-wed-pcie.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7986-wed-pcie.yaml
new file mode 100644 (file)
index 0000000..96221f5
--- /dev/null
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/mediatek/mediatek,mt7986-wed-pcie.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: MediaTek PCIE WED Controller for MT7986
+
+maintainers:
+  - Lorenzo Bianconi <lorenzo@kernel.org>
+  - Felix Fietkau <nbd@nbd.name>
+
+description:
+  The MediaTek WED PCIE node provides a configuration interface for the
+  PCIE controller on the MT7986 SoC.
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - mediatek,mt7986-wed-pcie
+      - const: syscon
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    soc {
+      #address-cells = <2>;
+      #size-cells = <2>;
+      wed_pcie: wed-pcie@10003000 {
+        compatible = "mediatek,mt7986-wed-pcie",
+                     "syscon";
+        reg = <0 0x10003000 0 0x10>;
+      };
+    };
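
A minimal consumer sketch (editorial addition, not part of this series): a
"syscon"-compatible node such as the one above is normally resolved into a
regmap through a phandle property. The property name matches the
mediatek,net.yaml change that follows; how mtk_wed.c actually consumes the
node is not shown in this diff, so the helper below is hypothetical.

  #include <linux/mfd/syscon.h>
  #include <linux/of.h>
  #include <linux/regmap.h>

  /* look up the wed-pcie syscon regmap from the ethernet controller node */
  static struct regmap *mt7986_wed_pcie_regmap(struct device_node *eth_np)
  {
          return syscon_regmap_lookup_by_phandle(eth_np, "mediatek,wed-pcie");
  }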
index f5564ecddb6261a22c246edbdd6308b1b898f4e7..7ef696204c5a76c25fc836fbb359539748cd69ca 100644 (file)
@@ -69,6 +69,15 @@ properties:
       A list of phandle to the syscon node that handles the SGMII setup which is required for
       those SoCs equipped with SGMII.
 
+  mediatek,wed:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    minItems: 2
+    maxItems: 2
+    items:
+      maxItems: 1
+    description:
+      List of phandles to wireless ethernet dispatch nodes.
+
   dma-coherent: true
 
   mdio-bus:
@@ -112,6 +121,8 @@ allOf:
             Phandle to the syscon node that handles the ports slew rate and
             driver current.
 
+        mediatek,wed: false
+
   - if:
       properties:
         compatible:
@@ -144,15 +155,6 @@ allOf:
           minItems: 1
           maxItems: 1
 
-        mediatek,wed:
-          $ref: /schemas/types.yaml#/definitions/phandle-array
-          minItems: 2
-          maxItems: 2
-          items:
-            maxItems: 1
-          description:
-            List of phandles to wireless ethernet dispatch nodes.
-
         mediatek,pcie-mirror:
           $ref: /schemas/types.yaml#/definitions/phandle
           description:
@@ -202,6 +204,8 @@ allOf:
           minItems: 2
           maxItems: 2
 
+        mediatek,wed: false
+
   - if:
       properties:
         compatible:
@@ -238,6 +242,11 @@ allOf:
           minItems: 2
           maxItems: 2
 
+        mediatek,wed-pcie:
+          $ref: /schemas/types.yaml#/definitions/phandle
+          description:
+            Phandle to the mediatek wed-pcie controller.
+
 patternProperties:
   "^mac@[0-1]$":
     type: object
index e3a407d03551fec7c66243ce8f1439d54b514806..692102f6248d6d96ca9ca4fe432a0ad520420593 100644 (file)
                         #reset-cells = <1>;
                };
 
+               wed_pcie: wed-pcie@10003000 {
+                       compatible = "mediatek,mt7986-wed-pcie",
+                                    "syscon";
+                       reg = <0 0x10003000 0 0x10>;
+               };
+
+               wed0: wed@15010000 {
+                       compatible = "mediatek,mt7986-wed",
+                                    "syscon";
+                       reg = <0 0x15010000 0 0x1000>;
+                       interrupt-parent = <&gic>;
+                       interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
+               };
+
+               wed1: wed@15011000 {
+                       compatible = "mediatek,mt7986-wed",
+                                    "syscon";
+                       reg = <0 0x15011000 0 0x1000>;
+                       interrupt-parent = <&gic>;
+                       interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+               };
+
                eth: ethernet@15100000 {
                        compatible = "mediatek,mt7986-eth";
                        reg = <0 0x15100000 0 0x80000>;
                                                 <&apmixedsys CLK_APMIXED_SGMPLL>;
                        mediatek,ethsys = <&ethsys>;
                        mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
+                       mediatek,wed-pcie = <&wed_pcie>;
+                       mediatek,wed = <&wed0>, <&wed1>;
                        #reset-cells = <1>;
                        #address-cells = <1>;
                        #size-cells = <0>;
index c19c67a480ae17f0ffdf0dae656c44103057caed..516875cd698f3c7d42b365c124dc9d1f9e41203e 100644 (file)
@@ -73,6 +73,12 @@ static const struct mtk_reg_map mtk_reg_map = {
                .fq_blen        = 0x1b2c,
        },
        .gdm1_cnt               = 0x2400,
+       .gdma_to_ppe            = 0x4444,
+       .ppe_base               = 0x0c00,
+       .wdma_base = {
+               [0]             = 0x2800,
+               [1]             = 0x2c00,
+       },
 };
 
 static const struct mtk_reg_map mt7628_reg_map = {
@@ -126,6 +132,12 @@ static const struct mtk_reg_map mt7986_reg_map = {
                .fq_blen        = 0x472c,
        },
        .gdm1_cnt               = 0x1c00,
+       .gdma_to_ppe            = 0x3333,
+       .ppe_base               = 0x2000,
+       .wdma_base = {
+               [0]             = 0x4800,
+               [1]             = 0x4c00,
+       },
 };
 
 /* strings used by ethtool */
@@ -1894,12 +1906,14 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                bytes += skb->len;
 
                if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+                       reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
                        hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
                        if (hash != MTK_RXD5_FOE_ENTRY)
                                skb_set_hash(skb, jhash_1word(hash, 0),
                                             PKT_HASH_TYPE_L4);
                        rxdcsum = &trxd.rxd3;
                } else {
+                       reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
                        hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
                        if (hash != MTK_RXD4_FOE_ENTRY)
                                skb_set_hash(skb, jhash_1word(hash, 0),
@@ -1913,9 +1927,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                        skb_checksum_none_assert(skb);
                skb->protocol = eth_type_trans(skb, netdev);
 
-               reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
                if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
-                       mtk_ppe_check_skb(eth->ppe, skb, hash);
+                       mtk_ppe_check_skb(eth->ppe[0], skb, hash);
 
                if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
                        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
@@ -2978,21 +2991,25 @@ static int mtk_open(struct net_device *dev)
 
        /* we run 2 netdevs on the same dma ring so we only bring it up once */
        if (!refcount_read(&eth->dma_refcnt)) {
-               u32 gdm_config = MTK_GDMA_TO_PDMA;
+               const struct mtk_soc_data *soc = eth->soc;
+               u32 gdm_config;
+               int i;
 
                err = mtk_start_dma(eth);
                if (err)
                        return err;
 
-               if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
-                       gdm_config = MTK_GDMA_TO_PPE;
+               for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+                       mtk_ppe_start(eth->ppe[i]);
 
+               gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
+                                                 : MTK_GDMA_TO_PDMA;
                mtk_gdm_config(eth, gdm_config);
 
                napi_enable(&eth->tx_napi);
                napi_enable(&eth->rx_napi);
                mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
-               mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+               mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
                refcount_set(&eth->dma_refcnt, 1);
        }
        else
@@ -3030,6 +3047,7 @@ static int mtk_stop(struct net_device *dev)
 {
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;
+       int i;
 
        phylink_stop(mac->phylink);
 
@@ -3057,8 +3075,8 @@ static int mtk_stop(struct net_device *dev)
 
        mtk_dma_free(eth);
 
-       if (eth->soc->offload_version)
-               mtk_ppe_stop(eth->ppe);
+       for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+               mtk_ppe_stop(eth->ppe[i]);
 
        return 0;
 }
@@ -3927,6 +3945,7 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
 
 static int mtk_probe(struct platform_device *pdev)
 {
+       struct resource *res = NULL;
        struct device_node *mac_np;
        struct mtk_eth *eth;
        int err, i;
@@ -4007,20 +4026,31 @@ static int mtk_probe(struct platform_device *pdev)
                }
        }
 
-       for (i = 0;; i++) {
-               struct device_node *np = of_parse_phandle(pdev->dev.of_node,
-                                                         "mediatek,wed", i);
-               static const u32 wdma_regs[] = {
-                       MTK_WDMA0_BASE,
-                       MTK_WDMA1_BASE
-               };
-               void __iomem *wdma;
-
-               if (!np || i >= ARRAY_SIZE(wdma_regs))
-                       break;
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+               if (!res)
+                       return -EINVAL;
+       }
 
-               wdma = eth->base + wdma_regs[i];
-               mtk_wed_add_hw(np, eth, wdma, i);
+       if (eth->soc->offload_version) {
+               for (i = 0;; i++) {
+                       struct device_node *np;
+                       phys_addr_t wdma_phy;
+                       u32 wdma_base;
+
+                       if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
+                               break;
+
+                       np = of_parse_phandle(pdev->dev.of_node,
+                                             "mediatek,wed", i);
+                       if (!np)
+                               break;
+
+                       wdma_base = eth->soc->reg_map->wdma_base[i];
+                       wdma_phy = res ? res->start + wdma_base : 0;
+                       mtk_wed_add_hw(np, eth, eth->base + wdma_base,
+                                      wdma_phy, i);
+               }
        }
 
        for (i = 0; i < 3; i++) {
@@ -4098,10 +4128,19 @@ static int mtk_probe(struct platform_device *pdev)
        }
 
        if (eth->soc->offload_version) {
-               eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
-               if (!eth->ppe) {
-                       err = -ENOMEM;
-                       goto err_free_dev;
+               u32 num_ppe;
+
+               num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+               num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
+               for (i = 0; i < num_ppe; i++) {
+                       u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+
+                       eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
+                                                  eth->soc->offload_version, i);
+                       if (!eth->ppe[i]) {
+                               err = -ENOMEM;
+                               goto err_free_dev;
+                       }
                }
 
                err = mtk_eth_offload_init(eth);
@@ -4194,6 +4233,8 @@ static const struct mtk_soc_data mt7621_data = {
        .required_clks = MT7621_CLKS_BITMAP,
        .required_pctl = false,
        .offload_version = 2,
+       .hash_offset = 2,
+       .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
        .txrx = {
                .txd_size = sizeof(struct mtk_tx_dma),
                .rxd_size = sizeof(struct mtk_rx_dma),
@@ -4212,6 +4253,8 @@ static const struct mtk_soc_data mt7622_data = {
        .required_clks = MT7622_CLKS_BITMAP,
        .required_pctl = false,
        .offload_version = 2,
+       .hash_offset = 2,
+       .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
        .txrx = {
                .txd_size = sizeof(struct mtk_tx_dma),
                .rxd_size = sizeof(struct mtk_rx_dma),
@@ -4229,6 +4272,8 @@ static const struct mtk_soc_data mt7623_data = {
        .required_clks = MT7623_CLKS_BITMAP,
        .required_pctl = true,
        .offload_version = 2,
+       .hash_offset = 2,
+       .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
        .txrx = {
                .txd_size = sizeof(struct mtk_tx_dma),
                .rxd_size = sizeof(struct mtk_rx_dma),
@@ -4260,8 +4305,11 @@ static const struct mtk_soc_data mt7986_data = {
        .reg_map = &mt7986_reg_map,
        .ana_rgc3 = 0x128,
        .caps = MT7986_CAPS,
+       .hw_features = MTK_HW_FEATURES,
        .required_clks = MT7986_CLKS_BITMAP,
        .required_pctl = false,
+       .hash_offset = 4,
+       .foe_entry_size = sizeof(struct mtk_foe_entry),
        .txrx = {
                .txd_size = sizeof(struct mtk_tx_dma_v2),
                .rxd_size = sizeof(struct mtk_rx_dma_v2),
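
For reference, the reg_map values added above give the following I/O bases; a
standalone sketch (editorial addition; offsets copied from the two reg_map
hunks, the 0x400 per-PPE stride from the mtk_probe() change):

  #include <stdint.h>
  #include <stdio.h>

  struct reg_map { uint32_t ppe_base; uint32_t wdma_base[2]; };

  static const struct reg_map v1_map     = { 0x0c00, { 0x2800, 0x2c00 } };
  static const struct reg_map mt7986_map = { 0x2000, { 0x4800, 0x4c00 } };

  int main(void)
  {
          int i;

          /* MT7986 (NETSYS v2) has two PPEs: eth_base + 0x2000 and + 0x2400 */
          for (i = 0; i < 2; i++)
                  printf("ppe%d @ eth_base + 0x%x\n", i,
                         mt7986_map.ppe_base + i * 0x400);
          /* v1 keeps its single PPE at the old MTK_ETH_PPE_BASE offset */
          printf("v1 ppe0 @ eth_base + 0x%x\n", v1_map.ppe_base);
          return 0;
  }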
index ecf85e9ed824023be00881fc27bbd25fc1761353..1efaba5d43377551d43491e0e312fefc4c9f4590 100644 (file)
 #define MTK_GDMA_TCS_EN                BIT(21)
 #define MTK_GDMA_UCS_EN                BIT(20)
 #define MTK_GDMA_TO_PDMA       0x0
-#define MTK_GDMA_TO_PPE                0x4444
 #define MTK_GDMA_DROP_ALL       0x7777
 
 /* Unicast Filter MAC Address Register - Low */
 #define TX_DMA_FPORT_MASK_V2   0xf
 #define TX_DMA_SWC_V2          BIT(30)
 
-#define MTK_WDMA0_BASE         0x2800
-#define MTK_WDMA1_BASE         0x2c00
-
 /* QDMA descriptor txd4 */
 #define TX_DMA_CHKSUM          (0x7 << 29)
 #define TX_DMA_TSO             BIT(28)
@@ -955,6 +951,9 @@ struct mtk_reg_map {
                u32     fq_blen;        /* fq free page buffer length */
        } qdma;
        u32     gdm1_cnt;
+       u32     gdma_to_ppe;
+       u32     ppe_base;
+       u32     wdma_base[2];
 };
 
 /* struct mtk_eth_data -       This is the structure holding all differences
@@ -968,6 +967,8 @@ struct mtk_reg_map {
  *                             the target SoC
  * @required_pctl              A bool value to show whether the SoC requires
  *                             the extra setup for those pins used by GMAC.
+ * @hash_offset                        Flow table hash offset (hw FOE entries per bucket).
+ * @foe_entry_size             FOE table entry size.
  * @txd_size                   Tx DMA descriptor size.
  * @rxd_size                   Rx DMA descriptor size.
  * @rx_irq_done_mask           Rx irq done register mask.
@@ -982,6 +983,8 @@ struct mtk_soc_data {
        u32             required_clks;
        bool            required_pctl;
        u8              offload_version;
+       u8              hash_offset;
+       u16             foe_entry_size;
        netdev_features_t hw_features;
        struct {
                u32     txd_size;
@@ -1111,7 +1114,7 @@ struct mtk_eth {
 
        int                             ip_align;
 
-       struct mtk_ppe                  *ppe;
+       struct mtk_ppe                  *ppe[2];
        struct rhashtable               flow_table;
 
        struct bpf_prog                 __rcu *prog;
@@ -1142,6 +1145,86 @@ struct mtk_mac {
 /* the struct describing the SoC. these are declared in the soc_xyz.c files */
 extern const struct of_device_id of_mtk_match[];
 
+static inline struct mtk_foe_entry *
+mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
+{
+       const struct mtk_soc_data *soc = ppe->eth->soc;
+
+       return ppe->foe_table + hash * soc->foe_entry_size;
+}
+
+static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
+{
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+               return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+
+       return MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
+static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
+{
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+               return MTK_FOE_IB1_BIND_PPPOE_V2;
+
+       return MTK_FOE_IB1_BIND_PPPOE;
+}
+
+static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
+{
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+               return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
+
+       return MTK_FOE_IB1_BIND_VLAN_TAG;
+}
+
+static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
+{
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+               return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
+
+       return MTK_FOE_IB1_BIND_VLAN_LAYER;
+}
+
+static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+               return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+       return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+               return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+       return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
+{
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+               return MTK_FOE_IB1_PACKET_TYPE_V2;
+
+       return MTK_FOE_IB1_PACKET_TYPE;
+}
+
+static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
+{
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+               return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
+
+       return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
+}
+
+static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
+{
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+               return MTK_FOE_IB2_MULTICAST_V2;
+
+       return MTK_FOE_IB2_MULTICAST;
+}
+
 /* read the hardware status register */
 void mtk_stats_update_mac(struct mtk_mac *mac);
 
index cfe804bc8d2055ab8c1eeeff16b5c1b4f7fc2368..25f8738a062bd0d1bae2b92c1a43d70471536a40 100644 (file)
@@ -56,7 +56,7 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
 
 static u32 mtk_eth_timestamp(struct mtk_eth *eth)
 {
-       return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+       return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
 }
 
 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
@@ -88,12 +88,12 @@ static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
                enable * MTK_PPE_CACHE_CTL_EN);
 }
 
-static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
+static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
 {
        u32 hv1, hv2, hv3;
        u32 hash;
 
-       switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
+       switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
                case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
                case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
                        hv1 = e->ipv4.orig.ports;
@@ -122,16 +122,16 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
        hash = (hash >> 24) | ((hash & 0xffffff) << 8);
        hash ^= hv1 ^ hv2 ^ hv3;
        hash ^= hash >> 16;
-       hash <<= 1;
+       hash <<= (ffs(eth->soc->hash_offset) - 1);
        hash &= MTK_PPE_ENTRIES - 1;
 
        return hash;
 }
 
 static inline struct mtk_foe_mac_info *
-mtk_foe_entry_l2(struct mtk_foe_entry *entry)
+mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
 {
-       int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+       int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
 
        if (type == MTK_PPE_PKT_TYPE_BRIDGE)
                return &entry->bridge.l2;
@@ -143,9 +143,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
 }
 
 static inline u32 *
-mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
+mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
 {
-       int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+       int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
 
        if (type == MTK_PPE_PKT_TYPE_BRIDGE)
                return &entry->bridge.ib2;
@@ -156,27 +156,38 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
        return &entry->ipv4.ib2;
 }
 
-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
-                         u8 pse_port, u8 *src_mac, u8 *dest_mac)
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+                         int type, int l4proto, u8 pse_port, u8 *src_mac,
+                         u8 *dest_mac)
 {
        struct mtk_foe_mac_info *l2;
        u32 ports_pad, val;
 
        memset(entry, 0, sizeof(*entry));
 
-       val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
-             FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
-             FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
-             MTK_FOE_IB1_BIND_TTL |
-             MTK_FOE_IB1_BIND_CACHE;
-       entry->ib1 = val;
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+               val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+                     FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
+                     FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+                     MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
+               entry->ib1 = val;
 
-       val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
-             FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
-             FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);
+               val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
+                     FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
+       } else {
+               val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+                     FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
+                     FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+                     MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
+               entry->ib1 = val;
+
+               val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
+                     FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
+                     FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
+       }
 
        if (is_multicast_ether_addr(dest_mac))
-               val |= MTK_FOE_IB2_MULTICAST;
+               val |= mtk_get_ib2_multicast_mask(eth);
 
        ports_pad = 0xa5a5a500 | (l4proto & 0xff);
        if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
@@ -210,24 +221,30 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
        return 0;
 }
 
-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+                              struct mtk_foe_entry *entry, u8 port)
 {
-       u32 *ib2 = mtk_foe_entry_ib2(entry);
-       u32 val;
+       u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+       u32 val = *ib2;
 
-       val = *ib2;
-       val &= ~MTK_FOE_IB2_DEST_PORT;
-       val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+               val &= ~MTK_FOE_IB2_DEST_PORT_V2;
+               val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
+       } else {
+               val &= ~MTK_FOE_IB2_DEST_PORT;
+               val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+       }
        *ib2 = val;
 
        return 0;
 }
 
-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+                                struct mtk_foe_entry *entry, bool egress,
                                 __be32 src_addr, __be16 src_port,
                                 __be32 dest_addr, __be16 dest_port)
 {
-       int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+       int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
        struct mtk_ipv4_tuple *t;
 
        switch (type) {
@@ -262,11 +279,12 @@ int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
        return 0;
 }
 
-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+                                struct mtk_foe_entry *entry,
                                 __be32 *src_addr, __be16 src_port,
                                 __be32 *dest_addr, __be16 dest_port)
 {
-       int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+       int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
        u32 *src, *dest;
        int i;
 
@@ -297,39 +315,41 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
        return 0;
 }
 
-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+                         int port)
 {
-       struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+       struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
 
        l2->etype = BIT(port);
 
-       if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
-               entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+       if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
+               entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
        else
                l2->etype |= BIT(8);
 
-       entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;
+       entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);
 
        return 0;
 }
 
-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+                          int vid)
 {
-       struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+       struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
 
-       switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
+       switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
        case 0:
-               entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
-                             FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+               entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
+                             mtk_prep_ib1_vlan_layer(eth, 1);
                l2->vlan1 = vid;
                return 0;
        case 1:
-               if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
+               if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
                        l2->vlan1 = vid;
                        l2->etype |= BIT(8);
                } else {
                        l2->vlan2 = vid;
-                       entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+                       entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
                }
                return 0;
        default:
@@ -337,34 +357,42 @@ int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
        }
 }
 
-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+                           int sid)
 {
-       struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+       struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
 
-       if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
-           (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
+       if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
+           (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
                l2->etype = ETH_P_PPP_SES;
 
-       entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
+       entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
        l2->pppoe_id = sid;
 
        return 0;
 }
 
-int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
-                          int bss, int wcid)
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+                          int wdma_idx, int txq, int bss, int wcid)
 {
-       struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
-       u32 *ib2 = mtk_foe_entry_ib2(entry);
-
-       *ib2 &= ~MTK_FOE_IB2_PORT_MG;
-       *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
-       if (wdma_idx)
-               *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+       struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+       u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
 
-       l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
-                   FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
-                   FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+               *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
+               *ib2 |=  FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
+                        MTK_FOE_IB2_WDMA_WINFO_V2;
+               l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
+                           FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
+       } else {
+               *ib2 &= ~MTK_FOE_IB2_PORT_MG;
+               *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+               if (wdma_idx)
+                       *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+               l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+                           FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+                           FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+       }
 
        return 0;
 }
@@ -376,14 +404,15 @@ static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
 }
 
 static bool
-mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
+mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
+                    struct mtk_foe_entry *data)
 {
        int type, len;
 
        if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
                return false;
 
-       type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+       type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
        if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
                len = offsetof(struct mtk_foe_entry, ipv6._rsv);
        else
@@ -410,9 +439,10 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 
        hlist_del_init(&entry->list);
        if (entry->hash != 0xffff) {
-               ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
-               ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
-                                                             MTK_FOE_STATE_UNBIND);
+               struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
+
+               hwe->ib1 &= ~MTK_FOE_IB1_STATE;
+               hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_UNBIND);
                dma_wmb();
        }
        entry->hash = 0xffff;
@@ -426,14 +456,12 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 
 static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
 {
-       u16 timestamp;
-       u16 now;
-
-       now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
-       timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+       u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+       u16 now = mtk_eth_timestamp(ppe->eth);
+       u16 timestamp = ib1 & ib1_ts_mask;
 
        if (timestamp > now)
-               return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+               return ib1_ts_mask + 1 - timestamp + now;
        else
                return now - timestamp;
 }
@@ -441,6 +469,7 @@ static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
 static void
 mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
+       u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
        struct mtk_flow_entry *cur;
        struct mtk_foe_entry *hwe;
        struct hlist_node *tmp;
@@ -451,7 +480,7 @@ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
                int cur_idle;
                u32 ib1;
 
-               hwe = &ppe->foe_table[cur->hash];
+               hwe = mtk_foe_get_entry(ppe, cur->hash);
                ib1 = READ_ONCE(hwe->ib1);
 
                if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
@@ -465,16 +494,16 @@ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
                        continue;
 
                idle = cur_idle;
-               entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
-               entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+               entry->data.ib1 &= ~ib1_ts_mask;
+               entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
        }
 }
 
 static void
 mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
+       struct mtk_foe_entry foe = {};
        struct mtk_foe_entry *hwe;
-       struct mtk_foe_entry foe;
 
        spin_lock_bh(&ppe_lock);
 
@@ -486,9 +515,9 @@ mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
        if (entry->hash == 0xffff)
                goto out;
 
-       hwe = &ppe->foe_table[entry->hash];
-       memcpy(&foe, hwe, sizeof(foe));
-       if (!mtk_flow_entry_match(entry, &foe)) {
+       hwe = mtk_foe_get_entry(ppe, entry->hash);
+       memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
+       if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
                entry->hash = 0xffff;
                goto out;
        }
@@ -503,16 +532,22 @@ static void
 __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
                       u16 hash)
 {
+       struct mtk_eth *eth = ppe->eth;
+       u16 timestamp = mtk_eth_timestamp(eth);
        struct mtk_foe_entry *hwe;
-       u16 timestamp;
 
-       timestamp = mtk_eth_timestamp(ppe->eth);
-       timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
-       entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
-       entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+               entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+               entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
+                                        timestamp);
+       } else {
+               entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+               entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
+                                        timestamp);
+       }
 
-       hwe = &ppe->foe_table[hash];
-       memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
+       hwe = mtk_foe_get_entry(ppe, hash);
+       memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size);
        wmb();
        hwe->ib1 = entry->ib1;
 
@@ -539,16 +574,17 @@ mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 
 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
-       int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+       const struct mtk_soc_data *soc = ppe->eth->soc;
+       int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
        u32 hash;
 
        if (type == MTK_PPE_PKT_TYPE_BRIDGE)
                return mtk_foe_entry_commit_l2(ppe, entry);
 
-       hash = mtk_ppe_hash_entry(&entry->data);
+       hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
        entry->hash = 0xffff;
        spin_lock_bh(&ppe_lock);
-       hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
+       hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
        spin_unlock_bh(&ppe_lock);
 
        return 0;
@@ -558,10 +594,11 @@ static void
 mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
                             u16 hash)
 {
+       const struct mtk_soc_data *soc = ppe->eth->soc;
        struct mtk_flow_entry *flow_info;
-       struct mtk_foe_entry foe, *hwe;
+       struct mtk_foe_entry foe = {}, *hwe;
        struct mtk_foe_mac_info *l2;
-       u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+       u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
        int type;
 
        flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
@@ -572,32 +609,34 @@ mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
        flow_info->l2_data.base_flow = entry;
        flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
        flow_info->hash = hash;
-       hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]);
+       hlist_add_head(&flow_info->list,
+                      &ppe->foe_flow[hash / soc->hash_offset]);
        hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
 
-       hwe = &ppe->foe_table[hash];
-       memcpy(&foe, hwe, sizeof(foe));
+       hwe = mtk_foe_get_entry(ppe, hash);
+       memcpy(&foe, hwe, soc->foe_entry_size);
        foe.ib1 &= ib1_mask;
        foe.ib1 |= entry->data.ib1 & ~ib1_mask;
 
-       l2 = mtk_foe_entry_l2(&foe);
+       l2 = mtk_foe_entry_l2(ppe->eth, &foe);
        memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
 
-       type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
+       type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
        if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
                memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
        else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
                l2->etype = ETH_P_IPV6;
 
-       *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
+       *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
 
        __mtk_foe_entry_commit(ppe, &foe, hash);
 }
 
 void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
 {
-       struct hlist_head *head = &ppe->foe_flow[hash / 2];
-       struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+       const struct mtk_soc_data *soc = ppe->eth->soc;
+       struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
+       struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
        struct mtk_flow_entry *entry;
        struct mtk_foe_bridge key = {};
        struct hlist_node *n;
@@ -621,7 +660,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
                        continue;
                }
 
-               if (found || !mtk_flow_entry_match(entry, hwe)) {
+               if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
                        if (entry->hash != 0xffff)
                                entry->hash = 0xffff;
                        continue;
@@ -678,11 +717,13 @@ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 }
 
 struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
-                int version)
+                            int version, int index)
 {
+       const struct mtk_soc_data *soc = eth->soc;
        struct device *dev = eth->dev;
-       struct mtk_foe_entry *foe;
        struct mtk_ppe *ppe;
+       u32 foe_flow_size;
+       void *foe;
 
        ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
        if (!ppe)
@@ -698,14 +739,21 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
        ppe->dev = dev;
        ppe->version = version;
 
-       foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
+       foe = dmam_alloc_coherent(ppe->dev,
+                                 MTK_PPE_ENTRIES * soc->foe_entry_size,
                                  &ppe->foe_phys, GFP_KERNEL);
        if (!foe)
                return NULL;
 
        ppe->foe_table = foe;
 
-       mtk_ppe_debugfs_init(ppe);
+       foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
+                       sizeof(*ppe->foe_flow);
+       ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
+       if (!ppe->foe_flow)
+               return NULL;
+
+       mtk_ppe_debugfs_init(ppe, index);
 
        return ppe;
 }
@@ -715,21 +763,30 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
        static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
        int i, k;
 
-       memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
+       memset(ppe->foe_table, 0,
+              MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);
 
        if (!IS_ENABLED(CONFIG_SOC_MT7621))
                return;
 
        /* skip all entries that cross the 1024 byte boundary */
-       for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
-               for (k = 0; k < ARRAY_SIZE(skip); k++)
-                       ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
+       for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
+               for (k = 0; k < ARRAY_SIZE(skip); k++) {
+                       struct mtk_foe_entry *hwe;
+
+                       hwe = mtk_foe_get_entry(ppe, i + skip[k]);
+                       hwe->ib1 |= MTK_FOE_IB1_STATIC;
+               }
+       }
 }
 
-int mtk_ppe_start(struct mtk_ppe *ppe)
+void mtk_ppe_start(struct mtk_ppe *ppe)
 {
        u32 val;
 
+       if (!ppe)
+               return;
+
        mtk_ppe_init_foe_table(ppe);
        ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
 
@@ -748,6 +805,8 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
                         MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
              FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
                         MTK_PPE_ENTRIES_SHIFT);
+       if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+               val |= MTK_PPE_TB_CFG_INFO_SEL;
        ppe_w32(ppe, MTK_PPE_TB_CFG, val);
 
        ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
@@ -755,15 +814,21 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
 
        mtk_ppe_cache_enable(ppe, true);
 
-       val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
-             MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
-             MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
+       val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
              MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
              MTK_PPE_FLOW_CFG_IP6_6RD |
              MTK_PPE_FLOW_CFG_IP4_NAT |
              MTK_PPE_FLOW_CFG_IP4_NAPT |
              MTK_PPE_FLOW_CFG_IP4_DSLITE |
              MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
+       if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+               val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
+                      MTK_PPE_MD_TOAP_BYP_CRSN1 |
+                      MTK_PPE_MD_TOAP_BYP_CRSN2 |
+                      MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
+       else
+               val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
+                      MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
        ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
 
        val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
@@ -798,7 +863,10 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
 
        ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
 
-       return 0;
+       if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
+               ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
+               ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
+       }
 }
 
 int mtk_ppe_stop(struct mtk_ppe *ppe)
@@ -806,9 +874,15 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
        u32 val;
        int i;
 
-       for (i = 0; i < MTK_PPE_ENTRIES; i++)
-               ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
-                                                  MTK_FOE_STATE_INVALID);
+       if (!ppe)
+               return 0;
+
+       for (i = 0; i < MTK_PPE_ENTRIES; i++) {
+               struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);
+
+               hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
+                                     MTK_FOE_STATE_INVALID);
+       }
 
        mtk_ppe_cache_enable(ppe, false);
 
index 8f786c47b61a22fe938b1fb96e2769287705d5c3..0b7a67a958e4ca9d218ebf33b3e8a48503ff61ff 100644 (file)
@@ -8,8 +8,6 @@
 #include <linux/bitfield.h>
 #include <linux/rhashtable.h>
 
-#define MTK_ETH_PPE_BASE               0xc00
-
 #define MTK_PPE_ENTRIES_SHIFT          3
 #define MTK_PPE_ENTRIES                        (1024 << MTK_PPE_ENTRIES_SHIFT)
 #define MTK_PPE_HASH_MASK              (MTK_PPE_ENTRIES - 1)
 #define MTK_FOE_IB1_UDP                        BIT(30)
 #define MTK_FOE_IB1_STATIC             BIT(31)
 
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB1_BIND_TIMESTAMP_V2  GENMASK(7, 0)
+#define MTK_FOE_IB1_BIND_VLAN_LAYER_V2 GENMASK(16, 14)
+#define MTK_FOE_IB1_BIND_PPPOE_V2      BIT(17)
+#define MTK_FOE_IB1_BIND_VLAN_TAG_V2   BIT(18)
+#define MTK_FOE_IB1_BIND_CACHE_V2      BIT(20)
+#define MTK_FOE_IB1_BIND_TTL_V2                BIT(22)
+#define MTK_FOE_IB1_PACKET_TYPE_V2     GENMASK(27, 23)
+
 enum {
        MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
        MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
@@ -55,14 +62,25 @@ enum {
 
 #define MTK_FOE_IB2_PORT_MG            GENMASK(17, 12)
 
+#define MTK_FOE_IB2_RX_IDX             GENMASK(18, 17)
 #define MTK_FOE_IB2_PORT_AG            GENMASK(23, 18)
 
 #define MTK_FOE_IB2_DSCP               GENMASK(31, 24)
 
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB2_PORT_MG_V2         BIT(7)
+#define MTK_FOE_IB2_DEST_PORT_V2       GENMASK(12, 9)
+#define MTK_FOE_IB2_MULTICAST_V2       BIT(13)
+#define MTK_FOE_IB2_WDMA_WINFO_V2      BIT(19)
+#define MTK_FOE_IB2_PORT_AG_V2         GENMASK(23, 20)
+
 #define MTK_FOE_VLAN2_WINFO_BSS                GENMASK(5, 0)
 #define MTK_FOE_VLAN2_WINFO_WCID       GENMASK(13, 6)
 #define MTK_FOE_VLAN2_WINFO_RING       GENMASK(15, 14)
 
+#define MTK_FOE_WINFO_BSS              GENMASK(5, 0)
+#define MTK_FOE_WINFO_WCID             GENMASK(15, 6)
+
 enum {
        MTK_FOE_STATE_INVALID,
        MTK_FOE_STATE_UNBIND,
@@ -83,6 +101,9 @@ struct mtk_foe_mac_info {
 
        u16 pppoe_id;
        u16 src_mac_lo;
+
+       u16 minfo;
+       u16 winfo;
 };
 
 /* software-only entry type */
@@ -200,7 +221,7 @@ struct mtk_foe_entry {
                struct mtk_foe_ipv4_dslite dslite;
                struct mtk_foe_ipv6 ipv6;
                struct mtk_foe_ipv6_6rd ipv6_6rd;
-               u32 data[19];
+               u32 data[23];
        };
 };
 
@@ -249,6 +270,7 @@ struct mtk_flow_entry {
        };
        u8 type;
        s8 wed_index;
+       u8 ppe_index;
        u16 hash;
        union {
                struct mtk_foe_entry data;
@@ -267,20 +289,22 @@ struct mtk_ppe {
        struct device *dev;
        void __iomem *base;
        int version;
+       char dirname[5];
 
-       struct mtk_foe_entry *foe_table;
+       void *foe_table;
        dma_addr_t foe_phys;
 
        u16 foe_check_time[MTK_PPE_ENTRIES];
-       struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
+       struct hlist_head *foe_flow;
 
        struct rhashtable l2_flows;
 
        void *acct_table;
 };
 
-struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
-int mtk_ppe_start(struct mtk_ppe *ppe);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
+                            int version, int index);
+void mtk_ppe_start(struct mtk_ppe *ppe);
 int mtk_ppe_stop(struct mtk_ppe *ppe);
 
 void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
@@ -305,23 +329,30 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
        __mtk_ppe_check_skb(ppe, skb, hash);
 }
 
-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
-                         u8 pse_port, u8 *src_mac, u8 *dest_mac);
-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig,
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+                         int type, int l4proto, u8 pse_port, u8 *src_mac,
+                         u8 *dest_mac);
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+                              struct mtk_foe_entry *entry, u8 port);
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+                                struct mtk_foe_entry *entry, bool orig,
                                 __be32 src_addr, __be16 src_port,
                                 __be32 dest_addr, __be16 dest_port);
-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+                                struct mtk_foe_entry *entry,
                                 __be32 *src_addr, __be16 src_port,
                                 __be32 *dest_addr, __be16 dest_port);
-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
-int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
-                          int bss, int wcid);
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+                         int port);
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+                          int vid);
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+                           int sid);
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+                          int wdma_idx, int txq, int bss, int wcid);
 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
 
 #endif
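
The new MTK_FOE_WINFO_* fields give NETSYS v2 a dedicated winfo word with a
wider WCID, instead of packing the WLAN info into l2->vlan2 (the tx queue
moves into ib2's MTK_FOE_IB2_RX_IDX). A standalone sketch of the packing
(editorial addition; masks copied from the hunk above, GENMASK/FIELD_PREP
reimplemented for userspace):

  #include <stdint.h>
  #include <stdio.h>

  #define GENMASK(h, l)    ((~0u << (l)) & (~0u >> (31 - (h))))
  #define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

  #define MTK_FOE_VLAN2_WINFO_BSS   GENMASK(5, 0)   /* v1: packed in l2->vlan2 */
  #define MTK_FOE_VLAN2_WINFO_WCID  GENMASK(13, 6)  /* v1: 8-bit station id */
  #define MTK_FOE_VLAN2_WINFO_RING  GENMASK(15, 14)
  #define MTK_FOE_WINFO_BSS         GENMASK(5, 0)   /* v2: dedicated l2->winfo */
  #define MTK_FOE_WINFO_WCID        GENMASK(15, 6)  /* v2: 10-bit station id */

  int main(void)
  {
          int bss = 1, wcid = 300, txq = 2;
          uint32_t v1 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
                        FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
                        FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
          uint32_t v2 = FIELD_PREP(MTK_FOE_WINFO_BSS, bss) |
                        FIELD_PREP(MTK_FOE_WINFO_WCID, wcid);

          /* wcid 300 overflows the 8-bit v1 field but fits v2's 10 bits */
          printf("v1 vlan2 = 0x%04x (wcid truncated)\n", v1);
          printf("v2 winfo = 0x%04x\n", v2);
          return 0;
  }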
index eb0b598f14e4bc2b1f132cb48ad914a707ed71c6..ec49829ab32d7973449d58a644e397ba7be20607 100644 (file)
@@ -79,7 +79,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
        int i;
 
        for (i = 0; i < MTK_PPE_ENTRIES; i++) {
-               struct mtk_foe_entry *entry = &ppe->foe_table[i];
+               struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, i);
                struct mtk_foe_mac_info *l2;
                struct mtk_flow_addr_info ai = {};
                unsigned char h_source[ETH_ALEN];
@@ -187,7 +187,7 @@ mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file)
                           inode->i_private);
 }
 
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index)
 {
        static const struct file_operations fops_all = {
                .open = mtk_ppe_debugfs_foe_open_all,
@@ -195,17 +195,17 @@ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
                .llseek = seq_lseek,
                .release = single_release,
        };
-
        static const struct file_operations fops_bind = {
                .open = mtk_ppe_debugfs_foe_open_bind,
                .read = seq_read,
                .llseek = seq_lseek,
                .release = single_release,
        };
-
        struct dentry *root;
 
-       root = debugfs_create_dir("mtk_ppe", NULL);
+       snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index);
+
+       root = debugfs_create_dir(ppe->dirname, NULL);
        debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
        debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
 
index 5a1fc4bcd7a57ad4cecf5675beb0e6ee1bf1cffb..28bbd1df3e3059c3356f2b1d704602abf1ede7fd 100644 (file)
@@ -52,18 +52,19 @@ static const struct rhashtable_params mtk_flow_ht_params = {
 };
 
 static int
-mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
-                      bool egress)
+mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+                      struct mtk_flow_data *data, bool egress)
 {
-       return mtk_foe_entry_set_ipv4_tuple(foe, egress,
+       return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
                                            data->v4.src_addr, data->src_port,
                                            data->v4.dst_addr, data->dst_port);
 }
 
 static int
-mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
+mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+                      struct mtk_flow_data *data)
 {
-       return mtk_foe_entry_set_ipv6_tuple(foe,
+       return mtk_foe_entry_set_ipv6_tuple(eth, foe,
                                            data->v6.src_addr.s6_addr32, data->src_port,
                                            data->v6.dst_addr.s6_addr32, data->dst_port);
 }
@@ -190,16 +191,29 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
        int pse_port, dsa_port;
 
        if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
-               mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
-                                      info.wcid);
-               pse_port = 3;
+               mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
+                                      info.bss, info.wcid);
+               if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+                       switch (info.wdma_idx) {
+                       case 0:
+                               pse_port = 8;
+                               break;
+                       case 1:
+                               pse_port = 9;
+                               break;
+                       default:
+                               return -EINVAL;
+                       }
+               } else {
+                       pse_port = 3;
+               }
                *wed_index = info.wdma_idx;
                goto out;
        }
 
        dsa_port = mtk_flow_get_dsa_port(&dev);
        if (dsa_port >= 0)
-               mtk_foe_entry_set_dsa(foe, dsa_port);
+               mtk_foe_entry_set_dsa(eth, foe, dsa_port);
 
        if (dev == eth->netdev[0])
                pse_port = 1;
@@ -209,7 +223,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
                return -EOPNOTSUPP;
 
 out:
-       mtk_foe_entry_set_pse_port(foe, pse_port);
+       mtk_foe_entry_set_pse_port(eth, foe, pse_port);
 
        return 0;
 }
@@ -333,9 +347,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
            !is_valid_ether_addr(data.eth.h_dest))
                return -EINVAL;
 
-       err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
-                                   data.eth.h_source,
-                                   data.eth.h_dest);
+       err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
+                                   data.eth.h_source, data.eth.h_dest);
        if (err)
                return err;
 
@@ -360,7 +373,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
                data.v4.src_addr = addrs.key->src;
                data.v4.dst_addr = addrs.key->dst;
 
-               mtk_flow_set_ipv4_addr(&foe, &data, false);
+               mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
        }
 
        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
@@ -371,7 +384,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
                data.v6.src_addr = addrs.key->src;
                data.v6.dst_addr = addrs.key->dst;
 
-               mtk_flow_set_ipv6_addr(&foe, &data);
+               mtk_flow_set_ipv6_addr(eth, &foe, &data);
        }
 
        flow_action_for_each(i, act, &rule->action) {
@@ -401,7 +414,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
        }
 
        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
-               err = mtk_flow_set_ipv4_addr(&foe, &data, true);
+               err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
                if (err)
                        return err;
        }
@@ -413,10 +426,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
                if (data.vlan.proto != htons(ETH_P_8021Q))
                        return -EOPNOTSUPP;
 
-               mtk_foe_entry_set_vlan(&foe, data.vlan.id);
+               mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
        }
        if (data.pppoe.num == 1)
-               mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
+               mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
 
        err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
                                         &wed_index);
@@ -434,7 +447,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
        memcpy(&entry->data, &foe, sizeof(entry->data));
        entry->wed_index = wed_index;
 
-       err = mtk_foe_entry_commit(eth->ppe, entry);
+       err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
        if (err < 0)
                goto free;
 
@@ -446,7 +459,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
        return 0;
 
 clear:
-       mtk_foe_entry_clear(eth->ppe, entry);
+       mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
 free:
        kfree(entry);
        if (wed_index >= 0)
@@ -464,7 +477,7 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
        if (!entry)
                return -ENOENT;
 
-       mtk_foe_entry_clear(eth->ppe, entry);
+       mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
        rhashtable_remove_fast(&eth->flow_table, &entry->node,
                               mtk_flow_ht_params);
        if (entry->wed_index >= 0)
@@ -485,7 +498,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
        if (!entry)
                return -ENOENT;
 
-       idle = mtk_foe_entry_idle_time(eth->ppe, entry);
+       idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
        f->stats.lastused = jiffies - idle * HZ;
 
        return 0;
@@ -537,7 +550,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
        struct flow_block_cb *block_cb;
        flow_setup_cb_t *cb;
 
-       if (!eth->ppe || !eth->ppe->foe_table)
+       if (!eth->soc->offload_version)
                return -EOPNOTSUPP;
 
        if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -589,8 +602,5 @@ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 
 int mtk_eth_offload_init(struct mtk_eth *eth)
 {
-       if (!eth->ppe || !eth->ppe->foe_table)
-               return 0;
-
        return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
 }
index 0c45ea0900f16aba01e4e1d2422e0519b825c5f9..59596d823d8b8a36ed63bdb5b4f882e74a2d4b8f 100644 (file)
@@ -21,6 +21,9 @@
 #define MTK_PPE_GLO_CFG_BUSY                   BIT(31)
 
 #define MTK_PPE_FLOW_CFG                       0x204
+#define MTK_PPE_MD_TOAP_BYP_CRSN0              BIT(1)
+#define MTK_PPE_MD_TOAP_BYP_CRSN1              BIT(2)
+#define MTK_PPE_MD_TOAP_BYP_CRSN2              BIT(3)
 #define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG          BIT(6)
 #define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG          BIT(7)
 #define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE          BIT(8)
@@ -54,6 +57,7 @@
 #define MTK_PPE_TB_CFG_HASH_MODE               GENMASK(15, 14)
 #define MTK_PPE_TB_CFG_SCAN_MODE               GENMASK(17, 16)
 #define MTK_PPE_TB_CFG_HASH_DEBUG              GENMASK(19, 18)
+#define MTK_PPE_TB_CFG_INFO_SEL                        BIT(20)
 
 enum {
        MTK_PPE_SCAN_MODE_DISABLED,
@@ -112,6 +116,8 @@ enum {
 #define MTK_PPE_DEFAULT_CPU_PORT               0x248
 #define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n)      (GENMASK(2, 0) << ((_n) * 4))
 
+#define MTK_PPE_DEFAULT_CPU_PORT1              0x24c
+
 #define MTK_PPE_MTU_DROP                       0x308
 
 #define MTK_PPE_VLAN_MTU0                      0x30c
@@ -141,4 +147,6 @@ enum {
 #define MTK_PPE_MIB_CACHE_CTL_EN               BIT(0)
 #define MTK_PPE_MIB_CACHE_CTL_FLUSH            BIT(2)
 
+#define MTK_PPE_SBW_CTRL                       0x374
+
 #endif
index 29be2fcafea3bc3907fe5b90a8bdb6e7e3997578..099b6e0df619a9cd580d69058504e2a32393a529 100644 (file)
 
 #define MTK_WED_TX_RING_SIZE           2048
 #define MTK_WED_WDMA_RING_SIZE         1024
+#define MTK_WED_MAX_GROUP_SIZE         0x100
+#define MTK_WED_VLD_GROUP_SIZE         0x40
+#define MTK_WED_PER_GROUP_PKT          128
+
+#define MTK_WED_FBUF_SIZE              128
 
 static struct mtk_wed_hw *hw_list[2];
 static DEFINE_MUTEX(hw_lock);
@@ -80,11 +85,31 @@ static struct mtk_wed_hw *
 mtk_wed_assign(struct mtk_wed_device *dev)
 {
        struct mtk_wed_hw *hw;
+       int i;
+
+       if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
+               hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
+               if (!hw)
+                       return NULL;
+
+               if (!hw->wed_dev)
+                       goto out;
+
+               if (hw->version == 1)
+                       return NULL;
+
+               /* MT7986 WED devices do not have any pcie slot restrictions */
+       }
+       /* MT7986 PCIE or AXI */
+       for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+               hw = hw_list[i];
+               if (hw && !hw->wed_dev)
+                       goto out;
+       }
 
-       hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
-       if (!hw || hw->wed_dev)
-               return NULL;
+       return NULL;
 
+out:
        hw->wed_dev = dev;
        return hw;
 }
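
mtk_wed_assign() now encodes two policies: MT7622 (v1) binds a WED instance strictly to the PCIe domain of the WLAN card, while MT7986 (v2) may hand out any free instance, PCIe or AXI. A sketch of the selection order, assuming a two-slot hw_list and flat arrays standing in for the hw structs:

	/* returns the hw_list index to use, or -1 if none is available;
	 * busy[i] models hw_list[i]->wed_dev != NULL, version[i] the
	 * hardware generation
	 */
	static int wed_pick_hw(int is_pcie, int pci_domain,
			       const int busy[2], const int version[2])
	{
		if (is_pcie) {
			if (!busy[pci_domain])
				return pci_domain; /* preferred: same domain */
			if (version[pci_domain] == 1)
				return -1;	/* MT7622: strict binding */
		}

		/* MT7986: PCIe without slot restriction, or AXI */
		for (int i = 0; i < 2; i++)
			if (!busy[i])
				return i;

		return -1;
	}
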
@@ -150,10 +175,17 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
 
                        desc->buf0 = cpu_to_le32(buf_phys);
                        desc->buf1 = cpu_to_le32(buf_phys + txd_size);
-                       ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
-                              FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
-                                         MTK_WED_BUF_SIZE - txd_size) |
-                              MTK_WDMA_DESC_CTRL_LAST_SEG1;
+
+                       if (dev->hw->version == 1)
+                               ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+                                      FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+                                                 MTK_WED_BUF_SIZE - txd_size) |
+                                      MTK_WDMA_DESC_CTRL_LAST_SEG1;
+                       else
+                               ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+                                      FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
+                                                 MTK_WED_BUF_SIZE - txd_size) |
+                                      MTK_WDMA_DESC_CTRL_LAST_SEG0;
                        desc->ctrl = cpu_to_le32(ctrl);
                        desc->info = 0;
                        desc++;
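
The descriptor encoding changes with the generation as well: LEN1 shrinks from 15 to 14 bits and the last-segment marker moves from SEG1 to SEG0. A sketch of the two ctrl layouts, assuming the MTK_WDMA_DESC_CTRL_* macros from mtk_wed_regs.h and FIELD_PREP() from <linux/bitfield.h>:

	/* ctrl word for a two-buffer TXD slot: buf0 carries the TXD of
	 * txd_size bytes, buf1 the remaining payload area
	 */
	static u32 wed_txd_ctrl(u32 txd_size, u32 buf_size, int hw_version)
	{
		if (hw_version == 1)
			return FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
			       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
					  buf_size - txd_size) |
			       MTK_WDMA_DESC_CTRL_LAST_SEG1; /* 15-bit LEN1 */

		return FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
		       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
				  buf_size - txd_size) |
		       MTK_WDMA_DESC_CTRL_LAST_SEG0;	/* 14-bit LEN1 */
	}
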
@@ -209,7 +241,7 @@ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
        if (!ring->desc)
                return;
 
-       dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
+       dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
                          ring->desc, ring->desc_phys);
 }
 
@@ -229,6 +261,14 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
 {
        u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
 
+       if (dev->hw->version == 1)
+               mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+       else
+               mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+                       MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+                       MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+                       MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+
        if (!dev->hw->num_flows)
                mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
 
@@ -236,10 +276,55 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
        wed_r32(dev, MTK_WED_EXT_INT_MASK);
 }
 
+static void
+mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
+{
+       if (enable) {
+               wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+               wed_w32(dev, MTK_WED_TXP_DW1,
+                       FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
+       } else {
+               wed_w32(dev, MTK_WED_TXP_DW1,
+                       FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
+               wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+       }
+}
+
+static void
+mtk_wed_dma_disable(struct mtk_wed_device *dev)
+{
+       wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+               MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+               MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+
+       wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+       wed_clr(dev, MTK_WED_GLO_CFG,
+               MTK_WED_GLO_CFG_TX_DMA_EN |
+               MTK_WED_GLO_CFG_RX_DMA_EN);
+
+       wdma_m32(dev, MTK_WDMA_GLO_CFG,
+                MTK_WDMA_GLO_CFG_TX_DMA_EN |
+                MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+                MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
+
+       if (dev->hw->version == 1) {
+               regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+               wdma_m32(dev, MTK_WDMA_GLO_CFG,
+                        MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
+       } else {
+               wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+                       MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+                       MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+
+               mtk_wed_set_512_support(dev, false);
+       }
+}
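
mtk_wed_dma_disable() leans on the wdma_m32() read-modify-write helper, so only the named WDMA bits are cleared and the rest of GLO_CFG is preserved. A userspace model of that helper, assuming a plain variable in place of the MMIO register:

	#include <stdint.h>

	/* masked 32-bit update: bits set in 'mask' take their value from
	 * 'val', all other bits are left untouched
	 */
	static inline void m32(uint32_t *reg, uint32_t mask, uint32_t val)
	{
		*reg = (*reg & ~mask) | (val & mask);
	}

For example, m32(&glo_cfg, TX_DMA_EN | RX_INFO1_PRERES | RX_INFO2_PRERES, 0) clears all three bits in one access, matching the wdma_m32() call above.
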
+
 static void
 mtk_wed_stop(struct mtk_wed_device *dev)
 {
-       regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+       mtk_wed_dma_disable(dev);
        mtk_wed_set_ext_int(dev, false);
 
        wed_clr(dev, MTK_WED_CTRL,
@@ -252,21 +337,11 @@ mtk_wed_stop(struct mtk_wed_device *dev)
        wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
        wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
        wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
-
-       wed_clr(dev, MTK_WED_GLO_CFG,
-               MTK_WED_GLO_CFG_TX_DMA_EN |
-               MTK_WED_GLO_CFG_RX_DMA_EN);
-       wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
-               MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
-               MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
-       wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
-               MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
 }
 
 static void
 mtk_wed_detach(struct mtk_wed_device *dev)
 {
-       struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
        struct mtk_wed_hw *hw = dev->hw;
 
        mutex_lock(&hw_lock);
@@ -281,9 +356,14 @@ mtk_wed_detach(struct mtk_wed_device *dev)
        mtk_wed_free_buffer(dev);
        mtk_wed_free_tx_rings(dev);
 
-       if (of_dma_is_coherent(wlan_node))
-               regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
-                                  BIT(hw->index), BIT(hw->index));
+       if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
+               struct device_node *wlan_node;
+
+               wlan_node = dev->wlan.pci_dev->dev.of_node;
+               if (of_dma_is_coherent(wlan_node) && hw->hifsys)
+                       regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+                                          BIT(hw->index), BIT(hw->index));
+       }
 
        if (!hw_list[!hw->index]->wed_dev &&
            hw->eth->dma_dev != hw->eth->dev)
@@ -296,14 +376,76 @@ mtk_wed_detach(struct mtk_wed_device *dev)
        mutex_unlock(&hw_lock);
 }
 
+#define PCIE_BASE_ADDR0                0x11280000
+static void
+mtk_wed_bus_init(struct mtk_wed_device *dev)
+{
+       switch (dev->wlan.bus_type) {
+       case MTK_WED_BUS_PCIE: {
+               struct device_node *np = dev->hw->eth->dev->of_node;
+               struct regmap *regs;
+
+               regs = syscon_regmap_lookup_by_phandle(np,
+                                                      "mediatek,wed-pcie");
+               if (IS_ERR(regs))
+                       break;
+
+               regmap_update_bits(regs, 0, BIT(0), BIT(0));
+
+               wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
+                       FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
+
+               /* pcie interrupt control: polarity/source selection */
+               wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+                       MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+                       FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
+               wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+
+               wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
+               wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+
+               /* pcie interrupt status trigger register */
+               wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+               wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
+
+               /* polarity setting */
+               wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+                       MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
+               break;
+       }
+       case MTK_WED_BUS_AXI:
+               wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+                       MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
+                       FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
+               break;
+       default:
+               break;
+       }
+}
+
+static void
+mtk_wed_set_wpdma(struct mtk_wed_device *dev)
+{
+       if (dev->hw->version == 1) {
+               wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+       } else {
+               mtk_wed_bus_init(dev);
+
+               wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
+               wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
+               wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+               wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
+       }
+}
+
 static void
 mtk_wed_hw_init_early(struct mtk_wed_device *dev)
 {
        u32 mask, set;
-       u32 offset;
 
        mtk_wed_stop(dev);
        mtk_wed_reset(dev, MTK_WED_RESET_WED);
+       mtk_wed_set_wpdma(dev);
 
        mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
               MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
@@ -313,14 +455,33 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
              MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
        wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
 
-       wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
+       if (dev->hw->version == 1) {
+               u32 offset = dev->hw->index ? 0x04000400 : 0;
 
-       offset = dev->hw->index ? 0x04000400 : 0;
-       wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
-       wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
+               wdma_set(dev, MTK_WDMA_GLO_CFG,
+                        MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+                        MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
+                        MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
 
-       wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
-       wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+               wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
+               wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
+               wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
+                       MTK_PCIE_BASE(dev->hw->index));
+       } else {
+               wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
+               wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
+               wed_w32(dev, MTK_WED_WDMA_OFFSET0,
+                       FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
+                                  MTK_WDMA_INT_STATUS) |
+                       FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
+                                  MTK_WDMA_GLO_CFG));
+
+               wed_w32(dev, MTK_WED_WDMA_OFFSET1,
+                       FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
+                                  MTK_WDMA_RING_TX(0)) |
+                       FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
+                                  MTK_WDMA_RING_RX(0)));
+       }
 }
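
Instead of the v1 PCIe mirror trick, v2 hardware is told directly where the relevant WDMA registers live: two 16-bit register offsets are packed per 32-bit word. A sketch of the OFFSET0 packing, reusing only constants from the mtk_wed_regs.h hunks later in this series:

	/* with MTK_WDMA_INT_STATUS = 0x220 in the low half and
	 * MTK_WDMA_GLO_CFG = 0x204 in the high half, this evaluates
	 * to 0x02040220
	 */
	static u32 wed_wdma_ofst0(void)
	{
		return FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
				  MTK_WDMA_INT_STATUS) |
		       FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
				  MTK_WDMA_GLO_CFG);
	}
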
 
 static void
@@ -340,37 +501,65 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
 
        wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
 
-       wed_w32(dev, MTK_WED_TX_BM_TKID,
-               FIELD_PREP(MTK_WED_TX_BM_TKID_START,
-                          dev->wlan.token_start) |
-               FIELD_PREP(MTK_WED_TX_BM_TKID_END,
-                          dev->wlan.token_start + dev->wlan.nbuf - 1));
-
        wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
 
-       wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
-               FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
-               MTK_WED_TX_BM_DYN_THR_HI);
+       if (dev->hw->version == 1) {
+               wed_w32(dev, MTK_WED_TX_BM_TKID,
+                       FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+                                  dev->wlan.token_start) |
+                       FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+                                  dev->wlan.token_start +
+                                  dev->wlan.nbuf - 1));
+               wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+                       FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+                       MTK_WED_TX_BM_DYN_THR_HI);
+       } else {
+               wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
+                       FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+                                  dev->wlan.token_start) |
+                       FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+                                  dev->wlan.token_start +
+                                  dev->wlan.nbuf - 1));
+               wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+                       FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
+                       MTK_WED_TX_BM_DYN_THR_HI_V2);
+               wed_w32(dev, MTK_WED_TX_TKID_CTRL,
+                       MTK_WED_TX_TKID_CTRL_PAUSE |
+                       FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
+                                  dev->buf_ring.size / 128) |
+                       FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
+                                  dev->buf_ring.size / 128));
+               wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+                       FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+                       MTK_WED_TX_TKID_DYN_THR_HI);
+       }
 
        mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
 
-       wed_set(dev, MTK_WED_CTRL,
-               MTK_WED_CTRL_WED_TX_BM_EN |
-               MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+       if (dev->hw->version == 1)
+               wed_set(dev, MTK_WED_CTRL,
+                       MTK_WED_CTRL_WED_TX_BM_EN |
+                       MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+       else
+               wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
 
        wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
 }
 
 static void
-mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
+mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size)
 {
+       void *head = (void *)ring->desc;
        int i;
 
        for (i = 0; i < size; i++) {
-               desc[i].buf0 = 0;
-               desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
-               desc[i].buf1 = 0;
-               desc[i].info = 0;
+               struct mtk_wdma_desc *desc;
+
+               desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
+               desc->buf0 = 0;
+               desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+               desc->buf1 = 0;
+               desc->info = 0;
        }
 }
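
Since v2 WDMA descriptors are larger than struct mtk_wdma_desc, the reset loop now walks the ring by desc_size rather than by the struct size and only initializes the common leading fields. A sketch of the stride arithmetic, assuming the same layout rule:

	/* entry i of a ring with 'desc_size'-byte hardware descriptors
	 * starts at base + i * desc_size; the first 16 bytes of every
	 * entry keep the v1 mtk_wdma_desc layout
	 */
	static struct mtk_wdma_desc *ring_entry(void *base, u32 desc_size,
						int i)
	{
		return (struct mtk_wdma_desc *)((char *)base + i * desc_size);
	}
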
 
@@ -421,12 +610,10 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
-               struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
-
-               if (!desc)
+               if (!dev->tx_ring[i].desc)
                        continue;
 
-               mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
+               mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE);
        }
 
        if (mtk_wed_poll_busy(dev))
@@ -483,16 +670,16 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
 
 static int
 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
-                  int size)
+                  int size, u32 desc_size)
 {
-       ring->desc = dma_alloc_coherent(dev->hw->dev,
-                                       size * sizeof(*ring->desc),
+       ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
                                        &ring->desc_phys, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;
 
+       ring->desc_size = desc_size;
        ring->size = size;
-       mtk_wed_ring_reset(ring->desc, size);
+       mtk_wed_ring_reset(ring, size);
 
        return 0;
 }
@@ -500,9 +687,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
 static int
 mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
 {
+       u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
        struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
 
-       if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
+       if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
                return -ENOMEM;
 
        wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
@@ -520,43 +708,63 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
 }
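
The desc_size computation above encodes the size difference directly: v2 WDMA descriptors are exactly twice the v1 size. The same rule as an explicit helper, assuming version is restricted to 1 or 2:

	/* 16 bytes on v1, 32 bytes on v2; the first half of a v2
	 * descriptor matches the v1 layout
	 */
	static u32 wdma_desc_size(int hw_version)
	{
		return sizeof(struct mtk_wdma_desc) * hw_version;
	}
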
 
 static void
-mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
 {
-       u32 wdma_mask;
-       u32 val;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
-               if (!dev->tx_wdma[i].desc)
-                       mtk_wed_wdma_ring_setup(dev, i, 16);
-
-       wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
-
-       mtk_wed_hw_init(dev);
+       u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
 
+       /* WED control register setup */
        wed_set(dev, MTK_WED_CTRL,
                MTK_WED_CTRL_WDMA_INT_AGENT_EN |
                MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
                MTK_WED_CTRL_WED_TX_BM_EN |
                MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
 
-       wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
+       if (dev->hw->version == 1) {
+               wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
+                       MTK_WED_PCIE_INT_TRIGGER_STATUS);
 
-       wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
-               MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
-               MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
+               wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
+                       MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
+                       MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
 
-       wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
-               MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+               wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+       } else {
+               /* initial tx interrupt trigger */
+               wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
+                       MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
+                       MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
+                       MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
+                       MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
+                       FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
+                                  dev->wlan.tx_tbit[0]) |
+                       FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
+                                  dev->wlan.tx_tbit[1]));
+
+               /* initial txfree interrupt trigger */
+               wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
+                       MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
+                       MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
+                       FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
+                                  dev->wlan.txfree_tbit));
+
+               wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
+               wed_set(dev, MTK_WED_WDMA_INT_CTRL,
+                       FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
+                                  dev->wdma_idx));
+       }
 
        wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
-       wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
 
        wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
        wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
-
        wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
        wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+}
+
+static void
+mtk_wed_dma_enable(struct mtk_wed_device *dev)
+{
+       wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
 
        wed_set(dev, MTK_WED_GLO_CFG,
                MTK_WED_GLO_CFG_TX_DMA_EN |
@@ -567,16 +775,54 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
        wed_set(dev, MTK_WED_WDMA_GLO_CFG,
                MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
 
+       wdma_set(dev, MTK_WDMA_GLO_CFG,
+                MTK_WDMA_GLO_CFG_TX_DMA_EN |
+                MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+                MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+       if (dev->hw->version == 1) {
+               wdma_set(dev, MTK_WDMA_GLO_CFG,
+                        MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+       } else {
+               wed_set(dev, MTK_WED_WPDMA_CTRL,
+                       MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+
+               wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+                       MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+                       MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+
+               wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+                       MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+                       MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+       }
+}
+
+static void
+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+               if (!dev->tx_wdma[i].desc)
+                       mtk_wed_wdma_ring_setup(dev, i, 16);
+
+       mtk_wed_hw_init(dev);
+       mtk_wed_configure_irq(dev, irq_mask);
+
        mtk_wed_set_ext_int(dev, true);
-       val = dev->wlan.wpdma_phys |
-             MTK_PCIE_MIRROR_MAP_EN |
-             FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
 
-       if (dev->hw->index)
-               val |= BIT(1);
-       val |= BIT(0);
-       regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+       if (dev->hw->version == 1) {
+               u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
+                         FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
+                                    dev->hw->index);
+
+               val |= BIT(0) | (BIT(1) * !!dev->hw->index);
+               regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+       } else {
+               mtk_wed_set_512_support(dev, true);
+       }
 
+       mtk_wed_dma_enable(dev);
        dev->running = true;
 }
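
The branchless "val |= BIT(0) | (BIT(1) * !!dev->hw->index);" in mtk_wed_start() packs two flags at once; an expanded, equivalent form for readability:

	/* BIT(0) enables the PCIe mirror map, BIT(1) selects the second
	 * WED instance; multiplying BIT(1) by !!index sets it only for
	 * WED1
	 */
	u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
		  FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);

	val |= BIT(0);
	if (dev->hw->index)
		val |= BIT(1);
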
 
@@ -585,12 +831,14 @@ mtk_wed_attach(struct mtk_wed_device *dev)
        __releases(RCU)
 {
        struct mtk_wed_hw *hw;
+       struct device *device;
        int ret = 0;
 
        RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                         "mtk_wed_attach without holding the RCU read lock");
 
-       if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
+       if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
+            pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
            !try_module_get(THIS_MODULE))
                ret = -ENODEV;
 
@@ -608,7 +856,11 @@ mtk_wed_attach(struct mtk_wed_device *dev)
                goto out;
        }
 
-       dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
+       device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
+               ? &dev->wlan.pci_dev->dev
+               : &dev->wlan.platform_dev->dev;
+       dev_info(device, "attaching wed device %d version %d\n",
+                hw->index, hw->version);
 
        dev->hw = hw;
        dev->dev = hw->dev;
@@ -626,7 +878,9 @@ mtk_wed_attach(struct mtk_wed_device *dev)
        }
 
        mtk_wed_hw_init_early(dev);
-       regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
+       if (hw->hifsys)
+               regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+                                  BIT(hw->index), 0);
 
 out:
        mutex_unlock(&hw_lock);
@@ -653,7 +907,8 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
 
        BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
 
-       if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
+       if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
+                              sizeof(*ring->desc)))
                return -ENOMEM;
 
        if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
@@ -680,21 +935,21 @@ static int
 mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
 {
        struct mtk_wed_ring *ring = &dev->txfree_ring;
-       int i;
+       int i, index = dev->hw->version == 1;
 
        /*
         * For txfree event handling, the same DMA ring is shared between WED
         * and WLAN. The WLAN driver accesses the ring index registers through
         * WED
         */
-       ring->reg_base = MTK_WED_RING_RX(1);
+       ring->reg_base = MTK_WED_RING_RX(index);
        ring->wpdma = regs;
 
        for (i = 0; i < 12; i += 4) {
                u32 val = readl(regs + i);
 
-               wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
-               wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
+               wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
+               wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
        }
 
        return 0;
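
The boolean comparison that feeds 'index' above does the ring selection: it yields 1 on v1 hardware and 0 on v2, so v1 keeps the historical RX(1) slot for the txfree ring while v2 moves it to RX(0). The same rule as an explicit helper:

	/* txfree ring slot: RX(1) on v1, RX(0) on v2 */
	static inline int txfree_rx_slot(int hw_version)
	{
		return hw_version == 1 ? 1 : 0;
	}
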
@@ -703,11 +958,19 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
 static u32
 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
 {
-       u32 val;
+       u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+       if (dev->hw->version == 1)
+               ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+       else
+               ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+                           MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+                           MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+                           MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
 
        val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
        wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
-       val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+       val &= ext_mask;
        if (!dev->hw->num_flows)
                val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
        if (val && net_ratelimit())
@@ -782,7 +1045,8 @@ out:
 }
 
 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
-                   void __iomem *wdma, int index)
+                   void __iomem *wdma, phys_addr_t wdma_phy,
+                   int index)
 {
        static const struct mtk_wed_ops wed_ops = {
                .attach = mtk_wed_attach,
@@ -829,26 +1093,33 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw)
                goto unlock;
+
        hw->node = np;
        hw->regs = regs;
        hw->eth = eth;
        hw->dev = &pdev->dev;
+       hw->wdma_phy = wdma_phy;
        hw->wdma = wdma;
        hw->index = index;
        hw->irq = irq;
-       hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
-                                                    "mediatek,pcie-mirror");
-       hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
-                                                    "mediatek,hifsys");
-       if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
-               kfree(hw);
-               goto unlock;
-       }
+       hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+
+       if (hw->version == 1) {
+               hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+                               "mediatek,pcie-mirror");
+               hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+                               "mediatek,hifsys");
+               if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
+                       kfree(hw);
+                       goto unlock;
+               }
 
-       if (!index) {
-               regmap_write(hw->mirror, 0, 0);
-               regmap_write(hw->mirror, 4, 0);
+               if (!index) {
+                       regmap_write(hw->mirror, 0, 0);
+                       regmap_write(hw->mirror, 4, 0);
+               }
        }
+
        mtk_wed_hw_add_debugfs(hw);
 
        hw_list[index] = hw;
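
The hardware generation is derived once, from the SoC capability bits, and everything version-specific keys off it; the v1-only pcie-mirror and hifsys syscons are now looked up conditionally, which is why later users such as mtk_wed_detach() test hw->hifsys before touching it. The probe rule as a helper, assuming MTK_HAS_CAPS()/MTK_NETSYS_V2 from mtk_eth_soc.h:

	/* WED generation: NETSYS v2 SoCs (MT7986) get v2, MT7622 stays
	 * on v1
	 */
	static u8 wed_hw_version(const struct mtk_eth *eth)
	{
		return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
	}
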
index 981ec613f4b0b589ea67a4d076eba5ed095db8e3..ae420ca01a488047fe99978df54c22d68fd7427a 100644 (file)
@@ -18,11 +18,13 @@ struct mtk_wed_hw {
        struct regmap *hifsys;
        struct device *dev;
        void __iomem *wdma;
+       phys_addr_t wdma_phy;
        struct regmap *mirror;
        struct dentry *debugfs_dir;
        struct mtk_wed_device *wed_dev;
        u32 debugfs_reg;
        u32 num_flows;
+       u8 version;
        char dirname[5];
        int irq;
        int index;
@@ -101,14 +103,16 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
 }
 
 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
-                   void __iomem *wdma, int index);
+                   void __iomem *wdma, phys_addr_t wdma_phy,
+                   int index);
 void mtk_wed_exit(void);
 int mtk_wed_flow_add(int index);
 void mtk_wed_flow_remove(int index);
 #else
 static inline void
 mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
-              void __iomem *wdma, int index)
+              void __iomem *wdma, phys_addr_t wdma_phy,
+              int index)
 {
 }
 static inline void
index a81d3fd1a439de0f7da8ab644cb2258a89d79269..f420f187e837352afb05ef9a2df42e1ed340c138 100644 (file)
@@ -116,6 +116,9 @@ wed_txinfo_show(struct seq_file *s, void *data)
                DUMP_WDMA(WDMA_GLO_CFG),
                DUMP_WDMA_RING(WDMA_RING_RX(0)),
                DUMP_WDMA_RING(WDMA_RING_RX(1)),
+
+               DUMP_STR("TX FREE"),
+               DUMP_WED(WED_RX_MIB(0)),
        };
        struct mtk_wed_hw *hw = s->private;
        struct mtk_wed_device *dev = hw->wed_dev;
index 0a0465ea58b4682098a3d9e0e7f3d5bcec360d4f..e270fb33614320dbcfa9eb1a355d853084367ec9 100644 (file)
@@ -5,6 +5,7 @@
 #define __MTK_WED_REGS_H
 
 #define MTK_WDMA_DESC_CTRL_LEN1                        GENMASK(14, 0)
+#define MTK_WDMA_DESC_CTRL_LEN1_V2             GENMASK(13, 0)
 #define MTK_WDMA_DESC_CTRL_LAST_SEG1           BIT(15)
 #define MTK_WDMA_DESC_CTRL_BURST               BIT(16)
 #define MTK_WDMA_DESC_CTRL_LEN0                        GENMASK(29, 16)
@@ -41,6 +42,7 @@ struct mtk_wdma_desc {
 #define MTK_WED_CTRL_RESERVE_EN                                BIT(12)
 #define MTK_WED_CTRL_RESERVE_BUSY                      BIT(13)
 #define MTK_WED_CTRL_FINAL_DIDX_READ                   BIT(24)
+#define MTK_WED_CTRL_ETH_DMAD_FMT                      BIT(25)
 #define MTK_WED_CTRL_MIB_READ_CLEAR                    BIT(28)
 
 #define MTK_WED_EXT_INT_STATUS                         0x020
@@ -57,7 +59,8 @@ struct mtk_wdma_desc {
 #define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN     BIT(19)
 #define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
 #define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR       BIT(21)
-#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR       BIT(22)
+#define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR       BIT(22)
+#define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR       BIT(23)
 #define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE      BIT(24)
 #define MTK_WED_EXT_INT_STATUS_ERROR_MASK              (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
                                                         MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
@@ -65,8 +68,7 @@ struct mtk_wdma_desc {
                                                         MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
                                                         MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
                                                         MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
-                                                        MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
-                                                        MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
+                                                        MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
 
 #define MTK_WED_EXT_INT_MASK                           0x028
 
@@ -81,6 +83,7 @@ struct mtk_wdma_desc {
 #define MTK_WED_TX_BM_BASE                             0x084
 
 #define MTK_WED_TX_BM_TKID                             0x088
+#define MTK_WED_TX_BM_TKID_V2                          0x0c8
 #define MTK_WED_TX_BM_TKID_START                       GENMASK(15, 0)
 #define MTK_WED_TX_BM_TKID_END                         GENMASK(31, 16)
 
@@ -94,7 +97,25 @@ struct mtk_wdma_desc {
 
 #define MTK_WED_TX_BM_DYN_THR                          0x0a0
 #define MTK_WED_TX_BM_DYN_THR_LO                       GENMASK(6, 0)
+#define MTK_WED_TX_BM_DYN_THR_LO_V2                    GENMASK(8, 0)
 #define MTK_WED_TX_BM_DYN_THR_HI                       GENMASK(22, 16)
+#define MTK_WED_TX_BM_DYN_THR_HI_V2                    GENMASK(24, 16)
+
+#define MTK_WED_TX_TKID_CTRL                           0x0c0
+#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM               GENMASK(6, 0)
+#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM               GENMASK(22, 16)
+#define MTK_WED_TX_TKID_CTRL_PAUSE                     BIT(28)
+
+#define MTK_WED_TX_TKID_DYN_THR                                0x0e0
+#define MTK_WED_TX_TKID_DYN_THR_LO                     GENMASK(6, 0)
+#define MTK_WED_TX_TKID_DYN_THR_HI                     GENMASK(22, 16)
+
+#define MTK_WED_TXP_DW0                                        0x120
+#define MTK_WED_TXP_DW1                                        0x124
+#define MTK_WED_WPDMA_WRITE_TXP                                GENMASK(31, 16)
+#define MTK_WED_TXDP_CTRL                              0x130
+#define MTK_WED_TXDP_DW9_OVERWR                                BIT(9)
+#define MTK_WED_RX_BM_TKID_MIB                         0x1cc
 
 #define MTK_WED_INT_STATUS                             0x200
 #define MTK_WED_INT_MASK                               0x204
@@ -125,6 +146,7 @@ struct mtk_wdma_desc {
 #define MTK_WED_RESET_IDX_RX                           GENMASK(17, 16)
 
 #define MTK_WED_TX_MIB(_n)                             (0x2a0 + (_n) * 4)
+#define MTK_WED_RX_MIB(_n)                             (0x2e0 + (_n) * 4)
 
 #define MTK_WED_RING_TX(_n)                            (0x300 + (_n) * 0x10)
 
@@ -155,21 +177,64 @@ struct mtk_wdma_desc {
 #define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP                        BIT(29)
 #define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET             BIT(31)
 
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC       BIT(4)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC       BIT(5)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC       BIT(6)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC       BIT(7)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT     BIT(19)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR             BIT(21)
+#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP             BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV         BIT(28)
+
 #define MTK_WED_WPDMA_RESET_IDX                                0x50c
 #define MTK_WED_WPDMA_RESET_IDX_TX                     GENMASK(3, 0)
 #define MTK_WED_WPDMA_RESET_IDX_RX                     GENMASK(17, 16)
 
+#define MTK_WED_WPDMA_CTRL                             0x518
+#define MTK_WED_WPDMA_CTRL_SDL1_FIXED                  BIT(31)
+
 #define MTK_WED_WPDMA_INT_CTRL                         0x520
 #define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV               BIT(21)
+#define MTK_WED_WPDMA_INT_CTRL_SIG_SRC                 BIT(22)
+#define MTK_WED_WPDMA_INT_CTRL_SRC_SEL                 GENMASK(17, 16)
 
 #define MTK_WED_WPDMA_INT_MASK                         0x524
 
+#define MTK_WED_WPDMA_INT_CTRL_TX                      0x530
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN             BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR            BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG           GENMASK(6, 2)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN             BIT(8)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR            BIT(9)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG           GENMASK(14, 10)
+
+#define MTK_WED_WPDMA_INT_CTRL_RX                      0x534
+
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE                 0x538
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN         BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR                BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG       GENMASK(6, 2)
+
 #define MTK_WED_PCIE_CFG_BASE                          0x560
 
+#define MTK_WED_PCIE_CFG_INTM                          0x564
+#define MTK_WED_PCIE_CFG_MSIS                          0x568
 #define MTK_WED_PCIE_INT_TRIGGER                       0x570
 #define MTK_WED_PCIE_INT_TRIGGER_STATUS                        BIT(16)
 
+#define MTK_WED_PCIE_INT_CTRL                          0x57c
+#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA              BIT(20)
+#define MTK_WED_PCIE_INT_CTRL_SRC_SEL                  GENMASK(17, 16)
+#define MTK_WED_PCIE_INT_CTRL_POLL_EN                  GENMASK(13, 12)
+
 #define MTK_WED_WPDMA_CFG_BASE                         0x580
+#define MTK_WED_WPDMA_CFG_INT_MASK                     0x584
+#define MTK_WED_WPDMA_CFG_TX                           0x588
+#define MTK_WED_WPDMA_CFG_TX_FREE                      0x58c
 
 #define MTK_WED_WPDMA_TX_MIB(_n)                       (0x5a0 + (_n) * 4)
 #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n)              (0x5d0 + (_n) * 4)
@@ -203,15 +268,24 @@ struct mtk_wdma_desc {
 #define MTK_WED_WDMA_RESET_IDX_RX                      GENMASK(17, 16)
 #define MTK_WED_WDMA_RESET_IDX_DRV                     GENMASK(25, 24)
 
+#define MTK_WED_WDMA_INT_CLR                           0xa24
+#define MTK_WED_WDMA_INT_CLR_RX_DONE                   GENMASK(17, 16)
+
 #define MTK_WED_WDMA_INT_TRIGGER                       0xa28
 #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE               GENMASK(17, 16)
 
 #define MTK_WED_WDMA_INT_CTRL                          0xa2c
 #define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL             GENMASK(17, 16)
 
+#define MTK_WED_WDMA_CFG_BASE                          0xaa0
 #define MTK_WED_WDMA_OFFSET0                           0xaa4
 #define MTK_WED_WDMA_OFFSET1                           0xaa8
 
+#define MTK_WED_WDMA_OFST0_GLO_INTS                    GENMASK(15, 0)
+#define MTK_WED_WDMA_OFST0_GLO_CFG                     GENMASK(31, 16)
+#define MTK_WED_WDMA_OFST1_TX_CTRL                     GENMASK(15, 0)
+#define MTK_WED_WDMA_OFST1_RX_CTRL                     GENMASK(31, 16)
+
 #define MTK_WED_WDMA_RX_MIB(_n)                                (0xae0 + (_n) * 4)
 #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n)                        (0xae8 + (_n) * 4)
 #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n)              (0xaf0 + (_n) * 4)
@@ -221,15 +295,22 @@ struct mtk_wdma_desc {
 #define MTK_WED_RING_OFS_CPU_IDX                       0x08
 #define MTK_WED_RING_OFS_DMA_IDX                       0x0c
 
+#define MTK_WDMA_RING_TX(_n)                           (0x000 + (_n) * 0x10)
 #define MTK_WDMA_RING_RX(_n)                           (0x100 + (_n) * 0x10)
 
 #define MTK_WDMA_GLO_CFG                               0x204
-#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES                        GENMASK(28, 26)
+#define MTK_WDMA_GLO_CFG_TX_DMA_EN                     BIT(0)
+#define MTK_WDMA_GLO_CFG_RX_DMA_EN                     BIT(2)
+#define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES               BIT(26)
+#define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES               BIT(27)
+#define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES               BIT(28)
 
 #define MTK_WDMA_RESET_IDX                             0x208
 #define MTK_WDMA_RESET_IDX_TX                          GENMASK(3, 0)
 #define MTK_WDMA_RESET_IDX_RX                          GENMASK(17, 16)
 
+#define MTK_WDMA_INT_STATUS                            0x220
+
 #define MTK_WDMA_INT_MASK                              0x228
 #define MTK_WDMA_INT_MASK_TX_DONE                      GENMASK(3, 0)
 #define MTK_WDMA_INT_MASK_RX_DONE                      GENMASK(17, 16)
index 7e00cca067090a9d78a5eae612a1e656f14168b8..4450c8b7a1cb3310f75aed9722b4edf5ac583e6a 100644 (file)
 struct mtk_wed_hw;
 struct mtk_wdma_desc;
 
+enum mtk_wed_bus_tye {
+       MTK_WED_BUS_PCIE,
+       MTK_WED_BUS_AXI,
+};
+
 struct mtk_wed_ring {
        struct mtk_wdma_desc *desc;
        dma_addr_t desc_phys;
+       u32 desc_size;
        int size;
 
        u32 reg_base;
@@ -42,13 +48,24 @@ struct mtk_wed_device {
 
        /* filled by driver: */
        struct {
-               struct pci_dev *pci_dev;
+               union {
+                       struct platform_device *platform_dev;
+                       struct pci_dev *pci_dev;
+               };
+               enum mtk_wed_bus_tye bus_type;
 
                u32 wpdma_phys;
+               u32 wpdma_int;
+               u32 wpdma_mask;
+               u32 wpdma_tx;
+               u32 wpdma_txfree;
 
                u16 token_start;
                unsigned int nbuf;
 
+               u8 tx_tbit[MTK_WED_TX_QUEUES];
+               u8 txfree_tbit;
+
                u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
                int (*offload_enable)(struct mtk_wed_device *wed);
                void (*offload_disable)(struct mtk_wed_device *wed);
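
For reference, a hypothetical consumer-side sketch of how a v2 (MT7986) PCIe WLAN driver could fill the new fields before attaching; every local name and register offset below is illustrative only, and just the struct members come from the header above:

	/* illustrative values only: real offsets and trigger bits are
	 * defined by the WLAN hardware (e.g. MT7915E), not this header
	 */
	static void wlan_fill_wed(struct mtk_wed_device *wed,
				  struct pci_dev *pdev, u32 wpdma_base)
	{
		wed->wlan.bus_type = MTK_WED_BUS_PCIE;
		wed->wlan.pci_dev = pdev;
		wed->wlan.wpdma_int = wpdma_base + 0x200;    /* int status */
		wed->wlan.wpdma_mask = wpdma_base + 0x204;   /* int mask */
		wed->wlan.wpdma_tx = wpdma_base + 0x300;     /* tx ring base */
		wed->wlan.wpdma_txfree = wpdma_base + 0x310; /* txfree ring */
		wed->wlan.tx_tbit[0] = 0;   /* "tx ring 0 done" status bit */
		wed->wlan.tx_tbit[1] = 1;   /* "tx ring 1 done" status bit */
		wed->wlan.txfree_tbit = 2;  /* "txfree done" status bit */
	}
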