mediatek: add upstream patches
author    Lorenzo Bianconi <lorenzo@kernel.org>
Tue, 7 Nov 2023 10:57:44 +0000 (11:57 +0100)
committer David Bauer <mail@david-bauer.net>
Thu, 30 Nov 2023 06:35:49 +0000 (07:35 +0100)
target/linux/mediatek/patches-6.1/950-net-ethernet-mtk_wed-move-mem_region-array-out-of-mt.patch [new file with mode: 0644]
target/linux/mediatek/patches-6.1/951-net-ethernet-mtk_wed-make-memory-region-optional.patch [new file with mode: 0644]
target/linux/mediatek/patches-6.1/955-net-ethernet-mtk_wed-fix-firmware-loading-for-MT7986.patch [new file with mode: 0644]
target/linux/mediatek/patches-6.1/956-align-upstream.patch [new file with mode: 0644]
target/linux/mediatek/patches-6.1/957-net-ethernet-mtk_wed-remove-wo-pointer-in-wo_r32-wo_.patch [new file with mode: 0644]
target/linux/mediatek/patches-6.1/961-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch [new file with mode: 0644]
target/linux/mediatek/patches-6.1/962-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch [new file with mode: 0644]

diff --git a/target/linux/mediatek/patches-6.1/950-net-ethernet-mtk_wed-move-mem_region-array-out-of-mt.patch b/target/linux/mediatek/patches-6.1/950-net-ethernet-mtk_wed-move-mem_region-array-out-of-mt.patch
new file mode 100644 (file)
index 0000000..51fe678
--- /dev/null
@@ -0,0 +1,97 @@
+From d374bdddd4d724f96b93e6a6bb24b55182b32e5c Mon Sep 17 00:00:00 2001
+Message-ID: <d374bdddd4d724f96b93e6a6bb24b55182b32e5c.1693948541.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue, 5 Sep 2023 22:46:51 +0200
+Subject: [PATCH net-next 1/2] net: ethernet: mtk_wed: move mem_region array
+ out of mtk_wed_mcu_load_firmware
+
+Remove the mtk_wed_wo_memory_region boot member from struct mtk_wed_wo.
+This is a preliminary patch to introduce WED support for the MT7988 SoC.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 37 ++++++++++-----------
+ drivers/net/ethernet/mediatek/mtk_wed_wo.h  |  1 -
+ 2 files changed, 18 insertions(+), 20 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -16,12 +16,28 @@
+ #include "mtk_wed_wo.h"
+ #include "mtk_wed.h"
++static struct mtk_wed_wo_memory_region mem_region[] = {
++      [MTK_WED_WO_REGION_EMI] = {
++              .name = "wo-emi",
++      },
++      [MTK_WED_WO_REGION_ILM] = {
++              .name = "wo-ilm",
++      },
++      [MTK_WED_WO_REGION_DATA] = {
++              .name = "wo-data",
++              .shared = true,
++      },
++      [MTK_WED_WO_REGION_BOOT] = {
++              .name = "wo-boot",
++      },
++};
++
+ static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
+ {
+       u32 val;
+       if (!wo->boot_regmap)
+-              return readl(wo->boot.addr + reg);
++              return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+       if (regmap_read(wo->boot_regmap, reg, &val))
+               val = ~0;
+@@ -34,7 +50,7 @@ static void wo_w32(struct mtk_wed_wo *wo
+       if (wo->boot_regmap)
+               regmap_write(wo->boot_regmap, reg, val);
+       else
+-              writel(val, wo->boot.addr + reg);
++              writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ }
+ static struct sk_buff *
+@@ -348,18 +364,6 @@ mtk_wed_mcu_load_memory_regions(struct m
+ static int
+ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
+ {
+-      static struct mtk_wed_wo_memory_region mem_region[] = {
+-              [MTK_WED_WO_REGION_EMI] = {
+-                      .name = "wo-emi",
+-              },
+-              [MTK_WED_WO_REGION_ILM] = {
+-                      .name = "wo-ilm",
+-              },
+-              [MTK_WED_WO_REGION_DATA] = {
+-                      .name = "wo-data",
+-                      .shared = true,
+-              },
+-      };
+       const struct mtk_wed_fw_trailer *trailer;
+       const struct firmware *fw;
+       const char *fw_name;
+@@ -380,8 +384,8 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+                * is defined through reserved memory property.
+                */
+               wo->boot_regmap = NULL;
+-              wo->boot.name = "wo-boot";
+-              ret = mtk_wed_get_reserved_memory_region(wo, &wo->boot);
++              ret = mtk_wed_get_reserved_memory_region(wo,
++                              &mem_region[MTK_WED_WO_REGION_BOOT]);
+               if (ret)
+                       return ret;
+       }
+--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+@@ -228,7 +228,6 @@ struct mtk_wed_wo_queue {
+ struct mtk_wed_wo {
+       struct mtk_wed_hw *hw;
+-      struct mtk_wed_wo_memory_region boot; /* backward compatibility */
+       struct regmap *boot_regmap;
+       struct mtk_wed_wo_queue q_tx;
diff --git a/target/linux/mediatek/patches-6.1/951-net-ethernet-mtk_wed-make-memory-region-optional.patch b/target/linux/mediatek/patches-6.1/951-net-ethernet-mtk_wed-make-memory-region-optional.patch
new file mode 100644 (file)
index 0000000..0ebbbf7
--- /dev/null
@@ -0,0 +1,150 @@
+From 4944aaa54be3696dfebc5506d8431dc007c77095 Mon Sep 17 00:00:00 2001
+Message-ID: <4944aaa54be3696dfebc5506d8431dc007c77095.1693948541.git.lorenzo@kernel.org>
+In-Reply-To: <d374bdddd4d724f96b93e6a6bb24b55182b32e5c.1693948541.git.lorenzo@kernel.org>
+References: <d374bdddd4d724f96b93e6a6bb24b55182b32e5c.1693948541.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue, 5 Sep 2023 23:06:24 +0200
+Subject: [PATCH net-next 2/2] net: ethernet: mtk_wed: make memory region
+ optional
+
+Make the mtk_wed_wo_memory_region entries optional. This is a preliminary
+patch to introduce WED support for the MT7988 SoC.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 23 ++++++++++++---------
+ 1 file changed, 13 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -242,19 +242,13 @@ int mtk_wed_mcu_msg_update(struct mtk_we
+ }
+ static int
+-mtk_wed_get_reserved_memory_region(struct mtk_wed_wo *wo,
++mtk_wed_get_reserved_memory_region(struct mtk_wed_hw *hw, int index,
+                                  struct mtk_wed_wo_memory_region *region)
+ {
+       struct reserved_mem *rmem;
+       struct device_node *np;
+-      int index;
+-      index = of_property_match_string(wo->hw->node, "memory-region-names",
+-                                       region->name);
+-      if (index < 0)
+-              return index;
+-
+-      np = of_parse_phandle(wo->hw->node, "memory-region", index);
++      np = of_parse_phandle(hw->node, "memory-region", index);
+       if (!np)
+               return -ENODEV;
+@@ -266,7 +260,7 @@ mtk_wed_get_reserved_memory_region(struc
+       region->phy_addr = rmem->base;
+       region->size = rmem->size;
+-      region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size);
++      region->addr = devm_ioremap(hw->dev, region->phy_addr, region->size);
+       return !region->addr ? -EINVAL : 0;
+ }
+@@ -279,6 +273,9 @@ mtk_wed_mcu_run_firmware(struct mtk_wed_
+       const struct mtk_wed_fw_trailer *trailer;
+       const struct mtk_wed_fw_region *fw_region;
++      if (!region->phy_addr || !region->size)
++              return 0;
++
+       trailer_ptr = fw->data + fw->size - sizeof(*trailer);
+       trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
+       region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
+@@ -320,19 +317,27 @@ mtk_wed_mcu_load_memory_regions(struct m
+                               struct mtk_wed_wo_memory_region *region)
+ {
+       struct device_node *np;
+-      int ret;
++      int ret, index;
+-      /* firmware EMI memory region */
+-      ret = mtk_wed_get_reserved_memory_region(wo,
+-                      &region[MTK_WED_WO_REGION_EMI]);
+-      if (ret)
+-              return ret;
++      index = of_property_match_string(wo->hw->node, "memory-region-names",
++                                       "wo-emi");
++      if (index >= 0) {
++              /* firmware EMI memory region */
++              ret = mtk_wed_get_reserved_memory_region(wo->hw, index,
++                              &region[MTK_WED_WO_REGION_EMI]);
++              if (ret)
++                      return ret;
++      }
+-      /* firmware DATA memory region */
+-      ret = mtk_wed_get_reserved_memory_region(wo,
+-                      &region[MTK_WED_WO_REGION_DATA]);
+-      if (ret)
+-              return ret;
++      index = of_property_match_string(wo->hw->node, "memory-region-names",
++                                       "wo-data");
++      if (index >= 0) {
++              /* firmware DATA memory region */
++              ret = mtk_wed_get_reserved_memory_region(wo->hw, index,
++                              &region[MTK_WED_WO_REGION_DATA]);
++              if (ret)
++                      return ret;
++      }
+       np = of_parse_phandle(wo->hw->node, "mediatek,wo-ilm", 0);
+       if (np) {
+@@ -354,11 +359,17 @@ mtk_wed_mcu_load_memory_regions(struct m
+               return IS_ERR(ilm_region->addr) ? PTR_ERR(ilm_region->addr) : 0;
+       }
+-      /* For backward compatibility, we need to check if ILM
+-       * node is defined through reserved memory property.
+-       */
+-      return mtk_wed_get_reserved_memory_region(wo,
+-                      &region[MTK_WED_WO_REGION_ILM]);
++      index = of_property_match_string(wo->hw->node, "memory-region-names",
++                                       "wo-ilm");
++      if (index >= 0) {
++              /* For backward compatibility, we need to check if ILM
++               * node is defined through reserved memory property.
++               */
++              ret = mtk_wed_get_reserved_memory_region(wo->hw, index,
++                              &region[MTK_WED_WO_REGION_ILM]);
++      }
++
++      return ret;
+ }
+ static int
+@@ -377,6 +388,8 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+       wo->boot_regmap = syscon_regmap_lookup_by_phandle(wo->hw->node,
+                                                         "mediatek,wo-cpuboot");
+       if (IS_ERR(wo->boot_regmap)) {
++              int index;
++
+               if (wo->boot_regmap != ERR_PTR(-ENODEV))
+                       return PTR_ERR(wo->boot_regmap);
+@@ -384,10 +397,14 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+                * is defined through reserved memory property.
+                */
+               wo->boot_regmap = NULL;
+-              ret = mtk_wed_get_reserved_memory_region(wo,
+-                              &mem_region[MTK_WED_WO_REGION_BOOT]);
+-              if (ret)
+-                      return ret;
++              index = of_property_match_string(wo->hw->node, "memory-region-names",
++                                               "wo-boot");
++              if (index >= 0) {
++                      ret = mtk_wed_get_reserved_memory_region(wo->hw, index,
++                                      &mem_region[MTK_WED_WO_REGION_BOOT]);
++                      if (ret)
++                              return ret;
++              }
+       }
+       /* set dummy cr */
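The optional-region handling introduced here follows the same pattern for every firmware region: look the region name up in "memory-region-names" and, only if a match exists, resolve the corresponding "memory-region" phandle, fetch the reserved memory and ioremap it. The following is a condensed sketch of that pattern, reconstructed from the hunks above for illustration only; the helper name mtk_wed_try_get_region() is made up, and the driver's own types (struct mtk_wed_hw, struct mtk_wed_wo_memory_region) are assumed to come from mtk_wed.h / mtk_wed_wo.h.

/* Illustrative sketch only -- not part of the patch above. */
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>

static int mtk_wed_try_get_region(struct mtk_wed_hw *hw, const char *name,
				  struct mtk_wed_wo_memory_region *region)
{
	struct reserved_mem *rmem;
	struct device_node *np;
	int index;

	/* Region not listed in "memory-region-names": treat it as optional. */
	index = of_property_match_string(hw->node, "memory-region-names", name);
	if (index < 0)
		return 0;

	np = of_parse_phandle(hw->node, "memory-region", index);
	if (!np)
		return -ENODEV;

	rmem = of_reserved_mem_lookup(np);
	of_node_put(np);
	if (!rmem)
		return -ENODEV;

	region->phy_addr = rmem->base;
	region->size = rmem->size;
	region->addr = devm_ioremap(hw->dev, region->phy_addr, region->size);

	return !region->addr ? -EINVAL : 0;
}

With such a helper, mtk_wed_mcu_load_memory_regions() would reduce to one call per region name ("wo-emi", "wo-data", "wo-ilm", "wo-boot"), each of which silently succeeds when the corresponding entry is absent from the device tree.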
diff --git a/target/linux/mediatek/patches-6.1/955-net-ethernet-mtk_wed-fix-firmware-loading-for-MT7986.patch b/target/linux/mediatek/patches-6.1/955-net-ethernet-mtk_wed-fix-firmware-loading-for-MT7986.patch
new file mode 100644 (file)
index 0000000..8481589
--- /dev/null
@@ -0,0 +1,108 @@
+From fde51ec7ebc462ff8f3c0ccc0638babb55b41d10 Mon Sep 17 00:00:00 2001
+Message-ID: <fde51ec7ebc462ff8f3c0ccc0638babb55b41d10.1698069110.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 23 Oct 2023 15:24:49 +0200
+Subject: [PATCH wireless-next 1/2] net: ethernet: mtk_wed: fix firmware
+ loading for MT7986 SoC
+
+Not all of the memory regions defined in the dts are present in the WED MCU
+firmware image. Reverse the mtk_wed_mcu_run_firmware() logic to check that
+all the firmware regions are defined in the dts reserved_memory node.
+
+Fixes: c6d961aeaa772 ("net: ethernet: mtk_wed: move mem_region array out of mtk_wed_mcu_load_firmware")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 48 +++++++++++----------
+ 1 file changed, 25 insertions(+), 23 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -266,16 +266,12 @@ mtk_wed_get_reserved_memory_region(struc
+ }
+ static int
+-mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
+-                       struct mtk_wed_wo_memory_region *region)
++mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw)
+ {
+       const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data;
+       const struct mtk_wed_fw_trailer *trailer;
+       const struct mtk_wed_fw_region *fw_region;
+-      if (!region->phy_addr || !region->size)
+-              return 0;
+-
+       trailer_ptr = fw->data + fw->size - sizeof(*trailer);
+       trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
+       region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
+@@ -283,33 +279,41 @@ mtk_wed_mcu_run_firmware(struct mtk_wed_
+       while (region_ptr < trailer_ptr) {
+               u32 length;
++              int i;
+               fw_region = (const struct mtk_wed_fw_region *)region_ptr;
+               length = le32_to_cpu(fw_region->len);
+-
+-              if (region->phy_addr != le32_to_cpu(fw_region->addr))
+-                      goto next;
+-
+-              if (region->size < length)
+-                      goto next;
+-
+               if (first_region_ptr < ptr + length)
+                       goto next;
+-              if (region->shared && region->consumed)
+-                      return 0;
++              for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
++                      struct mtk_wed_wo_memory_region *region;
+-              if (!region->shared || !region->consumed) {
+-                      memcpy_toio(region->addr, ptr, length);
+-                      region->consumed = true;
+-                      return 0;
++                      region = &mem_region[i];
++                      if (region->phy_addr != le32_to_cpu(fw_region->addr))
++                              continue;
++
++                      if (region->size < length)
++                              continue;
++
++                      if (region->shared && region->consumed)
++                              break;
++
++                      if (!region->shared || !region->consumed) {
++                              memcpy_toio(region->addr, ptr, length);
++                              region->consumed = true;
++                              break;
++                      }
+               }
++
++              if (i == ARRAY_SIZE(mem_region))
++                      return -EINVAL;
+ next:
+               region_ptr += sizeof(*fw_region);
+               ptr += length;
+       }
+-      return -EINVAL;
++      return 0;
+ }
+ static int
+@@ -429,11 +433,9 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+       dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n",
+                trailer->chip_id, trailer->num_region);
+-      for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+-              ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]);
+-              if (ret)
+-                      goto out;
+-      }
++      ret = mtk_wed_mcu_run_firmware(wo, fw);
++      if (ret)
++              goto out;
+       /* set the start address */
+       boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
diff --git a/target/linux/mediatek/patches-6.1/956-align-upstream.patch b/target/linux/mediatek/patches-6.1/956-align-upstream.patch
new file mode 100644 (file)
index 0000000..f086879
--- /dev/null
@@ -0,0 +1,100 @@
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -34,23 +34,12 @@ static struct mtk_wed_wo_memory_region m
+ static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
+ {
+-      u32 val;
+-
+-      if (!wo->boot_regmap)
+-              return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+-
+-      if (regmap_read(wo->boot_regmap, reg, &val))
+-              val = ~0;
+-
+-      return val;
++      return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ }
+ static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+ {
+-      if (wo->boot_regmap)
+-              regmap_write(wo->boot_regmap, reg, val);
+-      else
+-              writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
++      writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ }
+ static struct sk_buff *
+@@ -343,6 +332,37 @@ mtk_wed_mcu_load_memory_regions(struct m
+                       return ret;
+       }
++      np = of_parse_phandle(wo->hw->node, "mediatek,wo-cpuboot", 0);
++      if (np) {
++              struct mtk_wed_wo_memory_region *boot_region;
++              struct resource res;
++
++              ret = of_address_to_resource(np, 0, &res);
++              of_node_put(np);
++
++              if (ret < 0)
++                      return ret;
++
++              boot_region = &region[MTK_WED_WO_REGION_BOOT];
++              boot_region->phy_addr = res.start;
++              boot_region->size = resource_size(&res);
++              boot_region->addr = devm_ioremap(wo->hw->dev, res.start,
++                                              resource_size(&res));
++              if (IS_ERR(boot_region->addr))
++                      return PTR_ERR(boot_region->addr);
++
++      } else {
++              index = of_property_match_string(wo->hw->node, "memory-region-names",
++                                               "wo-boot");
++              if (index >= 0) {
++                      /* For backward compatibility, we need to check if ILM
++                       * node is defined through reserved memory property.
++                       */
++                      ret = mtk_wed_get_reserved_memory_region(wo->hw, index,
++                                      &region[MTK_WED_WO_REGION_BOOT]);
++              }
++      }
++
+       np = of_parse_phandle(wo->hw->node, "mediatek,wo-ilm", 0);
+       if (np) {
+               struct mtk_wed_wo_memory_region *ilm_region;
+@@ -385,32 +405,11 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+       u32 val, boot_cr;
+       int ret, i;
++      wo->boot_regmap = NULL;
+       ret = mtk_wed_mcu_load_memory_regions(wo, mem_region);
+       if (ret)
+               return ret;
+-      wo->boot_regmap = syscon_regmap_lookup_by_phandle(wo->hw->node,
+-                                                        "mediatek,wo-cpuboot");
+-      if (IS_ERR(wo->boot_regmap)) {
+-              int index;
+-
+-              if (wo->boot_regmap != ERR_PTR(-ENODEV))
+-                      return PTR_ERR(wo->boot_regmap);
+-
+-              /* For backward compatibility, we need to check if cpu_boot
+-               * is defined through reserved memory property.
+-               */
+-              wo->boot_regmap = NULL;
+-              index = of_property_match_string(wo->hw->node, "memory-region-names",
+-                                               "wo-boot");
+-              if (index >= 0) {
+-                      ret = mtk_wed_get_reserved_memory_region(wo->hw, index,
+-                                      &mem_region[MTK_WED_WO_REGION_BOOT]);
+-                      if (ret)
+-                              return ret;
+-              }
+-      }
+-
+       /* set dummy cr */
+       wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
+               wo->hw->index + 1);
diff --git a/target/linux/mediatek/patches-6.1/957-net-ethernet-mtk_wed-remove-wo-pointer-in-wo_r32-wo_.patch b/target/linux/mediatek/patches-6.1/957-net-ethernet-mtk_wed-remove-wo-pointer-in-wo_r32-wo_.patch
new file mode 100644 (file)
index 0000000..c2ce071
--- /dev/null
@@ -0,0 +1,53 @@
+From 3c27bedae1e524fd39e6fb72180d44e6c4d213f2 Mon Sep 17 00:00:00 2001
+Message-ID: <3c27bedae1e524fd39e6fb72180d44e6c4d213f2.1698074158.git.lorenzo@kernel.org>
+In-Reply-To: <fde51ec7ebc462ff8f3c0ccc0638babb55b41d10.1698074158.git.lorenzo@kernel.org>
+References: <fde51ec7ebc462ff8f3c0ccc0638babb55b41d10.1698074158.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 23 Oct 2023 15:29:04 +0200
+Subject: [PATCH wireless-next 2/2] net: ethernet: mtk_wed: remove wo pointer
+ in wo_r32/wo_w32 signature
+
+The wo pointer is no longer used in the wo_r32 and wo_w32 routines, so get
+rid of it.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -32,12 +32,12 @@ static struct mtk_wed_wo_memory_region m
+       },
+ };
+-static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
++static u32 wo_r32(u32 reg)
+ {
+       return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ }
+-static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
++static void wo_w32(u32 reg, u32 val)
+ {
+       writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ }
+@@ -439,14 +439,14 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+       /* set the start address */
+       boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
+                               : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
+-      wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
++      wo_w32(boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
+       /* wo firmware reset */
+-      wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
++      wo_w32(MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
+-      val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
++      val = wo_r32(MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
+       val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK
+                            : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
+-      wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
++      wo_w32(MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
+ out:
+       release_firmware(fw);
diff --git a/target/linux/mediatek/patches-6.1/961-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch b/target/linux/mediatek/patches-6.1/961-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch
new file mode 100644 (file)
index 0000000..85cff14
--- /dev/null
@@ -0,0 +1,601 @@
+From da921578a0edc5c5165a319b908abe8ed15eefbb Mon Sep 17 00:00:00 2001
+Message-ID: <da921578a0edc5c5165a319b908abe8ed15eefbb.1699223257.git.lorenzo@kernel.org>
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Thu, 2 Nov 2023 16:47:07 +0100
+Subject: [PATCH net-next 1/2] net: ethernet: mediatek: split tx and rx fields
+ in mtk_soc_data struct
+
+Split the tx and rx fields in the mtk_soc_data struct. This is a preliminary
+patch to roll back to QDMA for the MT7986 SoC in order to fix a hw hang when
+the device receives a corrupted packet.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 210 ++++++++++++--------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h |  29 +--
+ 2 files changed, 139 insertions(+), 100 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1238,7 +1238,7 @@ static int mtk_init_fq_dma(struct mtk_et
+               eth->scratch_ring = eth->sram_base;
+       else
+               eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+-                                                     cnt * soc->txrx.txd_size,
++                                                     cnt * soc->tx.desc_size,
+                                                      &eth->phy_scratch_ring,
+                                                      GFP_KERNEL);
+       if (unlikely(!eth->scratch_ring))
+@@ -1254,16 +1254,16 @@ static int mtk_init_fq_dma(struct mtk_et
+       if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+               return -ENOMEM;
+-      phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
++      phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+       for (i = 0; i < cnt; i++) {
+               struct mtk_tx_dma_v2 *txd;
+-              txd = eth->scratch_ring + i * soc->txrx.txd_size;
++              txd = eth->scratch_ring + i * soc->tx.desc_size;
+               txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+               if (i < cnt - 1)
+                       txd->txd2 = eth->phy_scratch_ring +
+-                                  (i + 1) * soc->txrx.txd_size;
++                                  (i + 1) * soc->tx.desc_size;
+               txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+               txd->txd4 = 0;
+@@ -1512,7 +1512,7 @@ static int mtk_tx_map(struct sk_buff *sk
+       if (itxd == ring->last_free)
+               return -ENOMEM;
+-      itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++      itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+       memset(itx_buf, 0, sizeof(*itx_buf));
+       txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
+@@ -1553,7 +1553,7 @@ static int mtk_tx_map(struct sk_buff *sk
+                       memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+                       txd_info.size = min_t(unsigned int, frag_size,
+-                                            soc->txrx.dma_max_len);
++                                            soc->tx.dma_max_len);
+                       txd_info.qid = queue;
+                       txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
+                                       !(frag_size - txd_info.size);
+@@ -1566,7 +1566,7 @@ static int mtk_tx_map(struct sk_buff *sk
+                       mtk_tx_set_dma_desc(dev, txd, &txd_info);
+                       tx_buf = mtk_desc_to_tx_buf(ring, txd,
+-                                                  soc->txrx.txd_size);
++                                                  soc->tx.desc_size);
+                       if (new_desc)
+                               memset(tx_buf, 0, sizeof(*tx_buf));
+                       tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+@@ -1609,7 +1609,7 @@ static int mtk_tx_map(struct sk_buff *sk
+       } else {
+               int next_idx;
+-              next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
++              next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
+                                        ring->dma_size);
+               mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+       }
+@@ -1618,7 +1618,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ err_dma:
+       do {
+-              tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++              tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+               /* unmap dma */
+               mtk_tx_unmap(eth, tx_buf, NULL, false);
+@@ -1643,7 +1643,7 @@ static int mtk_cal_txd_req(struct mtk_et
+               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                       frag = &skb_shinfo(skb)->frags[i];
+                       nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+-                                             eth->soc->txrx.dma_max_len);
++                                             eth->soc->tx.dma_max_len);
+               }
+       } else {
+               nfrags += skb_shinfo(skb)->nr_frags;
+@@ -1784,7 +1784,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
+               ring = &eth->rx_ring[i];
+               idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+-              rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++              rxd = ring->dma + idx * eth->soc->rx.desc_size;
+               if (rxd->rxd2 & RX_DMA_DONE) {
+                       ring->calc_idx_update = true;
+                       return ring;
+@@ -1952,7 +1952,7 @@ static int mtk_xdp_submit_frame(struct m
+       }
+       htxd = txd;
+-      tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
++      tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
+       memset(tx_buf, 0, sizeof(*tx_buf));
+       htx_buf = tx_buf;
+@@ -1971,7 +1971,7 @@ static int mtk_xdp_submit_frame(struct m
+                               goto unmap;
+                       tx_buf = mtk_desc_to_tx_buf(ring, txd,
+-                                                  soc->txrx.txd_size);
++                                                  soc->tx.desc_size);
+                       memset(tx_buf, 0, sizeof(*tx_buf));
+                       n_desc++;
+               }
+@@ -2009,7 +2009,7 @@ static int mtk_xdp_submit_frame(struct m
+       } else {
+               int idx;
+-              idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
++              idx = txd_to_idx(ring, txd, soc->tx.desc_size);
+               mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+                       MT7628_TX_CTX_IDX0);
+       }
+@@ -2020,7 +2020,7 @@ static int mtk_xdp_submit_frame(struct m
+ unmap:
+       while (htxd != txd) {
+-              tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
++              tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
+               mtk_tx_unmap(eth, tx_buf, NULL, false);
+               htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+@@ -2151,7 +2151,7 @@ static int mtk_poll_rx(struct napi_struc
+                       goto rx_done;
+               idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+-              rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++              rxd = ring->dma + idx * eth->soc->rx.desc_size;
+               data = ring->data[idx];
+               if (!mtk_rx_get_desc(eth, &trxd, rxd))
+@@ -2286,7 +2286,7 @@ static int mtk_poll_rx(struct napi_struc
+                       rxdcsum = &trxd.rxd4;
+               }
+-              if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
++              if (*rxdcsum & eth->soc->rx.dma_l4_valid)
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               else
+                       skb_checksum_none_assert(skb);
+@@ -2410,7 +2410,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+                       break;
+               tx_buf = mtk_desc_to_tx_buf(ring, desc,
+-                                          eth->soc->txrx.txd_size);
++                                          eth->soc->tx.desc_size);
+               if (!tx_buf->data)
+                       break;
+@@ -2461,7 +2461,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
+               }
+               mtk_tx_unmap(eth, tx_buf, &bq, true);
+-              desc = ring->dma + cpu * eth->soc->txrx.txd_size;
++              desc = ring->dma + cpu * eth->soc->tx.desc_size;
+               ring->last_free = desc;
+               atomic_inc(&ring->free_count);
+@@ -2551,7 +2551,7 @@ static int mtk_napi_rx(struct napi_struc
+       do {
+               int rx_done;
+-              mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
++              mtk_w32(eth, eth->soc->rx.irq_done_mask,
+                       reg_map->pdma.irq_status);
+               rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
+               rx_done_total += rx_done;
+@@ -2567,10 +2567,10 @@ static int mtk_napi_rx(struct napi_struc
+                       return budget;
+       } while (mtk_r32(eth, reg_map->pdma.irq_status) &
+-               eth->soc->txrx.rx_irq_done_mask);
++               eth->soc->rx.irq_done_mask);
+       if (napi_complete_done(napi, rx_done_total))
+-              mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++              mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+       return rx_done_total;
+ }
+@@ -2579,7 +2579,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ {
+       const struct mtk_soc_data *soc = eth->soc;
+       struct mtk_tx_ring *ring = &eth->tx_ring;
+-      int i, sz = soc->txrx.txd_size;
++      int i, sz = soc->tx.desc_size;
+       struct mtk_tx_dma_v2 *txd;
+       int ring_size;
+       u32 ofs, val;
+@@ -2702,14 +2702,14 @@ static void mtk_tx_clean(struct mtk_eth
+       }
+       if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
+               dma_free_coherent(eth->dma_dev,
+-                                ring->dma_size * soc->txrx.txd_size,
++                                ring->dma_size * soc->tx.desc_size,
+                                 ring->dma, ring->phys);
+               ring->dma = NULL;
+       }
+       if (ring->dma_pdma) {
+               dma_free_coherent(eth->dma_dev,
+-                                ring->dma_size * soc->txrx.txd_size,
++                                ring->dma_size * soc->tx.desc_size,
+                                 ring->dma_pdma, ring->phys_pdma);
+               ring->dma_pdma = NULL;
+       }
+@@ -2764,15 +2764,15 @@ static int mtk_rx_alloc(struct mtk_eth *
+       if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
+           rx_flag != MTK_RX_FLAGS_NORMAL) {
+               ring->dma = dma_alloc_coherent(eth->dma_dev,
+-                                             rx_dma_size * eth->soc->txrx.rxd_size,
+-                                             &ring->phys, GFP_KERNEL);
++                              rx_dma_size * eth->soc->rx.desc_size,
++                              &ring->phys, GFP_KERNEL);
+       } else {
+               struct mtk_tx_ring *tx_ring = &eth->tx_ring;
+               ring->dma = tx_ring->dma + tx_ring_size *
+-                          eth->soc->txrx.txd_size * (ring_no + 1);
++                          eth->soc->tx.desc_size * (ring_no + 1);
+               ring->phys = tx_ring->phys + tx_ring_size *
+-                           eth->soc->txrx.txd_size * (ring_no + 1);
++                           eth->soc->tx.desc_size * (ring_no + 1);
+       }
+       if (!ring->dma)
+@@ -2783,7 +2783,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+               dma_addr_t dma_addr;
+               void *data;
+-              rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++              rxd = ring->dma + i * eth->soc->rx.desc_size;
+               if (ring->page_pool) {
+                       data = mtk_page_pool_get_buff(ring->page_pool,
+                                                     &dma_addr, GFP_KERNEL);
+@@ -2874,7 +2874,7 @@ static void mtk_rx_clean(struct mtk_eth
+                       if (!ring->data[i])
+                               continue;
+-                      rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++                      rxd = ring->dma + i * eth->soc->rx.desc_size;
+                       if (!rxd->rxd1)
+                               continue;
+@@ -2891,7 +2891,7 @@ static void mtk_rx_clean(struct mtk_eth
+       if (!in_sram && ring->dma) {
+               dma_free_coherent(eth->dma_dev,
+-                                ring->dma_size * eth->soc->txrx.rxd_size,
++                                ring->dma_size * eth->soc->rx.desc_size,
+                                 ring->dma, ring->phys);
+               ring->dma = NULL;
+       }
+@@ -3254,7 +3254,7 @@ static void mtk_dma_free(struct mtk_eth
+                       netdev_reset_queue(eth->netdev[i]);
+       if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
+               dma_free_coherent(eth->dma_dev,
+-                                MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
++                                MTK_QDMA_RING_SIZE * soc->tx.desc_size,
+                                 eth->scratch_ring, eth->phy_scratch_ring);
+               eth->scratch_ring = NULL;
+               eth->phy_scratch_ring = 0;
+@@ -3304,7 +3304,7 @@ static irqreturn_t mtk_handle_irq_rx(int
+       eth->rx_events++;
+       if (likely(napi_schedule_prep(&eth->rx_napi))) {
+-              mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++              mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+               __napi_schedule(&eth->rx_napi);
+       }
+@@ -3330,9 +3330,9 @@ static irqreturn_t mtk_handle_irq(int ir
+       const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+       if (mtk_r32(eth, reg_map->pdma.irq_mask) &
+-          eth->soc->txrx.rx_irq_done_mask) {
++          eth->soc->rx.irq_done_mask) {
+               if (mtk_r32(eth, reg_map->pdma.irq_status) &
+-                  eth->soc->txrx.rx_irq_done_mask)
++                  eth->soc->rx.irq_done_mask)
+                       mtk_handle_irq_rx(irq, _eth);
+       }
+       if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+@@ -3350,10 +3350,10 @@ static void mtk_poll_controller(struct n
+       struct mtk_eth *eth = mac->hw;
+       mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+-      mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++      mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+       mtk_handle_irq_rx(eth->irq[2], dev);
+       mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+-      mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++      mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+ }
+ #endif
+@@ -3516,7 +3516,7 @@ static int mtk_open(struct net_device *d
+               napi_enable(&eth->tx_napi);
+               napi_enable(&eth->rx_napi);
+               mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+-              mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
++              mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
+               refcount_set(&eth->dma_refcnt, 1);
+       }
+       else
+@@ -3599,7 +3599,7 @@ static int mtk_stop(struct net_device *d
+       mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+       mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+-      mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++      mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+       napi_disable(&eth->tx_napi);
+       napi_disable(&eth->rx_napi);
+@@ -4075,9 +4075,9 @@ static int mtk_hw_init(struct mtk_eth *e
+       /* FE int grouping */
+       mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+-      mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
++      mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
+       mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+-      mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
++      mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
+       mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+       if (mtk_is_netsys_v3_or_greater(eth)) {
+@@ -5172,11 +5172,15 @@ static const struct mtk_soc_data mt2701_
+       .required_clks = MT7623_CLKS_BITMAP,
+       .required_pctl = true,
+       .version = 1,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma),
+-              .rxd_size = sizeof(struct mtk_rx_dma),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
++              .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+@@ -5192,11 +5196,15 @@ static const struct mtk_soc_data mt7621_
+       .offload_version = 1,
+       .hash_offset = 2,
+       .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma),
+-              .rxd_size = sizeof(struct mtk_rx_dma),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
++              .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+@@ -5214,11 +5222,15 @@ static const struct mtk_soc_data mt7622_
+       .hash_offset = 2,
+       .has_accounting = true,
+       .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma),
+-              .rxd_size = sizeof(struct mtk_rx_dma),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
++              .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+@@ -5235,11 +5247,15 @@ static const struct mtk_soc_data mt7623_
+       .hash_offset = 2,
+       .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+       .disable_pll_modes = true,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma),
+-              .rxd_size = sizeof(struct mtk_rx_dma),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
++              .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+@@ -5254,11 +5270,15 @@ static const struct mtk_soc_data mt7629_
+       .required_pctl = false,
+       .has_accounting = true,
+       .version = 1,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma),
+-              .rxd_size = sizeof(struct mtk_rx_dma),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
++              .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+@@ -5276,11 +5296,15 @@ static const struct mtk_soc_data mt7981_
+       .hash_offset = 4,
+       .has_accounting = true,
+       .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma_v2),
+-              .rxd_size = sizeof(struct mtk_rx_dma_v2),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma_v2),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++              .dma_len_offset = 8,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma_v2),
++              .irq_done_mask = MTK_RX_DONE_INT_V2,
++              .dma_l4_valid = RX_DMA_L4_VALID_V2,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+               .dma_len_offset = 8,
+       },
+@@ -5298,11 +5322,15 @@ static const struct mtk_soc_data mt7986_
+       .hash_offset = 4,
+       .has_accounting = true,
+       .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma_v2),
+-              .rxd_size = sizeof(struct mtk_rx_dma_v2),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma_v2),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++              .dma_len_offset = 8,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma_v2),
++              .irq_done_mask = MTK_RX_DONE_INT_V2,
++              .dma_l4_valid = RX_DMA_L4_VALID_V2,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+               .dma_len_offset = 8,
+       },
+@@ -5320,11 +5348,15 @@ static const struct mtk_soc_data mt7988_
+       .hash_offset = 4,
+       .has_accounting = true,
+       .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma_v2),
+-              .rxd_size = sizeof(struct mtk_rx_dma_v2),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma_v2),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++              .dma_len_offset = 8,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma_v2),
++              .irq_done_mask = MTK_RX_DONE_INT_V2,
++              .dma_l4_valid = RX_DMA_L4_VALID_V2,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+               .dma_len_offset = 8,
+       },
+@@ -5337,11 +5369,15 @@ static const struct mtk_soc_data rt5350_
+       .required_clks = MT7628_CLKS_BITMAP,
+       .required_pctl = false,
+       .version = 1,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma),
+-              .rxd_size = sizeof(struct mtk_rx_dma),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
++              .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -326,8 +326,8 @@
+ /* QDMA descriptor txd3 */
+ #define TX_DMA_OWNER_CPU      BIT(31)
+ #define TX_DMA_LS0            BIT(30)
+-#define TX_DMA_PLEN0(x)               (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define TX_DMA_PLEN1(x)               ((x) & eth->soc->txrx.dma_max_len)
++#define TX_DMA_PLEN0(x)               (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
++#define TX_DMA_PLEN1(x)               ((x) & eth->soc->tx.dma_max_len)
+ #define TX_DMA_SWC            BIT(14)
+ #define TX_DMA_PQID           GENMASK(3, 0)
+ #define TX_DMA_ADDR64_MASK    GENMASK(3, 0)
+@@ -347,8 +347,8 @@
+ /* QDMA descriptor rxd2 */
+ #define RX_DMA_DONE           BIT(31)
+ #define RX_DMA_LSO            BIT(30)
+-#define RX_DMA_PREP_PLEN0(x)  (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define RX_DMA_GET_PLEN0(x)   (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
++#define RX_DMA_PREP_PLEN0(x)  (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
++#define RX_DMA_GET_PLEN0(x)   (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
+ #define RX_DMA_VTAG           BIT(15)
+ #define RX_DMA_ADDR64_MASK    GENMASK(3, 0)
+ #if IS_ENABLED(CONFIG_64BIT)
+@@ -1279,10 +1279,9 @@ struct mtk_reg_map {
+  * @foe_entry_size            Foe table entry size.
+  * @has_accounting            Bool indicating support for accounting of
+  *                            offloaded flows.
+- * @txd_size                  Tx DMA descriptor size.
+- * @rxd_size                  Rx DMA descriptor size.
+- * @rx_irq_done_mask          Rx irq done register mask.
+- * @rx_dma_l4_valid           Rx DMA valid register mask.
++ * @desc_size                 Tx/Rx DMA descriptor size.
++ * @irq_done_mask             Rx irq done register mask.
++ * @dma_l4_valid              Rx DMA valid register mask.
+  * @dma_max_len                       Max DMA tx/rx buffer length.
+  * @dma_len_offset            Tx/Rx DMA length field offset.
+  */
+@@ -1300,13 +1299,17 @@ struct mtk_soc_data {
+       bool            has_accounting;
+       bool            disable_pll_modes;
+       struct {
+-              u32     txd_size;
+-              u32     rxd_size;
+-              u32     rx_irq_done_mask;
+-              u32     rx_dma_l4_valid;
++              u32     desc_size;
+               u32     dma_max_len;
+               u32     dma_len_offset;
+-      } txrx;
++      } tx;
++      struct {
++              u32     desc_size;
++              u32     irq_done_mask;
++              u32     dma_l4_valid;
++              u32     dma_max_len;
++              u32     dma_len_offset;
++      } rx;
+ };
+ #define MTK_DMA_MONITOR_TIMEOUT               msecs_to_jiffies(1000)
diff --git a/target/linux/mediatek/patches-6.1/962-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch b/target/linux/mediatek/patches-6.1/962-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch
new file mode 100644 (file)
index 0000000..6187499
--- /dev/null
@@ -0,0 +1,127 @@
+From 4c536f5fb75551035a55be2d1074e42716de9bd1 Mon Sep 17 00:00:00 2001
+Message-ID: <4c536f5fb75551035a55be2d1074e42716de9bd1.1699223258.git.lorenzo@kernel.org>
+In-Reply-To: <da921578a0edc5c5165a319b908abe8ed15eefbb.1699223257.git.lorenzo@kernel.org>
+References: <da921578a0edc5c5165a319b908abe8ed15eefbb.1699223257.git.lorenzo@kernel.org>
+From: Daniel Golle <daniel@makrotopia.org>
+Date: Tue, 10 Oct 2023 21:06:43 +0200
+Subject: [PATCH net-next 2/2] net: ethernet: mediatek: use QDMA instead of
+ ADMAv2 on MT7981 and MT7986
+
+ADMA is plagued by RX hangs which can't easily be detected and which happen
+upon receipt of a corrupted packet.
+Use QDMA, just like on netsys v1: it is still present and usable on these
+SoCs and doesn't suffer from that problem.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 46 ++++++++++-----------
+ 1 file changed, 23 insertions(+), 23 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -108,16 +108,16 @@ static const struct mtk_reg_map mt7986_r
+       .tx_irq_mask            = 0x461c,
+       .tx_irq_status          = 0x4618,
+       .pdma = {
+-              .rx_ptr         = 0x6100,
+-              .rx_cnt_cfg     = 0x6104,
+-              .pcrx_ptr       = 0x6108,
+-              .glo_cfg        = 0x6204,
+-              .rst_idx        = 0x6208,
+-              .delay_irq      = 0x620c,
+-              .irq_status     = 0x6220,
+-              .irq_mask       = 0x6228,
+-              .adma_rx_dbg0   = 0x6238,
+-              .int_grp        = 0x6250,
++              .rx_ptr         = 0x4100,
++              .rx_cnt_cfg     = 0x4104,
++              .pcrx_ptr       = 0x4108,
++              .glo_cfg        = 0x4204,
++              .rst_idx        = 0x4208,
++              .delay_irq      = 0x420c,
++              .irq_status     = 0x4220,
++              .irq_mask       = 0x4228,
++              .adma_rx_dbg0   = 0x4238,
++              .int_grp        = 0x4250,
+       },
+       .qdma = {
+               .qtx_cfg        = 0x4400,
+@@ -1206,7 +1206,7 @@ static bool mtk_rx_get_desc(struct mtk_e
+       rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+       rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
+       rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+-      if (mtk_is_netsys_v2_or_greater(eth)) {
++      if (mtk_is_netsys_v3_or_greater(eth)) {
+               rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+               rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+       }
+@@ -2158,7 +2158,7 @@ static int mtk_poll_rx(struct napi_struc
+                       break;
+               /* find out which mac the packet come from. values start at 1 */
+-              if (mtk_is_netsys_v2_or_greater(eth)) {
++              if (mtk_is_netsys_v3_or_greater(eth)) {
+                       u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
+                       switch (val) {
+@@ -2270,7 +2270,7 @@ static int mtk_poll_rx(struct napi_struc
+               skb->dev = netdev;
+               bytes += skb->len;
+-              if (mtk_is_netsys_v2_or_greater(eth)) {
++              if (mtk_is_netsys_v3_or_greater(eth)) {
+                       reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
+                       hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+                       if (hash != MTK_RXD5_FOE_ENTRY)
+@@ -2820,7 +2820,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+               rxd->rxd3 = 0;
+               rxd->rxd4 = 0;
+-              if (mtk_is_netsys_v2_or_greater(eth)) {
++              if (mtk_is_netsys_v3_or_greater(eth)) {
+                       rxd->rxd5 = 0;
+                       rxd->rxd6 = 0;
+                       rxd->rxd7 = 0;
+@@ -4021,7 +4021,7 @@ static int mtk_hw_init(struct mtk_eth *e
+       else
+               mtk_hw_reset(eth);
+-      if (mtk_is_netsys_v2_or_greater(eth)) {
++      if (mtk_is_netsys_v3_or_greater(eth)) {
+               /* Set FE to PDMAv2 if necessary */
+               val = mtk_r32(eth, MTK_FE_GLO_MISC);
+               mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
+@@ -5302,11 +5302,11 @@ static const struct mtk_soc_data mt7981_
+               .dma_len_offset = 8,
+       },
+       .rx = {
+-              .desc_size = sizeof(struct mtk_rx_dma_v2),
+-              .irq_done_mask = MTK_RX_DONE_INT_V2,
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
+               .dma_l4_valid = RX_DMA_L4_VALID_V2,
+-              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+-              .dma_len_offset = 8,
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
+       },
+ };
+@@ -5328,11 +5328,11 @@ static const struct mtk_soc_data mt7986_
+               .dma_len_offset = 8,
+       },
+       .rx = {
+-              .desc_size = sizeof(struct mtk_rx_dma_v2),
+-              .irq_done_mask = MTK_RX_DONE_INT_V2,
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
+               .dma_l4_valid = RX_DMA_L4_VALID_V2,
+-              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+-              .dma_len_offset = 8,
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
+       },
+ };
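Taken together, the last two patches leave MT7981/MT7986 with an asymmetric DMA setup: tx keeps the v2 (QDMA) descriptor layout, while rx falls back to the v1 descriptor layout, interrupt mask and length encoding, with only the L4-checksum-valid bit still taken from the v2 definition. The resulting mt7986 per-SoC data, reconstructed from the hunks in patches 961 and 962 purely as a summary (not a further change), looks like this:

/* Net effect of patches 961 + 962 on the mt7986 entry (summary only). */
static const struct mtk_soc_data mt7986_data = {
	/* ... other fields unchanged ... */
	.tx = {
		.desc_size = sizeof(struct mtk_tx_dma_v2),
		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
		.dma_len_offset = 8,
	},
	.rx = {
		.desc_size = sizeof(struct mtk_rx_dma),
		.irq_done_mask = MTK_RX_DONE_INT,
		.dma_l4_valid = RX_DMA_L4_VALID_V2,
		.dma_max_len = MTK_TX_DMA_BUF_LEN,
		.dma_len_offset = 16,
	},
};

MT7981 receives the same rx rollback in patch 962, and the mt7986 pdma register offsets move from the 0x61xx (ADMA) range to the 0x41xx range, matching the "use QDMA instead of ADMAv2" intent of the patch.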