sunxi: backport the stmmac driver from kernel 4.13
author Hauke Mehrtens <hauke@hauke-m.de>
Thu, 21 Sep 2017 20:10:08 +0000 (22:10 +0200)
committer Hauke Mehrtens <hauke@hauke-m.de>
Sun, 1 Oct 2017 11:00:16 +0000 (13:00 +0200)
This adds support for the GMAC used in the A64 and other Allwinner
chips by backporting the stmmac changes from kernel versions up to
4.13.

Some commits are not backported because they add support for newly
introduced APIs which are not available in kernel 4.9.
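
For reference, the backported dwmac-sun8i glue is device-tree driven, so a
board also needs a matching EMAC node before the interface comes up. A minimal
sketch (the &emac/&mdio labels, pinctrl group and PHY address are illustrative
assumptions, not part of this commit) could look like:

	/* illustrative board fragment, assuming an A64-style EMAC node */
	&emac {
		pinctrl-names = "default";
		pinctrl-0 = <&rgmii_pins>;
		phy-mode = "rgmii";
		phy-handle = <&ext_rgmii_phy>;
		status = "okay";
	};

	&mdio {
		ext_rgmii_phy: ethernet-phy@1 {
			compatible = "ethernet-phy-ieee802.3-c22";
			reg = <1>;
		};
	};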

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
target/linux/sunxi/config-4.9
target/linux/sunxi/cortexa53/config-default
target/linux/sunxi/cortexa7/config-default
target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch [new file with mode: 0644]
target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch [new file with mode: 0644]
target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch [new file with mode: 0644]
target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch [new file with mode: 0644]

diff --git a/target/linux/sunxi/config-4.9 b/target/linux/sunxi/config-4.9
index 45677f9a83ea42e5d27d692ba0a8dca80e1b1a0b..e961c8ad821de921efe382baae6e534d4f79d587 100644
--- a/target/linux/sunxi/config-4.9
+++ b/target/linux/sunxi/config-4.9
@@ -146,7 +146,9 @@ CONFIG_DMA_VIRTUAL_CHANNELS=y
 CONFIG_DNOTIFY=y
 CONFIG_DTC=y
 CONFIG_DUMMY_CONSOLE=y
+# CONFIG_DWMAC_DWC_QOS_ETH is not set
 CONFIG_DWMAC_GENERIC=y
+# CONFIG_DWMAC_SUN8I is not set
 CONFIG_DWMAC_SUNXI=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_EDAC_ATOMIC_SCRUB=y
diff --git a/target/linux/sunxi/cortexa53/config-default b/target/linux/sunxi/cortexa53/config-default
index 58ac214695330a9d2824816129804f25b9501708..527a6f697975475ae394481a5c056c731f10e40c 100644
--- a/target/linux/sunxi/cortexa53/config-default
+++ b/target/linux/sunxi/cortexa53/config-default
@@ -43,6 +43,7 @@ CONFIG_COMMON_CLK_XGENE=y
 # CONFIG_COMPAT is not set
 # CONFIG_DEBUG_ALIGN_RODATA is not set
 CONFIG_DEBUG_RODATA=y
+CONFIG_DWMAC_SUN8I=y
 CONFIG_FRAME_POINTER=y
 # CONFIG_FSL_ERRATUM_A008585 is not set
 CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
diff --git a/target/linux/sunxi/cortexa7/config-default b/target/linux/sunxi/cortexa7/config-default
index aabfa5ab1024e45cf06151de5f31764c12c9ca67..fe974c1033345713cb3b132d5cd730baa52e70a4 100644
--- a/target/linux/sunxi/cortexa7/config-default
+++ b/target/linux/sunxi/cortexa7/config-default
@@ -1,3 +1,4 @@
+CONFIG_DWMAC_SUN8I=y
 # CONFIG_MACH_SUN4I is not set
 # CONFIG_MACH_SUN5I is not set
 # CONFIG_PINCTRL_GR8 is not set
diff --git a/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch b/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch
new file mode 100644
index 0000000..b88c19e
--- /dev/null
+++ b/target/linux/sunxi/patches-4.9/0050-stmmac-form-4-10.patch
@@ -0,0 +1,3497 @@
+--- a/Documentation/devicetree/bindings/net/stmmac.txt
++++ b/Documentation/devicetree/bindings/net/stmmac.txt
+@@ -1,7 +1,7 @@
+ * STMicroelectronics 10/100/1000 Ethernet driver (GMAC)
+ Required properties:
+-- compatible: Should be "snps,dwmac-<ip_version>" "snps,dwmac"
++- compatible: Should be "snps,dwmac-<ip_version>", "snps,dwmac"
+       For backwards compatibility: "st,spear600-gmac" is also supported.
+ - reg: Address and length of the register set for the device
+ - interrupt-parent: Should be the phandle for the interrupt controller
+@@ -34,7 +34,13 @@ Optional properties:
+   platforms.
+ - tx-fifo-depth: See ethernet.txt file in the same directory
+ - rx-fifo-depth: See ethernet.txt file in the same directory
+-- snps,pbl            Programmable Burst Length
++- snps,pbl            Programmable Burst Length (tx and rx)
++- snps,txpbl          Tx Programmable Burst Length. Only for GMAC and newer.
++                      If set, DMA tx will use this value rather than snps,pbl.
++- snps,rxpbl          Rx Programmable Burst Length. Only for GMAC and newer.
++                      If set, DMA rx will use this value rather than snps,pbl.
++- snps,no-pbl-x8      Don't multiply the pbl/txpbl/rxpbl values by 8.
++                      For core rev < 3.50, don't multiply the values by 4.
+ - snps,aal            Address-Aligned Beats
+ - snps,fixed-burst    Program the DMA to use the fixed burst mode
+ - snps,mixed-burst    Program the DMA to use the mixed burst mode
+@@ -50,6 +56,8 @@ Optional properties:
+ - snps,ps-speed: port selection speed that can be passed to the core when
+                PCS is supported. For example, this is used in case of SGMII
+                and MAC2MAC connection.
++- snps,tso: this enables the TSO feature otherwise it will be managed by
++               MAC HW capability register. Only for GMAC4 and newer.
+ - AXI BUS Mode parameters: below the list of all the parameters to program the
+                          AXI register inside the DMA module:
+       - snps,lpi_en: enable Low Power Interface
+@@ -62,8 +70,6 @@ Optional properties:
+       - snps,fb: fixed-burst
+       - snps,mb: mixed-burst
+       - snps,rb: rebuild INCRx Burst
+-      - snps,tso: this enables the TSO feature otherwise it will be managed by
+-          MAC HW capability register.
+ - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
+ Examples:
+--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
+@@ -69,6 +69,17 @@ config DWMAC_MESON
+         the stmmac device driver. This driver is used for Meson6,
+         Meson8, Meson8b and GXBB SoCs.
++config DWMAC_OXNAS
++      tristate "Oxford Semiconductor OXNAS dwmac support"
++      default ARCH_OXNAS
++      depends on OF && COMMON_CLK && (ARCH_OXNAS || COMPILE_TEST)
++      select MFD_SYSCON
++      help
++        Support for Ethernet controller on Oxford Semiconductor OXNAS SoCs.
++
++        This selects the Oxford Semiconductor OXNASSoC glue layer support for
++        the stmmac device driver. This driver is used for OX820.
++
+ config DWMAC_ROCKCHIP
+       tristate "Rockchip dwmac support"
+       default ARCH_ROCKCHIP
+--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
++++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
+@@ -10,6 +10,7 @@ obj-$(CONFIG_STMMAC_PLATFORM)        += stmmac-
+ obj-$(CONFIG_DWMAC_IPQ806X)   += dwmac-ipq806x.o
+ obj-$(CONFIG_DWMAC_LPC18XX)   += dwmac-lpc18xx.o
+ obj-$(CONFIG_DWMAC_MESON)     += dwmac-meson.o dwmac-meson8b.o
++obj-$(CONFIG_DWMAC_OXNAS)     += dwmac-oxnas.o
+ obj-$(CONFIG_DWMAC_ROCKCHIP)  += dwmac-rk.o
+ obj-$(CONFIG_DWMAC_SOCFPGA)   += dwmac-altr-socfpga.o
+ obj-$(CONFIG_DWMAC_STI)               += dwmac-sti.o
+--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+@@ -34,7 +34,7 @@ static int stmmac_jumbo_frm(void *p, str
+       unsigned int entry = priv->cur_tx;
+       struct dma_desc *desc = priv->dma_tx + entry;
+       unsigned int nopaged_len = skb_headlen(skb);
+-      unsigned int bmax;
++      unsigned int bmax, des2;
+       unsigned int i = 1, len;
+       if (priv->plat->enh_desc)
+@@ -44,11 +44,12 @@ static int stmmac_jumbo_frm(void *p, str
+       len = nopaged_len - bmax;
+-      desc->des2 = dma_map_single(priv->device, skb->data,
+-                                  bmax, DMA_TO_DEVICE);
+-      if (dma_mapping_error(priv->device, desc->des2))
++      des2 = dma_map_single(priv->device, skb->data,
++                            bmax, DMA_TO_DEVICE);
++      desc->des2 = cpu_to_le32(des2);
++      if (dma_mapping_error(priv->device, des2))
+               return -1;
+-      priv->tx_skbuff_dma[entry].buf = desc->des2;
++      priv->tx_skbuff_dma[entry].buf = des2;
+       priv->tx_skbuff_dma[entry].len = bmax;
+       /* do not close the descriptor and do not set own bit */
+       priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
+@@ -60,12 +61,13 @@ static int stmmac_jumbo_frm(void *p, str
+               desc = priv->dma_tx + entry;
+               if (len > bmax) {
+-                      desc->des2 = dma_map_single(priv->device,
+-                                                  (skb->data + bmax * i),
+-                                                  bmax, DMA_TO_DEVICE);
+-                      if (dma_mapping_error(priv->device, desc->des2))
++                      des2 = dma_map_single(priv->device,
++                                            (skb->data + bmax * i),
++                                            bmax, DMA_TO_DEVICE);
++                      desc->des2 = cpu_to_le32(des2);
++                      if (dma_mapping_error(priv->device, des2))
+                               return -1;
+-                      priv->tx_skbuff_dma[entry].buf = desc->des2;
++                      priv->tx_skbuff_dma[entry].buf = des2;
+                       priv->tx_skbuff_dma[entry].len = bmax;
+                       priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
+                                                       STMMAC_CHAIN_MODE, 1,
+@@ -73,12 +75,13 @@ static int stmmac_jumbo_frm(void *p, str
+                       len -= bmax;
+                       i++;
+               } else {
+-                      desc->des2 = dma_map_single(priv->device,
+-                                                  (skb->data + bmax * i), len,
+-                                                  DMA_TO_DEVICE);
+-                      if (dma_mapping_error(priv->device, desc->des2))
++                      des2 = dma_map_single(priv->device,
++                                            (skb->data + bmax * i), len,
++                                            DMA_TO_DEVICE);
++                      desc->des2 = cpu_to_le32(des2);
++                      if (dma_mapping_error(priv->device, des2))
+                               return -1;
+-                      priv->tx_skbuff_dma[entry].buf = desc->des2;
++                      priv->tx_skbuff_dma[entry].buf = des2;
+                       priv->tx_skbuff_dma[entry].len = len;
+                       /* last descriptor can be set now */
+                       priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+@@ -119,19 +122,19 @@ static void stmmac_init_dma_chain(void *
+               struct dma_extended_desc *p = (struct dma_extended_desc *)des;
+               for (i = 0; i < (size - 1); i++) {
+                       dma_phy += sizeof(struct dma_extended_desc);
+-                      p->basic.des3 = (unsigned int)dma_phy;
++                      p->basic.des3 = cpu_to_le32((unsigned int)dma_phy);
+                       p++;
+               }
+-              p->basic.des3 = (unsigned int)phy_addr;
++              p->basic.des3 = cpu_to_le32((unsigned int)phy_addr);
+       } else {
+               struct dma_desc *p = (struct dma_desc *)des;
+               for (i = 0; i < (size - 1); i++) {
+                       dma_phy += sizeof(struct dma_desc);
+-                      p->des3 = (unsigned int)dma_phy;
++                      p->des3 = cpu_to_le32((unsigned int)dma_phy);
+                       p++;
+               }
+-              p->des3 = (unsigned int)phy_addr;
++              p->des3 = cpu_to_le32((unsigned int)phy_addr);
+       }
+ }
+@@ -144,10 +147,10 @@ static void stmmac_refill_desc3(void *pr
+                * 1588-2002 time stamping is enabled, hence reinitialize it
+                * to keep explicit chaining in the descriptor.
+                */
+-              p->des3 = (unsigned int)(priv->dma_rx_phy +
+-                                       (((priv->dirty_rx) + 1) %
+-                                        DMA_RX_SIZE) *
+-                                       sizeof(struct dma_desc));
++              p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy +
++                                    (((priv->dirty_rx) + 1) %
++                                     DMA_RX_SIZE) *
++                                    sizeof(struct dma_desc)));
+ }
+ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
+@@ -161,9 +164,9 @@ static void stmmac_clean_desc3(void *pri
+                * 1588-2002 time stamping is enabled, hence reinitialize it
+                * to keep explicit chaining in the descriptor.
+                */
+-              p->des3 = (unsigned int)((priv->dma_tx_phy +
+-                                        ((priv->dirty_tx + 1) % DMA_TX_SIZE))
+-                                        * sizeof(struct dma_desc));
++              p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy +
++                                    ((priv->dirty_tx + 1) % DMA_TX_SIZE))
++                                    * sizeof(struct dma_desc)));
+ }
+ const struct stmmac_mode_ops chain_mode_ops = {
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -44,6 +44,7 @@
+ #define       DWMAC_CORE_4_00 0x40
+ #define STMMAC_CHAN0  0       /* Always supported and default for all chips */
++/* These need to be power of two, and >= 4 */
+ #define DMA_TX_SIZE 512
+ #define DMA_RX_SIZE 512
+ #define STMMAC_GET_ENTRY(x, size)     ((x + 1) & (size - 1))
+@@ -411,8 +412,8 @@ extern const struct stmmac_desc_ops ndes
+ struct stmmac_dma_ops {
+       /* DMA core initialization */
+       int (*reset)(void __iomem *ioaddr);
+-      void (*init)(void __iomem *ioaddr, int pbl, int fb, int mb,
+-                   int aal, u32 dma_tx, u32 dma_rx, int atds);
++      void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
++                   u32 dma_tx, u32 dma_rx, int atds);
+       /* Configure the AXI Bus Mode Register */
+       void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
+       /* Dump DMA registers */
+@@ -506,6 +507,12 @@ struct mac_link {
+ struct mii_regs {
+       unsigned int addr;      /* MII Address */
+       unsigned int data;      /* MII Data */
++      unsigned int addr_shift;        /* MII address shift */
++      unsigned int reg_shift;         /* MII reg shift */
++      unsigned int addr_mask;         /* MII address mask */
++      unsigned int reg_mask;          /* MII reg mask */
++      unsigned int clk_csr_shift;
++      unsigned int clk_csr_mask;
+ };
+ /* Helpers to manage the descriptors for chain and ring modes */
+--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
++++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
+@@ -87,7 +87,7 @@
+ #define       TDES0_ERROR_SUMMARY             BIT(15)
+ #define       TDES0_IP_HEADER_ERROR           BIT(16)
+ #define       TDES0_TIME_STAMP_STATUS         BIT(17)
+-#define       TDES0_OWN                       BIT(31)
++#define       TDES0_OWN                       ((u32)BIT(31))  /* silence sparse */
+ /* TDES1 */
+ #define       TDES1_BUFFER1_SIZE_MASK         GENMASK(10, 0)
+ #define       TDES1_BUFFER2_SIZE_MASK         GENMASK(21, 11)
+@@ -130,7 +130,7 @@
+ #define       ETDES0_FIRST_SEGMENT            BIT(28)
+ #define       ETDES0_LAST_SEGMENT             BIT(29)
+ #define       ETDES0_INTERRUPT                BIT(30)
+-#define       ETDES0_OWN                      BIT(31)
++#define       ETDES0_OWN                      ((u32)BIT(31))  /* silence sparse */
+ /* TDES1 */
+ #define       ETDES1_BUFFER1_SIZE_MASK        GENMASK(12, 0)
+ #define       ETDES1_BUFFER2_SIZE_MASK        GENMASK(28, 16)
+@@ -170,19 +170,19 @@
+ /* Basic descriptor structure for normal and alternate descriptors */
+ struct dma_desc {
+-      unsigned int des0;
+-      unsigned int des1;
+-      unsigned int des2;
+-      unsigned int des3;
++      __le32 des0;
++      __le32 des1;
++      __le32 des2;
++      __le32 des3;
+ };
+ /* Extended descriptor structure (e.g. >= databook 3.50a) */
+ struct dma_extended_desc {
+       struct dma_desc basic;  /* Basic descriptors */
+-      unsigned int des4;      /* Extended Status */
+-      unsigned int des5;      /* Reserved */
+-      unsigned int des6;      /* Tx/Rx Timestamp Low */
+-      unsigned int des7;      /* Tx/Rx Timestamp High */
++      __le32 des4;    /* Extended Status */
++      __le32 des5;    /* Reserved */
++      __le32 des6;    /* Tx/Rx Timestamp Low */
++      __le32 des7;    /* Tx/Rx Timestamp High */
+ };
+ /* Transmit checksum insertion control */
+--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
++++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+@@ -35,47 +35,50 @@
+ /* Enhanced descriptors */
+ static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+ {
+-      p->des1 |= ((BUF_SIZE_8KiB - 1) << ERDES1_BUFFER2_SIZE_SHIFT)
+-                 & ERDES1_BUFFER2_SIZE_MASK;
++      p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
++                      << ERDES1_BUFFER2_SIZE_SHIFT)
++                 & ERDES1_BUFFER2_SIZE_MASK);
+       if (end)
+-              p->des1 |= ERDES1_END_RING;
++              p->des1 |= cpu_to_le32(ERDES1_END_RING);
+ }
+ static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
+ {
+       if (end)
+-              p->des0 |= ETDES0_END_RING;
++              p->des0 |= cpu_to_le32(ETDES0_END_RING);
+       else
+-              p->des0 &= ~ETDES0_END_RING;
++              p->des0 &= cpu_to_le32(~ETDES0_END_RING);
+ }
+ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
+ {
+       if (unlikely(len > BUF_SIZE_4KiB)) {
+-              p->des1 |= (((len - BUF_SIZE_4KiB) << ETDES1_BUFFER2_SIZE_SHIFT)
++              p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB)
++                                      << ETDES1_BUFFER2_SIZE_SHIFT)
+                           & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB
+-                          & ETDES1_BUFFER1_SIZE_MASK);
++                          & ETDES1_BUFFER1_SIZE_MASK));
+       } else
+-              p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
++              p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK));
+ }
+ /* Normal descriptors */
+ static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+ {
+-      p->des1 |= ((BUF_SIZE_2KiB - 1) << RDES1_BUFFER2_SIZE_SHIFT)
+-                  & RDES1_BUFFER2_SIZE_MASK;
++      p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
++                              << RDES1_BUFFER2_SIZE_SHIFT)
++                  & RDES1_BUFFER2_SIZE_MASK);
+       if (end)
+-              p->des1 |= RDES1_END_RING;
++              p->des1 |= cpu_to_le32(RDES1_END_RING);
+ }
+ static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
+ {
+       if (end)
+-              p->des1 |= TDES1_END_RING;
++              p->des1 |= cpu_to_le32(TDES1_END_RING);
+       else
+-              p->des1 &= ~TDES1_END_RING;
++              p->des1 &= cpu_to_le32(~TDES1_END_RING);
+ }
+ static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
+@@ -83,10 +86,11 @@ static inline void norm_set_tx_desc_len_
+       if (unlikely(len > BUF_SIZE_2KiB)) {
+               unsigned int buffer1 = (BUF_SIZE_2KiB - 1)
+                                       & TDES1_BUFFER1_SIZE_MASK;
+-              p->des1 |= ((((len - buffer1) << TDES1_BUFFER2_SIZE_SHIFT)
+-                          & TDES1_BUFFER2_SIZE_MASK) | buffer1);
++              p->des1 |= cpu_to_le32((((len - buffer1)
++                                      << TDES1_BUFFER2_SIZE_SHIFT)
++                              & TDES1_BUFFER2_SIZE_MASK) | buffer1);
+       } else
+-              p->des1 |= (len & TDES1_BUFFER1_SIZE_MASK);
++              p->des1 |= cpu_to_le32((len & TDES1_BUFFER1_SIZE_MASK));
+ }
+ /* Specific functions used for Chain mode */
+@@ -94,32 +98,32 @@ static inline void norm_set_tx_desc_len_
+ /* Enhanced descriptors */
+ static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p)
+ {
+-      p->des1 |= ERDES1_SECOND_ADDRESS_CHAINED;
++      p->des1 |= cpu_to_le32(ERDES1_SECOND_ADDRESS_CHAINED);
+ }
+ static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p)
+ {
+-      p->des0 |= ETDES0_SECOND_ADDRESS_CHAINED;
++      p->des0 |= cpu_to_le32(ETDES0_SECOND_ADDRESS_CHAINED);
+ }
+ static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
+ {
+-      p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
++      p->des1 |= cpu_to_le32(len & ETDES1_BUFFER1_SIZE_MASK);
+ }
+ /* Normal descriptors */
+ static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
+ {
+-      p->des1 |= RDES1_SECOND_ADDRESS_CHAINED;
++      p->des1 |= cpu_to_le32(RDES1_SECOND_ADDRESS_CHAINED);
+ }
+ static inline void ndesc_tx_set_on_chain(struct dma_desc *p)
+ {
+-      p->des1 |= TDES1_SECOND_ADDRESS_CHAINED;
++      p->des1 |= cpu_to_le32(TDES1_SECOND_ADDRESS_CHAINED);
+ }
+ static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
+ {
+-      p->des1 |= len & TDES1_BUFFER1_SIZE_MASK;
++      p->des1 |= cpu_to_le32(len & TDES1_BUFFER1_SIZE_MASK);
+ }
+ #endif /* __DESC_COM_H__ */
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+@@ -71,9 +71,12 @@ err_remove_config_dt:
+ static const struct of_device_id dwmac_generic_match[] = {
+       { .compatible = "st,spear600-gmac"},
++      { .compatible = "snps,dwmac-3.50a"},
+       { .compatible = "snps,dwmac-3.610"},
+       { .compatible = "snps,dwmac-3.70a"},
+       { .compatible = "snps,dwmac-3.710"},
++      { .compatible = "snps,dwmac-4.00"},
++      { .compatible = "snps,dwmac-4.10a"},
+       { .compatible = "snps,dwmac"},
+       { }
+ };
+--- /dev/null
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
+@@ -0,0 +1,194 @@
++/*
++ * Oxford Semiconductor OXNAS DWMAC glue layer
++ *
++ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
++ * Copyright (C) 2014 Daniel Golle <daniel@makrotopia.org>
++ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
++ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/device.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/regmap.h>
++#include <linux/mfd/syscon.h>
++#include <linux/stmmac.h>
++
++#include "stmmac_platform.h"
++
++/* System Control regmap offsets */
++#define OXNAS_DWMAC_CTRL_REGOFFSET    0x78
++#define OXNAS_DWMAC_DELAY_REGOFFSET   0x100
++
++/* Control Register */
++#define DWMAC_CKEN_RX_IN        14
++#define DWMAC_CKEN_RXN_OUT      13
++#define DWMAC_CKEN_RX_OUT       12
++#define DWMAC_CKEN_TX_IN        10
++#define DWMAC_CKEN_TXN_OUT      9
++#define DWMAC_CKEN_TX_OUT       8
++#define DWMAC_RX_SOURCE         7
++#define DWMAC_TX_SOURCE         6
++#define DWMAC_LOW_TX_SOURCE     4
++#define DWMAC_AUTO_TX_SOURCE    3
++#define DWMAC_RGMII             2
++#define DWMAC_SIMPLE_MUX        1
++#define DWMAC_CKEN_GTX          0
++
++/* Delay register */
++#define DWMAC_TX_VARDELAY_SHIFT               0
++#define DWMAC_TXN_VARDELAY_SHIFT      8
++#define DWMAC_RX_VARDELAY_SHIFT               16
++#define DWMAC_RXN_VARDELAY_SHIFT      24
++#define DWMAC_TX_VARDELAY(d)          ((d) << DWMAC_TX_VARDELAY_SHIFT)
++#define DWMAC_TXN_VARDELAY(d)         ((d) << DWMAC_TXN_VARDELAY_SHIFT)
++#define DWMAC_RX_VARDELAY(d)          ((d) << DWMAC_RX_VARDELAY_SHIFT)
++#define DWMAC_RXN_VARDELAY(d)         ((d) << DWMAC_RXN_VARDELAY_SHIFT)
++
++struct oxnas_dwmac {
++      struct device   *dev;
++      struct clk      *clk;
++      struct regmap   *regmap;
++};
++
++static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
++{
++      struct oxnas_dwmac *dwmac = priv;
++      unsigned int value;
++      int ret;
++
++      /* Reset HW here before changing the glue configuration */
++      ret = device_reset(dwmac->dev);
++      if (ret)
++              return ret;
++
++      ret = clk_prepare_enable(dwmac->clk);
++      if (ret)
++              return ret;
++
++      ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
++      if (ret < 0) {
++              clk_disable_unprepare(dwmac->clk);
++              return ret;
++      }
++
++      /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
++      value |= BIT(DWMAC_CKEN_GTX)            |
++               /* Use simple mux for 25/125 Mhz clock switching */
++               BIT(DWMAC_SIMPLE_MUX)          |
++               /* set auto switch tx clock source */
++               BIT(DWMAC_AUTO_TX_SOURCE)      |
++               /* enable tx & rx vardelay */
++               BIT(DWMAC_CKEN_TX_OUT)         |
++               BIT(DWMAC_CKEN_TXN_OUT)        |
++               BIT(DWMAC_CKEN_TX_IN)          |
++               BIT(DWMAC_CKEN_RX_OUT)         |
++               BIT(DWMAC_CKEN_RXN_OUT)        |
++               BIT(DWMAC_CKEN_RX_IN);
++      regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
++
++      /* set tx & rx vardelay */
++      value = DWMAC_TX_VARDELAY(4)    |
++              DWMAC_TXN_VARDELAY(2)   |
++              DWMAC_RX_VARDELAY(10)   |
++              DWMAC_RXN_VARDELAY(8);
++      regmap_write(dwmac->regmap, OXNAS_DWMAC_DELAY_REGOFFSET, value);
++
++      return 0;
++}
++
++static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
++{
++      struct oxnas_dwmac *dwmac = priv;
++
++      clk_disable_unprepare(dwmac->clk);
++}
++
++static int oxnas_dwmac_probe(struct platform_device *pdev)
++{
++      struct plat_stmmacenet_data *plat_dat;
++      struct stmmac_resources stmmac_res;
++      struct oxnas_dwmac *dwmac;
++      int ret;
++
++      ret = stmmac_get_platform_resources(pdev, &stmmac_res);
++      if (ret)
++              return ret;
++
++      plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
++      if (IS_ERR(plat_dat))
++              return PTR_ERR(plat_dat);
++
++      dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
++      if (!dwmac) {
++              ret = -ENOMEM;
++              goto err_remove_config_dt;
++      }
++
++      dwmac->dev = &pdev->dev;
++      plat_dat->bsp_priv = dwmac;
++      plat_dat->init = oxnas_dwmac_init;
++      plat_dat->exit = oxnas_dwmac_exit;
++
++      dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
++                                                      "oxsemi,sys-ctrl");
++      if (IS_ERR(dwmac->regmap)) {
++              dev_err(&pdev->dev, "failed to have sysctrl regmap\n");
++              ret = PTR_ERR(dwmac->regmap);
++              goto err_remove_config_dt;
++      }
++
++      dwmac->clk = devm_clk_get(&pdev->dev, "gmac");
++      if (IS_ERR(dwmac->clk)) {
++              ret = PTR_ERR(dwmac->clk);
++              goto err_remove_config_dt;
++      }
++
++      ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv);
++      if (ret)
++              goto err_remove_config_dt;
++
++      ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
++      if (ret)
++              goto err_dwmac_exit;
++
++
++      return 0;
++
++err_dwmac_exit:
++      oxnas_dwmac_exit(pdev, plat_dat->bsp_priv);
++err_remove_config_dt:
++      stmmac_remove_config_dt(pdev, plat_dat);
++
++      return ret;
++}
++
++static const struct of_device_id oxnas_dwmac_match[] = {
++      { .compatible = "oxsemi,ox820-dwmac" },
++      { }
++};
++MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
++
++static struct platform_driver oxnas_dwmac_driver = {
++      .probe  = oxnas_dwmac_probe,
++      .remove = stmmac_pltfr_remove,
++      .driver = {
++              .name           = "oxnas-dwmac",
++              .pm             = &stmmac_pltfr_pm_ops,
++              .of_match_table = oxnas_dwmac_match,
++      },
++};
++module_platform_driver(oxnas_dwmac_driver);
++
++MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
++MODULE_DESCRIPTION("Oxford Semiconductor OXNAS DWMAC glue layer");
++MODULE_LICENSE("GPL v2");
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -864,6 +864,10 @@ static int rk_gmac_powerup(struct rk_pri
+       int ret;
+       struct device *dev = &bsp_priv->pdev->dev;
++      ret = gmac_clk_enable(bsp_priv, true);
++      if (ret)
++              return ret;
++
+       /*rmii or rgmii*/
+       if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
+               dev_info(dev, "init for RGMII\n");
+@@ -880,10 +884,6 @@ static int rk_gmac_powerup(struct rk_pri
+       if (ret)
+               return ret;
+-      ret = gmac_clk_enable(bsp_priv, true);
+-      if (ret)
+-              return ret;
+-
+       pm_runtime_enable(dev);
+       pm_runtime_get_sync(dev);
+@@ -901,44 +901,6 @@ static void rk_gmac_powerdown(struct rk_
+       gmac_clk_enable(gmac, false);
+ }
+-static int rk_gmac_init(struct platform_device *pdev, void *priv)
+-{
+-      struct rk_priv_data *bsp_priv = priv;
+-
+-      return rk_gmac_powerup(bsp_priv);
+-}
+-
+-static void rk_gmac_exit(struct platform_device *pdev, void *priv)
+-{
+-      struct rk_priv_data *bsp_priv = priv;
+-
+-      rk_gmac_powerdown(bsp_priv);
+-}
+-
+-static void rk_gmac_suspend(struct platform_device *pdev, void *priv)
+-{
+-      struct rk_priv_data *bsp_priv = priv;
+-
+-      /* Keep the PHY up if we use Wake-on-Lan. */
+-      if (device_may_wakeup(&pdev->dev))
+-              return;
+-
+-      rk_gmac_powerdown(bsp_priv);
+-      bsp_priv->suspended = true;
+-}
+-
+-static void rk_gmac_resume(struct platform_device *pdev, void *priv)
+-{
+-      struct rk_priv_data *bsp_priv = priv;
+-
+-      /* The PHY was up for Wake-on-Lan. */
+-      if (!bsp_priv->suspended)
+-              return;
+-
+-      rk_gmac_powerup(bsp_priv);
+-      bsp_priv->suspended = false;
+-}
+-
+ static void rk_fix_speed(void *priv, unsigned int speed)
+ {
+       struct rk_priv_data *bsp_priv = priv;
+@@ -974,11 +936,7 @@ static int rk_gmac_probe(struct platform
+               return PTR_ERR(plat_dat);
+       plat_dat->has_gmac = true;
+-      plat_dat->init = rk_gmac_init;
+-      plat_dat->exit = rk_gmac_exit;
+       plat_dat->fix_mac_speed = rk_fix_speed;
+-      plat_dat->suspend = rk_gmac_suspend;
+-      plat_dat->resume = rk_gmac_resume;
+       plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
+       if (IS_ERR(plat_dat->bsp_priv)) {
+@@ -986,24 +944,65 @@ static int rk_gmac_probe(struct platform
+               goto err_remove_config_dt;
+       }
+-      ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
++      ret = rk_gmac_powerup(plat_dat->bsp_priv);
+       if (ret)
+               goto err_remove_config_dt;
+       ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+       if (ret)
+-              goto err_gmac_exit;
++              goto err_gmac_powerdown;
+       return 0;
+-err_gmac_exit:
+-      rk_gmac_exit(pdev, plat_dat->bsp_priv);
++err_gmac_powerdown:
++      rk_gmac_powerdown(plat_dat->bsp_priv);
+ err_remove_config_dt:
+       stmmac_remove_config_dt(pdev, plat_dat);
+       return ret;
+ }
++static int rk_gmac_remove(struct platform_device *pdev)
++{
++      struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev);
++      int ret = stmmac_dvr_remove(&pdev->dev);
++
++      rk_gmac_powerdown(bsp_priv);
++
++      return ret;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int rk_gmac_suspend(struct device *dev)
++{
++      struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
++      int ret = stmmac_suspend(dev);
++
++      /* Keep the PHY up if we use Wake-on-Lan. */
++      if (!device_may_wakeup(dev)) {
++              rk_gmac_powerdown(bsp_priv);
++              bsp_priv->suspended = true;
++      }
++
++      return ret;
++}
++
++static int rk_gmac_resume(struct device *dev)
++{
++      struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
++
++      /* The PHY was up for Wake-on-Lan. */
++      if (bsp_priv->suspended) {
++              rk_gmac_powerup(bsp_priv);
++              bsp_priv->suspended = false;
++      }
++
++      return stmmac_resume(dev);
++}
++#endif /* CONFIG_PM_SLEEP */
++
++static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
++
+ static const struct of_device_id rk_gmac_dwmac_match[] = {
+       { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
+       { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+@@ -1016,10 +1015,10 @@ MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_ma
+ static struct platform_driver rk_gmac_dwmac_driver = {
+       .probe  = rk_gmac_probe,
+-      .remove = stmmac_pltfr_remove,
++      .remove = rk_gmac_remove,
+       .driver = {
+               .name           = "rk_gmac-dwmac",
+-              .pm             = &stmmac_pltfr_pm_ops,
++              .pm             = &rk_gmac_pm_ops,
+               .of_match_table = rk_gmac_dwmac_match,
+       },
+ };
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+@@ -380,8 +380,8 @@ static int socfpga_dwmac_resume(struct d
+        * control register 0, and can be modified by the phy driver
+        * framework.
+        */
+-      if (priv->phydev)
+-              phy_resume(priv->phydev);
++      if (ndev->phydev)
++              phy_resume(ndev->phydev);
+       return stmmac_resume(dev);
+ }
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+@@ -126,8 +126,8 @@ struct sti_dwmac {
+       struct clk *clk;        /* PHY clock */
+       u32 ctrl_reg;           /* GMAC glue-logic control register */
+       int clk_sel_reg;        /* GMAC ext clk selection register */
+-      struct device *dev;
+       struct regmap *regmap;
++      bool gmac_en;
+       u32 speed;
+       void (*fix_retime_src)(void *priv, unsigned int speed);
+ };
+@@ -191,7 +191,7 @@ static void stih4xx_fix_retime_src(void
+               }
+       }
+-      if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk && freq)
++      if (src == TX_RETIME_SRC_CLKGEN && freq)
+               clk_set_rate(dwmac->clk, freq);
+       regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK,
+@@ -222,26 +222,20 @@ static void stid127_fix_retime_src(void
+                       freq = DWMAC_2_5MHZ;
+       }
+-      if (dwmac->clk && freq)
++      if (freq)
+               clk_set_rate(dwmac->clk, freq);
+       regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
+ }
+-static int sti_dwmac_init(struct platform_device *pdev, void *priv)
++static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
+ {
+-      struct sti_dwmac *dwmac = priv;
+       struct regmap *regmap = dwmac->regmap;
+       int iface = dwmac->interface;
+-      struct device *dev = dwmac->dev;
+-      struct device_node *np = dev->of_node;
+       u32 reg = dwmac->ctrl_reg;
+       u32 val;
+-      if (dwmac->clk)
+-              clk_prepare_enable(dwmac->clk);
+-
+-      if (of_property_read_bool(np, "st,gmac_en"))
++      if (dwmac->gmac_en)
+               regmap_update_bits(regmap, reg, EN_MASK, EN);
+       regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
+@@ -249,18 +243,11 @@ static int sti_dwmac_init(struct platfor
+       val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
+       regmap_update_bits(regmap, reg, ENMII_MASK, val);
+-      dwmac->fix_retime_src(priv, dwmac->speed);
++      dwmac->fix_retime_src(dwmac, dwmac->speed);
+       return 0;
+ }
+-static void sti_dwmac_exit(struct platform_device *pdev, void *priv)
+-{
+-      struct sti_dwmac *dwmac = priv;
+-
+-      if (dwmac->clk)
+-              clk_disable_unprepare(dwmac->clk);
+-}
+ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
+                               struct platform_device *pdev)
+ {
+@@ -270,9 +257,6 @@ static int sti_dwmac_parse_data(struct s
+       struct regmap *regmap;
+       int err;
+-      if (!np)
+-              return -EINVAL;
+-
+       /* clk selection from extra syscfg register */
+       dwmac->clk_sel_reg = -ENXIO;
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf");
+@@ -289,9 +273,9 @@ static int sti_dwmac_parse_data(struct s
+               return err;
+       }
+-      dwmac->dev = dev;
+       dwmac->interface = of_get_phy_mode(np);
+       dwmac->regmap = regmap;
++      dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
+       dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
+       dwmac->tx_retime_src = TX_RETIME_SRC_NA;
+       dwmac->speed = SPEED_100;
+@@ -359,28 +343,65 @@ static int sti_dwmac_probe(struct platfo
+       dwmac->fix_retime_src = data->fix_retime_src;
+       plat_dat->bsp_priv = dwmac;
+-      plat_dat->init = sti_dwmac_init;
+-      plat_dat->exit = sti_dwmac_exit;
+       plat_dat->fix_mac_speed = data->fix_retime_src;
+-      ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
++      ret = clk_prepare_enable(dwmac->clk);
+       if (ret)
+               goto err_remove_config_dt;
++      ret = sti_dwmac_set_mode(dwmac);
++      if (ret)
++              goto disable_clk;
++
+       ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+       if (ret)
+-              goto err_dwmac_exit;
++              goto disable_clk;
+       return 0;
+-err_dwmac_exit:
+-      sti_dwmac_exit(pdev, plat_dat->bsp_priv);
++disable_clk:
++      clk_disable_unprepare(dwmac->clk);
+ err_remove_config_dt:
+       stmmac_remove_config_dt(pdev, plat_dat);
+       return ret;
+ }
++static int sti_dwmac_remove(struct platform_device *pdev)
++{
++      struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
++      int ret = stmmac_dvr_remove(&pdev->dev);
++
++      clk_disable_unprepare(dwmac->clk);
++
++      return ret;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int sti_dwmac_suspend(struct device *dev)
++{
++      struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
++      int ret = stmmac_suspend(dev);
++
++      clk_disable_unprepare(dwmac->clk);
++
++      return ret;
++}
++
++static int sti_dwmac_resume(struct device *dev)
++{
++      struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
++
++      clk_prepare_enable(dwmac->clk);
++      sti_dwmac_set_mode(dwmac);
++
++      return stmmac_resume(dev);
++}
++#endif /* CONFIG_PM_SLEEP */
++
++static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
++                                         sti_dwmac_resume);
++
+ static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
+       .fix_retime_src = stih4xx_fix_retime_src,
+ };
+@@ -400,10 +421,10 @@ MODULE_DEVICE_TABLE(of, sti_dwmac_match)
+ static struct platform_driver sti_dwmac_driver = {
+       .probe  = sti_dwmac_probe,
+-      .remove = stmmac_pltfr_remove,
++      .remove = sti_dwmac_remove,
+       .driver = {
+               .name           = "sti-dwmac",
+-              .pm             = &stmmac_pltfr_pm_ops,
++              .pm             = &sti_dwmac_pm_ops,
+               .of_match_table = sti_dwmac_match,
+       },
+ };
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+@@ -225,7 +225,7 @@ enum rx_tx_priority_ratio {
+ #define DMA_BUS_MODE_FB               0x00010000      /* Fixed burst */
+ #define DMA_BUS_MODE_MB               0x04000000      /* Mixed burst */
+-#define DMA_BUS_MODE_RPBL_MASK        0x003e0000      /* Rx-Programmable Burst Len */
++#define DMA_BUS_MODE_RPBL_MASK        0x007e0000      /* Rx-Programmable Burst Len */
+ #define DMA_BUS_MODE_RPBL_SHIFT       17
+ #define DMA_BUS_MODE_USP      0x00800000
+ #define DMA_BUS_MODE_MAXPBL   0x01000000
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+@@ -538,6 +538,12 @@ struct mac_device_info *dwmac1000_setup(
+       mac->link.speed = GMAC_CONTROL_FES;
+       mac->mii.addr = GMAC_MII_ADDR;
+       mac->mii.data = GMAC_MII_DATA;
++      mac->mii.addr_shift = 11;
++      mac->mii.addr_mask = 0x0000F800;
++      mac->mii.reg_shift = 6;
++      mac->mii.reg_mask = 0x000007C0;
++      mac->mii.clk_csr_shift = 2;
++      mac->mii.clk_csr_mask = GENMASK(5, 2);
+       /* Get and dump the chip ID */
+       *synopsys_id = stmmac_get_synopsys_id(hwid);
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+@@ -84,37 +84,39 @@ static void dwmac1000_dma_axi(void __iom
+       writel(value, ioaddr + DMA_AXI_BUS_MODE);
+ }
+-static void dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+-                             int aal, u32 dma_tx, u32 dma_rx, int atds)
++static void dwmac1000_dma_init(void __iomem *ioaddr,
++                             struct stmmac_dma_cfg *dma_cfg,
++                             u32 dma_tx, u32 dma_rx, int atds)
+ {
+       u32 value = readl(ioaddr + DMA_BUS_MODE);
++      int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
++      int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+       /*
+        * Set the DMA PBL (Programmable Burst Length) mode.
+        *
+        * Note: before stmmac core 3.50 this mode bit was 4xPBL, and
+        * post 3.5 mode bit acts as 8*PBL.
+-       *
+-       * This configuration doesn't take care about the Separate PBL
+-       * so only the bits: 13-8 are programmed with the PBL passed from the
+-       * platform.
+        */
+-      value |= DMA_BUS_MODE_MAXPBL;
+-      value &= ~DMA_BUS_MODE_PBL_MASK;
+-      value |= (pbl << DMA_BUS_MODE_PBL_SHIFT);
++      if (dma_cfg->pblx8)
++              value |= DMA_BUS_MODE_MAXPBL;
++      value |= DMA_BUS_MODE_USP;
++      value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
++      value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
++      value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+       /* Set the Fixed burst mode */
+-      if (fb)
++      if (dma_cfg->fixed_burst)
+               value |= DMA_BUS_MODE_FB;
+       /* Mixed Burst has no effect when fb is set */
+-      if (mb)
++      if (dma_cfg->mixed_burst)
+               value |= DMA_BUS_MODE_MB;
+       if (atds)
+               value |= DMA_BUS_MODE_ATDS;
+-      if (aal)
++      if (dma_cfg->aal)
+               value |= DMA_BUS_MODE_AAL;
+       writel(value, ioaddr + DMA_BUS_MODE);
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+@@ -192,6 +192,13 @@ struct mac_device_info *dwmac100_setup(v
+       mac->link.speed = 0;
+       mac->mii.addr = MAC_MII_ADDR;
+       mac->mii.data = MAC_MII_DATA;
++      mac->mii.addr_shift = 11;
++      mac->mii.addr_mask = 0x0000F800;
++      mac->mii.reg_shift = 6;
++      mac->mii.reg_mask = 0x000007C0;
++      mac->mii.clk_csr_shift = 2;
++      mac->mii.clk_csr_mask = GENMASK(5, 2);
++
+       /* Synopsys Id is not available on old chips */
+       *synopsys_id = 0;
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+@@ -32,11 +32,12 @@
+ #include "dwmac100.h"
+ #include "dwmac_dma.h"
+-static void dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+-                            int aal, u32 dma_tx, u32 dma_rx, int atds)
++static void dwmac100_dma_init(void __iomem *ioaddr,
++                            struct stmmac_dma_cfg *dma_cfg,
++                            u32 dma_tx, u32 dma_rx, int atds)
+ {
+       /* Enable Application Access by writing to DMA CSR0 */
+-      writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
++      writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
+              ioaddr + DMA_BUS_MODE);
+       /* Mask interrupts by writing to CSR7 */
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+@@ -155,8 +155,11 @@ enum power_event {
+ #define MTL_CHAN_RX_DEBUG(x)          (MTL_CHANX_BASE_ADDR(x) + 0x38)
+ #define MTL_OP_MODE_RSF                       BIT(5)
++#define MTL_OP_MODE_TXQEN             BIT(3)
+ #define MTL_OP_MODE_TSF                       BIT(1)
++#define MTL_OP_MODE_TQS_MASK          GENMASK(24, 16)
++
+ #define MTL_OP_MODE_TTC_MASK          0x70
+ #define MTL_OP_MODE_TTC_SHIFT         4
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -430,6 +430,12 @@ struct mac_device_info *dwmac4_setup(voi
+       mac->link.speed = GMAC_CONFIG_FES;
+       mac->mii.addr = GMAC_MDIO_ADDR;
+       mac->mii.data = GMAC_MDIO_DATA;
++      mac->mii.addr_shift = 21;
++      mac->mii.addr_mask = GENMASK(25, 21);
++      mac->mii.reg_shift = 16;
++      mac->mii.reg_mask = GENMASK(20, 16);
++      mac->mii.clk_csr_shift = 8;
++      mac->mii.clk_csr_mask = GENMASK(11, 8);
+       /* Get and dump the chip ID */
+       *synopsys_id = stmmac_get_synopsys_id(hwid);
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+@@ -23,7 +23,7 @@ static int dwmac4_wrback_get_tx_status(v
+       unsigned int tdes3;
+       int ret = tx_done;
+-      tdes3 = p->des3;
++      tdes3 = le32_to_cpu(p->des3);
+       /* Get tx owner first */
+       if (unlikely(tdes3 & TDES3_OWN))
+@@ -77,9 +77,9 @@ static int dwmac4_wrback_get_rx_status(v
+                                      struct dma_desc *p)
+ {
+       struct net_device_stats *stats = (struct net_device_stats *)data;
+-      unsigned int rdes1 = p->des1;
+-      unsigned int rdes2 = p->des2;
+-      unsigned int rdes3 = p->des3;
++      unsigned int rdes1 = le32_to_cpu(p->des1);
++      unsigned int rdes2 = le32_to_cpu(p->des2);
++      unsigned int rdes3 = le32_to_cpu(p->des3);
+       int message_type;
+       int ret = good_frame;
+@@ -176,47 +176,48 @@ static int dwmac4_wrback_get_rx_status(v
+ static int dwmac4_rd_get_tx_len(struct dma_desc *p)
+ {
+-      return (p->des2 & TDES2_BUFFER1_SIZE_MASK);
++      return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK);
+ }
+ static int dwmac4_get_tx_owner(struct dma_desc *p)
+ {
+-      return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT;
++      return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT;
+ }
+ static void dwmac4_set_tx_owner(struct dma_desc *p)
+ {
+-      p->des3 |= TDES3_OWN;
++      p->des3 |= cpu_to_le32(TDES3_OWN);
+ }
+ static void dwmac4_set_rx_owner(struct dma_desc *p)
+ {
+-      p->des3 |= RDES3_OWN;
++      p->des3 |= cpu_to_le32(RDES3_OWN);
+ }
+ static int dwmac4_get_tx_ls(struct dma_desc *p)
+ {
+-      return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT;
++      return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR)
++              >> TDES3_LAST_DESCRIPTOR_SHIFT;
+ }
+ static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
+ {
+-      return (p->des3 & RDES3_PACKET_SIZE_MASK);
++      return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
+ }
+ static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
+ {
+-      p->des2 |= TDES2_TIMESTAMP_ENABLE;
++      p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE);
+ }
+ static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
+ {
+       /* Context type from W/B descriptor must be zero */
+-      if (p->des3 & TDES3_CONTEXT_TYPE)
++      if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
+               return -EINVAL;
+       /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
+-      if (p->des3 & TDES3_TIMESTAMP_STATUS)
++      if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
+               return 0;
+       return 1;
+@@ -227,9 +228,9 @@ static inline u64 dwmac4_get_timestamp(v
+       struct dma_desc *p = (struct dma_desc *)desc;
+       u64 ns;
+-      ns = p->des0;
++      ns = le32_to_cpu(p->des0);
+       /* convert high/sec time stamp value to nanosecond */
+-      ns += p->des1 * 1000000000ULL;
++      ns += le32_to_cpu(p->des1) * 1000000000ULL;
+       return ns;
+ }
+@@ -264,7 +265,7 @@ static int dwmac4_wrback_get_rx_timestam
+       /* Get the status from normal w/b descriptor */
+       if (likely(p->des3 & TDES3_RS1V)) {
+-              if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) {
++              if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
+                       int i = 0;
+                       /* Check if timestamp is OK from context descriptor */
+@@ -287,10 +288,10 @@ exit:
+ static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+                                  int mode, int end)
+ {
+-      p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;
++      p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
+       if (!disable_rx_ic)
+-              p->des3 |= RDES3_INT_ON_COMPLETION_EN;
++              p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
+ }
+ static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
+@@ -305,9 +306,9 @@ static void dwmac4_rd_prepare_tx_desc(st
+                                     bool csum_flag, int mode, bool tx_own,
+                                     bool ls)
+ {
+-      unsigned int tdes3 = p->des3;
++      unsigned int tdes3 = le32_to_cpu(p->des3);
+-      p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
++      p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
+       if (is_fs)
+               tdes3 |= TDES3_FIRST_DESCRIPTOR;
+@@ -333,9 +334,9 @@ static void dwmac4_rd_prepare_tx_desc(st
+                * descriptors for the same frame has to be set before, to
+                * avoid race condition.
+                */
+-              wmb();
++              dma_wmb();
+-      p->des3 = tdes3;
++      p->des3 = cpu_to_le32(tdes3);
+ }
+ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
+@@ -343,14 +344,14 @@ static void dwmac4_rd_prepare_tso_tx_des
+                                         bool ls, unsigned int tcphdrlen,
+                                         unsigned int tcppayloadlen)
+ {
+-      unsigned int tdes3 = p->des3;
++      unsigned int tdes3 = le32_to_cpu(p->des3);
+       if (len1)
+-              p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK);
++              p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));
+       if (len2)
+-              p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
+-                          & TDES2_BUFFER2_SIZE_MASK;
++              p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
++                          & TDES2_BUFFER2_SIZE_MASK);
+       if (is_fs) {
+               tdes3 |= TDES3_FIRST_DESCRIPTOR |
+@@ -376,9 +377,9 @@ static void dwmac4_rd_prepare_tso_tx_des
+                * descriptors for the same frame has to be set before, to
+                * avoid race condition.
+                */
+-              wmb();
++              dma_wmb();
+-      p->des3 = tdes3;
++      p->des3 = cpu_to_le32(tdes3);
+ }
+ static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
+@@ -389,7 +390,7 @@ static void dwmac4_release_tx_desc(struc
+ static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
+ {
+-      p->des2 |= TDES2_INTERRUPT_ON_COMPLETION;
++      p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
+ }
+ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
+@@ -402,7 +403,8 @@ static void dwmac4_display_ring(void *he
+       for (i = 0; i < size; i++) {
+               pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+                       i, (unsigned int)virt_to_phys(p),
+-                      p->des0, p->des1, p->des2, p->des3);
++                      le32_to_cpu(p->des0), le32_to_cpu(p->des1),
++                      le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+               p++;
+       }
+ }
+@@ -411,8 +413,8 @@ static void dwmac4_set_mss_ctxt(struct d
+ {
+       p->des0 = 0;
+       p->des1 = 0;
+-      p->des2 = mss;
+-      p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV;
++      p->des2 = cpu_to_le32(mss);
++      p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
+ }
+ const struct stmmac_desc_ops dwmac4_desc_ops = {
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+@@ -71,25 +71,29 @@ static void dwmac4_dma_axi(void __iomem
+       writel(value, ioaddr + DMA_SYS_BUS_MODE);
+ }
+-static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl,
++static void dwmac4_dma_init_channel(void __iomem *ioaddr,
++                                  struct stmmac_dma_cfg *dma_cfg,
+                                   u32 dma_tx_phy, u32 dma_rx_phy,
+                                   u32 channel)
+ {
+       u32 value;
++      int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
++      int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+       /* set PBL for each channels. Currently we affect same configuration
+        * on each channel
+        */
+       value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
+-      value = value | DMA_BUS_MODE_PBL;
++      if (dma_cfg->pblx8)
++              value = value | DMA_BUS_MODE_PBL;
+       writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
+       value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
+-      value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT);
++      value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+       writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
+       value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
+-      value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT);
++      value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+       writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
+       /* Mask interrupts by writing to CSR7 */
+@@ -99,27 +103,28 @@ static void dwmac4_dma_init_channel(void
+       writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
+ }
+-static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+-                          int aal, u32 dma_tx, u32 dma_rx, int atds)
++static void dwmac4_dma_init(void __iomem *ioaddr,
++                          struct stmmac_dma_cfg *dma_cfg,
++                          u32 dma_tx, u32 dma_rx, int atds)
+ {
+       u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+       int i;
+       /* Set the Fixed burst mode */
+-      if (fb)
++      if (dma_cfg->fixed_burst)
+               value |= DMA_SYS_BUS_FB;
+       /* Mixed Burst has no effect when fb is set */
+-      if (mb)
++      if (dma_cfg->mixed_burst)
+               value |= DMA_SYS_BUS_MB;
+-      if (aal)
++      if (dma_cfg->aal)
+               value |= DMA_SYS_BUS_AAL;
+       writel(value, ioaddr + DMA_SYS_BUS_MODE);
+       for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+-              dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i);
++              dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
+ }
+ static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
+@@ -215,7 +220,17 @@ static void dwmac4_dma_chan_op_mode(void
+               else
+                       mtl_tx_op |= MTL_OP_MODE_TTC_512;
+       }
+-
++      /* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
++       * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
++       * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
++       * with reset values: TXQEN off, TQS 256 bytes.
++       *
++       * Write the bits in both cases, since it will have no effect when RO.
++       * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might
++       * be RO, however, writing the whole TQS field will result in a value
++       * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
++       */
++      mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
+       writel(mtl_tx_op, ioaddr +  MTL_CHAN_TX_OP_MODE(channel));
+       mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+@@ -30,7 +30,7 @@ static int enh_desc_get_tx_status(void *
+                                 struct dma_desc *p, void __iomem *ioaddr)
+ {
+       struct net_device_stats *stats = (struct net_device_stats *)data;
+-      unsigned int tdes0 = p->des0;
++      unsigned int tdes0 = le32_to_cpu(p->des0);
+       int ret = tx_done;
+       /* Get tx owner first */
+@@ -95,7 +95,7 @@ static int enh_desc_get_tx_status(void *
+ static int enh_desc_get_tx_len(struct dma_desc *p)
+ {
+-      return (p->des1 & ETDES1_BUFFER1_SIZE_MASK);
++      return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
+ }
+ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
+@@ -134,8 +134,8 @@ static int enh_desc_coe_rdes0(int ipc_er
+ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
+                                   struct dma_extended_desc *p)
+ {
+-      unsigned int rdes0 = p->basic.des0;
+-      unsigned int rdes4 = p->des4;
++      unsigned int rdes0 = le32_to_cpu(p->basic.des0);
++      unsigned int rdes4 = le32_to_cpu(p->des4);
+       if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
+               int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;
+@@ -199,7 +199,7 @@ static int enh_desc_get_rx_status(void *
+                                 struct dma_desc *p)
+ {
+       struct net_device_stats *stats = (struct net_device_stats *)data;
+-      unsigned int rdes0 = p->des0;
++      unsigned int rdes0 = le32_to_cpu(p->des0);
+       int ret = good_frame;
+       if (unlikely(rdes0 & RDES0_OWN))
+@@ -265,8 +265,8 @@ static int enh_desc_get_rx_status(void *
+ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+                                 int mode, int end)
+ {
+-      p->des0 |= RDES0_OWN;
+-      p->des1 |= ((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
++      p->des0 |= cpu_to_le32(RDES0_OWN);
++      p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
+       if (mode == STMMAC_CHAIN_MODE)
+               ehn_desc_rx_set_on_chain(p);
+@@ -274,12 +274,12 @@ static void enh_desc_init_rx_desc(struct
+               ehn_desc_rx_set_on_ring(p, end);
+       if (disable_rx_ic)
+-              p->des1 |= ERDES1_DISABLE_IC;
++              p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
+ }
+ static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
+ {
+-      p->des0 &= ~ETDES0_OWN;
++      p->des0 &= cpu_to_le32(~ETDES0_OWN);
+       if (mode == STMMAC_CHAIN_MODE)
+               enh_desc_end_tx_desc_on_chain(p);
+       else
+@@ -288,27 +288,27 @@ static void enh_desc_init_tx_desc(struct
+ static int enh_desc_get_tx_owner(struct dma_desc *p)
+ {
+-      return (p->des0 & ETDES0_OWN) >> 31;
++      return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
+ }
+ static void enh_desc_set_tx_owner(struct dma_desc *p)
+ {
+-      p->des0 |= ETDES0_OWN;
++      p->des0 |= cpu_to_le32(ETDES0_OWN);
+ }
+ static void enh_desc_set_rx_owner(struct dma_desc *p)
+ {
+-      p->des0 |= RDES0_OWN;
++      p->des0 |= cpu_to_le32(RDES0_OWN);
+ }
+ static int enh_desc_get_tx_ls(struct dma_desc *p)
+ {
+-      return (p->des0 & ETDES0_LAST_SEGMENT) >> 29;
++      return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
+ }
+ static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
+ {
+-      int ter = (p->des0 & ETDES0_END_RING) >> 21;
++      int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;
+       memset(p, 0, offsetof(struct dma_desc, des2));
+       if (mode == STMMAC_CHAIN_MODE)
+@@ -321,7 +321,7 @@ static void enh_desc_prepare_tx_desc(str
+                                    bool csum_flag, int mode, bool tx_own,
+                                    bool ls)
+ {
+-      unsigned int tdes0 = p->des0;
++      unsigned int tdes0 = le32_to_cpu(p->des0);
+       if (mode == STMMAC_CHAIN_MODE)
+               enh_set_tx_desc_len_on_chain(p, len);
+@@ -350,14 +350,14 @@ static void enh_desc_prepare_tx_desc(str
+                * descriptors for the same frame has to be set before, to
+                * avoid race condition.
+                */
+-              wmb();
++              dma_wmb();
+-      p->des0 = tdes0;
++      p->des0 = cpu_to_le32(tdes0);
+ }
+ static void enh_desc_set_tx_ic(struct dma_desc *p)
+ {
+-      p->des0 |= ETDES0_INTERRUPT;
++      p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
+ }
+ static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
+@@ -372,18 +372,18 @@ static int enh_desc_get_rx_frame_len(str
+       if (rx_coe_type == STMMAC_RX_COE_TYPE1)
+               csum = 2;
+-      return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
+-              csum);
++      return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
++                              >> RDES0_FRAME_LEN_SHIFT) - csum);
+ }
+ static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
+ {
+-      p->des0 |= ETDES0_TIME_STAMP_ENABLE;
++      p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
+ }
+ static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
+ {
+-      return (p->des0 & ETDES0_TIME_STAMP_STATUS) >> 17;
++      return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
+ }
+ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
+@@ -392,13 +392,13 @@ static u64 enh_desc_get_timestamp(void *
+       if (ats) {
+               struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
+-              ns = p->des6;
++              ns = le32_to_cpu(p->des6);
+               /* convert high/sec time stamp value to nanosecond */
+-              ns += p->des7 * 1000000000ULL;
++              ns += le32_to_cpu(p->des7) * 1000000000ULL;
+       } else {
+               struct dma_desc *p = (struct dma_desc *)desc;
+-              ns = p->des2;
+-              ns += p->des3 * 1000000000ULL;
++              ns = le32_to_cpu(p->des2);
++              ns += le32_to_cpu(p->des3) * 1000000000ULL;
+       }
+       return ns;
+@@ -408,10 +408,11 @@ static int enh_desc_get_rx_timestamp_sta
+ {
+       if (ats) {
+               struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
+-              return (p->basic.des0 & RDES0_IPC_CSUM_ERROR) >> 7;
++              return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
+       } else {
+               struct dma_desc *p = (struct dma_desc *)desc;
+-              if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
++              if ((le32_to_cpu(p->des2) == 0xffffffff) &&
++                  (le32_to_cpu(p->des3) == 0xffffffff))
+                       /* timestamp is corrupted, hence don't store it */
+                       return 0;
+               else
+--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+@@ -30,8 +30,8 @@ static int ndesc_get_tx_status(void *dat
+                              struct dma_desc *p, void __iomem *ioaddr)
+ {
+       struct net_device_stats *stats = (struct net_device_stats *)data;
+-      unsigned int tdes0 = p->des0;
+-      unsigned int tdes1 = p->des1;
++      unsigned int tdes0 = le32_to_cpu(p->des0);
++      unsigned int tdes1 = le32_to_cpu(p->des1);
+       int ret = tx_done;
+       /* Get tx owner first */
+@@ -77,7 +77,7 @@ static int ndesc_get_tx_status(void *dat
+ static int ndesc_get_tx_len(struct dma_desc *p)
+ {
+-      return (p->des1 & RDES1_BUFFER1_SIZE_MASK);
++      return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK);
+ }
+ /* This function verifies if each incoming frame has some errors
+@@ -88,7 +88,7 @@ static int ndesc_get_rx_status(void *dat
+                              struct dma_desc *p)
+ {
+       int ret = good_frame;
+-      unsigned int rdes0 = p->des0;
++      unsigned int rdes0 = le32_to_cpu(p->des0);
+       struct net_device_stats *stats = (struct net_device_stats *)data;
+       if (unlikely(rdes0 & RDES0_OWN))
+@@ -141,8 +141,8 @@ static int ndesc_get_rx_status(void *dat
+ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
+                              int end)
+ {
+-      p->des0 |= RDES0_OWN;
+-      p->des1 |= (BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK;
++      p->des0 |= cpu_to_le32(RDES0_OWN);
++      p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+       if (mode == STMMAC_CHAIN_MODE)
+               ndesc_rx_set_on_chain(p, end);
+@@ -150,12 +150,12 @@ static void ndesc_init_rx_desc(struct dm
+               ndesc_rx_set_on_ring(p, end);
+       if (disable_rx_ic)
+-              p->des1 |= RDES1_DISABLE_IC;
++              p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
+ }
+ static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
+ {
+-      p->des0 &= ~TDES0_OWN;
++      p->des0 &= cpu_to_le32(~TDES0_OWN);
+       if (mode == STMMAC_CHAIN_MODE)
+               ndesc_tx_set_on_chain(p);
+       else
+@@ -164,27 +164,27 @@ static void ndesc_init_tx_desc(struct dm
+ static int ndesc_get_tx_owner(struct dma_desc *p)
+ {
+-      return (p->des0 & TDES0_OWN) >> 31;
++      return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31;
+ }
+ static void ndesc_set_tx_owner(struct dma_desc *p)
+ {
+-      p->des0 |= TDES0_OWN;
++      p->des0 |= cpu_to_le32(TDES0_OWN);
+ }
+ static void ndesc_set_rx_owner(struct dma_desc *p)
+ {
+-      p->des0 |= RDES0_OWN;
++      p->des0 |= cpu_to_le32(RDES0_OWN);
+ }
+ static int ndesc_get_tx_ls(struct dma_desc *p)
+ {
+-      return (p->des1 & TDES1_LAST_SEGMENT) >> 30;
++      return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30;
+ }
+ static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
+ {
+-      int ter = (p->des1 & TDES1_END_RING) >> 25;
++      int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;
+       memset(p, 0, offsetof(struct dma_desc, des2));
+       if (mode == STMMAC_CHAIN_MODE)
+@@ -197,7 +197,7 @@ static void ndesc_prepare_tx_desc(struct
+                                 bool csum_flag, int mode, bool tx_own,
+                                 bool ls)
+ {
+-      unsigned int tdes1 = p->des1;
++      unsigned int tdes1 = le32_to_cpu(p->des1);
+       if (is_fs)
+               tdes1 |= TDES1_FIRST_SEGMENT;
+@@ -212,7 +212,7 @@ static void ndesc_prepare_tx_desc(struct
+       if (ls)
+               tdes1 |= TDES1_LAST_SEGMENT;
+-      p->des1 = tdes1;
++      p->des1 = cpu_to_le32(tdes1);
+       if (mode == STMMAC_CHAIN_MODE)
+               norm_set_tx_desc_len_on_chain(p, len);
+@@ -220,12 +220,12 @@ static void ndesc_prepare_tx_desc(struct
+               norm_set_tx_desc_len_on_ring(p, len);
+       if (tx_own)
+-              p->des0 |= TDES0_OWN;
++              p->des0 |= cpu_to_le32(TDES0_OWN);
+ }
+ static void ndesc_set_tx_ic(struct dma_desc *p)
+ {
+-      p->des1 |= TDES1_INTERRUPT;
++      p->des1 |= cpu_to_le32(TDES1_INTERRUPT);
+ }
+ static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
+@@ -241,19 +241,20 @@ static int ndesc_get_rx_frame_len(struct
+       if (rx_coe_type == STMMAC_RX_COE_TYPE1)
+               csum = 2;
+-      return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
++      return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
++                              >> RDES0_FRAME_LEN_SHIFT) -
+               csum);
+ }
+ static void ndesc_enable_tx_timestamp(struct dma_desc *p)
+ {
+-      p->des1 |= TDES1_TIME_STAMP_ENABLE;
++      p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE);
+ }
+ static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
+ {
+-      return (p->des0 & TDES0_TIME_STAMP_STATUS) >> 17;
++      return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
+ }
+ static u64 ndesc_get_timestamp(void *desc, u32 ats)
+@@ -261,9 +262,9 @@ static u64 ndesc_get_timestamp(void *des
+       struct dma_desc *p = (struct dma_desc *)desc;
+       u64 ns;
+-      ns = p->des2;
++      ns = le32_to_cpu(p->des2);
+       /* convert high/sec time stamp value to nanosecond */
+-      ns += p->des3 * 1000000000ULL;
++      ns += le32_to_cpu(p->des3) * 1000000000ULL;
+       return ns;
+ }
+@@ -272,7 +273,8 @@ static int ndesc_get_rx_timestamp_status
+ {
+       struct dma_desc *p = (struct dma_desc *)desc;
+-      if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
++      if ((le32_to_cpu(p->des2) == 0xffffffff) &&
++          (le32_to_cpu(p->des3) == 0xffffffff))
+               /* timestamp is corrupted, hence don't store it */
+               return 0;
+       else
+--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+@@ -34,7 +34,7 @@ static int stmmac_jumbo_frm(void *p, str
+       unsigned int entry = priv->cur_tx;
+       struct dma_desc *desc;
+       unsigned int nopaged_len = skb_headlen(skb);
+-      unsigned int bmax, len;
++      unsigned int bmax, len, des2;
+       if (priv->extend_desc)
+               desc = (struct dma_desc *)(priv->dma_etx + entry);
+@@ -50,16 +50,17 @@ static int stmmac_jumbo_frm(void *p, str
+       if (nopaged_len > BUF_SIZE_8KiB) {
+-              desc->des2 = dma_map_single(priv->device, skb->data,
+-                                          bmax, DMA_TO_DEVICE);
+-              if (dma_mapping_error(priv->device, desc->des2))
++              des2 = dma_map_single(priv->device, skb->data, bmax,
++                                    DMA_TO_DEVICE);
++              desc->des2 = cpu_to_le32(des2);
++              if (dma_mapping_error(priv->device, des2))
+                       return -1;
+-              priv->tx_skbuff_dma[entry].buf = desc->des2;
++              priv->tx_skbuff_dma[entry].buf = des2;
+               priv->tx_skbuff_dma[entry].len = bmax;
+               priv->tx_skbuff_dma[entry].is_jumbo = true;
+-              desc->des3 = desc->des2 + BUF_SIZE_4KiB;
++              desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+               priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
+                                               STMMAC_RING_MODE, 0, false);
+               priv->tx_skbuff[entry] = NULL;
+@@ -70,26 +71,28 @@ static int stmmac_jumbo_frm(void *p, str
+               else
+                       desc = priv->dma_tx + entry;
+-              desc->des2 = dma_map_single(priv->device, skb->data + bmax,
+-                                          len, DMA_TO_DEVICE);
+-              if (dma_mapping_error(priv->device, desc->des2))
++              des2 = dma_map_single(priv->device, skb->data + bmax, len,
++                                    DMA_TO_DEVICE);
++              desc->des2 = cpu_to_le32(des2);
++              if (dma_mapping_error(priv->device, des2))
+                       return -1;
+-              priv->tx_skbuff_dma[entry].buf = desc->des2;
++              priv->tx_skbuff_dma[entry].buf = des2;
+               priv->tx_skbuff_dma[entry].len = len;
+               priv->tx_skbuff_dma[entry].is_jumbo = true;
+-              desc->des3 = desc->des2 + BUF_SIZE_4KiB;
++              desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+               priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+                                               STMMAC_RING_MODE, 1, true);
+       } else {
+-              desc->des2 = dma_map_single(priv->device, skb->data,
+-                                          nopaged_len, DMA_TO_DEVICE);
+-              if (dma_mapping_error(priv->device, desc->des2))
++              des2 = dma_map_single(priv->device, skb->data,
++                                    nopaged_len, DMA_TO_DEVICE);
++              desc->des2 = cpu_to_le32(des2);
++              if (dma_mapping_error(priv->device, des2))
+                       return -1;
+-              priv->tx_skbuff_dma[entry].buf = desc->des2;
++              priv->tx_skbuff_dma[entry].buf = des2;
+               priv->tx_skbuff_dma[entry].len = nopaged_len;
+               priv->tx_skbuff_dma[entry].is_jumbo = true;
+-              desc->des3 = desc->des2 + BUF_SIZE_4KiB;
++              desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+               priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
+                                               STMMAC_RING_MODE, 0, true);
+       }
+@@ -115,13 +118,13 @@ static void stmmac_refill_desc3(void *pr
+       /* Fill DES3 in case of RING mode */
+       if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+-              p->des3 = p->des2 + BUF_SIZE_8KiB;
++              p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
+ }
+ /* In ring mode we need to fill the desc3 because it is used as buffer */
+ static void stmmac_init_desc3(struct dma_desc *p)
+ {
+-      p->des3 = p->des2 + BUF_SIZE_8KiB;
++      p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
+ }
+ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+@@ -64,7 +64,6 @@ struct stmmac_priv {
+       dma_addr_t dma_tx_phy;
+       int tx_coalesce;
+       int hwts_tx_en;
+-      spinlock_t tx_lock;
+       bool tx_path_in_lpi_mode;
+       struct timer_list txtimer;
+       bool tso;
+@@ -90,7 +89,6 @@ struct stmmac_priv {
+       struct mac_device_info *hw;
+       spinlock_t lock;
+-      struct phy_device *phydev ____cacheline_aligned_in_smp;
+       int oldlink;
+       int speed;
+       int oldduplex;
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -263,7 +263,7 @@ static void stmmac_ethtool_getdrvinfo(st
+ {
+       struct stmmac_priv *priv = netdev_priv(dev);
+-      if (priv->plat->has_gmac)
++      if (priv->plat->has_gmac || priv->plat->has_gmac4)
+               strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
+       else
+               strlcpy(info->driver, MAC100_ETHTOOL_NAME,
+@@ -272,25 +272,26 @@ static void stmmac_ethtool_getdrvinfo(st
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ }
+-static int stmmac_ethtool_getsettings(struct net_device *dev,
+-                                    struct ethtool_cmd *cmd)
++static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
++                                           struct ethtool_link_ksettings *cmd)
+ {
+       struct stmmac_priv *priv = netdev_priv(dev);
+-      struct phy_device *phy = priv->phydev;
++      struct phy_device *phy = dev->phydev;
+       int rc;
+       if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+           priv->hw->pcs & STMMAC_PCS_SGMII) {
+               struct rgmii_adv adv;
++              u32 supported, advertising, lp_advertising;
+               if (!priv->xstats.pcs_link) {
+-                      ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+-                      cmd->duplex = DUPLEX_UNKNOWN;
++                      cmd->base.speed = SPEED_UNKNOWN;
++                      cmd->base.duplex = DUPLEX_UNKNOWN;
+                       return 0;
+               }
+-              cmd->duplex = priv->xstats.pcs_duplex;
++              cmd->base.duplex = priv->xstats.pcs_duplex;
+-              ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
++              cmd->base.speed = priv->xstats.pcs_speed;
+               /* Get and convert ADV/LP_ADV from the HW AN registers */
+               if (!priv->hw->mac->pcs_get_adv_lp)
+@@ -300,45 +301,59 @@ static int stmmac_ethtool_getsettings(st
+               /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
++              ethtool_convert_link_mode_to_legacy_u32(
++                      &supported, cmd->link_modes.supported);
++              ethtool_convert_link_mode_to_legacy_u32(
++                      &advertising, cmd->link_modes.advertising);
++              ethtool_convert_link_mode_to_legacy_u32(
++                      &lp_advertising, cmd->link_modes.lp_advertising);
++
+               if (adv.pause & STMMAC_PCS_PAUSE)
+-                      cmd->advertising |= ADVERTISED_Pause;
++                      advertising |= ADVERTISED_Pause;
+               if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
+-                      cmd->advertising |= ADVERTISED_Asym_Pause;
++                      advertising |= ADVERTISED_Asym_Pause;
+               if (adv.lp_pause & STMMAC_PCS_PAUSE)
+-                      cmd->lp_advertising |= ADVERTISED_Pause;
++                      lp_advertising |= ADVERTISED_Pause;
+               if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
+-                      cmd->lp_advertising |= ADVERTISED_Asym_Pause;
++                      lp_advertising |= ADVERTISED_Asym_Pause;
+               /* Reg49[3] always set because ANE is always supported */
+-              cmd->autoneg = ADVERTISED_Autoneg;
+-              cmd->supported |= SUPPORTED_Autoneg;
+-              cmd->advertising |= ADVERTISED_Autoneg;
+-              cmd->lp_advertising |= ADVERTISED_Autoneg;
++              cmd->base.autoneg = ADVERTISED_Autoneg;
++              supported |= SUPPORTED_Autoneg;
++              advertising |= ADVERTISED_Autoneg;
++              lp_advertising |= ADVERTISED_Autoneg;
+               if (adv.duplex) {
+-                      cmd->supported |= (SUPPORTED_1000baseT_Full |
+-                                         SUPPORTED_100baseT_Full |
+-                                         SUPPORTED_10baseT_Full);
+-                      cmd->advertising |= (ADVERTISED_1000baseT_Full |
+-                                           ADVERTISED_100baseT_Full |
+-                                           ADVERTISED_10baseT_Full);
++                      supported |= (SUPPORTED_1000baseT_Full |
++                                    SUPPORTED_100baseT_Full |
++                                    SUPPORTED_10baseT_Full);
++                      advertising |= (ADVERTISED_1000baseT_Full |
++                                      ADVERTISED_100baseT_Full |
++                                      ADVERTISED_10baseT_Full);
+               } else {
+-                      cmd->supported |= (SUPPORTED_1000baseT_Half |
+-                                         SUPPORTED_100baseT_Half |
+-                                         SUPPORTED_10baseT_Half);
+-                      cmd->advertising |= (ADVERTISED_1000baseT_Half |
+-                                           ADVERTISED_100baseT_Half |
+-                                           ADVERTISED_10baseT_Half);
++                      supported |= (SUPPORTED_1000baseT_Half |
++                                    SUPPORTED_100baseT_Half |
++                                    SUPPORTED_10baseT_Half);
++                      advertising |= (ADVERTISED_1000baseT_Half |
++                                      ADVERTISED_100baseT_Half |
++                                      ADVERTISED_10baseT_Half);
+               }
+               if (adv.lp_duplex)
+-                      cmd->lp_advertising |= (ADVERTISED_1000baseT_Full |
+-                                              ADVERTISED_100baseT_Full |
+-                                              ADVERTISED_10baseT_Full);
++                      lp_advertising |= (ADVERTISED_1000baseT_Full |
++                                         ADVERTISED_100baseT_Full |
++                                         ADVERTISED_10baseT_Full);
+               else
+-                      cmd->lp_advertising |= (ADVERTISED_1000baseT_Half |
+-                                              ADVERTISED_100baseT_Half |
+-                                              ADVERTISED_10baseT_Half);
+-              cmd->port = PORT_OTHER;
++                      lp_advertising |= (ADVERTISED_1000baseT_Half |
++                                         ADVERTISED_100baseT_Half |
++                                         ADVERTISED_10baseT_Half);
++              cmd->base.port = PORT_OTHER;
++
++              ethtool_convert_legacy_u32_to_link_mode(
++                      cmd->link_modes.supported, supported);
++              ethtool_convert_legacy_u32_to_link_mode(
++                      cmd->link_modes.advertising, advertising);
++              ethtool_convert_legacy_u32_to_link_mode(
++                      cmd->link_modes.lp_advertising, lp_advertising);
+               return 0;
+       }
+@@ -353,16 +368,16 @@ static int stmmac_ethtool_getsettings(st
+               "link speed / duplex setting\n", dev->name);
+               return -EBUSY;
+       }
+-      cmd->transceiver = XCVR_INTERNAL;
+-      rc = phy_ethtool_gset(phy, cmd);
++      rc = phy_ethtool_ksettings_get(phy, cmd);
+       return rc;
+ }
+-static int stmmac_ethtool_setsettings(struct net_device *dev,
+-                                    struct ethtool_cmd *cmd)
++static int
++stmmac_ethtool_set_link_ksettings(struct net_device *dev,
++                                const struct ethtool_link_ksettings *cmd)
+ {
+       struct stmmac_priv *priv = netdev_priv(dev);
+-      struct phy_device *phy = priv->phydev;
++      struct phy_device *phy = dev->phydev;
+       int rc;
+       if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+@@ -370,7 +385,7 @@ static int stmmac_ethtool_setsettings(st
+               u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
+               /* Only support ANE */
+-              if (cmd->autoneg != AUTONEG_ENABLE)
++              if (cmd->base.autoneg != AUTONEG_ENABLE)
+                       return -EINVAL;
+               mask &= (ADVERTISED_1000baseT_Half |
+@@ -391,9 +406,7 @@ static int stmmac_ethtool_setsettings(st
+               return 0;
+       }
+-      spin_lock(&priv->lock);
+-      rc = phy_ethtool_sset(phy, cmd);
+-      spin_unlock(&priv->lock);
++      rc = phy_ethtool_ksettings_set(phy, cmd);
+       return rc;
+ }
+@@ -433,7 +446,7 @@ static void stmmac_ethtool_gregs(struct
+       memset(reg_space, 0x0, REG_SPACE_SIZE);
+-      if (!priv->plat->has_gmac) {
++      if (!(priv->plat->has_gmac || priv->plat->has_gmac4)) {
+               /* MAC registers */
+               for (i = 0; i < 12; i++)
+                       reg_space[i] = readl(priv->ioaddr + (i * 4));
+@@ -471,12 +484,12 @@ stmmac_get_pauseparam(struct net_device
+               if (!adv_lp.pause)
+                       return;
+       } else {
+-              if (!(priv->phydev->supported & SUPPORTED_Pause) ||
+-                  !(priv->phydev->supported & SUPPORTED_Asym_Pause))
++              if (!(netdev->phydev->supported & SUPPORTED_Pause) ||
++                  !(netdev->phydev->supported & SUPPORTED_Asym_Pause))
+                       return;
+       }
+-      pause->autoneg = priv->phydev->autoneg;
++      pause->autoneg = netdev->phydev->autoneg;
+       if (priv->flow_ctrl & FLOW_RX)
+               pause->rx_pause = 1;
+@@ -490,7 +503,7 @@ stmmac_set_pauseparam(struct net_device
+                     struct ethtool_pauseparam *pause)
+ {
+       struct stmmac_priv *priv = netdev_priv(netdev);
+-      struct phy_device *phy = priv->phydev;
++      struct phy_device *phy = netdev->phydev;
+       int new_pause = FLOW_OFF;
+       if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
+@@ -550,7 +563,7 @@ static void stmmac_get_ethtool_stats(str
+                       }
+               }
+               if (priv->eee_enabled) {
+-                      int val = phy_get_eee_err(priv->phydev);
++                      int val = phy_get_eee_err(dev->phydev);
+                       if (val)
+                               priv->xstats.phy_eee_wakeup_error_n = val;
+               }
+@@ -669,7 +682,7 @@ static int stmmac_ethtool_op_get_eee(str
+       edata->eee_active = priv->eee_active;
+       edata->tx_lpi_timer = priv->tx_lpi_timer;
+-      return phy_ethtool_get_eee(priv->phydev, edata);
++      return phy_ethtool_get_eee(dev->phydev, edata);
+ }
+ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
+@@ -694,7 +707,7 @@ static int stmmac_ethtool_op_set_eee(str
+               priv->tx_lpi_timer = edata->tx_lpi_timer;
+       }
+-      return phy_ethtool_set_eee(priv->phydev, edata);
++      return phy_ethtool_set_eee(dev->phydev, edata);
+ }
+ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
+@@ -853,8 +866,6 @@ static int stmmac_set_tunable(struct net
+ static const struct ethtool_ops stmmac_ethtool_ops = {
+       .begin = stmmac_check_if_running,
+       .get_drvinfo = stmmac_ethtool_getdrvinfo,
+-      .get_settings = stmmac_ethtool_getsettings,
+-      .set_settings = stmmac_ethtool_setsettings,
+       .get_msglevel = stmmac_ethtool_getmsglevel,
+       .set_msglevel = stmmac_ethtool_setmsglevel,
+       .get_regs = stmmac_ethtool_gregs,
+@@ -874,6 +885,8 @@ static const struct ethtool_ops stmmac_e
+       .set_coalesce = stmmac_set_coalesce,
+       .get_tunable = stmmac_get_tunable,
+       .set_tunable = stmmac_set_tunable,
++      .get_link_ksettings = stmmac_ethtool_get_link_ksettings,
++      .set_link_ksettings = stmmac_ethtool_set_link_ksettings,
+ };
+ void stmmac_set_ethtool_ops(struct net_device *netdev)
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -105,8 +105,8 @@ module_param(eee_timer, int, S_IRUGO | S
+ MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
+ #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
+-/* By default the driver will use the ring mode to manage tx and rx descriptors
+- * but passing this value so user can force to use the chain instead of the ring
++/* By default the driver will use the ring mode to manage tx and rx descriptors,
++ * but allow user to force to use the chain instead of the ring
+  */
+ static unsigned int chain_mode;
+ module_param(chain_mode, int, S_IRUGO);
+@@ -221,7 +221,8 @@ static inline u32 stmmac_rx_dirty(struct
+  */
+ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
+ {
+-      struct phy_device *phydev = priv->phydev;
++      struct net_device *ndev = priv->dev;
++      struct phy_device *phydev = ndev->phydev;
+       if (likely(priv->plat->fix_mac_speed))
+               priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
+@@ -279,6 +280,7 @@ static void stmmac_eee_ctrl_timer(unsign
+  */
+ bool stmmac_eee_init(struct stmmac_priv *priv)
+ {
++      struct net_device *ndev = priv->dev;
+       unsigned long flags;
+       bool ret = false;
+@@ -295,7 +297,7 @@ bool stmmac_eee_init(struct stmmac_priv
+               int tx_lpi_timer = priv->tx_lpi_timer;
+               /* Check if the PHY supports EEE */
+-              if (phy_init_eee(priv->phydev, 1)) {
++              if (phy_init_eee(ndev->phydev, 1)) {
+                       /* To manage at run-time if the EEE cannot be supported
+                        * anymore (for example because the lp caps have been
+                        * changed).
+@@ -303,7 +305,7 @@ bool stmmac_eee_init(struct stmmac_priv
+                        */
+                       spin_lock_irqsave(&priv->lock, flags);
+                       if (priv->eee_active) {
+-                              pr_debug("stmmac: disable EEE\n");
++                              netdev_dbg(priv->dev, "disable EEE\n");
+                               del_timer_sync(&priv->eee_ctrl_timer);
+                               priv->hw->mac->set_eee_timer(priv->hw, 0,
+                                                            tx_lpi_timer);
+@@ -327,12 +329,12 @@ bool stmmac_eee_init(struct stmmac_priv
+                                                    tx_lpi_timer);
+               }
+               /* Set HW EEE according to the speed */
+-              priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
++              priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
+               ret = true;
+               spin_unlock_irqrestore(&priv->lock, flags);
+-              pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
++              netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+       }
+ out:
+       return ret;
+@@ -450,8 +452,8 @@ static int stmmac_hwtstamp_ioctl(struct
+                          sizeof(struct hwtstamp_config)))
+               return -EFAULT;
+-      pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+-               __func__, config.flags, config.tx_type, config.rx_filter);
++      netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
++                 __func__, config.flags, config.tx_type, config.rx_filter);
+       /* reserved for future extensions */
+       if (config.flags)
+@@ -697,7 +699,7 @@ static void stmmac_release_ptp(struct st
+ static void stmmac_adjust_link(struct net_device *dev)
+ {
+       struct stmmac_priv *priv = netdev_priv(dev);
+-      struct phy_device *phydev = priv->phydev;
++      struct phy_device *phydev = dev->phydev;
+       unsigned long flags;
+       int new_state = 0;
+       unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
+@@ -750,9 +752,9 @@ static void stmmac_adjust_link(struct ne
+                               stmmac_hw_fix_mac_speed(priv);
+                               break;
+                       default:
+-                              if (netif_msg_link(priv))
+-                                      pr_warn("%s: Speed (%d) not 10/100\n",
+-                                              dev->name, phydev->speed);
++                              netif_warn(priv, link, priv->dev,
++                                         "Speed (%d) not 10/100\n",
++                                         phydev->speed);
+                               break;
+                       }
+@@ -805,10 +807,10 @@ static void stmmac_check_pcs_mode(struct
+                   (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+                   (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+                   (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+-                      pr_debug("STMMAC: PCS RGMII support enable\n");
++                      netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
+                       priv->hw->pcs = STMMAC_PCS_RGMII;
+               } else if (interface == PHY_INTERFACE_MODE_SGMII) {
+-                      pr_debug("STMMAC: PCS SGMII support enable\n");
++                      netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
+                       priv->hw->pcs = STMMAC_PCS_SGMII;
+               }
+       }
+@@ -843,15 +845,15 @@ static int stmmac_init_phy(struct net_de
+               snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+                        priv->plat->phy_addr);
+-              pr_debug("stmmac_init_phy:  trying to attach to %s\n",
+-                       phy_id_fmt);
++              netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
++                         phy_id_fmt);
+               phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
+                                    interface);
+       }
+       if (IS_ERR_OR_NULL(phydev)) {
+-              pr_err("%s: Could not attach to PHY\n", dev->name);
++              netdev_err(priv->dev, "Could not attach to PHY\n");
+               if (!phydev)
+                       return -ENODEV;
+@@ -884,10 +886,8 @@ static int stmmac_init_phy(struct net_de
+       if (phydev->is_pseudo_fixed_link)
+               phydev->irq = PHY_POLL;
+-      pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
+-               " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
+-
+-      priv->phydev = phydev;
++      netdev_dbg(priv->dev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
++                 __func__, phydev->phy_id, phydev->link);
+       return 0;
+ }
+@@ -973,7 +973,8 @@ static int stmmac_init_rx_buffers(struct
+       skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
+       if (!skb) {
+-              pr_err("%s: Rx init fails; skb is NULL\n", __func__);
++              netdev_err(priv->dev,
++                         "%s: Rx init fails; skb is NULL\n", __func__);
+               return -ENOMEM;
+       }
+       priv->rx_skbuff[i] = skb;
+@@ -981,15 +982,15 @@ static int stmmac_init_rx_buffers(struct
+                                               priv->dma_buf_sz,
+                                               DMA_FROM_DEVICE);
+       if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+-              pr_err("%s: DMA mapping error\n", __func__);
++              netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+       if (priv->synopsys_id >= DWMAC_CORE_4_00)
+-              p->des0 = priv->rx_skbuff_dma[i];
++              p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
+       else
+-              p->des2 = priv->rx_skbuff_dma[i];
++              p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
+       if ((priv->hw->mode->init_desc3) &&
+           (priv->dma_buf_sz == BUF_SIZE_16KiB))
+@@ -1031,13 +1032,14 @@ static int init_dma_desc_rings(struct ne
+       priv->dma_buf_sz = bfsize;
+-      if (netif_msg_probe(priv)) {
+-              pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
+-                       (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
++      netif_dbg(priv, probe, priv->dev,
++                "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
++                __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
++
++      /* RX INITIALIZATION */
++      netif_dbg(priv, probe, priv->dev,
++                "SKB addresses:\nskb\t\tskb data\tdma data\n");
+-              /* RX INITIALIZATION */
+-              pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
+-      }
+       for (i = 0; i < DMA_RX_SIZE; i++) {
+               struct dma_desc *p;
+               if (priv->extend_desc)
+@@ -1049,10 +1051,9 @@ static int init_dma_desc_rings(struct ne
+               if (ret)
+                       goto err_init_rx_buffers;
+-              if (netif_msg_probe(priv))
+-                      pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
+-                               priv->rx_skbuff[i]->data,
+-                               (unsigned int)priv->rx_skbuff_dma[i]);
++              netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
++                        priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
++                        (unsigned int)priv->rx_skbuff_dma[i]);
+       }
+       priv->cur_rx = 0;
+       priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+@@ -1307,7 +1308,7 @@ static void stmmac_tx_clean(struct stmma
+       unsigned int bytes_compl = 0, pkts_compl = 0;
+       unsigned int entry = priv->dirty_tx;
+-      spin_lock(&priv->tx_lock);
++      netif_tx_lock(priv->dev);
+       priv->xstats.tx_clean++;
+@@ -1378,22 +1379,17 @@ static void stmmac_tx_clean(struct stmma
+       netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
+       if (unlikely(netif_queue_stopped(priv->dev) &&
+-                   stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
+-              netif_tx_lock(priv->dev);
+-              if (netif_queue_stopped(priv->dev) &&
+-                  stmmac_tx_avail(priv) > STMMAC_TX_THRESH) {
+-                      if (netif_msg_tx_done(priv))
+-                              pr_debug("%s: restart transmit\n", __func__);
+-                      netif_wake_queue(priv->dev);
+-              }
+-              netif_tx_unlock(priv->dev);
++          stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
++              netif_dbg(priv, tx_done, priv->dev,
++                        "%s: restart transmit\n", __func__);
++              netif_wake_queue(priv->dev);
+       }
+       if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
+               stmmac_enable_eee_mode(priv);
+               mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+       }
+-      spin_unlock(&priv->tx_lock);
++      netif_tx_unlock(priv->dev);
+ }
+ static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
+@@ -1497,7 +1493,7 @@ static void stmmac_mmc_setup(struct stmm
+               dwmac_mmc_ctrl(priv->mmcaddr, mode);
+               memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+       } else
+-              pr_info(" No MAC Management Counters available\n");
++              netdev_info(priv->dev, "No MAC Management Counters available\n");
+ }
+ /**
+@@ -1510,18 +1506,18 @@ static void stmmac_mmc_setup(struct stmm
+ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
+ {
+       if (priv->plat->enh_desc) {
+-              pr_info(" Enhanced/Alternate descriptors\n");
++              dev_info(priv->device, "Enhanced/Alternate descriptors\n");
+               /* GMAC older than 3.50 has no extended descriptors */
+               if (priv->synopsys_id >= DWMAC_CORE_3_50) {
+-                      pr_info("\tEnabled extended descriptors\n");
++                      dev_info(priv->device, "Enabled extended descriptors\n");
+                       priv->extend_desc = 1;
+               } else
+-                      pr_warn("Extended descriptors not supported\n");
++                      dev_warn(priv->device, "Extended descriptors not supported\n");
+               priv->hw->desc = &enh_desc_ops;
+       } else {
+-              pr_info(" Normal descriptors\n");
++              dev_info(priv->device, "Normal descriptors\n");
+               priv->hw->desc = &ndesc_ops;
+       }
+ }
+@@ -1562,8 +1558,8 @@ static void stmmac_check_ether_addr(stru
+                                            priv->dev->dev_addr, 0);
+               if (!is_valid_ether_addr(priv->dev->dev_addr))
+                       eth_hw_addr_random(priv->dev);
+-              pr_info("%s: device MAC address %pM\n", priv->dev->name,
+-                      priv->dev->dev_addr);
++              netdev_info(priv->dev, "device MAC address %pM\n",
++                          priv->dev->dev_addr);
+       }
+ }
+@@ -1577,16 +1573,12 @@ static void stmmac_check_ether_addr(stru
+  */
+ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+ {
+-      int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, aal = 0;
+-      int mixed_burst = 0;
+       int atds = 0;
+       int ret = 0;
+-      if (priv->plat->dma_cfg) {
+-              pbl = priv->plat->dma_cfg->pbl;
+-              fixed_burst = priv->plat->dma_cfg->fixed_burst;
+-              mixed_burst = priv->plat->dma_cfg->mixed_burst;
+-              aal = priv->plat->dma_cfg->aal;
++      if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
++              dev_err(priv->device, "Invalid DMA configuration\n");
++              return -EINVAL;
+       }
+       if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
+@@ -1598,8 +1590,8 @@ static int stmmac_init_dma_engine(struct
+               return ret;
+       }
+-      priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
+-                          aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
++      priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
++                          priv->dma_tx_phy, priv->dma_rx_phy, atds);
+       if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+               priv->rx_tail_addr = priv->dma_rx_phy +
+@@ -1671,7 +1663,8 @@ static int stmmac_hw_setup(struct net_de
+       /* DMA initialization and SW reset */
+       ret = stmmac_init_dma_engine(priv);
+       if (ret < 0) {
+-              pr_err("%s: DMA engine initialization failed\n", __func__);
++              netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
++                         __func__);
+               return ret;
+       }
+@@ -1700,7 +1693,7 @@ static int stmmac_hw_setup(struct net_de
+       ret = priv->hw->mac->rx_ipc(priv->hw);
+       if (!ret) {
+-              pr_warn(" RX IPC Checksum Offload disabled\n");
++              netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
+               priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+               priv->hw->rx_csum = 0;
+       }
+@@ -1725,10 +1718,11 @@ static int stmmac_hw_setup(struct net_de
+ #ifdef CONFIG_DEBUG_FS
+       ret = stmmac_init_fs(dev);
+       if (ret < 0)
+-              pr_warn("%s: failed debugFS registration\n", __func__);
++              netdev_warn(priv->dev, "%s: failed debugFS registration\n",
++                          __func__);
+ #endif
+       /* Start the ball rolling... */
+-      pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
++      netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
+       priv->hw->dma->start_tx(priv->ioaddr);
+       priv->hw->dma->start_rx(priv->ioaddr);
+@@ -1783,8 +1777,9 @@ static int stmmac_open(struct net_device
+           priv->hw->pcs != STMMAC_PCS_RTBI) {
+               ret = stmmac_init_phy(dev);
+               if (ret) {
+-                      pr_err("%s: Cannot attach to PHY (error: %d)\n",
+-                             __func__, ret);
++                      netdev_err(priv->dev,
++                                 "%s: Cannot attach to PHY (error: %d)\n",
++                                 __func__, ret);
+                       return ret;
+               }
+       }
+@@ -1798,33 +1793,36 @@ static int stmmac_open(struct net_device
+       ret = alloc_dma_desc_resources(priv);
+       if (ret < 0) {
+-              pr_err("%s: DMA descriptors allocation failed\n", __func__);
++              netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
++                         __func__);
+               goto dma_desc_error;
+       }
+       ret = init_dma_desc_rings(dev, GFP_KERNEL);
+       if (ret < 0) {
+-              pr_err("%s: DMA descriptors initialization failed\n", __func__);
++              netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
++                         __func__);
+               goto init_error;
+       }
+       ret = stmmac_hw_setup(dev, true);
+       if (ret < 0) {
+-              pr_err("%s: Hw setup failed\n", __func__);
++              netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+               goto init_error;
+       }
+       stmmac_init_tx_coalesce(priv);
+-      if (priv->phydev)
+-              phy_start(priv->phydev);
++      if (dev->phydev)
++              phy_start(dev->phydev);
+       /* Request the IRQ lines */
+       ret = request_irq(dev->irq, stmmac_interrupt,
+                         IRQF_SHARED, dev->name, dev);
+       if (unlikely(ret < 0)) {
+-              pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+-                     __func__, dev->irq, ret);
++              netdev_err(priv->dev,
++                         "%s: ERROR: allocating the IRQ %d (error: %d)\n",
++                         __func__, dev->irq, ret);
+               goto init_error;
+       }
+@@ -1833,8 +1831,9 @@ static int stmmac_open(struct net_device
+               ret = request_irq(priv->wol_irq, stmmac_interrupt,
+                                 IRQF_SHARED, dev->name, dev);
+               if (unlikely(ret < 0)) {
+-                      pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
+-                             __func__, priv->wol_irq, ret);
++                      netdev_err(priv->dev,
++                                 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
++                                 __func__, priv->wol_irq, ret);
+                       goto wolirq_error;
+               }
+       }
+@@ -1844,8 +1843,9 @@ static int stmmac_open(struct net_device
+               ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
+                                 dev->name, dev);
+               if (unlikely(ret < 0)) {
+-                      pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+-                             __func__, priv->lpi_irq, ret);
++                      netdev_err(priv->dev,
++                                 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
++                                 __func__, priv->lpi_irq, ret);
+                       goto lpiirq_error;
+               }
+       }
+@@ -1864,8 +1864,8 @@ wolirq_error:
+ init_error:
+       free_dma_desc_resources(priv);
+ dma_desc_error:
+-      if (priv->phydev)
+-              phy_disconnect(priv->phydev);
++      if (dev->phydev)
++              phy_disconnect(dev->phydev);
+       return ret;
+ }
+@@ -1884,10 +1884,9 @@ static int stmmac_release(struct net_dev
+               del_timer_sync(&priv->eee_ctrl_timer);
+       /* Stop and disconnect the PHY */
+-      if (priv->phydev) {
+-              phy_stop(priv->phydev);
+-              phy_disconnect(priv->phydev);
+-              priv->phydev = NULL;
++      if (dev->phydev) {
++              phy_stop(dev->phydev);
++              phy_disconnect(dev->phydev);
+       }
+       netif_stop_queue(dev);
+@@ -1947,13 +1946,13 @@ static void stmmac_tso_allocator(struct
+               priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+               desc = priv->dma_tx + priv->cur_tx;
+-              desc->des0 = des + (total_len - tmp_len);
++              desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
+               buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
+                           TSO_MAX_BUFF_SIZE : tmp_len;
+               priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
+                       0, 1,
+-                      (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
++                      (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
+                       0, 0);
+               tmp_len -= TSO_MAX_BUFF_SIZE;
+@@ -1998,8 +1997,6 @@ static netdev_tx_t stmmac_tso_xmit(struc
+       u8 proto_hdr_len;
+       int i;
+-      spin_lock(&priv->tx_lock);
+-
+       /* Compute header lengths */
+       proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+@@ -2009,9 +2006,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
+               if (!netif_queue_stopped(dev)) {
+                       netif_stop_queue(dev);
+                       /* This is a hard error, log it. */
+-                      pr_err("%s: Tx Ring full when queue awake\n", __func__);
++                      netdev_err(priv->dev,
++                                 "%s: Tx Ring full when queue awake\n",
++                                 __func__);
+               }
+-              spin_unlock(&priv->tx_lock);
+               return NETDEV_TX_BUSY;
+       }
+@@ -2049,11 +2047,11 @@ static netdev_tx_t stmmac_tso_xmit(struc
+       priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+       priv->tx_skbuff[first_entry] = skb;
+-      first->des0 = des;
++      first->des0 = cpu_to_le32(des);
+       /* Fill start of payload in buff2 of first descriptor */
+       if (pay_len)
+-              first->des1 =  des + proto_hdr_len;
++              first->des1 = cpu_to_le32(des + proto_hdr_len);
+       /* If needed take extra descriptors to fill the remaining payload */
+       tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+@@ -2082,8 +2080,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
+       priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+       if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+-              if (netif_msg_hw(priv))
+-                      pr_debug("%s: stop transmitted packets\n", __func__);
++              netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
++                        __func__);
+               netif_stop_queue(dev);
+       }
+@@ -2127,7 +2125,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+        * descriptor and then barrier is needed to make sure that
+        * all is coherent before granting the DMA engine.
+        */
+-      smp_wmb();
++      dma_wmb();
+       if (netif_msg_pktdata(priv)) {
+               pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
+@@ -2146,11 +2144,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
+       priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+                                      STMMAC_CHAN0);
+-      spin_unlock(&priv->tx_lock);
+       return NETDEV_TX_OK;
+ dma_map_err:
+-      spin_unlock(&priv->tx_lock);
+       dev_err(priv->device, "Tx dma map failed\n");
+       dev_kfree_skb(skb);
+       priv->dev->stats.tx_dropped++;
+@@ -2182,14 +2178,13 @@ static netdev_tx_t stmmac_xmit(struct sk
+                       return stmmac_tso_xmit(skb, dev);
+       }
+-      spin_lock(&priv->tx_lock);
+-
+       if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+-              spin_unlock(&priv->tx_lock);
+               if (!netif_queue_stopped(dev)) {
+                       netif_stop_queue(dev);
+                       /* This is a hard error, log it. */
+-                      pr_err("%s: Tx Ring full when queue awake\n", __func__);
++                      netdev_err(priv->dev,
++                                 "%s: Tx Ring full when queue awake\n",
++                                 __func__);
+               }
+               return NETDEV_TX_BUSY;
+       }
+@@ -2242,13 +2237,11 @@ static netdev_tx_t stmmac_xmit(struct sk
+               priv->tx_skbuff[entry] = NULL;
+-              if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+-                      desc->des0 = des;
+-                      priv->tx_skbuff_dma[entry].buf = desc->des0;
+-              } else {
+-                      desc->des2 = des;
+-                      priv->tx_skbuff_dma[entry].buf = desc->des2;
+-              }
++              priv->tx_skbuff_dma[entry].buf = des;
++              if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
++                      desc->des0 = cpu_to_le32(des);
++              else
++                      desc->des2 = cpu_to_le32(des);
+               priv->tx_skbuff_dma[entry].map_as_page = true;
+               priv->tx_skbuff_dma[entry].len = len;
+@@ -2266,9 +2259,10 @@ static netdev_tx_t stmmac_xmit(struct sk
+       if (netif_msg_pktdata(priv)) {
+               void *tx_head;
+-              pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
+-                       __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+-                       entry, first, nfrags);
++              netdev_dbg(priv->dev,
++                         "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
++                         __func__, priv->cur_tx, priv->dirty_tx, first_entry,
++                         entry, first, nfrags);
+               if (priv->extend_desc)
+                       tx_head = (void *)priv->dma_etx;
+@@ -2277,13 +2271,13 @@ static netdev_tx_t stmmac_xmit(struct sk
+               priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
+-              pr_debug(">>> frame to be transmitted: ");
++              netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
+               print_pkt(skb->data, skb->len);
+       }
+       if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+-              if (netif_msg_hw(priv))
+-                      pr_debug("%s: stop transmitted packets\n", __func__);
++              netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
++                        __func__);
+               netif_stop_queue(dev);
+       }
+@@ -2319,13 +2313,11 @@ static netdev_tx_t stmmac_xmit(struct sk
+               if (dma_mapping_error(priv->device, des))
+                       goto dma_map_err;
+-              if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+-                      first->des0 = des;
+-                      priv->tx_skbuff_dma[first_entry].buf = first->des0;
+-              } else {
+-                      first->des2 = des;
+-                      priv->tx_skbuff_dma[first_entry].buf = first->des2;
+-              }
++              priv->tx_skbuff_dma[first_entry].buf = des;
++              if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
++                      first->des0 = cpu_to_le32(des);
++              else
++                      first->des2 = cpu_to_le32(des);
+               priv->tx_skbuff_dma[first_entry].len = nopaged_len;
+               priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
+@@ -2346,7 +2338,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+                * descriptor and then barrier is needed to make sure that
+                * all is coherent before granting the DMA engine.
+                */
+-              smp_wmb();
++              dma_wmb();
+       }
+       netdev_sent_queue(dev, skb->len);
+@@ -2357,12 +2349,10 @@ static netdev_tx_t stmmac_xmit(struct sk
+               priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+                                              STMMAC_CHAN0);
+-      spin_unlock(&priv->tx_lock);
+       return NETDEV_TX_OK;
+ dma_map_err:
+-      spin_unlock(&priv->tx_lock);
+-      dev_err(priv->device, "Tx dma map failed\n");
++      netdev_err(priv->dev, "Tx DMA map failed\n");
+       dev_kfree_skb(skb);
+       priv->dev->stats.tx_dropped++;
+       return NETDEV_TX_OK;
+@@ -2433,16 +2423,16 @@ static inline void stmmac_rx_refill(stru
+                                          DMA_FROM_DEVICE);
+                       if (dma_mapping_error(priv->device,
+                                             priv->rx_skbuff_dma[entry])) {
+-                              dev_err(priv->device, "Rx dma map failed\n");
++                              netdev_err(priv->dev, "Rx DMA map failed\n");
+                               dev_kfree_skb(skb);
+                               break;
+                       }
+                       if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+-                              p->des0 = priv->rx_skbuff_dma[entry];
++                              p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
+                               p->des1 = 0;
+                       } else {
+-                              p->des2 = priv->rx_skbuff_dma[entry];
++                              p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
+                       }
+                       if (priv->hw->mode->refill_desc3)
+                               priv->hw->mode->refill_desc3(priv, p);
+@@ -2450,17 +2440,17 @@ static inline void stmmac_rx_refill(stru
+                       if (priv->rx_zeroc_thresh > 0)
+                               priv->rx_zeroc_thresh--;
+-                      if (netif_msg_rx_status(priv))
+-                              pr_debug("\trefill entry #%d\n", entry);
++                      netif_dbg(priv, rx_status, priv->dev,
++                                "refill entry #%d\n", entry);
+               }
+-              wmb();
++              dma_wmb();
+               if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+                       priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
+               else
+                       priv->hw->desc->set_rx_owner(p);
+-              wmb();
++              dma_wmb();
+               entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
+       }
+@@ -2484,7 +2474,7 @@ static int stmmac_rx(struct stmmac_priv
+       if (netif_msg_rx_status(priv)) {
+               void *rx_head;
+-              pr_info(">>>>>> %s: descriptor ring:\n", __func__);
++              netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
+               if (priv->extend_desc)
+                       rx_head = (void *)priv->dma_erx;
+               else
+@@ -2546,9 +2536,9 @@ static int stmmac_rx(struct stmmac_priv
+                       unsigned int des;
+                       if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+-                              des = p->des0;
++                              des = le32_to_cpu(p->des0);
+                       else
+-                              des = p->des2;
++                              des = le32_to_cpu(p->des2);
+                       frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
+@@ -2557,9 +2547,9 @@ static int stmmac_rx(struct stmmac_priv
+                        *  ignored
+                        */
+                       if (frame_len > priv->dma_buf_sz) {
+-                              pr_err("%s: len %d larger than size (%d)\n",
+-                                     priv->dev->name, frame_len,
+-                                     priv->dma_buf_sz);
++                              netdev_err(priv->dev,
++                                         "len %d larger than size (%d)\n",
++                                         frame_len, priv->dma_buf_sz);
+                               priv->dev->stats.rx_length_errors++;
+                               break;
+                       }
+@@ -2571,11 +2561,11 @@ static int stmmac_rx(struct stmmac_priv
+                               frame_len -= ETH_FCS_LEN;
+                       if (netif_msg_rx_status(priv)) {
+-                              pr_info("\tdesc: %p [entry %d] buff=0x%x\n",
+-                                      p, entry, des);
++                              netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
++                                         p, entry, des);
+                               if (frame_len > ETH_FRAME_LEN)
+-                                      pr_debug("\tframe size %d, COE: %d\n",
+-                                               frame_len, status);
++                                      netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
++                                                 frame_len, status);
+                       }
+                       /* The zero-copy is always used for all the sizes
+@@ -2612,8 +2602,9 @@ static int stmmac_rx(struct stmmac_priv
+                       } else {
+                               skb = priv->rx_skbuff[entry];
+                               if (unlikely(!skb)) {
+-                                      pr_err("%s: Inconsistent Rx chain\n",
+-                                             priv->dev->name);
++                                      netdev_err(priv->dev,
++                                                 "%s: Inconsistent Rx chain\n",
++                                                 priv->dev->name);
+                                       priv->dev->stats.rx_dropped++;
+                                       break;
+                               }
+@@ -2629,7 +2620,8 @@ static int stmmac_rx(struct stmmac_priv
+                       }
+                       if (netif_msg_pktdata(priv)) {
+-                              pr_debug("frame received (%dbytes)", frame_len);
++                              netdev_dbg(priv->dev, "frame received (%dbytes)",
++                                         frame_len);
+                               print_pkt(skb->data, frame_len);
+                       }
+@@ -2732,7 +2724,7 @@ static int stmmac_change_mtu(struct net_
+       int max_mtu;
+       if (netif_running(dev)) {
+-              pr_err("%s: must be stopped to change its MTU\n", dev->name);
++              netdev_err(priv->dev, "must be stopped to change its MTU\n");
+               return -EBUSY;
+       }
+@@ -2824,7 +2816,7 @@ static irqreturn_t stmmac_interrupt(int
+               pm_wakeup_event(priv->device, 0);
+       if (unlikely(!dev)) {
+-              pr_err("%s: invalid dev pointer\n", __func__);
++              netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+               return IRQ_NONE;
+       }
+@@ -2882,7 +2874,6 @@ static void stmmac_poll_controller(struc
+  */
+ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ {
+-      struct stmmac_priv *priv = netdev_priv(dev);
+       int ret = -EOPNOTSUPP;
+       if (!netif_running(dev))
+@@ -2892,9 +2883,9 @@ static int stmmac_ioctl(struct net_devic
+       case SIOCGMIIPHY:
+       case SIOCGMIIREG:
+       case SIOCSMIIREG:
+-              if (!priv->phydev)
++              if (!dev->phydev)
+                       return -EINVAL;
+-              ret = phy_mii_ioctl(priv->phydev, rq, cmd);
++              ret = phy_mii_ioctl(dev->phydev, rq, cmd);
+               break;
+       case SIOCSHWTSTAMP:
+               ret = stmmac_hwtstamp_ioctl(dev, rq);
+@@ -2922,14 +2913,17 @@ static void sysfs_display_ring(void *hea
+                       x = *(u64 *) ep;
+                       seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+                                  i, (unsigned int)virt_to_phys(ep),
+-                                 ep->basic.des0, ep->basic.des1,
+-                                 ep->basic.des2, ep->basic.des3);
++                                 le32_to_cpu(ep->basic.des0),
++                                 le32_to_cpu(ep->basic.des1),
++                                 le32_to_cpu(ep->basic.des2),
++                                 le32_to_cpu(ep->basic.des3));
+                       ep++;
+               } else {
+                       x = *(u64 *) p;
+                       seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+                                  i, (unsigned int)virt_to_phys(ep),
+-                                 p->des0, p->des1, p->des2, p->des3);
++                                 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
++                                 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+                       p++;
+               }
+               seq_printf(seq, "\n");
+@@ -2961,6 +2955,8 @@ static int stmmac_sysfs_ring_open(struct
+       return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
+ }
++/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
++
+ static const struct file_operations stmmac_rings_status_fops = {
+       .owner = THIS_MODULE,
+       .open = stmmac_sysfs_ring_open,
+@@ -2983,11 +2979,11 @@ static int stmmac_sysfs_dma_cap_read(str
+       seq_printf(seq, "\tDMA HW features\n");
+       seq_printf(seq, "==============================\n");
+-      seq_printf(seq, "\t10/100 Mbps %s\n",
++      seq_printf(seq, "\t10/100 Mbps: %s\n",
+                  (priv->dma_cap.mbps_10_100) ? "Y" : "N");
+-      seq_printf(seq, "\t1000 Mbps %s\n",
++      seq_printf(seq, "\t1000 Mbps: %s\n",
+                  (priv->dma_cap.mbps_1000) ? "Y" : "N");
+-      seq_printf(seq, "\tHalf duple %s\n",
++      seq_printf(seq, "\tHalf duplex: %s\n",
+                  (priv->dma_cap.half_duplex) ? "Y" : "N");
+       seq_printf(seq, "\tHash Filter: %s\n",
+                  (priv->dma_cap.hash_filter) ? "Y" : "N");
+@@ -3005,9 +3001,9 @@ static int stmmac_sysfs_dma_cap_read(str
+                  (priv->dma_cap.rmon) ? "Y" : "N");
+       seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
+                  (priv->dma_cap.time_stamp) ? "Y" : "N");
+-      seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n",
++      seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
+                  (priv->dma_cap.atime_stamp) ? "Y" : "N");
+-      seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
++      seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
+                  (priv->dma_cap.eee) ? "Y" : "N");
+       seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
+       seq_printf(seq, "\tChecksum Offload in TX: %s\n",
+@@ -3054,8 +3050,7 @@ static int stmmac_init_fs(struct net_dev
+       priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
+       if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
+-              pr_err("ERROR %s/%s, debugfs create directory failed\n",
+-                     STMMAC_RESOURCE_NAME, dev->name);
++              netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
+               return -ENOMEM;
+       }
+@@ -3067,7 +3062,7 @@ static int stmmac_init_fs(struct net_dev
+                                   &stmmac_rings_status_fops);
+       if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
+-              pr_info("ERROR creating stmmac ring debugfs file\n");
++              netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
+               debugfs_remove_recursive(priv->dbgfs_dir);
+               return -ENOMEM;
+@@ -3079,7 +3074,7 @@ static int stmmac_init_fs(struct net_dev
+                                           dev, &stmmac_dma_cap_fops);
+       if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
+-              pr_info("ERROR creating stmmac MMC debugfs file\n");
++              netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
+               debugfs_remove_recursive(priv->dbgfs_dir);
+               return -ENOMEM;
+@@ -3151,11 +3146,11 @@ static int stmmac_hw_init(struct stmmac_
+       } else {
+               if (chain_mode) {
+                       priv->hw->mode = &chain_mode_ops;
+-                      pr_info(" Chain mode enabled\n");
++                      dev_info(priv->device, "Chain mode enabled\n");
+                       priv->mode = STMMAC_CHAIN_MODE;
+               } else {
+                       priv->hw->mode = &ring_mode_ops;
+-                      pr_info(" Ring mode enabled\n");
++                      dev_info(priv->device, "Ring mode enabled\n");
+                       priv->mode = STMMAC_RING_MODE;
+               }
+       }
+@@ -3163,7 +3158,7 @@ static int stmmac_hw_init(struct stmmac_
+       /* Get the HW capability (new GMAC newer than 3.50a) */
+       priv->hw_cap_support = stmmac_get_hw_features(priv);
+       if (priv->hw_cap_support) {
+-              pr_info(" DMA HW capability register supported");
++              dev_info(priv->device, "DMA HW capability register supported\n");
+               /* We can override some gmac/dma configuration fields: e.g.
+                * enh_desc, tx_coe (e.g. that are passed through the
+@@ -3188,8 +3183,9 @@ static int stmmac_hw_init(struct stmmac_
+               else if (priv->dma_cap.rx_coe_type1)
+                       priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
+-      } else
+-              pr_info(" No HW DMA feature register supported");
++      } else {
++              dev_info(priv->device, "No HW DMA feature register supported\n");
++      }
+       /* To use alternate (extended), normal or GMAC4 descriptor structures */
+       if (priv->synopsys_id >= DWMAC_CORE_4_00)
+@@ -3199,20 +3195,20 @@ static int stmmac_hw_init(struct stmmac_
+       if (priv->plat->rx_coe) {
+               priv->hw->rx_csum = priv->plat->rx_coe;
+-              pr_info(" RX Checksum Offload Engine supported\n");
++              dev_info(priv->device, "RX Checksum Offload Engine supported\n");
+               if (priv->synopsys_id < DWMAC_CORE_4_00)
+-                      pr_info("\tCOE Type %d\n", priv->hw->rx_csum);
++                      dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
+       }
+       if (priv->plat->tx_coe)
+-              pr_info(" TX Checksum insertion supported\n");
++              dev_info(priv->device, "TX Checksum insertion supported\n");
+       if (priv->plat->pmt) {
+-              pr_info(" Wake-Up On Lan supported\n");
++              dev_info(priv->device, "Wake-Up On Lan supported\n");
+               device_set_wakeup_capable(priv->device, 1);
+       }
+       if (priv->dma_cap.tsoen)
+-              pr_info(" TSO supported\n");
++              dev_info(priv->device, "TSO supported\n");
+       return 0;
+ }
+@@ -3271,8 +3267,8 @@ int stmmac_dvr_probe(struct device *devi
+       priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
+       if (IS_ERR(priv->stmmac_clk)) {
+-              dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
+-                       __func__);
++              netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n",
++                          __func__);
+               /* If failed to obtain stmmac_clk and specific clk_csr value
+                * is NOT passed from the platform, probe fail.
+                */
+@@ -3321,7 +3317,7 @@ int stmmac_dvr_probe(struct device *devi
+       if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+               ndev->hw_features |= NETIF_F_TSO;
+               priv->tso = true;
+-              pr_info(" TSO feature enabled\n");
++              dev_info(priv->device, "TSO feature enabled\n");
+       }
+       ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+       ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
+@@ -3341,13 +3337,13 @@ int stmmac_dvr_probe(struct device *devi
+        */
+       if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
+               priv->use_riwt = 1;
+-              pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
++              dev_info(priv->device,
++                       "Enable RX Mitigation via HW Watchdog Timer\n");
+       }
+       netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
+       spin_lock_init(&priv->lock);
+-      spin_lock_init(&priv->tx_lock);
+       /* If a specific clk_csr value is passed from the platform
+        * this means that the CSR Clock Range selection cannot be
+@@ -3368,15 +3364,17 @@ int stmmac_dvr_probe(struct device *devi
+               /* MDIO bus Registration */
+               ret = stmmac_mdio_register(ndev);
+               if (ret < 0) {
+-                      pr_debug("%s: MDIO bus (id: %d) registration failed",
+-                               __func__, priv->plat->bus_id);
+-                      goto error_napi_register;
++                      dev_err(priv->device,
++                              "%s: MDIO bus (id: %d) registration failed",
++                              __func__, priv->plat->bus_id);
++                      goto error_mdio_register;
+               }
+       }
+       ret = register_netdev(ndev);
+       if (ret) {
+-              pr_err("%s: ERROR %i registering the device\n", __func__, ret);
++              dev_err(priv->device, "%s: ERROR %i registering the device\n",
++                      __func__, ret);
+               goto error_netdev_register;
+       }
+@@ -3387,7 +3385,7 @@ error_netdev_register:
+           priv->hw->pcs != STMMAC_PCS_TBI &&
+           priv->hw->pcs != STMMAC_PCS_RTBI)
+               stmmac_mdio_unregister(ndev);
+-error_napi_register:
++error_mdio_register:
+       netif_napi_del(&priv->napi);
+ error_hw_init:
+       clk_disable_unprepare(priv->pclk);
+@@ -3411,7 +3409,7 @@ int stmmac_dvr_remove(struct device *dev
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+-      pr_info("%s:\n\tremoving driver", __func__);
++      netdev_info(priv->dev, "%s: removing driver", __func__);
+       priv->hw->dma->stop_rx(priv->ioaddr);
+       priv->hw->dma->stop_tx(priv->ioaddr);
+@@ -3449,8 +3447,8 @@ int stmmac_suspend(struct device *dev)
+       if (!ndev || !netif_running(ndev))
+               return 0;
+-      if (priv->phydev)
+-              phy_stop(priv->phydev);
++      if (ndev->phydev)
++              phy_stop(ndev->phydev);
+       spin_lock_irqsave(&priv->lock, flags);
+@@ -3544,8 +3542,8 @@ int stmmac_resume(struct device *dev)
+       spin_unlock_irqrestore(&priv->lock, flags);
+-      if (priv->phydev)
+-              phy_start(priv->phydev);
++      if (ndev->phydev)
++              phy_start(ndev->phydev);
+       return 0;
+ }
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+@@ -42,13 +42,6 @@
+ #define MII_GMAC4_WRITE                       (1 << MII_GMAC4_GOC_SHIFT)
+ #define MII_GMAC4_READ                        (3 << MII_GMAC4_GOC_SHIFT)
+-#define MII_PHY_ADDR_GMAC4_SHIFT      21
+-#define MII_PHY_ADDR_GMAC4_MASK               GENMASK(25, 21)
+-#define MII_PHY_REG_GMAC4_SHIFT               16
+-#define MII_PHY_REG_GMAC4_MASK                GENMASK(20, 16)
+-#define MII_CSR_CLK_GMAC4_SHIFT               8
+-#define MII_CSR_CLK_GMAC4_MASK                GENMASK(11, 8)
+-
+ static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
+ {
+       unsigned long curr;
+@@ -68,8 +61,8 @@ static int stmmac_mdio_busy_wait(void __
+ /**
+  * stmmac_mdio_read
+  * @bus: points to the mii_bus structure
+- * @phyaddr: MII addr reg bits 15-11
+- * @phyreg: MII addr reg bits 10-6
++ * @phyaddr: MII addr
++ * @phyreg: MII reg
+  * Description: it reads data from the MII register from within the phy device.
+  * For the 7111 GMAC, we must set the bit 0 in the MII address register while
+  * accessing the PHY registers.
+@@ -83,14 +76,20 @@ static int stmmac_mdio_read(struct mii_b
+       unsigned int mii_data = priv->hw->mii.data;
+       int data;
+-      u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
+-                      ((phyreg << 6) & (0x000007C0)));
+-      regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
++      u32 value = MII_BUSY;
++
++      value |= (phyaddr << priv->hw->mii.addr_shift)
++              & priv->hw->mii.addr_mask;
++      value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
++      value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
++              & priv->hw->mii.clk_csr_mask;
++      if (priv->plat->has_gmac4)
++              value |= MII_GMAC4_READ;
+       if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+               return -EBUSY;
+-      writel(regValue, priv->ioaddr + mii_address);
++      writel(value, priv->ioaddr + mii_address);
+       if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+               return -EBUSY;
+@@ -104,8 +103,8 @@ static int stmmac_mdio_read(struct mii_b
+ /**
+  * stmmac_mdio_write
+  * @bus: points to the mii_bus structure
+- * @phyaddr: MII addr reg bits 15-11
+- * @phyreg: MII addr reg bits 10-6
++ * @phyaddr: MII addr
++ * @phyreg: MII reg
+  * @phydata: phy data
+  * Description: it writes the data into the MII register from within the device.
+  */
+@@ -117,85 +116,18 @@ static int stmmac_mdio_write(struct mii_
+       unsigned int mii_address = priv->hw->mii.addr;
+       unsigned int mii_data = priv->hw->mii.data;
+-      u16 value =
+-          (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
+-          | MII_WRITE;
+-
+-      value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
+-
+-      /* Wait until any existing MII operation is complete */
+-      if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+-              return -EBUSY;
+-
+-      /* Set the MII address register to write */
+-      writel(phydata, priv->ioaddr + mii_data);
+-      writel(value, priv->ioaddr + mii_address);
+-
+-      /* Wait until any existing MII operation is complete */
+-      return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
+-}
+-
+-/**
+- * stmmac_mdio_read_gmac4
+- * @bus: points to the mii_bus structure
+- * @phyaddr: MII addr reg bits 25-21
+- * @phyreg: MII addr reg bits 20-16
+- * Description: it reads data from the MII register of GMAC4 from within
+- * the phy device.
+- */
+-static int stmmac_mdio_read_gmac4(struct mii_bus *bus, int phyaddr, int phyreg)
+-{
+-      struct net_device *ndev = bus->priv;
+-      struct stmmac_priv *priv = netdev_priv(ndev);
+-      unsigned int mii_address = priv->hw->mii.addr;
+-      unsigned int mii_data = priv->hw->mii.data;
+-      int data;
+-      u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
+-                   (MII_PHY_ADDR_GMAC4_MASK)) |
+-                   ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
+-                   (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_READ;
+-
+-      value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
+-               << MII_CSR_CLK_GMAC4_SHIFT);
+-
+-      if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+-              return -EBUSY;
+-
+-      writel(value, priv->ioaddr + mii_address);
+-
+-      if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+-              return -EBUSY;
+-
+-      /* Read the data from the MII data register */
+-      data = (int)readl(priv->ioaddr + mii_data);
+-
+-      return data;
+-}
+-
+-/**
+- * stmmac_mdio_write_gmac4
+- * @bus: points to the mii_bus structure
+- * @phyaddr: MII addr reg bits 25-21
+- * @phyreg: MII addr reg bits 20-16
+- * @phydata: phy data
+- * Description: it writes the data into the MII register of GMAC4 from within
+- * the device.
+- */
+-static int stmmac_mdio_write_gmac4(struct mii_bus *bus, int phyaddr, int phyreg,
+-                                 u16 phydata)
+-{
+-      struct net_device *ndev = bus->priv;
+-      struct stmmac_priv *priv = netdev_priv(ndev);
+-      unsigned int mii_address = priv->hw->mii.addr;
+-      unsigned int mii_data = priv->hw->mii.data;
+-
+-      u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
+-                   (MII_PHY_ADDR_GMAC4_MASK)) |
+-                   ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
+-                   (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_WRITE;
++      u32 value = MII_BUSY;
+-      value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
+-               << MII_CSR_CLK_GMAC4_SHIFT);
++      value |= (phyaddr << priv->hw->mii.addr_shift)
++              & priv->hw->mii.addr_mask;
++      value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
++
++      value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
++              & priv->hw->mii.clk_csr_mask;
++      if (priv->plat->has_gmac4)
++              value |= MII_GMAC4_WRITE;
++      else
++              value |= MII_WRITE;
+       /* Wait until any existing MII operation is complete */
+       if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+@@ -260,7 +192,7 @@ int stmmac_mdio_reset(struct mii_bus *bu
+ #endif
+       if (data->phy_reset) {
+-              pr_debug("stmmac_mdio_reset: calling phy_reset\n");
++              netdev_dbg(ndev, "stmmac_mdio_reset: calling phy_reset\n");
+               data->phy_reset(priv->plat->bsp_priv);
+       }
+@@ -305,13 +237,8 @@ int stmmac_mdio_register(struct net_devi
+ #endif
+       new_bus->name = "stmmac";
+-      if (priv->plat->has_gmac4) {
+-              new_bus->read = &stmmac_mdio_read_gmac4;
+-              new_bus->write = &stmmac_mdio_write_gmac4;
+-      } else {
+-              new_bus->read = &stmmac_mdio_read;
+-              new_bus->write = &stmmac_mdio_write;
+-      }
++      new_bus->read = &stmmac_mdio_read;
++      new_bus->write = &stmmac_mdio_write;
+       new_bus->reset = &stmmac_mdio_reset;
+       snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+@@ -325,7 +252,7 @@ int stmmac_mdio_register(struct net_devi
+       else
+               err = mdiobus_register(new_bus);
+       if (err != 0) {
+-              pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
++              netdev_err(ndev, "Cannot register the MDIO bus\n");
+               goto bus_register_fail;
+       }
+@@ -372,16 +299,16 @@ int stmmac_mdio_register(struct net_devi
+                               irq_str = irq_num;
+                               break;
+                       }
+-                      pr_info("%s: PHY ID %08x at %d IRQ %s (%s)%s\n",
+-                              ndev->name, phydev->phy_id, addr,
+-                              irq_str, phydev_name(phydev),
+-                              act ? " active" : "");
++                      netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
++                                  phydev->phy_id, addr,
++                                  irq_str, phydev_name(phydev),
++                                  act ? " active" : "");
+                       found = 1;
+               }
+       }
+       if (!found && !mdio_node) {
+-              pr_warn("%s: No PHY found\n", ndev->name);
++              netdev_warn(ndev, "No PHY found\n");
+               mdiobus_unregister(new_bus);
+               mdiobus_free(new_bus);
+               return -ENODEV;
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+@@ -81,6 +81,7 @@ static void stmmac_default_data(struct p
+       plat->mdio_bus_data->phy_mask = 0;
+       plat->dma_cfg->pbl = 32;
++      plat->dma_cfg->pblx8 = true;
+       /* TODO: AXI */
+       /* Set default value for multicast hash bins */
+@@ -88,6 +89,9 @@ static void stmmac_default_data(struct p
+       /* Set default value for unicast filter entries */
+       plat->unicast_filter_entries = 1;
++
++      /* Set the maxmtu to a default of JUMBO_LEN */
++      plat->maxmtu = JUMBO_LEN;
+ }
+ static int quark_default_data(struct plat_stmmacenet_data *plat,
+@@ -115,6 +119,7 @@ static int quark_default_data(struct pla
+       plat->mdio_bus_data->phy_mask = 0;
+       plat->dma_cfg->pbl = 16;
++      plat->dma_cfg->pblx8 = true;
+       plat->dma_cfg->fixed_burst = 1;
+       /* AXI (TODO) */
+@@ -124,6 +129,9 @@ static int quark_default_data(struct pla
+       /* Set default value for unicast filter entries */
+       plat->unicast_filter_entries = 1;
++      /* Set the maxmtu to a default of JUMBO_LEN */
++      plat->maxmtu = JUMBO_LEN;
++
+       return 0;
+ }
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -292,6 +292,7 @@ stmmac_probe_config_dt(struct platform_d
+       if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
+           of_device_is_compatible(np, "snps,dwmac-4.10a")) {
+               plat->has_gmac4 = 1;
++              plat->has_gmac = 0;
+               plat->pmt = 1;
+               plat->tso_en = of_property_read_bool(np, "snps,tso");
+       }
+@@ -303,21 +304,25 @@ stmmac_probe_config_dt(struct platform_d
+               plat->force_sf_dma_mode = 1;
+       }
+-      if (of_find_property(np, "snps,pbl", NULL)) {
+-              dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
+-                                     GFP_KERNEL);
+-              if (!dma_cfg) {
+-                      stmmac_remove_config_dt(pdev, plat);
+-                      return ERR_PTR(-ENOMEM);
+-              }
+-              plat->dma_cfg = dma_cfg;
+-              of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
+-              dma_cfg->aal = of_property_read_bool(np, "snps,aal");
+-              dma_cfg->fixed_burst =
+-                      of_property_read_bool(np, "snps,fixed-burst");
+-              dma_cfg->mixed_burst =
+-                      of_property_read_bool(np, "snps,mixed-burst");
+-      }
++      dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
++                             GFP_KERNEL);
++      if (!dma_cfg) {
++              stmmac_remove_config_dt(pdev, plat);
++              return ERR_PTR(-ENOMEM);
++      }
++      plat->dma_cfg = dma_cfg;
++
++      of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
++      if (!dma_cfg->pbl)
++              dma_cfg->pbl = DEFAULT_DMA_PBL;
++      of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
++      of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
++      dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");
++
++      dma_cfg->aal = of_property_read_bool(np, "snps,aal");
++      dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
++      dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
++
+       plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
+       if (plat->force_thresh_dma_mode) {
+               plat->force_sf_dma_mode = 0;
+@@ -445,9 +450,7 @@ static int stmmac_pltfr_suspend(struct d
+       struct platform_device *pdev = to_platform_device(dev);
+       ret = stmmac_suspend(dev);
+-      if (priv->plat->suspend)
+-              priv->plat->suspend(pdev, priv->plat->bsp_priv);
+-      else if (priv->plat->exit)
++      if (priv->plat->exit)
+               priv->plat->exit(pdev, priv->plat->bsp_priv);
+       return ret;
+@@ -466,9 +469,7 @@ static int stmmac_pltfr_resume(struct de
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       struct platform_device *pdev = to_platform_device(dev);
+-      if (priv->plat->resume)
+-              priv->plat->resume(pdev, priv->plat->bsp_priv);
+-      else if (priv->plat->init)
++      if (priv->plat->init)
+               priv->plat->init(pdev, priv->plat->bsp_priv);
+       return stmmac_resume(dev);
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -88,6 +88,9 @@ struct stmmac_mdio_bus_data {
+ struct stmmac_dma_cfg {
+       int pbl;
++      int txpbl;
++      int rxpbl;
++      bool pblx8;
+       int fixed_burst;
+       int mixed_burst;
+       bool aal;
+@@ -135,8 +138,6 @@ struct plat_stmmacenet_data {
+       void (*bus_setup)(void __iomem *ioaddr);
+       int (*init)(struct platform_device *pdev, void *priv);
+       void (*exit)(struct platform_device *pdev, void *priv);
+-      void (*suspend)(struct platform_device *pdev, void *priv);
+-      void (*resume)(struct platform_device *pdev, void *priv);
+       void *bsp_priv;
+       struct stmmac_axi *axi;
+       int has_gmac4;
diff --git a/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch b/target/linux/sunxi/patches-4.9/0051-stmmac-form-4-11.patch
new file mode 100644 (file)
index 0000000..f3b4262
--- /dev/null
@@ -0,0 +1,2305 @@
+--- a/Documentation/devicetree/bindings/net/stmmac.txt
++++ b/Documentation/devicetree/bindings/net/stmmac.txt
+@@ -49,6 +49,8 @@ Optional properties:
+ - snps,force_sf_dma_mode      Force DMA to use the Store and Forward
+                               mode for both tx and rx. This flag is
+                               ignored if force_thresh_dma_mode is set.
++- snps,en-tx-lpi-clockgating  Enable gating of the MAC TX clock during
++                              TX low-power mode
+ - snps,multicast-filter-bins: Number of multicast filter hash bins
+                               supported by this device instance
+ - snps,perfect-filter-entries:        Number of perfect filter entries supported
+@@ -65,7 +67,6 @@ Optional properties:
+       - snps,wr_osr_lmt: max write outstanding req. limit
+       - snps,rd_osr_lmt: max read outstanding req. limit
+       - snps,kbbe: do not cross 1KiB boundary.
+-      - snps,axi_all: align address
+       - snps,blen: this is a vector of supported burst length.
+       - snps,fb: fixed-burst
+       - snps,mb: mixed-burst
+--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
+@@ -1,5 +1,5 @@
+ config STMMAC_ETH
+-      tristate "STMicroelectronics 10/100/1000 Ethernet driver"
++      tristate "STMicroelectronics 10/100/1000/EQOS Ethernet driver"
+       depends on HAS_IOMEM && HAS_DMA
+       select MII
+       select PHYLIB
+@@ -7,9 +7,8 @@ config STMMAC_ETH
+       select PTP_1588_CLOCK
+       select RESET_CONTROLLER
+       ---help---
+-        This is the driver for the Ethernet IPs are built around a
+-        Synopsys IP Core and only tested on the STMicroelectronics
+-        platforms.
++        This is the driver for the Ethernet IPs built around a
++        Synopsys IP Core.
+ if STMMAC_ETH
+@@ -29,6 +28,15 @@ config STMMAC_PLATFORM
+ if STMMAC_PLATFORM
++config DWMAC_DWC_QOS_ETH
++      tristate "Support for snps,dwc-qos-ethernet.txt DT binding."
++      select PHYLIB
++      select CRC32
++      select MII
++      depends on OF && HAS_DMA
++      help
++        Support for chips using the snps,dwc-qos-ethernet.txt DT binding.
++
+ config DWMAC_GENERIC
+       tristate "Generic driver for DWMAC"
+       default STMMAC_PLATFORM
+@@ -143,11 +151,11 @@ config STMMAC_PCI
+       tristate "STMMAC PCI bus support"
+       depends on STMMAC_ETH && PCI
+       ---help---
+-        This is to select the Synopsys DWMAC available on PCI devices,
+-        if you have a controller with this interface, say Y or M here.
++        This selects the platform specific bus support for the stmmac driver.
++        This driver was tested on XLINX XC2V3000 FF1152AMT0221
++        D1215994A VIRTEX FPGA board and SNPS QoS IPK Prototyping Kit.
+-        This PCI support is tested on XLINX XC2V3000 FF1152AMT0221
+-        D1215994A VIRTEX FPGA board.
++        If you have a controller with this interface, say Y or M here.
+         If unsure, say N.
+ endif
+--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
++++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
+@@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_SOCFPGA)  += dwmac-alt
+ obj-$(CONFIG_DWMAC_STI)               += dwmac-sti.o
+ obj-$(CONFIG_DWMAC_STM32)     += dwmac-stm32.o
+ obj-$(CONFIG_DWMAC_SUNXI)     += dwmac-sunxi.o
++obj-$(CONFIG_DWMAC_DWC_QOS_ETH)       += dwmac-dwc-qos-eth.o
+ obj-$(CONFIG_DWMAC_GENERIC)   += dwmac-generic.o
+ stmmac-platform-objs:= stmmac_platform.o
+ dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
+--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+@@ -16,10 +16,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -12,10 +12,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+@@ -71,7 +67,7 @@ struct stmmac_extra_stats {
+       unsigned long overflow_error;
+       unsigned long ipc_csum_error;
+       unsigned long rx_collision;
+-      unsigned long rx_crc;
++      unsigned long rx_crc_errors;
+       unsigned long dribbling_bit;
+       unsigned long rx_length;
+       unsigned long rx_mii;
+@@ -323,6 +319,9 @@ struct dma_features {
+       /* TX and RX number of channels */
+       unsigned int number_rx_channel;
+       unsigned int number_tx_channel;
++      /* TX and RX number of queues */
++      unsigned int number_rx_queues;
++      unsigned int number_tx_queues;
+       /* Alternate (enhanced) DESC mode */
+       unsigned int enh_desc;
+ };
+@@ -340,7 +339,7 @@ struct dma_features {
+ /* Common MAC defines */
+ #define MAC_CTRL_REG          0x00000000      /* MAC Control */
+ #define MAC_ENABLE_TX         0x00000008      /* Transmitter Enable */
+-#define MAC_RNABLE_RX         0x00000004      /* Receiver Enable */
++#define MAC_ENABLE_RX         0x00000004      /* Receiver Enable */
+ /* Default LPI timers */
+ #define STMMAC_DEFAULT_LIT_LS 0x3E8
+@@ -417,7 +416,7 @@ struct stmmac_dma_ops {
+       /* Configure the AXI Bus Mode Register */
+       void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
+       /* Dump DMA registers */
+-      void (*dump_regs) (void __iomem *ioaddr);
++      void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space);
+       /* Set tx/rx threshold in the csr6 register
+        * An invalid value enables the store-and-forward mode */
+       void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
+@@ -454,8 +453,10 @@ struct stmmac_ops {
+       void (*core_init)(struct mac_device_info *hw, int mtu);
+       /* Enable and verify that the IPC module is supported */
+       int (*rx_ipc)(struct mac_device_info *hw);
++      /* Enable RX Queues */
++      void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue);
+       /* Dump MAC registers */
+-      void (*dump_regs)(struct mac_device_info *hw);
++      void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
+       /* Handle extra events on specific interrupts hw dependent */
+       int (*host_irq_status)(struct mac_device_info *hw,
+                              struct stmmac_extra_stats *x);
+@@ -471,7 +472,8 @@ struct stmmac_ops {
+                             unsigned int reg_n);
+       void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
+                             unsigned int reg_n);
+-      void (*set_eee_mode)(struct mac_device_info *hw);
++      void (*set_eee_mode)(struct mac_device_info *hw,
++                           bool en_tx_lpi_clockgating);
+       void (*reset_eee_mode)(struct mac_device_info *hw);
+       void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
+       void (*set_eee_pls)(struct mac_device_info *hw, int link);
+--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
++++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
+@@ -11,10 +11,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
++++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+@@ -17,10 +17,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+--- /dev/null
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+@@ -0,0 +1,202 @@
++/*
++ * Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver
++ *
++ * Copyright (C) 2016 Joao Pinto <jpinto@synopsys.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/clk.h>
++#include <linux/clk-provider.h>
++#include <linux/device.h>
++#include <linux/ethtool.h>
++#include <linux/io.h>
++#include <linux/ioport.h>
++#include <linux/module.h>
++#include <linux/of_net.h>
++#include <linux/mfd/syscon.h>
++#include <linux/platform_device.h>
++#include <linux/stmmac.h>
++
++#include "stmmac_platform.h"
++
++static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
++                                 struct plat_stmmacenet_data *plat_dat)
++{
++      struct device_node *np = pdev->dev.of_node;
++      u32 burst_map = 0;
++      u32 bit_index = 0;
++      u32 a_index = 0;
++
++      if (!plat_dat->axi) {
++              plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL);
++
++              if (!plat_dat->axi)
++                      return -ENOMEM;
++      }
++
++      plat_dat->axi->axi_lpi_en = of_property_read_bool(np, "snps,en-lpi");
++      if (of_property_read_u32(np, "snps,write-requests",
++                               &plat_dat->axi->axi_wr_osr_lmt)) {
++              /**
++               * Since the register has a reset value of 1, if property
++               * is missing, default to 1.
++               */
++              plat_dat->axi->axi_wr_osr_lmt = 1;
++      } else {
++              /**
++               * If property exists, to keep the behavior from dwc_eth_qos,
++               * subtract one after parsing.
++               */
++              plat_dat->axi->axi_wr_osr_lmt--;
++      }
++
++      if (of_property_read_u32(np, "read,read-requests",
++                               &plat_dat->axi->axi_rd_osr_lmt)) {
++              /**
++               * Since the register has a reset value of 1, if property
++               * is missing, default to 1.
++               */
++              plat_dat->axi->axi_rd_osr_lmt = 1;
++      } else {
++              /**
++               * If property exists, to keep the behavior from dwc_eth_qos,
++               * subtract one after parsing.
++               */
++              plat_dat->axi->axi_rd_osr_lmt--;
++      }
++      of_property_read_u32(np, "snps,burst-map", &burst_map);
++
++      /* converts burst-map bitmask to burst array */
++      for (bit_index = 0; bit_index < 7; bit_index++) {
++              if (burst_map & (1 << bit_index)) {
++                      switch (bit_index) {
++                      case 0:
++                      plat_dat->axi->axi_blen[a_index] = 4; break;
++                      case 1:
++                      plat_dat->axi->axi_blen[a_index] = 8; break;
++                      case 2:
++                      plat_dat->axi->axi_blen[a_index] = 16; break;
++                      case 3:
++                      plat_dat->axi->axi_blen[a_index] = 32; break;
++                      case 4:
++                      plat_dat->axi->axi_blen[a_index] = 64; break;
++                      case 5:
++                      plat_dat->axi->axi_blen[a_index] = 128; break;
++                      case 6:
++                      plat_dat->axi->axi_blen[a_index] = 256; break;
++                      default:
++                      break;
++                      }
++                      a_index++;
++              }
++      }
++
++      /* dwc-qos needs GMAC4, AAL, TSO and PMT */
++      plat_dat->has_gmac4 = 1;
++      plat_dat->dma_cfg->aal = 1;
++      plat_dat->tso_en = 1;
++      plat_dat->pmt = 1;
++
++      return 0;
++}
++
++static int dwc_eth_dwmac_probe(struct platform_device *pdev)
++{
++      struct plat_stmmacenet_data *plat_dat;
++      struct stmmac_resources stmmac_res;
++      struct resource *res;
++      int ret;
++
++      memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
++
++      /**
++       * Since stmmac_platform supports name IRQ only, basic platform
++       * resource initialization is done in the glue logic.
++       */
++      stmmac_res.irq = platform_get_irq(pdev, 0);
++      if (stmmac_res.irq < 0) {
++              if (stmmac_res.irq != -EPROBE_DEFER)
++                      dev_err(&pdev->dev,
++                              "IRQ configuration information not found\n");
++
++              return stmmac_res.irq;
++      }
++      stmmac_res.wol_irq = stmmac_res.irq;
++
++      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++      stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res);
++      if (IS_ERR(stmmac_res.addr))
++              return PTR_ERR(stmmac_res.addr);
++
++      plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
++      if (IS_ERR(plat_dat))
++              return PTR_ERR(plat_dat);
++
++      plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
++      if (IS_ERR(plat_dat->stmmac_clk)) {
++              dev_err(&pdev->dev, "apb_pclk clock not found.\n");
++              ret = PTR_ERR(plat_dat->stmmac_clk);
++              plat_dat->stmmac_clk = NULL;
++              goto err_remove_config_dt;
++      }
++      clk_prepare_enable(plat_dat->stmmac_clk);
++
++      plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
++      if (IS_ERR(plat_dat->pclk)) {
++              dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
++              ret = PTR_ERR(plat_dat->pclk);
++              plat_dat->pclk = NULL;
++              goto err_out_clk_dis_phy;
++      }
++      clk_prepare_enable(plat_dat->pclk);
++
++      ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
++      if (ret)
++              goto err_out_clk_dis_aper;
++
++      ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
++      if (ret)
++              goto err_out_clk_dis_aper;
++
++      return 0;
++
++err_out_clk_dis_aper:
++      clk_disable_unprepare(plat_dat->pclk);
++err_out_clk_dis_phy:
++      clk_disable_unprepare(plat_dat->stmmac_clk);
++err_remove_config_dt:
++      stmmac_remove_config_dt(pdev, plat_dat);
++
++      return ret;
++}
++
++static int dwc_eth_dwmac_remove(struct platform_device *pdev)
++{
++      return stmmac_pltfr_remove(pdev);
++}
++
++static const struct of_device_id dwc_eth_dwmac_match[] = {
++      { .compatible = "snps,dwc-qos-ethernet-4.10", },
++      { }
++};
++MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
++
++static struct platform_driver dwc_eth_dwmac_driver = {
++      .probe  = dwc_eth_dwmac_probe,
++      .remove = dwc_eth_dwmac_remove,
++      .driver = {
++              .name           = "dwc-eth-dwmac",
++              .of_match_table = dwc_eth_dwmac_match,
++      },
++};
++module_platform_driver(dwc_eth_dwmac_driver);
++
++MODULE_AUTHOR("Joao Pinto <jpinto@synopsys.com>");
++MODULE_DESCRIPTION("Synopsys DWC Ethernet Quality-of-Service v4.10a driver");
++MODULE_LICENSE("GPL v2");
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+@@ -35,10 +35,6 @@
+ #define PRG_ETH0_TXDLY_SHIFT          5
+ #define PRG_ETH0_TXDLY_MASK           GENMASK(6, 5)
+-#define PRG_ETH0_TXDLY_OFF            (0x0 << PRG_ETH0_TXDLY_SHIFT)
+-#define PRG_ETH0_TXDLY_QUARTER                (0x1 << PRG_ETH0_TXDLY_SHIFT)
+-#define PRG_ETH0_TXDLY_HALF           (0x2 << PRG_ETH0_TXDLY_SHIFT)
+-#define PRG_ETH0_TXDLY_THREE_QUARTERS (0x3 << PRG_ETH0_TXDLY_SHIFT)
+ /* divider for the result of m250_sel */
+ #define PRG_ETH0_CLK_M250_DIV_SHIFT   7
+@@ -69,6 +65,8 @@ struct meson8b_dwmac {
+       struct clk_divider      m25_div;
+       struct clk              *m25_div_clk;
++
++      u32                     tx_delay_ns;
+ };
+ static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
+@@ -179,11 +177,19 @@ static int meson8b_init_prg_eth(struct m
+ {
+       int ret;
+       unsigned long clk_rate;
++      u8 tx_dly_val = 0;
+       switch (dwmac->phy_mode) {
+       case PHY_INTERFACE_MODE_RGMII:
+-      case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
++              /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where
++               * 8ns are exactly one cycle of the 125MHz RGMII TX clock):
++               * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3
++               */
++              tx_dly_val = dwmac->tx_delay_ns >> 1;
++              /* fall through */
++
++      case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               /* Generate a 25MHz clock for the PHY */
+               clk_rate = 25 * 1000 * 1000;
+@@ -196,9 +202,8 @@ static int meson8b_init_prg_eth(struct m
+               meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+                                       PRG_ETH0_INVERTED_RMII_CLK, 0);
+-              /* TX clock delay - all known boards use a 1/4 cycle delay */
+               meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
+-                                      PRG_ETH0_TXDLY_QUARTER);
++                                      tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
+               break;
+       case PHY_INTERFACE_MODE_RMII:
+@@ -284,6 +289,11 @@ static int meson8b_dwmac_probe(struct pl
+               goto err_remove_config_dt;
+       }
++      /* use 2ns as fallback since this value was previously hardcoded */
++      if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns",
++                               &dwmac->tx_delay_ns))
++              dwmac->tx_delay_ns = 2;
++
+       ret = meson8b_init_clk(dwmac);
+       if (ret)
+               goto err_remove_config_dt;
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -302,6 +302,122 @@ static const struct rk_gmac_ops rk3288_o
+       .set_rmii_speed = rk3288_set_rmii_speed,
+ };
++#define RK3328_GRF_MAC_CON0   0x0900
++#define RK3328_GRF_MAC_CON1   0x0904
++
++/* RK3328_GRF_MAC_CON0 */
++#define RK3328_GMAC_CLK_RX_DL_CFG(val)        HIWORD_UPDATE(val, 0x7F, 7)
++#define RK3328_GMAC_CLK_TX_DL_CFG(val)        HIWORD_UPDATE(val, 0x7F, 0)
++
++/* RK3328_GRF_MAC_CON1 */
++#define RK3328_GMAC_PHY_INTF_SEL_RGMII        \
++              (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
++#define RK3328_GMAC_PHY_INTF_SEL_RMII \
++              (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
++#define RK3328_GMAC_FLOW_CTRL         GRF_BIT(3)
++#define RK3328_GMAC_FLOW_CTRL_CLR     GRF_CLR_BIT(3)
++#define RK3328_GMAC_SPEED_10M         GRF_CLR_BIT(2)
++#define RK3328_GMAC_SPEED_100M                GRF_BIT(2)
++#define RK3328_GMAC_RMII_CLK_25M      GRF_BIT(7)
++#define RK3328_GMAC_RMII_CLK_2_5M     GRF_CLR_BIT(7)
++#define RK3328_GMAC_CLK_125M          (GRF_CLR_BIT(11) | GRF_CLR_BIT(12))
++#define RK3328_GMAC_CLK_25M           (GRF_BIT(11) | GRF_BIT(12))
++#define RK3328_GMAC_CLK_2_5M          (GRF_CLR_BIT(11) | GRF_BIT(12))
++#define RK3328_GMAC_RMII_MODE         GRF_BIT(9)
++#define RK3328_GMAC_RMII_MODE_CLR     GRF_CLR_BIT(9)
++#define RK3328_GMAC_TXCLK_DLY_ENABLE  GRF_BIT(0)
++#define RK3328_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
++#define RK3328_GMAC_RXCLK_DLY_ENABLE  GRF_BIT(1)
++#define RK3328_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(0)
++
++static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
++                              int tx_delay, int rx_delay)
++{
++      struct device *dev = &bsp_priv->pdev->dev;
++
++      if (IS_ERR(bsp_priv->grf)) {
++              dev_err(dev, "Missing rockchip,grf property\n");
++              return;
++      }
++
++      regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
++                   RK3328_GMAC_PHY_INTF_SEL_RGMII |
++                   RK3328_GMAC_RMII_MODE_CLR |
++                   RK3328_GMAC_RXCLK_DLY_ENABLE |
++                   RK3328_GMAC_TXCLK_DLY_ENABLE);
++
++      regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON0,
++                   RK3328_GMAC_CLK_RX_DL_CFG(rx_delay) |
++                   RK3328_GMAC_CLK_TX_DL_CFG(tx_delay));
++}
++
++static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
++{
++      struct device *dev = &bsp_priv->pdev->dev;
++
++      if (IS_ERR(bsp_priv->grf)) {
++              dev_err(dev, "Missing rockchip,grf property\n");
++              return;
++      }
++
++      regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
++                   RK3328_GMAC_PHY_INTF_SEL_RMII |
++                   RK3328_GMAC_RMII_MODE);
++
++      /* set MAC to RMII mode */
++      regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, GRF_BIT(11));
++}
++
++static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
++{
++      struct device *dev = &bsp_priv->pdev->dev;
++
++      if (IS_ERR(bsp_priv->grf)) {
++              dev_err(dev, "Missing rockchip,grf property\n");
++              return;
++      }
++
++      if (speed == 10)
++              regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
++                           RK3328_GMAC_CLK_2_5M);
++      else if (speed == 100)
++              regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
++                           RK3328_GMAC_CLK_25M);
++      else if (speed == 1000)
++              regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
++                           RK3328_GMAC_CLK_125M);
++      else
++              dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
++}
++
++static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
++{
++      struct device *dev = &bsp_priv->pdev->dev;
++
++      if (IS_ERR(bsp_priv->grf)) {
++              dev_err(dev, "Missing rockchip,grf property\n");
++              return;
++      }
++
++      if (speed == 10)
++              regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
++                           RK3328_GMAC_RMII_CLK_2_5M |
++                           RK3328_GMAC_SPEED_10M);
++      else if (speed == 100)
++              regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
++                           RK3328_GMAC_RMII_CLK_25M |
++                           RK3328_GMAC_SPEED_100M);
++      else
++              dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
++}
++
++static const struct rk_gmac_ops rk3328_ops = {
++      .set_to_rgmii = rk3328_set_to_rgmii,
++      .set_to_rmii = rk3328_set_to_rmii,
++      .set_rgmii_speed = rk3328_set_rgmii_speed,
++      .set_rmii_speed = rk3328_set_rmii_speed,
++};
++
+ #define RK3366_GRF_SOC_CON6   0x0418
+ #define RK3366_GRF_SOC_CON7   0x041c
+@@ -1006,6 +1122,7 @@ static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops,
+ static const struct of_device_id rk_gmac_dwmac_match[] = {
+       { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
+       { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
++      { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops },
+       { .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops },
+       { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
+       { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+@@ -341,7 +341,7 @@ static int socfpga_dwmac_probe(struct pl
+        * mode. Create a copy of the core reset handle so it can be used by
+        * the driver later.
+        */
+-      dwmac->stmmac_rst = stpriv->stmmac_rst;
++      dwmac->stmmac_rst = stpriv->plat->stmmac_rst;
+       ret = socfpga_dwmac_set_phy_mode(dwmac);
+       if (ret)
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+@@ -12,10 +12,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+@@ -10,10 +10,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+@@ -16,10 +16,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+@@ -96,17 +92,13 @@ static int dwmac1000_rx_ipc_enable(struc
+       return !!(value & GMAC_CONTROL_IPC);
+ }
+-static void dwmac1000_dump_regs(struct mac_device_info *hw)
++static void dwmac1000_dump_regs(struct mac_device_info *hw, u32 *reg_space)
+ {
+       void __iomem *ioaddr = hw->pcsr;
+       int i;
+-      pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr);
+-      for (i = 0; i < 55; i++) {
+-              int offset = i * 4;
+-              pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+-                      offset, readl(ioaddr + offset));
+-      }
++      for (i = 0; i < 55; i++)
++              reg_space[i] = readl(ioaddr + i * 4);
+ }
+ static void dwmac1000_set_umac_addr(struct mac_device_info *hw,
+@@ -347,11 +339,14 @@ static int dwmac1000_irq_status(struct m
+       return ret;
+ }
+-static void dwmac1000_set_eee_mode(struct mac_device_info *hw)
++static void dwmac1000_set_eee_mode(struct mac_device_info *hw,
++                                 bool en_tx_lpi_clockgating)
+ {
+       void __iomem *ioaddr = hw->pcsr;
+       u32 value;
++      /*TODO - en_tx_lpi_clockgating treatment */
++
+       /* Enable the link status receive on RGMII, SGMII ore SMII
+        * receive path and instruct the transmit to enter in LPI
+        * state.
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+@@ -16,10 +16,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+@@ -205,18 +201,14 @@ static void dwmac1000_dma_operation_mode
+       writel(csr6, ioaddr + DMA_CONTROL);
+ }
+-static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
++static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
+ {
+       int i;
+-      pr_info(" DMA registers\n");
+-      for (i = 0; i < 22; i++) {
+-              if ((i < 9) || (i > 17)) {
+-                      int offset = i * 4;
+-                      pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
+-                             (DMA_BUS_MODE + offset),
+-                             readl(ioaddr + DMA_BUS_MODE + offset));
+-              }
+-      }
++
++      for (i = 0; i < 22; i++)
++              if ((i < 9) || (i > 17))
++                      reg_space[DMA_BUS_MODE / 4 + i] =
++                              readl(ioaddr + DMA_BUS_MODE + i * 4);
+ }
+ static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+@@ -18,10 +18,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+@@ -44,28 +40,18 @@ static void dwmac100_core_init(struct ma
+ #endif
+ }
+-static void dwmac100_dump_mac_regs(struct mac_device_info *hw)
++static void dwmac100_dump_mac_regs(struct mac_device_info *hw, u32 *reg_space)
+ {
+       void __iomem *ioaddr = hw->pcsr;
+-      pr_info("\t----------------------------------------------\n"
+-              "\t  DWMAC 100 CSR (base addr = 0x%p)\n"
+-              "\t----------------------------------------------\n", ioaddr);
+-      pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
+-              readl(ioaddr + MAC_CONTROL));
+-      pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
+-              readl(ioaddr + MAC_ADDR_HIGH));
+-      pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
+-              readl(ioaddr + MAC_ADDR_LOW));
+-      pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
+-              MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+-      pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
+-              MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+-      pr_info("\tflow control (offset 0x%x): 0x%08x\n",
+-              MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
+-      pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
+-              readl(ioaddr + MAC_VLAN1));
+-      pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
+-              readl(ioaddr + MAC_VLAN2));
++
++      reg_space[MAC_CONTROL / 4] = readl(ioaddr + MAC_CONTROL);
++      reg_space[MAC_ADDR_HIGH / 4] = readl(ioaddr + MAC_ADDR_HIGH);
++      reg_space[MAC_ADDR_LOW / 4] = readl(ioaddr + MAC_ADDR_LOW);
++      reg_space[MAC_HASH_HIGH / 4] = readl(ioaddr + MAC_HASH_HIGH);
++      reg_space[MAC_HASH_LOW / 4] = readl(ioaddr + MAC_HASH_LOW);
++      reg_space[MAC_FLOW_CTRL / 4] = readl(ioaddr + MAC_FLOW_CTRL);
++      reg_space[MAC_VLAN1 / 4] = readl(ioaddr + MAC_VLAN1);
++      reg_space[MAC_VLAN2 / 4] = readl(ioaddr + MAC_VLAN2);
+ }
+ static int dwmac100_rx_ipc_enable(struct mac_device_info *hw)
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+@@ -18,10 +18,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+@@ -70,19 +66,18 @@ static void dwmac100_dma_operation_mode(
+       writel(csr6, ioaddr + DMA_CONTROL);
+ }
+-static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
++static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
+ {
+       int i;
+-      pr_debug("DWMAC 100 DMA CSR\n");
+       for (i = 0; i < 9; i++)
+-              pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
+-                       (DMA_BUS_MODE + i * 4),
+-                       readl(ioaddr + DMA_BUS_MODE + i * 4));
+-
+-      pr_debug("\tCSR20 (0x%x): 0x%08x, CSR21 (0x%x): 0x%08x\n",
+-               DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR),
+-               DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
++              reg_space[DMA_BUS_MODE / 4 + i] =
++                      readl(ioaddr + DMA_BUS_MODE + i * 4);
++
++      reg_space[DMA_CUR_TX_BUF_ADDR / 4] =
++              readl(ioaddr + DMA_CUR_TX_BUF_ADDR);
++      reg_space[DMA_CUR_RX_BUF_ADDR / 4] =
++              readl(ioaddr + DMA_CUR_RX_BUF_ADDR);
+ }
+ /* DMA controller has two counters to track the number of the missed frames. */
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+@@ -22,6 +22,7 @@
+ #define GMAC_HASH_TAB_32_63           0x00000014
+ #define GMAC_RX_FLOW_CTRL             0x00000090
+ #define GMAC_QX_TX_FLOW_CTRL(x)               (0x70 + x * 4)
++#define GMAC_RXQ_CTRL0                        0x000000a0
+ #define GMAC_INT_STATUS                       0x000000b0
+ #define GMAC_INT_EN                   0x000000b4
+ #define GMAC_PCS_BASE                 0x000000e0
+@@ -44,6 +45,11 @@
+ #define GMAC_MAX_PERFECT_ADDRESSES    128
++/* MAC RX Queue Enable */
++#define GMAC_RX_QUEUE_CLEAR(queue)    ~(GENMASK(1, 0) << ((queue) * 2))
++#define GMAC_RX_AV_QUEUE_ENABLE(queue)        BIT((queue) * 2)
++#define GMAC_RX_DCB_QUEUE_ENABLE(queue)       BIT(((queue) * 2) + 1)
++
+ /* MAC Flow Control RX */
+ #define GMAC_RX_FLOW_CTRL_RFE         BIT(0)
+@@ -84,6 +90,19 @@ enum power_event {
+       power_down = 0x00000001,
+ };
++/* Energy Efficient Ethernet (EEE) for GMAC4
++ *
++ * LPI status, timer and control register offset
++ */
++#define GMAC4_LPI_CTRL_STATUS 0xd0
++#define GMAC4_LPI_TIMER_CTRL  0xd4
++
++/* LPI control and status defines */
++#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */
++#define GMAC4_LPI_CTRL_STATUS_LPITXA  BIT(19) /* Enable LPI TX Automate */
++#define GMAC4_LPI_CTRL_STATUS_PLS     BIT(17) /* PHY Link Status */
++#define GMAC4_LPI_CTRL_STATUS_LPIEN   BIT(16) /* LPI Enable */
++
+ /* MAC Debug bitmap */
+ #define GMAC_DEBUG_TFCSTS_MASK                GENMASK(18, 17)
+ #define GMAC_DEBUG_TFCSTS_SHIFT               17
+@@ -133,6 +152,8 @@ enum power_event {
+ /* MAC HW features2 bitmap */
+ #define GMAC_HW_FEAT_TXCHCNT          GENMASK(21, 18)
+ #define GMAC_HW_FEAT_RXCHCNT          GENMASK(15, 12)
++#define GMAC_HW_FEAT_TXQCNT           GENMASK(9, 6)
++#define GMAC_HW_FEAT_RXQCNT           GENMASK(3, 0)
+ /* MAC HW ADDR regs */
+ #define GMAC_HI_DCS                   GENMASK(18, 16)
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -59,19 +59,24 @@ static void dwmac4_core_init(struct mac_
+       writel(value, ioaddr + GMAC_INT_EN);
+ }
+-static void dwmac4_dump_regs(struct mac_device_info *hw)
++static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue)
+ {
+       void __iomem *ioaddr = hw->pcsr;
+-      int i;
++      u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
+-      pr_debug("\tDWMAC4 regs (base addr = 0x%p)\n", ioaddr);
++      value &= GMAC_RX_QUEUE_CLEAR(queue);
++      value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
+-      for (i = 0; i < GMAC_REG_NUM; i++) {
+-              int offset = i * 4;
++      writel(value, ioaddr + GMAC_RXQ_CTRL0);
++}
+-              pr_debug("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+-                       offset, readl(ioaddr + offset));
+-      }
++static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      int i;
++
++      for (i = 0; i < GMAC_REG_NUM; i++)
++              reg_space[i] = readl(ioaddr + i * 4);
+ }
+ static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
+@@ -126,6 +131,65 @@ static void dwmac4_get_umac_addr(struct
+                                  GMAC_ADDR_LOW(reg_n));
+ }
++static void dwmac4_set_eee_mode(struct mac_device_info *hw,
++                              bool en_tx_lpi_clockgating)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 value;
++
++      /* Enable the link status receive on RGMII, SGMII ore SMII
++       * receive path and instruct the transmit to enter in LPI
++       * state.
++       */
++      value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
++      value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
++
++      if (en_tx_lpi_clockgating)
++              value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
++
++      writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
++}
++
++static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 value;
++
++      value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
++      value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
++      writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
++}
++
++static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 value;
++
++      value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
++
++      if (link)
++              value |= GMAC4_LPI_CTRL_STATUS_PLS;
++      else
++              value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
++
++      writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
++}
++
++static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);
++
++      /* Program the timers in the LPI timer control register:
++       * LS: minimum time (ms) for which the link
++       *  status from PHY should be ok before transmitting
++       *  the LPI pattern.
++       * TW: minimum time (us) for which the core waits
++       *  after it has stopped transmitting the LPI pattern.
++       */
++      writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
++}
++
+ static void dwmac4_set_filter(struct mac_device_info *hw,
+                             struct net_device *dev)
+ {
+@@ -392,12 +456,17 @@ static void dwmac4_debug(void __iomem *i
+ static const struct stmmac_ops dwmac4_ops = {
+       .core_init = dwmac4_core_init,
+       .rx_ipc = dwmac4_rx_ipc_enable,
++      .rx_queue_enable = dwmac4_rx_queue_enable,
+       .dump_regs = dwmac4_dump_regs,
+       .host_irq_status = dwmac4_irq_status,
+       .flow_ctrl = dwmac4_flow_ctrl,
+       .pmt = dwmac4_pmt,
+       .set_umac_addr = dwmac4_set_umac_addr,
+       .get_umac_addr = dwmac4_get_umac_addr,
++      .set_eee_mode = dwmac4_set_eee_mode,
++      .reset_eee_mode = dwmac4_reset_eee_mode,
++      .set_eee_timer = dwmac4_set_eee_timer,
++      .set_eee_pls = dwmac4_set_eee_pls,
+       .pcs_ctrl_ane = dwmac4_ctrl_ane,
+       .pcs_rane = dwmac4_rane,
+       .pcs_get_adv_lp = dwmac4_get_adv_lp,
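
On the GMAC_RXQ_CTRL0 handling introduced above: every RX queue owns a two-bit field in that register, which is why dwmac4_rx_queue_enable() first clears both bits with GMAC_RX_QUEUE_CLEAR() and then sets the AV enable bit. A hypothetical variant that routes a queue to the DCB/generic path instead (not part of this patch) would differ only in the bit it sets:

    static void demo_rx_queue_enable_dcb(struct mac_device_info *hw, u32 queue)
    {
            void __iomem *ioaddr = hw->pcsr;
            u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

            value &= GMAC_RX_QUEUE_CLEAR(queue);      /* clear bits [2q+1:2q] */
            value |= GMAC_RX_DCB_QUEUE_ENABLE(queue); /* set bit 2q+1: DCB enable */

            writel(value, ioaddr + GMAC_RXQ_CTRL0);
    }
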
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+@@ -103,7 +103,7 @@ static int dwmac4_wrback_get_rx_status(v
+                       x->rx_mii++;
+               if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
+-                      x->rx_crc++;
++                      x->rx_crc_errors++;
+                       stats->rx_crc_errors++;
+               }
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+@@ -127,53 +127,51 @@ static void dwmac4_dma_init(void __iomem
+               dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
+ }
+-static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
++static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
++                                u32 *reg_space)
+ {
+-      pr_debug(" Channel %d\n", channel);
+-      pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0,
+-               readl(ioaddr + DMA_CHAN_CONTROL(channel)));
+-      pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4,
+-               readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)));
+-      pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8,
+-               readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)));
+-      pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14,
+-               readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)));
+-      pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c,
+-               readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)));
+-      pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20,
+-               readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)));
+-      pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28,
+-               readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)));
+-      pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c,
+-               readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)));
+-      pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30,
+-               readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)));
+-      pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34,
+-               readl(ioaddr + DMA_CHAN_INTR_ENA(channel)));
+-      pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38,
+-               readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)));
+-      pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c,
+-               readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)));
+-      pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44,
+-               readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)));
+-      pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c,
+-               readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)));
+-      pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54,
+-               readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)));
+-      pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c,
+-               readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)));
+-      pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60,
+-               readl(ioaddr + DMA_CHAN_STATUS(channel)));
++      reg_space[DMA_CHAN_CONTROL(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_CONTROL(channel));
++      reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
++      reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
++      reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
++      reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
++      reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel));
++      reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel));
++      reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel));
++      reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel));
++      reg_space[DMA_CHAN_INTR_ENA(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_INTR_ENA(channel));
++      reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel));
++      reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel));
++      reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel));
++      reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel));
++      reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel));
++      reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel));
++      reg_space[DMA_CHAN_STATUS(channel) / 4] =
++              readl(ioaddr + DMA_CHAN_STATUS(channel));
+ }
+-static void dwmac4_dump_dma_regs(void __iomem *ioaddr)
++static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
+ {
+       int i;
+-      pr_debug(" GMAC4 DMA registers\n");
+-
+       for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+-              _dwmac4_dump_dma_regs(ioaddr, i);
++              _dwmac4_dump_dma_regs(ioaddr, i, reg_space);
+ }
+ static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+@@ -303,6 +301,11 @@ static void dwmac4_get_hw_feature(void _
+               ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
+       dma_cap->number_tx_channel =
+               ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
++      /* TX and RX number of queues */
++      dma_cap->number_rx_queues =
++              ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1;
++      dma_cap->number_tx_queues =
++              ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1;
+       /* IEEE 1588-2002 */
+       dma_cap->time_stamp = 0;
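
Both queue-count fields added to dwmac4_get_hw_feature() above are encoded as "count minus one" in the HW_FEATURE2 register, hence the "+ 1". A short fragment with a made-up capability word shows the decode:

    /* Hypothetical readout: bits 3..0 = 1 and bits 9..6 = 1. */
    u32 hw_cap = 0x00000041;
    u32 rx_queues = ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1; /* 1 + 1 = 2 */
    u32 tx_queues = ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1; /* 1 + 1 = 2 */
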
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+@@ -12,10 +12,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+@@ -10,10 +10,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+@@ -21,6 +17,7 @@
+ *******************************************************************************/
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include "common.h"
+ #include "dwmac_dma.h"
+@@ -29,19 +26,16 @@
+ int dwmac_dma_reset(void __iomem *ioaddr)
+ {
+       u32 value = readl(ioaddr + DMA_BUS_MODE);
+-      int limit;
++      int err;
+       /* DMA SW reset */
+       value |= DMA_BUS_MODE_SFT_RESET;
+       writel(value, ioaddr + DMA_BUS_MODE);
+-      limit = 10;
+-      while (limit--) {
+-              if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+-                      break;
+-              mdelay(10);
+-      }
+-      if (limit < 0)
++      err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
++                               !(value & DMA_BUS_MODE_SFT_RESET),
++                               100000, 10000);
++      if (err)
+               return -EBUSY;
+       return 0;
+@@ -102,7 +96,7 @@ static void show_tx_process_state(unsign
+               pr_debug("- TX (Stopped): Reset or Stop command\n");
+               break;
+       case 1:
+-              pr_debug("- TX (Running):Fetching the Tx desc\n");
++              pr_debug("- TX (Running): Fetching the Tx desc\n");
+               break;
+       case 2:
+               pr_debug("- TX (Running): Waiting for end of tx\n");
+@@ -136,7 +130,7 @@ static void show_rx_process_state(unsign
+               pr_debug("- RX (Running): Fetching the Rx desc\n");
+               break;
+       case 2:
+-              pr_debug("- RX (Running):Checking for end of pkt\n");
++              pr_debug("- RX (Running): Checking for end of pkt\n");
+               break;
+       case 3:
+               pr_debug("- RX (Running): Waiting for Rx pkt\n");
+@@ -246,7 +240,7 @@ void stmmac_set_mac_addr(void __iomem *i
+       unsigned long data;
+       data = (addr[5] << 8) | addr[4];
+-      /* For MAC Addr registers se have to set the Address Enable (AE)
++      /* For MAC Addr registers we have to set the Address Enable (AE)
+        * bit that has no effect on the High Reg 0 where the bit 31 (MO)
+        * is RO.
+        */
+@@ -261,9 +255,9 @@ void stmmac_set_mac(void __iomem *ioaddr
+       u32 value = readl(ioaddr + MAC_CTRL_REG);
+       if (enable)
+-              value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
++              value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
+       else
+-              value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
++              value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
+       writel(value, ioaddr + MAC_CTRL_REG);
+ }
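
The dwmac_lib.c change above replaces an open-coded mdelay() retry loop with readl_poll_timeout() from <linux/iopoll.h>. For reference, the helper's last two arguments are the polling interval and the total timeout, both in microseconds; a small self-contained sketch with a hypothetical register and bit:

    #include <linux/iopoll.h>

    #define DEMO_CTRL_REG   0x0   /* hypothetical register offset */
    #define DEMO_CTRL_RESET 0x1   /* hypothetical self-clearing reset bit */

    static int demo_wait_reset_done(void __iomem *ioaddr)
    {
            u32 value;

            /* Re-read DEMO_CTRL_REG every 100 us until the reset bit clears,
             * returning -ETIMEDOUT after 10 ms.
             */
            return readl_poll_timeout(ioaddr + DEMO_CTRL_REG, value,
                                      !(value & DEMO_CTRL_RESET), 100, 10000);
    }
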
+--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+@@ -12,10 +12,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+@@ -225,7 +221,7 @@ static int enh_desc_get_rx_status(void *
+                       x->rx_mii++;
+               if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
+-                      x->rx_crc++;
++                      x->rx_crc_errors++;
+                       stats->rx_crc_errors++;
+               }
+               ret = discard_frame;
+--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
++++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
+@@ -12,10 +12,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+@@ -12,10 +12,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+@@ -12,10 +12,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+@@ -115,7 +111,7 @@ static int ndesc_get_rx_status(void *dat
+                       stats->collisions++;
+               }
+               if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
+-                      x->rx_crc++;
++                      x->rx_crc_errors++;
+                       stats->rx_crc_errors++;
+               }
+               ret = discard_frame;
+--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+@@ -16,10 +16,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+@@ -10,10 +10,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+@@ -106,9 +102,6 @@ struct stmmac_priv {
+       u32 msg_enable;
+       int wolopts;
+       int wol_irq;
+-      struct clk *stmmac_clk;
+-      struct clk *pclk;
+-      struct reset_control *stmmac_rst;
+       int clk_csr;
+       struct timer_list eee_ctrl_timer;
+       int lpi_irq;
+@@ -120,8 +113,6 @@ struct stmmac_priv {
+       struct ptp_clock *ptp_clock;
+       struct ptp_clock_info ptp_clock_ops;
+       unsigned int default_addend;
+-      struct clk *clk_ptp_ref;
+-      unsigned int clk_ptp_rate;
+       u32 adv_ts;
+       int use_riwt;
+       int irq_wake;
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -12,10 +12,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+@@ -65,7 +61,7 @@ static const struct stmmac_stats stmmac_
+       STMMAC_STAT(overflow_error),
+       STMMAC_STAT(ipc_csum_error),
+       STMMAC_STAT(rx_collision),
+-      STMMAC_STAT(rx_crc),
++      STMMAC_STAT(rx_crc_errors),
+       STMMAC_STAT(dribbling_bit),
+       STMMAC_STAT(rx_length),
+       STMMAC_STAT(rx_mii),
+@@ -439,32 +435,14 @@ static int stmmac_ethtool_get_regs_len(s
+ static void stmmac_ethtool_gregs(struct net_device *dev,
+                         struct ethtool_regs *regs, void *space)
+ {
+-      int i;
+       u32 *reg_space = (u32 *) space;
+       struct stmmac_priv *priv = netdev_priv(dev);
+       memset(reg_space, 0x0, REG_SPACE_SIZE);
+-      if (!(priv->plat->has_gmac || priv->plat->has_gmac4)) {
+-              /* MAC registers */
+-              for (i = 0; i < 12; i++)
+-                      reg_space[i] = readl(priv->ioaddr + (i * 4));
+-              /* DMA registers */
+-              for (i = 0; i < 9; i++)
+-                      reg_space[i + 12] =
+-                          readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+-              reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
+-              reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
+-      } else {
+-              /* MAC registers */
+-              for (i = 0; i < 55; i++)
+-                      reg_space[i] = readl(priv->ioaddr + (i * 4));
+-              /* DMA registers */
+-              for (i = 0; i < 22; i++)
+-                      reg_space[i + 55] =
+-                          readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+-      }
++      priv->hw->mac->dump_regs(priv->hw, reg_space);
++      priv->hw->dma->dump_regs(priv->ioaddr, reg_space);
+ }
+ static void
+@@ -712,7 +690,7 @@ static int stmmac_ethtool_op_set_eee(str
+ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
+ {
+-      unsigned long clk = clk_get_rate(priv->stmmac_clk);
++      unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
+       if (!clk)
+               return 0;
+@@ -722,7 +700,7 @@ static u32 stmmac_usec2riwt(u32 usec, st
+ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
+ {
+-      unsigned long clk = clk_get_rate(priv->stmmac_clk);
++      unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
+       if (!clk)
+               return 0;
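
With the per-core dump_regs()/dump_dma_regs() callbacks reworked earlier in this patch to fill a caller-supplied buffer, stmmac_ethtool_gregs() above shrinks to two calls into one flat u32 array indexed by register offset / 4. A rough illustrative fragment of how a value is then picked back out of that image (assuming the usual reg_space pointer from the hunk above):

    /* Each callback writes only the words it owns, keyed by offset / 4,
     * so e.g. the DMA bus mode register can be read back at its offset.
     */
    u32 bus_mode = reg_space[DMA_BUS_MODE / 4];

Userspace retrieves the same image through ethtool's register dump (ethtool -d <iface>).
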
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+@@ -12,10 +12,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -13,10 +13,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+@@ -158,7 +154,7 @@ static void stmmac_clk_csr_set(struct st
+ {
+       u32 clk_rate;
+-      clk_rate = clk_get_rate(priv->stmmac_clk);
++      clk_rate = clk_get_rate(priv->plat->stmmac_clk);
+       /* Platform provided default clk_csr would be assumed valid
+        * for all other cases except for the below mentioned ones.
+@@ -191,7 +187,7 @@ static void print_pkt(unsigned char *buf
+ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
+ {
+-      unsigned avail;
++      u32 avail;
+       if (priv->dirty_tx > priv->cur_tx)
+               avail = priv->dirty_tx - priv->cur_tx - 1;
+@@ -203,7 +199,7 @@ static inline u32 stmmac_tx_avail(struct
+ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
+ {
+-      unsigned dirty;
++      u32 dirty;
+       if (priv->dirty_rx <= priv->cur_rx)
+               dirty = priv->cur_rx - priv->dirty_rx;
+@@ -216,7 +212,7 @@ static inline u32 stmmac_rx_dirty(struct
+ /**
+  * stmmac_hw_fix_mac_speed - callback for speed selection
+  * @priv: driver private structure
+- * Description: on some platforms (e.g. ST), some HW system configuraton
++ * Description: on some platforms (e.g. ST), some HW system configuration
+  * registers have to be set according to the link speed negotiated.
+  */
+ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
+@@ -239,7 +235,8 @@ static void stmmac_enable_eee_mode(struc
+       /* Check and enter in LPI mode */
+       if ((priv->dirty_tx == priv->cur_tx) &&
+           (priv->tx_path_in_lpi_mode == false))
+-              priv->hw->mac->set_eee_mode(priv->hw);
++              priv->hw->mac->set_eee_mode(priv->hw,
++                                          priv->plat->en_tx_lpi_clockgating);
+ }
+ /**
+@@ -415,7 +412,7 @@ static void stmmac_get_rx_hwtstamp(struc
+ /**
+  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
+  *  @dev: device pointer.
+- *  @ifr: An IOCTL specefic structure, that can contain a pointer to
++ *  @ifr: An IOCTL specific structure, that can contain a pointer to
+  *  a proprietary structure used to pass information to the driver.
+  *  Description:
+  *  This function configures the MAC to enable/disable both outgoing(TX)
+@@ -606,7 +603,7 @@ static int stmmac_hwtstamp_ioctl(struct
+               /* program Sub Second Increment reg */
+               sec_inc = priv->hw->ptp->config_sub_second_increment(
+-                      priv->ptpaddr, priv->clk_ptp_rate,
++                      priv->ptpaddr, priv->plat->clk_ptp_rate,
+                       priv->plat->has_gmac4);
+               temp = div_u64(1000000000ULL, sec_inc);
+@@ -616,7 +613,7 @@ static int stmmac_hwtstamp_ioctl(struct
+                * where, freq_div_ratio = 1e9ns/sec_inc
+                */
+               temp = (u64)(temp << 32);
+-              priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
++              priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
+               priv->hw->ptp->config_addend(priv->ptpaddr,
+                                            priv->default_addend);
+@@ -644,18 +641,6 @@ static int stmmac_init_ptp(struct stmmac
+       if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+               return -EOPNOTSUPP;
+-      /* Fall-back to main clock in case of no PTP ref is passed */
+-      priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
+-      if (IS_ERR(priv->clk_ptp_ref)) {
+-              priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
+-              priv->clk_ptp_ref = NULL;
+-              netdev_dbg(priv->dev, "PTP uses main clock\n");
+-      } else {
+-              clk_prepare_enable(priv->clk_ptp_ref);
+-              priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
+-              netdev_dbg(priv->dev, "PTP rate %d\n", priv->clk_ptp_rate);
+-      }
+-
+       priv->adv_ts = 0;
+       /* Check if adv_ts can be enabled for dwmac 4.x core */
+       if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
+@@ -682,8 +667,8 @@ static int stmmac_init_ptp(struct stmmac
+ static void stmmac_release_ptp(struct stmmac_priv *priv)
+ {
+-      if (priv->clk_ptp_ref)
+-              clk_disable_unprepare(priv->clk_ptp_ref);
++      if (priv->plat->clk_ptp_ref)
++              clk_disable_unprepare(priv->plat->clk_ptp_ref);
+       stmmac_ptp_unregister(priv);
+ }
+@@ -704,7 +689,7 @@ static void stmmac_adjust_link(struct ne
+       int new_state = 0;
+       unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
+-      if (phydev == NULL)
++      if (!phydev)
+               return;
+       spin_lock_irqsave(&priv->lock, flags);
+@@ -731,33 +716,36 @@ static void stmmac_adjust_link(struct ne
+                       new_state = 1;
+                       switch (phydev->speed) {
+                       case 1000:
+-                              if (likely((priv->plat->has_gmac) ||
+-                                         (priv->plat->has_gmac4)))
++                              if (priv->plat->has_gmac ||
++                                  priv->plat->has_gmac4)
+                                       ctrl &= ~priv->hw->link.port;
+-                              stmmac_hw_fix_mac_speed(priv);
+                               break;
+                       case 100:
++                              if (priv->plat->has_gmac ||
++                                  priv->plat->has_gmac4) {
++                                      ctrl |= priv->hw->link.port;
++                                      ctrl |= priv->hw->link.speed;
++                              } else {
++                                      ctrl &= ~priv->hw->link.port;
++                              }
++                              break;
+                       case 10:
+-                              if (likely((priv->plat->has_gmac) ||
+-                                         (priv->plat->has_gmac4))) {
++                              if (priv->plat->has_gmac ||
++                                  priv->plat->has_gmac4) {
+                                       ctrl |= priv->hw->link.port;
+-                                      if (phydev->speed == SPEED_100) {
+-                                              ctrl |= priv->hw->link.speed;
+-                                      } else {
+-                                              ctrl &= ~(priv->hw->link.speed);
+-                                      }
++                                      ctrl &= ~(priv->hw->link.speed);
+                               } else {
+                                       ctrl &= ~priv->hw->link.port;
+                               }
+-                              stmmac_hw_fix_mac_speed(priv);
+                               break;
+                       default:
+                               netif_warn(priv, link, priv->dev,
+-                                         "Speed (%d) not 10/100\n",
+-                                         phydev->speed);
++                                         "broken speed: %d\n", phydev->speed);
++                              phydev->speed = SPEED_UNKNOWN;
+                               break;
+                       }
+-
++                      if (phydev->speed != SPEED_UNKNOWN)
++                              stmmac_hw_fix_mac_speed(priv);
+                       priv->speed = phydev->speed;
+               }
+@@ -770,8 +758,8 @@ static void stmmac_adjust_link(struct ne
+       } else if (priv->oldlink) {
+               new_state = 1;
+               priv->oldlink = 0;
+-              priv->speed = 0;
+-              priv->oldduplex = -1;
++              priv->speed = SPEED_UNKNOWN;
++              priv->oldduplex = DUPLEX_UNKNOWN;
+       }
+       if (new_state && netif_msg_link(priv))
+@@ -833,8 +821,8 @@ static int stmmac_init_phy(struct net_de
+       int interface = priv->plat->interface;
+       int max_speed = priv->plat->max_speed;
+       priv->oldlink = 0;
+-      priv->speed = 0;
+-      priv->oldduplex = -1;
++      priv->speed = SPEED_UNKNOWN;
++      priv->oldduplex = DUPLEX_UNKNOWN;
+       if (priv->plat->phy_node) {
+               phydev = of_phy_connect(dev, priv->plat->phy_node,
+@@ -886,9 +874,7 @@ static int stmmac_init_phy(struct net_de
+       if (phydev->is_pseudo_fixed_link)
+               phydev->irq = PHY_POLL;
+-      netdev_dbg(priv->dev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
+-                 __func__, phydev->phy_id, phydev->link);
+-
++      phy_attached_info(phydev);
+       return 0;
+ }
+@@ -1014,7 +1000,7 @@ static void stmmac_free_rx_buffers(struc
+  * @dev: net device structure
+  * @flags: gfp flag.
+  * Description: this function initializes the DMA RX/TX descriptors
+- * and allocates the socket buffers. It suppors the chained and ring
++ * and allocates the socket buffers. It supports the chained and ring
+  * modes.
+  */
+ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+@@ -1127,13 +1113,6 @@ static void dma_free_tx_skbufs(struct st
+       int i;
+       for (i = 0; i < DMA_TX_SIZE; i++) {
+-              struct dma_desc *p;
+-
+-              if (priv->extend_desc)
+-                      p = &((priv->dma_etx + i)->basic);
+-              else
+-                      p = priv->dma_tx + i;
+-
+               if (priv->tx_skbuff_dma[i].buf) {
+                       if (priv->tx_skbuff_dma[i].map_as_page)
+                               dma_unmap_page(priv->device,
+@@ -1147,7 +1126,7 @@ static void dma_free_tx_skbufs(struct st
+                                                DMA_TO_DEVICE);
+               }
+-              if (priv->tx_skbuff[i] != NULL) {
++              if (priv->tx_skbuff[i]) {
+                       dev_kfree_skb_any(priv->tx_skbuff[i]);
+                       priv->tx_skbuff[i] = NULL;
+                       priv->tx_skbuff_dma[i].buf = 0;
+@@ -1271,6 +1250,28 @@ static void free_dma_desc_resources(stru
+ }
+ /**
++ *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
++ *  @priv: driver private structure
++ *  Description: It is used for enabling the rx queues in the MAC
++ */
++static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
++{
++      int rx_count = priv->dma_cap.number_rx_queues;
++      int queue = 0;
++
++      /* If GMAC does not have multiple queues, then this is not necessary*/
++      if (rx_count == 1)
++              return;
++
++      /**
++       *  If the core is synthesized with multiple rx queues / multiple
++       *  dma channels, then rx queues will be disabled by default.
++       *  For now only rx queue 0 is enabled.
++       */
++      priv->hw->mac->rx_queue_enable(priv->hw, queue);
++}
++
++/**
+  *  stmmac_dma_operation_mode - HW DMA operation mode
+  *  @priv: driver private structure
+  *  Description: it is used for configuring the DMA operation mode register in
+@@ -1671,10 +1672,6 @@ static int stmmac_hw_setup(struct net_de
+       /* Copy the MAC addr into the HW  */
+       priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
+-      /* If required, perform hw setup of the bus. */
+-      if (priv->plat->bus_setup)
+-              priv->plat->bus_setup(priv->ioaddr);
+-
+       /* PS and related bits will be programmed according to the speed */
+       if (priv->hw->pcs) {
+               int speed = priv->plat->mac_port_sel_speed;
+@@ -1691,6 +1688,10 @@ static int stmmac_hw_setup(struct net_de
+       /* Initialize the MAC Core */
+       priv->hw->mac->core_init(priv->hw, dev->mtu);
++      /* Initialize MAC RX Queues */
++      if (priv->hw->mac->rx_queue_enable)
++              stmmac_mac_enable_rx_queues(priv);
++
+       ret = priv->hw->mac->rx_ipc(priv->hw);
+       if (!ret) {
+               netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
+@@ -1711,8 +1712,10 @@ static int stmmac_hw_setup(struct net_de
+       if (init_ptp) {
+               ret = stmmac_init_ptp(priv);
+-              if (ret)
+-                      netdev_warn(priv->dev, "fail to init PTP.\n");
++              if (ret == -EOPNOTSUPP)
++                      netdev_warn(priv->dev, "PTP not supported by HW\n");
++              else if (ret)
++                      netdev_warn(priv->dev, "PTP init failed\n");
+       }
+ #ifdef CONFIG_DEBUG_FS
+@@ -1726,11 +1729,6 @@ static int stmmac_hw_setup(struct net_de
+       priv->hw->dma->start_tx(priv->ioaddr);
+       priv->hw->dma->start_rx(priv->ioaddr);
+-      /* Dump DMA/MAC registers */
+-      if (netif_msg_hw(priv)) {
+-              priv->hw->mac->dump_regs(priv->hw);
+-              priv->hw->dma->dump_regs(priv->ioaddr);
+-      }
+       priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
+       if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
+@@ -2519,7 +2517,7 @@ static int stmmac_rx(struct stmmac_priv
+               if (unlikely(status == discard_frame)) {
+                       priv->dev->stats.rx_errors++;
+                       if (priv->hwts_rx_en && !priv->extend_desc) {
+-                              /* DESC2 & DESC3 will be overwitten by device
++                              /* DESC2 & DESC3 will be overwritten by device
+                                * with timestamp value, hence reinitialize
+                                * them in stmmac_rx_refill() function so that
+                                * device can reuse it.
+@@ -2542,7 +2540,7 @@ static int stmmac_rx(struct stmmac_priv
+                       frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
+-                      /*  If frame length is greather than skb buffer size
++                      /*  If frame length is greater than skb buffer size
+                        *  (preallocated during init) then the packet is
+                        *  ignored
+                        */
+@@ -2669,7 +2667,7 @@ static int stmmac_poll(struct napi_struc
+       work_done = stmmac_rx(priv, budget);
+       if (work_done < budget) {
+-              napi_complete(napi);
++              napi_complete_done(napi, work_done);
+               stmmac_enable_dma_irq(priv);
+       }
+       return work_done;
+@@ -2762,7 +2760,7 @@ static netdev_features_t stmmac_fix_feat
+       /* Some GMAC devices have a bugged Jumbo frame support that
+        * needs to have the Tx COE disabled for oversized frames
+        * (due to limited buffer sizes). In this case we disable
+-       * the TX csum insertionin the TDES and not use SF.
++       * the TX csum insertion in the TDES and not use SF.
+        */
+       if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
+               features &= ~NETIF_F_CSUM_MASK;
+@@ -2908,9 +2906,7 @@ static void sysfs_display_ring(void *hea
+       struct dma_desc *p = (struct dma_desc *)head;
+       for (i = 0; i < size; i++) {
+-              u64 x;
+               if (extend_desc) {
+-                      x = *(u64 *) ep;
+                       seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+                                  i, (unsigned int)virt_to_phys(ep),
+                                  le32_to_cpu(ep->basic.des0),
+@@ -2919,7 +2915,6 @@ static void sysfs_display_ring(void *hea
+                                  le32_to_cpu(ep->basic.des3));
+                       ep++;
+               } else {
+-                      x = *(u64 *) p;
+                       seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+                                  i, (unsigned int)virt_to_phys(ep),
+                                  le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+@@ -2989,7 +2984,7 @@ static int stmmac_sysfs_dma_cap_read(str
+                  (priv->dma_cap.hash_filter) ? "Y" : "N");
+       seq_printf(seq, "\tMultiple MAC address registers: %s\n",
+                  (priv->dma_cap.multi_addr) ? "Y" : "N");
+-      seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n",
++      seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
+                  (priv->dma_cap.pcs) ? "Y" : "N");
+       seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
+                  (priv->dma_cap.sma_mdio) ? "Y" : "N");
+@@ -3265,44 +3260,8 @@ int stmmac_dvr_probe(struct device *devi
+       if ((phyaddr >= 0) && (phyaddr <= 31))
+               priv->plat->phy_addr = phyaddr;
+-      priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
+-      if (IS_ERR(priv->stmmac_clk)) {
+-              netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n",
+-                          __func__);
+-              /* If failed to obtain stmmac_clk and specific clk_csr value
+-               * is NOT passed from the platform, probe fail.
+-               */
+-              if (!priv->plat->clk_csr) {
+-                      ret = PTR_ERR(priv->stmmac_clk);
+-                      goto error_clk_get;
+-              } else {
+-                      priv->stmmac_clk = NULL;
+-              }
+-      }
+-      clk_prepare_enable(priv->stmmac_clk);
+-
+-      priv->pclk = devm_clk_get(priv->device, "pclk");
+-      if (IS_ERR(priv->pclk)) {
+-              if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) {
+-                      ret = -EPROBE_DEFER;
+-                      goto error_pclk_get;
+-              }
+-              priv->pclk = NULL;
+-      }
+-      clk_prepare_enable(priv->pclk);
+-
+-      priv->stmmac_rst = devm_reset_control_get(priv->device,
+-                                                STMMAC_RESOURCE_NAME);
+-      if (IS_ERR(priv->stmmac_rst)) {
+-              if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
+-                      ret = -EPROBE_DEFER;
+-                      goto error_hw_init;
+-              }
+-              dev_info(priv->device, "no reset control found\n");
+-              priv->stmmac_rst = NULL;
+-      }
+-      if (priv->stmmac_rst)
+-              reset_control_deassert(priv->stmmac_rst);
++      if (priv->plat->stmmac_rst)
++              reset_control_deassert(priv->plat->stmmac_rst);
+       /* Init MAC and get the capabilities */
+       ret = stmmac_hw_init(priv);
+@@ -3388,10 +3347,6 @@ error_netdev_register:
+ error_mdio_register:
+       netif_napi_del(&priv->napi);
+ error_hw_init:
+-      clk_disable_unprepare(priv->pclk);
+-error_pclk_get:
+-      clk_disable_unprepare(priv->stmmac_clk);
+-error_clk_get:
+       free_netdev(ndev);
+       return ret;
+@@ -3417,10 +3372,10 @@ int stmmac_dvr_remove(struct device *dev
+       stmmac_set_mac(priv->ioaddr, false);
+       netif_carrier_off(ndev);
+       unregister_netdev(ndev);
+-      if (priv->stmmac_rst)
+-              reset_control_assert(priv->stmmac_rst);
+-      clk_disable_unprepare(priv->pclk);
+-      clk_disable_unprepare(priv->stmmac_clk);
++      if (priv->plat->stmmac_rst)
++              reset_control_assert(priv->plat->stmmac_rst);
++      clk_disable_unprepare(priv->plat->pclk);
++      clk_disable_unprepare(priv->plat->stmmac_clk);
+       if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+           priv->hw->pcs != STMMAC_PCS_TBI &&
+           priv->hw->pcs != STMMAC_PCS_RTBI)
+@@ -3469,14 +3424,14 @@ int stmmac_suspend(struct device *dev)
+               stmmac_set_mac(priv->ioaddr, false);
+               pinctrl_pm_select_sleep_state(priv->device);
+               /* Disable clock in case of PWM is off */
+-              clk_disable(priv->pclk);
+-              clk_disable(priv->stmmac_clk);
++              clk_disable(priv->plat->pclk);
++              clk_disable(priv->plat->stmmac_clk);
+       }
+       spin_unlock_irqrestore(&priv->lock, flags);
+       priv->oldlink = 0;
+-      priv->speed = 0;
+-      priv->oldduplex = -1;
++      priv->speed = SPEED_UNKNOWN;
++      priv->oldduplex = DUPLEX_UNKNOWN;
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(stmmac_suspend);
+@@ -3509,9 +3464,9 @@ int stmmac_resume(struct device *dev)
+               priv->irq_wake = 0;
+       } else {
+               pinctrl_pm_select_default_state(priv->device);
+-              /* enable the clk prevously disabled */
+-              clk_enable(priv->stmmac_clk);
+-              clk_enable(priv->pclk);
++              /* enable the clk previously disabled */
++              clk_enable(priv->plat->stmmac_clk);
++              clk_enable(priv->plat->pclk);
+               /* reset the phy so that it's ready */
+               if (priv->mii)
+                       stmmac_mdio_reset(priv->mii);
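
A recurring theme in the stmmac_main.c hunks above is that the CSR clock, APB clock, PTP reference clock and reset line now live in plat_stmmacenet_data rather than in stmmac_priv, so acquiring them becomes the platform/glue layer's job. A hypothetical fragment of such a platform probe (the real code lives in stmmac_platform.c, which this excerpt does not show) might do:

    /* Assumed to run in a platform probe with 'pdev' and 'plat_dat' in scope. */
    plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, STMMAC_RESOURCE_NAME);
    if (IS_ERR(plat_dat->stmmac_clk))
            plat_dat->stmmac_clk = NULL;    /* clk_* helpers accept NULL */
    clk_prepare_enable(plat_dat->stmmac_clk);

    plat_dat->stmmac_rst = devm_reset_control_get(&pdev->dev,
                                                  STMMAC_RESOURCE_NAME);
    if (IS_ERR(plat_dat->stmmac_rst))
            plat_dat->stmmac_rst = NULL;
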
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+@@ -13,10 +13,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+@@ -24,13 +20,14 @@
+   Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ *******************************************************************************/
++#include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/mii.h>
+-#include <linux/phy.h>
+-#include <linux/slab.h>
+ #include <linux/of.h>
+ #include <linux/of_gpio.h>
+ #include <linux/of_mdio.h>
+-#include <asm/io.h>
++#include <linux/phy.h>
++#include <linux/slab.h>
+ #include "stmmac.h"
+@@ -42,22 +39,6 @@
+ #define MII_GMAC4_WRITE                       (1 << MII_GMAC4_GOC_SHIFT)
+ #define MII_GMAC4_READ                        (3 << MII_GMAC4_GOC_SHIFT)
+-static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
+-{
+-      unsigned long curr;
+-      unsigned long finish = jiffies + 3 * HZ;
+-
+-      do {
+-              curr = jiffies;
+-              if (readl(ioaddr + mii_addr) & MII_BUSY)
+-                      cpu_relax();
+-              else
+-                      return 0;
+-      } while (!time_after_eq(curr, finish));
+-
+-      return -EBUSY;
+-}
+-
+ /**
+  * stmmac_mdio_read
+  * @bus: points to the mii_bus structure
+@@ -74,7 +55,7 @@ static int stmmac_mdio_read(struct mii_b
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       unsigned int mii_address = priv->hw->mii.addr;
+       unsigned int mii_data = priv->hw->mii.data;
+-
++      u32 v;
+       int data;
+       u32 value = MII_BUSY;
+@@ -86,12 +67,14 @@ static int stmmac_mdio_read(struct mii_b
+       if (priv->plat->has_gmac4)
+               value |= MII_GMAC4_READ;
+-      if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
++      if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
++                             100, 10000))
+               return -EBUSY;
+       writel(value, priv->ioaddr + mii_address);
+-      if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
++      if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
++                             100, 10000))
+               return -EBUSY;
+       /* Read the data from the MII data register */
+@@ -115,7 +98,7 @@ static int stmmac_mdio_write(struct mii_
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       unsigned int mii_address = priv->hw->mii.addr;
+       unsigned int mii_data = priv->hw->mii.data;
+-
++      u32 v;
+       u32 value = MII_BUSY;
+       value |= (phyaddr << priv->hw->mii.addr_shift)
+@@ -130,7 +113,8 @@ static int stmmac_mdio_write(struct mii_
+               value |= MII_WRITE;
+       /* Wait until any existing MII operation is complete */
+-      if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
++      if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
++                             100, 10000))
+               return -EBUSY;
+       /* Set the MII address register to write */
+@@ -138,7 +122,8 @@ static int stmmac_mdio_write(struct mii_
+       writel(value, priv->ioaddr + mii_address);
+       /* Wait until any existing MII operation is complete */
+-      return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
++      return readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
++                                100, 10000);
+ }
+ /**
+@@ -156,9 +141,9 @@ int stmmac_mdio_reset(struct mii_bus *bu
+ #ifdef CONFIG_OF
+       if (priv->device->of_node) {
+-
+               if (data->reset_gpio < 0) {
+                       struct device_node *np = priv->device->of_node;
++
+                       if (!np)
+                               return 0;
+@@ -198,7 +183,7 @@ int stmmac_mdio_reset(struct mii_bus *bu
+       /* This is a workaround for problems with the STE101P PHY.
+        * It doesn't complete its reset until at least one clock cycle
+-       * on MDC, so perform a dummy mdio read. To be upadted for GMAC4
++       * on MDC, so perform a dummy mdio read. To be updated for GMAC4
+        * if needed.
+        */
+       if (!priv->plat->has_gmac4)
+@@ -225,7 +210,7 @@ int stmmac_mdio_register(struct net_devi
+               return 0;
+       new_bus = mdiobus_alloc();
+-      if (new_bus == NULL)
++      if (!new_bus)
+               return -ENOMEM;
+       if (mdio_bus_data->irqs)
+@@ -262,49 +247,48 @@ int stmmac_mdio_register(struct net_devi
+       found = 0;
+       for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+               struct phy_device *phydev = mdiobus_get_phy(new_bus, addr);
+-              if (phydev) {
+-                      int act = 0;
+-                      char irq_num[4];
+-                      char *irq_str;
+-
+-                      /*
+-                       * If an IRQ was provided to be assigned after
+-                       * the bus probe, do it here.
+-                       */
+-                      if ((mdio_bus_data->irqs == NULL) &&
+-                          (mdio_bus_data->probed_phy_irq > 0)) {
+-                              new_bus->irq[addr] =
+-                                      mdio_bus_data->probed_phy_irq;
+-                              phydev->irq = mdio_bus_data->probed_phy_irq;
+-                      }
+-
+-                      /*
+-                       * If we're going to bind the MAC to this PHY bus,
+-                       * and no PHY number was provided to the MAC,
+-                       * use the one probed here.
+-                       */
+-                      if (priv->plat->phy_addr == -1)
+-                              priv->plat->phy_addr = addr;
+-
+-                      act = (priv->plat->phy_addr == addr);
+-                      switch (phydev->irq) {
+-                      case PHY_POLL:
+-                              irq_str = "POLL";
+-                              break;
+-                      case PHY_IGNORE_INTERRUPT:
+-                              irq_str = "IGNORE";
+-                              break;
+-                      default:
+-                              sprintf(irq_num, "%d", phydev->irq);
+-                              irq_str = irq_num;
+-                              break;
+-                      }
+-                      netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
+-                                  phydev->phy_id, addr,
+-                                  irq_str, phydev_name(phydev),
+-                                  act ? " active" : "");
+-                      found = 1;
++              int act = 0;
++              char irq_num[4];
++              char *irq_str;
++
++              if (!phydev)
++                      continue;
++
++              /*
++               * If an IRQ was provided to be assigned after
++               * the bus probe, do it here.
++               */
++              if (!mdio_bus_data->irqs &&
++                  (mdio_bus_data->probed_phy_irq > 0)) {
++                      new_bus->irq[addr] = mdio_bus_data->probed_phy_irq;
++                      phydev->irq = mdio_bus_data->probed_phy_irq;
++              }
++
++              /*
++               * If we're going to bind the MAC to this PHY bus,
++               * and no PHY number was provided to the MAC,
++               * use the one probed here.
++               */
++              if (priv->plat->phy_addr == -1)
++                      priv->plat->phy_addr = addr;
++
++              act = (priv->plat->phy_addr == addr);
++              switch (phydev->irq) {
++              case PHY_POLL:
++                      irq_str = "POLL";
++                      break;
++              case PHY_IGNORE_INTERRUPT:
++                      irq_str = "IGNORE";
++                      break;
++              default:
++                      sprintf(irq_num, "%d", phydev->irq);
++                      irq_str = irq_num;
++                      break;
+               }
++              netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
++                          phydev->phy_id, addr, irq_str, phydev_name(phydev),
++                          act ? " active" : "");
++              found = 1;
+       }
+       if (!found && !mdio_node) {
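
For reference: the hunks above drop the driver's private stmmac_mdio_busy_wait() helper in favour of readl_poll_timeout() from <linux/iopoll.h>, which re-reads a register until a condition holds or a timeout expires. A minimal sketch of the pattern, using a hypothetical stmmac_mdio_wait_idle() helper (MII_BUSY carries the same value the driver defines):

    #include <linux/io.h>
    #include <linux/iopoll.h>

    #define MII_BUSY 0x00000001     /* same bit stmmac_mdio.c tests */

    /* Hypothetical helper, not part of the patch: poll the MII address
     * register until the BUSY bit clears, sampling every 100us and giving
     * up after 10ms -- the same arguments the converted read/write paths
     * pass above.
     */
    static int stmmac_mdio_wait_idle(void __iomem *ioaddr, unsigned int mii_address)
    {
            u32 v;

            return readl_poll_timeout(ioaddr + mii_address, v, !(v & MII_BUSY),
                                      100, 10000);
    }
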
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+@@ -12,10 +12,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -12,10 +12,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+@@ -121,7 +117,6 @@ static struct stmmac_axi *stmmac_axi_set
+       axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
+       axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
+       axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
+-      axi->axi_axi_all = of_property_read_bool(np, "snps,axi_all");
+       axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
+       axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
+       axi->axi_rb =  of_property_read_bool(np, "snps,axi_rb");
+@@ -181,10 +176,19 @@ static int stmmac_dt_phy(struct plat_stm
+               mdio = false;
+       }
+-      /* If snps,dwmac-mdio is passed from DT, always register the MDIO */
+-      for_each_child_of_node(np, plat->mdio_node) {
+-              if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio"))
+-                      break;
++      /* exception for dwmac-dwc-qos-eth glue logic */
++      if (of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) {
++              plat->mdio_node = of_get_child_by_name(np, "mdio");
++      } else {
++              /**
++               * If snps,dwmac-mdio is passed from DT, always register
++               * the MDIO
++               */
++              for_each_child_of_node(np, plat->mdio_node) {
++                      if (of_device_is_compatible(plat->mdio_node,
++                                                  "snps,dwmac-mdio"))
++                              break;
++              }
+       }
+       if (plat->mdio_node) {
+@@ -249,6 +253,9 @@ stmmac_probe_config_dt(struct platform_d
+       plat->force_sf_dma_mode =
+               of_property_read_bool(np, "snps,force_sf_dma_mode");
++      plat->en_tx_lpi_clockgating =
++              of_property_read_bool(np, "snps,en-tx-lpi-clockgating");
++
+       /* Set the maxmtu to a default of JUMBO_LEN in case the
+        * parameter is not present in the device tree.
+        */
+@@ -333,7 +340,54 @@ stmmac_probe_config_dt(struct platform_d
+       plat->axi = stmmac_axi_setup(pdev);
++      /* clock setup */
++      plat->stmmac_clk = devm_clk_get(&pdev->dev,
++                                      STMMAC_RESOURCE_NAME);
++      if (IS_ERR(plat->stmmac_clk)) {
++              dev_warn(&pdev->dev, "Cannot get CSR clock\n");
++              plat->stmmac_clk = NULL;
++      }
++      clk_prepare_enable(plat->stmmac_clk);
++
++      plat->pclk = devm_clk_get(&pdev->dev, "pclk");
++      if (IS_ERR(plat->pclk)) {
++              if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
++                      goto error_pclk_get;
++
++              plat->pclk = NULL;
++      }
++      clk_prepare_enable(plat->pclk);
++
++      /* Fall-back to main clock in case of no PTP ref is passed */
++      plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref");
++      if (IS_ERR(plat->clk_ptp_ref)) {
++              plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
++              plat->clk_ptp_ref = NULL;
++              dev_warn(&pdev->dev, "PTP uses main clock\n");
++      } else {
++              clk_prepare_enable(plat->clk_ptp_ref);
++              plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
++              dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
++      }
++
++      plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
++                                                STMMAC_RESOURCE_NAME);
++      if (IS_ERR(plat->stmmac_rst)) {
++              if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
++                      goto error_hw_init;
++
++              dev_info(&pdev->dev, "no reset control found\n");
++              plat->stmmac_rst = NULL;
++      }
++
+       return plat;
++
++error_hw_init:
++      clk_disable_unprepare(plat->pclk);
++error_pclk_get:
++      clk_disable_unprepare(plat->stmmac_clk);
++
++      return ERR_PTR(-EPROBE_DEFER);
+ }
+ /**
+@@ -357,7 +411,7 @@ void stmmac_remove_config_dt(struct plat
+ struct plat_stmmacenet_data *
+ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
+ {
+-      return ERR_PTR(-ENOSYS);
++      return ERR_PTR(-EINVAL);
+ }
+ void stmmac_remove_config_dt(struct platform_device *pdev,
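
The clock setup added to stmmac_probe_config_dt() above treats the CSR clock, "pclk" and "clk_ptp_ref" as optional, but still propagates -EPROBE_DEFER so the probe is retried once the clock provider appears. A rough sketch of that pattern, with a hypothetical example_get_pclk() helper (not the driver's code):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/platform_device.h>

    /* Sketch only: fetch an optional clock, let -EPROBE_DEFER escape so the
     * driver core retries the probe later, and treat any other error as
     * "clock not described in DT", mirroring the "pclk" handling above.
     */
    static struct clk *example_get_pclk(struct platform_device *pdev)
    {
            struct clk *pclk = devm_clk_get(&pdev->dev, "pclk");

            if (IS_ERR(pclk)) {
                    if (PTR_ERR(pclk) == -EPROBE_DEFER)
                            return pclk;    /* caller checks IS_ERR() and defers */
                    pclk = NULL;            /* optional clock is simply absent */
            }
            clk_prepare_enable(pclk);       /* a NULL clk is a no-op here */

            return pclk;
    }
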
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+@@ -12,10 +12,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+@@ -12,10 +12,6 @@
+   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+-  You should have received a copy of the GNU General Public License along with
+-  this program; if not, write to the Free Software Foundation, Inc.,
+-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-
+   The full GNU General Public License is included in this distribution in
+   the file called "COPYING".
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -103,7 +103,6 @@ struct stmmac_axi {
+       u32 axi_wr_osr_lmt;
+       u32 axi_rd_osr_lmt;
+       bool axi_kbbe;
+-      bool axi_axi_all;
+       u32 axi_blen[AXI_BLEN];
+       bool axi_fb;
+       bool axi_mb;
+@@ -135,13 +134,18 @@ struct plat_stmmacenet_data {
+       int tx_fifo_size;
+       int rx_fifo_size;
+       void (*fix_mac_speed)(void *priv, unsigned int speed);
+-      void (*bus_setup)(void __iomem *ioaddr);
+       int (*init)(struct platform_device *pdev, void *priv);
+       void (*exit)(struct platform_device *pdev, void *priv);
+       void *bsp_priv;
++      struct clk *stmmac_clk;
++      struct clk *pclk;
++      struct clk *clk_ptp_ref;
++      unsigned int clk_ptp_rate;
++      struct reset_control *stmmac_rst;
+       struct stmmac_axi *axi;
+       int has_gmac4;
+       bool tso_en;
+       int mac_port_sel_speed;
++      bool en_tx_lpi_clockgating;
+ };
+ #endif
diff --git a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch
new file mode 100644 (file)
index 0000000..285e4d2
--- /dev/null
@@ -0,0 +1,5952 @@
+--- a/Documentation/devicetree/bindings/net/stmmac.txt
++++ b/Documentation/devicetree/bindings/net/stmmac.txt
+@@ -7,9 +7,12 @@ Required properties:
+ - interrupt-parent: Should be the phandle for the interrupt controller
+   that services interrupts for this device
+ - interrupts: Should contain the STMMAC interrupts
+-- interrupt-names: Should contain the interrupt names "macirq"
+-  "eth_wake_irq" if this interrupt is supported in the "interrupts"
+-  property
++- interrupt-names: Should contain a list of interrupt names corresponding to
++      the interrupts in the interrupts property, if available.
++      Valid interrupt names are:
++  - "macirq" (combined signal for various interrupt events)
++  - "eth_wake_irq" (the interrupt to manage the remote wake-up packet detection)
++  - "eth_lpi" (the interrupt that occurs when Tx or Rx enters/exits LPI state)
+ - phy-mode: See ethernet.txt file in the same directory.
+ - snps,reset-gpio     gpio number for phy reset.
+ - snps,reset-active-low boolean flag to indicate if phy reset is active low.
+@@ -28,9 +31,9 @@ Optional properties:
+   clocks may be specified in derived bindings.
+ - clock-names: One name for each entry in the clocks property, the
+   first one should be "stmmaceth" and the second one should be "pclk".
+-- clk_ptp_ref: this is the PTP reference clock; in case of the PTP is
+-  available this clock is used for programming the Timestamp Addend Register.
+-  If not passed then the system clock will be used and this is fine on some
++- ptp_ref: this is the PTP reference clock; if the PTP is available, this
++  clock is used for programming the Timestamp Addend Register. If not
++  passed then the system clock will be used and this is fine on some
+   platforms.
+ - tx-fifo-depth: See ethernet.txt file in the same directory
+ - rx-fifo-depth: See ethernet.txt file in the same directory
+@@ -72,7 +75,45 @@ Optional properties:
+       - snps,mb: mixed-burst
+       - snps,rb: rebuild INCRx Burst
+ - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
+-
++- Multiple RX Queues parameters: below the list of all the parameters to
++                               configure the multiple RX queues:
++      - snps,rx-queues-to-use: number of RX queues to be used in the driver
++      - Choose one of these RX scheduling algorithms:
++              - snps,rx-sched-sp: Strict priority
++              - snps,rx-sched-wsp: Weighted Strict priority
++      - For each RX queue
++              - Choose one of these modes:
++                      - snps,dcb-algorithm: Queue to be enabled as DCB
++                      - snps,avb-algorithm: Queue to be enabled as AVB
++              - snps,map-to-dma-channel: Channel to map
++              - Specify specific packet routing:
++                      - snps,route-avcp: AV Untagged Control packets
++                      - snps,route-ptp: PTP Packets
++                      - snps,route-dcbcp: DCB Control Packets
++                      - snps,route-up: Untagged Packets
++                      - snps,route-multi-broad: Multicast & Broadcast Packets
++              - snps,priority: RX queue priority (Range: 0x0 to 0xF)
++- Multiple TX Queues parameters: below the list of all the parameters to
++                               configure the multiple TX queues:
++      - snps,tx-queues-to-use: number of TX queues to be used in the driver
++      - Choose one of these TX scheduling algorithms:
++              - snps,tx-sched-wrr: Weighted Round Robin
++              - snps,tx-sched-wfq: Weighted Fair Queuing
++              - snps,tx-sched-dwrr: Deficit Weighted Round Robin
++              - snps,tx-sched-sp: Strict priority
++      - For each TX queue
++              - snps,weight: TX queue weight (if using a DCB weight algorithm)
++              - Choose one of these modes:
++                      - snps,dcb-algorithm: TX queue will be working in DCB
++                      - snps,avb-algorithm: TX queue will be working in AVB
++                        [Attention] Queue 0 is reserved for legacy traffic
++                        and so no AVB is available in this queue.
++              - Configure Credit Base Shaper (if AVB Mode selected):
++                      - snps,send_slope: CBS send slope credit value
++                      - snps,idle_slope: CBS idle slope credit value
++                      - snps,high_credit: CBS hiCredit value
++                      - snps,low_credit: CBS loCredit value
++              - snps,priority: TX queue priority (Range: 0x0 to 0xF)
+ Examples:
+       stmmac_axi_setup: stmmac-axi-config {
+@@ -81,12 +122,41 @@ Examples:
+               snps,blen = <256 128 64 32 0 0 0>;
+       };
++      mtl_rx_setup: rx-queues-config {
++              snps,rx-queues-to-use = <1>;
++              snps,rx-sched-sp;
++              queue0 {
++                      snps,dcb-algorithm;
++                      snps,map-to-dma-channel = <0x0>;
++                      snps,priority = <0x0>;
++              };
++      };
++
++      mtl_tx_setup: tx-queues-config {
++              snps,tx-queues-to-use = <2>;
++              snps,tx-sched-wrr;
++              queue0 {
++                      snps,weight = <0x10>;
++                      snps,dcb-algorithm;
++                      snps,priority = <0x0>;
++              };
++
++              queue1 {
++                      snps,avb-algorithm;
++                      snps,send_slope = <0x1000>;
++                      snps,idle_slope = <0x1000>;
++                      snps,high_credit = <0x3E800>;
++                      snps,low_credit = <0xFFC18000>;
++                      snps,priority = <0x1>;
++              };
++      };
++
+       gmac0: ethernet@e0800000 {
+               compatible = "st,spear600-gmac";
+               reg = <0xe0800000 0x8000>;
+               interrupt-parent = <&vic1>;
+-              interrupts = <24 23>;
+-              interrupt-names = "macirq", "eth_wake_irq";
++              interrupts = <24 23 22>;
++              interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
+               mac-address = [000000000000]; /* Filled in by U-Boot */
+               max-frame-size = <3800>;
+               phy-mode = "gmii";
+@@ -104,4 +174,6 @@ Examples:
+                       phy1: ethernet-phy@0 {
+                       };
+               };
++              snps,mtl-rx-config = <&mtl_rx_setup>;
++              snps,mtl-tx-config = <&mtl_tx_setup>;
+       };
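
The queue parameters documented above are ordinary u32 cells and boolean properties under phandle-referenced sub-nodes, so they are read with the usual of_property helpers. A simplified sketch of parsing the RX side (not the actual stmmac code; per-queue child iteration omitted):

    #include <linux/of.h>
    #include <linux/printk.h>

    /* Sketch only: resolve the snps,mtl-rx-config phandle and read the
     * queue count and scheduling algorithm documented above.
     */
    static void example_parse_mtl_rx(struct device_node *np)
    {
            struct device_node *rx_node;
            u32 rx_queues = 1;              /* sensible default: one queue */

            rx_node = of_parse_phandle(np, "snps,mtl-rx-config", 0);
            if (!rx_node)
                    return;

            of_property_read_u32(rx_node, "snps,rx-queues-to-use", &rx_queues);
            pr_info("MTL: %u RX queue(s), strict priority: %d\n", rx_queues,
                    of_property_read_bool(rx_node, "snps,rx-sched-sp"));

            of_node_put(rx_node);
    }
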
+--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
++++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+@@ -37,6 +37,7 @@
+ #define TSE_PCS_CONTROL_AN_EN_MASK                    BIT(12)
+ #define TSE_PCS_CONTROL_REG                           0x00
+ #define TSE_PCS_CONTROL_RESTART_AN_MASK                       BIT(9)
++#define TSE_PCS_CTRL_AUTONEG_SGMII                    0x1140
+ #define TSE_PCS_IF_MODE_REG                           0x28
+ #define TSE_PCS_LINK_TIMER_0_REG                      0x24
+ #define TSE_PCS_LINK_TIMER_1_REG                      0x26
+@@ -65,6 +66,7 @@
+ #define TSE_PCS_SW_RESET_TIMEOUT                      100
+ #define TSE_PCS_USE_SGMII_AN_MASK                     BIT(1)
+ #define TSE_PCS_USE_SGMII_ENA                         BIT(0)
++#define TSE_PCS_IF_USE_SGMII                          0x03
+ #define SGMII_ADAPTER_CTRL_REG                                0x00
+ #define SGMII_ADAPTER_DISABLE                         0x0001
+@@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, str
+ {
+       int ret = 0;
+-      writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG);
++      writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
++
++      writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
+       writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
+       writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
+--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+@@ -26,12 +26,15 @@
+ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+ {
+-      struct stmmac_priv *priv = (struct stmmac_priv *)p;
+-      unsigned int entry = priv->cur_tx;
+-      struct dma_desc *desc = priv->dma_tx + entry;
++      struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
+       unsigned int nopaged_len = skb_headlen(skb);
++      struct stmmac_priv *priv = tx_q->priv_data;
++      unsigned int entry = tx_q->cur_tx;
+       unsigned int bmax, des2;
+       unsigned int i = 1, len;
++      struct dma_desc *desc;
++
++      desc = tx_q->dma_tx + entry;
+       if (priv->plat->enh_desc)
+               bmax = BUF_SIZE_8KiB;
+@@ -45,16 +48,16 @@ static int stmmac_jumbo_frm(void *p, str
+       desc->des2 = cpu_to_le32(des2);
+       if (dma_mapping_error(priv->device, des2))
+               return -1;
+-      priv->tx_skbuff_dma[entry].buf = des2;
+-      priv->tx_skbuff_dma[entry].len = bmax;
++      tx_q->tx_skbuff_dma[entry].buf = des2;
++      tx_q->tx_skbuff_dma[entry].len = bmax;
+       /* do not close the descriptor and do not set own bit */
+       priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
+-                                      0, false);
++                                      0, false, skb->len);
+       while (len != 0) {
+-              priv->tx_skbuff[entry] = NULL;
++              tx_q->tx_skbuff[entry] = NULL;
+               entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+-              desc = priv->dma_tx + entry;
++              desc = tx_q->dma_tx + entry;
+               if (len > bmax) {
+                       des2 = dma_map_single(priv->device,
+@@ -63,11 +66,11 @@ static int stmmac_jumbo_frm(void *p, str
+                       desc->des2 = cpu_to_le32(des2);
+                       if (dma_mapping_error(priv->device, des2))
+                               return -1;
+-                      priv->tx_skbuff_dma[entry].buf = des2;
+-                      priv->tx_skbuff_dma[entry].len = bmax;
++                      tx_q->tx_skbuff_dma[entry].buf = des2;
++                      tx_q->tx_skbuff_dma[entry].len = bmax;
+                       priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
+                                                       STMMAC_CHAIN_MODE, 1,
+-                                                      false);
++                                                      false, skb->len);
+                       len -= bmax;
+                       i++;
+               } else {
+@@ -77,17 +80,17 @@ static int stmmac_jumbo_frm(void *p, str
+                       desc->des2 = cpu_to_le32(des2);
+                       if (dma_mapping_error(priv->device, des2))
+                               return -1;
+-                      priv->tx_skbuff_dma[entry].buf = des2;
+-                      priv->tx_skbuff_dma[entry].len = len;
++                      tx_q->tx_skbuff_dma[entry].buf = des2;
++                      tx_q->tx_skbuff_dma[entry].len = len;
+                       /* last descriptor can be set now */
+                       priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+                                                       STMMAC_CHAIN_MODE, 1,
+-                                                      true);
++                                                      true, skb->len);
+                       len = 0;
+               }
+       }
+-      priv->cur_tx = entry;
++      tx_q->cur_tx = entry;
+       return entry;
+ }
+@@ -136,32 +139,34 @@ static void stmmac_init_dma_chain(void *
+ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
+ {
+-      struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
++      struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr;
++      struct stmmac_priv *priv = rx_q->priv_data;
+       if (priv->hwts_rx_en && !priv->extend_desc)
+               /* NOTE: Device will overwrite des3 with timestamp value if
+                * 1588-2002 time stamping is enabled, hence reinitialize it
+                * to keep explicit chaining in the descriptor.
+                */
+-              p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy +
+-                                    (((priv->dirty_rx) + 1) %
++              p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
++                                    (((rx_q->dirty_rx) + 1) %
+                                      DMA_RX_SIZE) *
+                                     sizeof(struct dma_desc)));
+ }
+ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
+ {
+-      struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+-      unsigned int entry = priv->dirty_tx;
++      struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
++      struct stmmac_priv *priv = tx_q->priv_data;
++      unsigned int entry = tx_q->dirty_tx;
+-      if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
++      if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
+           priv->hwts_tx_en)
+               /* NOTE: Device will overwrite des3 with timestamp value if
+                * 1588-2002 time stamping is enabled, hence reinitialize it
+                * to keep explicit chaining in the descriptor.
+                */
+-              p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy +
+-                                    ((priv->dirty_tx + 1) % DMA_TX_SIZE))
++              p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
++                                    ((tx_q->dirty_tx + 1) % DMA_TX_SIZE))
+                                     * sizeof(struct dma_desc)));
+ }
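
The chain-mode callbacks above now receive a per-queue structure rather than the whole driver context; each queue carries a back-pointer so the callbacks can still reach the private data. Roughly (a sketch only; the real stmmac_tx_queue in the driver also holds the descriptor ring, DMA addresses and skb bookkeeping):

    struct stmmac_priv;

    /* Sketch of the per-TX-queue layout implied by the hunks above. */
    struct example_tx_queue {
            unsigned int cur_tx;            /* producer index, was priv->cur_tx */
            unsigned int dirty_tx;          /* consumer index, was priv->dirty_tx */
            struct stmmac_priv *priv_data;  /* back-pointer to the driver state */
    };
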
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -246,6 +246,15 @@ struct stmmac_extra_stats {
+ #define STMMAC_TX_MAX_FRAMES  256
+ #define STMMAC_TX_FRAMES      64
++/* Packets types */
++enum packets_types {
++      PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */
++      PACKET_PTPQ = 0x2, /* PTP Packets */
++      PACKET_DCBCPQ = 0x3, /* DCB Control Packets */
++      PACKET_UPQ = 0x4, /* Untagged Packets */
++      PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */
++};
++
+ /* Rx IPC status */
+ enum rx_frame_status {
+       good_frame = 0x0,
+@@ -324,6 +333,9 @@ struct dma_features {
+       unsigned int number_tx_queues;
+       /* Alternate (enhanced) DESC mode */
+       unsigned int enh_desc;
++      /* TX and RX FIFO sizes */
++      unsigned int tx_fifo_size;
++      unsigned int rx_fifo_size;
+ };
+ /* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+@@ -361,7 +373,7 @@ struct stmmac_desc_ops {
+       /* Invoked by the xmit function to prepare the tx descriptor */
+       void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
+                                bool csum_flag, int mode, bool tx_own,
+-                               bool ls);
++                               bool ls, unsigned int tot_pkt_len);
+       void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
+                                   int len2, bool tx_own, bool ls,
+                                   unsigned int tcphdrlen,
+@@ -413,6 +425,14 @@ struct stmmac_dma_ops {
+       int (*reset)(void __iomem *ioaddr);
+       void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
+                    u32 dma_tx, u32 dma_rx, int atds);
++      void (*init_chan)(void __iomem *ioaddr,
++                        struct stmmac_dma_cfg *dma_cfg, u32 chan);
++      void (*init_rx_chan)(void __iomem *ioaddr,
++                           struct stmmac_dma_cfg *dma_cfg,
++                           u32 dma_rx_phy, u32 chan);
++      void (*init_tx_chan)(void __iomem *ioaddr,
++                           struct stmmac_dma_cfg *dma_cfg,
++                           u32 dma_tx_phy, u32 chan);
+       /* Configure the AXI Bus Mode Register */
+       void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
+       /* Dump DMA registers */
+@@ -421,25 +441,28 @@ struct stmmac_dma_ops {
+        * An invalid value enables the store-and-forward mode */
+       void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
+                        int rxfifosz);
++      void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
++                          int fifosz);
++      void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
+       /* To track extra statistic (if supported) */
+       void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+                                  void __iomem *ioaddr);
+       void (*enable_dma_transmission) (void __iomem *ioaddr);
+-      void (*enable_dma_irq) (void __iomem *ioaddr);
+-      void (*disable_dma_irq) (void __iomem *ioaddr);
+-      void (*start_tx) (void __iomem *ioaddr);
+-      void (*stop_tx) (void __iomem *ioaddr);
+-      void (*start_rx) (void __iomem *ioaddr);
+-      void (*stop_rx) (void __iomem *ioaddr);
++      void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
++      void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
++      void (*start_tx)(void __iomem *ioaddr, u32 chan);
++      void (*stop_tx)(void __iomem *ioaddr, u32 chan);
++      void (*start_rx)(void __iomem *ioaddr, u32 chan);
++      void (*stop_rx)(void __iomem *ioaddr, u32 chan);
+       int (*dma_interrupt) (void __iomem *ioaddr,
+-                            struct stmmac_extra_stats *x);
++                            struct stmmac_extra_stats *x, u32 chan);
+       /* If supported then get the optional core features */
+       void (*get_hw_feature)(void __iomem *ioaddr,
+                              struct dma_features *dma_cap);
+       /* Program the HW RX Watchdog */
+-      void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
+-      void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
+-      void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
++      void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan);
++      void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
++      void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
+       void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+       void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+       void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+@@ -451,20 +474,44 @@ struct mac_device_info;
+ struct stmmac_ops {
+       /* MAC core initialization */
+       void (*core_init)(struct mac_device_info *hw, int mtu);
++      /* Enable the MAC RX/TX */
++      void (*set_mac)(void __iomem *ioaddr, bool enable);
+       /* Enable and verify that the IPC module is supported */
+       int (*rx_ipc)(struct mac_device_info *hw);
+       /* Enable RX Queues */
+-      void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue);
++      void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
++      /* RX Queues Priority */
++      void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
++      /* TX Queues Priority */
++      void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
++      /* RX Queues Routing */
++      void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet,
++                               u32 queue);
++      /* Program RX Algorithms */
++      void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg);
++      /* Program TX Algorithms */
++      void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
++      /* Set MTL TX queues weight */
++      void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
++                                      u32 weight, u32 queue);
++      /* RX MTL queue to RX dma mapping */
++      void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
++      /* Configure AV Algorithm */
++      void (*config_cbs)(struct mac_device_info *hw, u32 send_slope,
++                         u32 idle_slope, u32 high_credit, u32 low_credit,
++                         u32 queue);
+       /* Dump MAC registers */
+       void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
+       /* Handle extra events on specific interrupts hw dependent */
+       int (*host_irq_status)(struct mac_device_info *hw,
+                              struct stmmac_extra_stats *x);
++      /* Handle MTL interrupts */
++      int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan);
+       /* Multicast filter setting */
+       void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
+       /* Flow control setting */
+       void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
+-                        unsigned int fc, unsigned int pause_time);
++                        unsigned int fc, unsigned int pause_time, u32 tx_cnt);
+       /* Set power management mode (e.g. magic frame) */
+       void (*pmt)(struct mac_device_info *hw, unsigned long mode);
+       /* Set/Get Unicast MAC addresses */
+@@ -477,7 +524,8 @@ struct stmmac_ops {
+       void (*reset_eee_mode)(struct mac_device_info *hw);
+       void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
+       void (*set_eee_pls)(struct mac_device_info *hw, int link);
+-      void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x);
++      void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
++                    u32 rx_queues, u32 tx_queues);
+       /* PCS calls */
+       void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+                            bool loopback);
+@@ -547,6 +595,11 @@ struct mac_device_info {
+       unsigned int ps;
+ };
++struct stmmac_rx_routing {
++      u32 reg_mask;
++      u32 reg_shift;
++};
++
+ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
+                                       int perfect_uc_entries,
+                                       int *synopsys_id);
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+@@ -14,16 +14,34 @@
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+ #include <linux/device.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/ethtool.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/ioport.h>
+ #include <linux/module.h>
++#include <linux/of_device.h>
+ #include <linux/of_net.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/platform_device.h>
++#include <linux/reset.h>
+ #include <linux/stmmac.h>
+ #include "stmmac_platform.h"
++#include "dwmac4.h"
++
++struct tegra_eqos {
++      struct device *dev;
++      void __iomem *regs;
++
++      struct reset_control *rst;
++      struct clk *clk_master;
++      struct clk *clk_slave;
++      struct clk *clk_tx;
++      struct clk *clk_rx;
++
++      struct gpio_desc *reset;
++};
+ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
+                                  struct plat_stmmacenet_data *plat_dat)
+@@ -106,13 +124,309 @@ static int dwc_eth_dwmac_config_dt(struc
+       return 0;
+ }
++static void *dwc_qos_probe(struct platform_device *pdev,
++                         struct plat_stmmacenet_data *plat_dat,
++                         struct stmmac_resources *stmmac_res)
++{
++      int err;
++
++      plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
++      if (IS_ERR(plat_dat->stmmac_clk)) {
++              dev_err(&pdev->dev, "apb_pclk clock not found.\n");
++              return ERR_CAST(plat_dat->stmmac_clk);
++      }
++
++      err = clk_prepare_enable(plat_dat->stmmac_clk);
++      if (err < 0) {
++              dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
++                      err);
++              return ERR_PTR(err);
++      }
++
++      plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
++      if (IS_ERR(plat_dat->pclk)) {
++              dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
++              err = PTR_ERR(plat_dat->pclk);
++              goto disable;
++      }
++
++      err = clk_prepare_enable(plat_dat->pclk);
++      if (err < 0) {
++              dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
++                      err);
++              goto disable;
++      }
++
++      return NULL;
++
++disable:
++      clk_disable_unprepare(plat_dat->stmmac_clk);
++      return ERR_PTR(err);
++}
++
++static int dwc_qos_remove(struct platform_device *pdev)
++{
++      struct net_device *ndev = platform_get_drvdata(pdev);
++      struct stmmac_priv *priv = netdev_priv(ndev);
++
++      clk_disable_unprepare(priv->plat->pclk);
++      clk_disable_unprepare(priv->plat->stmmac_clk);
++
++      return 0;
++}
++
++#define SDMEMCOMPPADCTRL 0x8800
++#define  SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)
++
++#define AUTO_CAL_CONFIG 0x8804
++#define  AUTO_CAL_CONFIG_START BIT(31)
++#define  AUTO_CAL_CONFIG_ENABLE BIT(29)
++
++#define AUTO_CAL_STATUS 0x880c
++#define  AUTO_CAL_STATUS_ACTIVE BIT(31)
++
++static void tegra_eqos_fix_speed(void *priv, unsigned int speed)
++{
++      struct tegra_eqos *eqos = priv;
++      unsigned long rate = 125000000;
++      bool needs_calibration = false;
++      u32 value;
++      int err;
++
++      switch (speed) {
++      case SPEED_1000:
++              needs_calibration = true;
++              rate = 125000000;
++              break;
++
++      case SPEED_100:
++              needs_calibration = true;
++              rate = 25000000;
++              break;
++
++      case SPEED_10:
++              rate = 2500000;
++              break;
++
++      default:
++              dev_err(eqos->dev, "invalid speed %u\n", speed);
++              break;
++      }
++
++      if (needs_calibration) {
++              /* calibrate */
++              value = readl(eqos->regs + SDMEMCOMPPADCTRL);
++              value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
++              writel(value, eqos->regs + SDMEMCOMPPADCTRL);
++
++              udelay(1);
++
++              value = readl(eqos->regs + AUTO_CAL_CONFIG);
++              value |= AUTO_CAL_CONFIG_START | AUTO_CAL_CONFIG_ENABLE;
++              writel(value, eqos->regs + AUTO_CAL_CONFIG);
++
++              err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
++                                              value,
++                                              value & AUTO_CAL_STATUS_ACTIVE,
++                                              1, 10);
++              if (err < 0) {
++                      dev_err(eqos->dev, "calibration did not start\n");
++                      goto failed;
++              }
++
++              err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
++                                              value,
++                                              (value & AUTO_CAL_STATUS_ACTIVE) == 0,
++                                              20, 200);
++              if (err < 0) {
++                      dev_err(eqos->dev, "calibration didn't finish\n");
++                      goto failed;
++              }
++
++      failed:
++              value = readl(eqos->regs + SDMEMCOMPPADCTRL);
++              value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
++              writel(value, eqos->regs + SDMEMCOMPPADCTRL);
++      } else {
++              value = readl(eqos->regs + AUTO_CAL_CONFIG);
++              value &= ~AUTO_CAL_CONFIG_ENABLE;
++              writel(value, eqos->regs + AUTO_CAL_CONFIG);
++      }
++
++      err = clk_set_rate(eqos->clk_tx, rate);
++      if (err < 0)
++              dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
++}
++
++static int tegra_eqos_init(struct platform_device *pdev, void *priv)
++{
++      struct tegra_eqos *eqos = priv;
++      unsigned long rate;
++      u32 value;
++
++      rate = clk_get_rate(eqos->clk_slave);
++
++      value = (rate / 1000000) - 1;
++      writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
++
++      return 0;
++}
++
++static void *tegra_eqos_probe(struct platform_device *pdev,
++                            struct plat_stmmacenet_data *data,
++                            struct stmmac_resources *res)
++{
++      struct tegra_eqos *eqos;
++      int err;
++
++      eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL);
++      if (!eqos) {
++              err = -ENOMEM;
++              goto error;
++      }
++
++      eqos->dev = &pdev->dev;
++      eqos->regs = res->addr;
++
++      eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
++      if (IS_ERR(eqos->clk_master)) {
++              err = PTR_ERR(eqos->clk_master);
++              goto error;
++      }
++
++      err = clk_prepare_enable(eqos->clk_master);
++      if (err < 0)
++              goto error;
++
++      eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
++      if (IS_ERR(eqos->clk_slave)) {
++              err = PTR_ERR(eqos->clk_slave);
++              goto disable_master;
++      }
++
++      data->stmmac_clk = eqos->clk_slave;
++
++      err = clk_prepare_enable(eqos->clk_slave);
++      if (err < 0)
++              goto disable_master;
++
++      eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
++      if (IS_ERR(eqos->clk_rx)) {
++              err = PTR_ERR(eqos->clk_rx);
++              goto disable_slave;
++      }
++
++      err = clk_prepare_enable(eqos->clk_rx);
++      if (err < 0)
++              goto disable_slave;
++
++      eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
++      if (IS_ERR(eqos->clk_tx)) {
++              err = PTR_ERR(eqos->clk_tx);
++              goto disable_rx;
++      }
++
++      err = clk_prepare_enable(eqos->clk_tx);
++      if (err < 0)
++              goto disable_rx;
++
++      eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
++      if (IS_ERR(eqos->reset)) {
++              err = PTR_ERR(eqos->reset);
++              goto disable_tx;
++      }
++
++      usleep_range(2000, 4000);
++      gpiod_set_value(eqos->reset, 0);
++
++      eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
++      if (IS_ERR(eqos->rst)) {
++              err = PTR_ERR(eqos->rst);
++              goto reset_phy;
++      }
++
++      err = reset_control_assert(eqos->rst);
++      if (err < 0)
++              goto reset_phy;
++
++      usleep_range(2000, 4000);
++
++      err = reset_control_deassert(eqos->rst);
++      if (err < 0)
++              goto reset_phy;
++
++      usleep_range(2000, 4000);
++
++      data->fix_mac_speed = tegra_eqos_fix_speed;
++      data->init = tegra_eqos_init;
++      data->bsp_priv = eqos;
++
++      err = tegra_eqos_init(pdev, eqos);
++      if (err < 0)
++              goto reset;
++
++out:
++      return eqos;
++
++reset:
++      reset_control_assert(eqos->rst);
++reset_phy:
++      gpiod_set_value(eqos->reset, 1);
++disable_tx:
++      clk_disable_unprepare(eqos->clk_tx);
++disable_rx:
++      clk_disable_unprepare(eqos->clk_rx);
++disable_slave:
++      clk_disable_unprepare(eqos->clk_slave);
++disable_master:
++      clk_disable_unprepare(eqos->clk_master);
++error:
++      eqos = ERR_PTR(err);
++      goto out;
++}
++
++static int tegra_eqos_remove(struct platform_device *pdev)
++{
++      struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev);
++
++      reset_control_assert(eqos->rst);
++      gpiod_set_value(eqos->reset, 1);
++      clk_disable_unprepare(eqos->clk_tx);
++      clk_disable_unprepare(eqos->clk_rx);
++      clk_disable_unprepare(eqos->clk_slave);
++      clk_disable_unprepare(eqos->clk_master);
++
++      return 0;
++}
++
++struct dwc_eth_dwmac_data {
++      void *(*probe)(struct platform_device *pdev,
++                     struct plat_stmmacenet_data *data,
++                     struct stmmac_resources *res);
++      int (*remove)(struct platform_device *pdev);
++};
++
++static const struct dwc_eth_dwmac_data dwc_qos_data = {
++      .probe = dwc_qos_probe,
++      .remove = dwc_qos_remove,
++};
++
++static const struct dwc_eth_dwmac_data tegra_eqos_data = {
++      .probe = tegra_eqos_probe,
++      .remove = tegra_eqos_remove,
++};
++
+ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
+ {
++      const struct dwc_eth_dwmac_data *data;
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
+       struct resource *res;
++      void *priv;
+       int ret;
++      data = of_device_get_match_data(&pdev->dev);
++
+       memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
+       /**
+@@ -138,39 +452,26 @@ static int dwc_eth_dwmac_probe(struct pl
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
+-      plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
+-      if (IS_ERR(plat_dat->stmmac_clk)) {
+-              dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+-              ret = PTR_ERR(plat_dat->stmmac_clk);
+-              plat_dat->stmmac_clk = NULL;
+-              goto err_remove_config_dt;
++      priv = data->probe(pdev, plat_dat, &stmmac_res);
++      if (IS_ERR(priv)) {
++              ret = PTR_ERR(priv);
++              dev_err(&pdev->dev, "failed to probe subdriver: %d\n", ret);
++              goto remove_config;
+       }
+-      clk_prepare_enable(plat_dat->stmmac_clk);
+-
+-      plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+-      if (IS_ERR(plat_dat->pclk)) {
+-              dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+-              ret = PTR_ERR(plat_dat->pclk);
+-              plat_dat->pclk = NULL;
+-              goto err_out_clk_dis_phy;
+-      }
+-      clk_prepare_enable(plat_dat->pclk);
+       ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
+       if (ret)
+-              goto err_out_clk_dis_aper;
++              goto remove;
+       ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+       if (ret)
+-              goto err_out_clk_dis_aper;
++              goto remove;
+-      return 0;
++      return ret;
+-err_out_clk_dis_aper:
+-      clk_disable_unprepare(plat_dat->pclk);
+-err_out_clk_dis_phy:
+-      clk_disable_unprepare(plat_dat->stmmac_clk);
+-err_remove_config_dt:
++remove:
++      data->remove(pdev);
++remove_config:
+       stmmac_remove_config_dt(pdev, plat_dat);
+       return ret;
+@@ -178,11 +479,29 @@ err_remove_config_dt:
+ static int dwc_eth_dwmac_remove(struct platform_device *pdev)
+ {
+-      return stmmac_pltfr_remove(pdev);
++      struct net_device *ndev = platform_get_drvdata(pdev);
++      struct stmmac_priv *priv = netdev_priv(ndev);
++      const struct dwc_eth_dwmac_data *data;
++      int err;
++
++      data = of_device_get_match_data(&pdev->dev);
++
++      err = stmmac_dvr_remove(&pdev->dev);
++      if (err < 0)
++              dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
++
++      err = data->remove(pdev);
++      if (err < 0)
++              dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err);
++
++      stmmac_remove_config_dt(pdev, priv->plat);
++
++      return err;
+ }
+ static const struct of_device_id dwc_eth_dwmac_match[] = {
+-      { .compatible = "snps,dwc-qos-ethernet-4.10", },
++      { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
++      { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
+       { }
+ };
+ MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
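
With two compatibles now served by one platform driver, the glue-specific probe/remove callbacks travel in the .data field of the of_device_id table and are fetched with of_device_get_match_data(). A condensed sketch of the lookup (error handling trimmed; dwc_eth_dwmac_data as defined in the hunk above):

    #include <linux/errno.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct dwc_eth_dwmac_data;

    /* Sketch only: the of_device_id entry that matched the compatible
     * string supplies the per-SoC callbacks through its .data pointer.
     */
    static int example_lookup_glue(struct platform_device *pdev)
    {
            const struct dwc_eth_dwmac_data *data;

            data = of_device_get_match_data(&pdev->dev);
            if (!data)
                    return -EINVAL;

            /* data->probe()/data->remove() now point at the dwc_qos_* or
             * tegra_eqos_* implementations, depending on the match.
             */
            return 0;
    }
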
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -74,6 +74,10 @@ struct rk_priv_data {
+ #define GRF_BIT(nr)   (BIT(nr) | BIT(nr+16))
+ #define GRF_CLR_BIT(nr)       (BIT(nr+16))
++#define DELAY_ENABLE(soc, tx, rx) \
++      (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
++       ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
++
+ #define RK3228_GRF_MAC_CON0   0x0900
+ #define RK3228_GRF_MAC_CON1   0x0904
+@@ -115,8 +119,7 @@ static void rk3228_set_to_rgmii(struct r
+       regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+                    RK3228_GMAC_PHY_INTF_SEL_RGMII |
+                    RK3228_GMAC_RMII_MODE_CLR |
+-                   RK3228_GMAC_RXCLK_DLY_ENABLE |
+-                   RK3228_GMAC_TXCLK_DLY_ENABLE);
++                   DELAY_ENABLE(RK3228, tx_delay, rx_delay));
+       regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
+                    RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) |
+@@ -232,8 +235,7 @@ static void rk3288_set_to_rgmii(struct r
+                    RK3288_GMAC_PHY_INTF_SEL_RGMII |
+                    RK3288_GMAC_RMII_MODE_CLR);
+       regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
+-                   RK3288_GMAC_RXCLK_DLY_ENABLE |
+-                   RK3288_GMAC_TXCLK_DLY_ENABLE |
++                   DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
+                    RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) |
+                    RK3288_GMAC_CLK_TX_DL_CFG(tx_delay));
+ }
+@@ -460,8 +462,7 @@ static void rk3366_set_to_rgmii(struct r
+                    RK3366_GMAC_PHY_INTF_SEL_RGMII |
+                    RK3366_GMAC_RMII_MODE_CLR);
+       regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
+-                   RK3366_GMAC_RXCLK_DLY_ENABLE |
+-                   RK3366_GMAC_TXCLK_DLY_ENABLE |
++                   DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
+                    RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) |
+                    RK3366_GMAC_CLK_TX_DL_CFG(tx_delay));
+ }
+@@ -572,8 +573,7 @@ static void rk3368_set_to_rgmii(struct r
+                    RK3368_GMAC_PHY_INTF_SEL_RGMII |
+                    RK3368_GMAC_RMII_MODE_CLR);
+       regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
+-                   RK3368_GMAC_RXCLK_DLY_ENABLE |
+-                   RK3368_GMAC_TXCLK_DLY_ENABLE |
++                   DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
+                    RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) |
+                    RK3368_GMAC_CLK_TX_DL_CFG(tx_delay));
+ }
+@@ -684,8 +684,7 @@ static void rk3399_set_to_rgmii(struct r
+                    RK3399_GMAC_PHY_INTF_SEL_RGMII |
+                    RK3399_GMAC_RMII_MODE_CLR);
+       regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
+-                   RK3399_GMAC_RXCLK_DLY_ENABLE |
+-                   RK3399_GMAC_TXCLK_DLY_ENABLE |
++                   DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
+                    RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) |
+                    RK3399_GMAC_CLK_TX_DL_CFG(tx_delay));
+ }
+@@ -985,14 +984,29 @@ static int rk_gmac_powerup(struct rk_pri
+               return ret;
+       /*rmii or rgmii*/
+-      if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
++      switch (bsp_priv->phy_iface) {
++      case PHY_INTERFACE_MODE_RGMII:
+               dev_info(dev, "init for RGMII\n");
+               bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
+                                           bsp_priv->rx_delay);
+-      } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
++              break;
++      case PHY_INTERFACE_MODE_RGMII_ID:
++              dev_info(dev, "init for RGMII_ID\n");
++              bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0);
++              break;
++      case PHY_INTERFACE_MODE_RGMII_RXID:
++              dev_info(dev, "init for RGMII_RXID\n");
++              bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0);
++              break;
++      case PHY_INTERFACE_MODE_RGMII_TXID:
++              dev_info(dev, "init for RGMII_TXID\n");
++              bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay);
++              break;
++      case PHY_INTERFACE_MODE_RMII:
+               dev_info(dev, "init for RMII\n");
+               bsp_priv->ops->set_to_rmii(bsp_priv);
+-      } else {
++              break;
++      default:
+               dev_err(dev, "NO interface defined!\n");
+       }
+@@ -1022,12 +1036,19 @@ static void rk_fix_speed(void *priv, uns
+       struct rk_priv_data *bsp_priv = priv;
+       struct device *dev = &bsp_priv->pdev->dev;
+-      if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
++      switch (bsp_priv->phy_iface) {
++      case PHY_INTERFACE_MODE_RGMII:
++      case PHY_INTERFACE_MODE_RGMII_ID:
++      case PHY_INTERFACE_MODE_RGMII_RXID:
++      case PHY_INTERFACE_MODE_RGMII_TXID:
+               bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
+-      else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
++              break;
++      case PHY_INTERFACE_MODE_RMII:
+               bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
+-      else
++              break;
++      default:
+               dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
++      }
+ }
+ static int rk_gmac_probe(struct platform_device *pdev)
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+@@ -216,7 +216,8 @@ static void dwmac1000_set_filter(struct
+ static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+-                              unsigned int fc, unsigned int pause_time)
++                              unsigned int fc, unsigned int pause_time,
++                              u32 tx_cnt)
+ {
+       void __iomem *ioaddr = hw->pcsr;
+       /* Set flow such that DZPQ in Mac Register 6 is 0,
+@@ -412,7 +413,8 @@ static void dwmac1000_get_adv_lp(void __
+       dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
+ }
+-static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
++static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
++                          u32 rx_queues, u32 tx_queues)
+ {
+       u32 value = readl(ioaddr + GMAC_DEBUG);
+@@ -488,6 +490,7 @@ static void dwmac1000_debug(void __iomem
+ static const struct stmmac_ops dwmac1000_ops = {
+       .core_init = dwmac1000_core_init,
++      .set_mac = stmmac_set_mac,
+       .rx_ipc = dwmac1000_rx_ipc_enable,
+       .dump_regs = dwmac1000_dump_regs,
+       .host_irq_status = dwmac1000_irq_status,
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+@@ -247,7 +247,8 @@ static void dwmac1000_get_hw_feature(voi
+       dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
+ }
+-static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
++static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
++                                u32 number_chan)
+ {
+       writel(riwt, ioaddr + DMA_RX_WATCHDOG);
+ }
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+@@ -131,7 +131,8 @@ static void dwmac100_set_filter(struct m
+ }
+ static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+-                             unsigned int fc, unsigned int pause_time)
++                             unsigned int fc, unsigned int pause_time,
++                             u32 tx_cnt)
+ {
+       void __iomem *ioaddr = hw->pcsr;
+       unsigned int flow = MAC_FLOW_CTRL_ENABLE;
+@@ -149,6 +150,7 @@ static void dwmac100_pmt(struct mac_devi
+ static const struct stmmac_ops dwmac100_ops = {
+       .core_init = dwmac100_core_init,
++      .set_mac = stmmac_set_mac,
+       .rx_ipc = dwmac100_rx_ipc_enable,
+       .dump_regs = dwmac100_dump_mac_regs,
+       .host_irq_status = dwmac100_irq_status,
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+@@ -22,9 +22,15 @@
+ #define GMAC_HASH_TAB_32_63           0x00000014
+ #define GMAC_RX_FLOW_CTRL             0x00000090
+ #define GMAC_QX_TX_FLOW_CTRL(x)               (0x70 + x * 4)
++#define GMAC_TXQ_PRTY_MAP0            0x98
++#define GMAC_TXQ_PRTY_MAP1            0x9C
+ #define GMAC_RXQ_CTRL0                        0x000000a0
++#define GMAC_RXQ_CTRL1                        0x000000a4
++#define GMAC_RXQ_CTRL2                        0x000000a8
++#define GMAC_RXQ_CTRL3                        0x000000ac
+ #define GMAC_INT_STATUS                       0x000000b0
+ #define GMAC_INT_EN                   0x000000b4
++#define GMAC_1US_TIC_COUNTER          0x000000dc
+ #define GMAC_PCS_BASE                 0x000000e0
+ #define GMAC_PHYIF_CONTROL_STATUS     0x000000f8
+ #define GMAC_PMT                      0x000000c0
+@@ -38,6 +44,22 @@
+ #define GMAC_ADDR_HIGH(reg)           (0x300 + reg * 8)
+ #define GMAC_ADDR_LOW(reg)            (0x304 + reg * 8)
++/* RX Queues Routing */
++#define GMAC_RXQCTRL_AVCPQ_MASK               GENMASK(2, 0)
++#define GMAC_RXQCTRL_AVCPQ_SHIFT      0
++#define GMAC_RXQCTRL_PTPQ_MASK                GENMASK(6, 4)
++#define GMAC_RXQCTRL_PTPQ_SHIFT               4
++#define GMAC_RXQCTRL_DCBCPQ_MASK      GENMASK(10, 8)
++#define GMAC_RXQCTRL_DCBCPQ_SHIFT     8
++#define GMAC_RXQCTRL_UPQ_MASK         GENMASK(14, 12)
++#define GMAC_RXQCTRL_UPQ_SHIFT                12
++#define GMAC_RXQCTRL_MCBCQ_MASK               GENMASK(18, 16)
++#define GMAC_RXQCTRL_MCBCQ_SHIFT      16
++#define GMAC_RXQCTRL_MCBCQEN          BIT(20)
++#define GMAC_RXQCTRL_MCBCQEN_SHIFT    20
++#define GMAC_RXQCTRL_TACPQE           BIT(21)
++#define GMAC_RXQCTRL_TACPQE_SHIFT     21
++
+ /* MAC Packet Filtering */
+ #define GMAC_PACKET_FILTER_PR         BIT(0)
+ #define GMAC_PACKET_FILTER_HMC                BIT(2)
+@@ -53,6 +75,14 @@
+ /* MAC Flow Control RX */
+ #define GMAC_RX_FLOW_CTRL_RFE         BIT(0)
++/* RX Queues Priorities */
++#define GMAC_RXQCTRL_PSRQX_MASK(x)    GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
++#define GMAC_RXQCTRL_PSRQX_SHIFT(x)   ((x) * 8)
++
++/* TX Queues Priorities */
++#define GMAC_TXQCTRL_PSTQX_MASK(x)    GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
++#define GMAC_TXQCTRL_PSTQX_SHIFT(x)   ((x) * 8)
++
+ /* MAC Flow Control TX */
+ #define GMAC_TX_FLOW_CTRL_TFE         BIT(1)
+ #define GMAC_TX_FLOW_CTRL_PT_SHIFT    16
+@@ -148,6 +178,8 @@ enum power_event {
+ /* MAC HW features1 bitmap */
+ #define GMAC_HW_FEAT_AVSEL            BIT(20)
+ #define GMAC_HW_TSOEN                 BIT(18)
++#define GMAC_HW_TXFIFOSIZE            GENMASK(10, 6)
++#define GMAC_HW_RXFIFOSIZE            GENMASK(4, 0)
+ /* MAC HW features2 bitmap */
+ #define GMAC_HW_FEAT_TXCHCNT          GENMASK(21, 18)
+@@ -161,8 +193,25 @@ enum power_event {
+ #define GMAC_HI_REG_AE                        BIT(31)
+ /*  MTL registers */
++#define MTL_OPERATION_MODE            0x00000c00
++#define MTL_OPERATION_SCHALG_MASK     GENMASK(6, 5)
++#define MTL_OPERATION_SCHALG_WRR      (0x0 << 5)
++#define MTL_OPERATION_SCHALG_WFQ      (0x1 << 5)
++#define MTL_OPERATION_SCHALG_DWRR     (0x2 << 5)
++#define MTL_OPERATION_SCHALG_SP               (0x3 << 5)
++#define MTL_OPERATION_RAA             BIT(2)
++#define MTL_OPERATION_RAA_SP          (0x0 << 2)
++#define MTL_OPERATION_RAA_WSP         (0x1 << 2)
++
+ #define MTL_INT_STATUS                        0x00000c20
+-#define MTL_INT_Q0                    BIT(0)
++#define MTL_INT_QX(x)                 BIT(x)
++
++#define MTL_RXQ_DMA_MAP0              0x00000c30 /* queue 0 to 3 */
++#define MTL_RXQ_DMA_MAP1              0x00000c34 /* queue 4 to 7 */
++#define MTL_RXQ_DMA_Q04MDMACH_MASK    GENMASK(3, 0)
++#define MTL_RXQ_DMA_Q04MDMACH(x)      ((x) << 0)
++#define MTL_RXQ_DMA_QXMDMACH_MASK(x)  GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
++#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q)))
+ #define MTL_CHAN_BASE_ADDR            0x00000d00
+ #define MTL_CHAN_BASE_OFFSET          0x40
+@@ -180,6 +229,7 @@ enum power_event {
+ #define MTL_OP_MODE_TSF                       BIT(1)
+ #define MTL_OP_MODE_TQS_MASK          GENMASK(24, 16)
++#define MTL_OP_MODE_TQS_SHIFT         16
+ #define MTL_OP_MODE_TTC_MASK          0x70
+ #define MTL_OP_MODE_TTC_SHIFT         4
+@@ -193,6 +243,17 @@ enum power_event {
+ #define MTL_OP_MODE_TTC_384           (6 << MTL_OP_MODE_TTC_SHIFT)
+ #define MTL_OP_MODE_TTC_512           (7 << MTL_OP_MODE_TTC_SHIFT)
++#define MTL_OP_MODE_RQS_MASK          GENMASK(29, 20)
++#define MTL_OP_MODE_RQS_SHIFT         20
++
++#define MTL_OP_MODE_RFD_MASK          GENMASK(19, 14)
++#define MTL_OP_MODE_RFD_SHIFT         14
++
++#define MTL_OP_MODE_RFA_MASK          GENMASK(13, 8)
++#define MTL_OP_MODE_RFA_SHIFT         8
++
++#define MTL_OP_MODE_EHFC              BIT(7)
++
+ #define MTL_OP_MODE_RTC_MASK          0x18
+ #define MTL_OP_MODE_RTC_SHIFT         3
+@@ -201,6 +262,46 @@ enum power_event {
+ #define MTL_OP_MODE_RTC_96            (2 << MTL_OP_MODE_RTC_SHIFT)
+ #define MTL_OP_MODE_RTC_128           (3 << MTL_OP_MODE_RTC_SHIFT)
++/* MTL ETS Control register */
++#define MTL_ETS_CTRL_BASE_ADDR                0x00000d10
++#define MTL_ETS_CTRL_BASE_OFFSET      0x40
++#define MTL_ETSX_CTRL_BASE_ADDR(x)    (MTL_ETS_CTRL_BASE_ADDR + \
++                                      ((x) * MTL_ETS_CTRL_BASE_OFFSET))
++
++#define MTL_ETS_CTRL_CC                       BIT(3)
++#define MTL_ETS_CTRL_AVALG            BIT(2)
++
++/* MTL Queue Quantum Weight */
++#define MTL_TXQ_WEIGHT_BASE_ADDR      0x00000d18
++#define MTL_TXQ_WEIGHT_BASE_OFFSET    0x40
++#define MTL_TXQX_WEIGHT_BASE_ADDR(x)  (MTL_TXQ_WEIGHT_BASE_ADDR + \
++                                      ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET))
++#define MTL_TXQ_WEIGHT_ISCQW_MASK     GENMASK(20, 0)
++
++/* MTL sendSlopeCredit register */
++#define MTL_SEND_SLP_CRED_BASE_ADDR   0x00000d1c
++#define MTL_SEND_SLP_CRED_OFFSET      0x40
++#define MTL_SEND_SLP_CREDX_BASE_ADDR(x)       (MTL_SEND_SLP_CRED_BASE_ADDR + \
++                                      ((x) * MTL_SEND_SLP_CRED_OFFSET))
++
++#define MTL_SEND_SLP_CRED_SSC_MASK    GENMASK(13, 0)
++
++/* MTL hiCredit register */
++#define MTL_HIGH_CRED_BASE_ADDR               0x00000d20
++#define MTL_HIGH_CRED_OFFSET          0x40
++#define MTL_HIGH_CREDX_BASE_ADDR(x)   (MTL_HIGH_CRED_BASE_ADDR + \
++                                      ((x) * MTL_HIGH_CRED_OFFSET))
++
++#define MTL_HIGH_CRED_HC_MASK         GENMASK(28, 0)
++
++/* MTL loCredit register */
++#define MTL_LOW_CRED_BASE_ADDR                0x00000d24
++#define MTL_LOW_CRED_OFFSET           0x40
++#define MTL_LOW_CREDX_BASE_ADDR(x)    (MTL_LOW_CRED_BASE_ADDR + \
++                                      ((x) * MTL_LOW_CRED_OFFSET))
++
++#define MTL_HIGH_CRED_LC_MASK         GENMASK(28, 0)
++
+ /*  MTL debug */
+ #define MTL_DEBUG_TXSTSFSTS           BIT(5)
+ #define MTL_DEBUG_TXFSTS              BIT(4)
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -59,17 +59,211 @@ static void dwmac4_core_init(struct mac_
+       writel(value, ioaddr + GMAC_INT_EN);
+ }
+-static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue)
++static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
++                                 u8 mode, u32 queue)
+ {
+       void __iomem *ioaddr = hw->pcsr;
+       u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
+       value &= GMAC_RX_QUEUE_CLEAR(queue);
+-      value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
++      if (mode == MTL_QUEUE_AVB)
++              value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
++      else if (mode == MTL_QUEUE_DCB)
++              value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
+       writel(value, ioaddr + GMAC_RXQ_CTRL0);
+ }
++static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
++                                   u32 prio, u32 queue)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 base_register;
++      u32 value;
++
++      base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
++
++      value = readl(ioaddr + base_register);
++
++      value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
++      value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
++                                              GMAC_RXQCTRL_PSRQX_MASK(queue);
++      writel(value, ioaddr + base_register);
++}
++
++static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
++                                   u32 prio, u32 queue)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 base_register;
++      u32 value;
++
++      base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
++
++      value = readl(ioaddr + base_register);
++
++      value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
++      value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
++                                              GMAC_TXQCTRL_PSTQX_MASK(queue);
++
++      writel(value, ioaddr + base_register);
++}
++
++static void dwmac4_tx_queue_routing(struct mac_device_info *hw,
++                                  u8 packet, u32 queue)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 value;
++
++      const struct stmmac_rx_routing route_possibilities[] = {
++              { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
++              { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
++              { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
++              { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
++              { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
++      };
++
++      value = readl(ioaddr + GMAC_RXQ_CTRL1);
++
++      /* routing configuration */
++      value &= ~route_possibilities[packet - 1].reg_mask;
++      value |= (queue << route_possibilities[packet-1].reg_shift) &
++               route_possibilities[packet - 1].reg_mask;
++
++      /* some packets require extra ops */
++      if (packet == PACKET_AVCPQ) {
++              value &= ~GMAC_RXQCTRL_TACPQE;
++              value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
++      } else if (packet == PACKET_MCBCQ) {
++              value &= ~GMAC_RXQCTRL_MCBCQEN;
++              value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
++      }
++
++      writel(value, ioaddr + GMAC_RXQ_CTRL1);
++}
++
++static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
++                                        u32 rx_alg)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 value = readl(ioaddr + MTL_OPERATION_MODE);
++
++      value &= ~MTL_OPERATION_RAA;
++      switch (rx_alg) {
++      case MTL_RX_ALGORITHM_SP:
++              value |= MTL_OPERATION_RAA_SP;
++              break;
++      case MTL_RX_ALGORITHM_WSP:
++              value |= MTL_OPERATION_RAA_WSP;
++              break;
++      default:
++              break;
++      }
++
++      writel(value, ioaddr + MTL_OPERATION_MODE);
++}
++
++static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
++                                        u32 tx_alg)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 value = readl(ioaddr + MTL_OPERATION_MODE);
++
++      value &= ~MTL_OPERATION_SCHALG_MASK;
++      switch (tx_alg) {
++      case MTL_TX_ALGORITHM_WRR:
++              value |= MTL_OPERATION_SCHALG_WRR;
++              break;
++      case MTL_TX_ALGORITHM_WFQ:
++              value |= MTL_OPERATION_SCHALG_WFQ;
++              break;
++      case MTL_TX_ALGORITHM_DWRR:
++              value |= MTL_OPERATION_SCHALG_DWRR;
++              break;
++      case MTL_TX_ALGORITHM_SP:
++              value |= MTL_OPERATION_SCHALG_SP;
++              break;
++      default:
++              break;
++      }
++}
++
++static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
++                                         u32 weight, u32 queue)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
++
++      value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
++      value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
++      writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
++}
++
++static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 value;
++
++      if (queue < 4)
++              value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
++      else
++              value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
++
++      if (queue == 0 || queue == 4) {
++              value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
++              value |= MTL_RXQ_DMA_Q04MDMACH(chan);
++      } else {
++              value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
++              value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
++      }
++
++      if (queue < 4)
++              writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
++      else
++              writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
++}
++
++static void dwmac4_config_cbs(struct mac_device_info *hw,
++                            u32 send_slope, u32 idle_slope,
++                            u32 high_credit, u32 low_credit, u32 queue)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 value;
++
++      pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
++      pr_debug("\tsend_slope: 0x%08x\n", send_slope);
++      pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
++      pr_debug("\thigh_credit: 0x%08x\n", high_credit);
++      pr_debug("\tlow_credit: 0x%08x\n", low_credit);
++
++      /* enable AV algorithm */
++      value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
++      value |= MTL_ETS_CTRL_AVALG;
++      value |= MTL_ETS_CTRL_CC;
++      writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
++
++      /* configure send slope */
++      value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
++      value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
++      value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
++      writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
++
++      /* configure idle slope (same register as tx weight) */
++      dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
++
++      /* configure high credit */
++      value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
++      value &= ~MTL_HIGH_CRED_HC_MASK;
++      value |= high_credit & MTL_HIGH_CRED_HC_MASK;
++      writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
++
++      /* configure low credit */
++      value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
++      value &= ~MTL_HIGH_CRED_LC_MASK;
++      value |= low_credit & MTL_HIGH_CRED_LC_MASK;
++      writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
++}
++
+ static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
+ {
+       void __iomem *ioaddr = hw->pcsr;
+@@ -251,11 +445,12 @@ static void dwmac4_set_filter(struct mac
+ }
+ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+-                           unsigned int fc, unsigned int pause_time)
++                           unsigned int fc, unsigned int pause_time,
++                           u32 tx_cnt)
+ {
+       void __iomem *ioaddr = hw->pcsr;
+-      u32 channel = STMMAC_CHAN0;     /* FIXME */
+       unsigned int flow = 0;
++      u32 queue = 0;
+       pr_debug("GMAC Flow-Control:\n");
+       if (fc & FLOW_RX) {
+@@ -265,13 +460,18 @@ static void dwmac4_flow_ctrl(struct mac_
+       }
+       if (fc & FLOW_TX) {
+               pr_debug("\tTransmit Flow-Control ON\n");
+-              flow |= GMAC_TX_FLOW_CTRL_TFE;
+-              writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+-              if (duplex) {
++              if (duplex)
+                       pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
+-                      flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
+-                      writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
++
++              for (queue = 0; queue < tx_cnt; queue++) {
++                      flow |= GMAC_TX_FLOW_CTRL_TFE;
++
++                      if (duplex)
++                              flow |=
++                              (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
++
++                      writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
+               }
+       }
+ }
+@@ -325,11 +525,34 @@ static void dwmac4_phystatus(void __iome
+       }
+ }
++static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 mtl_int_qx_status;
++      int ret = 0;
++
++      mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
++
++      /* Check MTL Interrupt */
++      if (mtl_int_qx_status & MTL_INT_QX(chan)) {
++              /* read Queue x Interrupt status */
++              u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
++
++              if (status & MTL_RX_OVERFLOW_INT) {
++                      /*  clear Interrupt */
++                      writel(status | MTL_RX_OVERFLOW_INT,
++                             ioaddr + MTL_CHAN_INT_CTRL(chan));
++                      ret = CORE_IRQ_MTL_RX_OVERFLOW;
++              }
++      }
++
++      return ret;
++}
++
+ static int dwmac4_irq_status(struct mac_device_info *hw,
+                            struct stmmac_extra_stats *x)
+ {
+       void __iomem *ioaddr = hw->pcsr;
+-      u32 mtl_int_qx_status;
+       u32 intr_status;
+       int ret = 0;
+@@ -348,20 +571,6 @@ static int dwmac4_irq_status(struct mac_
+               x->irq_receive_pmt_irq_n++;
+       }
+-      mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
+-      /* Check MTL Interrupt: Currently only one queue is used: Q0. */
+-      if (mtl_int_qx_status & MTL_INT_Q0) {
+-              /* read Queue 0 Interrupt status */
+-              u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
+-
+-              if (status & MTL_RX_OVERFLOW_INT) {
+-                      /*  clear Interrupt */
+-                      writel(status | MTL_RX_OVERFLOW_INT,
+-                             ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
+-                      ret = CORE_IRQ_MTL_RX_OVERFLOW;
+-              }
+-      }
+-
+       dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
+       if (intr_status & PCS_RGSMIIIS_IRQ)
+               dwmac4_phystatus(ioaddr, x);
+@@ -369,64 +578,69 @@ static int dwmac4_irq_status(struct mac_
+       return ret;
+ }
+-static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
++static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
++                       u32 rx_queues, u32 tx_queues)
+ {
+       u32 value;
++      u32 queue;
+-      /*  Currently only channel 0 is supported */
+-      value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
++      for (queue = 0; queue < tx_queues; queue++) {
++              value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
+-      if (value & MTL_DEBUG_TXSTSFSTS)
+-              x->mtl_tx_status_fifo_full++;
+-      if (value & MTL_DEBUG_TXFSTS)
+-              x->mtl_tx_fifo_not_empty++;
+-      if (value & MTL_DEBUG_TWCSTS)
+-              x->mmtl_fifo_ctrl++;
+-      if (value & MTL_DEBUG_TRCSTS_MASK) {
+-              u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
+-                           >> MTL_DEBUG_TRCSTS_SHIFT;
+-              if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
+-                      x->mtl_tx_fifo_read_ctrl_write++;
+-              else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
+-                      x->mtl_tx_fifo_read_ctrl_wait++;
+-              else if (trcsts == MTL_DEBUG_TRCSTS_READ)
+-                      x->mtl_tx_fifo_read_ctrl_read++;
+-              else
+-                      x->mtl_tx_fifo_read_ctrl_idle++;
++              if (value & MTL_DEBUG_TXSTSFSTS)
++                      x->mtl_tx_status_fifo_full++;
++              if (value & MTL_DEBUG_TXFSTS)
++                      x->mtl_tx_fifo_not_empty++;
++              if (value & MTL_DEBUG_TWCSTS)
++                      x->mmtl_fifo_ctrl++;
++              if (value & MTL_DEBUG_TRCSTS_MASK) {
++                      u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
++                                   >> MTL_DEBUG_TRCSTS_SHIFT;
++                      if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
++                              x->mtl_tx_fifo_read_ctrl_write++;
++                      else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
++                              x->mtl_tx_fifo_read_ctrl_wait++;
++                      else if (trcsts == MTL_DEBUG_TRCSTS_READ)
++                              x->mtl_tx_fifo_read_ctrl_read++;
++                      else
++                              x->mtl_tx_fifo_read_ctrl_idle++;
++              }
++              if (value & MTL_DEBUG_TXPAUSED)
++                      x->mac_tx_in_pause++;
+       }
+-      if (value & MTL_DEBUG_TXPAUSED)
+-              x->mac_tx_in_pause++;
+-      value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
++      for (queue = 0; queue < rx_queues; queue++) {
++              value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
+-      if (value & MTL_DEBUG_RXFSTS_MASK) {
+-              u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
+-                           >> MTL_DEBUG_RRCSTS_SHIFT;
+-
+-              if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
+-                      x->mtl_rx_fifo_fill_level_full++;
+-              else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
+-                      x->mtl_rx_fifo_fill_above_thresh++;
+-              else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
+-                      x->mtl_rx_fifo_fill_below_thresh++;
+-              else
+-                      x->mtl_rx_fifo_fill_level_empty++;
+-      }
+-      if (value & MTL_DEBUG_RRCSTS_MASK) {
+-              u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
+-                           MTL_DEBUG_RRCSTS_SHIFT;
+-
+-              if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
+-                      x->mtl_rx_fifo_read_ctrl_flush++;
+-              else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
+-                      x->mtl_rx_fifo_read_ctrl_read_data++;
+-              else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
+-                      x->mtl_rx_fifo_read_ctrl_status++;
+-              else
+-                      x->mtl_rx_fifo_read_ctrl_idle++;
++              if (value & MTL_DEBUG_RXFSTS_MASK) {
++                      u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
++                                   >> MTL_DEBUG_RRCSTS_SHIFT;
++
++                      if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
++                              x->mtl_rx_fifo_fill_level_full++;
++                      else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
++                              x->mtl_rx_fifo_fill_above_thresh++;
++                      else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
++                              x->mtl_rx_fifo_fill_below_thresh++;
++                      else
++                              x->mtl_rx_fifo_fill_level_empty++;
++              }
++              if (value & MTL_DEBUG_RRCSTS_MASK) {
++                      u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
++                                   MTL_DEBUG_RRCSTS_SHIFT;
++
++                      if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
++                              x->mtl_rx_fifo_read_ctrl_flush++;
++                      else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
++                              x->mtl_rx_fifo_read_ctrl_read_data++;
++                      else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
++                              x->mtl_rx_fifo_read_ctrl_status++;
++                      else
++                              x->mtl_rx_fifo_read_ctrl_idle++;
++              }
++              if (value & MTL_DEBUG_RWCSTS)
++                      x->mtl_rx_fifo_ctrl_active++;
+       }
+-      if (value & MTL_DEBUG_RWCSTS)
+-              x->mtl_rx_fifo_ctrl_active++;
+       /* GMAC debug */
+       value = readl(ioaddr + GMAC_DEBUG);
+@@ -455,10 +669,51 @@ static void dwmac4_debug(void __iomem *i
+ static const struct stmmac_ops dwmac4_ops = {
+       .core_init = dwmac4_core_init,
++      .set_mac = stmmac_set_mac,
+       .rx_ipc = dwmac4_rx_ipc_enable,
+       .rx_queue_enable = dwmac4_rx_queue_enable,
++      .rx_queue_prio = dwmac4_rx_queue_priority,
++      .tx_queue_prio = dwmac4_tx_queue_priority,
++      .rx_queue_routing = dwmac4_tx_queue_routing,
++      .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
++      .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
++      .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
++      .map_mtl_to_dma = dwmac4_map_mtl_dma,
++      .config_cbs = dwmac4_config_cbs,
+       .dump_regs = dwmac4_dump_regs,
+       .host_irq_status = dwmac4_irq_status,
++      .host_mtl_irq_status = dwmac4_irq_mtl_status,
++      .flow_ctrl = dwmac4_flow_ctrl,
++      .pmt = dwmac4_pmt,
++      .set_umac_addr = dwmac4_set_umac_addr,
++      .get_umac_addr = dwmac4_get_umac_addr,
++      .set_eee_mode = dwmac4_set_eee_mode,
++      .reset_eee_mode = dwmac4_reset_eee_mode,
++      .set_eee_timer = dwmac4_set_eee_timer,
++      .set_eee_pls = dwmac4_set_eee_pls,
++      .pcs_ctrl_ane = dwmac4_ctrl_ane,
++      .pcs_rane = dwmac4_rane,
++      .pcs_get_adv_lp = dwmac4_get_adv_lp,
++      .debug = dwmac4_debug,
++      .set_filter = dwmac4_set_filter,
++};
++
++static const struct stmmac_ops dwmac410_ops = {
++      .core_init = dwmac4_core_init,
++      .set_mac = stmmac_dwmac4_set_mac,
++      .rx_ipc = dwmac4_rx_ipc_enable,
++      .rx_queue_enable = dwmac4_rx_queue_enable,
++      .rx_queue_prio = dwmac4_rx_queue_priority,
++      .tx_queue_prio = dwmac4_tx_queue_priority,
++      .rx_queue_routing = dwmac4_tx_queue_routing,
++      .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
++      .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
++      .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
++      .map_mtl_to_dma = dwmac4_map_mtl_dma,
++      .config_cbs = dwmac4_config_cbs,
++      .dump_regs = dwmac4_dump_regs,
++      .host_irq_status = dwmac4_irq_status,
++      .host_mtl_irq_status = dwmac4_irq_mtl_status,
+       .flow_ctrl = dwmac4_flow_ctrl,
+       .pmt = dwmac4_pmt,
+       .set_umac_addr = dwmac4_set_umac_addr,
+@@ -492,8 +747,6 @@ struct mac_device_info *dwmac4_setup(voi
+       if (mac->multicast_filter_bins)
+               mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+-      mac->mac = &dwmac4_ops;
+-
+       mac->link.port = GMAC_CONFIG_PS;
+       mac->link.duplex = GMAC_CONFIG_DM;
+       mac->link.speed = GMAC_CONFIG_FES;
+@@ -514,5 +767,10 @@ struct mac_device_info *dwmac4_setup(voi
+       else
+               mac->dma = &dwmac4_dma_ops;
++      if (*synopsys_id >= DWMAC_CORE_4_00)
++              mac->mac = &dwmac410_ops;
++      else
++              mac->mac = &dwmac4_ops;
++
+       return mac;
+ }
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+@@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestam
+ {
+       /* Context type from W/B descriptor must be zero */
+       if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
+-              return -EINVAL;
++              return 0;
+       /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
+       if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
+-              return 0;
++              return 1;
+-      return 1;
++      return 0;
+ }
+ static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
+@@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestam
+               }
+       }
+ exit:
+-      return ret;
++      if (likely(ret == 0))
++              return 1;
++
++      return 0;
+ }
+ static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+@@ -304,12 +307,13 @@ static void dwmac4_rd_init_tx_desc(struc
+ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+                                     bool csum_flag, int mode, bool tx_own,
+-                                    bool ls)
++                                    bool ls, unsigned int tot_pkt_len)
+ {
+       unsigned int tdes3 = le32_to_cpu(p->des3);
+       p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
++      tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK;
+       if (is_fs)
+               tdes3 |= TDES3_FIRST_DESCRIPTOR;
+       else
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+@@ -71,36 +71,48 @@ static void dwmac4_dma_axi(void __iomem
+       writel(value, ioaddr + DMA_SYS_BUS_MODE);
+ }
+-static void dwmac4_dma_init_channel(void __iomem *ioaddr,
+-                                  struct stmmac_dma_cfg *dma_cfg,
+-                                  u32 dma_tx_phy, u32 dma_rx_phy,
+-                                  u32 channel)
++void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
++                           struct stmmac_dma_cfg *dma_cfg,
++                           u32 dma_rx_phy, u32 chan)
+ {
+       u32 value;
+-      int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+-      int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
++      u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+-      /* set PBL for each channels. Currently we affect same configuration
+-       * on each channel
+-       */
+-      value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
+-      if (dma_cfg->pblx8)
+-              value = value | DMA_BUS_MODE_PBL;
+-      writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
++      value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
++      value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
++      writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
++
++      writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
++}
+-      value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
++void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
++                           struct stmmac_dma_cfg *dma_cfg,
++                           u32 dma_tx_phy, u32 chan)
++{
++      u32 value;
++      u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
++
++      value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+       value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+-      writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
++      writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+-      value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
+-      value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+-      writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
++      writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
++}
+-      /* Mask interrupts by writing to CSR7 */
+-      writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
++void dwmac4_dma_init_channel(void __iomem *ioaddr,
++                           struct stmmac_dma_cfg *dma_cfg, u32 chan)
++{
++      u32 value;
++
++      /* common channel control register config */
++      value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
++      if (dma_cfg->pblx8)
++              value = value | DMA_BUS_MODE_PBL;
++      writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+-      writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
+-      writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
++      /* Mask interrupts by writing to CSR7 */
++      writel(DMA_CHAN_INTR_DEFAULT_MASK,
++             ioaddr + DMA_CHAN_INTR_ENA(chan));
+ }
+ static void dwmac4_dma_init(void __iomem *ioaddr,
+@@ -108,7 +120,6 @@ static void dwmac4_dma_init(void __iomem
+                           u32 dma_tx, u32 dma_rx, int atds)
+ {
+       u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+-      int i;
+       /* Set the Fixed burst mode */
+       if (dma_cfg->fixed_burst)
+@@ -122,9 +133,6 @@ static void dwmac4_dma_init(void __iomem
+               value |= DMA_SYS_BUS_AAL;
+       writel(value, ioaddr + DMA_SYS_BUS_MODE);
+-
+-      for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+-              dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
+ }
+ static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
+@@ -174,46 +182,121 @@ static void dwmac4_dump_dma_regs(void __
+               _dwmac4_dump_dma_regs(ioaddr, i, reg_space);
+ }
+-static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
++static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
+ {
+-      int i;
++      u32 chan;
+-      for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+-              writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
++      for (chan = 0; chan < number_chan; chan++)
++              writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan));
+ }
+-static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
+-                                  int rxmode, u32 channel)
++static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
++                                     u32 channel, int fifosz)
+ {
+-      u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
++      unsigned int rqs = fifosz / 256 - 1;
++      u32 mtl_rx_op, mtl_rx_int;
+-      /* Following code only done for channel 0, other channels not yet
+-       * supported.
+-       */
+-      mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
++      mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
++
++      if (mode == SF_DMA_MODE) {
++              pr_debug("GMAC: enable RX store and forward mode\n");
++              mtl_rx_op |= MTL_OP_MODE_RSF;
++      } else {
++              pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
++              mtl_rx_op &= ~MTL_OP_MODE_RSF;
++              mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
++              if (mode <= 32)
++                      mtl_rx_op |= MTL_OP_MODE_RTC_32;
++              else if (mode <= 64)
++                      mtl_rx_op |= MTL_OP_MODE_RTC_64;
++              else if (mode <= 96)
++                      mtl_rx_op |= MTL_OP_MODE_RTC_96;
++              else
++                      mtl_rx_op |= MTL_OP_MODE_RTC_128;
++      }
++
++      mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
++      mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
++
++      /* enable flow control only if each channel gets 4 KiB or more FIFO */
++      if (fifosz >= 4096) {
++              unsigned int rfd, rfa;
++
++              mtl_rx_op |= MTL_OP_MODE_EHFC;
++
++              /* Set Threshold for Activating Flow Control to min 2 frames,
++               * i.e. 1500 * 2 = 3000 bytes.
++               *
++               * Set Threshold for Deactivating Flow Control to min 1 frame,
++               * i.e. 1500 bytes.
++               */
++              switch (fifosz) {
++              case 4096:
++                      /* This violates the above formula because of FIFO size
++                       * limit therefore overflow may occur in spite of this.
++                       */
++                      rfd = 0x03; /* Full-2.5K */
++                      rfa = 0x01; /* Full-1.5K */
++                      break;
++
++              case 8192:
++                      rfd = 0x06; /* Full-4K */
++                      rfa = 0x0a; /* Full-6K */
++                      break;
++
++              case 16384:
++                      rfd = 0x06; /* Full-4K */
++                      rfa = 0x12; /* Full-10K */
++                      break;
++
++              default:
++                      rfd = 0x06; /* Full-4K */
++                      rfa = 0x1e; /* Full-16K */
++                      break;
++              }
+-      if (txmode == SF_DMA_MODE) {
++              mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
++              mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
++
++              mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
++              mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
++      }
++
++      writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
++
++      /* Enable MTL RX overflow */
++      mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
++      writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
++             ioaddr + MTL_CHAN_INT_CTRL(channel));
++}
++
++static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
++                                     u32 channel)
++{
++      u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
++
++      if (mode == SF_DMA_MODE) {
+               pr_debug("GMAC: enable TX store and forward mode\n");
+               /* Transmit COE type 2 cannot be done in cut-through mode. */
+               mtl_tx_op |= MTL_OP_MODE_TSF;
+       } else {
+-              pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
++              pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
+               mtl_tx_op &= ~MTL_OP_MODE_TSF;
+               mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
+               /* Set the transmit threshold */
+-              if (txmode <= 32)
++              if (mode <= 32)
+                       mtl_tx_op |= MTL_OP_MODE_TTC_32;
+-              else if (txmode <= 64)
++              else if (mode <= 64)
+                       mtl_tx_op |= MTL_OP_MODE_TTC_64;
+-              else if (txmode <= 96)
++              else if (mode <= 96)
+                       mtl_tx_op |= MTL_OP_MODE_TTC_96;
+-              else if (txmode <= 128)
++              else if (mode <= 128)
+                       mtl_tx_op |= MTL_OP_MODE_TTC_128;
+-              else if (txmode <= 192)
++              else if (mode <= 192)
+                       mtl_tx_op |= MTL_OP_MODE_TTC_192;
+-              else if (txmode <= 256)
++              else if (mode <= 256)
+                       mtl_tx_op |= MTL_OP_MODE_TTC_256;
+-              else if (txmode <= 384)
++              else if (mode <= 384)
+                       mtl_tx_op |= MTL_OP_MODE_TTC_384;
+               else
+                       mtl_tx_op |= MTL_OP_MODE_TTC_512;
+@@ -230,39 +313,6 @@ static void dwmac4_dma_chan_op_mode(void
+        */
+       mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
+       writel(mtl_tx_op, ioaddr +  MTL_CHAN_TX_OP_MODE(channel));
+-
+-      mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+-
+-      if (rxmode == SF_DMA_MODE) {
+-              pr_debug("GMAC: enable RX store and forward mode\n");
+-              mtl_rx_op |= MTL_OP_MODE_RSF;
+-      } else {
+-              pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
+-              mtl_rx_op &= ~MTL_OP_MODE_RSF;
+-              mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
+-              if (rxmode <= 32)
+-                      mtl_rx_op |= MTL_OP_MODE_RTC_32;
+-              else if (rxmode <= 64)
+-                      mtl_rx_op |= MTL_OP_MODE_RTC_64;
+-              else if (rxmode <= 96)
+-                      mtl_rx_op |= MTL_OP_MODE_RTC_96;
+-              else
+-                      mtl_rx_op |= MTL_OP_MODE_RTC_128;
+-      }
+-
+-      writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+-
+-      /* Enable MTL RX overflow */
+-      mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
+-      writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
+-             ioaddr + MTL_CHAN_INT_CTRL(channel));
+-}
+-
+-static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
+-                                    int rxmode, int rxfifosz)
+-{
+-      /* Only Channel 0 is actually configured and used */
+-      dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
+ }
+ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
+@@ -294,6 +344,11 @@ static void dwmac4_get_hw_feature(void _
+       hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
+       dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
+       dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
++      /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
++       * shifting and store the sizes in bytes.
++       */
++      dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
++      dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
+       /* MAC HW feature2 */
+       hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
+       /* TX and RX number of channels */
+@@ -332,9 +387,13 @@ static void dwmac4_enable_tso(void __iom
+ const struct stmmac_dma_ops dwmac4_dma_ops = {
+       .reset = dwmac4_dma_reset,
+       .init = dwmac4_dma_init,
++      .init_chan = dwmac4_dma_init_channel,
++      .init_rx_chan = dwmac4_dma_init_rx_chan,
++      .init_tx_chan = dwmac4_dma_init_tx_chan,
+       .axi = dwmac4_dma_axi,
+       .dump_regs = dwmac4_dump_dma_regs,
+-      .dma_mode = dwmac4_dma_operation_mode,
++      .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
++      .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
+       .enable_dma_irq = dwmac4_enable_dma_irq,
+       .disable_dma_irq = dwmac4_disable_dma_irq,
+       .start_tx = dwmac4_dma_start_tx,
+@@ -354,9 +413,13 @@ const struct stmmac_dma_ops dwmac4_dma_o
+ const struct stmmac_dma_ops dwmac410_dma_ops = {
+       .reset = dwmac4_dma_reset,
+       .init = dwmac4_dma_init,
++      .init_chan = dwmac4_dma_init_channel,
++      .init_rx_chan = dwmac4_dma_init_rx_chan,
++      .init_tx_chan = dwmac4_dma_init_tx_chan,
+       .axi = dwmac4_dma_axi,
+       .dump_regs = dwmac4_dump_dma_regs,
+-      .dma_mode = dwmac4_dma_operation_mode,
++      .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
++      .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
+       .enable_dma_irq = dwmac410_enable_dma_irq,
+       .disable_dma_irq = dwmac4_disable_dma_irq,
+       .start_tx = dwmac4_dma_start_tx,
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+@@ -185,17 +185,17 @@
+ int dwmac4_dma_reset(void __iomem *ioaddr);
+ void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
+-void dwmac4_enable_dma_irq(void __iomem *ioaddr);
+-void dwmac410_enable_dma_irq(void __iomem *ioaddr);
+-void dwmac4_disable_dma_irq(void __iomem *ioaddr);
+-void dwmac4_dma_start_tx(void __iomem *ioaddr);
+-void dwmac4_dma_stop_tx(void __iomem *ioaddr);
+-void dwmac4_dma_start_rx(void __iomem *ioaddr);
+-void dwmac4_dma_stop_rx(void __iomem *ioaddr);
++void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
++void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
++void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
++void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
++void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
++void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
++void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
+ int dwmac4_dma_interrupt(void __iomem *ioaddr,
+-                       struct stmmac_extra_stats *x);
+-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
+-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
++                       struct stmmac_extra_stats *x, u32 chan);
++void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
++void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
+ void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+ void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+@@ -37,96 +37,96 @@ int dwmac4_dma_reset(void __iomem *ioadd
+ void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+ {
+-      writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
++      writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan));
+ }
+ void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+ {
+-      writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
++      writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan));
+ }
+-void dwmac4_dma_start_tx(void __iomem *ioaddr)
++void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan)
+ {
+-      u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
++      u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+       value |= DMA_CONTROL_ST;
+-      writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
++      writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+       value = readl(ioaddr + GMAC_CONFIG);
+       value |= GMAC_CONFIG_TE;
+       writel(value, ioaddr + GMAC_CONFIG);
+ }
+-void dwmac4_dma_stop_tx(void __iomem *ioaddr)
++void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+ {
+-      u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
++      u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+       value &= ~DMA_CONTROL_ST;
+-      writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
++      writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+       value = readl(ioaddr + GMAC_CONFIG);
+       value &= ~GMAC_CONFIG_TE;
+       writel(value, ioaddr + GMAC_CONFIG);
+ }
+-void dwmac4_dma_start_rx(void __iomem *ioaddr)
++void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
+ {
+-      u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
++      u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+       value |= DMA_CONTROL_SR;
+-      writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
++      writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+       value = readl(ioaddr + GMAC_CONFIG);
+       value |= GMAC_CONFIG_RE;
+       writel(value, ioaddr + GMAC_CONFIG);
+ }
+-void dwmac4_dma_stop_rx(void __iomem *ioaddr)
++void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+ {
+-      u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
++      u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+       value &= ~DMA_CONTROL_SR;
+-      writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
++      writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+       value = readl(ioaddr + GMAC_CONFIG);
+       value &= ~GMAC_CONFIG_RE;
+       writel(value, ioaddr + GMAC_CONFIG);
+ }
+-void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
++void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+ {
+-      writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
++      writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan));
+ }
+-void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
++void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+ {
+-      writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
++      writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
+ }
+-void dwmac4_enable_dma_irq(void __iomem *ioaddr)
++void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+ {
+       writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
+-             DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
++             DMA_CHAN_INTR_ENA(chan));
+ }
+-void dwmac410_enable_dma_irq(void __iomem *ioaddr)
++void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+ {
+       writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
+-             ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
++             ioaddr + DMA_CHAN_INTR_ENA(chan));
+ }
+-void dwmac4_disable_dma_irq(void __iomem *ioaddr)
++void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+ {
+-      writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
++      writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
+ }
+ int dwmac4_dma_interrupt(void __iomem *ioaddr,
+-                       struct stmmac_extra_stats *x)
++                       struct stmmac_extra_stats *x, u32 chan)
+ {
+       int ret = 0;
+-      u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
++      u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
+       /* ABNORMAL interrupts */
+       if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
+@@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *i
+               if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
+                       u32 value;
+-                      value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
++                      value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+                       /* to schedule NAPI on real RIE event. */
+                       if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
+                               x->rx_normal_irq_n++;
+@@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *i
+        * status [21-0] expect reserved bits [5-3]
+        */
+       writel((intr_status & 0x3fffc7),
+-             ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
++             ioaddr + DMA_CHAN_STATUS(chan));
+       return ret;
+ }
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+@@ -137,13 +137,14 @@
+ #define DMA_CONTROL_FTF               0x00100000      /* Flush transmit FIFO */
+ void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+-void dwmac_enable_dma_irq(void __iomem *ioaddr);
+-void dwmac_disable_dma_irq(void __iomem *ioaddr);
+-void dwmac_dma_start_tx(void __iomem *ioaddr);
+-void dwmac_dma_stop_tx(void __iomem *ioaddr);
+-void dwmac_dma_start_rx(void __iomem *ioaddr);
+-void dwmac_dma_stop_rx(void __iomem *ioaddr);
+-int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
++void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
++void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
++void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
++void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
++void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
++void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
++int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
++                      u32 chan);
+ int dwmac_dma_reset(void __iomem *ioaddr);
+ #endif /* __DWMAC_DMA_H__ */
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+@@ -47,38 +47,38 @@ void dwmac_enable_dma_transmission(void
+       writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+ }
+-void dwmac_enable_dma_irq(void __iomem *ioaddr)
++void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
+ {
+       writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+ }
+-void dwmac_disable_dma_irq(void __iomem *ioaddr)
++void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
+ {
+       writel(0, ioaddr + DMA_INTR_ENA);
+ }
+-void dwmac_dma_start_tx(void __iomem *ioaddr)
++void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
+ {
+       u32 value = readl(ioaddr + DMA_CONTROL);
+       value |= DMA_CONTROL_ST;
+       writel(value, ioaddr + DMA_CONTROL);
+ }
+-void dwmac_dma_stop_tx(void __iomem *ioaddr)
++void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+ {
+       u32 value = readl(ioaddr + DMA_CONTROL);
+       value &= ~DMA_CONTROL_ST;
+       writel(value, ioaddr + DMA_CONTROL);
+ }
+-void dwmac_dma_start_rx(void __iomem *ioaddr)
++void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
+ {
+       u32 value = readl(ioaddr + DMA_CONTROL);
+       value |= DMA_CONTROL_SR;
+       writel(value, ioaddr + DMA_CONTROL);
+ }
+-void dwmac_dma_stop_rx(void __iomem *ioaddr)
++void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+ {
+       u32 value = readl(ioaddr + DMA_CONTROL);
+       value &= ~DMA_CONTROL_SR;
+@@ -156,7 +156,7 @@ static void show_rx_process_state(unsign
+ #endif
+ int dwmac_dma_interrupt(void __iomem *ioaddr,
+-                      struct stmmac_extra_stats *x)
++                      struct stmmac_extra_stats *x, u32 chan)
+ {
+       int ret = 0;
+       /* read the status register (CSR5) */
+--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+@@ -315,7 +315,7 @@ static void enh_desc_release_tx_desc(str
+ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+                                    bool csum_flag, int mode, bool tx_own,
+-                                   bool ls)
++                                   bool ls, unsigned int tot_pkt_len)
+ {
+       unsigned int tdes0 = le32_to_cpu(p->des0);
+--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+@@ -191,7 +191,7 @@ static void ndesc_release_tx_desc(struct
+ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+                                 bool csum_flag, int mode, bool tx_own,
+-                                bool ls)
++                                bool ls, unsigned int tot_pkt_len)
+ {
+       unsigned int tdes1 = le32_to_cpu(p->des1);
+--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+@@ -26,16 +26,17 @@
+ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+ {
+-      struct stmmac_priv *priv = (struct stmmac_priv *)p;
+-      unsigned int entry = priv->cur_tx;
+-      struct dma_desc *desc;
++      struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
+       unsigned int nopaged_len = skb_headlen(skb);
++      struct stmmac_priv *priv = tx_q->priv_data;
++      unsigned int entry = tx_q->cur_tx;
+       unsigned int bmax, len, des2;
++      struct dma_desc *desc;
+       if (priv->extend_desc)
+-              desc = (struct dma_desc *)(priv->dma_etx + entry);
++              desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+       else
+-              desc = priv->dma_tx + entry;
++              desc = tx_q->dma_tx + entry;
+       if (priv->plat->enh_desc)
+               bmax = BUF_SIZE_8KiB;
+@@ -52,48 +53,51 @@ static int stmmac_jumbo_frm(void *p, str
+               if (dma_mapping_error(priv->device, des2))
+                       return -1;
+-              priv->tx_skbuff_dma[entry].buf = des2;
+-              priv->tx_skbuff_dma[entry].len = bmax;
+-              priv->tx_skbuff_dma[entry].is_jumbo = true;
++              tx_q->tx_skbuff_dma[entry].buf = des2;
++              tx_q->tx_skbuff_dma[entry].len = bmax;
++              tx_q->tx_skbuff_dma[entry].is_jumbo = true;
+               desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+               priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
+-                                              STMMAC_RING_MODE, 0, false);
+-              priv->tx_skbuff[entry] = NULL;
++                                              STMMAC_RING_MODE, 0,
++                                              false, skb->len);
++              tx_q->tx_skbuff[entry] = NULL;
+               entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+               if (priv->extend_desc)
+-                      desc = (struct dma_desc *)(priv->dma_etx + entry);
++                      desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+               else
+-                      desc = priv->dma_tx + entry;
++                      desc = tx_q->dma_tx + entry;
+               des2 = dma_map_single(priv->device, skb->data + bmax, len,
+                                     DMA_TO_DEVICE);
+               desc->des2 = cpu_to_le32(des2);
+               if (dma_mapping_error(priv->device, des2))
+                       return -1;
+-              priv->tx_skbuff_dma[entry].buf = des2;
+-              priv->tx_skbuff_dma[entry].len = len;
+-              priv->tx_skbuff_dma[entry].is_jumbo = true;
++              tx_q->tx_skbuff_dma[entry].buf = des2;
++              tx_q->tx_skbuff_dma[entry].len = len;
++              tx_q->tx_skbuff_dma[entry].is_jumbo = true;
+               desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+               priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+-                                              STMMAC_RING_MODE, 1, true);
++                                              STMMAC_RING_MODE, 1,
++                                              true, skb->len);
+       } else {
+               des2 = dma_map_single(priv->device, skb->data,
+                                     nopaged_len, DMA_TO_DEVICE);
+               desc->des2 = cpu_to_le32(des2);
+               if (dma_mapping_error(priv->device, des2))
+                       return -1;
+-              priv->tx_skbuff_dma[entry].buf = des2;
+-              priv->tx_skbuff_dma[entry].len = nopaged_len;
+-              priv->tx_skbuff_dma[entry].is_jumbo = true;
++              tx_q->tx_skbuff_dma[entry].buf = des2;
++              tx_q->tx_skbuff_dma[entry].len = nopaged_len;
++              tx_q->tx_skbuff_dma[entry].is_jumbo = true;
+               desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+               priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
+-                                              STMMAC_RING_MODE, 0, true);
++                                              STMMAC_RING_MODE, 0,
++                                              true, skb->len);
+       }
+-      priv->cur_tx = entry;
++      tx_q->cur_tx = entry;
+       return entry;
+ }
+@@ -125,12 +129,13 @@ static void stmmac_init_desc3(struct dma
+ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
+ {
+-      struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+-      unsigned int entry = priv->dirty_tx;
++      struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
++      struct stmmac_priv *priv = tx_q->priv_data;
++      unsigned int entry = tx_q->dirty_tx;
+       /* des3 is only used for jumbo frames tx or time stamping */
+-      if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo ||
+-                   (priv->tx_skbuff_dma[entry].last_segment &&
++      if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
++                   (tx_q->tx_skbuff_dma[entry].last_segment &&
+                     !priv->extend_desc && priv->hwts_tx_en)))
+               p->des3 = 0;
+ }
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+@@ -46,38 +46,51 @@ struct stmmac_tx_info {
+       bool is_jumbo;
+ };
+-struct stmmac_priv {
+-      /* Frequently used values are kept adjacent for cache effect */
++/* Frequently used values are kept adjacent for cache effect */
++struct stmmac_tx_queue {
++      u32 queue_index;
++      struct stmmac_priv *priv_data;
+       struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
+       struct dma_desc *dma_tx;
+       struct sk_buff **tx_skbuff;
++      struct stmmac_tx_info *tx_skbuff_dma;
+       unsigned int cur_tx;
+       unsigned int dirty_tx;
++      dma_addr_t dma_tx_phy;
++      u32 tx_tail_addr;
++};
++
++struct stmmac_rx_queue {
++      u32 queue_index;
++      struct stmmac_priv *priv_data;
++      struct dma_extended_desc *dma_erx;
++      struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
++      struct sk_buff **rx_skbuff;
++      dma_addr_t *rx_skbuff_dma;
++      unsigned int cur_rx;
++      unsigned int dirty_rx;
++      u32 rx_zeroc_thresh;
++      dma_addr_t dma_rx_phy;
++      u32 rx_tail_addr;
++      struct napi_struct napi ____cacheline_aligned_in_smp;
++};
++
++struct stmmac_priv {
++      /* Frequently used values are kept adjacent for cache effect */
+       u32 tx_count_frames;
+       u32 tx_coal_frames;
+       u32 tx_coal_timer;
+-      struct stmmac_tx_info *tx_skbuff_dma;
+-      dma_addr_t dma_tx_phy;
++
+       int tx_coalesce;
+       int hwts_tx_en;
+       bool tx_path_in_lpi_mode;
+       struct timer_list txtimer;
+       bool tso;
+-      struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
+-      struct dma_extended_desc *dma_erx;
+-      struct sk_buff **rx_skbuff;
+-      unsigned int cur_rx;
+-      unsigned int dirty_rx;
+       unsigned int dma_buf_sz;
+       unsigned int rx_copybreak;
+-      unsigned int rx_zeroc_thresh;
+       u32 rx_riwt;
+       int hwts_rx_en;
+-      dma_addr_t *rx_skbuff_dma;
+-      dma_addr_t dma_rx_phy;
+-
+-      struct napi_struct napi ____cacheline_aligned_in_smp;
+       void __iomem *ioaddr;
+       struct net_device *dev;
+@@ -85,6 +98,12 @@ struct stmmac_priv {
+       struct mac_device_info *hw;
+       spinlock_t lock;
++      /* RX Queue */
++      struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
++
++      /* TX Queue */
++      struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
++
+       int oldlink;
+       int speed;
+       int oldduplex;
+@@ -119,8 +138,6 @@ struct stmmac_priv {
+       spinlock_t ptp_lock;
+       void __iomem *mmcaddr;
+       void __iomem *ptpaddr;
+-      u32 rx_tail_addr;
+-      u32 tx_tail_addr;
+       u32 mss;
+ #ifdef CONFIG_DEBUG_FS
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -481,6 +481,7 @@ stmmac_set_pauseparam(struct net_device
+                     struct ethtool_pauseparam *pause)
+ {
+       struct stmmac_priv *priv = netdev_priv(netdev);
++      u32 tx_cnt = priv->plat->tx_queues_to_use;
+       struct phy_device *phy = netdev->phydev;
+       int new_pause = FLOW_OFF;
+@@ -511,7 +512,7 @@ stmmac_set_pauseparam(struct net_device
+       }
+       priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
+-                               priv->pause);
++                               priv->pause, tx_cnt);
+       return 0;
+ }
+@@ -519,6 +520,8 @@ static void stmmac_get_ethtool_stats(str
+                                struct ethtool_stats *dummy, u64 *data)
+ {
+       struct stmmac_priv *priv = netdev_priv(dev);
++      u32 rx_queues_count = priv->plat->rx_queues_to_use;
++      u32 tx_queues_count = priv->plat->tx_queues_to_use;
+       int i, j = 0;
+       /* Update the DMA HW counters for dwmac10/100 */
+@@ -549,7 +552,8 @@ static void stmmac_get_ethtool_stats(str
+               if ((priv->hw->mac->debug) &&
+                   (priv->synopsys_id >= DWMAC_CORE_3_50))
+                       priv->hw->mac->debug(priv->ioaddr,
+-                                           (void *)&priv->xstats);
++                                           (void *)&priv->xstats,
++                                           rx_queues_count, tx_queues_count);
+       }
+       for (i = 0; i < STMMAC_STATS_LEN; i++) {
+               char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
+@@ -726,6 +730,7 @@ static int stmmac_set_coalesce(struct ne
+                              struct ethtool_coalesce *ec)
+ {
+       struct stmmac_priv *priv = netdev_priv(dev);
++      u32 rx_cnt = priv->plat->rx_queues_to_use;
+       unsigned int rx_riwt;
+       /* Check not supported parameters  */
+@@ -764,7 +769,7 @@ static int stmmac_set_coalesce(struct ne
+       priv->tx_coal_frames = ec->tx_max_coalesced_frames;
+       priv->tx_coal_timer = ec->tx_coalesce_usecs;
+       priv->rx_riwt = rx_riwt;
+-      priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
++      priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt);
+       return 0;
+ }
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -139,6 +139,64 @@ static void stmmac_verify_args(void)
+ }
+ /**
++ * stmmac_disable_all_queues - Disable all queues
++ * @priv: driver private structure
++ */
++static void stmmac_disable_all_queues(struct stmmac_priv *priv)
++{
++      u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
++      u32 queue;
++
++      for (queue = 0; queue < rx_queues_cnt; queue++) {
++              struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
++
++              napi_disable(&rx_q->napi);
++      }
++}
++
++/**
++ * stmmac_enable_all_queues - Enable all queues
++ * @priv: driver private structure
++ */
++static void stmmac_enable_all_queues(struct stmmac_priv *priv)
++{
++      u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
++      u32 queue;
++
++      for (queue = 0; queue < rx_queues_cnt; queue++) {
++              struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
++
++              napi_enable(&rx_q->napi);
++      }
++}
++
++/**
++ * stmmac_stop_all_queues - Stop all queues
++ * @priv: driver private structure
++ */
++static void stmmac_stop_all_queues(struct stmmac_priv *priv)
++{
++      u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
++      u32 queue;
++
++      for (queue = 0; queue < tx_queues_cnt; queue++)
++              netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
++}
++
++/**
++ * stmmac_start_all_queues - Start all queues
++ * @priv: driver private structure
++ */
++static void stmmac_start_all_queues(struct stmmac_priv *priv)
++{
++      u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
++      u32 queue;
++
++      for (queue = 0; queue < tx_queues_cnt; queue++)
++              netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
++}
++
++/**
+  * stmmac_clk_csr_set - dynamically set the MDC clock
+  * @priv: driver private structure
+  * Description: this is to dynamically set the MDC clock according to the csr
+@@ -185,26 +243,33 @@ static void print_pkt(unsigned char *buf
+       print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
+ }
+-static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
++static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
+ {
++      struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+       u32 avail;
+-      if (priv->dirty_tx > priv->cur_tx)
+-              avail = priv->dirty_tx - priv->cur_tx - 1;
++      if (tx_q->dirty_tx > tx_q->cur_tx)
++              avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
+       else
+-              avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;
++              avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
+       return avail;
+ }
+-static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
++/**
++ * stmmac_rx_dirty - Get RX queue dirty
++ * @priv: driver private structure
++ * @queue: RX queue index
++ */
++static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
+ {
++      struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       u32 dirty;
+-      if (priv->dirty_rx <= priv->cur_rx)
+-              dirty = priv->cur_rx - priv->dirty_rx;
++      if (rx_q->dirty_rx <= rx_q->cur_rx)
++              dirty = rx_q->cur_rx - rx_q->dirty_rx;
+       else
+-              dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
++              dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
+       return dirty;
+ }
+@@ -232,9 +297,19 @@ static inline void stmmac_hw_fix_mac_spe
+  */
+ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+ {
++      u32 tx_cnt = priv->plat->tx_queues_to_use;
++      u32 queue;
++
++      /* check if all TX queues have the work finished */
++      for (queue = 0; queue < tx_cnt; queue++) {
++              struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
++
++              if (tx_q->dirty_tx != tx_q->cur_tx)
++                      return; /* still unfinished work */
++      }
++
+       /* Check and enter in LPI mode */
+-      if ((priv->dirty_tx == priv->cur_tx) &&
+-          (priv->tx_path_in_lpi_mode == false))
++      if (!priv->tx_path_in_lpi_mode)
+               priv->hw->mac->set_eee_mode(priv->hw,
+                                           priv->plat->en_tx_lpi_clockgating);
+ }
+@@ -359,14 +434,14 @@ static void stmmac_get_tx_hwtstamp(struc
+               return;
+       /* check tx tstamp status */
+-      if (!priv->hw->desc->get_tx_timestamp_status(p)) {
++      if (priv->hw->desc->get_tx_timestamp_status(p)) {
+               /* get the valid tstamp */
+               ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
+               memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+               shhwtstamp.hwtstamp = ns_to_ktime(ns);
+-              netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
++              netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
+               /* pass tstamp to stack */
+               skb_tstamp_tx(skb, &shhwtstamp);
+       }
+@@ -393,19 +468,19 @@ static void stmmac_get_rx_hwtstamp(struc
+               return;
+       /* Check if timestamp is available */
+-      if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
++      if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
+               /* For GMAC4, the valid timestamp is from CTX next desc. */
+               if (priv->plat->has_gmac4)
+                       ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
+               else
+                       ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
+-              netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
++              netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
+               shhwtstamp = skb_hwtstamps(skb);
+               memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+               shhwtstamp->hwtstamp = ns_to_ktime(ns);
+       } else  {
+-              netdev_err(priv->dev, "cannot get RX hw timestamp\n");
++              netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
+       }
+ }
+@@ -471,7 +546,10 @@ static int stmmac_hwtstamp_ioctl(struct
+                       /* PTP v1, UDP, any kind of event packet */
+                       config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+                       /* take time stamp for all event messages */
+-                      snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
++                      if (priv->plat->has_gmac4)
++                              snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
++                      else
++                              snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+                       ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+                       ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+@@ -503,7 +581,10 @@ static int stmmac_hwtstamp_ioctl(struct
+                       config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+                       ptp_v2 = PTP_TCR_TSVER2ENA;
+                       /* take time stamp for all event messages */
+-                      snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
++                      if (priv->plat->has_gmac4)
++                              snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
++                      else
++                              snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+                       ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+                       ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+@@ -537,7 +618,10 @@ static int stmmac_hwtstamp_ioctl(struct
+                       config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+                       ptp_v2 = PTP_TCR_TSVER2ENA;
+                       /* take time stamp for all event messages */
+-                      snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
++                      if (priv->plat->has_gmac4)
++                              snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
++                      else
++                              snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+                       ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+                       ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+@@ -673,6 +757,19 @@ static void stmmac_release_ptp(struct st
+ }
+ /**
++ *  stmmac_mac_flow_ctrl - Configure flow control in all queues
++ *  @priv: driver private structure
++ *  Description: It is used for configuring the flow control in all queues
++ */
++static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
++{
++      u32 tx_cnt = priv->plat->tx_queues_to_use;
++
++      priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
++                               priv->pause, tx_cnt);
++}
++
++/**
+  * stmmac_adjust_link - adjusts the link parameters
+  * @dev: net device structure
+  * Description: this is the helper called by the physical abstraction layer
+@@ -687,7 +784,6 @@ static void stmmac_adjust_link(struct ne
+       struct phy_device *phydev = dev->phydev;
+       unsigned long flags;
+       int new_state = 0;
+-      unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
+       if (!phydev)
+               return;
+@@ -709,8 +805,7 @@ static void stmmac_adjust_link(struct ne
+               }
+               /* Flow Control operation */
+               if (phydev->pause)
+-                      priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
+-                                               fc, pause_time);
++                      stmmac_mac_flow_ctrl(priv, phydev->duplex);
+               if (phydev->speed != priv->speed) {
+                       new_state = 1;
+@@ -878,22 +973,56 @@ static int stmmac_init_phy(struct net_de
+       return 0;
+ }
+-static void stmmac_display_rings(struct stmmac_priv *priv)
++static void stmmac_display_rx_rings(struct stmmac_priv *priv)
+ {
+-      void *head_rx, *head_tx;
++      u32 rx_cnt = priv->plat->rx_queues_to_use;
++      void *head_rx;
++      u32 queue;
+-      if (priv->extend_desc) {
+-              head_rx = (void *)priv->dma_erx;
+-              head_tx = (void *)priv->dma_etx;
+-      } else {
+-              head_rx = (void *)priv->dma_rx;
+-              head_tx = (void *)priv->dma_tx;
++      /* Display RX rings */
++      for (queue = 0; queue < rx_cnt; queue++) {
++              struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
++
++              pr_info("\tRX Queue %u rings\n", queue);
++
++              if (priv->extend_desc)
++                      head_rx = (void *)rx_q->dma_erx;
++              else
++                      head_rx = (void *)rx_q->dma_rx;
++
++              /* Display RX ring */
++              priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+       }
++}
++
++static void stmmac_display_tx_rings(struct stmmac_priv *priv)
++{
++      u32 tx_cnt = priv->plat->tx_queues_to_use;
++      void *head_tx;
++      u32 queue;
++
++      /* Display TX rings */
++      for (queue = 0; queue < tx_cnt; queue++) {
++              struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
++
++              pr_info("\tTX Queue %d rings\n", queue);
++
++              if (priv->extend_desc)
++                      head_tx = (void *)tx_q->dma_etx;
++              else
++                      head_tx = (void *)tx_q->dma_tx;
+-      /* Display Rx ring */
+-      priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+-      /* Display Tx ring */
+-      priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
++              priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
++      }
++}
++
++static void stmmac_display_rings(struct stmmac_priv *priv)
++{
++      /* Display RX ring */
++      stmmac_display_rx_rings(priv);
++
++      /* Display TX ring */
++      stmmac_display_tx_rings(priv);
+ }
+ static int stmmac_set_bfsize(int mtu, int bufsize)
+@@ -913,48 +1042,88 @@ static int stmmac_set_bfsize(int mtu, in
+ }
+ /**
+- * stmmac_clear_descriptors - clear descriptors
++ * stmmac_clear_rx_descriptors - clear RX descriptors
+  * @priv: driver private structure
+- * Description: this function is called to clear the tx and rx descriptors
++ * @queue: RX queue index
++ * Description: this function is called to clear the RX descriptors
+  * in case of both basic and extended descriptors are used.
+  */
+-static void stmmac_clear_descriptors(struct stmmac_priv *priv)
++static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
+ {
++      struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       int i;
+-      /* Clear the Rx/Tx descriptors */
++      /* Clear the RX descriptors */
+       for (i = 0; i < DMA_RX_SIZE; i++)
+               if (priv->extend_desc)
+-                      priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
++                      priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
+                                                    priv->use_riwt, priv->mode,
+                                                    (i == DMA_RX_SIZE - 1));
+               else
+-                      priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
++                      priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
+                                                    priv->use_riwt, priv->mode,
+                                                    (i == DMA_RX_SIZE - 1));
++}
++
++/**
++ * stmmac_clear_tx_descriptors - clear tx descriptors
++ * @priv: driver private structure
++ * @queue: TX queue index.
++ * Description: this function is called to clear the TX descriptors
++ * in case of both basic and extended descriptors are used.
++ */
++static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
++{
++      struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
++      int i;
++
++      /* Clear the TX descriptors */
+       for (i = 0; i < DMA_TX_SIZE; i++)
+               if (priv->extend_desc)
+-                      priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
++                      priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
+                                                    priv->mode,
+                                                    (i == DMA_TX_SIZE - 1));
+               else
+-                      priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
++                      priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
+                                                    priv->mode,
+                                                    (i == DMA_TX_SIZE - 1));
+ }
+ /**
++ * stmmac_clear_descriptors - clear descriptors
++ * @priv: driver private structure
++ * Description: this function is called to clear the TX and RX descriptors
++ * in case of both basic and extended descriptors are used.
++ */
++static void stmmac_clear_descriptors(struct stmmac_priv *priv)
++{
++      u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
++      u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
++      u32 queue;
++
++      /* Clear the RX descriptors */
++      for (queue = 0; queue < rx_queue_cnt; queue++)
++              stmmac_clear_rx_descriptors(priv, queue);
++
++      /* Clear the TX descriptors */
++      for (queue = 0; queue < tx_queue_cnt; queue++)
++              stmmac_clear_tx_descriptors(priv, queue);
++}
++
++/**
+  * stmmac_init_rx_buffers - init the RX descriptor buffer.
+  * @priv: driver private structure
+  * @p: descriptor pointer
+  * @i: descriptor index
+- * @flags: gfp flag.
++ * @flags: gfp flag
++ * @queue: RX queue index
+  * Description: this function is called to allocate a receive buffer, perform
+  * the DMA mapping and init the descriptor.
+  */
+ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+-                                int i, gfp_t flags)
++                                int i, gfp_t flags, u32 queue)
+ {
++      struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       struct sk_buff *skb;
+       skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
+@@ -963,20 +1132,20 @@ static int stmmac_init_rx_buffers(struct
+                          "%s: Rx init fails; skb is NULL\n", __func__);
+               return -ENOMEM;
+       }
+-      priv->rx_skbuff[i] = skb;
+-      priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
++      rx_q->rx_skbuff[i] = skb;
++      rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+                                               priv->dma_buf_sz,
+                                               DMA_FROM_DEVICE);
+-      if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
++      if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
+               netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+       if (priv->synopsys_id >= DWMAC_CORE_4_00)
+-              p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
++              p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
+       else
+-              p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
++              p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
+       if ((priv->hw->mode->init_desc3) &&
+           (priv->dma_buf_sz == BUF_SIZE_16KiB))
+@@ -985,30 +1154,71 @@ static int stmmac_init_rx_buffers(struct
+       return 0;
+ }
+-static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
++/**
++ * stmmac_free_rx_buffer - free RX dma buffers
++ * @priv: private structure
++ * @queue: RX queue index
++ * @i: buffer index.
++ */
++static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+ {
+-      if (priv->rx_skbuff[i]) {
+-              dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
++      struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
++
++      if (rx_q->rx_skbuff[i]) {
++              dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
+                                priv->dma_buf_sz, DMA_FROM_DEVICE);
+-              dev_kfree_skb_any(priv->rx_skbuff[i]);
++              dev_kfree_skb_any(rx_q->rx_skbuff[i]);
+       }
+-      priv->rx_skbuff[i] = NULL;
++      rx_q->rx_skbuff[i] = NULL;
+ }
+ /**
+- * init_dma_desc_rings - init the RX/TX descriptor rings
++ * stmmac_free_tx_buffer - free TX dma buffers
++ * @priv: private structure
++ * @queue: TX queue index
++ * @i: buffer index.
++ */
++static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
++{
++      struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
++
++      if (tx_q->tx_skbuff_dma[i].buf) {
++              if (tx_q->tx_skbuff_dma[i].map_as_page)
++                      dma_unmap_page(priv->device,
++                                     tx_q->tx_skbuff_dma[i].buf,
++                                     tx_q->tx_skbuff_dma[i].len,
++                                     DMA_TO_DEVICE);
++              else
++                      dma_unmap_single(priv->device,
++                                       tx_q->tx_skbuff_dma[i].buf,
++                                       tx_q->tx_skbuff_dma[i].len,
++                                       DMA_TO_DEVICE);
++      }
++
++      if (tx_q->tx_skbuff[i]) {
++              dev_kfree_skb_any(tx_q->tx_skbuff[i]);
++              tx_q->tx_skbuff[i] = NULL;
++              tx_q->tx_skbuff_dma[i].buf = 0;
++              tx_q->tx_skbuff_dma[i].map_as_page = false;
++      }
++}
++
++/**
++ * init_dma_rx_desc_rings - init the RX descriptor rings
+  * @dev: net device structure
+  * @flags: gfp flag.
+- * Description: this function initializes the DMA RX/TX descriptors
++ * Description: this function initializes the DMA RX descriptors
+  * and allocates the socket buffers. It supports the chained and ring
+  * modes.
+  */
+-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
++static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+ {
+-      int i;
+       struct stmmac_priv *priv = netdev_priv(dev);
++      u32 rx_count = priv->plat->rx_queues_to_use;
+       unsigned int bfsize = 0;
+       int ret = -ENOMEM;
++      int queue;
++      int i;
+       if (priv->hw->mode->set_16kib_bfsize)
+               bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
+@@ -1018,235 +1228,409 @@ static int init_dma_desc_rings(struct ne
+       priv->dma_buf_sz = bfsize;
+-      netif_dbg(priv, probe, priv->dev,
+-                "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
+-                __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
+-
+       /* RX INITIALIZATION */
+       netif_dbg(priv, probe, priv->dev,
+                 "SKB addresses:\nskb\t\tskb data\tdma data\n");
+-      for (i = 0; i < DMA_RX_SIZE; i++) {
+-              struct dma_desc *p;
+-              if (priv->extend_desc)
+-                      p = &((priv->dma_erx + i)->basic);
+-              else
+-                      p = priv->dma_rx + i;
++      for (queue = 0; queue < rx_count; queue++) {
++              struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
++
++              netif_dbg(priv, probe, priv->dev,
++                        "(%s) dma_rx_phy=0x%08x\n", __func__,
++                        (u32)rx_q->dma_rx_phy);
++
++              for (i = 0; i < DMA_RX_SIZE; i++) {
++                      struct dma_desc *p;
++
++                      if (priv->extend_desc)
++                              p = &((rx_q->dma_erx + i)->basic);
++                      else
++                              p = rx_q->dma_rx + i;
++
++                      ret = stmmac_init_rx_buffers(priv, p, i, flags,
++                                                   queue);
++                      if (ret)
++                              goto err_init_rx_buffers;
++
++                      netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
++                                rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
++                                (unsigned int)rx_q->rx_skbuff_dma[i]);
++              }
+-              ret = stmmac_init_rx_buffers(priv, p, i, flags);
+-              if (ret)
+-                      goto err_init_rx_buffers;
++              rx_q->cur_rx = 0;
++              rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+-              netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
+-                        priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
+-                        (unsigned int)priv->rx_skbuff_dma[i]);
++              stmmac_clear_rx_descriptors(priv, queue);
++
++              /* Setup the chained descriptor addresses */
++              if (priv->mode == STMMAC_CHAIN_MODE) {
++                      if (priv->extend_desc)
++                              priv->hw->mode->init(rx_q->dma_erx,
++                                                   rx_q->dma_rx_phy,
++                                                   DMA_RX_SIZE, 1);
++                      else
++                              priv->hw->mode->init(rx_q->dma_rx,
++                                                   rx_q->dma_rx_phy,
++                                                   DMA_RX_SIZE, 0);
++              }
+       }
+-      priv->cur_rx = 0;
+-      priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
++
+       buf_sz = bfsize;
+-      /* Setup the chained descriptor addresses */
+-      if (priv->mode == STMMAC_CHAIN_MODE) {
+-              if (priv->extend_desc) {
+-                      priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
+-                                           DMA_RX_SIZE, 1);
+-                      priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
+-                                           DMA_TX_SIZE, 1);
+-              } else {
+-                      priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
+-                                           DMA_RX_SIZE, 0);
+-                      priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
+-                                           DMA_TX_SIZE, 0);
+-              }
++      return 0;
++
++err_init_rx_buffers:
++      while (queue >= 0) {
++              while (--i >= 0)
++                      stmmac_free_rx_buffer(priv, queue, i);
++
++              if (queue == 0)
++                      break;
++
++              i = DMA_RX_SIZE;
++              queue--;
+       }
+-      /* TX INITIALIZATION */
+-      for (i = 0; i < DMA_TX_SIZE; i++) {
+-              struct dma_desc *p;
+-              if (priv->extend_desc)
+-                      p = &((priv->dma_etx + i)->basic);
+-              else
+-                      p = priv->dma_tx + i;
++      return ret;
++}
+-              if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+-                      p->des0 = 0;
+-                      p->des1 = 0;
+-                      p->des2 = 0;
+-                      p->des3 = 0;
+-              } else {
+-                      p->des2 = 0;
++/**
++ * init_dma_tx_desc_rings - init the TX descriptor rings
++ * @dev: net device structure.
++ * Description: this function initializes the DMA TX descriptors
++ * and the TX bookkeeping arrays. It supports the chained and ring
++ * modes.
++ */
++static int init_dma_tx_desc_rings(struct net_device *dev)
++{
++      struct stmmac_priv *priv = netdev_priv(dev);
++      u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
++      u32 queue;
++      int i;
++
++      for (queue = 0; queue < tx_queue_cnt; queue++) {
++              struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
++
++              netif_dbg(priv, probe, priv->dev,
++                        "(%s) dma_tx_phy=0x%08x\n", __func__,
++                       (u32)tx_q->dma_tx_phy);
++
++              /* Setup the chained descriptor addresses */
++              if (priv->mode == STMMAC_CHAIN_MODE) {
++                      if (priv->extend_desc)
++                              priv->hw->mode->init(tx_q->dma_etx,
++                                                   tx_q->dma_tx_phy,
++                                                   DMA_TX_SIZE, 1);
++                      else
++                              priv->hw->mode->init(tx_q->dma_tx,
++                                                   tx_q->dma_tx_phy,
++                                                   DMA_TX_SIZE, 0);
++              }
++
++              for (i = 0; i < DMA_TX_SIZE; i++) {
++                      struct dma_desc *p;
++                      if (priv->extend_desc)
++                              p = &((tx_q->dma_etx + i)->basic);
++                      else
++                              p = tx_q->dma_tx + i;
++
++                      if (priv->synopsys_id >= DWMAC_CORE_4_00) {
++                              p->des0 = 0;
++                              p->des1 = 0;
++                              p->des2 = 0;
++                              p->des3 = 0;
++                      } else {
++                              p->des2 = 0;
++                      }
++
++                      tx_q->tx_skbuff_dma[i].buf = 0;
++                      tx_q->tx_skbuff_dma[i].map_as_page = false;
++                      tx_q->tx_skbuff_dma[i].len = 0;
++                      tx_q->tx_skbuff_dma[i].last_segment = false;
++                      tx_q->tx_skbuff[i] = NULL;
+               }
+-              priv->tx_skbuff_dma[i].buf = 0;
+-              priv->tx_skbuff_dma[i].map_as_page = false;
+-              priv->tx_skbuff_dma[i].len = 0;
+-              priv->tx_skbuff_dma[i].last_segment = false;
+-              priv->tx_skbuff[i] = NULL;
++              tx_q->dirty_tx = 0;
++              tx_q->cur_tx = 0;
++
++              netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+       }
+-      priv->dirty_tx = 0;
+-      priv->cur_tx = 0;
+-      netdev_reset_queue(priv->dev);
++      return 0;
++}
++
++/**
++ * init_dma_desc_rings - init the RX/TX descriptor rings
++ * @dev: net device structure
++ * @flags: gfp flag.
++ * Description: this function initializes the DMA RX/TX descriptors
++ * and allocates the socket buffers. It supports the chained and ring
++ * modes.
++ */
++static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
++{
++      struct stmmac_priv *priv = netdev_priv(dev);
++      int ret;
++
++      ret = init_dma_rx_desc_rings(dev, flags);
++      if (ret)
++              return ret;
++
++      ret = init_dma_tx_desc_rings(dev);
+       stmmac_clear_descriptors(priv);
+       if (netif_msg_hw(priv))
+               stmmac_display_rings(priv);
+-      return 0;
+-err_init_rx_buffers:
+-      while (--i >= 0)
+-              stmmac_free_rx_buffers(priv, i);
+       return ret;
+ }
+-static void dma_free_rx_skbufs(struct stmmac_priv *priv)
++/**
++ * dma_free_rx_skbufs - free RX dma buffers
++ * @priv: private structure
++ * @queue: RX queue index
++ */
++static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
+ {
+       int i;
+       for (i = 0; i < DMA_RX_SIZE; i++)
+-              stmmac_free_rx_buffers(priv, i);
++              stmmac_free_rx_buffer(priv, queue, i);
+ }
+-static void dma_free_tx_skbufs(struct stmmac_priv *priv)
++/**
++ * dma_free_tx_skbufs - free TX dma buffers
++ * @priv: private structure
++ * @queue: TX queue index
++ */
++static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
+ {
+       int i;
+-      for (i = 0; i < DMA_TX_SIZE; i++) {
+-              if (priv->tx_skbuff_dma[i].buf) {
+-                      if (priv->tx_skbuff_dma[i].map_as_page)
+-                              dma_unmap_page(priv->device,
+-                                             priv->tx_skbuff_dma[i].buf,
+-                                             priv->tx_skbuff_dma[i].len,
+-                                             DMA_TO_DEVICE);
+-                      else
+-                              dma_unmap_single(priv->device,
+-                                               priv->tx_skbuff_dma[i].buf,
+-                                               priv->tx_skbuff_dma[i].len,
+-                                               DMA_TO_DEVICE);
+-              }
++      for (i = 0; i < DMA_TX_SIZE; i++)
++              stmmac_free_tx_buffer(priv, queue, i);
++}
+-              if (priv->tx_skbuff[i]) {
+-                      dev_kfree_skb_any(priv->tx_skbuff[i]);
+-                      priv->tx_skbuff[i] = NULL;
+-                      priv->tx_skbuff_dma[i].buf = 0;
+-                      priv->tx_skbuff_dma[i].map_as_page = false;
+-              }
++/**
++ * free_dma_rx_desc_resources - free RX dma desc resources
++ * @priv: private structure
++ */
++static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
++{
++      u32 rx_count = priv->plat->rx_queues_to_use;
++      u32 queue;
++
++      /* Free RX queue resources */
++      for (queue = 0; queue < rx_count; queue++) {
++              struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
++
++              /* Release the DMA RX socket buffers */
++              dma_free_rx_skbufs(priv, queue);
++
++              /* Free DMA regions of consistent memory previously allocated */
++              if (!priv->extend_desc)
++                      dma_free_coherent(priv->device,
++                                        DMA_RX_SIZE * sizeof(struct dma_desc),
++                                        rx_q->dma_rx, rx_q->dma_rx_phy);
++              else
++                      dma_free_coherent(priv->device, DMA_RX_SIZE *
++                                        sizeof(struct dma_extended_desc),
++                                        rx_q->dma_erx, rx_q->dma_rx_phy);
++
++              kfree(rx_q->rx_skbuff_dma);
++              kfree(rx_q->rx_skbuff);
+       }
+ }
+ /**
+- * alloc_dma_desc_resources - alloc TX/RX resources.
++ * free_dma_tx_desc_resources - free TX dma desc resources
++ * @priv: private structure
++ */
++static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
++{
++      u32 tx_count = priv->plat->tx_queues_to_use;
++      u32 queue = 0;
++
++      /* Free TX queue resources */
++      for (queue = 0; queue < tx_count; queue++) {
++              struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
++
++              /* Release the DMA TX socket buffers */
++              dma_free_tx_skbufs(priv, queue);
++
++              /* Free DMA regions of consistent memory previously allocated */
++              if (!priv->extend_desc)
++                      dma_free_coherent(priv->device,
++                                        DMA_TX_SIZE * sizeof(struct dma_desc),
++                                        tx_q->dma_tx, tx_q->dma_tx_phy);
++              else
++                      dma_free_coherent(priv->device, DMA_TX_SIZE *
++                                        sizeof(struct dma_extended_desc),
++                                        tx_q->dma_etx, tx_q->dma_tx_phy);
++
++              kfree(tx_q->tx_skbuff_dma);
++              kfree(tx_q->tx_skbuff);
++      }
++}
++
++/**
++ * alloc_dma_rx_desc_resources - alloc RX resources.
+  * @priv: private structure
+  * Description: according to which descriptor can be used (extend or basic)
+  * this function allocates the resources for TX and RX paths. In case of
+  * reception, for example, it pre-allocated the RX socket buffer in order to
+  * allow zero-copy mechanism.
+  */
+-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
++static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+ {
++      u32 rx_count = priv->plat->rx_queues_to_use;
+       int ret = -ENOMEM;
++      u32 queue;
+-      priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
+-                                          GFP_KERNEL);
+-      if (!priv->rx_skbuff_dma)
+-              return -ENOMEM;
++      /* RX queues buffers and DMA */
++      for (queue = 0; queue < rx_count; queue++) {
++              struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+-      priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
+-                                      GFP_KERNEL);
+-      if (!priv->rx_skbuff)
+-              goto err_rx_skbuff;
+-
+-      priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
+-                                          sizeof(*priv->tx_skbuff_dma),
+-                                          GFP_KERNEL);
+-      if (!priv->tx_skbuff_dma)
+-              goto err_tx_skbuff_dma;
+-
+-      priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
+-                                      GFP_KERNEL);
+-      if (!priv->tx_skbuff)
+-              goto err_tx_skbuff;
+-
+-      if (priv->extend_desc) {
+-              priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
+-                                                  sizeof(struct
+-                                                         dma_extended_desc),
+-                                                  &priv->dma_rx_phy,
+-                                                  GFP_KERNEL);
+-              if (!priv->dma_erx)
+-                      goto err_dma;
++              rx_q->queue_index = queue;
++              rx_q->priv_data = priv;
+-              priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
+-                                                  sizeof(struct
+-                                                         dma_extended_desc),
+-                                                  &priv->dma_tx_phy,
++              rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
++                                                  sizeof(dma_addr_t),
+                                                   GFP_KERNEL);
+-              if (!priv->dma_etx) {
+-                      dma_free_coherent(priv->device, DMA_RX_SIZE *
+-                                        sizeof(struct dma_extended_desc),
+-                                        priv->dma_erx, priv->dma_rx_phy);
+-                      goto err_dma;
+-              }
+-      } else {
+-              priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
+-                                                 sizeof(struct dma_desc),
+-                                                 &priv->dma_rx_phy,
+-                                                 GFP_KERNEL);
+-              if (!priv->dma_rx)
+-                      goto err_dma;
++              if (!rx_q->rx_skbuff_dma)
++                      return -ENOMEM;
+-              priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
+-                                                 sizeof(struct dma_desc),
+-                                                 &priv->dma_tx_phy,
+-                                                 GFP_KERNEL);
+-              if (!priv->dma_tx) {
+-                      dma_free_coherent(priv->device, DMA_RX_SIZE *
+-                                        sizeof(struct dma_desc),
+-                                        priv->dma_rx, priv->dma_rx_phy);
++              rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
++                                              sizeof(struct sk_buff *),
++                                              GFP_KERNEL);
++              if (!rx_q->rx_skbuff)
+                       goto err_dma;
++
++              if (priv->extend_desc) {
++                      rx_q->dma_erx = dma_zalloc_coherent(priv->device,
++                                                          DMA_RX_SIZE *
++                                                          sizeof(struct
++                                                          dma_extended_desc),
++                                                          &rx_q->dma_rx_phy,
++                                                          GFP_KERNEL);
++                      if (!rx_q->dma_erx)
++                              goto err_dma;
++
++              } else {
++                      rx_q->dma_rx = dma_zalloc_coherent(priv->device,
++                                                         DMA_RX_SIZE *
++                                                         sizeof(struct
++                                                         dma_desc),
++                                                         &rx_q->dma_rx_phy,
++                                                         GFP_KERNEL);
++                      if (!rx_q->dma_rx)
++                              goto err_dma;
+               }
+       }
+       return 0;
+ err_dma:
+-      kfree(priv->tx_skbuff);
+-err_tx_skbuff:
+-      kfree(priv->tx_skbuff_dma);
+-err_tx_skbuff_dma:
+-      kfree(priv->rx_skbuff);
+-err_rx_skbuff:
+-      kfree(priv->rx_skbuff_dma);
++      free_dma_rx_desc_resources(priv);
++
++      return ret;
++}
++
++/**
++ * alloc_dma_tx_desc_resources - alloc TX resources.
++ * @priv: private structure
++ * Description: according to which descriptor can be used (extend or basic)
++ * this function allocates the resources for the TX path: for each TX queue
++ * it allocates the tx_skbuff bookkeeping arrays and the DMA descriptor
++ * ring (basic or extended).
++ */
++static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
++{
++      u32 tx_count = priv->plat->tx_queues_to_use;
++      int ret = -ENOMEM;
++      u32 queue;
++
++      /* TX queues buffers and DMA */
++      for (queue = 0; queue < tx_count; queue++) {
++              struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
++
++              tx_q->queue_index = queue;
++              tx_q->priv_data = priv;
++
++              tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
++                                                  sizeof(*tx_q->tx_skbuff_dma),
++                                                  GFP_KERNEL);
++              if (!tx_q->tx_skbuff_dma)
++                      return -ENOMEM;
++
++              tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
++                                              sizeof(struct sk_buff *),
++                                              GFP_KERNEL);
++              if (!tx_q->tx_skbuff)
++                      goto err_dma_buffers;
++
++              if (priv->extend_desc) {
++                      tx_q->dma_etx = dma_zalloc_coherent(priv->device,
++                                                          DMA_TX_SIZE *
++                                                          sizeof(struct
++                                                          dma_extended_desc),
++                                                          &tx_q->dma_tx_phy,
++                                                          GFP_KERNEL);
++                      if (!tx_q->dma_etx)
++                              goto err_dma_buffers;
++              } else {
++                      tx_q->dma_tx = dma_zalloc_coherent(priv->device,
++                                                         DMA_TX_SIZE *
++                                                         sizeof(struct
++                                                                dma_desc),
++                                                         &tx_q->dma_tx_phy,
++                                                         GFP_KERNEL);
++                      if (!tx_q->dma_tx)
++                              goto err_dma_buffers;
++              }
++      }
++
++      return 0;
++
++err_dma_buffers:
++      free_dma_tx_desc_resources(priv);
++
+       return ret;
+ }
++/**
++ * alloc_dma_desc_resources - alloc TX/RX resources.
++ * @priv: private structure
++ * Description: according to which descriptor can be used (extend or basic)
++ * this function allocates the resources for TX and RX paths. In case of
++ * reception, for example, it pre-allocated the RX socket buffer in order to
++ * allow zero-copy mechanism.
++ */
++static int alloc_dma_desc_resources(struct stmmac_priv *priv)
++{
++      /* RX Allocation */
++      int ret = alloc_dma_rx_desc_resources(priv);
++
++      if (ret)
++              return ret;
++
++      ret = alloc_dma_tx_desc_resources(priv);
++
++      return ret;
++}
++
++/**
++ * free_dma_desc_resources - free dma desc resources
++ * @priv: private structure
++ */
+ static void free_dma_desc_resources(struct stmmac_priv *priv)
+ {
+-      /* Release the DMA TX/RX socket buffers */
+-      dma_free_rx_skbufs(priv);
+-      dma_free_tx_skbufs(priv);
+-
+-      /* Free DMA regions of consistent memory previously allocated */
+-      if (!priv->extend_desc) {
+-              dma_free_coherent(priv->device,
+-                                DMA_TX_SIZE * sizeof(struct dma_desc),
+-                                priv->dma_tx, priv->dma_tx_phy);
+-              dma_free_coherent(priv->device,
+-                                DMA_RX_SIZE * sizeof(struct dma_desc),
+-                                priv->dma_rx, priv->dma_rx_phy);
+-      } else {
+-              dma_free_coherent(priv->device, DMA_TX_SIZE *
+-                                sizeof(struct dma_extended_desc),
+-                                priv->dma_etx, priv->dma_tx_phy);
+-              dma_free_coherent(priv->device, DMA_RX_SIZE *
+-                                sizeof(struct dma_extended_desc),
+-                                priv->dma_erx, priv->dma_rx_phy);
+-      }
+-      kfree(priv->rx_skbuff_dma);
+-      kfree(priv->rx_skbuff);
+-      kfree(priv->tx_skbuff_dma);
+-      kfree(priv->tx_skbuff);
++      /* Release the DMA RX socket buffers */
++      free_dma_rx_desc_resources(priv);
++
++      /* Release the DMA TX socket buffers */
++      free_dma_tx_desc_resources(priv);
+ }
+ /**
+@@ -1256,19 +1640,104 @@ static void free_dma_desc_resources(stru
+  */
+ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
+ {
+-      int rx_count = priv->dma_cap.number_rx_queues;
+-      int queue = 0;
++      u32 rx_queues_count = priv->plat->rx_queues_to_use;
++      int queue;
++      u8 mode;
+-      /* If GMAC does not have multiple queues, then this is not necessary*/
+-      if (rx_count == 1)
+-              return;
++      for (queue = 0; queue < rx_queues_count; queue++) {
++              mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
++              priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
++      }
++}
+-      /**
+-       *  If the core is synthesized with multiple rx queues / multiple
+-       *  dma channels, then rx queues will be disabled by default.
+-       *  For now only rx queue 0 is enabled.
+-       */
+-      priv->hw->mac->rx_queue_enable(priv->hw, queue);
++/**
++ * stmmac_start_rx_dma - start RX DMA channel
++ * @priv: driver private structure
++ * @chan: RX channel index
++ * Description:
++ * This starts a RX DMA channel
++ */
++static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
++{
++      netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
++      priv->hw->dma->start_rx(priv->ioaddr, chan);
++}
++
++/**
++ * stmmac_start_tx_dma - start TX DMA channel
++ * @priv: driver private structure
++ * @chan: TX channel index
++ * Description:
++ * This starts a TX DMA channel
++ */
++static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
++{
++      netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
++      priv->hw->dma->start_tx(priv->ioaddr, chan);
++}
++
++/**
++ * stmmac_stop_rx_dma - stop RX DMA channel
++ * @priv: driver private structure
++ * @chan: RX channel index
++ * Description:
++ * This stops a RX DMA channel
++ */
++static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
++{
++      netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
++      priv->hw->dma->stop_rx(priv->ioaddr, chan);
++}
++
++/**
++ * stmmac_stop_tx_dma - stop TX DMA channel
++ * @priv: driver private structure
++ * @chan: TX channel index
++ * Description:
++ * This stops a TX DMA channel
++ */
++static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
++{
++      netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
++      priv->hw->dma->stop_tx(priv->ioaddr, chan);
++}
++
++/**
++ * stmmac_start_all_dma - start all RX and TX DMA channels
++ * @priv: driver private structure
++ * Description:
++ * This starts all the RX and TX DMA channels
++ */
++static void stmmac_start_all_dma(struct stmmac_priv *priv)
++{
++      u32 rx_channels_count = priv->plat->rx_queues_to_use;
++      u32 tx_channels_count = priv->plat->tx_queues_to_use;
++      u32 chan = 0;
++
++      for (chan = 0; chan < rx_channels_count; chan++)
++              stmmac_start_rx_dma(priv, chan);
++
++      for (chan = 0; chan < tx_channels_count; chan++)
++              stmmac_start_tx_dma(priv, chan);
++}
++
++/**
++ * stmmac_stop_all_dma - stop all RX and TX DMA channels
++ * @priv: driver private structure
++ * Description:
++ * This stops the RX and TX DMA channels
++ */
++static void stmmac_stop_all_dma(struct stmmac_priv *priv)
++{
++      u32 rx_channels_count = priv->plat->rx_queues_to_use;
++      u32 tx_channels_count = priv->plat->tx_queues_to_use;
++      u32 chan = 0;
++
++      for (chan = 0; chan < rx_channels_count; chan++)
++              stmmac_stop_rx_dma(priv, chan);
++
++      for (chan = 0; chan < tx_channels_count; chan++)
++              stmmac_stop_tx_dma(priv, chan);
+ }
+ /**
+@@ -1279,11 +1748,20 @@ static void stmmac_mac_enable_rx_queues(
+  */
+ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
+ {
++      u32 rx_channels_count = priv->plat->rx_queues_to_use;
++      u32 tx_channels_count = priv->plat->tx_queues_to_use;
+       int rxfifosz = priv->plat->rx_fifo_size;
+-
+-      if (priv->plat->force_thresh_dma_mode)
+-              priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
+-      else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
++      u32 txmode = 0;
++      u32 rxmode = 0;
++      u32 chan = 0;
++
++      if (rxfifosz == 0)
++              rxfifosz = priv->dma_cap.rx_fifo_size;
++
++      if (priv->plat->force_thresh_dma_mode) {
++              txmode = tc;
++              rxmode = tc;
++      } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
+               /*
+                * In case of GMAC, SF mode can be enabled
+                * to perform the TX COE in HW. This depends on:
+@@ -1291,37 +1769,53 @@ static void stmmac_dma_operation_mode(st
+                * 2) There is no bugged Jumbo frame support
+                *    that needs to not insert csum in the TDES.
+                */
+-              priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
+-                                      rxfifosz);
++              txmode = SF_DMA_MODE;
++              rxmode = SF_DMA_MODE;
+               priv->xstats.threshold = SF_DMA_MODE;
+-      } else
+-              priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
++      } else {
++              txmode = tc;
++              rxmode = SF_DMA_MODE;
++      }
++
++      /* configure all channels */
++      if (priv->synopsys_id >= DWMAC_CORE_4_00) {
++              for (chan = 0; chan < rx_channels_count; chan++)
++                      priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
++                                                 rxfifosz);
++
++              for (chan = 0; chan < tx_channels_count; chan++)
++                      priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
++      } else {
++              priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
+                                       rxfifosz);
++      }
+ }
+ /**
+  * stmmac_tx_clean - to manage the transmission completion
+  * @priv: driver private structure
++ * @queue: TX queue index
+  * Description: it reclaims the transmit resources after transmission completes.
+  */
+-static void stmmac_tx_clean(struct stmmac_priv *priv)
++static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
+ {
++      struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+       unsigned int bytes_compl = 0, pkts_compl = 0;
+-      unsigned int entry = priv->dirty_tx;
++      unsigned int entry = tx_q->dirty_tx;
+       netif_tx_lock(priv->dev);
+       priv->xstats.tx_clean++;
+-      while (entry != priv->cur_tx) {
+-              struct sk_buff *skb = priv->tx_skbuff[entry];
++      while (entry != tx_q->cur_tx) {
++              struct sk_buff *skb = tx_q->tx_skbuff[entry];
+               struct dma_desc *p;
+               int status;
+               if (priv->extend_desc)
+-                      p = (struct dma_desc *)(priv->dma_etx + entry);
++                      p = (struct dma_desc *)(tx_q->dma_etx + entry);
+               else
+-                      p = priv->dma_tx + entry;
++                      p = tx_q->dma_tx + entry;
+               status = priv->hw->desc->tx_status(&priv->dev->stats,
+                                                     &priv->xstats, p,
+@@ -1342,48 +1836,51 @@ static void stmmac_tx_clean(struct stmma
+                       stmmac_get_tx_hwtstamp(priv, p, skb);
+               }
+-              if (likely(priv->tx_skbuff_dma[entry].buf)) {
+-                      if (priv->tx_skbuff_dma[entry].map_as_page)
++              if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
++                      if (tx_q->tx_skbuff_dma[entry].map_as_page)
+                               dma_unmap_page(priv->device,
+-                                             priv->tx_skbuff_dma[entry].buf,
+-                                             priv->tx_skbuff_dma[entry].len,
++                                             tx_q->tx_skbuff_dma[entry].buf,
++                                             tx_q->tx_skbuff_dma[entry].len,
+                                              DMA_TO_DEVICE);
+                       else
+                               dma_unmap_single(priv->device,
+-                                               priv->tx_skbuff_dma[entry].buf,
+-                                               priv->tx_skbuff_dma[entry].len,
++                                               tx_q->tx_skbuff_dma[entry].buf,
++                                               tx_q->tx_skbuff_dma[entry].len,
+                                                DMA_TO_DEVICE);
+-                      priv->tx_skbuff_dma[entry].buf = 0;
+-                      priv->tx_skbuff_dma[entry].len = 0;
+-                      priv->tx_skbuff_dma[entry].map_as_page = false;
++                      tx_q->tx_skbuff_dma[entry].buf = 0;
++                      tx_q->tx_skbuff_dma[entry].len = 0;
++                      tx_q->tx_skbuff_dma[entry].map_as_page = false;
+               }
+               if (priv->hw->mode->clean_desc3)
+-                      priv->hw->mode->clean_desc3(priv, p);
++                      priv->hw->mode->clean_desc3(tx_q, p);
+-              priv->tx_skbuff_dma[entry].last_segment = false;
+-              priv->tx_skbuff_dma[entry].is_jumbo = false;
++              tx_q->tx_skbuff_dma[entry].last_segment = false;
++              tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+               if (likely(skb != NULL)) {
+                       pkts_compl++;
+                       bytes_compl += skb->len;
+                       dev_consume_skb_any(skb);
+-                      priv->tx_skbuff[entry] = NULL;
++                      tx_q->tx_skbuff[entry] = NULL;
+               }
+               priv->hw->desc->release_tx_desc(p, priv->mode);
+               entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+       }
+-      priv->dirty_tx = entry;
++      tx_q->dirty_tx = entry;
++
++      netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
++                                pkts_compl, bytes_compl);
+-      netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
++      if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
++                                                              queue))) &&
++          stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
+-      if (unlikely(netif_queue_stopped(priv->dev) &&
+-          stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
+               netif_dbg(priv, tx_done, priv->dev,
+                         "%s: restart transmit\n", __func__);
+-              netif_wake_queue(priv->dev);
++              netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
+       }
+       if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
+@@ -1393,45 +1890,76 @@ static void stmmac_tx_clean(struct stmma
+       netif_tx_unlock(priv->dev);
+ }
+-static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
++static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
+ {
+-      priv->hw->dma->enable_dma_irq(priv->ioaddr);
++      priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
+ }
+-static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
++static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
+ {
+-      priv->hw->dma->disable_dma_irq(priv->ioaddr);
++      priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
+ }
+ /**
+  * stmmac_tx_err - to manage the tx error
+  * @priv: driver private structure
++ * @chan: channel index
+  * Description: it cleans the descriptors and restarts the transmission
+  * in case of transmission errors.
+  */
+-static void stmmac_tx_err(struct stmmac_priv *priv)
++static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
+ {
++      struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+       int i;
+-      netif_stop_queue(priv->dev);
+-      priv->hw->dma->stop_tx(priv->ioaddr);
+-      dma_free_tx_skbufs(priv);
++      netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
++
++      stmmac_stop_tx_dma(priv, chan);
++      dma_free_tx_skbufs(priv, chan);
+       for (i = 0; i < DMA_TX_SIZE; i++)
+               if (priv->extend_desc)
+-                      priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
++                      priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
+                                                    priv->mode,
+                                                    (i == DMA_TX_SIZE - 1));
+               else
+-                      priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
++                      priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
+                                                    priv->mode,
+                                                    (i == DMA_TX_SIZE - 1));
+-      priv->dirty_tx = 0;
+-      priv->cur_tx = 0;
+-      netdev_reset_queue(priv->dev);
+-      priv->hw->dma->start_tx(priv->ioaddr);
++      tx_q->dirty_tx = 0;
++      tx_q->cur_tx = 0;
++      netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
++      stmmac_start_tx_dma(priv, chan);
+       priv->dev->stats.tx_errors++;
+-      netif_wake_queue(priv->dev);
++      netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
++}
++
++/**
++ *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
++ *  @priv: driver private structure
++ *  @txmode: TX operating mode
++ *  @rxmode: RX operating mode
++ *  @chan: channel index
++ *  Description: it is used for configuring the DMA operation mode at
++ *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
++ *  mode.
++ */
++static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
++                                        u32 rxmode, u32 chan)
++{
++      int rxfifosz = priv->plat->rx_fifo_size;
++
++      if (rxfifosz == 0)
++              rxfifosz = priv->dma_cap.rx_fifo_size;
++
++      if (priv->synopsys_id >= DWMAC_CORE_4_00) {
++              priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
++                                         rxfifosz);
++              priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
++      } else {
++              priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
++                                      rxfifosz);
++      }
+ }
+ /**
+@@ -1443,31 +1971,43 @@ static void stmmac_tx_err(struct stmmac_
+  */
+ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
+ {
++      u32 tx_channel_count = priv->plat->tx_queues_to_use;
+       int status;
+-      int rxfifosz = priv->plat->rx_fifo_size;
++      u32 chan;
++
++      for (chan = 0; chan < tx_channel_count; chan++) {
++              struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+-      status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
+-      if (likely((status & handle_rx)) || (status & handle_tx)) {
+-              if (likely(napi_schedule_prep(&priv->napi))) {
+-                      stmmac_disable_dma_irq(priv);
+-                      __napi_schedule(&priv->napi);
++              status = priv->hw->dma->dma_interrupt(priv->ioaddr,
++                                                    &priv->xstats, chan);
++              if (likely((status & handle_rx)) || (status & handle_tx)) {
++                      if (likely(napi_schedule_prep(&rx_q->napi))) {
++                              stmmac_disable_dma_irq(priv, chan);
++                              __napi_schedule(&rx_q->napi);
++                      }
+               }
+-      }
+-      if (unlikely(status & tx_hard_error_bump_tc)) {
+-              /* Try to bump up the dma threshold on this failure */
+-              if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
+-                  (tc <= 256)) {
+-                      tc += 64;
+-                      if (priv->plat->force_thresh_dma_mode)
+-                              priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
+-                                                      rxfifosz);
+-                      else
+-                              priv->hw->dma->dma_mode(priv->ioaddr, tc,
+-                                                      SF_DMA_MODE, rxfifosz);
+-                      priv->xstats.threshold = tc;
++
++              if (unlikely(status & tx_hard_error_bump_tc)) {
++                      /* Try to bump up the dma threshold on this failure */
++                      if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
++                          (tc <= 256)) {
++                              tc += 64;
++                              if (priv->plat->force_thresh_dma_mode)
++                                      stmmac_set_dma_operation_mode(priv,
++                                                                    tc,
++                                                                    tc,
++                                                                    chan);
++                              else
++                                      stmmac_set_dma_operation_mode(priv,
++                                                                  tc,
++                                                                  SF_DMA_MODE,
++                                                                  chan);
++                              priv->xstats.threshold = tc;
++                      }
++              } else if (unlikely(status == tx_hard_error)) {
++                      stmmac_tx_err(priv, chan);
+               }
+-      } else if (unlikely(status == tx_hard_error))
+-              stmmac_tx_err(priv);
++      }
+ }
+ /**
+@@ -1574,6 +2114,13 @@ static void stmmac_check_ether_addr(stru
+  */
+ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+ {
++      u32 rx_channels_count = priv->plat->rx_queues_to_use;
++      u32 tx_channels_count = priv->plat->tx_queues_to_use;
++      struct stmmac_rx_queue *rx_q;
++      struct stmmac_tx_queue *tx_q;
++      u32 dummy_dma_rx_phy = 0;
++      u32 dummy_dma_tx_phy = 0;
++      u32 chan = 0;
+       int atds = 0;
+       int ret = 0;
+@@ -1591,19 +2138,49 @@ static int stmmac_init_dma_engine(struct
+               return ret;
+       }
+-      priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
+-                          priv->dma_tx_phy, priv->dma_rx_phy, atds);
+-
+       if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+-              priv->rx_tail_addr = priv->dma_rx_phy +
+-                          (DMA_RX_SIZE * sizeof(struct dma_desc));
+-              priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
+-                                             STMMAC_CHAN0);
+-
+-              priv->tx_tail_addr = priv->dma_tx_phy +
+-                          (DMA_TX_SIZE * sizeof(struct dma_desc));
+-              priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+-                                             STMMAC_CHAN0);
++              /* DMA Configuration */
++              priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
++                                  dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
++
++              /* DMA RX Channel Configuration */
++              for (chan = 0; chan < rx_channels_count; chan++) {
++                      rx_q = &priv->rx_queue[chan];
++
++                      priv->hw->dma->init_rx_chan(priv->ioaddr,
++                                                  priv->plat->dma_cfg,
++                                                  rx_q->dma_rx_phy, chan);
++
++                      rx_q->rx_tail_addr = rx_q->dma_rx_phy +
++                                  (DMA_RX_SIZE * sizeof(struct dma_desc));
++                      priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
++                                                     rx_q->rx_tail_addr,
++                                                     chan);
++              }
++
++              /* DMA TX Channel Configuration */
++              for (chan = 0; chan < tx_channels_count; chan++) {
++                      tx_q = &priv->tx_queue[chan];
++
++                      priv->hw->dma->init_chan(priv->ioaddr,
++                                               priv->plat->dma_cfg,
++                                               chan);
++
++                      priv->hw->dma->init_tx_chan(priv->ioaddr,
++                                                  priv->plat->dma_cfg,
++                                                  tx_q->dma_tx_phy, chan);
++
++                      tx_q->tx_tail_addr = tx_q->dma_tx_phy +
++                                  (DMA_TX_SIZE * sizeof(struct dma_desc));
++                      priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
++                                                     tx_q->tx_tail_addr,
++                                                     chan);
++              }
++      } else {
++              rx_q = &priv->rx_queue[chan];
++              tx_q = &priv->tx_queue[chan];
++              priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
++                                  tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
+       }
+       if (priv->plat->axi && priv->hw->dma->axi)
+@@ -1621,8 +2198,12 @@ static int stmmac_init_dma_engine(struct
+ static void stmmac_tx_timer(unsigned long data)
+ {
+       struct stmmac_priv *priv = (struct stmmac_priv *)data;
++      u32 tx_queues_count = priv->plat->tx_queues_to_use;
++      u32 queue;
+-      stmmac_tx_clean(priv);
++      /* let's scan all the tx queues */
++      for (queue = 0; queue < tx_queues_count; queue++)
++              stmmac_tx_clean(priv, queue);
+ }
+ /**
+@@ -1644,6 +2225,196 @@ static void stmmac_init_tx_coalesce(stru
+       add_timer(&priv->txtimer);
+ }
++static void stmmac_set_rings_length(struct stmmac_priv *priv)
++{
++      u32 rx_channels_count = priv->plat->rx_queues_to_use;
++      u32 tx_channels_count = priv->plat->tx_queues_to_use;
++      u32 chan;
++
++      /* set TX ring length */
++      if (priv->hw->dma->set_tx_ring_len) {
++              for (chan = 0; chan < tx_channels_count; chan++)
++                      priv->hw->dma->set_tx_ring_len(priv->ioaddr,
++                                                     (DMA_TX_SIZE - 1), chan);
++      }
++
++      /* set RX ring length */
++      if (priv->hw->dma->set_rx_ring_len) {
++              for (chan = 0; chan < rx_channels_count; chan++)
++                      priv->hw->dma->set_rx_ring_len(priv->ioaddr,
++                                                     (DMA_RX_SIZE - 1), chan);
++      }
++}
++
++/**
++ *  stmmac_set_tx_queue_weight - Set TX queue weight
++ *  @priv: driver private structure
++ *  Description: It is used for setting TX queues weight
++ */
++static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
++{
++      u32 tx_queues_count = priv->plat->tx_queues_to_use;
++      u32 weight;
++      u32 queue;
++
++      for (queue = 0; queue < tx_queues_count; queue++) {
++              weight = priv->plat->tx_queues_cfg[queue].weight;
++              priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
++      }
++}
++
++/**
++ *  stmmac_configure_cbs - Configure CBS in TX queue
++ *  @priv: driver private structure
++ *  Description: It is used for configuring CBS in AVB TX queues
++ */
++static void stmmac_configure_cbs(struct stmmac_priv *priv)
++{
++      u32 tx_queues_count = priv->plat->tx_queues_to_use;
++      u32 mode_to_use;
++      u32 queue;
++
++      /* queue 0 is reserved for legacy traffic */
++      for (queue = 1; queue < tx_queues_count; queue++) {
++              mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
++              if (mode_to_use == MTL_QUEUE_DCB)
++                      continue;
++
++              priv->hw->mac->config_cbs(priv->hw,
++                              priv->plat->tx_queues_cfg[queue].send_slope,
++                              priv->plat->tx_queues_cfg[queue].idle_slope,
++                              priv->plat->tx_queues_cfg[queue].high_credit,
++                              priv->plat->tx_queues_cfg[queue].low_credit,
++                              queue);
++      }
++}
++
++/**
++ *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
++ *  @priv: driver private structure
++ *  Description: It is used for mapping RX queues to RX dma channels
++ */
++static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
++{
++      u32 rx_queues_count = priv->plat->rx_queues_to_use;
++      u32 queue;
++      u32 chan;
++
++      for (queue = 0; queue < rx_queues_count; queue++) {
++              chan = priv->plat->rx_queues_cfg[queue].chan;
++              priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
++      }
++}
++
++/**
++ *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
++ *  @priv: driver private structure
++ *  Description: It is used for configuring the RX Queue Priority
++ */
++static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
++{
++      u32 rx_queues_count = priv->plat->rx_queues_to_use;
++      u32 queue;
++      u32 prio;
++
++      for (queue = 0; queue < rx_queues_count; queue++) {
++              if (!priv->plat->rx_queues_cfg[queue].use_prio)
++                      continue;
++
++              prio = priv->plat->rx_queues_cfg[queue].prio;
++              priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
++      }
++}
++
++/**
++ *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
++ *  @priv: driver private structure
++ *  Description: It is used for configuring the TX Queue Priority
++ */
++static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
++{
++      u32 tx_queues_count = priv->plat->tx_queues_to_use;
++      u32 queue;
++      u32 prio;
++
++      for (queue = 0; queue < tx_queues_count; queue++) {
++              if (!priv->plat->tx_queues_cfg[queue].use_prio)
++                      continue;
++
++              prio = priv->plat->tx_queues_cfg[queue].prio;
++              priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
++      }
++}
++
++/**
++ *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
++ *  @priv: driver private structure
++ *  Description: It is used for configuring the RX queue routing
++ */
++static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
++{
++      u32 rx_queues_count = priv->plat->rx_queues_to_use;
++      u32 queue;
++      u8 packet;
++
++      for (queue = 0; queue < rx_queues_count; queue++) {
++              /* no specific packet type routing specified for the queue */
++              if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
++                      continue;
++
++              packet = priv->plat->rx_queues_cfg[queue].pkt_route;
++              priv->hw->mac->rx_queue_prio(priv->hw, packet, queue);
++      }
++}
++
++/**
++ *  stmmac_mtl_configuration - Configure MTL
++ *  @priv: driver private structure
++ *  Description: It is used for configuring MTL
++ */
++static void stmmac_mtl_configuration(struct stmmac_priv *priv)
++{
++      u32 rx_queues_count = priv->plat->rx_queues_to_use;
++      u32 tx_queues_count = priv->plat->tx_queues_to_use;
++
++      if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
++              stmmac_set_tx_queue_weight(priv);
++
++      /* Configure MTL RX algorithms */
++      if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
++              priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
++                                              priv->plat->rx_sched_algorithm);
++
++      /* Configure MTL TX algorithms */
++      if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
++              priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
++                                              priv->plat->tx_sched_algorithm);
++
++      /* Configure CBS in AVB TX queues */
++      if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
++              stmmac_configure_cbs(priv);
++
++      /* Map RX MTL to DMA channels */
++      if (priv->hw->mac->map_mtl_to_dma)
++              stmmac_rx_queue_dma_chan_map(priv);
++
++      /* Enable MAC RX Queues */
++      if (priv->hw->mac->rx_queue_enable)
++              stmmac_mac_enable_rx_queues(priv);
++
++      /* Set RX priorities */
++      if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
++              stmmac_mac_config_rx_queues_prio(priv);
++
++      /* Set TX priorities */
++      if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
++              stmmac_mac_config_tx_queues_prio(priv);
++
++      /* Set RX routing */
++      if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
++              stmmac_mac_config_rx_queues_routing(priv);
++}
++
+ /**
+  * stmmac_hw_setup - setup mac in a usable state.
+  *  @dev : pointer to the device structure.
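
The MTL helpers above all walk per-queue tables filled in by the platform glue (priv->plat->rx_queues_cfg[] / tx_queues_cfg[] together with the rx/tx_queues_to_use counts). The stand-alone sketch below, separate from the patch, only illustrates the shape of that data: the stand-in structures and the two-TX-queue example values are assumptions, not the real definitions from include/linux/stmmac.h.

#include <stdio.h>

/* Simplified stand-ins for the per-queue platform fields used above. */
struct txq_cfg {
	unsigned int weight;            /* consumed by stmmac_set_tx_queue_weight() */
	int is_avb;                     /* non-DCB (AVB) queues get CBS             */
	unsigned int send_slope, idle_slope, high_credit, low_credit;
};

struct rxq_cfg {
	unsigned int chan;              /* RX queue -> DMA channel map              */
	int use_prio;
	unsigned int prio;
	unsigned int pkt_route;         /* 0 means no packet-type routing           */
};

int main(void)
{
	/* Hypothetical layout: queue 0 kept for legacy traffic, queue 1 for AVB. */
	struct txq_cfg tx[2] = {
		{ .weight = 10 },
		{ .weight = 1, .is_avb = 1, .idle_slope = 0x1000, .high_credit = 0x100000 },
	};
	struct rxq_cfg rx[1] = { { .chan = 0 } };
	unsigned int q;

	for (q = 0; q < 2; q++)
		printf("txq%u: weight %u, CBS %s\n", q, tx[q].weight,
		       tx[q].is_avb ? "configured" : "skipped");
	printf("rxq0 -> DMA channel %u, priority %s\n",
	       rx[0].chan, rx[0].use_prio ? "set" : "left at default");
	return 0;
}
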
+@@ -1659,6 +2430,9 @@ static void stmmac_init_tx_coalesce(stru
+ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+ {
+       struct stmmac_priv *priv = netdev_priv(dev);
++      u32 rx_cnt = priv->plat->rx_queues_to_use;
++      u32 tx_cnt = priv->plat->tx_queues_to_use;
++      u32 chan;
+       int ret;
+       /* DMA initialization and SW reset */
+@@ -1688,9 +2462,9 @@ static int stmmac_hw_setup(struct net_de
+       /* Initialize the MAC Core */
+       priv->hw->mac->core_init(priv->hw, dev->mtu);
+-      /* Initialize MAC RX Queues */
+-      if (priv->hw->mac->rx_queue_enable)
+-              stmmac_mac_enable_rx_queues(priv);
++      /* Initialize MTL*/
++      if (priv->synopsys_id >= DWMAC_CORE_4_00)
++              stmmac_mtl_configuration(priv);
+       ret = priv->hw->mac->rx_ipc(priv->hw);
+       if (!ret) {
+@@ -1700,10 +2474,7 @@ static int stmmac_hw_setup(struct net_de
+       }
+       /* Enable the MAC Rx/Tx */
+-      if (priv->synopsys_id >= DWMAC_CORE_4_00)
+-              stmmac_dwmac4_set_mac(priv->ioaddr, true);
+-      else
+-              stmmac_set_mac(priv->ioaddr, true);
++      priv->hw->mac->set_mac(priv->ioaddr, true);
+       /* Set the HW DMA mode and the COE */
+       stmmac_dma_operation_mode(priv);
+@@ -1711,6 +2482,10 @@ static int stmmac_hw_setup(struct net_de
+       stmmac_mmc_setup(priv);
+       if (init_ptp) {
++              ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
++              if (ret < 0)
++                      netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
++
+               ret = stmmac_init_ptp(priv);
+               if (ret == -EOPNOTSUPP)
+                       netdev_warn(priv->dev, "PTP not supported by HW\n");
+@@ -1725,35 +2500,37 @@ static int stmmac_hw_setup(struct net_de
+                           __func__);
+ #endif
+       /* Start the ball rolling... */
+-      netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
+-      priv->hw->dma->start_tx(priv->ioaddr);
+-      priv->hw->dma->start_rx(priv->ioaddr);
++      stmmac_start_all_dma(priv);
+       priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
+       if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
+               priv->rx_riwt = MAX_DMA_RIWT;
+-              priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
++              priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
+       }
+       if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
+               priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
+-      /*  set TX ring length */
+-      if (priv->hw->dma->set_tx_ring_len)
+-              priv->hw->dma->set_tx_ring_len(priv->ioaddr,
+-                                             (DMA_TX_SIZE - 1));
+-      /*  set RX ring length */
+-      if (priv->hw->dma->set_rx_ring_len)
+-              priv->hw->dma->set_rx_ring_len(priv->ioaddr,
+-                                             (DMA_RX_SIZE - 1));
++      /* set TX and RX rings length */
++      stmmac_set_rings_length(priv);
++
+       /* Enable TSO */
+-      if (priv->tso)
+-              priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
++      if (priv->tso) {
++              for (chan = 0; chan < tx_cnt; chan++)
++                      priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
++      }
+       return 0;
+ }
++static void stmmac_hw_teardown(struct net_device *dev)
++{
++      struct stmmac_priv *priv = netdev_priv(dev);
++
++      clk_disable_unprepare(priv->plat->clk_ptp_ref);
++}
++
+ /**
+  *  stmmac_open - open entry point of the driver
+  *  @dev : pointer to the device structure.
+@@ -1821,7 +2598,7 @@ static int stmmac_open(struct net_device
+               netdev_err(priv->dev,
+                          "%s: ERROR: allocating the IRQ %d (error: %d)\n",
+                          __func__, dev->irq, ret);
+-              goto init_error;
++              goto irq_error;
+       }
+       /* Request the Wake IRQ in case of another line is used for WoL */
+@@ -1848,8 +2625,8 @@ static int stmmac_open(struct net_device
+               }
+       }
+-      napi_enable(&priv->napi);
+-      netif_start_queue(dev);
++      stmmac_enable_all_queues(priv);
++      stmmac_start_all_queues(priv);
+       return 0;
+@@ -1858,7 +2635,12 @@ lpiirq_error:
+               free_irq(priv->wol_irq, dev);
+ wolirq_error:
+       free_irq(dev->irq, dev);
++irq_error:
++      if (dev->phydev)
++              phy_stop(dev->phydev);
++      del_timer_sync(&priv->txtimer);
++      stmmac_hw_teardown(dev);
+ init_error:
+       free_dma_desc_resources(priv);
+ dma_desc_error:
+@@ -1887,9 +2669,9 @@ static int stmmac_release(struct net_dev
+               phy_disconnect(dev->phydev);
+       }
+-      netif_stop_queue(dev);
++      stmmac_stop_all_queues(priv);
+-      napi_disable(&priv->napi);
++      stmmac_disable_all_queues(priv);
+       del_timer_sync(&priv->txtimer);
+@@ -1901,14 +2683,13 @@ static int stmmac_release(struct net_dev
+               free_irq(priv->lpi_irq, dev);
+       /* Stop TX/RX DMA and clear the descriptors */
+-      priv->hw->dma->stop_tx(priv->ioaddr);
+-      priv->hw->dma->stop_rx(priv->ioaddr);
++      stmmac_stop_all_dma(priv);
+       /* Release and free the Rx/Tx resources */
+       free_dma_desc_resources(priv);
+       /* Disable the MAC Rx/Tx */
+-      stmmac_set_mac(priv->ioaddr, false);
++      priv->hw->mac->set_mac(priv->ioaddr, false);
+       netif_carrier_off(dev);
+@@ -1927,22 +2708,24 @@ static int stmmac_release(struct net_dev
+  *  @des: buffer start address
+  *  @total_len: total length to fill in descriptors
+  *  @last_segmant: condition for the last descriptor
++ *  @queue: TX queue index
+  *  Description:
+  *  This function fills descriptor and request new descriptors according to
+  *  buffer length to fill
+  */
+ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
+-                               int total_len, bool last_segment)
++                               int total_len, bool last_segment, u32 queue)
+ {
++      struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+       struct dma_desc *desc;
+-      int tmp_len;
+       u32 buff_size;
++      int tmp_len;
+       tmp_len = total_len;
+       while (tmp_len > 0) {
+-              priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+-              desc = priv->dma_tx + priv->cur_tx;
++              tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
++              desc = tx_q->dma_tx + tx_q->cur_tx;
+               desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
+               buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
+@@ -1950,7 +2733,7 @@ static void stmmac_tso_allocator(struct
+               priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
+                       0, 1,
+-                      (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
++                      (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
+                       0, 0);
+               tmp_len -= TSO_MAX_BUFF_SIZE;
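
stmmac_tso_allocator() above slices the remaining payload into extra descriptors of at most TSO_MAX_BUFF_SIZE bytes each, which is also why the TSO xmit path sizes its availability check as len / TSO_MAX_BUFF_SIZE + 1. The stand-alone sketch below, separate from the patch, replays that arithmetic; the 16 KiB - 1 constant mirrors the driver's define but is restated here only as an assumption for the example.

#include <stdio.h>

#define TSO_MAX_BUFF_SIZE (16384 - 1)   /* per-descriptor limit, as in the driver */

int main(void)
{
	unsigned int total_len = 64000;  /* hypothetical GSO payload after the header */
	int tmp_len = (int)total_len;
	unsigned int used = 0;

	/* Same loop shape as stmmac_tso_allocator(). */
	while (tmp_len > 0) {
		unsigned int buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
					 TSO_MAX_BUFF_SIZE : (unsigned int)tmp_len;

		printf("extra desc %u: offset %u, %u bytes\n",
		       used, total_len - (unsigned int)tmp_len, buff_size);
		used++;
		tmp_len -= TSO_MAX_BUFF_SIZE;
	}

	/* The xmit path's conservative estimate of descriptors needed. */
	printf("used %u descriptors; estimate len/TSO_MAX_BUFF_SIZE + 1 = %u\n",
	       used, total_len / TSO_MAX_BUFF_SIZE + 1);
	return 0;
}
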
+@@ -1986,23 +2769,28 @@ static void stmmac_tso_allocator(struct
+  */
+ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+-      u32 pay_len, mss;
+-      int tmp_pay_len = 0;
++      struct dma_desc *desc, *first, *mss_desc = NULL;
+       struct stmmac_priv *priv = netdev_priv(dev);
+       int nfrags = skb_shinfo(skb)->nr_frags;
++      u32 queue = skb_get_queue_mapping(skb);
+       unsigned int first_entry, des;
+-      struct dma_desc *desc, *first, *mss_desc = NULL;
++      struct stmmac_tx_queue *tx_q;
++      int tmp_pay_len = 0;
++      u32 pay_len, mss;
+       u8 proto_hdr_len;
+       int i;
++      tx_q = &priv->tx_queue[queue];
++
+       /* Compute header lengths */
+       proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       /* Desc availability based on threshold should be enough safe */
+-      if (unlikely(stmmac_tx_avail(priv) <
++      if (unlikely(stmmac_tx_avail(priv, queue) <
+               (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
+-              if (!netif_queue_stopped(dev)) {
+-                      netif_stop_queue(dev);
++              if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
++                      netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
++                                                              queue));
+                       /* This is a hard error, log it. */
+                       netdev_err(priv->dev,
+                                  "%s: Tx Ring full when queue awake\n",
+@@ -2017,10 +2805,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
+       /* set new MSS value if needed */
+       if (mss != priv->mss) {
+-              mss_desc = priv->dma_tx + priv->cur_tx;
++              mss_desc = tx_q->dma_tx + tx_q->cur_tx;
+               priv->hw->desc->set_mss(mss_desc, mss);
+               priv->mss = mss;
+-              priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
++              tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+       }
+       if (netif_msg_tx_queued(priv)) {
+@@ -2030,9 +2818,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
+                       skb->data_len);
+       }
+-      first_entry = priv->cur_tx;
++      first_entry = tx_q->cur_tx;
+-      desc = priv->dma_tx + first_entry;
++      desc = tx_q->dma_tx + first_entry;
+       first = desc;
+       /* first descriptor: fill Headers on Buf1 */
+@@ -2041,9 +2829,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
+       if (dma_mapping_error(priv->device, des))
+               goto dma_map_err;
+-      priv->tx_skbuff_dma[first_entry].buf = des;
+-      priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+-      priv->tx_skbuff[first_entry] = skb;
++      tx_q->tx_skbuff_dma[first_entry].buf = des;
++      tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+       first->des0 = cpu_to_le32(des);
+@@ -2054,7 +2841,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+       /* If needed take extra descriptors to fill the remaining payload */
+       tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+-      stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
++      stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
+       /* Prepare fragments */
+       for (i = 0; i < nfrags; i++) {
+@@ -2063,24 +2850,34 @@ static netdev_tx_t stmmac_tso_xmit(struc
+               des = skb_frag_dma_map(priv->device, frag, 0,
+                                      skb_frag_size(frag),
+                                      DMA_TO_DEVICE);
++              if (dma_mapping_error(priv->device, des))
++                      goto dma_map_err;
+               stmmac_tso_allocator(priv, des, skb_frag_size(frag),
+-                                   (i == nfrags - 1));
++                                   (i == nfrags - 1), queue);
+-              priv->tx_skbuff_dma[priv->cur_tx].buf = des;
+-              priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
+-              priv->tx_skbuff[priv->cur_tx] = NULL;
+-              priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
++              tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
++              tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
++              tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
++              tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
+       }
+-      priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
++      tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
++
++      /* Only the last descriptor gets to point to the skb. */
++      tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+-      priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
++      /* We've used all descriptors we need for this skb, however,
++       * advance cur_tx so that it references a fresh descriptor.
++       * ndo_start_xmit will fill this descriptor the next time it's
++       * called and stmmac_tx_clean may clean up to this descriptor.
++       */
++      tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+-      if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
++      if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
+               netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
+                         __func__);
+-              netif_stop_queue(dev);
++              netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+       }
+       dev->stats.tx_bytes += skb->len;
+@@ -2112,7 +2909,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+       priv->hw->desc->prepare_tso_tx_desc(first, 1,
+                       proto_hdr_len,
+                       pay_len,
+-                      1, priv->tx_skbuff_dma[first_entry].last_segment,
++                      1, tx_q->tx_skbuff_dma[first_entry].last_segment,
+                       tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
+       /* If context desc is used to change MSS */
+@@ -2127,20 +2924,20 @@ static netdev_tx_t stmmac_tso_xmit(struc
+       if (netif_msg_pktdata(priv)) {
+               pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
+-                      __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+-                      priv->cur_tx, first, nfrags);
++                      __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
++                      tx_q->cur_tx, first, nfrags);
+-              priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
++              priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
+                                            0);
+               pr_info(">>> frame to be transmitted: ");
+               print_pkt(skb->data, skb_headlen(skb));
+       }
+-      netdev_sent_queue(dev, skb->len);
++      netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+-      priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+-                                     STMMAC_CHAN0);
++      priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
++                                     queue);
+       return NETDEV_TX_OK;
+@@ -2164,21 +2961,27 @@ static netdev_tx_t stmmac_xmit(struct sk
+       struct stmmac_priv *priv = netdev_priv(dev);
+       unsigned int nopaged_len = skb_headlen(skb);
+       int i, csum_insertion = 0, is_jumbo = 0;
++      u32 queue = skb_get_queue_mapping(skb);
+       int nfrags = skb_shinfo(skb)->nr_frags;
+-      unsigned int entry, first_entry;
++      int entry;
++      unsigned int first_entry;
+       struct dma_desc *desc, *first;
++      struct stmmac_tx_queue *tx_q;
+       unsigned int enh_desc;
+       unsigned int des;
++      tx_q = &priv->tx_queue[queue];
++
+       /* Manage oversized TCP frames for GMAC4 device */
+       if (skb_is_gso(skb) && priv->tso) {
+               if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+                       return stmmac_tso_xmit(skb, dev);
+       }
+-      if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+-              if (!netif_queue_stopped(dev)) {
+-                      netif_stop_queue(dev);
++      if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
++              if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
++                      netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
++                                                              queue));
+                       /* This is a hard error, log it. */
+                       netdev_err(priv->dev,
+                                  "%s: Tx Ring full when queue awake\n",
+@@ -2190,20 +2993,18 @@ static netdev_tx_t stmmac_xmit(struct sk
+       if (priv->tx_path_in_lpi_mode)
+               stmmac_disable_eee_mode(priv);
+-      entry = priv->cur_tx;
++      entry = tx_q->cur_tx;
+       first_entry = entry;
+       csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
+       if (likely(priv->extend_desc))
+-              desc = (struct dma_desc *)(priv->dma_etx + entry);
++              desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+       else
+-              desc = priv->dma_tx + entry;
++              desc = tx_q->dma_tx + entry;
+       first = desc;
+-      priv->tx_skbuff[first_entry] = skb;
+-
+       enh_desc = priv->plat->enh_desc;
+       /* To program the descriptors according to the size of the frame */
+       if (enh_desc)
+@@ -2211,7 +3012,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+       if (unlikely(is_jumbo) && likely(priv->synopsys_id <
+                                        DWMAC_CORE_4_00)) {
+-              entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
++              entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
+               if (unlikely(entry < 0))
+                       goto dma_map_err;
+       }
+@@ -2224,48 +3025,56 @@ static netdev_tx_t stmmac_xmit(struct sk
+               entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+               if (likely(priv->extend_desc))
+-                      desc = (struct dma_desc *)(priv->dma_etx + entry);
++                      desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+               else
+-                      desc = priv->dma_tx + entry;
++                      desc = tx_q->dma_tx + entry;
+               des = skb_frag_dma_map(priv->device, frag, 0, len,
+                                      DMA_TO_DEVICE);
+               if (dma_mapping_error(priv->device, des))
+                       goto dma_map_err; /* should reuse desc w/o issues */
+-              priv->tx_skbuff[entry] = NULL;
++              tx_q->tx_skbuff[entry] = NULL;
+-              priv->tx_skbuff_dma[entry].buf = des;
++              tx_q->tx_skbuff_dma[entry].buf = des;
+               if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+                       desc->des0 = cpu_to_le32(des);
+               else
+                       desc->des2 = cpu_to_le32(des);
+-              priv->tx_skbuff_dma[entry].map_as_page = true;
+-              priv->tx_skbuff_dma[entry].len = len;
+-              priv->tx_skbuff_dma[entry].last_segment = last_segment;
++              tx_q->tx_skbuff_dma[entry].map_as_page = true;
++              tx_q->tx_skbuff_dma[entry].len = len;
++              tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
+               /* Prepare the descriptor and set the own bit too */
+               priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
+-                                              priv->mode, 1, last_segment);
++                                              priv->mode, 1, last_segment,
++                                              skb->len);
+       }
+-      entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
++      /* Only the last descriptor gets to point to the skb. */
++      tx_q->tx_skbuff[entry] = skb;
+-      priv->cur_tx = entry;
++      /* We've used all descriptors we need for this skb, however,
++       * advance cur_tx so that it references a fresh descriptor.
++       * ndo_start_xmit will fill this descriptor the next time it's
++       * called and stmmac_tx_clean may clean up to this descriptor.
++       */
++      entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
++      tx_q->cur_tx = entry;
+       if (netif_msg_pktdata(priv)) {
+               void *tx_head;
+               netdev_dbg(priv->dev,
+                          "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
+-                         __func__, priv->cur_tx, priv->dirty_tx, first_entry,
++                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
+                          entry, first, nfrags);
+               if (priv->extend_desc)
+-                      tx_head = (void *)priv->dma_etx;
++                      tx_head = (void *)tx_q->dma_etx;
+               else
+-                      tx_head = (void *)priv->dma_tx;
++                      tx_head = (void *)tx_q->dma_tx;
+               priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
+@@ -2273,10 +3082,10 @@ static netdev_tx_t stmmac_xmit(struct sk
+               print_pkt(skb->data, skb->len);
+       }
+-      if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
++      if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
+               netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
+                         __func__);
+-              netif_stop_queue(dev);
++              netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+       }
+       dev->stats.tx_bytes += skb->len;
+@@ -2311,14 +3120,14 @@ static netdev_tx_t stmmac_xmit(struct sk
+               if (dma_mapping_error(priv->device, des))
+                       goto dma_map_err;
+-              priv->tx_skbuff_dma[first_entry].buf = des;
++              tx_q->tx_skbuff_dma[first_entry].buf = des;
+               if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+                       first->des0 = cpu_to_le32(des);
+               else
+                       first->des2 = cpu_to_le32(des);
+-              priv->tx_skbuff_dma[first_entry].len = nopaged_len;
+-              priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
++              tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
++              tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
+               if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+                            priv->hwts_tx_en)) {
+@@ -2330,7 +3139,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+               /* Prepare the first descriptor setting the OWN bit too */
+               priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
+                                               csum_insertion, priv->mode, 1,
+-                                              last_segment);
++                                              last_segment, skb->len);
+               /* The own bit must be the latest setting done when prepare the
+                * descriptor and then barrier is needed to make sure that
+@@ -2339,13 +3148,13 @@ static netdev_tx_t stmmac_xmit(struct sk
+               dma_wmb();
+       }
+-      netdev_sent_queue(dev, skb->len);
++      netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+       if (priv->synopsys_id < DWMAC_CORE_4_00)
+               priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+       else
+-              priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+-                                             STMMAC_CHAN0);
++              priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
++                                             queue);
+       return NETDEV_TX_OK;
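
The unchanged comment a few lines above keeps the existing rule: every descriptor field is written first, dma_wmb() orders those stores, and only then is the OWN bit handed to the hardware. The kernel-style sketch below, separate from the patch, only restates that publish order; the field roles are simplified assumptions rather than the driver's real descriptor layout.

#include <linux/types.h>   /* u32; dma_wmb() comes from the kernel barrier headers */

/* Simplified descriptor: real dwmac descriptors differ in layout and flags. */
struct example_desc {
	u32 des0;       /* buffer address                          */
	u32 des2;       /* length / control, everything except OWN */
	u32 des3;       /* status word that carries the OWN bit    */
};

static void example_publish(struct example_desc *p, u32 buf, u32 ctrl, u32 own)
{
	p->des0 = buf;  /* fill every field the DMA engine will read   */
	p->des2 = ctrl;

	dma_wmb();      /* make those stores visible before OWN flips  */

	p->des3 = own;  /* from here on the hardware may fetch the descriptor */
}
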
+@@ -2373,9 +3182,9 @@ static void stmmac_rx_vlan(struct net_de
+ }
+-static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
++static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
+ {
+-      if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
++      if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
+               return 0;
+       return 1;
+@@ -2384,30 +3193,33 @@ static inline int stmmac_rx_threshold_co
+ /**
+  * stmmac_rx_refill - refill used skb preallocated buffers
+  * @priv: driver private structure
++ * @queue: RX queue index
+  * Description : this is to reallocate the skb for the reception process
+  * that is based on zero-copy.
+  */
+-static inline void stmmac_rx_refill(struct stmmac_priv *priv)
++static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
+ {
++      struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
++      int dirty = stmmac_rx_dirty(priv, queue);
++      unsigned int entry = rx_q->dirty_rx;
++
+       int bfsize = priv->dma_buf_sz;
+-      unsigned int entry = priv->dirty_rx;
+-      int dirty = stmmac_rx_dirty(priv);
+       while (dirty-- > 0) {
+               struct dma_desc *p;
+               if (priv->extend_desc)
+-                      p = (struct dma_desc *)(priv->dma_erx + entry);
++                      p = (struct dma_desc *)(rx_q->dma_erx + entry);
+               else
+-                      p = priv->dma_rx + entry;
++                      p = rx_q->dma_rx + entry;
+-              if (likely(priv->rx_skbuff[entry] == NULL)) {
++              if (likely(!rx_q->rx_skbuff[entry])) {
+                       struct sk_buff *skb;
+                       skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
+                       if (unlikely(!skb)) {
+                               /* so for a while no zero-copy! */
+-                              priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
++                              rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
+                               if (unlikely(net_ratelimit()))
+                                       dev_err(priv->device,
+                                               "fail to alloc skb entry %d\n",
+@@ -2415,28 +3227,28 @@ static inline void stmmac_rx_refill(stru
+                               break;
+                       }
+-                      priv->rx_skbuff[entry] = skb;
+-                      priv->rx_skbuff_dma[entry] =
++                      rx_q->rx_skbuff[entry] = skb;
++                      rx_q->rx_skbuff_dma[entry] =
+                           dma_map_single(priv->device, skb->data, bfsize,
+                                          DMA_FROM_DEVICE);
+                       if (dma_mapping_error(priv->device,
+-                                            priv->rx_skbuff_dma[entry])) {
++                                            rx_q->rx_skbuff_dma[entry])) {
+                               netdev_err(priv->dev, "Rx DMA map failed\n");
+                               dev_kfree_skb(skb);
+                               break;
+                       }
+                       if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+-                              p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
++                              p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
+                               p->des1 = 0;
+                       } else {
+-                              p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
++                              p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
+                       }
+                       if (priv->hw->mode->refill_desc3)
+-                              priv->hw->mode->refill_desc3(priv, p);
++                              priv->hw->mode->refill_desc3(rx_q, p);
+-                      if (priv->rx_zeroc_thresh > 0)
+-                              priv->rx_zeroc_thresh--;
++                      if (rx_q->rx_zeroc_thresh > 0)
++                              rx_q->rx_zeroc_thresh--;
+                       netif_dbg(priv, rx_status, priv->dev,
+                                 "refill entry #%d\n", entry);
+@@ -2452,31 +3264,33 @@ static inline void stmmac_rx_refill(stru
+               entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
+       }
+-      priv->dirty_rx = entry;
++      rx_q->dirty_rx = entry;
+ }
+ /**
+  * stmmac_rx - manage the receive process
+  * @priv: driver private structure
+- * @limit: napi bugget.
++ * @limit: napi budget
++ * @queue: RX queue index.
+  * Description :  this the function called by the napi poll method.
+  * It gets all the frames inside the ring.
+  */
+-static int stmmac_rx(struct stmmac_priv *priv, int limit)
++static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+ {
+-      unsigned int entry = priv->cur_rx;
++      struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
++      unsigned int entry = rx_q->cur_rx;
++      int coe = priv->hw->rx_csum;
+       unsigned int next_entry;
+       unsigned int count = 0;
+-      int coe = priv->hw->rx_csum;
+       if (netif_msg_rx_status(priv)) {
+               void *rx_head;
+               netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
+               if (priv->extend_desc)
+-                      rx_head = (void *)priv->dma_erx;
++                      rx_head = (void *)rx_q->dma_erx;
+               else
+-                      rx_head = (void *)priv->dma_rx;
++                      rx_head = (void *)rx_q->dma_rx;
+               priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
+       }
+@@ -2486,9 +3300,9 @@ static int stmmac_rx(struct stmmac_priv
+               struct dma_desc *np;
+               if (priv->extend_desc)
+-                      p = (struct dma_desc *)(priv->dma_erx + entry);
++                      p = (struct dma_desc *)(rx_q->dma_erx + entry);
+               else
+-                      p = priv->dma_rx + entry;
++                      p = rx_q->dma_rx + entry;
+               /* read the status of the incoming frame */
+               status = priv->hw->desc->rx_status(&priv->dev->stats,
+@@ -2499,20 +3313,20 @@ static int stmmac_rx(struct stmmac_priv
+               count++;
+-              priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
+-              next_entry = priv->cur_rx;
++              rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
++              next_entry = rx_q->cur_rx;
+               if (priv->extend_desc)
+-                      np = (struct dma_desc *)(priv->dma_erx + next_entry);
++                      np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
+               else
+-                      np = priv->dma_rx + next_entry;
++                      np = rx_q->dma_rx + next_entry;
+               prefetch(np);
+               if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
+                       priv->hw->desc->rx_extended_status(&priv->dev->stats,
+                                                          &priv->xstats,
+-                                                         priv->dma_erx +
++                                                         rx_q->dma_erx +
+                                                          entry);
+               if (unlikely(status == discard_frame)) {
+                       priv->dev->stats.rx_errors++;
+@@ -2522,9 +3336,9 @@ static int stmmac_rx(struct stmmac_priv
+                                * them in stmmac_rx_refill() function so that
+                                * device can reuse it.
+                                */
+-                              priv->rx_skbuff[entry] = NULL;
++                              rx_q->rx_skbuff[entry] = NULL;
+                               dma_unmap_single(priv->device,
+-                                               priv->rx_skbuff_dma[entry],
++                                               rx_q->rx_skbuff_dma[entry],
+                                                priv->dma_buf_sz,
+                                                DMA_FROM_DEVICE);
+                       }
+@@ -2572,7 +3386,7 @@ static int stmmac_rx(struct stmmac_priv
+                        */
+                       if (unlikely(!priv->plat->has_gmac4 &&
+                                    ((frame_len < priv->rx_copybreak) ||
+-                                   stmmac_rx_threshold_count(priv)))) {
++                                   stmmac_rx_threshold_count(rx_q)))) {
+                               skb = netdev_alloc_skb_ip_align(priv->dev,
+                                                               frame_len);
+                               if (unlikely(!skb)) {
+@@ -2584,21 +3398,21 @@ static int stmmac_rx(struct stmmac_priv
+                               }
+                               dma_sync_single_for_cpu(priv->device,
+-                                                      priv->rx_skbuff_dma
++                                                      rx_q->rx_skbuff_dma
+                                                       [entry], frame_len,
+                                                       DMA_FROM_DEVICE);
+                               skb_copy_to_linear_data(skb,
+-                                                      priv->
++                                                      rx_q->
+                                                       rx_skbuff[entry]->data,
+                                                       frame_len);
+                               skb_put(skb, frame_len);
+                               dma_sync_single_for_device(priv->device,
+-                                                         priv->rx_skbuff_dma
++                                                         rx_q->rx_skbuff_dma
+                                                          [entry], frame_len,
+                                                          DMA_FROM_DEVICE);
+                       } else {
+-                              skb = priv->rx_skbuff[entry];
++                              skb = rx_q->rx_skbuff[entry];
+                               if (unlikely(!skb)) {
+                                       netdev_err(priv->dev,
+                                                  "%s: Inconsistent Rx chain\n",
+@@ -2607,12 +3421,12 @@ static int stmmac_rx(struct stmmac_priv
+                                       break;
+                               }
+                               prefetch(skb->data - NET_IP_ALIGN);
+-                              priv->rx_skbuff[entry] = NULL;
+-                              priv->rx_zeroc_thresh++;
++                              rx_q->rx_skbuff[entry] = NULL;
++                              rx_q->rx_zeroc_thresh++;
+                               skb_put(skb, frame_len);
+                               dma_unmap_single(priv->device,
+-                                               priv->rx_skbuff_dma[entry],
++                                               rx_q->rx_skbuff_dma[entry],
+                                                priv->dma_buf_sz,
+                                                DMA_FROM_DEVICE);
+                       }
+@@ -2634,7 +3448,7 @@ static int stmmac_rx(struct stmmac_priv
+                       else
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+-                      napi_gro_receive(&priv->napi, skb);
++                      napi_gro_receive(&rx_q->napi, skb);
+                       priv->dev->stats.rx_packets++;
+                       priv->dev->stats.rx_bytes += frame_len;
+@@ -2642,7 +3456,7 @@ static int stmmac_rx(struct stmmac_priv
+               entry = next_entry;
+       }
+-      stmmac_rx_refill(priv);
++      stmmac_rx_refill(priv, queue);
+       priv->xstats.rx_pkt_n += count;
+@@ -2659,16 +3473,24 @@ static int stmmac_rx(struct stmmac_priv
+  */
+ static int stmmac_poll(struct napi_struct *napi, int budget)
+ {
+-      struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
++      struct stmmac_rx_queue *rx_q =
++              container_of(napi, struct stmmac_rx_queue, napi);
++      struct stmmac_priv *priv = rx_q->priv_data;
++      u32 tx_count = priv->plat->tx_queues_to_use;
++      u32 chan = rx_q->queue_index;
+       int work_done = 0;
++      u32 queue;
+       priv->xstats.napi_poll++;
+-      stmmac_tx_clean(priv);
+-      work_done = stmmac_rx(priv, budget);
++      /* check all the queues */
++      for (queue = 0; queue < tx_count; queue++)
++              stmmac_tx_clean(priv, queue);
++
++      work_done = stmmac_rx(priv, budget, rx_q->queue_index);
+       if (work_done < budget) {
+               napi_complete_done(napi, work_done);
+-              stmmac_enable_dma_irq(priv);
++              stmmac_enable_dma_irq(priv, chan);
+       }
+       return work_done;
+ }
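
stmmac_poll() above recovers its RX queue from the embedded napi member via container_of() and then re-enables only that channel's DMA interrupt. The stand-alone sketch below, separate from the patch, restates the container_of() idea with a local macro and stand-in structures that are assumptions for illustration only.

#include <stdio.h>
#include <stddef.h>

/* Same offsetof trick the kernel's container_of() is built on. */
#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct napi_stub { int weight; };

struct rx_queue {
	unsigned int queue_index;
	struct napi_stub napi;  /* embedded: one NAPI context per RX queue */
};

int main(void)
{
	struct rx_queue q = { .queue_index = 2 };
	struct napi_stub *n = &q.napi;           /* what the poll callback receives */
	struct rx_queue *rx_q =
		container_of_sketch(n, struct rx_queue, napi);  /* back to the queue */

	printf("poll handler recovered RX queue %u\n", rx_q->queue_index);
	return 0;
}
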
+@@ -2684,9 +3506,12 @@ static int stmmac_poll(struct napi_struc
+ static void stmmac_tx_timeout(struct net_device *dev)
+ {
+       struct stmmac_priv *priv = netdev_priv(dev);
++      u32 tx_count = priv->plat->tx_queues_to_use;
++      u32 chan;
+       /* Clear Tx resources and restart transmitting again */
+-      stmmac_tx_err(priv);
++      for (chan = 0; chan < tx_count; chan++)
++              stmmac_tx_err(priv, chan);
+ }
+ /**
+@@ -2809,6 +3634,12 @@ static irqreturn_t stmmac_interrupt(int
+ {
+       struct net_device *dev = (struct net_device *)dev_id;
+       struct stmmac_priv *priv = netdev_priv(dev);
++      u32 rx_cnt = priv->plat->rx_queues_to_use;
++      u32 tx_cnt = priv->plat->tx_queues_to_use;
++      u32 queues_count;
++      u32 queue;
++
++      queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
+       if (priv->irq_wake)
+               pm_wakeup_event(priv->device, 0);
+@@ -2822,16 +3653,30 @@ static irqreturn_t stmmac_interrupt(int
+       if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
+               int status = priv->hw->mac->host_irq_status(priv->hw,
+                                                           &priv->xstats);
++
+               if (unlikely(status)) {
+                       /* For LPI we need to save the tx status */
+                       if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
+                               priv->tx_path_in_lpi_mode = true;
+                       if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
+                               priv->tx_path_in_lpi_mode = false;
+-                      if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
+-                              priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+-                                                      priv->rx_tail_addr,
+-                                                      STMMAC_CHAN0);
++              }
++
++              if (priv->synopsys_id >= DWMAC_CORE_4_00) {
++                      for (queue = 0; queue < queues_count; queue++) {
++                              struct stmmac_rx_queue *rx_q =
++                              &priv->rx_queue[queue];
++
++                              status |=
++                              priv->hw->mac->host_mtl_irq_status(priv->hw,
++                                                                 queue);
++
++                              if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
++                                  priv->hw->dma->set_rx_tail_ptr)
++                                      priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
++                                                              rx_q->rx_tail_addr,
++                                                              queue);
++                      }
+               }
+               /* PCS link status */
+@@ -2916,7 +3761,7 @@ static void sysfs_display_ring(void *hea
+                       ep++;
+               } else {
+                       seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+-                                 i, (unsigned int)virt_to_phys(ep),
++                                 i, (unsigned int)virt_to_phys(p),
+                                  le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+                                  le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+                       p++;
+@@ -2929,17 +3774,40 @@ static int stmmac_sysfs_ring_read(struct
+ {
+       struct net_device *dev = seq->private;
+       struct stmmac_priv *priv = netdev_priv(dev);
++      u32 rx_count = priv->plat->rx_queues_to_use;
++      u32 tx_count = priv->plat->tx_queues_to_use;
++      u32 queue;
+-      if (priv->extend_desc) {
+-              seq_printf(seq, "Extended RX descriptor ring:\n");
+-              sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
+-              seq_printf(seq, "Extended TX descriptor ring:\n");
+-              sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
+-      } else {
+-              seq_printf(seq, "RX descriptor ring:\n");
+-              sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
+-              seq_printf(seq, "TX descriptor ring:\n");
+-              sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
++      for (queue = 0; queue < rx_count; queue++) {
++              struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
++
++              seq_printf(seq, "RX Queue %d:\n", queue);
++
++              if (priv->extend_desc) {
++                      seq_printf(seq, "Extended descriptor ring:\n");
++                      sysfs_display_ring((void *)rx_q->dma_erx,
++                                         DMA_RX_SIZE, 1, seq);
++              } else {
++                      seq_printf(seq, "Descriptor ring:\n");
++                      sysfs_display_ring((void *)rx_q->dma_rx,
++                                         DMA_RX_SIZE, 0, seq);
++              }
++      }
++
++      for (queue = 0; queue < tx_count; queue++) {
++              struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
++
++              seq_printf(seq, "TX Queue %d:\n", queue);
++
++              if (priv->extend_desc) {
++                      seq_printf(seq, "Extended descriptor ring:\n");
++                      sysfs_display_ring((void *)tx_q->dma_etx,
++                                         DMA_TX_SIZE, 1, seq);
++              } else {
++                      seq_printf(seq, "Descriptor ring:\n");
++                      sysfs_display_ring((void *)tx_q->dma_tx,
++                                         DMA_TX_SIZE, 0, seq);
++              }
+       }
+       return 0;
+@@ -3222,11 +4090,14 @@ int stmmac_dvr_probe(struct device *devi
+                    struct plat_stmmacenet_data *plat_dat,
+                    struct stmmac_resources *res)
+ {
+-      int ret = 0;
+       struct net_device *ndev = NULL;
+       struct stmmac_priv *priv;
++      int ret = 0;
++      u32 queue;
+-      ndev = alloc_etherdev(sizeof(struct stmmac_priv));
++      ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
++                                MTL_MAX_TX_QUEUES,
++                                MTL_MAX_RX_QUEUES);
+       if (!ndev)
+               return -ENOMEM;
+@@ -3268,6 +4139,10 @@ int stmmac_dvr_probe(struct device *devi
+       if (ret)
+               goto error_hw_init;
++      /* Configure real RX and TX queues */
++      netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
++      netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
++
+       ndev->netdev_ops = &stmmac_netdev_ops;
+       ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+@@ -3300,7 +4175,12 @@ int stmmac_dvr_probe(struct device *devi
+                        "Enable RX Mitigation via HW Watchdog Timer\n");
+       }
+-      netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
++      for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
++              struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
++
++              netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
++                             (8 * priv->plat->rx_queues_to_use));
++      }
+       spin_lock_init(&priv->lock);
+@@ -3345,7 +4225,11 @@ error_netdev_register:
+           priv->hw->pcs != STMMAC_PCS_RTBI)
+               stmmac_mdio_unregister(ndev);
+ error_mdio_register:
+-      netif_napi_del(&priv->napi);
++      for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
++              struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
++
++              netif_napi_del(&rx_q->napi);
++      }
+ error_hw_init:
+       free_netdev(ndev);
+@@ -3366,10 +4250,9 @@ int stmmac_dvr_remove(struct device *dev
+       netdev_info(priv->dev, "%s: removing driver", __func__);
+-      priv->hw->dma->stop_rx(priv->ioaddr);
+-      priv->hw->dma->stop_tx(priv->ioaddr);
++      stmmac_stop_all_dma(priv);
+-      stmmac_set_mac(priv->ioaddr, false);
++      priv->hw->mac->set_mac(priv->ioaddr, false);
+       netif_carrier_off(ndev);
+       unregister_netdev(ndev);
+       if (priv->plat->stmmac_rst)
+@@ -3408,20 +4291,19 @@ int stmmac_suspend(struct device *dev)
+       spin_lock_irqsave(&priv->lock, flags);
+       netif_device_detach(ndev);
+-      netif_stop_queue(ndev);
++      stmmac_stop_all_queues(priv);
+-      napi_disable(&priv->napi);
++      stmmac_disable_all_queues(priv);
+       /* Stop TX/RX DMA */
+-      priv->hw->dma->stop_tx(priv->ioaddr);
+-      priv->hw->dma->stop_rx(priv->ioaddr);
++      stmmac_stop_all_dma(priv);
+       /* Enable Power down mode by programming the PMT regs */
+       if (device_may_wakeup(priv->device)) {
+               priv->hw->mac->pmt(priv->hw, priv->wolopts);
+               priv->irq_wake = 1;
+       } else {
+-              stmmac_set_mac(priv->ioaddr, false);
++              priv->hw->mac->set_mac(priv->ioaddr, false);
+               pinctrl_pm_select_sleep_state(priv->device);
+               /* Disable clock in case of PWM is off */
+               clk_disable(priv->plat->pclk);
+@@ -3437,6 +4319,31 @@ int stmmac_suspend(struct device *dev)
+ EXPORT_SYMBOL_GPL(stmmac_suspend);
+ /**
++ * stmmac_reset_queues_param - reset queue parameters
++ * @dev: device pointer
++ */
++static void stmmac_reset_queues_param(struct stmmac_priv *priv)
++{
++      u32 rx_cnt = priv->plat->rx_queues_to_use;
++      u32 tx_cnt = priv->plat->tx_queues_to_use;
++      u32 queue;
++
++      for (queue = 0; queue < rx_cnt; queue++) {
++              struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
++
++              rx_q->cur_rx = 0;
++              rx_q->dirty_rx = 0;
++      }
++
++      for (queue = 0; queue < tx_cnt; queue++) {
++              struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
++
++              tx_q->cur_tx = 0;
++              tx_q->dirty_tx = 0;
++      }
++}
++
++/**
+  * stmmac_resume - resume callback
+  * @dev: device pointer
+  * Description: when resume this function is invoked to setup the DMA and CORE
+@@ -3476,10 +4383,8 @@ int stmmac_resume(struct device *dev)
+       spin_lock_irqsave(&priv->lock, flags);
+-      priv->cur_rx = 0;
+-      priv->dirty_rx = 0;
+-      priv->dirty_tx = 0;
+-      priv->cur_tx = 0;
++      stmmac_reset_queues_param(priv);
++
+       /* reset private mss value to force mss context settings at
+        * next tso xmit (only used for gmac4).
+        */
+@@ -3491,9 +4396,9 @@ int stmmac_resume(struct device *dev)
+       stmmac_init_tx_coalesce(priv);
+       stmmac_set_rx_mode(ndev);
+-      napi_enable(&priv->napi);
++      stmmac_enable_all_queues(priv);
+-      netif_start_queue(ndev);
++      stmmac_start_all_queues(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+@@ -32,6 +32,7 @@
+  */
+ struct stmmac_pci_dmi_data {
+       const char *name;
++      const char *asset_tag;
+       unsigned int func;
+       int phy_addr;
+ };
+@@ -46,6 +47,7 @@ struct stmmac_pci_info {
+ static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
+ {
+       const char *name = dmi_get_system_info(DMI_BOARD_NAME);
++      const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG);
+       unsigned int func = PCI_FUNC(info->pdev->devfn);
+       struct stmmac_pci_dmi_data *dmi;
+@@ -57,18 +59,19 @@ static int stmmac_pci_find_phy_addr(stru
+               return 1;
+       for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) {
+-              if (!strcmp(dmi->name, name) && dmi->func == func)
++              if (!strcmp(dmi->name, name) && dmi->func == func) {
++                      /* If asset tag is provided, match on it as well. */
++                      if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag))
++                              continue;
+                       return dmi->phy_addr;
++              }
+       }
+       return -ENODEV;
+ }
+-static void stmmac_default_data(struct plat_stmmacenet_data *plat)
++static void common_default_data(struct plat_stmmacenet_data *plat)
+ {
+-      plat->bus_id = 1;
+-      plat->phy_addr = 0;
+-      plat->interface = PHY_INTERFACE_MODE_GMII;
+       plat->clk_csr = 2;      /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+       plat->has_gmac = 1;
+       plat->force_sf_dma_mode = 1;
+@@ -76,10 +79,6 @@ static void stmmac_default_data(struct p
+       plat->mdio_bus_data->phy_reset = NULL;
+       plat->mdio_bus_data->phy_mask = 0;
+-      plat->dma_cfg->pbl = 32;
+-      plat->dma_cfg->pblx8 = true;
+-      /* TODO: AXI */
+-
+       /* Set default value for multicast hash bins */
+       plat->multicast_filter_bins = HASH_TABLE_SIZE;
+@@ -88,6 +87,31 @@ static void stmmac_default_data(struct p
+       /* Set the maxmtu to a default of JUMBO_LEN */
+       plat->maxmtu = JUMBO_LEN;
++
++      /* Set default number of RX and TX queues to use */
++      plat->tx_queues_to_use = 1;
++      plat->rx_queues_to_use = 1;
++
++      /* Disable Priority config by default */
++      plat->tx_queues_cfg[0].use_prio = false;
++      plat->rx_queues_cfg[0].use_prio = false;
++
++      /* Disable RX queues routing by default */
++      plat->rx_queues_cfg[0].pkt_route = 0x0;
++}
++
++static void stmmac_default_data(struct plat_stmmacenet_data *plat)
++{
++      /* Set common default data first */
++      common_default_data(plat);
++
++      plat->bus_id = 1;
++      plat->phy_addr = 0;
++      plat->interface = PHY_INTERFACE_MODE_GMII;
++
++      plat->dma_cfg->pbl = 32;
++      plat->dma_cfg->pblx8 = true;
++      /* TODO: AXI */
+ }
+ static int quark_default_data(struct plat_stmmacenet_data *plat,
+@@ -96,6 +120,9 @@ static int quark_default_data(struct pla
+       struct pci_dev *pdev = info->pdev;
+       int ret;
++      /* Set common default data first */
++      common_default_data(plat);
++
+       /*
+        * Refuse to load the driver and register net device if MAC controller
+        * does not connect to any PHY interface.
+@@ -107,27 +134,12 @@ static int quark_default_data(struct pla
+       plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+       plat->phy_addr = ret;
+       plat->interface = PHY_INTERFACE_MODE_RMII;
+-      plat->clk_csr = 2;
+-      plat->has_gmac = 1;
+-      plat->force_sf_dma_mode = 1;
+-
+-      plat->mdio_bus_data->phy_reset = NULL;
+-      plat->mdio_bus_data->phy_mask = 0;
+       plat->dma_cfg->pbl = 16;
+       plat->dma_cfg->pblx8 = true;
+       plat->dma_cfg->fixed_burst = 1;
+       /* AXI (TODO) */
+-      /* Set default value for multicast hash bins */
+-      plat->multicast_filter_bins = HASH_TABLE_SIZE;
+-
+-      /* Set default value for unicast filter entries */
+-      plat->unicast_filter_entries = 1;
+-
+-      /* Set the maxmtu to a default of JUMBO_LEN */
+-      plat->maxmtu = JUMBO_LEN;
+-
+       return 0;
+ }
+@@ -142,6 +154,24 @@ static struct stmmac_pci_dmi_data quark_
+               .func = 6,
+               .phy_addr = 1,
+       },
++      {
++              .name = "SIMATIC IOT2000",
++              .asset_tag = "6ES7647-0AA00-0YA2",
++              .func = 6,
++              .phy_addr = 1,
++      },
++      {
++              .name = "SIMATIC IOT2000",
++              .asset_tag = "6ES7647-0AA00-1YA2",
++              .func = 6,
++              .phy_addr = 1,
++      },
++      {
++              .name = "SIMATIC IOT2000",
++              .asset_tag = "6ES7647-0AA00-1YA2",
++              .func = 7,
++              .phy_addr = 1,
++      },
+       {}
+ };
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -108,7 +108,7 @@ static struct stmmac_axi *stmmac_axi_set
+       if (!np)
+               return NULL;
+-      axi = kzalloc(sizeof(*axi), GFP_KERNEL);
++      axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
+       if (!axi) {
+               of_node_put(np);
+               return ERR_PTR(-ENOMEM);
+@@ -132,6 +132,155 @@ static struct stmmac_axi *stmmac_axi_set
+ }
+ /**
++ * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
++ * @pdev: platform device
++ */
++static void stmmac_mtl_setup(struct platform_device *pdev,
++                           struct plat_stmmacenet_data *plat)
++{
++      struct device_node *q_node;
++      struct device_node *rx_node;
++      struct device_node *tx_node;
++      u8 queue = 0;
++
++      /* For backwards-compatibility with device trees that don't have any
++       * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
++       * to a single RX queue and a single TX queue.
++       */
++      plat->rx_queues_to_use = 1;
++      plat->tx_queues_to_use = 1;
++
++      rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
++      if (!rx_node)
++              return;
++
++      tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
++      if (!tx_node) {
++              of_node_put(rx_node);
++              return;
++      }
++
++      /* Processing RX queues common config */
++      if (of_property_read_u8(rx_node, "snps,rx-queues-to-use",
++                              &plat->rx_queues_to_use))
++              plat->rx_queues_to_use = 1;
++
++      if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
++              plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
++      else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
++              plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
++      else
++              plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
++
++      /* Processing individual RX queue config */
++      for_each_child_of_node(rx_node, q_node) {
++              if (queue >= plat->rx_queues_to_use)
++                      break;
++
++              if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
++                      plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
++              else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
++                      plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
++              else
++                      plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
++
++              if (of_property_read_u8(q_node, "snps,map-to-dma-channel",
++                                      &plat->rx_queues_cfg[queue].chan))
++                      plat->rx_queues_cfg[queue].chan = queue;
++              /* TODO: Dynamic mapping to be included in the future */
++
++              if (of_property_read_u32(q_node, "snps,priority",
++                                      &plat->rx_queues_cfg[queue].prio)) {
++                      plat->rx_queues_cfg[queue].prio = 0;
++                      plat->rx_queues_cfg[queue].use_prio = false;
++              } else {
++                      plat->rx_queues_cfg[queue].use_prio = true;
++              }
++
++              /* RX queue specific packet type routing */
++              if (of_property_read_bool(q_node, "snps,route-avcp"))
++                      plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
++              else if (of_property_read_bool(q_node, "snps,route-ptp"))
++                      plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
++              else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
++                      plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
++              else if (of_property_read_bool(q_node, "snps,route-up"))
++                      plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
++              else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
++                      plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
++              else
++                      plat->rx_queues_cfg[queue].pkt_route = 0x0;
++
++              queue++;
++      }
++
++      /* Processing TX queues common config */
++      if (of_property_read_u8(tx_node, "snps,tx-queues-to-use",
++                              &plat->tx_queues_to_use))
++              plat->tx_queues_to_use = 1;
++
++      if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
++              plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
++      else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
++              plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
++      else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
++              plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
++      else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
++              plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
++      else
++              plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
++
++      queue = 0;
++
++      /* Processing individual TX queue config */
++      for_each_child_of_node(tx_node, q_node) {
++              if (queue >= plat->tx_queues_to_use)
++                      break;
++
++              if (of_property_read_u8(q_node, "snps,weight",
++                                      &plat->tx_queues_cfg[queue].weight))
++                      plat->tx_queues_cfg[queue].weight = 0x10 + queue;
++
++              if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
++                      plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
++              } else if (of_property_read_bool(q_node,
++                                               "snps,avb-algorithm")) {
++                      plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
++
++                      /* Credit Base Shaper parameters used by AVB */
++                      if (of_property_read_u32(q_node, "snps,send_slope",
++                              &plat->tx_queues_cfg[queue].send_slope))
++                              plat->tx_queues_cfg[queue].send_slope = 0x0;
++                      if (of_property_read_u32(q_node, "snps,idle_slope",
++                              &plat->tx_queues_cfg[queue].idle_slope))
++                              plat->tx_queues_cfg[queue].idle_slope = 0x0;
++                      if (of_property_read_u32(q_node, "snps,high_credit",
++                              &plat->tx_queues_cfg[queue].high_credit))
++                              plat->tx_queues_cfg[queue].high_credit = 0x0;
++                      if (of_property_read_u32(q_node, "snps,low_credit",
++                              &plat->tx_queues_cfg[queue].low_credit))
++                              plat->tx_queues_cfg[queue].low_credit = 0x0;
++              } else {
++                      plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
++              }
++
++              if (of_property_read_u32(q_node, "snps,priority",
++                                      &plat->tx_queues_cfg[queue].prio)) {
++                      plat->tx_queues_cfg[queue].prio = 0;
++                      plat->tx_queues_cfg[queue].use_prio = false;
++              } else {
++                      plat->tx_queues_cfg[queue].use_prio = true;
++              }
++
++              queue++;
++      }
++
++      of_node_put(rx_node);
++      of_node_put(tx_node);
++      of_node_put(q_node);
++}
++
++/**
+  * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
+  * @plat: driver data platform structure
+  * @np: device tree node
+@@ -340,6 +489,8 @@ stmmac_probe_config_dt(struct platform_d
+       plat->axi = stmmac_axi_setup(pdev);
++      stmmac_mtl_setup(pdev, plat);
++
+       /* clock setup */
+       plat->stmmac_clk = devm_clk_get(&pdev->dev,
+                                       STMMAC_RESOURCE_NAME);
+@@ -359,13 +510,12 @@ stmmac_probe_config_dt(struct platform_d
+       clk_prepare_enable(plat->pclk);
+       /* Fall-back to main clock in case of no PTP ref is passed */
+-      plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref");
++      plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
+       if (IS_ERR(plat->clk_ptp_ref)) {
+               plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
+               plat->clk_ptp_ref = NULL;
+               dev_warn(&pdev->dev, "PTP uses main clock\n");
+       } else {
+-              clk_prepare_enable(plat->clk_ptp_ref);
+               plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
+               dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
+       }
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+@@ -59,7 +59,8 @@
+ /* Enable Snapshot for Messages Relevant to Master */
+ #define       PTP_TCR_TSMSTRENA       BIT(15)
+ /* Select PTP packets for Taking Snapshots */
+-#define       PTP_TCR_SNAPTYPSEL_1    GENMASK(17, 16)
++#define       PTP_TCR_SNAPTYPSEL_1    BIT(16)
++#define       PTP_GMAC4_TCR_SNAPTYPSEL_1      GENMASK(17, 16)
+ /* Enable MAC address for PTP Frame Filtering */
+ #define       PTP_TCR_TSENMACADDR     BIT(18)
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -28,6 +28,9 @@
+ #include <linux/platform_device.h>
++#define MTL_MAX_RX_QUEUES     8
++#define MTL_MAX_TX_QUEUES     8
++
+ #define STMMAC_RX_COE_NONE    0
+ #define STMMAC_RX_COE_TYPE1   1
+ #define STMMAC_RX_COE_TYPE2   2
+@@ -44,6 +47,18 @@
+ #define       STMMAC_CSR_150_250M     0x4     /* MDC = clk_scr_i/102 */
+ #define       STMMAC_CSR_250_300M     0x5     /* MDC = clk_scr_i/122 */
++/* MTL algorithms identifiers */
++#define MTL_TX_ALGORITHM_WRR  0x0
++#define MTL_TX_ALGORITHM_WFQ  0x1
++#define MTL_TX_ALGORITHM_DWRR 0x2
++#define MTL_TX_ALGORITHM_SP   0x3
++#define MTL_RX_ALGORITHM_SP   0x4
++#define MTL_RX_ALGORITHM_WSP  0x5
++
++/* RX/TX Queue Mode */
++#define MTL_QUEUE_AVB         0x0
++#define MTL_QUEUE_DCB         0x1
++
+ /* The MDC clock could be set higher than the IEEE 802.3
+  * specified frequency limit 0f 2.5 MHz, by programming a clock divider
+  * of value different than the above defined values. The resultant MDIO
+@@ -109,6 +124,26 @@ struct stmmac_axi {
+       bool axi_rb;
+ };
++struct stmmac_rxq_cfg {
++      u8 mode_to_use;
++      u8 chan;
++      u8 pkt_route;
++      bool use_prio;
++      u32 prio;
++};
++
++struct stmmac_txq_cfg {
++      u8 weight;
++      u8 mode_to_use;
++      /* Credit Base Shaper parameters */
++      u32 send_slope;
++      u32 idle_slope;
++      u32 high_credit;
++      u32 low_credit;
++      bool use_prio;
++      u32 prio;
++};
++
+ struct plat_stmmacenet_data {
+       int bus_id;
+       int phy_addr;
+@@ -133,6 +168,12 @@ struct plat_stmmacenet_data {
+       int unicast_filter_entries;
+       int tx_fifo_size;
+       int rx_fifo_size;
++      u8 rx_queues_to_use;
++      u8 tx_queues_to_use;
++      u8 rx_sched_algorithm;
++      u8 tx_sched_algorithm;
++      struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
++      struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
+       void (*fix_mac_speed)(void *priv, unsigned int speed);
+       int (*init)(struct platform_device *pdev, void *priv);
+       void (*exit)(struct platform_device *pdev, void *priv);
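The stmmac_mtl_setup() helper added in the patch above fills plat->rx_queues_cfg / plat->tx_queues_cfg from two phandle-referenced configuration nodes. As a rough, purely illustrative sketch (the property names are the ones parsed above; node labels and values are hypothetical), a device tree enabling two queues in each direction could look like this:

	&gmac {
		snps,mtl-rx-config = <&mtl_rx_setup>;
		snps,mtl-tx-config = <&mtl_tx_setup>;
	};

	mtl_rx_setup: rx-queues-config {
		snps,rx-queues-to-use = <2>;
		snps,rx-sched-sp;
		queue0 {
			snps,dcb-algorithm;
			snps,map-to-dma-channel = <0x0>;
			snps,priority = <0x0>;
		};
		queue1 {
			snps,dcb-algorithm;
			snps,map-to-dma-channel = <0x1>;
			snps,route-ptp;
		};
	};

	mtl_tx_setup: tx-queues-config {
		snps,tx-queues-to-use = <2>;
		snps,tx-sched-wrr;
		queue0 {
			snps,weight = <0x10>;
			snps,dcb-algorithm;
		};
		queue1 {
			snps,weight = <0x11>;
			snps,dcb-algorithm;
		};
	};

Child nodes beyond snps,rx-queues-to-use / snps,tx-queues-to-use are ignored, and absent properties fall back to the defaults chosen in the parser above (DCB mode, channel = queue index, priority disabled).
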
diff --git a/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch b/target/linux/sunxi/patches-4.9/0053-stmmac-form-4-13.patch
new file mode 100644 (file)
index 0000000..f829b79
--- /dev/null
@@ -0,0 +1,1924 @@
+--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
+@@ -145,6 +145,17 @@ config DWMAC_SUNXI
+         This selects Allwinner SoC glue layer support for the
+         stmmac device driver. This driver is used for A20/A31
+         GMAC ethernet controller.
++
++config DWMAC_SUN8I
++      tristate "Allwinner sun8i GMAC support"
++      default ARCH_SUNXI
++      depends on OF && (ARCH_SUNXI || COMPILE_TEST)
++      ---help---
++        Support for Allwinner H3 A83T A64 EMAC ethernet controllers.
++
++        This selects Allwinner SoC glue layer support for the
++        stmmac device driver. This driver is used for H3/A83T/A64
++        EMAC ethernet controller.
+ endif
+ config STMMAC_PCI
+--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
++++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
+@@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_SOCFPGA)  += dwmac-alt
+ obj-$(CONFIG_DWMAC_STI)               += dwmac-sti.o
+ obj-$(CONFIG_DWMAC_STM32)     += dwmac-stm32.o
+ obj-$(CONFIG_DWMAC_SUNXI)     += dwmac-sunxi.o
++obj-$(CONFIG_DWMAC_SUN8I)     += dwmac-sun8i.o
+ obj-$(CONFIG_DWMAC_DWC_QOS_ETH)       += dwmac-dwc-qos-eth.o
+ obj-$(CONFIG_DWMAC_GENERIC)   += dwmac-generic.o
+ stmmac-platform-objs:= stmmac_platform.o
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -549,9 +549,11 @@ extern const struct stmmac_hwtimestamp s
+ extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
+ struct mac_link {
+-      int port;
+-      int duplex;
+-      int speed;
++      u32 speed_mask;
++      u32 speed10;
++      u32 speed100;
++      u32 speed1000;
++      u32 duplex;
+ };
+ struct mii_regs {
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+@@ -269,7 +269,10 @@ static int socfpga_dwmac_set_phy_mode(st
+       ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
+       ctrl |= val << reg_shift;
+-      if (dwmac->f2h_ptp_ref_clk) {
++      if (dwmac->f2h_ptp_ref_clk ||
++          phymode == PHY_INTERFACE_MODE_MII ||
++          phymode == PHY_INTERFACE_MODE_GMII ||
++          phymode == PHY_INTERFACE_MODE_SGMII) {
+               ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
+               regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+                           &module);
+--- /dev/null
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+@@ -0,0 +1,1007 @@
++/*
++ * dwmac-sun8i.c - Allwinner sun8i DWMAC specific glue layer
++ *
++ * Copyright (C) 2017 Corentin Labbe <clabbe.montjoie@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/clk.h>
++#include <linux/io.h>
++#include <linux/iopoll.h>
++#include <linux/mfd/syscon.h>
++#include <linux/module.h>
++#include <linux/of_device.h>
++#include <linux/of_mdio.h>
++#include <linux/of_net.h>
++#include <linux/phy.h>
++#include <linux/platform_device.h>
++#include <linux/regulator/consumer.h>
++#include <linux/regmap.h>
++#include <linux/stmmac.h>
++
++#include "stmmac.h"
++#include "stmmac_platform.h"
++
++/* General notes on dwmac-sun8i:
++ * Locking: no locking is necessary in this file because all necessary locking
++ *            is done in the "stmmac files"
++ */
++
++/* struct emac_variant - Describe dwmac-sun8i hardware variant
++ * @default_syscon_value:     The default value of the EMAC register in syscon
++ *                            This value is used to properly disable the EMAC
++ *                            and as a good starting value in case the boot
++ *                            process (U-Boot) left some state behind.
++ * @internal_phy:             Does the MAC embed an internal PHY
++ * @support_mii:              Does the MAC handle MII
++ * @support_rmii:             Does the MAC handle RMII
++ * @support_rgmii:            Does the MAC handle RGMII
++ */
++struct emac_variant {
++      u32 default_syscon_value;
++      int internal_phy;
++      bool support_mii;
++      bool support_rmii;
++      bool support_rgmii;
++};
++
++/* struct sunxi_priv_data - hold all sunxi private data
++ * @tx_clk:   reference to MAC TX clock
++ * @ephy_clk: reference to the optional EPHY clock for the internal PHY
++ * @regulator:        reference to the optional regulator
++ * @rst_ephy: reference to the optional EPHY reset for the internal PHY
++ * @variant:  reference to the current board variant
++ * @regmap:   regmap for using the syscon
++ * @use_internal_phy: Does the current PHY choice imply using the internal PHY
++ */
++struct sunxi_priv_data {
++      struct clk *tx_clk;
++      struct clk *ephy_clk;
++      struct regulator *regulator;
++      struct reset_control *rst_ephy;
++      const struct emac_variant *variant;
++      struct regmap *regmap;
++      bool use_internal_phy;
++};
++
++static const struct emac_variant emac_variant_h3 = {
++      .default_syscon_value = 0x58000,
++      .internal_phy = PHY_INTERFACE_MODE_MII,
++      .support_mii = true,
++      .support_rmii = true,
++      .support_rgmii = true
++};
++
++static const struct emac_variant emac_variant_v3s = {
++      .default_syscon_value = 0x38000,
++      .internal_phy = PHY_INTERFACE_MODE_MII,
++      .support_mii = true
++};
++
++static const struct emac_variant emac_variant_a83t = {
++      .default_syscon_value = 0,
++      .internal_phy = 0,
++      .support_mii = true,
++      .support_rgmii = true
++};
++
++static const struct emac_variant emac_variant_a64 = {
++      .default_syscon_value = 0,
++      .internal_phy = 0,
++      .support_mii = true,
++      .support_rmii = true,
++      .support_rgmii = true
++};
++
++#define EMAC_BASIC_CTL0 0x00
++#define EMAC_BASIC_CTL1 0x04
++#define EMAC_INT_STA    0x08
++#define EMAC_INT_EN     0x0C
++#define EMAC_TX_CTL0    0x10
++#define EMAC_TX_CTL1    0x14
++#define EMAC_TX_FLOW_CTL        0x1C
++#define EMAC_TX_DESC_LIST 0x20
++#define EMAC_RX_CTL0    0x24
++#define EMAC_RX_CTL1    0x28
++#define EMAC_RX_DESC_LIST 0x34
++#define EMAC_RX_FRM_FLT 0x38
++#define EMAC_MDIO_CMD   0x48
++#define EMAC_MDIO_DATA  0x4C
++#define EMAC_MACADDR_HI(reg) (0x50 + (reg) * 8)
++#define EMAC_MACADDR_LO(reg) (0x54 + (reg) * 8)
++#define EMAC_TX_DMA_STA 0xB0
++#define EMAC_TX_CUR_DESC        0xB4
++#define EMAC_TX_CUR_BUF 0xB8
++#define EMAC_RX_DMA_STA 0xC0
++#define EMAC_RX_CUR_DESC        0xC4
++#define EMAC_RX_CUR_BUF 0xC8
++
++/* Use in EMAC_BASIC_CTL0 */
++#define EMAC_DUPLEX_FULL      BIT(0)
++#define EMAC_LOOPBACK         BIT(1)
++#define EMAC_SPEED_1000 0
++#define EMAC_SPEED_100 (0x03 << 2)
++#define EMAC_SPEED_10 (0x02 << 2)
++
++/* Use in EMAC_BASIC_CTL1 */
++#define EMAC_BURSTLEN_SHIFT           24
++
++/* Used in EMAC_RX_FRM_FLT */
++#define EMAC_FRM_FLT_RXALL              BIT(0)
++#define EMAC_FRM_FLT_CTL                BIT(13)
++#define EMAC_FRM_FLT_MULTICAST          BIT(16)
++
++/* Used in RX_CTL1*/
++#define EMAC_RX_MD              BIT(1)
++#define EMAC_RX_TH_MASK               GENMASK(4, 5)
++#define EMAC_RX_TH_32         0
++#define EMAC_RX_TH_64         (0x1 << 4)
++#define EMAC_RX_TH_96         (0x2 << 4)
++#define EMAC_RX_TH_128                (0x3 << 4)
++#define EMAC_RX_DMA_EN  BIT(30)
++#define EMAC_RX_DMA_START       BIT(31)
++
++/* Used in TX_CTL1*/
++#define EMAC_TX_MD              BIT(1)
++#define EMAC_TX_NEXT_FRM        BIT(2)
++#define EMAC_TX_TH_MASK               GENMASK(8, 10)
++#define EMAC_TX_TH_64         0
++#define EMAC_TX_TH_128                (0x1 << 8)
++#define EMAC_TX_TH_192                (0x2 << 8)
++#define EMAC_TX_TH_256                (0x3 << 8)
++#define EMAC_TX_DMA_EN  BIT(30)
++#define EMAC_TX_DMA_START       BIT(31)
++
++/* Used in RX_CTL0 */
++#define EMAC_RX_RECEIVER_EN             BIT(31)
++#define EMAC_RX_DO_CRC BIT(27)
++#define EMAC_RX_FLOW_CTL_EN             BIT(16)
++
++/* Used in TX_CTL0 */
++#define EMAC_TX_TRANSMITTER_EN  BIT(31)
++
++/* Used in EMAC_TX_FLOW_CTL */
++#define EMAC_TX_FLOW_CTL_EN             BIT(0)
++
++/* Used in EMAC_INT_STA */
++#define EMAC_TX_INT             BIT(0)
++#define EMAC_TX_DMA_STOP_INT    BIT(1)
++#define EMAC_TX_BUF_UA_INT      BIT(2)
++#define EMAC_TX_TIMEOUT_INT     BIT(3)
++#define EMAC_TX_UNDERFLOW_INT   BIT(4)
++#define EMAC_TX_EARLY_INT       BIT(5)
++#define EMAC_RX_INT             BIT(8)
++#define EMAC_RX_BUF_UA_INT      BIT(9)
++#define EMAC_RX_DMA_STOP_INT    BIT(10)
++#define EMAC_RX_TIMEOUT_INT     BIT(11)
++#define EMAC_RX_OVERFLOW_INT    BIT(12)
++#define EMAC_RX_EARLY_INT       BIT(13)
++#define EMAC_RGMII_STA_INT      BIT(16)
++
++#define MAC_ADDR_TYPE_DST BIT(31)
++
++/* H3 specific bits for EPHY */
++#define H3_EPHY_ADDR_SHIFT    20
++#define H3_EPHY_CLK_SEL               BIT(18) /* 1: 24MHz, 0: 25MHz */
++#define H3_EPHY_LED_POL               BIT(17) /* 1: active low, 0: active high */
++#define H3_EPHY_SHUTDOWN      BIT(16) /* 1: shutdown, 0: power up */
++#define H3_EPHY_SELECT                BIT(15) /* 1: internal PHY, 0: external PHY */
++
++/* H3/A64 specific bits */
++#define SYSCON_RMII_EN                BIT(13) /* 1: enable RMII (overrides EPIT) */
++
++/* Generic system control EMAC_CLK bits */
++#define SYSCON_ETXDC_MASK             GENMASK(2, 0)
++#define SYSCON_ETXDC_SHIFT            10
++#define SYSCON_ERXDC_MASK             GENMASK(4, 0)
++#define SYSCON_ERXDC_SHIFT            5
++/* EMAC PHY Interface Type */
++#define SYSCON_EPIT                   BIT(2) /* 1: RGMII, 0: MII */
++#define SYSCON_ETCS_MASK              GENMASK(1, 0)
++#define SYSCON_ETCS_MII               0x0
++#define SYSCON_ETCS_EXT_GMII  0x1
++#define SYSCON_ETCS_INT_GMII  0x2
++#define SYSCON_EMAC_REG               0x30
++
++/* sun8i_dwmac_dma_reset() - reset the EMAC
++ * Called from stmmac via stmmac_dma_ops->reset
++ */
++static int sun8i_dwmac_dma_reset(void __iomem *ioaddr)
++{
++      writel(0, ioaddr + EMAC_RX_CTL1);
++      writel(0, ioaddr + EMAC_TX_CTL1);
++      writel(0, ioaddr + EMAC_RX_FRM_FLT);
++      writel(0, ioaddr + EMAC_RX_DESC_LIST);
++      writel(0, ioaddr + EMAC_TX_DESC_LIST);
++      writel(0, ioaddr + EMAC_INT_EN);
++      writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
++      return 0;
++}
++
++/* sun8i_dwmac_dma_init() - initialize the EMAC
++ * Called from stmmac via stmmac_dma_ops->init
++ */
++static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
++                               struct stmmac_dma_cfg *dma_cfg,
++                               u32 dma_tx, u32 dma_rx, int atds)
++{
++      /* Write TX and RX descriptors address */
++      writel(dma_rx, ioaddr + EMAC_RX_DESC_LIST);
++      writel(dma_tx, ioaddr + EMAC_TX_DESC_LIST);
++
++      writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
++      writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
++}
++
++/* sun8i_dwmac_dump_regs() - Dump EMAC address space
++ * Called from stmmac_dma_ops->dump_regs
++ * Used for ethtool
++ */
++static void sun8i_dwmac_dump_regs(void __iomem *ioaddr, u32 *reg_space)
++{
++      int i;
++
++      for (i = 0; i < 0xC8; i += 4) {
++              if (i == 0x32 || i == 0x3C)
++                      continue;
++              reg_space[i / 4] = readl(ioaddr + i);
++      }
++}
++
++/* sun8i_dwmac_dump_mac_regs() - Dump EMAC address space
++ * Called from stmmac_ops->dump_regs
++ * Used for ethtool
++ */
++static void sun8i_dwmac_dump_mac_regs(struct mac_device_info *hw,
++                                    u32 *reg_space)
++{
++      int i;
++      void __iomem *ioaddr = hw->pcsr;
++
++      for (i = 0; i < 0xC8; i += 4) {
++              if (i == 0x32 || i == 0x3C)
++                      continue;
++              reg_space[i / 4] = readl(ioaddr + i);
++      }
++}
++
++static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
++{
++      writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
++}
++
++static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
++{
++      writel(0, ioaddr + EMAC_INT_EN);
++}
++
++static void sun8i_dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
++{
++      u32 v;
++
++      v = readl(ioaddr + EMAC_TX_CTL1);
++      v |= EMAC_TX_DMA_START;
++      v |= EMAC_TX_DMA_EN;
++      writel(v, ioaddr + EMAC_TX_CTL1);
++}
++
++static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr)
++{
++      u32 v;
++
++      v = readl(ioaddr + EMAC_TX_CTL1);
++      v |= EMAC_TX_DMA_START;
++      v |= EMAC_TX_DMA_EN;
++      writel(v, ioaddr + EMAC_TX_CTL1);
++}
++
++static void sun8i_dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
++{
++      u32 v;
++
++      v = readl(ioaddr + EMAC_TX_CTL1);
++      v &= ~EMAC_TX_DMA_EN;
++      writel(v, ioaddr + EMAC_TX_CTL1);
++}
++
++static void sun8i_dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
++{
++      u32 v;
++
++      v = readl(ioaddr + EMAC_RX_CTL1);
++      v |= EMAC_RX_DMA_START;
++      v |= EMAC_RX_DMA_EN;
++      writel(v, ioaddr + EMAC_RX_CTL1);
++}
++
++static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
++{
++      u32 v;
++
++      v = readl(ioaddr + EMAC_RX_CTL1);
++      v &= ~EMAC_RX_DMA_EN;
++      writel(v, ioaddr + EMAC_RX_CTL1);
++}
++
++static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr,
++                                   struct stmmac_extra_stats *x, u32 chan)
++{
++      u32 v;
++      int ret = 0;
++
++      v = readl(ioaddr + EMAC_INT_STA);
++
++      if (v & EMAC_TX_INT) {
++              ret |= handle_tx;
++              x->tx_normal_irq_n++;
++      }
++
++      if (v & EMAC_TX_DMA_STOP_INT)
++              x->tx_process_stopped_irq++;
++
++      if (v & EMAC_TX_BUF_UA_INT)
++              x->tx_process_stopped_irq++;
++
++      if (v & EMAC_TX_TIMEOUT_INT)
++              ret |= tx_hard_error;
++
++      if (v & EMAC_TX_UNDERFLOW_INT) {
++              ret |= tx_hard_error;
++              x->tx_undeflow_irq++;
++      }
++
++      if (v & EMAC_TX_EARLY_INT)
++              x->tx_early_irq++;
++
++      if (v & EMAC_RX_INT) {
++              ret |= handle_rx;
++              x->rx_normal_irq_n++;
++      }
++
++      if (v & EMAC_RX_BUF_UA_INT)
++              x->rx_buf_unav_irq++;
++
++      if (v & EMAC_RX_DMA_STOP_INT)
++              x->rx_process_stopped_irq++;
++
++      if (v & EMAC_RX_TIMEOUT_INT)
++              ret |= tx_hard_error;
++
++      if (v & EMAC_RX_OVERFLOW_INT) {
++              ret |= tx_hard_error;
++              x->rx_overflow_irq++;
++      }
++
++      if (v & EMAC_RX_EARLY_INT)
++              x->rx_early_irq++;
++
++      if (v & EMAC_RGMII_STA_INT)
++              x->irq_rgmii_n++;
++
++      writel(v, ioaddr + EMAC_INT_STA);
++
++      return ret;
++}
++
++static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode,
++                                         int rxmode, int rxfifosz)
++{
++      u32 v;
++
++      v = readl(ioaddr + EMAC_TX_CTL1);
++      if (txmode == SF_DMA_MODE) {
++              v |= EMAC_TX_MD;
++              /* Undocumented bit (called TX_NEXT_FRM in BSP), the original
++               * comment is
++               * "Operating on second frame increase the performance
++               * especially when transmit store-and-forward is used."
++               */
++              v |= EMAC_TX_NEXT_FRM;
++      } else {
++              v &= ~EMAC_TX_MD;
++              v &= ~EMAC_TX_TH_MASK;
++              if (txmode < 64)
++                      v |= EMAC_TX_TH_64;
++              else if (txmode < 128)
++                      v |= EMAC_TX_TH_128;
++              else if (txmode < 192)
++                      v |= EMAC_TX_TH_192;
++              else if (txmode < 256)
++                      v |= EMAC_TX_TH_256;
++      }
++      writel(v, ioaddr + EMAC_TX_CTL1);
++
++      v = readl(ioaddr + EMAC_RX_CTL1);
++      if (rxmode == SF_DMA_MODE) {
++              v |= EMAC_RX_MD;
++      } else {
++              v &= ~EMAC_RX_MD;
++              v &= ~EMAC_RX_TH_MASK;
++              if (rxmode < 32)
++                      v |= EMAC_RX_TH_32;
++              else if (rxmode < 64)
++                      v |= EMAC_RX_TH_64;
++              else if (rxmode < 96)
++                      v |= EMAC_RX_TH_96;
++              else if (rxmode < 128)
++                      v |= EMAC_RX_TH_128;
++      }
++      writel(v, ioaddr + EMAC_RX_CTL1);
++}
++
++static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
++      .reset = sun8i_dwmac_dma_reset,
++      .init = sun8i_dwmac_dma_init,
++      .dump_regs = sun8i_dwmac_dump_regs,
++      .dma_mode = sun8i_dwmac_dma_operation_mode,
++      .enable_dma_transmission = sun8i_dwmac_enable_dma_transmission,
++      .enable_dma_irq = sun8i_dwmac_enable_dma_irq,
++      .disable_dma_irq = sun8i_dwmac_disable_dma_irq,
++      .start_tx = sun8i_dwmac_dma_start_tx,
++      .stop_tx = sun8i_dwmac_dma_stop_tx,
++      .start_rx = sun8i_dwmac_dma_start_rx,
++      .stop_rx = sun8i_dwmac_dma_stop_rx,
++      .dma_interrupt = sun8i_dwmac_dma_interrupt,
++};
++
++static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
++{
++      struct sunxi_priv_data *gmac = priv;
++      int ret;
++
++      if (gmac->regulator) {
++              ret = regulator_enable(gmac->regulator);
++              if (ret) {
++                      dev_err(&pdev->dev, "Fail to enable regulator\n");
++                      return ret;
++              }
++      }
++
++      ret = clk_prepare_enable(gmac->tx_clk);
++      if (ret) {
++              if (gmac->regulator)
++                      regulator_disable(gmac->regulator);
++              dev_err(&pdev->dev, "Could not enable AHB clock\n");
++              return ret;
++      }
++
++      return 0;
++}
++
++static void sun8i_dwmac_core_init(struct mac_device_info *hw, int mtu)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 v;
++
++      v = (8 << EMAC_BURSTLEN_SHIFT); /* burst len */
++      writel(v, ioaddr + EMAC_BASIC_CTL1);
++}
++
++static void sun8i_dwmac_set_mac(void __iomem *ioaddr, bool enable)
++{
++      u32 t, r;
++
++      t = readl(ioaddr + EMAC_TX_CTL0);
++      r = readl(ioaddr + EMAC_RX_CTL0);
++      if (enable) {
++              t |= EMAC_TX_TRANSMITTER_EN;
++              r |= EMAC_RX_RECEIVER_EN;
++      } else {
++              t &= ~EMAC_TX_TRANSMITTER_EN;
++              r &= ~EMAC_RX_RECEIVER_EN;
++      }
++      writel(t, ioaddr + EMAC_TX_CTL0);
++      writel(r, ioaddr + EMAC_RX_CTL0);
++}
++
++/* Set MAC address at slot reg_n
++ * All slots > 0 need to be enabled with MAC_ADDR_TYPE_DST
++ * If addr is NULL, clear the slot
++ */
++static void sun8i_dwmac_set_umac_addr(struct mac_device_info *hw,
++                                    unsigned char *addr,
++                                    unsigned int reg_n)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 v;
++
++      if (!addr) {
++              writel(0, ioaddr + EMAC_MACADDR_HI(reg_n));
++              return;
++      }
++
++      stmmac_set_mac_addr(ioaddr, addr, EMAC_MACADDR_HI(reg_n),
++                          EMAC_MACADDR_LO(reg_n));
++      if (reg_n > 0) {
++              v = readl(ioaddr + EMAC_MACADDR_HI(reg_n));
++              v |= MAC_ADDR_TYPE_DST;
++              writel(v, ioaddr + EMAC_MACADDR_HI(reg_n));
++      }
++}
++
++static void sun8i_dwmac_get_umac_addr(struct mac_device_info *hw,
++                                    unsigned char *addr,
++                                    unsigned int reg_n)
++{
++      void __iomem *ioaddr = hw->pcsr;
++
++      stmmac_get_mac_addr(ioaddr, addr, EMAC_MACADDR_HI(reg_n),
++                          EMAC_MACADDR_LO(reg_n));
++}
++
++/* Caution: this function must return a non-zero value to work */
++static int sun8i_dwmac_rx_ipc_enable(struct mac_device_info *hw)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 v;
++
++      v = readl(ioaddr + EMAC_RX_CTL0);
++      v |= EMAC_RX_DO_CRC;
++      writel(v, ioaddr + EMAC_RX_CTL0);
++
++      return 1;
++}
++
++static void sun8i_dwmac_set_filter(struct mac_device_info *hw,
++                                 struct net_device *dev)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 v;
++      int i = 1;
++      struct netdev_hw_addr *ha;
++      int macaddrs = netdev_uc_count(dev) + netdev_mc_count(dev) + 1;
++
++      v = EMAC_FRM_FLT_CTL;
++
++      if (dev->flags & IFF_PROMISC) {
++              v = EMAC_FRM_FLT_RXALL;
++      } else if (dev->flags & IFF_ALLMULTI) {
++              v |= EMAC_FRM_FLT_MULTICAST;
++      } else if (macaddrs <= hw->unicast_filter_entries) {
++              if (!netdev_mc_empty(dev)) {
++                      netdev_for_each_mc_addr(ha, dev) {
++                              sun8i_dwmac_set_umac_addr(hw, ha->addr, i);
++                              i++;
++                      }
++              }
++              if (!netdev_uc_empty(dev)) {
++                      netdev_for_each_uc_addr(ha, dev) {
++                              sun8i_dwmac_set_umac_addr(hw, ha->addr, i);
++                              i++;
++                      }
++              }
++      } else {
++              netdev_info(dev, "Too many address, switching to promiscuous\n");
++              v = EMAC_FRM_FLT_RXALL;
++      }
++
++      /* Disable unused address filter slots */
++      while (i < hw->unicast_filter_entries)
++              sun8i_dwmac_set_umac_addr(hw, NULL, i++);
++
++      writel(v, ioaddr + EMAC_RX_FRM_FLT);
++}
++
++static void sun8i_dwmac_flow_ctrl(struct mac_device_info *hw,
++                                unsigned int duplex, unsigned int fc,
++                                unsigned int pause_time, u32 tx_cnt)
++{
++      void __iomem *ioaddr = hw->pcsr;
++      u32 v;
++
++      v = readl(ioaddr + EMAC_RX_CTL0);
++      if (fc == FLOW_AUTO)
++              v |= EMAC_RX_FLOW_CTL_EN;
++      else
++              v &= ~EMAC_RX_FLOW_CTL_EN;
++      writel(v, ioaddr + EMAC_RX_CTL0);
++
++      v = readl(ioaddr + EMAC_TX_FLOW_CTL);
++      if (fc == FLOW_AUTO)
++              v |= EMAC_TX_FLOW_CTL_EN;
++      else
++              v &= ~EMAC_TX_FLOW_CTL_EN;
++      writel(v, ioaddr + EMAC_TX_FLOW_CTL);
++}
++
++static int sun8i_dwmac_reset(struct stmmac_priv *priv)
++{
++      u32 v;
++      int err;
++
++      v = readl(priv->ioaddr + EMAC_BASIC_CTL1);
++      writel(v | 0x01, priv->ioaddr + EMAC_BASIC_CTL1);
++
++      /* The timeout was previously set to 10ms, but some boards (OrangePI0)
++       * need more if no cable is plugged in. 100ms seems OK.
++       */
++      err = readl_poll_timeout(priv->ioaddr + EMAC_BASIC_CTL1, v,
++                               !(v & 0x01), 100, 100000);
++
++      if (err) {
++              dev_err(priv->device, "EMAC reset timeout\n");
++              return -EFAULT;
++      }
++      return 0;
++}
++
++static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
++{
++      struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
++      struct device_node *node = priv->device->of_node;
++      int ret;
++      u32 reg, val;
++
++      regmap_read(gmac->regmap, SYSCON_EMAC_REG, &val);
++      reg = gmac->variant->default_syscon_value;
++      if (reg != val)
++              dev_warn(priv->device,
++                       "Current syscon value is not the default %x (expect %x)\n",
++                       val, reg);
++
++      if (gmac->variant->internal_phy) {
++              if (!gmac->use_internal_phy) {
++                      /* switch to external PHY interface */
++                      reg &= ~H3_EPHY_SELECT;
++              } else {
++                      reg |= H3_EPHY_SELECT;
++                      reg &= ~H3_EPHY_SHUTDOWN;
++                      dev_dbg(priv->device, "Select internal_phy %x\n", reg);
++
++                      if (of_property_read_bool(priv->plat->phy_node,
++                                                "allwinner,leds-active-low"))
++                              reg |= H3_EPHY_LED_POL;
++                      else
++                              reg &= ~H3_EPHY_LED_POL;
++
++                      /* Force EPHY xtal frequency to 24MHz. */
++                      reg |= H3_EPHY_CLK_SEL;
++
++                      ret = of_mdio_parse_addr(priv->device,
++                                               priv->plat->phy_node);
++                      if (ret < 0) {
++                              dev_err(priv->device, "Could not parse MDIO addr\n");
++                              return ret;
++                      }
++                      /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
++                       * address. No need to mask it again.
++                       */
++                      reg |= ret << H3_EPHY_ADDR_SHIFT;
++              }
++      }
++
++      if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
++              if (val % 100) {
++                      dev_err(priv->device, "tx-delay must be a multiple of 100\n");
++                      return -EINVAL;
++              }
++              val /= 100;
++              dev_dbg(priv->device, "set tx-delay to %x\n", val);
++              if (val <= SYSCON_ETXDC_MASK) {
++                      reg &= ~(SYSCON_ETXDC_MASK << SYSCON_ETXDC_SHIFT);
++                      reg |= (val << SYSCON_ETXDC_SHIFT);
++              } else {
++                      dev_err(priv->device, "Invalid TX clock delay: %d\n",
++                              val);
++                      return -EINVAL;
++              }
++      }
++
++      if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
++              if (val % 100) {
++                      dev_err(priv->device, "rx-delay must be a multiple of 100\n");
++                      return -EINVAL;
++              }
++              val /= 100;
++              dev_dbg(priv->device, "set rx-delay to %x\n", val);
++              if (val <= SYSCON_ERXDC_MASK) {
++                      reg &= ~(SYSCON_ERXDC_MASK << SYSCON_ERXDC_SHIFT);
++                      reg |= (val << SYSCON_ERXDC_SHIFT);
++              } else {
++                      dev_err(priv->device, "Invalid RX clock delay: %d\n",
++                              val);
++                      return -EINVAL;
++              }
++      }
++
++      /* Clear interface mode bits */
++      reg &= ~(SYSCON_ETCS_MASK | SYSCON_EPIT);
++      if (gmac->variant->support_rmii)
++              reg &= ~SYSCON_RMII_EN;
++
++      switch (priv->plat->interface) {
++      case PHY_INTERFACE_MODE_MII:
++              /* default */
++              break;
++      case PHY_INTERFACE_MODE_RGMII:
++              reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
++              break;
++      case PHY_INTERFACE_MODE_RMII:
++              reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
++              break;
++      default:
++              dev_err(priv->device, "Unsupported interface mode: %s",
++                      phy_modes(priv->plat->interface));
++              return -EINVAL;
++      }
++
++      regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg);
++
++      return 0;
++}
++
++static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
++{
++      u32 reg = gmac->variant->default_syscon_value;
++
++      regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg);
++}
++
++static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv)
++{
++      struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
++      int ret;
++
++      if (!gmac->use_internal_phy)
++              return 0;
++
++      ret = clk_prepare_enable(gmac->ephy_clk);
++      if (ret) {
++              dev_err(priv->device, "Cannot enable ephy\n");
++              return ret;
++      }
++
++      /* Make sure the EPHY is properly reset, as U-Boot may leave
++       * it in a deasserted state, and thus it may fail to reset the EMAC.
++       */
++      reset_control_assert(gmac->rst_ephy);
++
++      ret = reset_control_deassert(gmac->rst_ephy);
++      if (ret) {
++              dev_err(priv->device, "Cannot deassert ephy\n");
++              clk_disable_unprepare(gmac->ephy_clk);
++              return ret;
++      }
++
++      return 0;
++}
++
++static int sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac)
++{
++      if (!gmac->use_internal_phy)
++              return 0;
++
++      clk_disable_unprepare(gmac->ephy_clk);
++      reset_control_assert(gmac->rst_ephy);
++      return 0;
++}
++
++/* sun8i_power_phy() - Activate the PHY:
++ * In case of error, no need to call sun8i_unpower_phy(),
++ * it will be called anyway by sun8i_dwmac_exit()
++ */
++static int sun8i_power_phy(struct stmmac_priv *priv)
++{
++      int ret;
++
++      ret = sun8i_dwmac_power_internal_phy(priv);
++      if (ret)
++              return ret;
++
++      ret = sun8i_dwmac_set_syscon(priv);
++      if (ret)
++              return ret;
++
++      /* After changing the syscon value, the MAC needs a reset or it will
++       * keep using the last value (and so the last PHY setting).
++       */
++      ret = sun8i_dwmac_reset(priv);
++      if (ret)
++              return ret;
++      return 0;
++}
++
++static void sun8i_unpower_phy(struct sunxi_priv_data *gmac)
++{
++      sun8i_dwmac_unset_syscon(gmac);
++      sun8i_dwmac_unpower_internal_phy(gmac);
++}
++
++static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
++{
++      struct sunxi_priv_data *gmac = priv;
++
++      sun8i_unpower_phy(gmac);
++
++      clk_disable_unprepare(gmac->tx_clk);
++
++      if (gmac->regulator)
++              regulator_disable(gmac->regulator);
++}
++
++static const struct stmmac_ops sun8i_dwmac_ops = {
++      .core_init = sun8i_dwmac_core_init,
++      .set_mac = sun8i_dwmac_set_mac,
++      .dump_regs = sun8i_dwmac_dump_mac_regs,
++      .rx_ipc = sun8i_dwmac_rx_ipc_enable,
++      .set_filter = sun8i_dwmac_set_filter,
++      .flow_ctrl = sun8i_dwmac_flow_ctrl,
++      .set_umac_addr = sun8i_dwmac_set_umac_addr,
++      .get_umac_addr = sun8i_dwmac_get_umac_addr,
++};
++
++static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
++{
++      struct mac_device_info *mac;
++      struct stmmac_priv *priv = ppriv;
++      int ret;
++
++      mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
++      if (!mac)
++              return NULL;
++
++      ret = sun8i_power_phy(priv);
++      if (ret)
++              return NULL;
++
++      mac->pcsr = priv->ioaddr;
++      mac->mac = &sun8i_dwmac_ops;
++      mac->dma = &sun8i_dwmac_dma_ops;
++
++      /* The loopback bit seems to be re-set when the link changes
++       * Simply mask it each time
++       * Speed 10/100/1000 are set in BIT(2)/BIT(3)
++       */
++      mac->link.speed_mask = GENMASK(3, 2) | EMAC_LOOPBACK;
++      mac->link.speed10 = EMAC_SPEED_10;
++      mac->link.speed100 = EMAC_SPEED_100;
++      mac->link.speed1000 = EMAC_SPEED_1000;
++      mac->link.duplex = EMAC_DUPLEX_FULL;
++      mac->mii.addr = EMAC_MDIO_CMD;
++      mac->mii.data = EMAC_MDIO_DATA;
++      mac->mii.reg_shift = 4;
++      mac->mii.reg_mask = GENMASK(8, 4);
++      mac->mii.addr_shift = 12;
++      mac->mii.addr_mask = GENMASK(16, 12);
++      mac->mii.clk_csr_shift = 20;
++      mac->mii.clk_csr_mask = GENMASK(22, 20);
++      mac->unicast_filter_entries = 8;
++
++      /* Synopsys Id is not available */
++      priv->synopsys_id = 0;
++
++      return mac;
++}
++
++static int sun8i_dwmac_probe(struct platform_device *pdev)
++{
++      struct plat_stmmacenet_data *plat_dat;
++      struct stmmac_resources stmmac_res;
++      struct sunxi_priv_data *gmac;
++      struct device *dev = &pdev->dev;
++      int ret;
++
++      ret = stmmac_get_platform_resources(pdev, &stmmac_res);
++      if (ret)
++              return ret;
++
++      plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
++      if (IS_ERR(plat_dat))
++              return PTR_ERR(plat_dat);
++
++      gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
++      if (!gmac)
++              return -ENOMEM;
++
++      gmac->variant = of_device_get_match_data(&pdev->dev);
++      if (!gmac->variant) {
++              dev_err(&pdev->dev, "Missing dwmac-sun8i variant\n");
++              return -EINVAL;
++      }
++
++      gmac->tx_clk = devm_clk_get(dev, "stmmaceth");
++      if (IS_ERR(gmac->tx_clk)) {
++              dev_err(dev, "Could not get TX clock\n");
++              return PTR_ERR(gmac->tx_clk);
++      }
++
++      /* Optional regulator for PHY */
++      gmac->regulator = devm_regulator_get_optional(dev, "phy");
++      if (IS_ERR(gmac->regulator)) {
++              if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
++                      return -EPROBE_DEFER;
++              dev_info(dev, "No regulator found\n");
++              gmac->regulator = NULL;
++      }
++
++      gmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
++                                                     "syscon");
++      if (IS_ERR(gmac->regmap)) {
++              ret = PTR_ERR(gmac->regmap);
++              dev_err(&pdev->dev, "Unable to map syscon: %d\n", ret);
++              return ret;
++      }
++
++      plat_dat->interface = of_get_phy_mode(dev->of_node);
++      if (plat_dat->interface == gmac->variant->internal_phy) {
++              dev_info(&pdev->dev, "Will use internal PHY\n");
++              gmac->use_internal_phy = true;
++              gmac->ephy_clk = of_clk_get(plat_dat->phy_node, 0);
++              if (IS_ERR(gmac->ephy_clk)) {
++                      ret = PTR_ERR(gmac->ephy_clk);
++                      dev_err(&pdev->dev, "Cannot get EPHY clock: %d\n", ret);
++                      return -EINVAL;
++              }
++
++              gmac->rst_ephy = of_reset_control_get(plat_dat->phy_node, NULL);
++              if (IS_ERR(gmac->rst_ephy)) {
++                      ret = PTR_ERR(gmac->rst_ephy);
++                      if (ret == -EPROBE_DEFER)
++                              return ret;
++                      dev_err(&pdev->dev, "No EPHY reset control found %d\n",
++                              ret);
++                      return -EINVAL;
++              }
++      } else {
++              dev_info(&pdev->dev, "Will use external PHY\n");
++              gmac->use_internal_phy = false;
++      }
++
++      /* Platform data specifying hardware features and callbacks.
++       * Hardware features were copied from the Allwinner drivers.
++       */
++      plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
++      plat_dat->tx_coe = 1;
++      plat_dat->has_sun8i = true;
++      plat_dat->bsp_priv = gmac;
++      plat_dat->init = sun8i_dwmac_init;
++      plat_dat->exit = sun8i_dwmac_exit;
++      plat_dat->setup = sun8i_dwmac_setup;
++
++      ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
++      if (ret)
++              return ret;
++
++      ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
++      if (ret)
++              sun8i_dwmac_exit(pdev, plat_dat->bsp_priv);
++
++      return ret;
++}
++
++static const struct of_device_id sun8i_dwmac_match[] = {
++      { .compatible = "allwinner,sun8i-h3-emac",
++              .data = &emac_variant_h3 },
++      { .compatible = "allwinner,sun8i-v3s-emac",
++              .data = &emac_variant_v3s },
++      { .compatible = "allwinner,sun8i-a83t-emac",
++              .data = &emac_variant_a83t },
++      { .compatible = "allwinner,sun50i-a64-emac",
++              .data = &emac_variant_a64 },
++      { }
++};
++MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
++
++static struct platform_driver sun8i_dwmac_driver = {
++      .probe  = sun8i_dwmac_probe,
++      .remove = stmmac_pltfr_remove,
++      .driver = {
++              .name           = "dwmac-sun8i",
++              .pm             = &stmmac_pltfr_pm_ops,
++              .of_match_table = sun8i_dwmac_match,
++      },
++};
++module_platform_driver(sun8i_dwmac_driver);
++
++MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>");
++MODULE_DESCRIPTION("Allwinner sun8i DWMAC specific glue layer");
++MODULE_LICENSE("GPL");
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+@@ -45,15 +45,17 @@ static void dwmac1000_core_init(struct m
+       if (hw->ps) {
+               value |= GMAC_CONTROL_TE;
+-              if (hw->ps == SPEED_1000) {
+-                      value &= ~GMAC_CONTROL_PS;
+-              } else {
+-                      value |= GMAC_CONTROL_PS;
+-
+-                      if (hw->ps == SPEED_10)
+-                              value &= ~GMAC_CONTROL_FES;
+-                      else
+-                              value |= GMAC_CONTROL_FES;
++              value &= ~hw->link.speed_mask;
++              switch (hw->ps) {
++              case SPEED_1000:
++                      value |= hw->link.speed1000;
++                      break;
++              case SPEED_100:
++                      value |= hw->link.speed100;
++                      break;
++              case SPEED_10:
++                      value |= hw->link.speed10;
++                      break;
+               }
+       }
+@@ -531,9 +533,11 @@ struct mac_device_info *dwmac1000_setup(
+       mac->mac = &dwmac1000_ops;
+       mac->dma = &dwmac1000_dma_ops;
+-      mac->link.port = GMAC_CONTROL_PS;
+       mac->link.duplex = GMAC_CONTROL_DM;
+-      mac->link.speed = GMAC_CONTROL_FES;
++      mac->link.speed10 = GMAC_CONTROL_PS;
++      mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
++      mac->link.speed1000 = 0;
++      mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+       mac->mii.addr = GMAC_MII_ADDR;
+       mac->mii.data = GMAC_MII_DATA;
+       mac->mii.addr_shift = 11;
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+@@ -205,8 +205,8 @@ static void dwmac1000_dump_dma_regs(void
+ {
+       int i;
+-      for (i = 0; i < 22; i++)
+-              if ((i < 9) || (i > 17))
++      for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++)
++              if ((i < 12) || (i > 17))
+                       reg_space[DMA_BUS_MODE / 4 + i] =
+                               readl(ioaddr + DMA_BUS_MODE + i * 4);
+ }
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+@@ -175,9 +175,11 @@ struct mac_device_info *dwmac100_setup(v
+       mac->mac = &dwmac100_ops;
+       mac->dma = &dwmac100_dma_ops;
+-      mac->link.port = MAC_CONTROL_PS;
+       mac->link.duplex = MAC_CONTROL_F;
+-      mac->link.speed = 0;
++      mac->link.speed10 = 0;
++      mac->link.speed100 = 0;
++      mac->link.speed1000 = 0;
++      mac->link.speed_mask = MAC_CONTROL_PS;
+       mac->mii.addr = MAC_MII_ADDR;
+       mac->mii.data = MAC_MII_DATA;
+       mac->mii.addr_shift = 11;
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+@@ -70,7 +70,7 @@ static void dwmac100_dump_dma_regs(void
+ {
+       int i;
+-      for (i = 0; i < 9; i++)
++      for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++)
+               reg_space[DMA_BUS_MODE / 4 + i] =
+                       readl(ioaddr + DMA_BUS_MODE + i * 4);
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -35,15 +35,17 @@ static void dwmac4_core_init(struct mac_
+       if (hw->ps) {
+               value |= GMAC_CONFIG_TE;
+-              if (hw->ps == SPEED_1000) {
+-                      value &= ~GMAC_CONFIG_PS;
+-              } else {
+-                      value |= GMAC_CONFIG_PS;
+-
+-                      if (hw->ps == SPEED_10)
+-                              value &= ~GMAC_CONFIG_FES;
+-                      else
+-                              value |= GMAC_CONFIG_FES;
++              value &= hw->link.speed_mask;
++              switch (hw->ps) {
++              case SPEED_1000:
++                      value |= hw->link.speed1000;
++                      break;
++              case SPEED_100:
++                      value |= hw->link.speed100;
++                      break;
++              case SPEED_10:
++                      value |= hw->link.speed10;
++                      break;
+               }
+       }
+@@ -115,7 +117,7 @@ static void dwmac4_tx_queue_routing(stru
+       void __iomem *ioaddr = hw->pcsr;
+       u32 value;
+-      const struct stmmac_rx_routing route_possibilities[] = {
++      static const struct stmmac_rx_routing route_possibilities[] = {
+               { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
+               { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
+               { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
+@@ -747,9 +749,11 @@ struct mac_device_info *dwmac4_setup(voi
+       if (mac->multicast_filter_bins)
+               mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+-      mac->link.port = GMAC_CONFIG_PS;
+       mac->link.duplex = GMAC_CONFIG_DM;
+-      mac->link.speed = GMAC_CONFIG_FES;
++      mac->link.speed10 = GMAC_CONFIG_PS;
++      mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
++      mac->link.speed1000 = 0;
++      mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
+       mac->mii.addr = GMAC_MDIO_ADDR;
+       mac->mii.data = GMAC_MDIO_DATA;
+       mac->mii.addr_shift = 21;
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+@@ -71,9 +71,9 @@ static void dwmac4_dma_axi(void __iomem
+       writel(value, ioaddr + DMA_SYS_BUS_MODE);
+ }
+-void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
+-                           struct stmmac_dma_cfg *dma_cfg,
+-                           u32 dma_rx_phy, u32 chan)
++static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
++                                  struct stmmac_dma_cfg *dma_cfg,
++                                  u32 dma_rx_phy, u32 chan)
+ {
+       u32 value;
+       u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+@@ -85,9 +85,9 @@ void dwmac4_dma_init_rx_chan(void __iome
+       writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
+ }
+-void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
+-                           struct stmmac_dma_cfg *dma_cfg,
+-                           u32 dma_tx_phy, u32 chan)
++static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
++                                  struct stmmac_dma_cfg *dma_cfg,
++                                  u32 dma_tx_phy, u32 chan)
+ {
+       u32 value;
+       u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+@@ -99,8 +99,8 @@ void dwmac4_dma_init_tx_chan(void __iome
+       writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
+ }
+-void dwmac4_dma_init_channel(void __iomem *ioaddr,
+-                           struct stmmac_dma_cfg *dma_cfg, u32 chan)
++static void dwmac4_dma_init_channel(void __iomem *ioaddr,
++                                  struct stmmac_dma_cfg *dma_cfg, u32 chan)
+ {
+       u32 value;
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+@@ -136,6 +136,9 @@
+ #define DMA_STATUS_TI 0x00000001      /* Transmit Interrupt */
+ #define DMA_CONTROL_FTF               0x00100000      /* Flush transmit FIFO */
++#define NUM_DWMAC100_DMA_REGS 9
++#define NUM_DWMAC1000_DMA_REGS        23
++
+ void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+ void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
+ void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+@@ -248,6 +248,7 @@ void stmmac_set_mac_addr(void __iomem *i
+       data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+       writel(data, ioaddr + low);
+ }
++EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);
+ /* Enable disable MAC RX/TX */
+ void stmmac_set_mac(void __iomem *ioaddr, bool enable)
+@@ -279,4 +280,4 @@ void stmmac_get_mac_addr(void __iomem *i
+       addr[4] = hi_addr & 0xff;
+       addr[5] = (hi_addr >> 8) & 0xff;
+ }
+-
++EXPORT_SYMBOL_GPL(stmmac_get_mac_addr);
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+@@ -104,7 +104,7 @@ struct stmmac_priv {
+       /* TX Queue */
+       struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+-      int oldlink;
++      bool oldlink;
+       int speed;
+       int oldduplex;
+       unsigned int flow_ctrl;
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -29,10 +29,12 @@
+ #include "stmmac.h"
+ #include "dwmac_dma.h"
+-#define REG_SPACE_SIZE        0x1054
++#define REG_SPACE_SIZE        0x1060
+ #define MAC100_ETHTOOL_NAME   "st_mac100"
+ #define GMAC_ETHTOOL_NAME     "st_gmac"
++#define ETHTOOL_DMA_OFFSET    55
++
+ struct stmmac_stats {
+       char stat_string[ETH_GSTRING_LEN];
+       int sizeof_stat;
+@@ -273,7 +275,6 @@ static int stmmac_ethtool_get_link_ksett
+ {
+       struct stmmac_priv *priv = netdev_priv(dev);
+       struct phy_device *phy = dev->phydev;
+-      int rc;
+       if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+           priv->hw->pcs & STMMAC_PCS_SGMII) {
+@@ -364,8 +365,8 @@ static int stmmac_ethtool_get_link_ksett
+               "link speed / duplex setting\n", dev->name);
+               return -EBUSY;
+       }
+-      rc = phy_ethtool_ksettings_get(phy, cmd);
+-      return rc;
++      phy_ethtool_ksettings_get(phy, cmd);
++      return 0;
+ }
+ static int
+@@ -443,6 +444,9 @@ static void stmmac_ethtool_gregs(struct
+       priv->hw->mac->dump_regs(priv->hw, reg_space);
+       priv->hw->dma->dump_regs(priv->ioaddr, reg_space);
++      /* Copy DMA registers to where ethtool expects them */
++      memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
++             NUM_DWMAC1000_DMA_REGS * 4);
+ }
+ static void
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -235,6 +235,17 @@ static void stmmac_clk_csr_set(struct st
+               else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+                       priv->clk_csr = STMMAC_CSR_250_300M;
+       }
++
++      if (priv->plat->has_sun8i) {
++              if (clk_rate > 160000000)
++                      priv->clk_csr = 0x03;
++              else if (clk_rate > 80000000)
++                      priv->clk_csr = 0x02;
++              else if (clk_rate > 40000000)
++                      priv->clk_csr = 0x01;
++              else
++                      priv->clk_csr = 0;
++      }
+ }
+ static void print_pkt(unsigned char *buf, int len)
+@@ -783,7 +794,7 @@ static void stmmac_adjust_link(struct ne
+       struct stmmac_priv *priv = netdev_priv(dev);
+       struct phy_device *phydev = dev->phydev;
+       unsigned long flags;
+-      int new_state = 0;
++      bool new_state = false;
+       if (!phydev)
+               return;
+@@ -796,8 +807,8 @@ static void stmmac_adjust_link(struct ne
+               /* Now we make sure that we can be in full duplex mode.
+                * If not, we operate in half-duplex mode. */
+               if (phydev->duplex != priv->oldduplex) {
+-                      new_state = 1;
+-                      if (!(phydev->duplex))
++                      new_state = true;
++                      if (!phydev->duplex)
+                               ctrl &= ~priv->hw->link.duplex;
+                       else
+                               ctrl |= priv->hw->link.duplex;
+@@ -808,30 +819,17 @@ static void stmmac_adjust_link(struct ne
+                       stmmac_mac_flow_ctrl(priv, phydev->duplex);
+               if (phydev->speed != priv->speed) {
+-                      new_state = 1;
++                      new_state = true;
++                      ctrl &= ~priv->hw->link.speed_mask;
+                       switch (phydev->speed) {
+-                      case 1000:
+-                              if (priv->plat->has_gmac ||
+-                                  priv->plat->has_gmac4)
+-                                      ctrl &= ~priv->hw->link.port;
++                      case SPEED_1000:
++                              ctrl |= priv->hw->link.speed1000;
+                               break;
+-                      case 100:
+-                              if (priv->plat->has_gmac ||
+-                                  priv->plat->has_gmac4) {
+-                                      ctrl |= priv->hw->link.port;
+-                                      ctrl |= priv->hw->link.speed;
+-                              } else {
+-                                      ctrl &= ~priv->hw->link.port;
+-                              }
++                      case SPEED_100:
++                              ctrl |= priv->hw->link.speed100;
+                               break;
+-                      case 10:
+-                              if (priv->plat->has_gmac ||
+-                                  priv->plat->has_gmac4) {
+-                                      ctrl |= priv->hw->link.port;
+-                                      ctrl &= ~(priv->hw->link.speed);
+-                              } else {
+-                                      ctrl &= ~priv->hw->link.port;
+-                              }
++                      case SPEED_10:
++                              ctrl |= priv->hw->link.speed10;
+                               break;
+                       default:
+                               netif_warn(priv, link, priv->dev,
+@@ -847,12 +845,12 @@ static void stmmac_adjust_link(struct ne
+               writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+               if (!priv->oldlink) {
+-                      new_state = 1;
+-                      priv->oldlink = 1;
++                      new_state = true;
++                      priv->oldlink = true;
+               }
+       } else if (priv->oldlink) {
+-              new_state = 1;
+-              priv->oldlink = 0;
++              new_state = true;
++              priv->oldlink = false;
+               priv->speed = SPEED_UNKNOWN;
+               priv->oldduplex = DUPLEX_UNKNOWN;
+       }
+@@ -915,7 +913,7 @@ static int stmmac_init_phy(struct net_de
+       char bus_id[MII_BUS_ID_SIZE];
+       int interface = priv->plat->interface;
+       int max_speed = priv->plat->max_speed;
+-      priv->oldlink = 0;
++      priv->oldlink = false;
+       priv->speed = SPEED_UNKNOWN;
+       priv->oldduplex = DUPLEX_UNKNOWN;
+@@ -1450,7 +1448,7 @@ static void free_dma_rx_desc_resources(s
+ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+ {
+       u32 tx_count = priv->plat->tx_queues_to_use;
+-      u32 queue = 0;
++      u32 queue;
+       /* Free TX queue resources */
+       for (queue = 0; queue < tx_count; queue++) {
+@@ -1499,7 +1497,7 @@ static int alloc_dma_rx_desc_resources(s
+                                                   sizeof(dma_addr_t),
+                                                   GFP_KERNEL);
+               if (!rx_q->rx_skbuff_dma)
+-                      return -ENOMEM;
++                      goto err_dma;
+               rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
+                                               sizeof(struct sk_buff *),
+@@ -1562,13 +1560,13 @@ static int alloc_dma_tx_desc_resources(s
+                                                   sizeof(*tx_q->tx_skbuff_dma),
+                                                   GFP_KERNEL);
+               if (!tx_q->tx_skbuff_dma)
+-                      return -ENOMEM;
++                      goto err_dma;
+               tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
+                                               sizeof(struct sk_buff *),
+                                               GFP_KERNEL);
+               if (!tx_q->tx_skbuff)
+-                      goto err_dma_buffers;
++                      goto err_dma;
+               if (priv->extend_desc) {
+                       tx_q->dma_etx = dma_zalloc_coherent(priv->device,
+@@ -1578,7 +1576,7 @@ static int alloc_dma_tx_desc_resources(s
+                                                           &tx_q->dma_tx_phy,
+                                                           GFP_KERNEL);
+                       if (!tx_q->dma_etx)
+-                              goto err_dma_buffers;
++                              goto err_dma;
+               } else {
+                       tx_q->dma_tx = dma_zalloc_coherent(priv->device,
+                                                          DMA_TX_SIZE *
+@@ -1587,13 +1585,13 @@ static int alloc_dma_tx_desc_resources(s
+                                                          &tx_q->dma_tx_phy,
+                                                          GFP_KERNEL);
+                       if (!tx_q->dma_tx)
+-                              goto err_dma_buffers;
++                              goto err_dma;
+               }
+       }
+       return 0;
+-err_dma_buffers:
++err_dma:
+       free_dma_tx_desc_resources(priv);
+       return ret;
+@@ -2895,8 +2893,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+               priv->xstats.tx_set_ic_bit++;
+       }
+-      if (!priv->hwts_tx_en)
+-              skb_tx_timestamp(skb);
++      skb_tx_timestamp(skb);
+       if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+                    priv->hwts_tx_en)) {
+@@ -2974,7 +2971,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+       /* Manage oversized TCP frames for GMAC4 device */
+       if (skb_is_gso(skb) && priv->tso) {
+-              if (ip_hdr(skb)->protocol == IPPROTO_TCP)
++              if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+                       return stmmac_tso_xmit(skb, dev);
+       }
+@@ -3105,8 +3102,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+               priv->xstats.tx_set_ic_bit++;
+       }
+-      if (!priv->hwts_tx_en)
+-              skb_tx_timestamp(skb);
++      skb_tx_timestamp(skb);
+       /* Ready to fill the first descriptor and set the OWN bit w/o any
+        * problems because all the descriptors are actually ready to be
+@@ -3983,7 +3979,9 @@ static int stmmac_hw_init(struct stmmac_
+       struct mac_device_info *mac;
+       /* Identify the MAC HW device */
+-      if (priv->plat->has_gmac) {
++      if (priv->plat->setup) {
++              mac = priv->plat->setup(priv);
++      } else if (priv->plat->has_gmac) {
+               priv->dev->priv_flags |= IFF_UNICAST_FLT;
+               mac = dwmac1000_setup(priv->ioaddr,
+                                     priv->plat->multicast_filter_bins,
+@@ -4003,6 +4001,10 @@ static int stmmac_hw_init(struct stmmac_
+       priv->hw = mac;
++      /* dwmac-sun8i only works in chain mode */
++      if (priv->plat->has_sun8i)
++              chain_mode = 1;
++
+       /* To use the chained or ring mode */
+       if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+               priv->hw->mode = &dwmac4_ring_mode_ops;
+@@ -4131,8 +4133,15 @@ int stmmac_dvr_probe(struct device *devi
+       if ((phyaddr >= 0) && (phyaddr <= 31))
+               priv->plat->phy_addr = phyaddr;
+-      if (priv->plat->stmmac_rst)
++      if (priv->plat->stmmac_rst) {
++              ret = reset_control_assert(priv->plat->stmmac_rst);
+               reset_control_deassert(priv->plat->stmmac_rst);
++              /* Some reset controllers have only a reset callback instead of
++               * an assert + deassert callback pair.
++               */
++              if (ret == -ENOTSUPP)
++                      reset_control_reset(priv->plat->stmmac_rst);
++      }
+       /* Init MAC and get the capabilities */
+       ret = stmmac_hw_init(priv);
+@@ -4149,7 +4158,7 @@ int stmmac_dvr_probe(struct device *devi
+                           NETIF_F_RXCSUM;
+       if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+-              ndev->hw_features |= NETIF_F_TSO;
++              ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+               priv->tso = true;
+               dev_info(priv->device, "TSO feature enabled\n");
+       }
+@@ -4311,7 +4320,7 @@ int stmmac_suspend(struct device *dev)
+       }
+       spin_unlock_irqrestore(&priv->lock, flags);
+-      priv->oldlink = 0;
++      priv->oldlink = false;
+       priv->speed = SPEED_UNKNOWN;
+       priv->oldduplex = DUPLEX_UNKNOWN;
+       return 0;
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+@@ -204,6 +204,7 @@ int stmmac_mdio_register(struct net_devi
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
+       struct device_node *mdio_node = priv->plat->mdio_node;
++      struct device *dev = ndev->dev.parent;
+       int addr, found;
+       if (!mdio_bus_data)
+@@ -237,7 +238,7 @@ int stmmac_mdio_register(struct net_devi
+       else
+               err = mdiobus_register(new_bus);
+       if (err != 0) {
+-              netdev_err(ndev, "Cannot register the MDIO bus\n");
++              dev_err(dev, "Cannot register the MDIO bus\n");
+               goto bus_register_fail;
+       }
+@@ -292,7 +293,7 @@ int stmmac_mdio_register(struct net_devi
+       }
+       if (!found && !mdio_node) {
+-              netdev_warn(ndev, "No PHY found\n");
++              dev_warn(dev, "No PHY found\n");
+               mdiobus_unregister(new_bus);
+               mdiobus_free(new_bus);
+               return -ENODEV;
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+@@ -30,42 +30,39 @@
+  * negative value of the address means that MAC controller is not connected
+  * with PHY.
+  */
+-struct stmmac_pci_dmi_data {
+-      const char *name;
+-      const char *asset_tag;
++struct stmmac_pci_func_data {
+       unsigned int func;
+       int phy_addr;
+ };
+-struct stmmac_pci_info {
+-      struct pci_dev *pdev;
+-      int (*setup)(struct plat_stmmacenet_data *plat,
+-                   struct stmmac_pci_info *info);
+-      struct stmmac_pci_dmi_data *dmi;
++struct stmmac_pci_dmi_data {
++      const struct stmmac_pci_func_data *func;
++      size_t nfuncs;
+ };
+-static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
+-{
+-      const char *name = dmi_get_system_info(DMI_BOARD_NAME);
+-      const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG);
+-      unsigned int func = PCI_FUNC(info->pdev->devfn);
+-      struct stmmac_pci_dmi_data *dmi;
+-
+-      /*
+-       * Galileo boards with old firmware don't support DMI. We always return
+-       * 1 here, so at least first found MAC controller would be probed.
+-       */
+-      if (!name)
+-              return 1;
++struct stmmac_pci_info {
++      int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
++};
+-      for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) {
+-              if (!strcmp(dmi->name, name) && dmi->func == func) {
+-                      /* If asset tag is provided, match on it as well. */
+-                      if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag))
+-                              continue;
+-                      return dmi->phy_addr;
+-              }
+-      }
++static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
++                                  const struct dmi_system_id *dmi_list)
++{
++      const struct stmmac_pci_func_data *func_data;
++      const struct stmmac_pci_dmi_data *dmi_data;
++      const struct dmi_system_id *dmi_id;
++      int func = PCI_FUNC(pdev->devfn);
++      size_t n;
++
++      dmi_id = dmi_first_match(dmi_list);
++      if (!dmi_id)
++              return -ENODEV;
++
++      dmi_data = dmi_id->driver_data;
++      func_data = dmi_data->func;
++
++      for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
++              if (func_data->func == func)
++                      return func_data->phy_addr;
+       return -ENODEV;
+ }
+@@ -100,7 +97,8 @@ static void common_default_data(struct p
+       plat->rx_queues_cfg[0].pkt_route = 0x0;
+ }
+-static void stmmac_default_data(struct plat_stmmacenet_data *plat)
++static int stmmac_default_data(struct pci_dev *pdev,
++                             struct plat_stmmacenet_data *plat)
+ {
+       /* Set common default data first */
+       common_default_data(plat);
+@@ -112,12 +110,77 @@ static void stmmac_default_data(struct p
+       plat->dma_cfg->pbl = 32;
+       plat->dma_cfg->pblx8 = true;
+       /* TODO: AXI */
++
++      return 0;
+ }
+-static int quark_default_data(struct plat_stmmacenet_data *plat,
+-                            struct stmmac_pci_info *info)
++static const struct stmmac_pci_info stmmac_pci_info = {
++      .setup = stmmac_default_data,
++};
++
++static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
++      {
++              .func = 6,
++              .phy_addr = 1,
++      },
++};
++
++static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
++      .func = galileo_stmmac_func_data,
++      .nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
++};
++
++static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
++      {
++              .func = 6,
++              .phy_addr = 1,
++      },
++      {
++              .func = 7,
++              .phy_addr = 1,
++      },
++};
++
++static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
++      .func = iot2040_stmmac_func_data,
++      .nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
++};
++
++static const struct dmi_system_id quark_pci_dmi[] = {
++      {
++              .matches = {
++                      DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
++              },
++              .driver_data = (void *)&galileo_stmmac_dmi_data,
++      },
++      {
++              .matches = {
++                      DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
++              },
++              .driver_data = (void *)&galileo_stmmac_dmi_data,
++      },
++      {
++              .matches = {
++                      DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
++                      DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
++                                      "6ES7647-0AA00-0YA2"),
++              },
++              .driver_data = (void *)&galileo_stmmac_dmi_data,
++      },
++      {
++              .matches = {
++                      DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
++                      DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
++                                      "6ES7647-0AA00-1YA2"),
++              },
++              .driver_data = (void *)&iot2040_stmmac_dmi_data,
++      },
++      {}
++};
++
++static int quark_default_data(struct pci_dev *pdev,
++                            struct plat_stmmacenet_data *plat)
+ {
+-      struct pci_dev *pdev = info->pdev;
+       int ret;
+       /* Set common default data first */
+@@ -127,9 +190,19 @@ static int quark_default_data(struct pla
+        * Refuse to load the driver and register net device if MAC controller
+        * does not connect to any PHY interface.
+        */
+-      ret = stmmac_pci_find_phy_addr(info);
+-      if (ret < 0)
+-              return ret;
++      ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
++      if (ret < 0) {
++              /* Return error to the caller on DMI enabled boards. */
++              if (dmi_get_system_info(DMI_BOARD_NAME))
++                      return ret;
++
++              /*
++               * Galileo boards with old firmware don't support DMI. We always
++               * use 1 here as PHY address, so at least the first found MAC
++               * controller would be probed.
++               */
++              ret = 1;
++      }
+       plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+       plat->phy_addr = ret;
+@@ -143,41 +216,8 @@ static int quark_default_data(struct pla
+       return 0;
+ }
+-static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = {
+-      {
+-              .name = "Galileo",
+-              .func = 6,
+-              .phy_addr = 1,
+-      },
+-      {
+-              .name = "GalileoGen2",
+-              .func = 6,
+-              .phy_addr = 1,
+-      },
+-      {
+-              .name = "SIMATIC IOT2000",
+-              .asset_tag = "6ES7647-0AA00-0YA2",
+-              .func = 6,
+-              .phy_addr = 1,
+-      },
+-      {
+-              .name = "SIMATIC IOT2000",
+-              .asset_tag = "6ES7647-0AA00-1YA2",
+-              .func = 6,
+-              .phy_addr = 1,
+-      },
+-      {
+-              .name = "SIMATIC IOT2000",
+-              .asset_tag = "6ES7647-0AA00-1YA2",
+-              .func = 7,
+-              .phy_addr = 1,
+-      },
+-      {}
+-};
+-
+-static struct stmmac_pci_info quark_pci_info = {
++static const struct stmmac_pci_info quark_pci_info = {
+       .setup = quark_default_data,
+-      .dmi = quark_pci_dmi_data,
+ };
+ /**
+@@ -236,15 +276,9 @@ static int stmmac_pci_probe(struct pci_d
+       pci_set_master(pdev);
+-      if (info) {
+-              info->pdev = pdev;
+-              if (info->setup) {
+-                      ret = info->setup(plat, info);
+-                      if (ret)
+-                              return ret;
+-              }
+-      } else
+-              stmmac_default_data(plat);
++      ret = info->setup(pdev, plat);
++      if (ret)
++              return ret;
+       pci_enable_msi(pdev);
+@@ -270,14 +304,21 @@ static void stmmac_pci_remove(struct pci
+ static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
+-#define STMMAC_VENDOR_ID 0x700
++/* synthetic ID, no official vendor */
++#define PCI_VENDOR_ID_STMMAC 0x700
++
+ #define STMMAC_QUARK_ID  0x0937
+ #define STMMAC_DEVICE_ID 0x1108
++#define STMMAC_DEVICE(vendor_id, dev_id, info)        {       \
++      PCI_VDEVICE(vendor_id, dev_id),                 \
++      .driver_data = (kernel_ulong_t)&info            \
++      }
++
+ static const struct pci_device_id stmmac_id_table[] = {
+-      {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)},
+-      {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)},
+-      {PCI_VDEVICE(INTEL, STMMAC_QUARK_ID), (kernel_ulong_t)&quark_pci_info},
++      STMMAC_DEVICE(STMMAC, STMMAC_DEVICE_ID, stmmac_pci_info),
++      STMMAC_DEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_MAC, stmmac_pci_info),
++      STMMAC_DEVICE(INTEL, STMMAC_QUARK_ID, quark_pci_info),
+       {}
+ };
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -309,6 +309,13 @@ static int stmmac_dt_phy(struct plat_stm
+                        struct device_node *np, struct device *dev)
+ {
+       bool mdio = true;
++      static const struct of_device_id need_mdio_ids[] = {
++              { .compatible = "snps,dwc-qos-ethernet-4.10" },
++              { .compatible = "allwinner,sun8i-a83t-emac" },
++              { .compatible = "allwinner,sun8i-h3-emac" },
++              { .compatible = "allwinner,sun8i-v3s-emac" },
++              { .compatible = "allwinner,sun50i-a64-emac" },
++      };
+       /* If phy-handle property is passed from DT, use it as the PHY */
+       plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+@@ -325,8 +332,7 @@ static int stmmac_dt_phy(struct plat_stm
+               mdio = false;
+       }
+-      /* exception for dwmac-dwc-qos-eth glue logic */
+-      if (of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) {
++      if (of_match_node(need_mdio_ids, np)) {
+               plat->mdio_node = of_get_child_by_name(np, "mdio");
+       } else {
+               /**
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -177,6 +177,7 @@ struct plat_stmmacenet_data {
+       void (*fix_mac_speed)(void *priv, unsigned int speed);
+       int (*init)(struct platform_device *pdev, void *priv);
+       void (*exit)(struct platform_device *pdev, void *priv);
++      struct mac_device_info *(*setup)(void *priv);
+       void *bsp_priv;
+       struct clk *stmmac_clk;
+       struct clk *pclk;
+@@ -185,6 +186,7 @@ struct plat_stmmacenet_data {
+       struct reset_control *stmmac_rst;
+       struct stmmac_axi *axi;
+       int has_gmac4;
++      bool has_sun8i;
+       bool tso_en;
+       int mac_port_sel_speed;
+       bool en_tx_lpi_clockgating;