ipq40xx: add ipqess ethernet driver
author    Christian Lamparter <chunkeey@gmail.com>
          Sat, 9 Mar 2019 18:33:57 +0000 (19:33 +0100)
committer Christian Lamparter <chunkeey@gmail.com>
          Tue, 11 Aug 2020 17:09:08 +0000 (19:09 +0200)
This driver from John Crispin is poised to replace the current
essedma driver that drives the ethernet MAC on the IPQ40XX platform.
<https://forum.openwrt.org/t/ipq40xx-target-single-nic-devices/7292/16>

Signed-off-by: John Crispin <john@phrozen.org>
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
target/linux/ipq40xx/config-4.19
target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/ipqess.c [new file with mode: 0644]
target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/ipqess.h [new file with mode: 0644]
target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/ipqess_ethtool.c [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/720-ipqess.patch [new file with mode: 0644]
target/linux/ipq40xx/patches-4.19/721-dts-ipq4019-add-ethernet-essedma-node.patch [new file with mode: 0644]

index c70c761d25c401b70e9ef0de0c15bc9cdcd8b000..0ef68439a3458550f23363697eebc81b23a308ba 100644 (file)
@@ -264,6 +264,7 @@ CONFIG_INITRAMFS_SOURCE=""
 # CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set
 # CONFIG_IOMMU_IO_PGTABLE_LPAE is not set
 CONFIG_IOMMU_SUPPORT=y
+CONFIG_IPQ_ESS=y
 CONFIG_IPQ_GCC_4019=y
 # CONFIG_IPQ_GCC_806X is not set
 # CONFIG_IPQ_GCC_8074 is not set
diff --git a/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/ipqess.c b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/ipqess.c
new file mode 100644 (file)
index 0000000..1b68335
--- /dev/null
@@ -0,0 +1,1189 @@
+// SPDX-License-Identifier: (GPL-2.0 OR ISC)
+/* Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
+ * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+
+#include "ipqess.h"
+
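+/* The hardware prepends a 16-byte receive return descriptor (RRD) to
+ * every RX frame; IPQESS_NEXT_IDX assumes ring sizes that are powers
+ * of two.
+ */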
+#define IPQESS_RRD_SIZE                16
+#define IPQESS_NEXT_IDX(X, Y)  (((X) + 1) & ((Y) - 1))
+#define IPQESS_TX_DMA_BUF_LEN  0x3fff
+
+static void ipqess_w32(struct ipqess *ess, u32 reg, u32 val)
+{
+       __raw_writel(val, ess->hw_addr + reg);
+}
+
+static u32 ipqess_r32(struct ipqess *ess, u16 reg)
+{
+       return __raw_readl(ess->hw_addr + reg);
+}
+
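+/* read-modify-write helper: clear the bits in @mask, then set @val in @reg */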
+static void ipqess_m32(struct ipqess *ess, u32 mask, u32 val, u16 reg)
+{
+       u32 _val = ipqess_r32(ess, reg);
+       _val &= ~mask;
+       _val |= val;
+       ipqess_w32(ess, reg, _val);
+}
+
+static int ipqess_tx_ring_alloc(struct ipqess *ess)
+{
+       int i;
+
+       for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+               u32 idx;
+
+               ess->tx_ring[i].ess = ess;
+               ess->tx_ring[i].idx = i * 4;
+               ess->tx_ring[i].count = IPQESS_TX_RING_SIZE;
+               ess->tx_ring[i].nq = netdev_get_tx_queue(ess->netdev, i);
+
+               ess->tx_ring[i].buf = devm_kzalloc(&ess->pdev->dev,
+                       sizeof(struct ipqess_buf) * IPQESS_TX_RING_SIZE,
+                       GFP_KERNEL);
+               if (!ess->tx_ring[i].buf) {
+                       netdev_err(ess->netdev, "buffer alloc of tx ring failed\n");
+                       return -ENOMEM;
+               }
+
+               ess->tx_ring[i].hw_desc = dmam_alloc_coherent(&ess->pdev->dev,
+                       sizeof(struct ipqess_tx_desc) * IPQESS_TX_RING_SIZE,
+                       &ess->tx_ring[i].dma, GFP_KERNEL | __GFP_ZERO);
+               if (!ess->tx_ring[i].hw_desc) {
+                       netdev_err(ess->netdev, "descriptor allocation for tx ring failed\n");
+                       return -ENOMEM;
+               }
+
+               ipqess_w32(ess, IPQESS_REG_TPD_BASE_ADDR_Q(ess->tx_ring[i].idx),
+                        (u32)ess->tx_ring[i].dma);
+
+               idx = ipqess_r32(ess, IPQESS_REG_TPD_IDX_Q(ess->tx_ring[i].idx));
+               /* the consumer index sits in the upper 16 bits, hence the u32 */
+               idx >>= IPQESS_TPD_CONS_IDX_SHIFT;
+               idx &= 0xffff;
+               ess->tx_ring[i].head = ess->tx_ring[i].tail = idx;
+
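+               /* sync the software head/tail with the hardware producer and
+                * consumer indices so that the ring starts out empty */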
+               ipqess_m32(ess, IPQESS_TPD_PROD_IDX_MASK << IPQESS_TPD_PROD_IDX_SHIFT,
+                        idx, IPQESS_REG_TPD_IDX_Q(ess->tx_ring[i].idx));
+               ipqess_w32(ess, IPQESS_REG_TX_SW_CONS_IDX_Q(ess->tx_ring[i].idx), idx);
+               ipqess_w32(ess, IPQESS_REG_TPD_RING_SIZE, IPQESS_TX_RING_SIZE);
+       }
+
+       return 0;
+}
+
+static int ipqess_tx_unmap_and_free(struct device *dev, struct ipqess_buf *buf)
+{
+       int len = 0;
+
+       if (buf->flags & IPQESS_DESC_SINGLE)
+               dma_unmap_single(dev, buf->dma, buf->length, DMA_TO_DEVICE);
+       else if (buf->flags & IPQESS_DESC_PAGE)
+               dma_unmap_page(dev, buf->dma, buf->length, DMA_TO_DEVICE);
+
+       if (buf->flags & IPQESS_DESC_LAST) {
+               len = buf->skb->len;
+               dev_kfree_skb_any(buf->skb);
+       }
+
+       buf->flags = 0;
+
+       return len;
+}
+
+static void ipqess_tx_ring_free(struct ipqess *ess)
+{
+       int i;
+
+       for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+               int j;
+
+               if (!ess->tx_ring[i].hw_desc)
+                       continue;
+
+               for (j = 0; j < IPQESS_TX_RING_SIZE; j++) {
+                       struct ipqess_buf *buf = &ess->tx_ring[i].buf[j];
+
+                       ipqess_tx_unmap_and_free(&ess->pdev->dev, buf);
+               }
+
+               ess->tx_ring[i].buf = NULL;
+       }
+}
+
+static int ipqess_rx_buf_prepare(struct ipqess_buf *buf,
+       struct ipqess_rx_ring *rx_ring)
+{
+       /* Clean the HW DESC header, otherwise we might end up
+        * with a spurious descriptor because of random garbage
+        */
+       memset(buf->skb->data, 0, sizeof(struct ipqess_rx_desc));
+
+       buf->dma = dma_map_single(rx_ring->ppdev, buf->skb->data,
+                                 IPQESS_RX_HEAD_BUFF_SIZE, DMA_FROM_DEVICE);
+       if (dma_mapping_error(rx_ring->ppdev, buf->dma)) {
+               dev_err_once(rx_ring->ppdev,
+                       "IPQESS DMA mapping failed for linear address %pad",
+                       &buf->dma);
+               dev_kfree_skb_any(buf->skb);
+               buf->skb = NULL;
+               return -EFAULT;
+       }
+
+       buf->length = IPQESS_RX_HEAD_BUFF_SIZE;
+       rx_ring->hw_desc[rx_ring->head] = (void *)buf->dma;
+       rx_ring->head = (rx_ring->head + 1) % IPQESS_RX_RING_SIZE;
+
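+       /* publish the newly filled slot: the hardware RFD producer index
+        * trails the software head by one */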
+       ipqess_m32(rx_ring->ess, IPQESS_RFD_PROD_IDX_BITS,
+                (rx_ring->head + IPQESS_RX_RING_SIZE - 1) % IPQESS_RX_RING_SIZE,
+                IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
+
+       return 0;
+}
+
+/* locking is handled by the caller */
+static int ipqess_rx_buf_alloc_napi(struct ipqess_rx_ring *rx_ring)
+{
+       struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
+
+       buf->skb = napi_alloc_skb(&rx_ring->napi_rx,
+               IPQESS_RX_HEAD_BUFF_SIZE);
+       if (!buf->skb)
+               return -ENOMEM;
+
+       return ipqess_rx_buf_prepare(buf, rx_ring);
+}
+
+static int ipqess_rx_buf_alloc(struct ipqess_rx_ring *rx_ring)
+{
+       struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
+
+       buf->skb = netdev_alloc_skb_ip_align(rx_ring->ess->netdev,
+               IPQESS_RX_HEAD_BUFF_SIZE);
+       if (!buf->skb)
+               return -ENOMEM;
+
+       return ipqess_rx_buf_prepare(buf, rx_ring);
+}
+
+static void ipqess_refill_work(struct work_struct *work)
+{
+       struct ipqess_rx_ring_refill *rx_refill = container_of(work,
+               struct ipqess_rx_ring_refill, refill_work);
+       struct ipqess_rx_ring *rx_ring = rx_refill->rx_ring;
+       int refill = 0;
+
+       /* don't let this loop run away by accident. */
+       while (atomic_dec_and_test(&rx_ring->refill_count)) {
+               napi_disable(&rx_ring->napi_rx);
+               if (ipqess_rx_buf_alloc(rx_ring)) {
+                       refill++;
+                       dev_dbg(rx_ring->ppdev,
+                               "Not all buffers were reallocated");
+               }
+               napi_enable(&rx_ring->napi_rx);
+       }
+
+       if (atomic_add_return(refill, &rx_ring->refill_count))
+               schedule_work(&rx_refill->refill_work);
+}
+
+
+static int ipqess_rx_ring_alloc(struct ipqess *ess)
+{
+       int i;
+
+       for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+               int j;
+
+               ess->rx_ring[i].ess = ess;
+               ess->rx_ring[i].ppdev = &ess->pdev->dev;
+               ess->rx_ring[i].idx = i;
+
+               ess->rx_ring[i].buf = devm_kzalloc(&ess->pdev->dev,
+                       sizeof(struct ipqess_buf) * IPQESS_RX_RING_SIZE,
+                       GFP_KERNEL);
+               if (!ess->rx_ring[i].buf)
+                       return -ENOMEM;
+
+               ess->rx_ring[i].hw_desc = dmam_alloc_coherent(&ess->pdev->dev,
+                       sizeof(struct ipqess_rx_desc) * IPQESS_RX_RING_SIZE,
+                       &ess->rx_ring[i].dma, GFP_KERNEL);
+               if (!ess->rx_ring[i].hw_desc)
+                       return -ENOMEM;
+
+               for (j = 0; j < IPQESS_RX_RING_SIZE; j++)
+                       if (ipqess_rx_buf_alloc(&ess->rx_ring[i]) < 0)
+                               return -ENOMEM;
+
+               ess->rx_refill[i].rx_ring = &ess->rx_ring[i];
+               INIT_WORK(&ess->rx_refill[i].refill_work, ipqess_refill_work);
+
+               ipqess_w32(ess, IPQESS_REG_RFD_BASE_ADDR_Q(i),
+                        (u32)(ess->rx_ring[i].dma));
+       }
+
+       ipqess_w32(ess, IPQESS_REG_RX_DESC0,
+                (IPQESS_RX_HEAD_BUFF_SIZE << IPQESS_RX_BUF_SIZE_SHIFT) |
+                (IPQESS_RX_RING_SIZE << IPQESS_RFD_RING_SIZE_SHIFT));
+
+       return 0;
+}
+
+static void ipqess_rx_ring_free(struct ipqess *ess)
+{
+       int i;
+
+       for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+               int j;
+
+               atomic_set(&ess->rx_ring[i].refill_count, 0);
+               cancel_work_sync(&ess->rx_refill[i].refill_work);
+
+               for (j = 0; j < IPQESS_RX_RING_SIZE; j++) {
+                       dma_unmap_single(&ess->pdev->dev,
+                                        ess->rx_ring[i].buf[j].dma,
+                                        ess->rx_ring[i].buf[j].length,
+                                        DMA_FROM_DEVICE);
+                       dev_kfree_skb_any(ess->rx_ring[i].buf[j].skb);
+               }
+       }
+}
+
+static struct net_device_stats *ipqess_get_stats(struct net_device *netdev)
+{
+       struct ipqess *ess = netdev_priv(netdev);
+       uint32_t *p;
+       int i;
+       u32 stat;
+
+       spin_lock(&ess->stats_lock);
+       p = (uint32_t *)&(ess->ipqessstats);
+
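+       /* walk ipqesstool_statistics field by field; the field order matches
+        * the register read order below (16 TX packet, 16 TX byte, 8 RX
+        * packet and 8 RX byte counters) */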
+       for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
+               stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_PKT_Q(i));
+               *p += stat;
+               p++;
+       }
+
+       for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
+               stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_BYTE_Q(i));
+               *p += stat;
+               p++;
+       }
+
+       for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
+               stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_PKT_Q(i));
+               *p += stat;
+               p++;
+       }
+
+       for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
+               stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_BYTE_Q(i));
+               *p += stat;
+               p++;
+       }
+
+       spin_unlock(&ess->stats_lock);
+
+       return &ess->stats;
+}
+
+static int ipqess_phy_connect(struct net_device *netdev)
+{
+       struct ipqess *ess = netdev_priv(netdev);
+       struct device_node *of_node = ess->pdev->dev.of_node;
+       struct device_node *np = NULL;
+
+       if (!of_phy_register_fixed_link(of_node))
+               np = of_node_get(of_node);
+       if (!np)
+               return -ENODEV;
+
+       netdev->phydev = of_phy_find_device(np);
+       of_node_put(np);
+       if (!netdev->phydev) {
+               of_phy_deregister_fixed_link(of_node);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int ipqess_rx_poll(struct ipqess_rx_ring *rx_ring, int budget)
+{
+       u32 length = 0, num_desc, tail, rx_ring_tail;
+       int done = 0;
+
+       rx_ring_tail = rx_ring->tail;
+
+       tail = ipqess_r32(rx_ring->ess, IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
+       tail >>= IPQESS_RFD_CONS_IDX_SHIFT;
+       tail &= IPQESS_RFD_CONS_IDX_MASK;
+
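+       /* drain completed descriptors between the software tail and the
+        * hardware consumer index */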
+       while (done < budget) {
+               struct sk_buff *skb;
+               struct ipqess_rx_desc *rd;
+
+               if (rx_ring_tail == tail)
+                       break;
+
+               dma_unmap_single(rx_ring->ppdev,
+                                rx_ring->buf[rx_ring_tail].dma,
+                                rx_ring->buf[rx_ring_tail].length,
+                                DMA_FROM_DEVICE);
+
+               skb = xchg(&rx_ring->buf[rx_ring_tail].skb, NULL);
+               rd = (struct ipqess_rx_desc *)skb->data;
+               rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
+
+               /* Check if RRD is valid */
+               if (!(rd->rrd7 & IPQESS_RRD_DESC_VALID)) {
+                       num_desc = 1;
+                       dev_kfree_skb_any(skb);
+                       goto skip;
+               }
+
+               num_desc = rd->rrd1 & IPQESS_RRD_NUM_RFD_MASK;
+               length = rd->rrd6 & IPQESS_RRD_PKT_SIZE_MASK;
+
+               skb_reserve(skb, IPQESS_RRD_SIZE);
+               if (num_desc > 1) {
+                       /* can we use build_skb here ? */
+                       struct sk_buff *skb_prev = NULL;
+                       int size_remaining;
+                       int i;
+
+                       skb->data_len = 0;
+                       skb->tail += (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
+                       skb->len = skb->truesize = length;
+                       size_remaining = length - (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
+
+                       for (i = 1; i < num_desc; i++) {
+                               /* TODO: use build_skb ? */
+                               struct sk_buff *skb_temp = rx_ring->buf[rx_ring_tail].skb;
+
+                               dma_unmap_single(rx_ring->ppdev,
+                                                rx_ring->buf[rx_ring_tail].dma,
+                                                rx_ring->buf[rx_ring_tail].length,
+                                                DMA_FROM_DEVICE);
+
+                               skb_put(skb_temp, min(size_remaining, IPQESS_RX_HEAD_BUFF_SIZE));
+                               if (skb_prev)
+                                       skb_prev->next = rx_ring->buf[rx_ring_tail].skb;
+                               else
+                                       skb_shinfo(skb)->frag_list = rx_ring->buf[rx_ring_tail].skb;
+                               skb_prev = rx_ring->buf[rx_ring_tail].skb;
+                               rx_ring->buf[rx_ring_tail].skb->next = NULL;
+
+                               skb->data_len += rx_ring->buf[rx_ring_tail].skb->len;
+                               size_remaining -= rx_ring->buf[rx_ring_tail].skb->len;
+
+                               rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
+                       }
+
+               } else {
+                       skb_put(skb, length);
+               }
+
+               skb->dev = rx_ring->ess->netdev;
+               skb->protocol = eth_type_trans(skb, rx_ring->ess->netdev);
+               skb_record_rx_queue(skb, rx_ring->idx);
+
+               if (rd->rrd6 & IPQESS_RRD_CSUM_FAIL_MASK)
+                       skb_checksum_none_assert(skb);
+               else
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               if (rd->rrd7 & IPQESS_RRD_CVLAN) {
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rd->rrd4);
+               } else if (rd->rrd1 & IPQESS_RRD_SVLAN) {
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), rd->rrd4);
+               }
+               napi_gro_receive(&rx_ring->napi_rx, skb);
+
+               /* TODO: do we need to have these here ? */
+               rx_ring->ess->stats.rx_packets++;
+               rx_ring->ess->stats.rx_bytes += length;
+
+               done++;
+skip:
+
+               num_desc += atomic_xchg(&rx_ring->refill_count, 0);
+               while (num_desc) {
+                       if (ipqess_rx_buf_alloc_napi(rx_ring)) {
+                               num_desc = atomic_add_return(num_desc,
+                                        &rx_ring->refill_count);
+                               if (num_desc >= ((4 * IPQESS_RX_RING_SIZE + 6) / 7))
+                                       schedule_work(&rx_ring->ess->rx_refill[rx_ring->idx].refill_work);
+                               break;
+                       }
+                       num_desc--;
+               }
+       }
+
+       ipqess_w32(rx_ring->ess, IPQESS_REG_RX_SW_CONS_IDX_Q(rx_ring->idx),
+                  rx_ring_tail);
+       rx_ring->tail = rx_ring_tail;
+
+       return done;
+}
+
+static int ipqess_tx_complete(struct ipqess_tx_ring *tx_ring, int budget)
+{
+       u32 tail;
+       int done = 0;
+       int total = 0, ret;
+
+       tail = ipqess_r32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
+       tail >>= IPQESS_TPD_CONS_IDX_SHIFT;
+       tail &= IPQESS_TPD_CONS_IDX_MASK;
+
+       while ((tx_ring->tail != tail) && (done < budget)) {
+               //pr_info("freeing txq:%d tail:%d tailbuf:%p\n", tx_ring->idx, tx_ring->tail, &tx_ring->buf[tx_ring->tail]);
+               ret = ipqess_tx_unmap_and_free(&tx_ring->ess->pdev->dev,
+                                      &tx_ring->buf[tx_ring->tail]);
+               tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);
+               if (ret) {
+                       total += ret;
+                       done++;
+               }
+       }
+
+       ipqess_w32(tx_ring->ess,
+                IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx),
+                tx_ring->tail);
+
+       if (netif_tx_queue_stopped(tx_ring->nq)) {
+               printk("S %d\n", tx_ring->idx);
+               netif_tx_wake_queue(tx_ring->nq);
+       }
+
+       netdev_tx_completed_queue(tx_ring->nq, done, total);
+
+       return done;
+}
+
+static int ipqess_tx_napi(struct napi_struct *napi, int budget)
+{
+       struct ipqess_tx_ring *tx_ring = container_of(napi, struct ipqess_tx_ring,
+                                                   napi_tx);
+       u32 reg_data;
+       u32 shadow_tx_status;
+       int work_done = 0;
+       struct queue *queue = &tx_ring->ess->queue[tx_ring->idx / 4];
+
+       reg_data = ipqess_r32(tx_ring->ess, IPQESS_REG_TX_ISR);
+       queue->tx_status |= reg_data & BIT(tx_ring->idx);
+       shadow_tx_status = queue->tx_status;
+
+       work_done = ipqess_tx_complete(tx_ring, budget);
+
+       ipqess_w32(tx_ring->ess, IPQESS_REG_TX_ISR, shadow_tx_status);
+
+       if (likely(work_done < budget)) {
+               napi_complete(napi);
+               ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
+       }
+
+       return work_done;
+}
+
+static int ipqess_rx_napi(struct napi_struct *napi, int budget)
+{
+       struct ipqess_rx_ring *rx_ring = container_of(napi, struct ipqess_rx_ring,
+                                                   napi_rx);
+       struct ipqess *ess = rx_ring->ess;
+       int remain_budget = budget;
+       int rx_done;
+       u32 rx_mask = BIT(rx_ring->idx << IPQESS_RX_PER_CPU_MASK_SHIFT);
+       u32 status;
+
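+       /* ack first, then poll; re-check the ISR afterwards so a frame that
+        * arrives in between is not lost while the interrupt is masked */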
+poll_again:
+       ipqess_w32(ess, IPQESS_REG_RX_ISR, rx_mask);
+       rx_done = ipqess_rx_poll(rx_ring, remain_budget);
+
+       if (rx_done == remain_budget)
+               return budget;
+
+       status = ipqess_r32(ess, IPQESS_REG_RX_ISR);
+       if (status & rx_mask) {
+               remain_budget -= rx_done;
+               goto poll_again;
+       }
+
+       napi_complete(napi);
+       ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx), 0x1);
+
+       return rx_done + budget - remain_budget;
+}
+
+static irqreturn_t ipqess_interrupt_tx(int irq, void *priv)
+{
+       struct ipqess_tx_ring *tx_ring = (struct ipqess_tx_ring *) priv;
+
+       if (likely(napi_schedule_prep(&tx_ring->napi_tx))) {
+               __napi_schedule(&tx_ring->napi_tx);
+               ipqess_w32(tx_ring->ess,
+                        IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx),
+                        0x0);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t ipqess_interrupt_rx(int irq, void *priv)
+{
+       struct ipqess_rx_ring *rx_ring = (struct ipqess_rx_ring *) priv;
+
+       if (likely(napi_schedule_prep(&rx_ring->napi_rx))) {
+               __napi_schedule(&rx_ring->napi_rx);
+               ipqess_w32(rx_ring->ess,
+                        IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx),
+                        0x0);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void ipqess_irq_enable(struct ipqess *ess)
+{
+       int i;
+
+       ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
+       ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
+       for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+               ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(i), 1);
+               ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(i), 1);
+       }
+}
+
+static void ipqess_irq_disable(struct ipqess *ess)
+{
+       int i;
+
+       for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+               ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(i), 0);
+               ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(i), 0);
+       }
+}
+
+static int __init ipqess_init(struct net_device *netdev)
+{
+       struct ipqess *ess = netdev_priv(netdev);
+       struct device_node *of_node = ess->pdev->dev.of_node;
+       const char *mac_addr;
+
+       mac_addr = of_get_mac_address(of_node);
+       if (mac_addr)
+               ether_addr_copy(netdev->dev_addr, mac_addr);
+       if (!is_valid_ether_addr(netdev->dev_addr)) {
+               random_ether_addr(netdev->dev_addr);
+               dev_info(&ess->pdev->dev, "generated random MAC address %pM\n",
+                       netdev->dev_addr);
+               netdev->addr_assign_type = NET_ADDR_RANDOM;
+       }
+
+       return ipqess_phy_connect(netdev);
+}
+
+static void ipqess_uninit(struct net_device *netdev)
+{
+       struct ipqess *ess = netdev_priv(netdev);
+       struct device_node *of_node = ess->pdev->dev.of_node;
+
+       phy_disconnect(netdev->phydev);
+       of_phy_deregister_fixed_link(of_node);
+}
+
+static int ipqess_open(struct net_device *netdev)
+{
+       struct ipqess *ess = netdev_priv(netdev);
+       int i;
+
+       for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+               napi_enable(&ess->tx_ring[i].napi_tx);
+               napi_enable(&ess->rx_ring[i].napi_rx);
+       }
+       ipqess_irq_enable(ess);
+       phy_start(ess->netdev->phydev);
+       netif_tx_start_all_queues(netdev);
+       netif_carrier_on(netdev);
+
+       return 0;
+}
+
+static int ipqess_stop(struct net_device *netdev)
+{
+       struct ipqess *ess = netdev_priv(netdev);
+       int i;
+
+       netif_tx_stop_all_queues(netdev);
+       phy_stop(netdev->phydev);
+       netif_carrier_off(netdev);
+       ipqess_irq_disable(ess);
+       for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+               napi_disable(&ess->tx_ring[i].napi_tx);
+               napi_disable(&ess->rx_ring[i].napi_rx);
+       }
+
+       return 0;
+}
+
+static int ipqess_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+       switch (cmd) {
+       case SIOCGMIIPHY:
+       case SIOCGMIIREG:
+       case SIOCSMIIREG:
+               return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+       default:
+               break;
+       }
+
+       return -EOPNOTSUPP;
+}
+
+
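+/* number of free TX descriptors; one slot is always kept unused so that
+ * head == tail unambiguously means "ring empty" */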
+static inline u16 ipqess_tx_desc_available(struct ipqess_tx_ring *tx_ring)
+{
+       u16 count = 0;
+
+       if (tx_ring->tail <= tx_ring->head)
+               count = IPQESS_TX_RING_SIZE;
+
+       count += tx_ring->tail - tx_ring->head - 1;
+
+       return count;
+}
+
+static inline int ipqess_cal_txd_req(struct sk_buff *skb)
+{
+       int i, nfrags;
+       struct skb_frag_struct *frag;
+
+       nfrags = 1;
+       if (skb_is_gso(skb)) {
+               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                       frag = &skb_shinfo(skb)->frags[i];
+                       nfrags += DIV_ROUND_UP(frag->size, IPQESS_TX_DMA_BUF_LEN);
+               }
+       } else {
+               nfrags += skb_shinfo(skb)->nr_frags;
+       }
+
+       return nfrags; // DIV_ROUND_UP(nfrags, 2);
+}
+
+static struct ipqess_buf *ipqess_get_tx_buffer(struct ipqess_tx_ring *tx_ring,
+                                              struct ipqess_tx_desc *desc)
+{
+       return &tx_ring->buf[desc - (struct ipqess_tx_desc *)tx_ring->hw_desc];
+}
+
+static struct ipqess_tx_desc *ipqess_tx_desc_next(struct ipqess_tx_ring *tx_ring)
+{
+       struct ipqess_tx_desc *desc;
+
+       desc = (&((struct ipqess_tx_desc *)(tx_ring->hw_desc))[tx_ring->head]);
+       tx_ring->head = IPQESS_NEXT_IDX(tx_ring->head, tx_ring->count);
+
+       return desc;
+}
+
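+/* undo a partially mapped TX descriptor chain after a DMA mapping failure */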
+static void ipqess_rollback_tx(struct ipqess *eth,
+                           struct ipqess_tx_desc *first_desc, int queue_id)
+{
+       struct ipqess_tx_ring *tx_ring = &eth->tx_ring[queue_id / 4];
+       struct ipqess_buf *buf;
+       struct ipqess_tx_desc *desc = NULL;
+       u16 start_index, index;
+
+       start_index = first_desc - (struct ipqess_tx_desc *)(tx_ring->hw_desc);
+
+       index = start_index;
+       while (index != tx_ring->head) {
+               desc = (&((struct ipqess_tx_desc *)(tx_ring->hw_desc))[index]);
+               buf = &tx_ring->buf[index];
+               ipqess_tx_unmap_and_free(&eth->pdev->dev, buf);
+               memset(desc, 0, sizeof(struct ipqess_tx_desc));
+               if (++index == tx_ring->count)
+                       index = 0;
+       }
+       tx_ring->head = start_index;
+}
+
+static int ipqess_tx_map_and_fill(struct ipqess_tx_ring *tx_ring, struct sk_buff *skb)
+{
+       struct ipqess_buf *buf = NULL;
+       struct platform_device *pdev = tx_ring->ess->pdev;
+       struct ipqess_tx_desc *desc = NULL, *first_desc = NULL;
+       u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
+       u16 len, lso_len = 0;
+       int i = 0;
+
+       if (skb_is_gso(skb)) {
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+                       lso_word1 |= IPQESS_TPD_IPV4_EN;
+                       ip_hdr(skb)->check = 0;
+                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+                               ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+               } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+                       lso_word1 |= IPQESS_TPD_LSO_V2_EN;
+                       ipv6_hdr(skb)->payload_len = 0;
+                       tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                               &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+               }
+
+               lso_word1 |= IPQESS_TPD_LSO_EN |
+                            ((skb_shinfo(skb)->gso_size & IPQESS_TPD_MSS_MASK) << IPQESS_TPD_MSS_SHIFT) |
+                            (skb_transport_offset(skb) << IPQESS_TPD_HDR_SHIFT);
+       } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+               u8 css, cso;
+
+               cso = skb_checksum_start_offset(skb);
+               css = cso + skb->csum_offset;
+
+               word1 |= (IPQESS_TPD_CUSTOM_CSUM_EN);
+               word1 |= (cso >> 1) << IPQESS_TPD_HDR_SHIFT;
+               word1 |= ((css >> 1) << IPQESS_TPD_CUSTOM_CSUM_SHIFT);
+       }
+
+       if (skb_vlan_tag_present(skb)) {
+               switch (skb->vlan_proto) {
+               case htons(ETH_P_8021Q):
+                       word3 |= BIT(IPQESS_TX_INS_CVLAN);
+                       word3 |= skb_vlan_tag_get(skb) << IPQESS_TX_CVLAN_TAG_SHIFT;
+                       break;
+               case htons(ETH_P_8021AD):
+                       word1 |= BIT(IPQESS_TX_INS_SVLAN);
+                       svlan_tag = skb_vlan_tag_get(skb) << IPQESS_TX_SVLAN_TAG_SHIFT;
+                       break;
+               default:
+                       dev_err(&pdev->dev, "no ctag or stag present\n");
+                       goto vlan_tag_error;
+               }
+       }
+
+       if (skb->protocol == htons(ETH_P_PPP_SES))
+               word1 |= IPQESS_TPD_PPPOE_EN;
+
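+       /* word3 carries the destination port bitmap; 0x3e (ports 1-5) is the
+        * default when dev_scratch does not select a valid subset */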
+       if ((skb->dev_scratch > 1) && (skb->dev_scratch < 0x3e)) {
+               word3 |= skb->dev_scratch << IPQESS_TPD_PORT_BITMAP_SHIFT;
+       } else {
+               word3 |= 0x3e << IPQESS_TPD_PORT_BITMAP_SHIFT;
+       }
+       len = skb_headlen(skb);
+
+       first_desc = desc = ipqess_tx_desc_next(tx_ring);
+       if (lso_word1 & IPQESS_TPD_LSO_V2_EN) {
+               desc->addr = cpu_to_le16(skb->len);
+               desc->word1 = word1 | lso_word1;
+               desc->svlan_tag = svlan_tag;
+               desc->word3 = word3;
+               desc = ipqess_tx_desc_next(tx_ring);
+       }
+
+       buf = ipqess_get_tx_buffer(tx_ring, desc);
+       if (lso_word1)
+               buf->length = lso_len;
+       else
+               buf->length = len;
+       buf->dma = dma_map_single(&pdev->dev,
+                               skb->data, len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&pdev->dev, buf->dma))
+               goto dma_error;
+
+       desc->addr = cpu_to_le32(buf->dma);
+       desc->len  = cpu_to_le16(len);
+
+       buf->flags |= IPQESS_DESC_SINGLE;
+       desc->word1 = word1 | lso_word1;
+       desc->svlan_tag = svlan_tag;
+       desc->word3 = word3;
+
+       while (i < skb_shinfo(skb)->nr_frags) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               len = skb_frag_size(frag);
+               desc = ipqess_tx_desc_next(tx_ring);
+               buf = ipqess_get_tx_buffer(tx_ring, desc);
+               buf->length = len;
+               buf->flags |= IPQESS_DESC_PAGE;
+               buf->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, DMA_TO_DEVICE);
+               if (dma_mapping_error(&pdev->dev, buf->dma))
+                       goto dma_error;
+
+               desc->addr = cpu_to_le32(buf->dma);
+               desc->len  = cpu_to_le16(len);
+               desc->svlan_tag = svlan_tag;
+               desc->word1 = word1 | lso_word1;
+               desc->word3 = word3;
+               i++;
+       }
+       desc->word1 |= 1 << IPQESS_TPD_EOP_SHIFT;
+       buf->skb = skb;
+       buf->flags |= IPQESS_DESC_LAST;
+
+       return 0;
+
+dma_error:
+       ipqess_rollback_tx(tx_ring->ess, first_desc, tx_ring->idx);
+       dev_err(&pdev->dev, "TX DMA map failed\n");
+
+vlan_tag_error:
+       return -ENOMEM;
+}
+
+static netdev_tx_t ipqess_xmit(struct sk_buff *skb,
+                            struct net_device *netdev)
+{
+       struct ipqess *ess = netdev_priv(netdev);
+       struct ipqess_tx_ring *tx_ring;
+       int tx_num;
+       int ret;
+
+       tx_ring = &ess->tx_ring[skb_get_queue_mapping(skb)];
+       tx_num = ipqess_cal_txd_req(skb);
+       if (ipqess_tx_desc_available(tx_ring) <= tx_num) {
+               printk("s %d %x\n", tx_ring->idx, ipqess_r32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx)));
+               netif_tx_stop_queue(tx_ring->nq);
+               ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
+               return NETDEV_TX_BUSY;
+       }
+
+       ret = ipqess_tx_map_and_fill(tx_ring, skb);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               ess->stats.tx_errors++;
+               goto err_out;
+       }
+
+       ess->stats.tx_packets++;
+       ess->stats.tx_bytes += skb->len;
+       netdev_tx_sent_queue(tx_ring->nq, skb->len);
+
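+       /* ring the doorbell only for the last skb of a batch: publishing the
+        * new producer index starts the DMA engine */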
+       if (!skb->xmit_more || netif_xmit_stopped(tx_ring->nq))
+               ipqess_m32(ess,
+                        IPQESS_TPD_PROD_IDX_BITS,
+                        tx_ring->head,
+                        IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
+
+err_out:
+       return NETDEV_TX_OK;
+}
+
+static int ipqess_set_mac_address(struct net_device *netdev, void *p)
+{
+       int ret = eth_mac_addr(netdev, p);
+       struct ipqess *ess = netdev_priv(netdev);
+       const char *macaddr = netdev->dev_addr;
+
+       if (ret)
+               return ret;
+
+//     spin_lock_bh(&mac->hw->page_lock);
+       ipqess_w32(ess, IPQESS_REG_MAC_CTRL1,
+                (macaddr[0] << 8) | macaddr[1]);
+       ipqess_w32(ess, IPQESS_REG_MAC_CTRL0,
+                (macaddr[2] << 24) | (macaddr[3] << 16) |
+                (macaddr[4] << 8) | macaddr[5]);
+//     spin_unlock_bh(&mac->hw->page_lock);
+
+       return 0;
+}
+
+static const struct net_device_ops ipqess_axi_netdev_ops = {
+       .ndo_init               = ipqess_init,
+       .ndo_uninit             = ipqess_uninit,
+       .ndo_open               = ipqess_open,
+       .ndo_stop               = ipqess_stop,
+       .ndo_do_ioctl           = ipqess_do_ioctl,
+       .ndo_start_xmit         = ipqess_xmit,
+       .ndo_get_stats          = ipqess_get_stats,
+       .ndo_set_mac_address    = ipqess_set_mac_address,
+};
+
+static void ipqess_reset(struct ipqess *ess)
+{
+       int i;
+
+       /* disable all IRQs */
+       for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+               ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(i), 0x0);
+               ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(i), 0x0);
+       }
+
+       ipqess_w32(ess, IPQESS_REG_MISC_IMR, 0);
+       ipqess_w32(ess, IPQESS_REG_WOL_IMR, 0);
+
+       /* clear the IRQ status registers */
+       ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
+       ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
+       ipqess_w32(ess, IPQESS_REG_MISC_ISR, 0x1fff);
+       ipqess_w32(ess, IPQESS_REG_WOL_ISR, 0x1);
+       ipqess_w32(ess, IPQESS_REG_WOL_CTRL, 0);
+
+       /* disable RX and TX queues */
+       ipqess_m32(ess, IPQESS_RXQ_CTRL_EN, 0, IPQESS_REG_RXQ_CTRL);
+       ipqess_m32(ess, IPQESS_TXQ_CTRL_TXQ_EN, 0, IPQESS_REG_TXQ_CTRL);
+}
+
+static int ipqess_hw_init(struct ipqess *ess)
+{
+       int i, err;
+
+       ipqess_reset(ess);
+
+       ipqess_m32(ess, BIT(IPQESS_INTR_SW_IDX_W_TYP_SHIFT),
+                IPQESS_INTR_SW_IDX_W_TYPE << IPQESS_INTR_SW_IDX_W_TYP_SHIFT,
+                IPQESS_REG_INTR_CTRL);
+
+       ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
+       ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
+
+       /* enable IRQ delay slot */
+       ipqess_w32(ess, IPQESS_REG_IRQ_MODRT_TIMER_INIT,
+                (IPQESS_TX_IMT << IPQESS_IRQ_MODRT_TX_TIMER_SHIFT) |
+                (IPQESS_RX_IMT << IPQESS_IRQ_MODRT_RX_TIMER_SHIFT));
+
+       /* Configure the TX Queue bursting */
+       ipqess_w32(ess, IPQESS_REG_TXQ_CTRL,
+                (IPQESS_TPD_BURST << IPQESS_TXQ_NUM_TPD_BURST_SHIFT) |
+                (IPQESS_TXF_BURST << IPQESS_TXQ_TXF_BURST_NUM_SHIFT) |
+                IPQESS_TXQ_CTRL_TPD_BURST_EN);
+
+       /* Set RSS type */
+       ipqess_w32(ess, IPQESS_REG_RSS_TYPE,
+                IPQESS_RSS_TYPE_IPV4TCP | IPQESS_RSS_TYPE_IPV6_TCP |
+                IPQESS_RSS_TYPE_IPV4_UDP | IPQESS_RSS_TYPE_IPV6UDP |
+                IPQESS_RSS_TYPE_IPV4 | IPQESS_RSS_TYPE_IPV6);
+
+       /* Set RFD ring burst and threshold */
+       ipqess_w32(ess, IPQESS_REG_RX_DESC1,
+               (IPQESS_RFD_BURST << IPQESS_RXQ_RFD_BURST_NUM_SHIFT) |
+               (IPQESS_RFD_THR << IPQESS_RXQ_RFD_PF_THRESH_SHIFT) |
+               (IPQESS_RFD_LTHR << IPQESS_RXQ_RFD_LOW_THRESH_SHIFT));
+
+       /* Set Rx FIFO
+        * - threshold to start to DMA data to host
+        * (Remove IPQESS_RXQ_CTRL_RMV_VLAN otherwise VLAN won't work)
+        */
+       ipqess_w32(ess, IPQESS_REG_RXQ_CTRL,
+                IPQESS_FIFO_THRESH_128_BYTE /* | IPQESS_RXQ_CTRL_RMV_VLAN */);
+
+       err = ipqess_rx_ring_alloc(ess);
+       if (err)
+               return err;
+
+       err = ipqess_tx_ring_alloc(ess);
+       if (err)
+               return err;
+
+       /* Load all of ring base addresses above into the dma engine */
+       ipqess_m32(ess, 0, BIT(IPQESS_LOAD_PTR_SHIFT),
+                IPQESS_REG_TX_SRAM_PART);
+
+       /* Disable TX FIFO low watermark and high watermark */
+       ipqess_w32(ess, IPQESS_REG_TXF_WATER_MARK, 0);
+
+       /* Configure the RSS indirection table.
+        * All 128 hash entries are configured in the following
+        * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively,
+        * and so on.
+        */
+       for (i = 0; i < IPQESS_NUM_IDT; i++)
+               ipqess_w32(ess, IPQESS_REG_RSS_IDT(i), IPQESS_RSS_IDT_VALUE);
+
+       /* Configure the load balance mapping table.
+        * Four table entries are configured according to the following
+        * pattern: load_balance{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively
+        * (see IPQESS_LB_REG_VALUE).
+        */
+       ipqess_w32(ess, IPQESS_REG_LB_RING, IPQESS_LB_REG_VALUE);
+
+       /* Configure Virtual queue for Tx rings */
+       ipqess_w32(ess, IPQESS_REG_VQ_CTRL0, IPQESS_VQ_REG_VALUE);
+       ipqess_w32(ess, IPQESS_REG_VQ_CTRL1, IPQESS_VQ_REG_VALUE);
+
+       /* Configure max AXI burst write size to 128 bytes */
+       ipqess_w32(ess, IPQESS_REG_AXIW_CTRL_MAXWRSIZE,
+                IPQESS_AXIW_MAXWRSIZE_VALUE);
+
+       /* Enable all TX and RX queues */
+       ipqess_m32(ess, 0, IPQESS_TXQ_CTRL_TXQ_EN, IPQESS_REG_TXQ_CTRL);
+       ipqess_m32(ess, 0, IPQESS_RXQ_CTRL_EN, IPQESS_REG_RXQ_CTRL);
+
+       return 0;
+}
+
+static void ipqess_cleanup(struct ipqess *ess)
+{
+       ipqess_reset(ess);
+       unregister_netdev(ess->netdev);
+
+       ipqess_tx_ring_free(ess);
+       ipqess_rx_ring_free(ess);
+
+       free_netdev(ess->netdev);
+}
+
+static int ipqess_axi_probe(struct platform_device *pdev)
+{
+       struct ipqess *ess;
+       struct net_device *netdev;
+       struct resource *res;
+       int i, err = 0;
+
+       netdev = alloc_etherdev_mqs(sizeof(struct ipqess),
+                                   IPQESS_NETDEV_QUEUES,
+                                   IPQESS_NETDEV_QUEUES);
+       if (!netdev)
+               return -ENOMEM;
+
+       ess = netdev_priv(netdev);
+       memset(ess, 0, sizeof(struct ipqess));
+       ess->netdev = netdev;
+       ess->pdev = pdev;
+       spin_lock_init(&ess->stats_lock);
+       SET_NETDEV_DEV(netdev, &pdev->dev);
+       platform_set_drvdata(pdev, netdev);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ess->hw_addr = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(ess->hw_addr)) {
+               err = PTR_ERR(ess->hw_addr);
+               goto err_out;
+       }
+
+       for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++)
+               ess->tx_irq[i] = platform_get_irq(pdev, i);
+       for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++)
+               ess->rx_irq[i] = platform_get_irq(pdev, i + IPQESS_MAX_TX_QUEUE);
+
+       netdev->netdev_ops = &ipqess_axi_netdev_ops;
+       netdev->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+                          NETIF_F_HW_VLAN_CTAG_RX |
+                          NETIF_F_HW_VLAN_CTAG_TX |
+                          NETIF_F_TSO | NETIF_F_TSO6 |
+                          NETIF_F_GRO | NETIF_F_SG;
+       netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+                             NETIF_F_HW_VLAN_CTAG_RX |
+                             NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG |
+                             NETIF_F_TSO | NETIF_F_TSO6 |
+                             NETIF_F_GRO;
+       netdev->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |
+                               NETIF_F_TSO | NETIF_F_TSO6 |
+                               NETIF_F_GRO;
+       netdev->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
+                                 NETIF_F_TSO | NETIF_F_TSO6 |
+                                 NETIF_F_GRO;
+       netdev->watchdog_timeo = 5 * HZ;
+       netdev->base_addr = (u32) ess->hw_addr;
+       netdev->max_mtu = 9000;
+       netdev->gso_max_segs = IPQESS_TX_RING_SIZE / 2;
+
+       ipqess_set_ethtool_ops(netdev);
+
+       netif_carrier_off(netdev);
+       err = register_netdev(netdev);
+       if (err)
+               goto err_out;
+
+       err = ipqess_hw_init(ess);
+       if (err)
+               goto err_out;
+
+       for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+               ess->queue[i].ess = ess;
+               ess->queue[i].idx = i;
+
+               netif_napi_add(netdev,
+                              &ess->tx_ring[i].napi_tx,
+                              ipqess_tx_napi, 64);
+               netif_napi_add(netdev,
+                              &ess->rx_ring[i].napi_rx,
+                              ipqess_rx_napi, 64);
+
+               err = devm_request_irq(&ess->netdev->dev,
+                       ess->tx_irq[i << IPQESS_TX_CPU_START_SHIFT],
+                       ipqess_interrupt_tx, 0, "ipqess TX", &ess->tx_ring[i]);
+               if (err)
+                       goto err_out;
+
+               err = devm_request_irq(&ess->netdev->dev,
+                       ess->rx_irq[i << IPQESS_RX_CPU_START_SHIFT],
+                       ipqess_interrupt_rx, 0, "ipqess RX", &ess->rx_ring[i]);
+               if (err)
+                       goto err_out;
+
+               /*
+                * irq_set_affinity_hint(ess->tx_irq[i << IPQESS_TX_CPU_START_SHIFT],
+                *                      get_cpu_mask(i));
+                * irq_set_affinity_hint(ess->rx_irq[i << IPQESS_RX_CPU_START_SHIFT],
+                *                      get_cpu_mask(i));
+                */
+       }
+
+       return 0;
+
+err_out:
+       ipqess_cleanup(ess);
+       return err;
+}
+
+static int ipqess_axi_remove(struct platform_device *pdev)
+{
+       const struct net_device *netdev = platform_get_drvdata(pdev);
+       struct ipqess *ess = netdev_priv(netdev);
+
+       ipqess_cleanup(ess);
+
+       return 0;
+}
+
+static const struct of_device_id ipqess_of_mtable[] = {
+       {.compatible = "qcom,ipq4019-ess-edma" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, ipqess_of_mtable);
+
+static struct platform_driver ipqess_axi_driver = {
+       .driver = {
+               .name    = "ipqess-edma",
+               .of_match_table = ipqess_of_mtable,
+       },
+       .probe    = ipqess_axi_probe,
+       .remove   = ipqess_axi_remove,
+};
+
+module_platform_driver(ipqess_axi_driver);
+
+MODULE_AUTHOR("Qualcomm Atheros Inc");
+MODULE_AUTHOR("John Crispin <john@phrozen.org>");
+MODULE_AUTHOR("Christian Lamparter <chunkeey@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/ipqess.h b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/ipqess.h
new file mode 100644 (file)
index 0000000..13b55be
--- /dev/null
@@ -0,0 +1,582 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR ISC) */
+/* Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
+ * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _IPQESS_H_
+#define _IPQESS_H_
+
+#define IPQESS_CPU_CORES_SUPPORTED 4
+#define IPQESS_MAX_PORTID_SUPPORTED 5
+#define IPQESS_MAX_VLAN_SUPPORTED  IPQESS_MAX_PORTID_SUPPORTED
+#define IPQESS_MAX_PORTID_BITMAP_INDEX (IPQESS_MAX_PORTID_SUPPORTED + 1)
+#define IPQESS_MAX_PORTID_BITMAP_SUPPORTED 0x1f        /* 0001_1111 = 0x1f */
+#define IPQESS_MAX_NETDEV_PER_QUEUE 4 /* 3 netdevs per queue, plus 1 slot for indexing */
+
+#define IPQESS_NETDEV_QUEUES   4
+
+#define IPQESS_TPD_EOP_SHIFT 31
+
+#define IPQESS_PORT_ID_SHIFT 12
+#define IPQESS_PORT_ID_MASK 0x7
+
+/* tpd word 3 bit 18-28 */
+#define IPQESS_TPD_PORT_BITMAP_SHIFT 18
+
+#define IPQESS_TPD_FROM_CPU_SHIFT 25
+
+#define IPQESS_RX_RING_SIZE 128
+#define IPQESS_RX_HEAD_BUFF_SIZE 1540
+#define IPQESS_TX_RING_SIZE 128
+#define IPQESS_MAX_RX_QUEUE 8
+#define IPQESS_MAX_TX_QUEUE 16
+
+
+/* Configurations */
+#define IPQESS_INTR_CLEAR_TYPE 0
+#define IPQESS_INTR_SW_IDX_W_TYPE 0
+#define IPQESS_FIFO_THRESH_TYPE 0
+#define IPQESS_RSS_TYPE 0
+#define IPQESS_RX_IMT 0x0020
+#define IPQESS_TX_IMT 0x0050
+#define IPQESS_TPD_BURST 5
+#define IPQESS_TXF_BURST 0x100
+#define IPQESS_RFD_BURST 8
+#define IPQESS_RFD_THR 16
+#define IPQESS_RFD_LTHR 0
+
+/* RX/TX per CPU based mask/shift */
+#define IPQESS_TX_PER_CPU_MASK 0xF
+#define IPQESS_RX_PER_CPU_MASK 0x3
+#define IPQESS_TX_PER_CPU_MASK_SHIFT 0x2
+#define IPQESS_RX_PER_CPU_MASK_SHIFT 0x1
+#define IPQESS_TX_CPU_START_SHIFT 0x2
+#define IPQESS_RX_CPU_START_SHIFT 0x1
+
+/* Flags used in transmit direction */
+#define IPQESS_HW_CHECKSUM 0x00000001
+#define IPQESS_VLAN_TX_TAG_INSERT_FLAG 0x00000002
+#define IPQESS_VLAN_TX_TAG_INSERT_DEFAULT_FLAG 0x00000004
+
+#define IPQESS_DESC_LAST 0x1
+#define IPQESS_DESC_SINGLE 0x2
+#define IPQESS_DESC_PAGE 0x4
+#define IPQESS_DESC_PAGELIST 0x8
+#define IPQESS_DESC_SKB_NONE 0x10
+#define IPQESS_DESC_SKB_REUSE 0x20
+
+
+#define IPQESS_MAX_SKB_FRAGS (MAX_SKB_FRAGS + 1)
+
+/* Ethtool specific list of IPQESS supported features */
+#define IPQESS_SUPPORTED_FEATURES (SUPPORTED_10baseT_Half \
+                                       | SUPPORTED_10baseT_Full \
+                                       | SUPPORTED_100baseT_Half \
+                                       | SUPPORTED_100baseT_Full \
+                                       | SUPPORTED_1000baseT_Full)
+
+/* Receive side Atheros Header */
+#define IPQESS_RX_ATH_HDR_VERSION 0x2
+#define IPQESS_RX_ATH_HDR_VERSION_SHIFT 14
+#define IPQESS_RX_ATH_HDR_PRIORITY_SHIFT 11
+#define IPQESS_RX_ATH_PORT_TYPE_SHIFT 6
+#define IPQESS_RX_ATH_HDR_RSTP_PORT_TYPE 0x4
+
+/* Transmit side Atheros Header */
+#define IPQESS_TX_ATH_HDR_PORT_BITMAP_MASK 0x7F
+#define IPQESS_TX_ATH_HDR_FROM_CPU_MASK 0x80
+#define IPQESS_TX_ATH_HDR_FROM_CPU_SHIFT 7
+
+#define IPQESS_TXQ_START_CORE0 8
+#define IPQESS_TXQ_START_CORE1 12
+#define IPQESS_TXQ_START_CORE2 0
+#define IPQESS_TXQ_START_CORE3 4
+
+#define IPQESS_TXQ_IRQ_MASK_CORE0 0x0F00
+#define IPQESS_TXQ_IRQ_MASK_CORE1 0xF000
+#define IPQESS_TXQ_IRQ_MASK_CORE2 0x000F
+#define IPQESS_TXQ_IRQ_MASK_CORE3 0x00F0
+
+#define IPQESS_ETH_HDR_LEN 12
+#define IPQESS_ETH_TYPE_MASK 0xFFFF
+
+#define IPQESS_RX_BUFFER_WRITE 16
+#define IPQESS_RFD_AVAIL_THR 80
+
+#define IPQESS_GMAC_NO_MDIO_PHY        PHY_MAX_ADDR
+
+extern int ssdk_rfs_ipct_rule_set(__be32 ip_src, __be32 ip_dst,
+                                 __be16 sport, __be16 dport,
+                                 uint8_t proto, u16 loadbalance, bool action);
+struct ipqesstool_statistics {
+       u32 tx_q0_pkt;
+       u32 tx_q1_pkt;
+       u32 tx_q2_pkt;
+       u32 tx_q3_pkt;
+       u32 tx_q4_pkt;
+       u32 tx_q5_pkt;
+       u32 tx_q6_pkt;
+       u32 tx_q7_pkt;
+       u32 tx_q8_pkt;
+       u32 tx_q9_pkt;
+       u32 tx_q10_pkt;
+       u32 tx_q11_pkt;
+       u32 tx_q12_pkt;
+       u32 tx_q13_pkt;
+       u32 tx_q14_pkt;
+       u32 tx_q15_pkt;
+       u32 tx_q0_byte;
+       u32 tx_q1_byte;
+       u32 tx_q2_byte;
+       u32 tx_q3_byte;
+       u32 tx_q4_byte;
+       u32 tx_q5_byte;
+       u32 tx_q6_byte;
+       u32 tx_q7_byte;
+       u32 tx_q8_byte;
+       u32 tx_q9_byte;
+       u32 tx_q10_byte;
+       u32 tx_q11_byte;
+       u32 tx_q12_byte;
+       u32 tx_q13_byte;
+       u32 tx_q14_byte;
+       u32 tx_q15_byte;
+       u32 rx_q0_pkt;
+       u32 rx_q1_pkt;
+       u32 rx_q2_pkt;
+       u32 rx_q3_pkt;
+       u32 rx_q4_pkt;
+       u32 rx_q5_pkt;
+       u32 rx_q6_pkt;
+       u32 rx_q7_pkt;
+       u32 rx_q0_byte;
+       u32 rx_q1_byte;
+       u32 rx_q2_byte;
+       u32 rx_q3_byte;
+       u32 rx_q4_byte;
+       u32 rx_q5_byte;
+       u32 rx_q6_byte;
+       u32 rx_q7_byte;
+       u32 tx_desc_error;
+};
+
+struct ipqess_tx_desc {
+       __le16  len;
+       __le16  svlan_tag;
+       __le32  word1;
+       __le32  addr;
+       __le32  word3;
+} __aligned(16) __packed;
+
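+/* the 16-byte receive return descriptor (RRD) that the hardware prepends
+ * to every received frame (see IPQESS_RRD_SIZE in ipqess.c) */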
+struct ipqess_rx_desc {
+       u16 rrd0;
+       u16 rrd1;
+       u16 rrd2;
+       u16 rrd3;
+       u16 rrd4;
+       u16 rrd5;
+       u16 rrd6;
+       u16 rrd7;
+} __aligned(16) __packed;
+
+struct ipqess_buf {
+       struct sk_buff *skb;
+       dma_addr_t dma;
+       u32 flags;
+       u16 length;
+};
+
+struct queue {
+       u32 idx;
+       u32 tx_mask;
+       u32 tx_status;
+       u32 tx_start;
+       struct ipqess *ess;
+};
+
+struct ipqess_tx_ring {
+       struct napi_struct napi_tx;
+       u32 idx;
+       struct ipqess *ess;
+       struct netdev_queue *nq;
+       void *hw_desc;
+       struct ipqess_buf *buf;
+       dma_addr_t dma;
+       u16 count;
+       u16 head;
+       u16 tail;
+};
+
+struct ipqess_rx_ring {
+       struct napi_struct napi_rx;
+       u32 idx;
+       struct ipqess *ess;
+       struct device *ppdev;
+       void **hw_desc;
+       struct ipqess_buf *buf;
+       dma_addr_t dma;
+       u16 head;
+       u16 tail;
+       atomic_t refill_count;
+};
+
+struct ipqess_rx_ring_refill {
+       struct ipqess_rx_ring *rx_ring;
+       struct work_struct refill_work;
+};
+
+struct ipqess {
+       struct net_device *netdev;
+       void __iomem *hw_addr;
+
+       struct ipqess_rx_ring rx_ring[IPQESS_NETDEV_QUEUES];
+
+       struct platform_device *pdev;
+       struct queue queue[CONFIG_NR_CPUS];
+       struct ipqess_tx_ring tx_ring[IPQESS_NETDEV_QUEUES];
+
+       struct ipqesstool_statistics ipqessstats;
+       spinlock_t stats_lock;
+       struct net_device_stats stats;
+
+       struct ipqess_rx_ring_refill rx_refill[IPQESS_NETDEV_QUEUES];
+       u32 tx_irq[16];
+       u32 rx_irq[8];
+};
+
+static inline void build_test(void)
+{
+       struct ipqess *ess;
+       BUILD_BUG_ON(ARRAY_SIZE(ess->rx_ring) != ARRAY_SIZE(ess->rx_refill));
+}
+
+void ipqess_set_ethtool_ops(struct net_device *netdev);
+
+/* register definition */
+#define IPQESS_REG_MAS_CTRL 0x0
+#define IPQESS_REG_TIMEOUT_CTRL 0x004
+#define IPQESS_REG_DBG0 0x008
+#define IPQESS_REG_DBG1 0x00C
+#define IPQESS_REG_SW_CTRL0 0x100
+#define IPQESS_REG_SW_CTRL1 0x104
+
+/* Interrupt Status Register */
+#define IPQESS_REG_RX_ISR 0x200
+#define IPQESS_REG_TX_ISR 0x208
+#define IPQESS_REG_MISC_ISR 0x210
+#define IPQESS_REG_WOL_ISR 0x218
+
+#define IPQESS_MISC_ISR_RX_URG_Q(x) (1 << (x))
+
+#define IPQESS_MISC_ISR_AXIR_TIMEOUT 0x00000100
+#define IPQESS_MISC_ISR_AXIR_ERR 0x00000200
+#define IPQESS_MISC_ISR_TXF_DEAD 0x00000400
+#define IPQESS_MISC_ISR_AXIW_ERR 0x00000800
+#define IPQESS_MISC_ISR_AXIW_TIMEOUT 0x00001000
+
+#define IPQESS_WOL_ISR 0x00000001
+
+/* Interrupt Mask Register */
+#define IPQESS_REG_MISC_IMR 0x214
+#define IPQESS_REG_WOL_IMR 0x218
+
+#define IPQESS_RX_IMR_NORMAL_MASK 0x1
+#define IPQESS_TX_IMR_NORMAL_MASK 0x1
+#define IPQESS_MISC_IMR_NORMAL_MASK 0x80001FFF
+#define IPQESS_WOL_IMR_NORMAL_MASK 0x1
+
+/* Edma receive consumer index */
+#define IPQESS_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 3)) /* x is the queue id */
+/* Edma transmit consumer index */
+#define IPQESS_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
+
+/* IRQ Moderator Initial Timer Register */
+#define IPQESS_REG_IRQ_MODRT_TIMER_INIT 0x280
+#define IPQESS_IRQ_MODRT_TIMER_MASK 0xFFFF
+#define IPQESS_IRQ_MODRT_RX_TIMER_SHIFT 0
+#define IPQESS_IRQ_MODRT_TX_TIMER_SHIFT 16
+
+/* Interrupt Control Register */
+#define IPQESS_REG_INTR_CTRL 0x284
+#define IPQESS_INTR_CLR_TYP_SHIFT 0
+#define IPQESS_INTR_SW_IDX_W_TYP_SHIFT 1
+#define IPQESS_INTR_CLEAR_TYPE_W1 0
+#define IPQESS_INTR_CLEAR_TYPE_R 1
+
+/* RX Interrupt Mask Register */
+#define IPQESS_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 3)) /* x = queue id */
+
+/* TX Interrupt mask register */
+#define IPQESS_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
+
+/* Load Ptr Register
+ * Software sets this bit after the initialization of the head and tail
+ */
+#define IPQESS_REG_TX_SRAM_PART 0x400
+#define IPQESS_LOAD_PTR_SHIFT 16
+
+/* TXQ Control Register */
+#define IPQESS_REG_TXQ_CTRL 0x404
+#define IPQESS_TXQ_CTRL_IP_OPTION_EN 0x10
+#define IPQESS_TXQ_CTRL_TXQ_EN 0x20
+#define IPQESS_TXQ_CTRL_ENH_MODE 0x40
+#define IPQESS_TXQ_CTRL_LS_8023_EN 0x80
+#define IPQESS_TXQ_CTRL_TPD_BURST_EN 0x100
+#define IPQESS_TXQ_CTRL_LSO_BREAK_EN 0x200
+#define IPQESS_TXQ_NUM_TPD_BURST_MASK 0xF
+#define IPQESS_TXQ_TXF_BURST_NUM_MASK 0xFFFF
+#define IPQESS_TXQ_NUM_TPD_BURST_SHIFT 0
+#define IPQESS_TXQ_TXF_BURST_NUM_SHIFT 16
+
+#define IPQESS_REG_TXF_WATER_MARK 0x408 /* in units of 8 bytes */
+#define IPQESS_TXF_WATER_MARK_MASK 0x0FFF
+#define IPQESS_TXF_LOW_WATER_MARK_SHIFT 0
+#define IPQESS_TXF_HIGH_WATER_MARK_SHIFT 16
+#define IPQESS_TXQ_CTRL_BURST_MODE_EN 0x80000000
+
+/* WRR Control Register */
+#define IPQESS_REG_WRR_CTRL_Q0_Q3 0x40c
+#define IPQESS_REG_WRR_CTRL_Q4_Q7 0x410
+#define IPQESS_REG_WRR_CTRL_Q8_Q11 0x414
+#define IPQESS_REG_WRR_CTRL_Q12_Q15 0x418
+
+/* Weighted round robin (WRR): takes a queue as input and computes
+ * the starting bit position at which the weight for that particular
+ * queue must be written
+ */
+#define IPQESS_WRR_SHIFT(x) (((x) * 5) % 20)
+
+/* Tx Descriptor Control Register */
+#define IPQESS_REG_TPD_RING_SIZE 0x41C
+#define IPQESS_TPD_RING_SIZE_SHIFT 0
+#define IPQESS_TPD_RING_SIZE_MASK 0xFFFF
+
+/* Transmit descriptor base address */
+/* FYI: the ess_dma.h definition uses << 2 for RX queues; the driver
+ * code, however, increments in steps of 2, which is where the
+ * discrepancy comes from.
+ */
+#define IPQESS_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
+
+/* TPD Index Register */
+#define IPQESS_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
+
+#define IPQESS_TPD_PROD_IDX_BITS 0x0000FFFF
+#define IPQESS_TPD_CONS_IDX_BITS 0xFFFF0000
+#define IPQESS_TPD_PROD_IDX_MASK 0xFFFF
+#define IPQESS_TPD_CONS_IDX_MASK 0xFFFF
+#define IPQESS_TPD_PROD_IDX_SHIFT 0
+#define IPQESS_TPD_CONS_IDX_SHIFT 16
+
+/* TX Virtual Queue Mapping Control Register */
+#define IPQESS_REG_VQ_CTRL0 0x4A0
+#define IPQESS_REG_VQ_CTRL1 0x4A4
+
+/* Virtual QID shift, it takes queue as input, and computes
+ * Virtual QID position in virtual qid control register
+ */
+#define IPQESS_VQ_ID_SHIFT(i) (((i) * 3) % 24)
+
+/* Virtual Queue Default Value */
+#define IPQESS_VQ_REG_VALUE 0x240240
+
+/* Tx side Port Interface Control Register */
+#define IPQESS_REG_PORT_CTRL 0x4A8
+#define IPQESS_PAD_EN_SHIFT 15
+
+/* Tx side VLAN Configuration Register */
+#define IPQESS_REG_VLAN_CFG 0x4AC
+
+#define IPQESS_TX_CVLAN 16
+#define IPQESS_TX_INS_CVLAN 17
+#define IPQESS_TX_CVLAN_TAG_SHIFT 0
+
+#define IPQESS_TX_SVLAN 14
+#define IPQESS_TX_INS_SVLAN 15
+#define IPQESS_TX_SVLAN_TAG_SHIFT 16
+
+/* Tx Queue Packet Statistic Register */
+#define IPQESS_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
+
+#define IPQESS_TX_STAT_PKT_MASK 0xFFFFFF
+
+/* Tx Queue Byte Statistic Register */
+#define IPQESS_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
+
+/* Load Balance Based Ring Offset Register */
+#define IPQESS_REG_LB_RING 0x800
+#define IPQESS_LB_RING_ENTRY_MASK 0xff
+#define IPQESS_LB_RING_ID_MASK 0x7
+#define IPQESS_LB_RING_PROFILE_ID_MASK 0x3
+#define IPQESS_LB_RING_ENTRY_BIT_OFFSET 8
+#define IPQESS_LB_RING_ID_OFFSET 0
+#define IPQESS_LB_RING_PROFILE_ID_OFFSET 3
+#define IPQESS_LB_REG_VALUE 0x6040200
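+
+/* Decoding the default (an observation, not from the docs): each 8-bit
+ * entry holds a ring id in bits 0..2 and a profile id in bits 3..4, so
+ * 0x6040200 spreads the four entries across rings 0, 2, 4 and 6
+ */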
+
+/* Load Balance Priority Mapping Register */
+#define IPQESS_REG_LB_PRI_START 0x804
+#define IPQESS_REG_LB_PRI_END 0x810
+#define IPQESS_LB_PRI_REG_INC 4
+#define IPQESS_LB_PRI_ENTRY_BIT_OFFSET 4
+#define IPQESS_LB_PRI_ENTRY_MASK 0xf
+
+/* RSS Priority Mapping Register */
+#define IPQESS_REG_RSS_PRI 0x820
+#define IPQESS_RSS_PRI_ENTRY_MASK 0xf
+#define IPQESS_RSS_RING_ID_MASK 0x7
+#define IPQESS_RSS_PRI_ENTRY_BIT_OFFSET 4
+
+/* RSS Indirection Register */
+#define IPQESS_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = indirection table index */
+#define IPQESS_NUM_IDT 16
+#define IPQESS_RSS_IDT_VALUE 0x64206420
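+
+/* Presumably each 4-bit nibble of the indirection value selects a ring,
+ * so 0x64206420 cycles incoming flows over rings 0, 2, 4 and 6,
+ * matching the load-balance default above
+ */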
+
+/* Default RSS Ring Register */
+#define IPQESS_REG_DEF_RSS 0x890
+#define IPQESS_DEF_RSS_MASK 0x7
+
+/* RSS Hash Function Type Register */
+#define IPQESS_REG_RSS_TYPE 0x894
+#define IPQESS_RSS_TYPE_NONE 0x01
+#define IPQESS_RSS_TYPE_IPV4TCP 0x02
+#define IPQESS_RSS_TYPE_IPV6_TCP 0x04
+#define IPQESS_RSS_TYPE_IPV4_UDP 0x08
+#define IPQESS_RSS_TYPE_IPV6UDP 0x10
+#define IPQESS_RSS_TYPE_IPV4 0x20
+#define IPQESS_RSS_TYPE_IPV6 0x40
+#define IPQESS_RSS_HASH_MODE_MASK 0x7f
+
+#define IPQESS_REG_RSS_HASH_VALUE 0x8C0
+
+#define IPQESS_REG_RSS_TYPE_RESULT 0x8C4
+
+#define IPQESS_HASH_TYPE_START 0
+#define IPQESS_HASH_TYPE_END 5
+#define IPQESS_HASH_TYPE_SHIFT 12
+
+#define IPQESS_RFS_FLOW_ENTRIES 1024
+#define IPQESS_RFS_FLOW_ENTRIES_MASK (IPQESS_RFS_FLOW_ENTRIES - 1)
+#define IPQESS_RFS_EXPIRE_COUNT_PER_CALL 128
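+
+/* e.g. a flow id would be folded into the table as
+ *   idx = flow_id & IPQESS_RFS_FLOW_ENTRIES_MASK;   (0..1023)
+ */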
+
+/* RFD Base Address Register */
+#define IPQESS_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 3)) /* x = queue id */
+
+/* RFD Index Register */
+#define IPQESS_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 3))
+
+#define IPQESS_RFD_PROD_IDX_BITS 0x00000FFF
+#define IPQESS_RFD_CONS_IDX_BITS 0x0FFF0000
+#define IPQESS_RFD_PROD_IDX_MASK 0xFFF
+#define IPQESS_RFD_CONS_IDX_MASK 0xFFF
+#define IPQESS_RFD_PROD_IDX_SHIFT 0
+#define IPQESS_RFD_CONS_IDX_SHIFT 16
+
+/* Rx Descriptor Control Register */
+#define IPQESS_REG_RX_DESC0 0xA10
+#define IPQESS_RFD_RING_SIZE_MASK 0xFFF
+#define IPQESS_RX_BUF_SIZE_MASK 0xFFFF
+#define IPQESS_RFD_RING_SIZE_SHIFT 0
+#define IPQESS_RX_BUF_SIZE_SHIFT 16
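+
+/* Sketch (illustration only): IPQESS_REG_RX_DESC0 would be programmed as
+ *
+ *   (ring_size & IPQESS_RFD_RING_SIZE_MASK) << IPQESS_RFD_RING_SIZE_SHIFT |
+ *   (buf_size & IPQESS_RX_BUF_SIZE_MASK) << IPQESS_RX_BUF_SIZE_SHIFT
+ */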
+
+#define IPQESS_REG_RX_DESC1 0xA14
+#define IPQESS_RXQ_RFD_BURST_NUM_MASK 0x3F
+#define IPQESS_RXQ_RFD_PF_THRESH_MASK 0x1F
+#define IPQESS_RXQ_RFD_LOW_THRESH_MASK 0xFFF
+#define IPQESS_RXQ_RFD_BURST_NUM_SHIFT 0
+#define IPQESS_RXQ_RFD_PF_THRESH_SHIFT 8
+#define IPQESS_RXQ_RFD_LOW_THRESH_SHIFT 16
+
+/* RXQ Control Register */
+#define IPQESS_REG_RXQ_CTRL 0xA18
+#define IPQESS_FIFO_THRESH_TYPE_SHIF 0
+#define IPQESS_FIFO_THRESH_128_BYTE 0x0
+#define IPQESS_FIFO_THRESH_64_BYTE 0x1
+#define IPQESS_RXQ_CTRL_RMV_VLAN 0x00000002
+#define IPQESS_RXQ_CTRL_EN 0x0000FF00
+
+/* AXI Burst Size Config */
+#define IPQESS_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
+#define IPQESS_AXIW_MAXWRSIZE_VALUE 0x0
+
+/* Rx Statistics Register */
+#define IPQESS_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
+#define IPQESS_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
+
+/* WoL Pattern Length Register */
+#define IPQESS_REG_WOL_PATTERN_LEN0 0xC00
+#define IPQESS_WOL_PT_LEN_MASK 0xFF
+#define IPQESS_WOL_PT0_LEN_SHIFT 0
+#define IPQESS_WOL_PT1_LEN_SHIFT 8
+#define IPQESS_WOL_PT2_LEN_SHIFT 16
+#define IPQESS_WOL_PT3_LEN_SHIFT 24
+
+#define IPQESS_REG_WOL_PATTERN_LEN1 0xC04
+#define IPQESS_WOL_PT4_LEN_SHIFT 0
+#define IPQESS_WOL_PT5_LEN_SHIFT 8
+#define IPQESS_WOL_PT6_LEN_SHIFT 16
+
+/* WoL Control Register */
+#define IPQESS_REG_WOL_CTRL 0xC08
+#define IPQESS_WOL_WK_EN 0x00000001
+#define IPQESS_WOL_MG_EN 0x00000002
+#define IPQESS_WOL_PT0_EN 0x00000004
+#define IPQESS_WOL_PT1_EN 0x00000008
+#define IPQESS_WOL_PT2_EN 0x00000010
+#define IPQESS_WOL_PT3_EN 0x00000020
+#define IPQESS_WOL_PT4_EN 0x00000040
+#define IPQESS_WOL_PT5_EN 0x00000080
+#define IPQESS_WOL_PT6_EN 0x00000100
+
+/* MAC Control Register */
+#define IPQESS_REG_MAC_CTRL0 0xC20
+#define IPQESS_REG_MAC_CTRL1 0xC24
+
+/* WoL Pattern Register */
+#define IPQESS_REG_WOL_PATTERN_START 0x5000
+#define IPQESS_PATTERN_PART_REG_OFFSET 0x40
+
+
+/* TX descriptor fields */
+#define IPQESS_TPD_HDR_SHIFT 0
+#define IPQESS_TPD_PPPOE_EN 0x00000100
+#define IPQESS_TPD_IP_CSUM_EN 0x00000200
+#define IPQESS_TPD_TCP_CSUM_EN 0x00000400
+#define IPQESS_TPD_UDP_CSUM_EN 0x00000800
+#define IPQESS_TPD_CUSTOM_CSUM_EN 0x00000C00
+#define IPQESS_TPD_LSO_EN 0x00001000
+#define IPQESS_TPD_LSO_V2_EN 0x00002000
+#define IPQESS_TPD_IPV4_EN 0x00010000
+#define IPQESS_TPD_MSS_MASK 0x1FFF
+#define IPQESS_TPD_MSS_SHIFT 18
+#define IPQESS_TPD_CUSTOM_CSUM_SHIFT 18
+
+/* RRD descriptor fields */
+#define IPQESS_RRD_NUM_RFD_MASK 0x000F
+#define IPQESS_RRD_PKT_SIZE_MASK 0x3FFF
+#define IPQESS_RRD_SRC_PORT_NUM_MASK 0x4000
+#define IPQESS_RRD_SVLAN 0x8000
+#define IPQESS_RRD_FLOW_COOKIE_MASK 0x07FF
+
+#define IPQESS_RRD_CSUM_FAIL_MASK 0xC000
+#define IPQESS_RRD_CVLAN 0x0001
+#define IPQESS_RRD_DESC_VALID 0x8000
+
+#define IPQESS_RRD_PRIORITY_SHIFT 4
+#define IPQESS_RRD_PRIORITY_MASK 0x7
+#define IPQESS_RRD_PORT_TYPE_SHIFT 7
+#define IPQESS_RRD_PORT_TYPE_MASK 0x1F
+
+#endif
diff --git a/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/ipqess_ethtool.c b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/ipqess_ethtool.c
new file mode 100644 (file)
index 0000000..a36f88c
--- /dev/null
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: (GPL-2.0 OR ISC)
+/* Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/phy.h>
+#include "ipqess.h"
+
+struct ipqesstool_stats {
+       uint8_t string[ETH_GSTRING_LEN];
+       uint32_t offset;
+};
+
+#define IPQESS_STAT(m)    offsetof(struct ipqesstool_statistics, m)
+#define DRVINFO_LEN    32
+
+static const struct ipqesstool_stats ipqess_stats[] = {
+       {"tx_q0_pkt", IPQESS_STAT(tx_q0_pkt)},
+       {"tx_q1_pkt", IPQESS_STAT(tx_q1_pkt)},
+       {"tx_q2_pkt", IPQESS_STAT(tx_q2_pkt)},
+       {"tx_q3_pkt", IPQESS_STAT(tx_q3_pkt)},
+       {"tx_q4_pkt", IPQESS_STAT(tx_q4_pkt)},
+       {"tx_q5_pkt", IPQESS_STAT(tx_q5_pkt)},
+       {"tx_q6_pkt", IPQESS_STAT(tx_q6_pkt)},
+       {"tx_q7_pkt", IPQESS_STAT(tx_q7_pkt)},
+       {"tx_q8_pkt", IPQESS_STAT(tx_q8_pkt)},
+       {"tx_q9_pkt", IPQESS_STAT(tx_q9_pkt)},
+       {"tx_q10_pkt", IPQESS_STAT(tx_q10_pkt)},
+       {"tx_q11_pkt", IPQESS_STAT(tx_q11_pkt)},
+       {"tx_q12_pkt", IPQESS_STAT(tx_q12_pkt)},
+       {"tx_q13_pkt", IPQESS_STAT(tx_q13_pkt)},
+       {"tx_q14_pkt", IPQESS_STAT(tx_q14_pkt)},
+       {"tx_q15_pkt", IPQESS_STAT(tx_q15_pkt)},
+       {"tx_q0_byte", IPQESS_STAT(tx_q0_byte)},
+       {"tx_q1_byte", IPQESS_STAT(tx_q1_byte)},
+       {"tx_q2_byte", IPQESS_STAT(tx_q2_byte)},
+       {"tx_q3_byte", IPQESS_STAT(tx_q3_byte)},
+       {"tx_q4_byte", IPQESS_STAT(tx_q4_byte)},
+       {"tx_q5_byte", IPQESS_STAT(tx_q5_byte)},
+       {"tx_q6_byte", IPQESS_STAT(tx_q6_byte)},
+       {"tx_q7_byte", IPQESS_STAT(tx_q7_byte)},
+       {"tx_q8_byte", IPQESS_STAT(tx_q8_byte)},
+       {"tx_q9_byte", IPQESS_STAT(tx_q9_byte)},
+       {"tx_q10_byte", IPQESS_STAT(tx_q10_byte)},
+       {"tx_q11_byte", IPQESS_STAT(tx_q11_byte)},
+       {"tx_q12_byte", IPQESS_STAT(tx_q12_byte)},
+       {"tx_q13_byte", IPQESS_STAT(tx_q13_byte)},
+       {"tx_q14_byte", IPQESS_STAT(tx_q14_byte)},
+       {"tx_q15_byte", IPQESS_STAT(tx_q15_byte)},
+       {"rx_q0_pkt", IPQESS_STAT(rx_q0_pkt)},
+       {"rx_q1_pkt", IPQESS_STAT(rx_q1_pkt)},
+       {"rx_q2_pkt", IPQESS_STAT(rx_q2_pkt)},
+       {"rx_q3_pkt", IPQESS_STAT(rx_q3_pkt)},
+       {"rx_q4_pkt", IPQESS_STAT(rx_q4_pkt)},
+       {"rx_q5_pkt", IPQESS_STAT(rx_q5_pkt)},
+       {"rx_q6_pkt", IPQESS_STAT(rx_q6_pkt)},
+       {"rx_q7_pkt", IPQESS_STAT(rx_q7_pkt)},
+       {"rx_q0_byte", IPQESS_STAT(rx_q0_byte)},
+       {"rx_q1_byte", IPQESS_STAT(rx_q1_byte)},
+       {"rx_q2_byte", IPQESS_STAT(rx_q2_byte)},
+       {"rx_q3_byte", IPQESS_STAT(rx_q3_byte)},
+       {"rx_q4_byte", IPQESS_STAT(rx_q4_byte)},
+       {"rx_q5_byte", IPQESS_STAT(rx_q5_byte)},
+       {"rx_q6_byte", IPQESS_STAT(rx_q6_byte)},
+       {"rx_q7_byte", IPQESS_STAT(rx_q7_byte)},
+       {"tx_desc_error", IPQESS_STAT(tx_desc_error)},
+};
+
+static int ipqess_get_strset_count(struct net_device *netdev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return ARRAY_SIZE(ipqess_stats);
+       default:
+               netdev_dbg(netdev, "%s: invalid string set\n", __func__);
+               return -EOPNOTSUPP;
+       }
+}
+
+static void ipqess_get_strings(struct net_device *netdev, uint32_t stringset,
+                              uint8_t *data)
+{
+       uint8_t *p = data;
+       uint32_t i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < ARRAY_SIZE(ipqess_stats); i++) {
+                       memcpy(p, ipqess_stats[i].string,
+                              min((size_t)ETH_GSTRING_LEN,
+                              strlen(ipqess_stats[i].string) + 1));
+                       p += ETH_GSTRING_LEN;
+               }
+               break;
+       }
+}
+
+static void ipqess_get_drvinfo(struct net_device *dev,
+                              struct ethtool_drvinfo *info)
+{
+       strlcpy(info->driver, "qca_ipqess", DRVINFO_LEN);
+       strlcpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN);
+}
+
+static int ipqess_get_settings(struct net_device *netdev,
+                              struct ethtool_cmd *ecmd)
+{
+       struct phy_device *phydev = netdev->phydev;
+       uint16_t phyreg;
+
+       if (!phydev) /* no PHY attached, nothing to report */
+               return -ENODEV;
+
+       ecmd->advertising = phydev->advertising;
+       ecmd->autoneg = phydev->autoneg;
+       ecmd->speed = phydev->speed;
+       ecmd->duplex = phydev->duplex;
+       ecmd->phy_address = phydev->mdio.addr;
+
+       phyreg = (uint16_t)phy_read(netdev->phydev, MII_LPA);
+       if (phyreg & LPA_10HALF)
+               ecmd->lp_advertising |= ADVERTISED_10baseT_Half;
+
+       if (phyreg & LPA_10FULL)
+               ecmd->lp_advertising |= ADVERTISED_10baseT_Full;
+
+       if (phyreg & LPA_100HALF)
+               ecmd->lp_advertising |= ADVERTISED_100baseT_Half;
+
+       if (phyreg & LPA_100FULL)
+               ecmd->lp_advertising |= ADVERTISED_100baseT_Full;
+
+       phyreg = (uint16_t)phy_read(netdev->phydev, MII_STAT1000);
+       if (phyreg & LPA_1000HALF)
+               ecmd->lp_advertising |= ADVERTISED_1000baseT_Half;
+
+       if (phyreg & LPA_1000FULL)
+               ecmd->lp_advertising |= ADVERTISED_1000baseT_Full;
+
+       return 0;
+}
+
+static int ipqess_set_settings(struct net_device *netdev,
+                            struct ethtool_cmd *ecmd)
+{
+       struct phy_device *phydev = netdev->phydev;
+
+       if (!phydev) /* no PHY attached, nothing to configure */
+               return -ENODEV;
+
+       phydev->advertising = ecmd->advertising;
+       phydev->autoneg = ecmd->autoneg;
+       phydev->speed = ethtool_cmd_speed(ecmd);
+       phydev->duplex = ecmd->duplex;
+
+       genphy_config_aneg(phydev);
+
+       return 0;
+}
+
+static void ipqess_get_ringparam(struct net_device *netdev,
+                              struct ethtool_ringparam *ring)
+{
+       /* the rings have a fixed size, so current == max */
+       ring->tx_max_pending = IPQESS_TX_RING_SIZE;
+       ring->rx_max_pending = IPQESS_RX_RING_SIZE;
+       ring->tx_pending = IPQESS_TX_RING_SIZE;
+       ring->rx_pending = IPQESS_RX_RING_SIZE;
+}
+
+static const struct ethtool_ops ipqesstool_ops = {
+       .get_drvinfo = ipqess_get_drvinfo,
+       .get_link = ethtool_op_get_link,
+       .get_settings = ipqess_get_settings,
+       .set_settings = ipqess_set_settings,
+       .get_strings = ipqess_get_strings,
+       .get_sset_count = ipqess_get_strset_count,
+       .get_ringparam = ipqess_get_ringparam,
+};
+
+void ipqess_set_ethtool_ops(struct net_device *netdev)
+{
+       netdev->ethtool_ops = &ipqesstool_ops;
+}
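+
+/* Usage sketch (hypothetical caller, for illustration): the driver's
+ * probe path is expected to attach these ops before registering the
+ * net_device, e.g.
+ *
+ *   ipqess_set_ethtool_ops(netdev);
+ *   err = register_netdev(netdev);
+ */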
diff --git a/target/linux/ipq40xx/patches-4.19/720-ipqess.patch b/target/linux/ipq40xx/patches-4.19/720-ipqess.patch
new file mode 100644 (file)
index 0000000..ba16b41
--- /dev/null
@@ -0,0 +1,30 @@
+--- a/drivers/net/ethernet/qualcomm/Kconfig
++++ b/drivers/net/ethernet/qualcomm/Kconfig
+@@ -15,6 +15,15 @@ config NET_VENDOR_QUALCOMM
+ if NET_VENDOR_QUALCOMM
++config IPQ_ESS
++      tristate "Qualcomm Atheros IPQ ESS support"
++      depends on OF
++      ---help---
++        This driver supports the Qualcomm IPQ40xx ESS EDMA ethernet MAC.
++
++        To compile this driver as a module, choose M here. The module
++        will be called ipq_ess.
++
+ config QCA7000
+       tristate
+       help
+--- a/drivers/net/ethernet/qualcomm/Makefile
++++ b/drivers/net/ethernet/qualcomm/Makefile
+@@ -3,6 +3,9 @@
+ # Makefile for the Qualcomm network device drivers.
+ #
++obj-$(CONFIG_IPQ_ESS) += ipq_ess.o
++ipq_ess-objs := ipqess.o ipqess_ethtool.o
++
+ obj-$(CONFIG_QCA7000) += qca_7k_common.o
+ obj-$(CONFIG_QCA7000_SPI) += qcaspi.o
+ qcaspi-objs := qca_7k.o qca_debug.o qca_spi.o
diff --git a/target/linux/ipq40xx/patches-4.19/721-dts-ipq4019-add-ethernet-essedma-node.patch b/target/linux/ipq40xx/patches-4.19/721-dts-ipq4019-add-ethernet-essedma-node.patch
new file mode 100644 (file)
index 0000000..1d424a3
--- /dev/null
@@ -0,0 +1,69 @@
+From c611d3780fa101662a822d10acf8feb04ca97409 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@gmail.com>
+Date: Sun, 20 Nov 2016 01:01:10 +0100
+Subject: [PATCH] dts: ipq4019: add ethernet ipqess node
+
+This patch adds the device-tree node for the ipqess ethernet
+controller.
+
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 60 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 60 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -617,6 +618,53 @@
+                       status = "disabled";
+               };
++              gmac: ethernet@c080000 {
++                      compatible = "qcom,ipq4019-ess-edma";
++                      reg = <0xc080000 0x8000>;
++                      interrupts = <GIC_SPI  65 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  66 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  67 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  68 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  69 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  70 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  71 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  72 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  73 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  74 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  75 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  76 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  77 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  78 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  79 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI  80 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 240 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 241 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 242 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 243 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 244 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 245 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 246 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 247 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 248 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 249 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 250 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 251 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 252 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 253 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 254 IRQ_TYPE_EDGE_RISING>,
++                                   <GIC_SPI 255 IRQ_TYPE_EDGE_RISING>;
++
++                      status = "disabled";
++
++                      phy-mode = "internal";
++                      fixed-link {
++                              speed = <1000>;
++                              full-duplex;
++                              pause;
++                              asym-pause;
++                      };
++              };
++
+               usb3_ss_phy: ssphy@9a000 {
+                       compatible = "qcom,usb-ss-ipq4019-phy";
+                       #phy-cells = <0>;