	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);
-@@ -2171,6 +2186,7 @@ static irqreturn_t mtk_handle_irq_rx(int
+@@ -2174,6 +2189,7 @@ static irqreturn_t mtk_handle_irq_rx(int
{
	struct mtk_eth *eth = _eth;
	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
-@@ -2183,6 +2199,7 @@ static irqreturn_t mtk_handle_irq_tx(int
+@@ -2186,6 +2202,7 @@ static irqreturn_t mtk_handle_irq_tx(int
{
	struct mtk_eth *eth = _eth;
	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
-@@ -2371,6 +2388,9 @@ static int mtk_stop(struct net_device *d
+@@ -2374,6 +2391,9 @@ static int mtk_stop(struct net_device *d
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
-@@ -2423,6 +2443,64 @@ err_disable_clks:
+@@ -2426,6 +2446,64 @@ err_disable_clks:
	return ret;
}

static int mtk_hw_init(struct mtk_eth *eth)
{
	int i, val, ret;
-@@ -2444,9 +2522,6 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -2447,9 +2525,6 @@ static int mtk_hw_init(struct mtk_eth *e
			goto err_disable_pm;
		}
		/* disable delay and normal interrupt */
		mtk_tx_irq_disable(eth, ~0);
		mtk_rx_irq_disable(eth, ~0);
-@@ -2485,11 +2560,11 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -2488,11 +2563,11 @@ static int mtk_hw_init(struct mtk_eth *e
	/* Enable RX VLan Offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
-@@ -2994,6 +3069,13 @@ static int mtk_probe(struct platform_dev
+@@ -2997,6 +3072,13 @@ static int mtk_probe(struct platform_dev
	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->tx_irq_lock);
	spin_lock_init(&eth->rx_irq_lock);