ramips: raeth: use netdev_alloc_skb
[openwrt/openwrt.git] target/linux/ramips/files/drivers/net/ramips.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009 John Crispin <blogic@openwrt.org>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include <ramips_eth_platform.h>
#include "ramips_eth.h"

#define TX_TIMEOUT (20 * HZ / 100)
#define MAX_RX_LENGTH 1600

#ifdef CONFIG_RALINK_RT305X
#include "ramips_esw.c"
#else
static inline int rt305x_esw_init(void) { return 0; }
static inline void rt305x_esw_exit(void) { }
#endif

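/*
 * On MIPS, masking off the top three address bits strips the KSEG0/KSEG1
 * segment prefix and yields the physical (bus) address seen by the DMA
 * engine.
 */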
#define phys_to_bus(a) (a & 0x1FFFFFFF)

#ifdef CONFIG_RAMIPS_ETH_DEBUG
#define RADEBUG(fmt, args...) printk(KERN_DEBUG fmt, ## args)
#else
#define RADEBUG(fmt, args...) do {} while (0)
#endif

static struct net_device *ramips_dev;
static void __iomem *ramips_fe_base = 0;

static inline void
ramips_fe_wr(u32 val, unsigned reg)
{
        __raw_writel(val, ramips_fe_base + reg);
}

static inline u32
ramips_fe_rr(unsigned reg)
{
        return __raw_readl(ramips_fe_base + reg);
}

static inline void
ramips_fe_int_disable(u32 mask)
{
        ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) & ~mask,
                     RAMIPS_FE_INT_ENABLE);
        /* flush write */
        ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
}

static inline void
ramips_fe_int_enable(u32 mask)
{
        ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | mask,
                     RAMIPS_FE_INT_ENABLE);
        /* flush write */
        ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
}

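/*
 * The GDMA1 unicast MAC address is split across two registers: ADRH
 * holds the two high bytes, ADRL the remaining four bytes.
 */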
static inline void
ramips_hw_set_macaddr(unsigned char *mac)
{
        ramips_fe_wr((mac[0] << 8) | mac[1], RAMIPS_GDMA1_MAC_ADRH);
        ramips_fe_wr((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
                     RAMIPS_GDMA1_MAC_ADRL);
}

#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT3883)

#define RAMIPS_MDIO_RETRY 1000

static unsigned char *ramips_speed_str(struct raeth_priv *re)
{
        switch (re->speed) {
        case SPEED_1000:
                return "1000";
        case SPEED_100:
                return "100";
        case SPEED_10:
                return "10";
        }

        return "?";
}

static void ramips_link_adjust(struct raeth_priv *re)
{
        struct ramips_eth_platform_data *pdata;
        u32 mdio_cfg;

        pdata = re->parent->platform_data;
        if (!re->link) {
                netif_carrier_off(re->netdev);
                netdev_info(re->netdev, "link down\n");
                return;
        }

        mdio_cfg = RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 |
                   RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 |
                   RAMIPS_MDIO_CFG_GP1_FRC_EN;

        if (re->duplex == DUPLEX_FULL)
                mdio_cfg |= RAMIPS_MDIO_CFG_GP1_DUPLEX;

        if (re->tx_fc)
                mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_TX;

        if (re->rx_fc)
                mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_RX;

        switch (re->speed) {
        case SPEED_10:
                mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_10;
                break;
        case SPEED_100:
                mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_100;
                break;
        case SPEED_1000:
                mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_1000;
                break;
        default:
                BUG();
        }

        ramips_fe_wr(mdio_cfg, RAMIPS_MDIO_CFG);

        netif_carrier_on(re->netdev);
        netdev_info(re->netdev, "link up (%sMbps/%s duplex)\n",
                    ramips_speed_str(re),
                    (DUPLEX_FULL == re->duplex) ? "Full" : "Half");
}

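/*
 * Bit 31 of RAMIPS_MDIO_ACCESS is the busy/trigger flag; poll it
 * (roughly RAMIPS_MDIO_RETRY microseconds at most) until the PHY
 * access engine is idle.
 */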
static int
ramips_mdio_wait_ready(struct raeth_priv *re)
{
        int retries;

        retries = RAMIPS_MDIO_RETRY;
        while (1) {
                u32 t;

                t = ramips_fe_rr(RAMIPS_MDIO_ACCESS);
                if ((t & (0x1 << 31)) == 0)
                        return 0;

                if (retries-- == 0)
                        break;

                udelay(1);
        }

        dev_err(re->parent, "MDIO operation timed out\n");
        return -ETIMEDOUT;
}

static int
ramips_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
        struct raeth_priv *re = bus->priv;
        int err;
        u32 t;

        err = ramips_mdio_wait_ready(re);
        if (err)
                return 0xffff;

        t = (phy_addr << 24) | (phy_reg << 16);
        ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
        t |= (1 << 31);
        ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);

        err = ramips_mdio_wait_ready(re);
        if (err)
                return 0xffff;

        RADEBUG("%s: addr=%04x, reg=%04x, value=%04x\n", __func__,
                phy_addr, phy_reg, ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff);

        return ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff;
}

static int
ramips_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
{
        struct raeth_priv *re = bus->priv;
        int err;
        u32 t;

        RADEBUG("%s: addr=%04x, reg=%04x, value=%04x\n", __func__,
                phy_addr, phy_reg, ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff);

        err = ramips_mdio_wait_ready(re);
        if (err)
                return err;

        t = (1 << 30) | (phy_addr << 24) | (phy_reg << 16) | val;
        ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
        t |= (1 << 31);
        ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);

        return ramips_mdio_wait_ready(re);
}

static int
ramips_mdio_reset(struct mii_bus *bus)
{
        /* TODO */
        return 0;
}

static int
ramips_mdio_init(struct raeth_priv *re)
{
        int err;
        int i;

        re->mii_bus = mdiobus_alloc();
        if (re->mii_bus == NULL)
                return -ENOMEM;

        re->mii_bus->name = "ramips_mdio";
        re->mii_bus->read = ramips_mdio_read;
        re->mii_bus->write = ramips_mdio_write;
        re->mii_bus->reset = ramips_mdio_reset;
        re->mii_bus->irq = re->mii_irq;
        re->mii_bus->priv = re;
        re->mii_bus->parent = re->parent;

        snprintf(re->mii_bus->id, MII_BUS_ID_SIZE, "%s", "ramips_mdio");
        re->mii_bus->phy_mask = 0;

        for (i = 0; i < PHY_MAX_ADDR; i++)
                re->mii_irq[i] = PHY_POLL;

        err = mdiobus_register(re->mii_bus);
        if (err)
                goto err_free_bus;

        return 0;

err_free_bus:
        mdiobus_free(re->mii_bus);
        return err;
}

static void
ramips_mdio_cleanup(struct raeth_priv *re)
{
        mdiobus_unregister(re->mii_bus);
        mdiobus_free(re->mii_bus);
}

static void
ramips_phy_link_adjust(struct net_device *dev)
{
        struct raeth_priv *re = netdev_priv(dev);
        struct phy_device *phydev = re->phy_dev;
        unsigned long flags;
        int status_change = 0;

        spin_lock_irqsave(&re->phy_lock, flags);

        if (phydev->link)
                if (re->duplex != phydev->duplex ||
                    re->speed != phydev->speed)
                        status_change = 1;

        if (phydev->link != re->link)
                status_change = 1;

        re->link = phydev->link;
        re->duplex = phydev->duplex;
        re->speed = phydev->speed;

        if (status_change)
                ramips_link_adjust(re);

        spin_unlock_irqrestore(&re->phy_lock, flags);
}

static int
ramips_phy_connect_multi(struct raeth_priv *re)
{
        struct net_device *netdev = re->netdev;
        struct ramips_eth_platform_data *pdata;
        struct phy_device *phydev = NULL;
        int phy_addr;
        int ret = 0;

        pdata = re->parent->platform_data;
        for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
                if (!(pdata->phy_mask & (1 << phy_addr)))
                        continue;

                if (re->mii_bus->phy_map[phy_addr] == NULL)
                        continue;

                RADEBUG("%s: PHY found at %s, uid=%08x\n",
                        netdev->name,
                        dev_name(&re->mii_bus->phy_map[phy_addr]->dev),
                        re->mii_bus->phy_map[phy_addr]->phy_id);

                if (phydev == NULL)
                        phydev = re->mii_bus->phy_map[phy_addr];
        }

        if (!phydev) {
                netdev_err(netdev, "no PHY found with phy_mask=%08x\n",
                           pdata->phy_mask);
                return -ENODEV;
        }

        re->phy_dev = phy_connect(netdev, dev_name(&phydev->dev),
                                  ramips_phy_link_adjust, 0,
                                  pdata->phy_if_mode);

        if (IS_ERR(re->phy_dev)) {
                netdev_err(netdev, "could not connect to PHY at %s\n",
                           dev_name(&phydev->dev));
                return PTR_ERR(re->phy_dev);
        }

        phydev->supported &= PHY_GBIT_FEATURES;
        phydev->advertising = phydev->supported;

        RADEBUG("%s: connected to PHY at %s [uid=%08x, driver=%s]\n",
                netdev->name, dev_name(&phydev->dev),
                phydev->phy_id, phydev->drv->name);

        re->link = 0;
        re->speed = 0;
        re->duplex = -1;
        re->rx_fc = 0;
        re->tx_fc = 0;

        return ret;
}

static int
ramips_phy_connect_fixed(struct raeth_priv *re)
{
        struct ramips_eth_platform_data *pdata;

        pdata = re->parent->platform_data;
        switch (pdata->speed) {
        case SPEED_10:
        case SPEED_100:
        case SPEED_1000:
                break;
        default:
                netdev_err(re->netdev, "invalid speed specified\n");
                return -EINVAL;
        }

        RADEBUG("%s: using fixed link parameters\n", re->netdev->name);

        re->speed = pdata->speed;
        re->duplex = pdata->duplex;
        re->tx_fc = pdata->tx_fc;
        re->rx_fc = pdata->rx_fc;

        return 0;
}

static int
ramips_phy_connect(struct raeth_priv *re)
{
        struct ramips_eth_platform_data *pdata;

        pdata = re->parent->platform_data;
        if (pdata->phy_mask)
                return ramips_phy_connect_multi(re);

        return ramips_phy_connect_fixed(re);
}

static void
ramips_phy_disconnect(struct raeth_priv *re)
{
        if (re->phy_dev)
                phy_disconnect(re->phy_dev);
}

static void
ramips_phy_start(struct raeth_priv *re)
{
        unsigned long flags;

        if (re->phy_dev) {
                phy_start(re->phy_dev);
        } else {
                spin_lock_irqsave(&re->phy_lock, flags);
                re->link = 1;
                ramips_link_adjust(re);
                spin_unlock_irqrestore(&re->phy_lock, flags);
        }
}

static void
ramips_phy_stop(struct raeth_priv *re)
{
        unsigned long flags;

        if (re->phy_dev)
                phy_stop(re->phy_dev);

        spin_lock_irqsave(&re->phy_lock, flags);
        re->link = 0;
        ramips_link_adjust(re);
        spin_unlock_irqrestore(&re->phy_lock, flags);
}
#else
static inline int
ramips_mdio_init(struct raeth_priv *re)
{
        return 0;
}

static inline void
ramips_mdio_cleanup(struct raeth_priv *re)
{
}

static inline int
ramips_phy_connect(struct raeth_priv *re)
{
        return 0;
}

static inline void
ramips_phy_disconnect(struct raeth_priv *re)
{
}

static inline void
ramips_phy_start(struct raeth_priv *re)
{
}

static inline void
ramips_phy_stop(struct raeth_priv *re)
{
}
#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT3883 */

static void
ramips_cleanup_dma(struct raeth_priv *re)
{
        int i;

        for (i = 0; i < NUM_RX_DESC; i++)
                if (re->rx_skb[i]) {
                        dma_unmap_single(&re->netdev->dev, re->rx_dma[i],
                                         MAX_RX_LENGTH, DMA_FROM_DEVICE);
                        dev_kfree_skb_any(re->rx_skb[i]);
                }

        if (re->rx)
                dma_free_coherent(&re->netdev->dev,
                                  NUM_RX_DESC * sizeof(struct ramips_rx_dma),
                                  re->rx, re->rx_desc_dma);

        if (re->tx)
                dma_free_coherent(&re->netdev->dev,
                                  NUM_TX_DESC * sizeof(struct ramips_tx_dma),
                                  re->tx, re->tx_desc_dma);
}

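/*
 * Allocate the TX/RX descriptor rings and pre-fill the RX ring with
 * receive buffers.  The buffers are allocated with netdev_alloc_skb()
 * and NET_IP_ALIGN bytes are reserved so that the IP header ends up
 * aligned after the 14-byte ethernet header.
 */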
static int
ramips_alloc_dma(struct raeth_priv *re)
{
        int err = -ENOMEM;
        int i;

        re->skb_free_idx = 0;

        /* setup tx ring */
        re->tx = dma_alloc_coherent(&re->netdev->dev,
                                    NUM_TX_DESC * sizeof(struct ramips_tx_dma),
                                    &re->tx_desc_dma, GFP_ATOMIC);
        if (!re->tx)
                goto err_cleanup;

        memset(re->tx, 0, NUM_TX_DESC * sizeof(struct ramips_tx_dma));
        for (i = 0; i < NUM_TX_DESC; i++) {
                re->tx[i].txd2 = TX_DMA_LSO | TX_DMA_DONE;
                re->tx[i].txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
        }

        /* setup rx ring */
        re->rx = dma_alloc_coherent(&re->netdev->dev,
                                    NUM_RX_DESC * sizeof(struct ramips_rx_dma),
                                    &re->rx_desc_dma, GFP_ATOMIC);
        if (!re->rx)
                goto err_cleanup;

        memset(re->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
        for (i = 0; i < NUM_RX_DESC; i++) {
                dma_addr_t dma_addr;
                struct sk_buff *new_skb;

                new_skb = netdev_alloc_skb(re->netdev,
                                           MAX_RX_LENGTH + NET_IP_ALIGN);
                if (!new_skb)
                        goto err_cleanup;

                skb_reserve(new_skb, NET_IP_ALIGN);

                dma_addr = dma_map_single(&re->netdev->dev, new_skb->data,
                                          MAX_RX_LENGTH, DMA_FROM_DEVICE);
                re->rx_dma[i] = dma_addr;
                re->rx[i].rxd1 = (unsigned int) re->rx_dma[i];
                re->rx[i].rxd2 |= RX_DMA_LSO;
                re->rx_skb[i] = new_skb;
        }

        return 0;

err_cleanup:
        ramips_cleanup_dma(re);
        return err;
}

static void
ramips_setup_dma(struct raeth_priv *re)
{
        ramips_fe_wr(re->tx_desc_dma, RAMIPS_TX_BASE_PTR0);
        ramips_fe_wr(NUM_TX_DESC, RAMIPS_TX_MAX_CNT0);
        ramips_fe_wr(0, RAMIPS_TX_CTX_IDX0);
        ramips_fe_wr(RAMIPS_PST_DTX_IDX0, RAMIPS_PDMA_RST_CFG);

        ramips_fe_wr(re->rx_desc_dma, RAMIPS_RX_BASE_PTR0);
        ramips_fe_wr(NUM_RX_DESC, RAMIPS_RX_MAX_CNT0);
        ramips_fe_wr((NUM_RX_DESC - 1), RAMIPS_RX_CALC_IDX0);
        ramips_fe_wr(RAMIPS_PST_DRX_IDX0, RAMIPS_PDMA_RST_CFG);
}

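/*
 * Queue one packet on the TX ring.  The frame is dropped when either
 * the current or the next descriptor is still in use, i.e. the ring
 * is (nearly) full.
 */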
static int
ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct raeth_priv *re = netdev_priv(dev);
        unsigned long tx;
        unsigned int tx_next;
        dma_addr_t mapped_addr;

        if (re->plat->min_pkt_len) {
                if (skb->len < re->plat->min_pkt_len) {
                        if (skb_padto(skb, re->plat->min_pkt_len)) {
                                printk(KERN_ERR
                                       "ramips_eth: skb_padto failed\n");
                                kfree_skb(skb);
                                return 0;
                        }
                        skb_put(skb, re->plat->min_pkt_len - skb->len);
                }
        }

        dev->trans_start = jiffies;
        mapped_addr = dma_map_single(&re->netdev->dev, skb->data, skb->len,
                                     DMA_TO_DEVICE);

        spin_lock(&re->page_lock);
        tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
        tx_next = (tx + 1) % NUM_TX_DESC;

        if ((re->tx_skb[tx]) || (re->tx_skb[tx_next]) ||
            !(re->tx[tx].txd2 & TX_DMA_DONE) ||
            !(re->tx[tx_next].txd2 & TX_DMA_DONE))
                goto out;

        re->tx[tx].txd1 = (unsigned int) mapped_addr;
        re->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
        re->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
        re->tx_skb[tx] = skb;
        wmb();
        ramips_fe_wr(tx_next, RAMIPS_TX_CTX_IDX0);
        spin_unlock(&re->page_lock);
        return NETDEV_TX_OK;

out:
        spin_unlock(&re->page_lock);
        dev->stats.tx_dropped++;
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

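/*
 * RX tasklet: process up to 16 received frames per run.  A fresh buffer
 * is allocated for each completed descriptor; if that allocation fails,
 * the old buffer is reused and the frame is counted as dropped.
 */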
static void
ramips_eth_rx_hw(unsigned long ptr)
{
        struct net_device *dev = (struct net_device *) ptr;
        struct raeth_priv *re = netdev_priv(dev);
        int rx;
        int max_rx = 16;

        while (max_rx) {
                struct sk_buff *rx_skb, *new_skb;
                int pktlen;

                rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
                if (!(re->rx[rx].rxd2 & RX_DMA_DONE))
                        break;
                max_rx--;

                rx_skb = re->rx_skb[rx];
                pktlen = RX_DMA_PLEN0(re->rx[rx].rxd2);

                new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + NET_IP_ALIGN);
                /* Reuse the buffer on allocation failures */
                if (new_skb) {
                        dma_addr_t dma_addr;

                        dma_unmap_single(&re->netdev->dev, re->rx_dma[rx],
                                         MAX_RX_LENGTH, DMA_FROM_DEVICE);

                        skb_put(rx_skb, pktlen);
                        rx_skb->dev = dev;
                        rx_skb->protocol = eth_type_trans(rx_skb, dev);
                        rx_skb->ip_summed = CHECKSUM_NONE;
                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += pktlen;
                        netif_rx(rx_skb);

                        re->rx_skb[rx] = new_skb;
                        skb_reserve(new_skb, NET_IP_ALIGN);

                        dma_addr = dma_map_single(&re->netdev->dev,
                                                  new_skb->data,
                                                  MAX_RX_LENGTH,
                                                  DMA_FROM_DEVICE);
                        re->rx_dma[rx] = dma_addr;
                        re->rx[rx].rxd1 = (unsigned int) dma_addr;
                } else {
                        dev->stats.rx_dropped++;
                }

                re->rx[rx].rxd2 &= ~RX_DMA_DONE;
                wmb();
                ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
        }

        if (max_rx == 0)
                tasklet_schedule(&re->rx_tasklet);
        else
                ramips_fe_int_enable(RAMIPS_RX_DLY_INT);
}

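/*
 * TX housekeeping tasklet: release the skbs of all descriptors the
 * hardware has marked as done, then re-enable the TX delay interrupt.
 */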
static void
ramips_eth_tx_housekeeping(unsigned long ptr)
{
        struct net_device *dev = (struct net_device *) ptr;
        struct raeth_priv *re = netdev_priv(dev);

        spin_lock(&re->page_lock);
        while ((re->tx[re->skb_free_idx].txd2 & TX_DMA_DONE) &&
               (re->tx_skb[re->skb_free_idx])) {
                dev_kfree_skb_irq(re->tx_skb[re->skb_free_idx]);
                re->tx_skb[re->skb_free_idx] = NULL;
                re->skb_free_idx++;
                if (re->skb_free_idx >= NUM_TX_DESC)
                        re->skb_free_idx = 0;
        }
        spin_unlock(&re->page_lock);

        ramips_fe_int_enable(RAMIPS_TX_DLY_INT);
}

static void
ramips_eth_timeout(struct net_device *dev)
{
        struct raeth_priv *re = netdev_priv(dev);

        tasklet_schedule(&re->tx_housekeeping_tasklet);
}

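/*
 * Interrupt handler: acknowledge all pending events, mask the RX/TX
 * delay interrupts and defer the actual work to the tasklets.
 */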
static irqreturn_t
ramips_eth_irq(int irq, void *dev)
{
        struct raeth_priv *re = netdev_priv(dev);
        unsigned long fe_int = ramips_fe_rr(RAMIPS_FE_INT_STATUS);

        ramips_fe_wr(0xFFFFFFFF, RAMIPS_FE_INT_STATUS);

        if (fe_int & RAMIPS_RX_DLY_INT) {
                ramips_fe_int_disable(RAMIPS_RX_DLY_INT);
                tasklet_schedule(&re->rx_tasklet);
        }

        if (fe_int & RAMIPS_TX_DLY_INT) {
                ramips_fe_int_disable(RAMIPS_TX_DLY_INT);
                tasklet_schedule(&re->tx_housekeeping_tasklet);
        }

        return IRQ_HANDLED;
}

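/*
 * Bring the interface up: request the IRQ, set up the DMA rings,
 * program the MAC address, enable the PDMA engine and the delayed
 * RX/TX interrupts, and start the PHY.
 */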
static int
ramips_eth_open(struct net_device *dev)
{
        struct raeth_priv *re = netdev_priv(dev);
        int err;

        err = request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED,
                          dev->name, dev);
        if (err)
                return err;

        err = ramips_alloc_dma(re);
        if (err)
                goto err_free_irq;

        ramips_hw_set_macaddr(dev->dev_addr);

        ramips_setup_dma(re);
        ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & 0xff) |
                     (RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
                      RAMIPS_TX_DMA_EN | RAMIPS_PDMA_SIZE_4DWORDS),
                     RAMIPS_PDMA_GLO_CFG);
        ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG) &
                      ~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
                     ((re->plat->sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
                     RAMIPS_FE_GLO_CFG);

        tasklet_init(&re->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
                     (unsigned long)dev);
        tasklet_init(&re->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);

        ramips_phy_start(re);

        ramips_fe_wr(RAMIPS_DELAY_INIT, RAMIPS_DLY_INT_CFG);
        ramips_fe_wr(RAMIPS_TX_DLY_INT | RAMIPS_RX_DLY_INT, RAMIPS_FE_INT_ENABLE);
        ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG) &
                     ~(RAMIPS_GDM1_ICS_EN | RAMIPS_GDM1_TCS_EN | RAMIPS_GDM1_UCS_EN | 0xffff),
                     RAMIPS_GDMA1_FWD_CFG);
        ramips_fe_wr(ramips_fe_rr(RAMIPS_CDMA_CSG_CFG) &
                     ~(RAMIPS_ICS_GEN_EN | RAMIPS_TCS_GEN_EN | RAMIPS_UCS_GEN_EN),
                     RAMIPS_CDMA_CSG_CFG);
        ramips_fe_wr(RAMIPS_PSE_FQFC_CFG_INIT, RAMIPS_PSE_FQ_CFG);
        ramips_fe_wr(1, RAMIPS_FE_RST_GL);
        ramips_fe_wr(0, RAMIPS_FE_RST_GL);

        netif_start_queue(dev);
        return 0;

err_free_irq:
        free_irq(dev->irq, dev);
        return err;
}

static int
ramips_eth_stop(struct net_device *dev)
{
        struct raeth_priv *re = netdev_priv(dev);

        ramips_fe_wr(ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) &
                     ~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN),
                     RAMIPS_PDMA_GLO_CFG);

        /* disable all interrupts in the hw */
        ramips_fe_wr(0, RAMIPS_FE_INT_ENABLE);

        ramips_phy_stop(re);
        free_irq(dev->irq, dev);
        netif_stop_queue(dev);
        tasklet_kill(&re->tx_housekeeping_tasklet);
        tasklet_kill(&re->rx_tasklet);
        ramips_cleanup_dma(re);
        RADEBUG("ramips_eth: stopped\n");
        return 0;
}

static int __init
ramips_eth_probe(struct net_device *dev)
{
        struct raeth_priv *re = netdev_priv(dev);
        int err;

        BUG_ON(!re->plat->reset_fe);
        re->plat->reset_fe();
        net_srandom(jiffies);
        memcpy(dev->dev_addr, re->plat->mac, ETH_ALEN);

        ether_setup(dev);
        dev->mtu = 1500;
        dev->watchdog_timeo = TX_TIMEOUT;
        spin_lock_init(&re->page_lock);
        spin_lock_init(&re->phy_lock);

        err = ramips_mdio_init(re);
        if (err)
                return err;

        err = ramips_phy_connect(re);
        if (err)
                goto err_mdio_cleanup;

        return 0;

err_mdio_cleanup:
        ramips_mdio_cleanup(re);
        return err;
}

static void
ramips_eth_uninit(struct net_device *dev)
{
        struct raeth_priv *re = netdev_priv(dev);

        ramips_phy_disconnect(re);
        ramips_mdio_cleanup(re);
}

static const struct net_device_ops ramips_eth_netdev_ops = {
        .ndo_init               = ramips_eth_probe,
        .ndo_uninit             = ramips_eth_uninit,
        .ndo_open               = ramips_eth_open,
        .ndo_stop               = ramips_eth_stop,
        .ndo_start_xmit         = ramips_eth_hard_start_xmit,
        .ndo_tx_timeout         = ramips_eth_timeout,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

static int
ramips_eth_plat_probe(struct platform_device *plat)
{
        struct raeth_priv *re;
        struct ramips_eth_platform_data *data = plat->dev.platform_data;
        struct resource *res;
        int err;

        if (!data) {
                dev_err(&plat->dev, "no platform data specified\n");
                return -EINVAL;
        }

        res = platform_get_resource(plat, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&plat->dev, "no memory resource found\n");
                return -ENXIO;
        }

        ramips_fe_base = ioremap_nocache(res->start, res->end - res->start + 1);
        if (!ramips_fe_base)
                return -ENOMEM;

        ramips_dev = alloc_etherdev(sizeof(struct raeth_priv));
        if (!ramips_dev) {
                dev_err(&plat->dev, "alloc_etherdev failed\n");
                err = -ENOMEM;
                goto err_unmap;
        }

        strcpy(ramips_dev->name, "eth%d");
        ramips_dev->irq = platform_get_irq(plat, 0);
        if (ramips_dev->irq < 0) {
                dev_err(&plat->dev, "no IRQ resource found\n");
                err = -ENXIO;
                goto err_free_dev;
        }
        ramips_dev->addr_len = ETH_ALEN;
        ramips_dev->base_addr = (unsigned long)ramips_fe_base;
        ramips_dev->netdev_ops = &ramips_eth_netdev_ops;

        re = netdev_priv(ramips_dev);

        re->netdev = ramips_dev;
        re->parent = &plat->dev;
        re->speed = data->speed;
        re->duplex = data->duplex;
        re->rx_fc = data->rx_fc;
        re->tx_fc = data->tx_fc;
        re->plat = data;

        err = register_netdev(ramips_dev);
        if (err) {
                dev_err(&plat->dev, "error bringing up device\n");
                goto err_free_dev;
        }

        RADEBUG("ramips_eth: loaded\n");
        return 0;

err_free_dev:
        free_netdev(ramips_dev);
err_unmap:
        iounmap(ramips_fe_base);
        return err;
}

static int
ramips_eth_plat_remove(struct platform_device *plat)
{
        unregister_netdev(ramips_dev);
        free_netdev(ramips_dev);
        RADEBUG("ramips_eth: unloaded\n");
        return 0;
}

static struct platform_driver ramips_eth_driver = {
        .probe = ramips_eth_plat_probe,
        .remove = ramips_eth_plat_remove,
        .driver = {
                .name = "ramips_eth",
                .owner = THIS_MODULE,
        },
};

static int __init
ramips_eth_init(void)
{
        int ret;

        ret = rt305x_esw_init();
        if (ret)
                return ret;

        ret = platform_driver_register(&ramips_eth_driver);
        if (ret) {
                printk(KERN_ERR
                       "ramips_eth: Error registering platform driver!\n");
                goto esw_cleanup;
        }

        return 0;

esw_cleanup:
        rt305x_esw_exit();
        return ret;
}

static void __exit
ramips_eth_cleanup(void)
{
        platform_driver_unregister(&ramips_eth_driver);
        rt305x_esw_exit();
}

module_init(ramips_eth_init);
module_exit(ramips_eth_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("ethernet driver for ramips boards");