cns3xxx: use files directory
target/linux/cns3xxx/patches-3.3/051-cns3xxx_gigabit.patch
--- /dev/null
+++ b/drivers/net/ethernet/cavium/cns3xxx_eth.c
@@ -0,0 +1,1361 @@
+/*
+ * Cavium CNS3xxx Gigabit driver for Linux
+ *
+ * Copyright 2011 Gateworks Corporation
+ * Chris Lang <clang@gateworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <mach/irqs.h>
+#include <mach/platform.h>
+
+#define DRV_NAME "cns3xxx_eth"
+
+#define RX_DESCS 512
+#define TX_DESCS 512
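+/*
+ * dev_alloc_skb() leaves NET_SKB_PAD bytes of headroom.  When that is
+ * not a multiple of the cache line size, reserving SKB_DMA_REALIGN
+ * extra bytes appears intended to bring skb->data back onto a
+ * cache-line boundary before the buffer is DMA-mapped (the value is
+ * zero when NET_SKB_PAD is already aligned).
+ */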
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+#define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
+#define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
+#define REGS_SIZE 336
+#define MAX_MRU (1536 + SKB_DMA_REALIGN)
+#define CNS3XXX_MAX_MTU (1536)
+
+#define NAPI_WEIGHT 64
+
+/* MDIO Defines */
+#define MDIO_CMD_COMPLETE 0x00008000
+#define MDIO_WRITE_COMMAND 0x00002000
+#define MDIO_READ_COMMAND 0x00004000
+#define MDIO_REG_OFFSET 8
+#define MDIO_VALUE_OFFSET 16
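+/*
+ * Layout of the phy_control register as used by cns3xxx_mdio_cmd()
+ * below, reconstructed from these defines and the masks in that
+ * function:
+ *   [31:16] data   [15] command complete   [14] read   [13] write
+ *   [12:8] PHY register   [4:0] PHY address
+ */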
+
+/* Descriptor Defines */
+#define END_OF_RING 0x40000000
+#define FIRST_SEGMENT 0x20000000
+#define LAST_SEGMENT 0x10000000
+#define FORCE_ROUTE 0x04000000
+#define IP_CHECKSUM 0x00040000
+#define UDP_CHECKSUM 0x00020000
+#define TCP_CHECKSUM 0x00010000
+
+/* Port Config Defines */
+#define PORT_BP_ENABLE 0x00020000
+#define PORT_DISABLE 0x00040000
+#define PORT_LEARN_DIS 0x00080000
+#define PORT_BLOCK_STATE 0x00100000
+#define PORT_BLOCK_MODE 0x00200000
+
+#define PROMISC_OFFSET 29
+
+/* Global Config Defines */
+#define UNKNOWN_VLAN_TO_CPU 0x02000000
+#define ACCEPT_CRC_PACKET 0x00200000
+#define CRC_STRIPPING 0x00100000
+
+/* VLAN Config Defines */
+#define NIC_MODE 0x00008000
+#define VLAN_UNAWARE 0x00000001
+
+/* DMA AUTO Poll Defines */
+#define TS_POLL_EN 0x00000020
+#define TS_SUSPEND 0x00000010
+#define FS_POLL_EN 0x00000002
+#define FS_SUSPEND 0x00000001
+
+/* DMA Ring Control Defines */
+#define QUEUE_THRESHOLD 0x000000f0
+#define CLR_FS_STATE 0x80000000
+
+/* Interrupt Status Defines */
+#define MAC0_STATUS_CHANGE 0x00004000
+#define MAC1_STATUS_CHANGE 0x00008000
+#define MAC2_STATUS_CHANGE 0x00010000
+#define MAC0_RX_ERROR 0x00100000
+#define MAC1_RX_ERROR 0x00200000
+#define MAC2_RX_ERROR 0x00400000
+
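+/*
+ * Descriptors are shared with the switch DMA engines.  The cown
+ * ("CPU own") bit carries the ownership handshake: the driver hands
+ * a descriptor to hardware with cown = 0 and the switch sets cown = 1
+ * when it is done, as relied upon by eth_poll() and clear_tx_desc().
+ */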
+struct tx_desc
+{
+ u32 sdp; /* segment data pointer */
+
+ union {
+ struct {
+ u32 sdl:16; /* segment data length */
+ u32 tco:1;
+ u32 uco:1;
+ u32 ico:1;
+ u32 rsv_1:3; /* reserved */
+ u32 pri:3;
+ u32 fp:1; /* force priority */
+ u32 fr:1;
+ u32 interrupt:1;
+ u32 lsd:1;
+ u32 fsd:1;
+ u32 eor:1;
+ u32 cown:1;
+ };
+ u32 config0;
+ };
+
+ union {
+ struct {
+ u32 ctv:1;
+ u32 stv:1;
+ u32 sid:4;
+ u32 inss:1;
+ u32 dels:1;
+ u32 rsv_2:9;
+ u32 pmap:5;
+ u32 mark:3;
+ u32 ewan:1;
+ u32 fewan:1;
+ u32 rsv_3:5;
+ };
+ u32 config1;
+ };
+
+ union {
+ struct {
+ u32 c_vid:12;
+ u32 c_cfs:1;
+ u32 c_pri:3;
+ u32 s_vid:12;
+ u32 s_dei:1;
+ u32 s_pri:3;
+ };
+ u32 config2;
+ };
+
+ u8 alignment[16]; /* for 32 byte alignment */
+};
+
+struct rx_desc
+{
+ u32 sdp; /* segment data pointer */
+
+ union {
+ struct {
+ u32 sdl:16; /* segment data length */
+ u32 l4f:1;
+ u32 ipf:1;
+ u32 prot:4;
+ u32 hr:6;
+ u32 lsd:1;
+ u32 fsd:1;
+ u32 eor:1;
+ u32 cown:1;
+ };
+ u32 config0;
+ };
+
+ union {
+ struct {
+ u32 ctv:1;
+ u32 stv:1;
+ u32 unv:1;
+ u32 iwan:1;
+ u32 exdv:1;
+ u32 e_wan:1;
+ u32 rsv_1:2;
+ u32 sp:3;
+ u32 crc_err:1;
+ u32 un_eth:1;
+ u32 tc:2;
+ u32 rsv_2:1;
+ u32 ip_offset:5;
+ u32 rsv_3:11;
+ };
+ u32 config1;
+ };
+
+ union {
+ struct {
+ u32 c_vid:12;
+ u32 c_cfs:1;
+ u32 c_pri:3;
+ u32 s_vid:12;
+ u32 s_dei:1;
+ u32 s_pri:3;
+ };
+ u32 config2;
+ };
+
+ u8 alignment[16]; /* for 32 byte alignment */
+};
+
+
+struct switch_regs {
+ u32 phy_control;
+ u32 phy_auto_addr;
+ u32 mac_glob_cfg;
+ u32 mac_cfg[4];
+ u32 mac_pri_ctrl[5], __res;
+ u32 etype[2];
+ u32 udp_range[4];
+ u32 prio_etype_udp;
+ u32 prio_ipdscp[8];
+ u32 tc_ctrl;
+ u32 rate_ctrl;
+ u32 fc_glob_thrs;
+ u32 fc_port_thrs;
+ u32 mc_fc_glob_thrs;
+ u32 dc_glob_thrs;
+ u32 arl_vlan_cmd;
+ u32 arl_ctrl[3];
+ u32 vlan_cfg;
+ u32 pvid[2];
+ u32 vlan_ctrl[3];
+ u32 session_id[8];
+ u32 intr_stat;
+ u32 intr_mask;
+ u32 sram_test;
+ u32 mem_queue;
+ u32 farl_ctrl;
+ u32 fc_input_thrs, __res1[2];
+ u32 clk_skew_ctrl;
+ u32 mac_glob_cfg_ext, __res2[2];
+ u32 dma_ring_ctrl;
+ u32 dma_auto_poll_cfg;
+ u32 delay_intr_cfg, __res3;
+ u32 ts_dma_ctrl0;
+ u32 ts_desc_ptr0;
+ u32 ts_desc_base_addr0, __res4;
+ u32 fs_dma_ctrl0;
+ u32 fs_desc_ptr0;
+ u32 fs_desc_base_addr0, __res5;
+ u32 ts_dma_ctrl1;
+ u32 ts_desc_ptr1;
+ u32 ts_desc_base_addr1, __res6;
+ u32 fs_dma_ctrl1;
+ u32 fs_desc_ptr1;
+ u32 fs_desc_base_addr1;
+ u32 __res7[109];
+ u32 mac_counter0[13];
+};
+
+struct _tx_ring {
+ struct tx_desc *desc;
+ dma_addr_t phys_addr;
+ struct tx_desc *cur_addr;
+ struct sk_buff *buff_tab[TX_DESCS];
+ unsigned int phys_tab[TX_DESCS];
+ u32 free_index;
+ u32 count_index;
+ u32 cur_index;
+ int num_used;
+ int num_count;
+};
+
+struct _rx_ring {
+ struct rx_desc *desc;
+ dma_addr_t phys_addr;
+ struct rx_desc *cur_addr;
+ struct sk_buff *buff_tab[RX_DESCS];
+ unsigned int phys_tab[RX_DESCS];
+ u32 cur_index;
+ u32 alloc_index;
+ int alloc_count;
+};
+
+struct sw {
+ struct resource *mem_res;
+ struct switch_regs __iomem *regs;
+ struct napi_struct napi;
+ struct cns3xxx_plat_info *plat;
+ struct _tx_ring *tx_ring;
+ struct _rx_ring *rx_ring;
+};
+
+struct port {
+ struct net_device *netdev;
+ struct phy_device *phydev;
+ struct sw *sw;
+ int id; /* logical port ID */
+ int speed, duplex;
+};
+
+static spinlock_t mdio_lock;
+static DEFINE_SPINLOCK(tx_lock);
+static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */
+struct mii_bus *mdio_bus;
+static int ports_open;
+static struct port *switch_port_tab[4];
+static struct dma_pool *rx_dma_pool;
+static struct dma_pool *tx_dma_pool;
+struct net_device *napi_dev;
+
+static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
+ int write, u16 cmd)
+{
+ int cycles = 0;
+ u32 temp = 0;
+
+ temp = __raw_readl(&mdio_regs->phy_control);
+ temp |= MDIO_CMD_COMPLETE;
+ __raw_writel(temp, &mdio_regs->phy_control);
+ udelay(10);
+
+ if (write) {
+ temp = (cmd << MDIO_VALUE_OFFSET);
+ temp |= MDIO_WRITE_COMMAND;
+ } else {
+ temp = MDIO_READ_COMMAND;
+ }
+ temp |= ((location & 0x1f) << MDIO_REG_OFFSET);
+ temp |= (phy_id & 0x1f);
+
+ __raw_writel(temp, &mdio_regs->phy_control);
+
+ while (((__raw_readl(&mdio_regs->phy_control) & MDIO_CMD_COMPLETE) == 0)
+ && cycles < 5000) {
+ udelay(1);
+ cycles++;
+ }
+
+ if (cycles == 5000) {
+ printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
+ phy_id);
+ return -1;
+ }
+
+ temp = __raw_readl(&mdio_regs->phy_control);
+ temp |= MDIO_CMD_COMPLETE;
+ __raw_writel(temp, &mdio_regs->phy_control);
+
+ if (write)
+ return 0;
+
+ return ((temp >> MDIO_VALUE_OFFSET) & 0xFFFF);
+}
+
+static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdio_lock, flags);
+ ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0);
+ spin_unlock_irqrestore(&mdio_lock, flags);
+ return ret;
+}
+
+static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,
+ u16 val)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdio_lock, flags);
+ ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val);
+ spin_unlock_irqrestore(&mdio_lock, flags);
+ return ret;
+}
+
+static int cns3xxx_mdio_register(void)
+{
+ int err;
+
+ if (!(mdio_bus = mdiobus_alloc()))
+ return -ENOMEM;
+
+ mdio_regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
+
+ spin_lock_init(&mdio_lock);
+ mdio_bus->name = "CNS3xxx MII Bus";
+ mdio_bus->read = &cns3xxx_mdio_read;
+ mdio_bus->write = &cns3xxx_mdio_write;
+ strcpy(mdio_bus->id, "0");
+
+ if ((err = mdiobus_register(mdio_bus)))
+ mdiobus_free(mdio_bus);
+ return err;
+}
+
+static void cns3xxx_mdio_remove(void)
+{
+ mdiobus_unregister(mdio_bus);
+ mdiobus_free(mdio_bus);
+}
+
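+/*
+ * The ts_* registers drive the TX ring and the fs_* registers the RX
+ * ring (presumably "to switch" and "from switch").  Writing 0x1 kicks
+ * the respective DMA engine so it rescans its descriptor ring.
+ */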
+static void enable_tx_dma(struct sw *sw)
+{
+ __raw_writel(0x1, &sw->regs->ts_dma_ctrl0);
+}
+
+static void enable_rx_dma(struct sw *sw)
+{
+ __raw_writel(0x1, &sw->regs->fs_dma_ctrl0);
+}
+
+static void cns3xxx_adjust_link(struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ struct phy_device *phydev = port->phydev;
+
+ if (!phydev->link) {
+ if (port->speed) {
+ port->speed = 0;
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ }
+ return;
+ }
+
+ if (port->speed == phydev->speed && port->duplex == phydev->duplex)
+ return;
+
+ port->speed = phydev->speed;
+ port->duplex = phydev->duplex;
+
+ printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
+ dev->name, port->speed, port->duplex ? "full" : "half");
+}
+
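+/*
+ * Standard NAPI handshake: mask the RX interrupt and schedule the
+ * poller; eth_poll() re-enables the IRQ once it completes under budget.
+ */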
+irqreturn_t eth_rx_irq(int irq, void *pdev)
+{
+ struct net_device *dev = pdev;
+ struct sw *sw = netdev_priv(dev);
+ if (likely(napi_schedule_prep(&sw->napi))) {
+ disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC);
+ __napi_schedule(&sw->napi);
+ }
+ return IRQ_HANDLED;
+}
+
+irqreturn_t eth_stat_irq(int irq, void *pdev)
+{
+ struct net_device *dev = pdev;
+ struct sw *sw = netdev_priv(dev);
+ u32 cfg;
+ u32 stat = __raw_readl(&sw->regs->intr_stat);
+ __raw_writel(0xffffffff, &sw->regs->intr_stat);
+
+ if (stat & MAC2_RX_ERROR)
+ switch_port_tab[3]->netdev->stats.rx_dropped++;
+ if (stat & MAC1_RX_ERROR)
+ switch_port_tab[1]->netdev->stats.rx_dropped++;
+ if (stat & MAC0_RX_ERROR)
+ switch_port_tab[0]->netdev->stats.rx_dropped++;
+
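+ /*
+ * On a status change the PHY state is read back from the MAC
+ * configuration register rather than over MDIO: bit 0 is link,
+ * bits 3:2 encode speed (2 = 1000, 1 = 100, 0 = 10) and bit 4
+ * duplex, as decoded below.
+ */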
+ if (stat & MAC0_STATUS_CHANGE) {
+ cfg = __raw_readl(&sw->regs->mac_cfg[0]);
+ switch_port_tab[0]->phydev->link = (cfg & 0x1);
+ switch_port_tab[0]->phydev->duplex = ((cfg >> 4) & 0x1);
+ if (((cfg >> 2) & 0x3) == 2)
+ switch_port_tab[0]->phydev->speed = 1000;
+ else if (((cfg >> 2) & 0x3) == 1)
+ switch_port_tab[0]->phydev->speed = 100;
+ else
+ switch_port_tab[0]->phydev->speed = 10;
+ cns3xxx_adjust_link(switch_port_tab[0]->netdev);
+ }
+
+ if (stat & MAC1_STATUS_CHANGE) {
+ cfg = __raw_readl(&sw->regs->mac_cfg[1]);
+ switch_port_tab[1]->phydev->link = (cfg & 0x1);
+ switch_port_tab[1]->phydev->duplex = ((cfg >> 4) & 0x1);
+ if (((cfg >> 2) & 0x3) == 2)
+ switch_port_tab[1]->phydev->speed = 1000;
+ else if (((cfg >> 2) & 0x3) == 1)
+ switch_port_tab[1]->phydev->speed = 100;
+ else
+ switch_port_tab[1]->phydev->speed = 10;
+ cns3xxx_adjust_link(switch_port_tab[1]->netdev);
+ }
+
+ if (stat & MAC2_STATUS_CHANGE) {
+ cfg = __raw_readl(&sw->regs->mac_cfg[3]);
+ switch_port_tab[3]->phydev->link = (cfg & 0x1);
+ switch_port_tab[3]->phydev->duplex = ((cfg >> 4) & 0x1);
+ if (((cfg >> 2) & 0x3) == 2)
+ switch_port_tab[3]->phydev->speed = 1000;
+ else if (((cfg >> 2) & 0x3) == 1)
+ switch_port_tab[3]->phydev->speed = 100;
+ else
+ switch_port_tab[3]->phydev->speed = 10;
+ cns3xxx_adjust_link(switch_port_tab[3]->netdev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+
+static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
+{
+ struct _rx_ring *rx_ring = sw->rx_ring;
+ unsigned int i = rx_ring->alloc_index;
+ struct rx_desc *desc = &(rx_ring)->desc[i];
+ struct sk_buff *skb;
+ unsigned int phys;
+
+ for (received += rx_ring->alloc_count; received > 0; received--) {
+ if ((skb = dev_alloc_skb(MAX_MRU))) {
+ if (SKB_DMA_REALIGN)
+ skb_reserve(skb, SKB_DMA_REALIGN);
+ skb_reserve(skb, NET_IP_ALIGN);
+ phys = dma_map_single(NULL, skb->data,
+ CNS3XXX_MAX_MTU, DMA_FROM_DEVICE);
+ if (dma_mapping_error(NULL, phys)) {
+ dev_kfree_skb(skb);
+ /* Failed to map, better luck next time */
+ goto out;
+ }
+ desc->sdp = phys;
+ } else {
+ /* Failed to allocate skb, try again next time */
+ goto out;
+ }
+
+ /* put the new buffer on RX-free queue */
+ rx_ring->buff_tab[i] = skb;
+ rx_ring->phys_tab[i] = phys;
+ if (i == RX_DESCS - 1) {
+ i = 0;
+ desc->config0 = END_OF_RING | FIRST_SEGMENT |
+ LAST_SEGMENT | CNS3XXX_MAX_MTU;
+ desc = &(rx_ring)->desc[i];
+ } else {
+ desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | CNS3XXX_MAX_MTU;
+ i++;
+ desc++;
+ }
+ }
+out:
+ rx_ring->alloc_count = received;
+ rx_ring->alloc_index = i;
+}
+
+static void clear_tx_desc(struct sw *sw)
+{
+ struct _tx_ring *tx_ring = sw->tx_ring;
+ struct tx_desc *desc;
+ int i;
+ int index;
+ int num_used = tx_ring->num_used;
+ struct sk_buff *skb;
+
+ if (num_used < (TX_DESCS >> 1))
+ return;
+
+ index = tx_ring->free_index;
+ desc = &(tx_ring)->desc[index];
+ for (i = 0; i < num_used; i++) {
+ if (desc->cown) {
+ skb = tx_ring->buff_tab[index];
+ tx_ring->buff_tab[index] = 0;
+ if (skb)
+ dev_kfree_skb_any(skb);
+ dma_unmap_single(NULL, tx_ring->phys_tab[index],
+ desc->sdl, DMA_TO_DEVICE);
+ if (++index == TX_DESCS) {
+ index = 0;
+ desc = &(tx_ring)->desc[index];
+ } else {
+ desc++;
+ }
+ } else {
+ break;
+ }
+ }
+ tx_ring->free_index = index;
+ tx_ring->num_used -= i;
+}
+
+static int eth_poll(struct napi_struct *napi, int budget)
+{
+ struct sw *sw = container_of(napi, struct sw, napi);
+ struct net_device *dev;
+ struct _rx_ring *rx_ring = sw->rx_ring;
+ int received = 0;
+ unsigned int length;
+ unsigned int i = rx_ring->cur_index;
+ struct rx_desc *desc = &(rx_ring)->desc[i];
+
+ while (desc->cown) {
+ struct sk_buff *skb;
+
+ if (received >= budget)
+ break;
+
+ skb = rx_ring->buff_tab[i];
+
+ dev = switch_port_tab[desc->sp]->netdev;
+
+ length = desc->sdl;
+ /* process received frame */
+ dma_unmap_single(&dev->dev, rx_ring->phys_tab[i],
+ length, DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length;
+
+ /* RX Hardware checksum offload */
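+ /*
+ * desc->prot classifies the frame; the values handled here appear
+ * to be the TCP/UDP over IPv4/IPv6 cases, for which l4f reports a
+ * failed hardware L4 checksum.
+ */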
+ switch (desc->prot) {
+ case 1:
+ case 2:
+ case 5:
+ case 6:
+ case 13:
+ case 14:
+ if (desc->l4f)
+ skb->ip_summed = CHECKSUM_NONE;
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ skb->ip_summed = CHECKSUM_NONE;
+ break;
+ }
+
+ napi_gro_receive(napi, skb);
+
+ received++;
+
+ if (++i == RX_DESCS) {
+ i = 0;
+ desc = &(rx_ring)->desc[i];
+ } else {
+ desc++;
+ }
+ }
+
+ cns3xxx_alloc_rx_buf(sw, received);
+
+ rx_ring->cur_index = i;
+
+ if (received != budget) {
+ napi_complete(napi);
+ enable_irq(IRQ_CNS3XXX_SW_R0RXC);
+ }
+
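+ /*
+ * The RX DMA engine appears to suspend once it runs out of
+ * CPU-filled descriptors, so it is kicked again after the refill
+ * above to resume reception.
+ */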
+ enable_rx_dma(sw);
+
+ return received;
+}
+
+static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ struct sw *sw = port->sw;
+ struct _tx_ring *tx_ring = sw->tx_ring;
+ struct tx_desc *tx_desc;
+ int index;
+ int len;
+ char pmap = (1 << port->id);
+ unsigned int phys;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct skb_frag_struct *frag;
+ unsigned int i;
+
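+ /*
+ * Logical port 3 (the third MAC) uses bit 4, not bit 3, in the TX
+ * port map; the ID-to-bit mapping is register specific (compare
+ * eth_rx_mode(), where the same port maps to bit 2).
+ */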
+ if (pmap == 8)
+ pmap = (1 << 4);
+
+ if (skb->len > CNS3XXX_MAX_MTU) {
+ dev_kfree_skb(skb);
+ dev->stats.tx_errors++;
+ return NETDEV_TX_OK;
+ }
+
+ spin_lock(&tx_lock);
+
+ if ((tx_ring->num_used + nr_frags) >= TX_DESCS) {
+ clear_tx_desc(sw);
+ if ((tx_ring->num_used + nr_frags) >= TX_DESCS) {
+ spin_unlock(&tx_lock);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ index = tx_ring->cur_index;
+ tx_ring->cur_index = ((tx_ring->cur_index + nr_frags + 1) % TX_DESCS);
+
+ spin_unlock(&tx_lock);
+
+ if (!nr_frags) {
+ tx_desc = &(tx_ring)->desc[index];
+
+ len = skb->len;
+
+ phys = dma_map_single(NULL, skb->data, len,
+ DMA_TO_DEVICE);
+
+ tx_desc->sdp = phys;
+ tx_desc->pmap = pmap;
+ tx_ring->phys_tab[index] = phys;
+
+ tx_ring->buff_tab[index] = skb;
+ if (index == TX_DESCS - 1) {
+ tx_desc->config0 = END_OF_RING | FIRST_SEGMENT | LAST_SEGMENT |
+ FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
+ TCP_CHECKSUM | len;
+ } else {
+ tx_desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
+ FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
+ TCP_CHECKSUM | len;
+ }
+ } else {
+ unsigned int config;
+
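+ /*
+ * The descriptor chain is written back to front: fragments first,
+ * the FIRST_SEGMENT head descriptor last, so the switch never sees
+ * a partially built frame; the mb() below orders these writes.
+ */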
+ index = ((index + nr_frags) % TX_DESCS);
+ tx_desc = &(tx_ring)->desc[index];
+
+ /* fragments */
+ for (i = nr_frags; i > 0; i--) {
+ void *addr;
+
+ frag = &skb_shinfo(skb)->frags[i-1];
+ len = frag->size;
+
+ addr = page_address(skb_frag_page(frag)) +
+ frag->page_offset;
+ phys = dma_map_single(NULL, addr, len, DMA_TO_DEVICE);
+
+ tx_desc->sdp = phys;
+
+ tx_desc->pmap = pmap;
+ tx_ring->phys_tab[index] = phys;
+
+ config = FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
+ TCP_CHECKSUM | len;
+ if (i == nr_frags) {
+ config |= LAST_SEGMENT;
+ tx_ring->buff_tab[index] = skb;
+ }
+ if (index == TX_DESCS - 1)
+ config |= END_OF_RING;
+ tx_desc->config0 = config;
+
+ if (index == 0) {
+ index = TX_DESCS - 1;
+ tx_desc = &(tx_ring)->desc[index];
+ } else {
+ index--;
+ tx_desc--;
+ }
+ }
+
+ /* header */
+ len = skb->len - skb->data_len;
+
+ phys = dma_map_single(NULL, skb->data, len,
+ DMA_TO_DEVICE);
+
+ tx_desc->sdp = phys;
+ tx_desc->pmap = pmap;
+ tx_ring->phys_tab[index] = phys;
+
+ if (index == TX_DESCS - 1) {
+ tx_desc->config0 = END_OF_RING | FIRST_SEGMENT |
+ FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
+ TCP_CHECKSUM | len;
+ } else {
+ tx_desc->config0 = FIRST_SEGMENT |
+ FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM |
+ TCP_CHECKSUM | len;
+ }
+ }
+
+ mb();
+
+ spin_lock(&tx_lock);
+ tx_ring->num_used += nr_frags + 1;
+ spin_unlock(&tx_lock);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ enable_tx_dma(sw);
+
+ return NETDEV_TX_OK;
+}
+
+static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct port *port = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+ return phy_mii_ioctl(port->phydev, req, cmd);
+}
+
+/* ethtool support */
+
+static void cns3xxx_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->bus_info, "internal");
+}
+
+static int cns3xxx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct port *port = netdev_priv(dev);
+ return phy_ethtool_gset(port->phydev, cmd);
+}
+
+static int cns3xxx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct port *port = netdev_priv(dev);
+ return phy_ethtool_sset(port->phydev, cmd);
+}
+
+static int cns3xxx_nway_reset(struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ return phy_start_aneg(port->phydev);
+}
+
+static struct ethtool_ops cns3xxx_ethtool_ops = {
+ .get_drvinfo = cns3xxx_get_drvinfo,
+ .get_settings = cns3xxx_get_settings,
+ .set_settings = cns3xxx_set_settings,
+ .nway_reset = cns3xxx_nway_reset,
+ .get_link = ethtool_op_get_link,
+};
+
+
+static int init_rings(struct sw *sw)
+{
+ int i;
+ struct _rx_ring *rx_ring = sw->rx_ring;
+ struct _tx_ring *tx_ring = sw->tx_ring;
+
+ __raw_writel(0, &sw->regs->fs_dma_ctrl0);
+ __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
+ __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
+ __raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
+
+ __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
+
+ if (!(rx_dma_pool = dma_pool_create(DRV_NAME, NULL,
+ RX_POOL_ALLOC_SIZE, 32, 0)))
+ return -ENOMEM;
+
+ if (!(rx_ring->desc = dma_pool_alloc(rx_dma_pool, GFP_KERNEL,
+ &rx_ring->phys_addr)))
+ return -ENOMEM;
+ memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);
+
+ /* Setup RX buffers */
+ for (i = 0; i < RX_DESCS; i++) {
+ struct rx_desc *desc = &(rx_ring)->desc[i];
+ struct sk_buff *skb;
+ if (!(skb = dev_alloc_skb(MAX_MRU)))
+ return -ENOMEM;
+ if (SKB_DMA_REALIGN)
+ skb_reserve(skb, SKB_DMA_REALIGN);
+ skb_reserve(skb, NET_IP_ALIGN);
+ desc->sdl = CNS3XXX_MAX_MTU;
+ if (i == (RX_DESCS - 1))
+ desc->eor = 1;
+ desc->fsd = 1;
+ desc->lsd = 1;
+
+ desc->sdp = dma_map_single(NULL, skb->data,
+ CNS3XXX_MAX_MTU, DMA_FROM_DEVICE);
+ if (dma_mapping_error(NULL, desc->sdp)) {
+ return -EIO;
+ }
+ rx_ring->buff_tab[i] = skb;
+ rx_ring->phys_tab[i] = desc->sdp;
+ desc->cown = 0;
+ }
+ __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0);
+ __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0);
+
+ if (!(tx_dma_pool = dma_pool_create(DRV_NAME, NULL,
+ TX_POOL_ALLOC_SIZE, 32, 0)))
+ return -ENOMEM;
+
+ if (!(tx_ring->desc = dma_pool_alloc(tx_dma_pool, GFP_KERNEL,
+ &tx_ring->phys_addr)))
+ return -ENOMEM;
+ memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);
+
+ /* Setup TX buffers */
+ for (i = 0; i < TX_DESCS; i++) {
+ struct tx_desc *desc = &(tx_ring)->desc[i];
+ tx_ring->buff_tab[i] = 0;
+
+ if (i == (TX_DESCS - 1))
+ desc->eor = 1;
+ desc->cown = 1;
+ }
+ __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
+ __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0);
+
+ return 0;
+}
+
+static void destroy_rings(struct sw *sw)
+{
+ int i;
+ if (sw->rx_ring->desc) {
+ for (i = 0; i < RX_DESCS; i++) {
+ struct _rx_ring *rx_ring = sw->rx_ring;
+ struct rx_desc *desc = &(rx_ring)->desc[i];
+ struct sk_buff *skb = sw->rx_ring->buff_tab[i];
+ if (skb) {
+ dma_unmap_single(NULL,
+ desc->sdp,
+ CNS3XXX_MAX_MTU, DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+ }
+ dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr);
+ dma_pool_destroy(rx_dma_pool);
+ rx_dma_pool = 0;
+ sw->rx_ring->desc = 0;
+ }
+ if (sw->tx_ring->desc) {
+ for (i = 0; i < TX_DESCS; i++) {
+ struct _tx_ring *tx_ring = sw->tx_ring;
+ struct tx_desc *desc = &(tx_ring)->desc[i];
+ struct sk_buff *skb = sw->tx_ring->buff_tab[i];
+ if (skb) {
+ dma_unmap_single(NULL, desc->sdp,
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ }
+ }
+ dma_pool_free(tx_dma_pool, sw->tx_ring->desc, sw->tx_ring->phys_addr);
+ dma_pool_destroy(tx_dma_pool);
+ tx_dma_pool = 0;
+ sw->tx_ring->desc = 0;
+ }
+}
+
+static int eth_open(struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ struct sw *sw = port->sw;
+ u32 temp;
+
+ port->speed = 0; /* force "link up" message */
+ phy_start(port->phydev);
+
+ netif_start_queue(dev);
+
+ if (!ports_open) {
+ request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev);
+ request_irq(IRQ_CNS3XXX_SW_STATUS, eth_stat_irq, IRQF_SHARED, "gig_stat", napi_dev);
+ napi_enable(&sw->napi);
+ netif_start_queue(napi_dev);
+
+ __raw_writel(~(MAC0_STATUS_CHANGE | MAC1_STATUS_CHANGE | MAC2_STATUS_CHANGE |
+ MAC0_RX_ERROR | MAC1_RX_ERROR | MAC2_RX_ERROR), &sw->regs->intr_mask);
+
+ temp = __raw_readl(&sw->regs->mac_cfg[2]);
+ temp &= ~(PORT_DISABLE);
+ __raw_writel(temp, &sw->regs->mac_cfg[2]);
+
+ temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
+ temp &= ~(TS_SUSPEND | FS_SUSPEND);
+ __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);
+
+ enable_rx_dma(sw);
+ }
+ temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+ temp &= ~(PORT_DISABLE);
+ __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+
+ ports_open++;
+ netif_carrier_on(dev);
+
+ return 0;
+}
+
+static int eth_close(struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ struct sw *sw = port->sw;
+ u32 temp;
+
+ ports_open--;
+
+ temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+ temp |= (PORT_DISABLE);
+ __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+
+ netif_stop_queue(dev);
+
+ phy_stop(port->phydev);
+
+ if (!ports_open) {
+ disable_irq(IRQ_CNS3XXX_SW_R0RXC);
+ free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev);
+ disable_irq(IRQ_CNS3XXX_SW_STATUS);
+ free_irq(IRQ_CNS3XXX_SW_STATUS, napi_dev);
+ napi_disable(&sw->napi);
+ netif_stop_queue(napi_dev);
+ temp = __raw_readl(&sw->regs->mac_cfg[2]);
+ temp |= (PORT_DISABLE);
+ __raw_writel(temp, &sw->regs->mac_cfg[2]);
+
+ __raw_writel(TS_SUSPEND | FS_SUSPEND,
+ &sw->regs->dma_auto_poll_cfg);
+ }
+
+ netif_carrier_off(dev);
+ return 0;
+}
+
+static void eth_rx_mode(struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ struct sw *sw = port->sw;
+ u32 temp;
+
+ temp = __raw_readl(&sw->regs->mac_glob_cfg);
+
+ if (dev->flags & IFF_PROMISC) {
+ if (port->id == 3)
+ temp |= ((1 << 2) << PROMISC_OFFSET);
+ else
+ temp |= ((1 << port->id) << PROMISC_OFFSET);
+ } else {
+ if (port->id == 3)
+ temp &= ~((1 << 2) << PROMISC_OFFSET);
+ else
+ temp &= ~((1 << port->id) << PROMISC_OFFSET);
+ }
+ __raw_writel(temp, &sw->regs->mac_glob_cfg);
+}
+
+static int eth_set_mac(struct net_device *netdev, void *p)
+{
+ struct port *port = netdev_priv(netdev);
+ struct sw *sw = port->sw;
+ struct sockaddr *addr = p;
+ u32 cycles = 0;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
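+ /*
+ * The switch forwards on ARL (address resolution) table entries,
+ * so a MAC change is two ARL commands: invalidate the old entry,
+ * then install the new one.  An entry is staged in arl_ctrl[0..2],
+ * the command is started via bit 19 of arl_vlan_cmd, and bit 21 is
+ * polled for completion (semantics inferred from the loops below).
+ */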
+ /* Invalidate old ARL Entry */
+ if (port->id == 3)
+ __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+ else
+ __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+ __raw_writel( ((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) |
+ (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])),
+ &sw->regs->arl_ctrl[1]);
+
+ __raw_writel( ((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) |
+ (1 << 1)),
+ &sw->regs->arl_ctrl[2]);
+ __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
+
+ while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
+ && cycles < 5000) {
+ udelay(1);
+ cycles++;
+ }
+
+ cycles = 0;
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ if (port->id == 3)
+ __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+ else
+ __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
+ __raw_writel( ((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) |
+ (addr->sa_data[2] << 8) | (addr->sa_data[3])),
+ &sw->regs->arl_ctrl[1]);
+
+ __raw_writel( ((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) |
+ (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]);
+ __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);
+
+ while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
+ && cycles < 5000) {
+ udelay(1);
+ cycles++;
+ }
+ return 0;
+}
+
+static const struct net_device_ops cns3xxx_netdev_ops = {
+ .ndo_open = eth_open,
+ .ndo_stop = eth_close,
+ .ndo_start_xmit = eth_xmit,
+ .ndo_set_rx_mode = eth_rx_mode,
+ .ndo_do_ioctl = eth_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_set_mac,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit eth_init_one(struct platform_device *pdev)
+{
+ int i;
+ struct port *port;
+ struct sw *sw;
+ struct net_device *dev;
+ struct cns3xxx_plat_info *plat = pdev->dev.platform_data;
+ u32 regs_phys;
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ int err;
+ u32 temp;
+
+ if (!(napi_dev = alloc_etherdev(sizeof(struct sw))))
+ return -ENOMEM;
+ strcpy(napi_dev->name, "switch%d");
+ napi_dev->features = NETIF_F_IP_CSUM | NETIF_F_SG;
+
+ SET_NETDEV_DEV(napi_dev, &pdev->dev);
+ sw = netdev_priv(napi_dev);
+ memset(sw, 0, sizeof(struct sw));
+ sw->regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
+ regs_phys = CNS3XXX_SWITCH_BASE;
+ sw->mem_res = request_mem_region(regs_phys, REGS_SIZE, napi_dev->name);
+ if (!sw->mem_res) {
+ err = -EBUSY;
+ goto err_free;
+ }
+
+ for (i = 0; i < 4; i++) {
+ temp = __raw_readl(&sw->regs->mac_cfg[i]);
+ temp |= (PORT_DISABLE);
+ __raw_writel(temp, &sw->regs->mac_cfg[i]);
+ }
+
+ temp = PORT_DISABLE;
+ __raw_writel(temp, &sw->regs->mac_cfg[2]);
+
+ temp = __raw_readl(&sw->regs->vlan_cfg);
+ temp |= NIC_MODE | VLAN_UNAWARE;
+ __raw_writel(temp, &sw->regs->vlan_cfg);
+
+ __raw_writel(UNKNOWN_VLAN_TO_CPU |
+ CRC_STRIPPING, &sw->regs->mac_glob_cfg);
+
+ if (!(sw->rx_ring = kmalloc(sizeof(struct _rx_ring), GFP_KERNEL))) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+ memset(sw->rx_ring, 0, sizeof(struct _rx_ring));
+
+ if (!(sw->tx_ring = kmalloc(sizeof(struct _tx_ring), GFP_KERNEL))) {
+ err = -ENOMEM;
+ goto err_free_rx;
+ }
+ memset(sw->tx_ring, 0, sizeof(struct _tx_ring));
+
+ if ((err = init_rings(sw)) != 0) {
+ destroy_rings(sw);
+ err = -ENOMEM;
+ goto err_free_rings;
+ }
+ platform_set_drvdata(pdev, napi_dev);
+
+ netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT);
+
+ for (i = 0; i < 3; i++) {
+ if (!(plat->ports & (1 << i))) {
+ continue;
+ }
+
+ if (!(dev = alloc_etherdev(sizeof(struct port)))) {
+ err = -ENOMEM;
+ goto free_ports;
+ }
+
+ port = netdev_priv(dev);
+ port->netdev = dev;
+ if (i == 2)
+ port->id = 3;
+ else
+ port->id = i;
+ port->sw = sw;
+
+ temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
+ temp |= (PORT_DISABLE | PORT_BLOCK_STATE | PORT_LEARN_DIS);
+ __raw_writel(temp, &sw->regs->mac_cfg[port->id]);
+
+ dev->netdev_ops = &cns3xxx_netdev_ops;
+ dev->ethtool_ops = &cns3xxx_ethtool_ops;
+ dev->tx_queue_len = 1000;
+ dev->features = NETIF_F_IP_CSUM | NETIF_F_SG;
+
+ switch_port_tab[port->id] = port;
+ memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN);
+
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]);
+ port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, 0,
+ PHY_INTERFACE_MODE_RGMII);
+ if (IS_ERR(port->phydev)) {
+ err = PTR_ERR(port->phydev);
+ switch_port_tab[port->id] = 0;
+ free_netdev(dev);
+ goto free_ports;
+ }
+
+ port->phydev->irq = PHY_IGNORE_INTERRUPT;
+
+ if ((err = register_netdev(dev))) {
+ phy_disconnect(port->phydev);
+ switch_port_tab[port->id] = 0;
+ free_netdev(dev);
+ goto free_ports;
+ }
+
+ printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]);
+ netif_carrier_off(dev);
+ dev = 0;
+ }
+
+ return 0;
+
+free_ports:
+ for (i = 3; i >= 0; i--) {
+ if (switch_port_tab[i]) {
+ port = switch_port_tab[i];
+ dev = port->netdev;
+ unregister_netdev(dev);
+ phy_disconnect(port->phydev);
+ switch_port_tab[i] = 0;
+ free_netdev(dev);
+ }
+ }
+err_free_rings:
+ kfree(sw->tx_ring);
+err_free_rx:
+ kfree(sw->rx_ring);
+err_free:
+ free_netdev(napi_dev);
+ return err;
+}
+
+static int __devexit eth_remove_one(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct sw *sw = netdev_priv(dev);
+ int i;
+ destroy_rings(sw);
+
+ for (i = 3; i >= 0; i--) {
+ if (switch_port_tab[i]) {
+ struct port *port = switch_port_tab[i];
+ struct net_device *dev = port->netdev;
+ unregister_netdev(dev);
+ phy_disconnect(port->phydev);
+ switch_port_tab[i] = 0;
+ free_netdev(dev);
+ }
+ }
+
+ release_resource(sw->mem_res);
+ free_netdev(napi_dev);
+ return 0;
+}
+
+static struct platform_driver cns3xxx_eth_driver = {
+ .driver.name = DRV_NAME,
+ .probe = eth_init_one,
+ .remove = eth_remove_one,
+};
+
+static int __init eth_init_module(void)
+{
+ int err;
+ if ((err = cns3xxx_mdio_register()))
+ return err;
+ return platform_driver_register(&cns3xxx_eth_driver);
+}
+
+static void __exit eth_cleanup_module(void)
+{
+ platform_driver_unregister(&cns3xxx_eth_driver);
+ cns3xxx_mdio_remove();
+}
+
+module_init(eth_init_module);
+module_exit(eth_cleanup_module);
+
+MODULE_AUTHOR("Chris Lang");
+MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cns3xxx_eth");
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -32,6 +32,7 @@ source "drivers/net/ethernet/calxeda/Kco
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
 source "drivers/net/ethernet/davicom/Kconfig"
 
 config DNET
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_BFIN) += adi/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
 obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
 obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -0,0 +1,24 @@
+config NET_VENDOR_CAVIUM
+ bool "Cavium devices"
+ default y
+ depends on ARCH_CNS3XXX
+ ---help---
+ If you have a network (Ethernet) chipset belonging to this class,
+ say Y.
+
+ Note that the answer to this question does not directly affect
+ the kernel: saying N will just cause the configurator to skip all
+ the questions about Cavium chipsets. If you say Y, you will be asked
+ for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_CAVIUM
+
+config CNS3XXX_ETH
+ tristate "Cavium CNS3xxx Ethernet support"
+ depends on ARCH_CNS3XXX
+ select PHYLIB
+ help
+ Say Y here if you want to use the built-in Ethernet ports
+ on the CNS3xxx processor.
+
+endif
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cavium ethernet device drivers.
+#
+
+obj-$(CONFIG_CNS3XXX_ETH) += cns3xxx_eth.o