netdev: pass the stuck queue to the timeout handler
drivers/net/ethernet/wiznet/w5100.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Ethernet driver for the WIZnet W5100 chip.
4 *
5 * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
6 * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/etherdevice.h>
13 #include <linux/platform_device.h>
14 #include <linux/platform_data/wiznet.h>
15 #include <linux/ethtool.h>
16 #include <linux/skbuff.h>
17 #include <linux/types.h>
18 #include <linux/errno.h>
19 #include <linux/delay.h>
20 #include <linux/slab.h>
21 #include <linux/spinlock.h>
22 #include <linux/io.h>
23 #include <linux/ioport.h>
24 #include <linux/interrupt.h>
25 #include <linux/irq.h>
26 #include <linux/gpio.h>
27
28 #include "w5100.h"
29
30 #define DRV_NAME "w5100"
31 #define DRV_VERSION "2012-04-04"
32
33 MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver v"DRV_VERSION);
34 MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
35 MODULE_ALIAS("platform:"DRV_NAME);
36 MODULE_LICENSE("GPL");
37
38 /*
39 * W5100/W5200/W5500 common registers
40 */
41 #define W5100_COMMON_REGS 0x0000
42 #define W5100_MR 0x0000 /* Mode Register */
43 #define MR_RST 0x80 /* S/W reset */
44 #define MR_PB 0x10 /* Ping block */
45 #define MR_AI 0x02 /* Address Auto-Increment */
46 #define MR_IND 0x01 /* Indirect mode */
47 #define W5100_SHAR 0x0009 /* Source MAC address */
48 #define W5100_IR 0x0015 /* Interrupt Register */
49 #define W5100_COMMON_REGS_LEN 0x0040
50
51 #define W5100_Sn_MR 0x0000 /* Sn Mode Register */
52 #define W5100_Sn_CR 0x0001 /* Sn Command Register */
53 #define W5100_Sn_IR 0x0002 /* Sn Interrupt Register */
54 #define W5100_Sn_SR 0x0003 /* Sn Status Register */
55 #define W5100_Sn_TX_FSR 0x0020 /* Sn Transmit free memory size */
56 #define W5100_Sn_TX_RD 0x0022 /* Sn Transmit memory read pointer */
57 #define W5100_Sn_TX_WR 0x0024 /* Sn Transmit memory write pointer */
59 #define W5100_Sn_RX_RSR 0x0026 /* Sn Received data size */
59 #define W5100_Sn_RX_RD 0x0028 /* Sn Receive memory read pointer */
60
61 #define S0_REGS(priv) ((priv)->s0_regs)
62
63 #define W5100_S0_MR(priv) (S0_REGS(priv) + W5100_Sn_MR)
64 #define S0_MR_MACRAW 0x04 /* MAC RAW mode */
65 #define S0_MR_MF 0x40 /* MAC Filter for W5100 and W5200 */
66 #define W5500_S0_MR_MF 0x80 /* MAC Filter for W5500 */
67 #define W5100_S0_CR(priv) (S0_REGS(priv) + W5100_Sn_CR)
68 #define S0_CR_OPEN 0x01 /* OPEN command */
69 #define S0_CR_CLOSE 0x10 /* CLOSE command */
70 #define S0_CR_SEND 0x20 /* SEND command */
71 #define S0_CR_RECV 0x40 /* RECV command */
72 #define W5100_S0_IR(priv) (S0_REGS(priv) + W5100_Sn_IR)
73 #define S0_IR_SENDOK 0x10 /* complete sending */
74 #define S0_IR_RECV 0x04 /* receiving data */
75 #define W5100_S0_SR(priv) (S0_REGS(priv) + W5100_Sn_SR)
76 #define S0_SR_MACRAW 0x42 /* mac raw mode */
77 #define W5100_S0_TX_FSR(priv) (S0_REGS(priv) + W5100_Sn_TX_FSR)
78 #define W5100_S0_TX_RD(priv) (S0_REGS(priv) + W5100_Sn_TX_RD)
79 #define W5100_S0_TX_WR(priv) (S0_REGS(priv) + W5100_Sn_TX_WR)
80 #define W5100_S0_RX_RSR(priv) (S0_REGS(priv) + W5100_Sn_RX_RSR)
81 #define W5100_S0_RX_RD(priv) (S0_REGS(priv) + W5100_Sn_RX_RD)
82
83 #define W5100_S0_REGS_LEN 0x0040
84
85 /*
86 * W5100 and W5200 common registers
87 */
88 #define W5100_IMR 0x0016 /* Interrupt Mask Register */
89 #define IR_S0 0x01 /* S0 interrupt */
90 #define W5100_RTR 0x0017 /* Retry Time-value Register */
91 #define RTR_DEFAULT 2000 /* =0x07d0 (2000) */
92
93 /*
94 * W5100 specific register and memory
95 */
96 #define W5100_RMSR 0x001a /* Receive Memory Size */
97 #define W5100_TMSR 0x001b /* Transmit Memory Size */
98
99 #define W5100_S0_REGS 0x0400
100
101 #define W5100_TX_MEM_START 0x4000
102 #define W5100_TX_MEM_SIZE 0x2000
103 #define W5100_RX_MEM_START 0x6000
104 #define W5100_RX_MEM_SIZE 0x2000
105
106 /*
107 * W5200 specific register and memory
108 */
109 #define W5200_S0_REGS 0x4000
110
111 #define W5200_Sn_RXMEM_SIZE(n) (0x401e + (n) * 0x0100) /* Sn RX Memory Size */
112 #define W5200_Sn_TXMEM_SIZE(n) (0x401f + (n) * 0x0100) /* Sn TX Memory Size */
113
114 #define W5200_TX_MEM_START 0x8000
115 #define W5200_TX_MEM_SIZE 0x4000
116 #define W5200_RX_MEM_START 0xc000
117 #define W5200_RX_MEM_SIZE 0x4000
118
119 /*
120 * W5500 specific register and memory
121 *
122 * W5500 registers and memory are organized into multiple blocks. Each block
123 * is selected by a 16-bit offset address and 5 block-select bits, so we
124 * encode both into a 32-bit address (the lower 16 bits are the offset
125 * address and the upper 16 bits are the block-select bits).
126 */
127 #define W5500_SIMR 0x0018 /* Socket Interrupt Mask Register */
128 #define W5500_RTR 0x0019 /* Retry Time-value Register */
129
130 #define W5500_S0_REGS 0x10000
131
132 #define W5500_Sn_RXMEM_SIZE(n) \
133 (0x1001e + (n) * 0x40000) /* Sn RX Memory Size */
134 #define W5500_Sn_TXMEM_SIZE(n) \
135 (0x1001f + (n) * 0x40000) /* Sn TX Memory Size */
136
137 #define W5500_TX_MEM_START 0x20000
138 #define W5500_TX_MEM_SIZE 0x04000
139 #define W5500_RX_MEM_START 0x30000
140 #define W5500_RX_MEM_SIZE 0x04000
141
142 /*
143 * Device driver private data structure
144 */
145
146 struct w5100_priv {
147 const struct w5100_ops *ops;
148
149 /* Socket 0 register offset address */
150 u32 s0_regs;
151 /* Socket 0 TX buffer offset address and size */
152 u32 s0_tx_buf;
153 u16 s0_tx_buf_size;
154 /* Socket 0 RX buffer offset address and size */
155 u32 s0_rx_buf;
156 u16 s0_rx_buf_size;
157
158 int irq;
159 int link_irq;
160 int link_gpio;
161
162 struct napi_struct napi;
163 struct net_device *ndev;
164 bool promisc;
165 u32 msg_enable;
166
167 struct workqueue_struct *xfer_wq;
168 struct work_struct rx_work;
169 struct sk_buff *tx_skb;
170 struct work_struct tx_work;
171 struct work_struct setrx_work;
172 struct work_struct restart_work;
173 };
174
175 /************************************************************************
176 *
177 * Lowlevel I/O functions
178 *
179 ***********************************************************************/
180
181 struct w5100_mmio_priv {
182 void __iomem *base;
183 /* Serialize access in indirect address mode */
184 spinlock_t reg_lock;
185 };
186
187 static inline struct w5100_mmio_priv *w5100_mmio_priv(struct net_device *dev)
188 {
189 return w5100_ops_priv(dev);
190 }
191
192 static inline void __iomem *w5100_mmio(struct net_device *ndev)
193 {
194 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
195
196 return mmio_priv->base;
197 }
198
199 /*
200 * In direct address mode the host system accesses W5100 registers directly
201 * after they are mapped into memory-mapped I/O space.
202 *
203 * 0x8000 bytes of memory space are required.
204 */
205 static inline int w5100_read_direct(struct net_device *ndev, u32 addr)
206 {
207 return ioread8(w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
208 }
209
210 static inline int __w5100_write_direct(struct net_device *ndev, u32 addr,
211 u8 data)
212 {
213 iowrite8(data, w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
214
215 return 0;
216 }
217
218 static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
219 {
220 __w5100_write_direct(ndev, addr, data);
221
222 return 0;
223 }
224
225 static int w5100_read16_direct(struct net_device *ndev, u32 addr)
226 {
227 u16 data;
228 data = w5100_read_direct(ndev, addr) << 8;
229 data |= w5100_read_direct(ndev, addr + 1);
230 return data;
231 }
232
233 static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data)
234 {
235 __w5100_write_direct(ndev, addr, data >> 8);
236 __w5100_write_direct(ndev, addr + 1, data);
237
238 return 0;
239 }
240
241 static int w5100_readbulk_direct(struct net_device *ndev, u32 addr, u8 *buf,
242 int len)
243 {
244 int i;
245
246 for (i = 0; i < len; i++, addr++)
247 *buf++ = w5100_read_direct(ndev, addr);
248
249 return 0;
250 }
251
252 static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
253 const u8 *buf, int len)
254 {
255 int i;
256
257 for (i = 0; i < len; i++, addr++)
258 __w5100_write_direct(ndev, addr, *buf++);
259
260 return 0;
261 }
262
263 static int w5100_mmio_init(struct net_device *ndev)
264 {
265 struct platform_device *pdev = to_platform_device(ndev->dev.parent);
266 struct w5100_priv *priv = netdev_priv(ndev);
267 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
268 struct resource *mem;
269
270 spin_lock_init(&mmio_priv->reg_lock);
271
272 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
273 mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem);
274 if (IS_ERR(mmio_priv->base))
275 return PTR_ERR(mmio_priv->base);
276
277 netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq);
278
279 return 0;
280 }
281
282 static const struct w5100_ops w5100_mmio_direct_ops = {
283 .chip_id = W5100,
284 .read = w5100_read_direct,
285 .write = w5100_write_direct,
286 .read16 = w5100_read16_direct,
287 .write16 = w5100_write16_direct,
288 .readbulk = w5100_readbulk_direct,
289 .writebulk = w5100_writebulk_direct,
290 .init = w5100_mmio_init,
291 };
292
293 /*
294 * In indirect address mode the host system accesses registers through the
295 * Indirect Mode Address Register (IDM_AR) and the Indirect Mode Data
296 * Register (IDM_DR), both of which are mapped directly into memory-mapped
297 * I/O space. The Mode Register (MR) remains directly accessible.
298 *
299 * Only 0x04 bytes of memory space are required.
300 */
301 #define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */
302 #define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */
303
304 static int w5100_read_indirect(struct net_device *ndev, u32 addr)
305 {
306 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
307 unsigned long flags;
308 u8 data;
309
310 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
311 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
312 data = w5100_read_direct(ndev, W5100_IDM_DR);
313 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
314
315 return data;
316 }
317
318 static int w5100_write_indirect(struct net_device *ndev, u32 addr, u8 data)
319 {
320 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
321 unsigned long flags;
322
323 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
324 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
325 w5100_write_direct(ndev, W5100_IDM_DR, data);
326 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
327
328 return 0;
329 }
330
331 static int w5100_read16_indirect(struct net_device *ndev, u32 addr)
332 {
333 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
334 unsigned long flags;
335 u16 data;
336
337 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
338 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
339 data = w5100_read_direct(ndev, W5100_IDM_DR) << 8;
340 data |= w5100_read_direct(ndev, W5100_IDM_DR);
341 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
342
343 return data;
344 }
345
346 static int w5100_write16_indirect(struct net_device *ndev, u32 addr, u16 data)
347 {
348 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
349 unsigned long flags;
350
351 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
352 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
353 __w5100_write_direct(ndev, W5100_IDM_DR, data >> 8);
354 w5100_write_direct(ndev, W5100_IDM_DR, data);
355 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
356
357 return 0;
358 }
359
360 static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf,
361 int len)
362 {
363 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
364 unsigned long flags;
365 int i;
366
367 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
368 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
369
370 for (i = 0; i < len; i++)
371 *buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
372
373 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
374
375 return 0;
376 }
377
378 static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr,
379 const u8 *buf, int len)
380 {
381 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
382 unsigned long flags;
383 int i;
384
385 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
386 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
387
388 for (i = 0; i < len; i++)
389 __w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
390
391 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
392
393 return 0;
394 }
395
396 static int w5100_reset_indirect(struct net_device *ndev)
397 {
398 w5100_write_direct(ndev, W5100_MR, MR_RST);
399 mdelay(5);
400 w5100_write_direct(ndev, W5100_MR, MR_PB | MR_AI | MR_IND);
401
402 return 0;
403 }
404
405 static const struct w5100_ops w5100_mmio_indirect_ops = {
406 .chip_id = W5100,
407 .read = w5100_read_indirect,
408 .write = w5100_write_indirect,
409 .read16 = w5100_read16_indirect,
410 .write16 = w5100_write16_indirect,
411 .readbulk = w5100_readbulk_indirect,
412 .writebulk = w5100_writebulk_indirect,
413 .init = w5100_mmio_init,
414 .reset = w5100_reset_indirect,
415 };
416
417 #if defined(CONFIG_WIZNET_BUS_DIRECT)
418
419 static int w5100_read(struct w5100_priv *priv, u32 addr)
420 {
421 return w5100_read_direct(priv->ndev, addr);
422 }
423
424 static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
425 {
426 return w5100_write_direct(priv->ndev, addr, data);
427 }
428
429 static int w5100_read16(struct w5100_priv *priv, u32 addr)
430 {
431 return w5100_read16_direct(priv->ndev, addr);
432 }
433
434 static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
435 {
436 return w5100_write16_direct(priv->ndev, addr, data);
437 }
438
439 static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
440 {
441 return w5100_readbulk_direct(priv->ndev, addr, buf, len);
442 }
443
444 static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
445 int len)
446 {
447 return w5100_writebulk_direct(priv->ndev, addr, buf, len);
448 }
449
450 #elif defined(CONFIG_WIZNET_BUS_INDIRECT)
451
452 static int w5100_read(struct w5100_priv *priv, u32 addr)
453 {
454 return w5100_read_indirect(priv->ndev, addr);
455 }
456
457 static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
458 {
459 return w5100_write_indirect(priv->ndev, addr, data);
460 }
461
462 static int w5100_read16(struct w5100_priv *priv, u32 addr)
463 {
464 return w5100_read16_indirect(priv->ndev, addr);
465 }
466
467 static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
468 {
469 return w5100_write16_indirect(priv->ndev, addr, data);
470 }
471
472 static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
473 {
474 return w5100_readbulk_indirect(priv->ndev, addr, buf, len);
475 }
476
477 static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
478 int len)
479 {
480 return w5100_writebulk_indirect(priv->ndev, addr, buf, len);
481 }
482
483 #else /* CONFIG_WIZNET_BUS_ANY */
484
485 static int w5100_read(struct w5100_priv *priv, u32 addr)
486 {
487 return priv->ops->read(priv->ndev, addr);
488 }
489
490 static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
491 {
492 return priv->ops->write(priv->ndev, addr, data);
493 }
494
495 static int w5100_read16(struct w5100_priv *priv, u32 addr)
496 {
497 return priv->ops->read16(priv->ndev, addr);
498 }
499
500 static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
501 {
502 return priv->ops->write16(priv->ndev, addr, data);
503 }
504
505 static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
506 {
507 return priv->ops->readbulk(priv->ndev, addr, buf, len);
508 }
509
510 static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
511 int len)
512 {
513 return priv->ops->writebulk(priv->ndev, addr, buf, len);
514 }
515
516 #endif
517
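/* Copy @len bytes from the socket 0 RX ring at @offset into @buf. If the
 * read runs past the end of the ring, it wraps around and continues from
 * the start of the buffer.
 */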
518 static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len)
519 {
520 u32 addr;
521 int remain = 0;
522 int ret;
523 const u32 mem_start = priv->s0_rx_buf;
524 const u16 mem_size = priv->s0_rx_buf_size;
525
526 offset %= mem_size;
527 addr = mem_start + offset;
528
529 if (offset + len > mem_size) {
530 remain = (offset + len) % mem_size;
531 len = mem_size - offset;
532 }
533
534 ret = w5100_readbulk(priv, addr, buf, len);
535 if (ret || !remain)
536 return ret;
537
538 return w5100_readbulk(priv, mem_start, buf + len, remain);
539 }
540
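/* Counterpart of w5100_readbuf() for the socket 0 TX ring: copy @len bytes
 * from @buf into the ring at @offset, wrapping at the end of the buffer.
 */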
541 static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf,
542 int len)
543 {
544 u32 addr;
545 int ret;
546 int remain = 0;
547 const u32 mem_start = priv->s0_tx_buf;
548 const u16 mem_size = priv->s0_tx_buf_size;
549
550 offset %= mem_size;
551 addr = mem_start + offset;
552
553 if (offset + len > mem_size) {
554 remain = (offset + len) % mem_size;
555 len = mem_size - offset;
556 }
557
558 ret = w5100_writebulk(priv, addr, buf, len);
559 if (ret || !remain)
560 return ret;
561
562 return w5100_writebulk(priv, mem_start, buf + len, remain);
563 }
564
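/* Default software reset: pulse MR_RST, then re-enable ping blocking.
 * Bus variants that need extra mode bits (e.g. the indirect MMIO bus,
 * which must restore MR_AI | MR_IND) override this via ops->reset.
 */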
565 static int w5100_reset(struct w5100_priv *priv)
566 {
567 if (priv->ops->reset)
568 return priv->ops->reset(priv->ndev);
569
570 w5100_write(priv, W5100_MR, MR_RST);
571 mdelay(5);
572 w5100_write(priv, W5100_MR, MR_PB);
573
574 return 0;
575 }
576
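/* Issue a socket 0 command and busy-wait for the chip to clear the command
 * register, which indicates the command has been accepted. Give up after
 * roughly 100 ms.
 */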
577 static int w5100_command(struct w5100_priv *priv, u16 cmd)
578 {
579 unsigned long timeout;
580
581 w5100_write(priv, W5100_S0_CR(priv), cmd);
582
583 timeout = jiffies + msecs_to_jiffies(100);
584
585 while (w5100_read(priv, W5100_S0_CR(priv)) != 0) {
586 if (time_after(jiffies, timeout))
587 return -EIO;
588 cpu_relax();
589 }
590
591 return 0;
592 }
593
594 static void w5100_write_macaddr(struct w5100_priv *priv)
595 {
596 struct net_device *ndev = priv->ndev;
597
598 w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN);
599 }
600
601 static void w5100_socket_intr_mask(struct w5100_priv *priv, u8 mask)
602 {
603 u32 imr;
604
605 if (priv->ops->chip_id == W5500)
606 imr = W5500_SIMR;
607 else
608 imr = W5100_IMR;
609
610 w5100_write(priv, imr, mask);
611 }
612
613 static void w5100_enable_intr(struct w5100_priv *priv)
614 {
615 w5100_socket_intr_mask(priv, IR_S0);
616 }
617
618 static void w5100_disable_intr(struct w5100_priv *priv)
619 {
620 w5100_socket_intr_mask(priv, 0);
621 }
622
623 static void w5100_memory_configure(struct w5100_priv *priv)
624 {
625 /* Configure 16K of internal memory
626 * as 8K RX buffer and 8K TX buffer
627 */
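/* An RMSR/TMSR value of 0x03 assigns an 8KB buffer to socket 0, i.e. the
 * chip's entire RX/TX memory.
 */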
628 w5100_write(priv, W5100_RMSR, 0x03);
629 w5100_write(priv, W5100_TMSR, 0x03);
630 }
631
632 static void w5200_memory_configure(struct w5100_priv *priv)
633 {
634 int i;
635
636 /* Configure internal RX memory as 16K RX buffer and
637 * internal TX memory as 16K TX buffer
638 */
639 w5100_write(priv, W5200_Sn_RXMEM_SIZE(0), 0x10);
640 w5100_write(priv, W5200_Sn_TXMEM_SIZE(0), 0x10);
641
642 for (i = 1; i < 8; i++) {
643 w5100_write(priv, W5200_Sn_RXMEM_SIZE(i), 0);
644 w5100_write(priv, W5200_Sn_TXMEM_SIZE(i), 0);
645 }
646 }
647
648 static void w5500_memory_configure(struct w5100_priv *priv)
649 {
650 int i;
651
652 /* Configure internal RX memory as 16K RX buffer and
653 * internal TX memory as 16K TX buffer
654 */
655 w5100_write(priv, W5500_Sn_RXMEM_SIZE(0), 0x10);
656 w5100_write(priv, W5500_Sn_TXMEM_SIZE(0), 0x10);
657
658 for (i = 1; i < 8; i++) {
659 w5100_write(priv, W5500_Sn_RXMEM_SIZE(i), 0);
660 w5100_write(priv, W5500_Sn_TXMEM_SIZE(i), 0);
661 }
662 }
663
664 static int w5100_hw_reset(struct w5100_priv *priv)
665 {
666 u32 rtr;
667
668 w5100_reset(priv);
669
670 w5100_disable_intr(priv);
671 w5100_write_macaddr(priv);
672
673 switch (priv->ops->chip_id) {
674 case W5100:
675 w5100_memory_configure(priv);
676 rtr = W5100_RTR;
677 break;
678 case W5200:
679 w5200_memory_configure(priv);
680 rtr = W5100_RTR;
681 break;
682 case W5500:
683 w5500_memory_configure(priv);
684 rtr = W5500_RTR;
685 break;
686 default:
687 return -EINVAL;
688 }
689
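/* Reading back the retry-time register's reset default doubles as a
 * check that the chip is actually present and responding.
 */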
690 if (w5100_read16(priv, rtr) != RTR_DEFAULT)
691 return -ENODEV;
692
693 return 0;
694 }
695
696 static void w5100_hw_start(struct w5100_priv *priv)
697 {
698 u8 mode = S0_MR_MACRAW;
699
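/* Open socket 0 in MAC RAW mode. Unless promiscuous mode is requested,
 * also enable the MAC filter so the chip drops frames not addressed to
 * this interface.
 */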
700 if (!priv->promisc) {
701 if (priv->ops->chip_id == W5500)
702 mode |= W5500_S0_MR_MF;
703 else
704 mode |= S0_MR_MF;
705 }
706
707 w5100_write(priv, W5100_S0_MR(priv), mode);
708 w5100_command(priv, S0_CR_OPEN);
709 w5100_enable_intr(priv);
710 }
711
712 static void w5100_hw_close(struct w5100_priv *priv)
713 {
714 w5100_disable_intr(priv);
715 w5100_command(priv, S0_CR_CLOSE);
716 }
717
718 /***********************************************************************
719 *
720 * Device driver functions / callbacks
721 *
722 ***********************************************************************/
723
724 static void w5100_get_drvinfo(struct net_device *ndev,
725 struct ethtool_drvinfo *info)
726 {
727 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
728 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
729 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
730 sizeof(info->bus_info));
731 }
732
733 static u32 w5100_get_link(struct net_device *ndev)
734 {
735 struct w5100_priv *priv = netdev_priv(ndev);
736
737 if (gpio_is_valid(priv->link_gpio))
738 return !!gpio_get_value(priv->link_gpio);
739
740 return 1;
741 }
742
743 static u32 w5100_get_msglevel(struct net_device *ndev)
744 {
745 struct w5100_priv *priv = netdev_priv(ndev);
746
747 return priv->msg_enable;
748 }
749
750 static void w5100_set_msglevel(struct net_device *ndev, u32 value)
751 {
752 struct w5100_priv *priv = netdev_priv(ndev);
753
754 priv->msg_enable = value;
755 }
756
757 static int w5100_get_regs_len(struct net_device *ndev)
758 {
759 return W5100_COMMON_REGS_LEN + W5100_S0_REGS_LEN;
760 }
761
762 static void w5100_get_regs(struct net_device *ndev,
763 struct ethtool_regs *regs, void *buf)
764 {
765 struct w5100_priv *priv = netdev_priv(ndev);
766
767 regs->version = 1;
768 w5100_readbulk(priv, W5100_COMMON_REGS, buf, W5100_COMMON_REGS_LEN);
769 buf += W5100_COMMON_REGS_LEN;
770 w5100_readbulk(priv, S0_REGS(priv), buf, W5100_S0_REGS_LEN);
771 }
772
773 static void w5100_restart(struct net_device *ndev)
774 {
775 struct w5100_priv *priv = netdev_priv(ndev);
776
777 netif_stop_queue(ndev);
778 w5100_hw_reset(priv);
779 w5100_hw_start(priv);
780 ndev->stats.tx_errors++;
781 netif_trans_update(ndev);
782 netif_wake_queue(ndev);
783 }
784
785 static void w5100_restart_work(struct work_struct *work)
786 {
787 struct w5100_priv *priv = container_of(work, struct w5100_priv,
788 restart_work);
789
790 w5100_restart(priv->ndev);
791 }
792
793 static void w5100_tx_timeout(struct net_device *ndev, unsigned int txqueue)
794 {
795 struct w5100_priv *priv = netdev_priv(ndev);
796
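/* Restarting the hardware requires bus I/O that may sleep on some buses
 * (e.g. SPI), so defer it to the restart work item; memory-mapped buses
 * can be restarted right here.
 */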
797 if (priv->ops->may_sleep)
798 schedule_work(&priv->restart_work);
799 else
800 w5100_restart(ndev);
801 }
802
803 static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb)
804 {
805 struct w5100_priv *priv = netdev_priv(ndev);
806 u16 offset;
807
808 offset = w5100_read16(priv, W5100_S0_TX_WR(priv));
809 w5100_writebuf(priv, offset, skb->data, skb->len);
810 w5100_write16(priv, W5100_S0_TX_WR(priv), offset + skb->len);
811 ndev->stats.tx_bytes += skb->len;
812 ndev->stats.tx_packets++;
813 dev_kfree_skb(skb);
814
815 w5100_command(priv, S0_CR_SEND);
816 }
817
818 static void w5100_tx_work(struct work_struct *work)
819 {
820 struct w5100_priv *priv = container_of(work, struct w5100_priv,
821 tx_work);
822 struct sk_buff *skb = priv->tx_skb;
823
824 priv->tx_skb = NULL;
825
826 if (WARN_ON(!skb))
827 return;
828 w5100_tx_skb(priv->ndev, skb);
829 }
830
831 static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
832 {
833 struct w5100_priv *priv = netdev_priv(ndev);
834
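/* Only one frame is in flight at a time: stop the queue now and let the
 * SENDOK interrupt wake it once the chip has finished transmitting.
 */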
835 netif_stop_queue(ndev);
836
837 if (priv->ops->may_sleep) {
838 WARN_ON(priv->tx_skb);
839 priv->tx_skb = skb;
840 queue_work(priv->xfer_wq, &priv->tx_work);
841 } else {
842 w5100_tx_skb(ndev, skb);
843 }
844
845 return NETDEV_TX_OK;
846 }
847
848 static struct sk_buff *w5100_rx_skb(struct net_device *ndev)
849 {
850 struct w5100_priv *priv = netdev_priv(ndev);
851 struct sk_buff *skb;
852 u16 rx_len;
853 u16 offset;
854 u8 header[2];
855 u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR(priv));
856
857 if (rx_buf_len == 0)
858 return NULL;
859
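/* Each MACRAW frame is preceded by a 2-byte header holding the total
 * frame length including the header itself, hence the "- 2" below.
 */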
860 offset = w5100_read16(priv, W5100_S0_RX_RD(priv));
861 w5100_readbuf(priv, offset, header, 2);
862 rx_len = get_unaligned_be16(header) - 2;
863
864 skb = netdev_alloc_skb_ip_align(ndev, rx_len);
865 if (unlikely(!skb)) {
866 w5100_write16(priv, W5100_S0_RX_RD(priv), offset + rx_buf_len);
867 w5100_command(priv, S0_CR_RECV);
868 ndev->stats.rx_dropped++;
869 return NULL;
870 }
871
872 skb_put(skb, rx_len);
873 w5100_readbuf(priv, offset + 2, skb->data, rx_len);
874 w5100_write16(priv, W5100_S0_RX_RD(priv), offset + 2 + rx_len);
875 w5100_command(priv, S0_CR_RECV);
876 skb->protocol = eth_type_trans(skb, ndev);
877
878 ndev->stats.rx_packets++;
879 ndev->stats.rx_bytes += rx_len;
880
881 return skb;
882 }
883
884 static void w5100_rx_work(struct work_struct *work)
885 {
886 struct w5100_priv *priv = container_of(work, struct w5100_priv,
887 rx_work);
888 struct sk_buff *skb;
889
890 while ((skb = w5100_rx_skb(priv->ndev)))
891 netif_rx_ni(skb);
892
893 w5100_enable_intr(priv);
894 }
895
896 static int w5100_napi_poll(struct napi_struct *napi, int budget)
897 {
898 struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
899 int rx_count;
900
901 for (rx_count = 0; rx_count < budget; rx_count++) {
902 struct sk_buff *skb = w5100_rx_skb(priv->ndev);
903
904 if (skb)
905 netif_receive_skb(skb);
906 else
907 break;
908 }
909
910 if (rx_count < budget) {
911 napi_complete_done(napi, rx_count);
912 w5100_enable_intr(priv);
913 }
914
915 return rx_count;
916 }
917
918 static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
919 {
920 struct net_device *ndev = ndev_instance;
921 struct w5100_priv *priv = netdev_priv(ndev);
922
923 int ir = w5100_read(priv, W5100_S0_IR(priv));
924 if (!ir)
925 return IRQ_NONE;
926 w5100_write(priv, W5100_S0_IR(priv), ir);
927
928 if (ir & S0_IR_SENDOK) {
929 netif_dbg(priv, tx_done, ndev, "tx done\n");
930 netif_wake_queue(ndev);
931 }
932
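/* Mask the chip interrupt while receive processing runs; it is re-enabled
 * by the RX work item or by NAPI once the ring is drained. Buses whose
 * I/O may sleep cannot run NAPI polling, so they defer to the transfer
 * workqueue instead.
 */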
933 if (ir & S0_IR_RECV) {
934 w5100_disable_intr(priv);
935
936 if (priv->ops->may_sleep)
937 queue_work(priv->xfer_wq, &priv->rx_work);
938 else if (napi_schedule_prep(&priv->napi))
939 __napi_schedule(&priv->napi);
940 }
941
942 return IRQ_HANDLED;
943 }
944
945 static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
946 {
947 struct net_device *ndev = ndev_instance;
948 struct w5100_priv *priv = netdev_priv(ndev);
949
950 if (netif_running(ndev)) {
951 if (gpio_get_value(priv->link_gpio) != 0) {
952 netif_info(priv, link, ndev, "link is up\n");
953 netif_carrier_on(ndev);
954 } else {
955 netif_info(priv, link, ndev, "link is down\n");
956 netif_carrier_off(ndev);
957 }
958 }
959
960 return IRQ_HANDLED;
961 }
962
963 static void w5100_setrx_work(struct work_struct *work)
964 {
965 struct w5100_priv *priv = container_of(work, struct w5100_priv,
966 setrx_work);
967
968 w5100_hw_start(priv);
969 }
970
971 static void w5100_set_rx_mode(struct net_device *ndev)
972 {
973 struct w5100_priv *priv = netdev_priv(ndev);
974 bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
975
976 if (priv->promisc != set_promisc) {
977 priv->promisc = set_promisc;
978
979 if (priv->ops->may_sleep)
980 schedule_work(&priv->setrx_work);
981 else
982 w5100_hw_start(priv);
983 }
984 }
985
986 static int w5100_set_macaddr(struct net_device *ndev, void *addr)
987 {
988 struct w5100_priv *priv = netdev_priv(ndev);
989 struct sockaddr *sock_addr = addr;
990
991 if (!is_valid_ether_addr(sock_addr->sa_data))
992 return -EADDRNOTAVAIL;
993 memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
994 w5100_write_macaddr(priv);
995 return 0;
996 }
997
998 static int w5100_open(struct net_device *ndev)
999 {
1000 struct w5100_priv *priv = netdev_priv(ndev);
1001
1002 netif_info(priv, ifup, ndev, "enabling\n");
1003 w5100_hw_start(priv);
1004 napi_enable(&priv->napi);
1005 netif_start_queue(ndev);
1006 if (!gpio_is_valid(priv->link_gpio) ||
1007 gpio_get_value(priv->link_gpio) != 0)
1008 netif_carrier_on(ndev);
1009 return 0;
1010 }
1011
1012 static int w5100_stop(struct net_device *ndev)
1013 {
1014 struct w5100_priv *priv = netdev_priv(ndev);
1015
1016 netif_info(priv, ifdown, ndev, "shutting down\n");
1017 w5100_hw_close(priv);
1018 netif_carrier_off(ndev);
1019 netif_stop_queue(ndev);
1020 napi_disable(&priv->napi);
1021 return 0;
1022 }
1023
1024 static const struct ethtool_ops w5100_ethtool_ops = {
1025 .get_drvinfo = w5100_get_drvinfo,
1026 .get_msglevel = w5100_get_msglevel,
1027 .set_msglevel = w5100_set_msglevel,
1028 .get_link = w5100_get_link,
1029 .get_regs_len = w5100_get_regs_len,
1030 .get_regs = w5100_get_regs,
1031 };
1032
1033 static const struct net_device_ops w5100_netdev_ops = {
1034 .ndo_open = w5100_open,
1035 .ndo_stop = w5100_stop,
1036 .ndo_start_xmit = w5100_start_tx,
1037 .ndo_tx_timeout = w5100_tx_timeout,
1038 .ndo_set_rx_mode = w5100_set_rx_mode,
1039 .ndo_set_mac_address = w5100_set_macaddr,
1040 .ndo_validate_addr = eth_validate_addr,
1041 };
1042
1043 static int w5100_mmio_probe(struct platform_device *pdev)
1044 {
1045 struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
1046 const void *mac_addr = NULL;
1047 struct resource *mem;
1048 const struct w5100_ops *ops;
1049 int irq;
1050
1051 if (data && is_valid_ether_addr(data->mac_addr))
1052 mac_addr = data->mac_addr;
1053
1054 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1055 if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
1056 ops = &w5100_mmio_indirect_ops;
1057 else
1058 ops = &w5100_mmio_direct_ops;
1059
1060 irq = platform_get_irq(pdev, 0);
1061 if (irq < 0)
1062 return irq;
1063
1064 return w5100_probe(&pdev->dev, ops, sizeof(struct w5100_mmio_priv),
1065 mac_addr, irq, data ? data->link_gpio : -EINVAL);
1066 }
1067
1068 static int w5100_mmio_remove(struct platform_device *pdev)
1069 {
1070 return w5100_remove(&pdev->dev);
1071 }
1072
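/* Bus-specific private data is carved out of the netdev private area,
 * directly after the NETDEV_ALIGN-aligned struct w5100_priv; see the
 * alloc_size calculation in w5100_probe().
 */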
1073 void *w5100_ops_priv(const struct net_device *ndev)
1074 {
1075 return netdev_priv(ndev) +
1076 ALIGN(sizeof(struct w5100_priv), NETDEV_ALIGN);
1077 }
1078 EXPORT_SYMBOL_GPL(w5100_ops_priv);
1079
1080 int w5100_probe(struct device *dev, const struct w5100_ops *ops,
1081 int sizeof_ops_priv, const void *mac_addr, int irq,
1082 int link_gpio)
1083 {
1084 struct w5100_priv *priv;
1085 struct net_device *ndev;
1086 int err;
1087 size_t alloc_size;
1088
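/* Reserve room for struct w5100_priv plus the bus-specific private area,
 * padded so the latter can start on a NETDEV_ALIGN boundary (see
 * w5100_ops_priv()).
 */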
1089 alloc_size = sizeof(*priv);
1090 if (sizeof_ops_priv) {
1091 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
1092 alloc_size += sizeof_ops_priv;
1093 }
1094 alloc_size += NETDEV_ALIGN - 1;
1095
1096 ndev = alloc_etherdev(alloc_size);
1097 if (!ndev)
1098 return -ENOMEM;
1099 SET_NETDEV_DEV(ndev, dev);
1100 dev_set_drvdata(dev, ndev);
1101 priv = netdev_priv(ndev);
1102
1103 switch (ops->chip_id) {
1104 case W5100:
1105 priv->s0_regs = W5100_S0_REGS;
1106 priv->s0_tx_buf = W5100_TX_MEM_START;
1107 priv->s0_tx_buf_size = W5100_TX_MEM_SIZE;
1108 priv->s0_rx_buf = W5100_RX_MEM_START;
1109 priv->s0_rx_buf_size = W5100_RX_MEM_SIZE;
1110 break;
1111 case W5200:
1112 priv->s0_regs = W5200_S0_REGS;
1113 priv->s0_tx_buf = W5200_TX_MEM_START;
1114 priv->s0_tx_buf_size = W5200_TX_MEM_SIZE;
1115 priv->s0_rx_buf = W5200_RX_MEM_START;
1116 priv->s0_rx_buf_size = W5200_RX_MEM_SIZE;
1117 break;
1118 case W5500:
1119 priv->s0_regs = W5500_S0_REGS;
1120 priv->s0_tx_buf = W5500_TX_MEM_START;
1121 priv->s0_tx_buf_size = W5500_TX_MEM_SIZE;
1122 priv->s0_rx_buf = W5500_RX_MEM_START;
1123 priv->s0_rx_buf_size = W5500_RX_MEM_SIZE;
1124 break;
1125 default:
1126 err = -EINVAL;
1127 goto err_register;
1128 }
1129
1130 priv->ndev = ndev;
1131 priv->ops = ops;
1132 priv->irq = irq;
1133 priv->link_gpio = link_gpio;
1134
1135 ndev->netdev_ops = &w5100_netdev_ops;
1136 ndev->ethtool_ops = &w5100_ethtool_ops;
1137 netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);
1138
1139 /* This chip doesn't support VLAN packets with normal MTU,
1140 * so disable VLAN for this device.
1141 */
1142 ndev->features |= NETIF_F_VLAN_CHALLENGED;
1143
1144 err = register_netdev(ndev);
1145 if (err < 0)
1146 goto err_register;
1147
1148 priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
1149 netdev_name(ndev));
1150 if (!priv->xfer_wq) {
1151 err = -ENOMEM;
1152 goto err_wq;
1153 }
1154
1155 INIT_WORK(&priv->rx_work, w5100_rx_work);
1156 INIT_WORK(&priv->tx_work, w5100_tx_work);
1157 INIT_WORK(&priv->setrx_work, w5100_setrx_work);
1158 INIT_WORK(&priv->restart_work, w5100_restart_work);
1159
1160 if (!IS_ERR_OR_NULL(mac_addr))
1161 memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
1162 else
1163 eth_hw_addr_random(ndev);
1164
1165 if (priv->ops->init) {
1166 err = priv->ops->init(priv->ndev);
1167 if (err)
1168 goto err_hw;
1169 }
1170
1171 err = w5100_hw_reset(priv);
1172 if (err)
1173 goto err_hw;
1174
1175 if (ops->may_sleep) {
1176 err = request_threaded_irq(priv->irq, NULL, w5100_interrupt,
1177 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
1178 netdev_name(ndev), ndev);
1179 } else {
1180 err = request_irq(priv->irq, w5100_interrupt,
1181 IRQF_TRIGGER_LOW, netdev_name(ndev), ndev);
1182 }
1183 if (err)
1184 goto err_hw;
1185
1186 if (gpio_is_valid(priv->link_gpio)) {
1187 char *link_name = devm_kzalloc(dev, 16, GFP_KERNEL);
1188
1189 if (!link_name) {
1190 err = -ENOMEM;
1191 goto err_gpio;
1192 }
1193 snprintf(link_name, 16, "%s-link", netdev_name(ndev));
1194 priv->link_irq = gpio_to_irq(priv->link_gpio);
1195 if (request_any_context_irq(priv->link_irq, w5100_detect_link,
1196 IRQF_TRIGGER_RISING |
1197 IRQF_TRIGGER_FALLING,
1198 link_name, priv->ndev) < 0)
1199 priv->link_gpio = -EINVAL;
1200 }
1201
1202 return 0;
1203
1204 err_gpio:
1205 free_irq(priv->irq, ndev);
1206 err_hw:
1207 destroy_workqueue(priv->xfer_wq);
1208 err_wq:
1209 unregister_netdev(ndev);
1210 err_register:
1211 free_netdev(ndev);
1212 return err;
1213 }
1214 EXPORT_SYMBOL_GPL(w5100_probe);
1215
1216 int w5100_remove(struct device *dev)
1217 {
1218 struct net_device *ndev = dev_get_drvdata(dev);
1219 struct w5100_priv *priv = netdev_priv(ndev);
1220
1221 w5100_hw_reset(priv);
1222 free_irq(priv->irq, ndev);
1223 if (gpio_is_valid(priv->link_gpio))
1224 free_irq(priv->link_irq, ndev);
1225
1226 flush_work(&priv->setrx_work);
1227 flush_work(&priv->restart_work);
1228 destroy_workqueue(priv->xfer_wq);
1229
1230 unregister_netdev(ndev);
1231 free_netdev(ndev);
1232 return 0;
1233 }
1234 EXPORT_SYMBOL_GPL(w5100_remove);
1235
1236 #ifdef CONFIG_PM_SLEEP
1237 static int w5100_suspend(struct device *dev)
1238 {
1239 struct net_device *ndev = dev_get_drvdata(dev);
1240 struct w5100_priv *priv = netdev_priv(ndev);
1241
1242 if (netif_running(ndev)) {
1243 netif_carrier_off(ndev);
1244 netif_device_detach(ndev);
1245
1246 w5100_hw_close(priv);
1247 }
1248 return 0;
1249 }
1250
1251 static int w5100_resume(struct device *dev)
1252 {
1253 struct net_device *ndev = dev_get_drvdata(dev);
1254 struct w5100_priv *priv = netdev_priv(ndev);
1255
1256 if (netif_running(ndev)) {
1257 w5100_hw_reset(priv);
1258 w5100_hw_start(priv);
1259
1260 netif_device_attach(ndev);
1261 if (!gpio_is_valid(priv->link_gpio) ||
1262 gpio_get_value(priv->link_gpio) != 0)
1263 netif_carrier_on(ndev);
1264 }
1265 return 0;
1266 }
1267 #endif /* CONFIG_PM_SLEEP */
1268
1269 SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
1270 EXPORT_SYMBOL_GPL(w5100_pm_ops);
1271
1272 static struct platform_driver w5100_mmio_driver = {
1273 .driver = {
1274 .name = DRV_NAME,
1275 .pm = &w5100_pm_ops,
1276 },
1277 .probe = w5100_mmio_probe,
1278 .remove = w5100_mmio_remove,
1279 };
1280 module_platform_driver(w5100_mmio_driver);