1 /******************************************************************************
2 **
3 ** FILE NAME : ifxmips_ptm_adsl.c
4 ** PROJECT : UEIP
5 ** MODULES : PTM
6 **
7 ** DATE : 7 Jul 2009
8 ** AUTHOR : Xu Liang
9 ** DESCRIPTION : PTM driver common source file (core functions for Danube/
10 ** Amazon-SE/AR9)
11 ** COPYRIGHT : Copyright (c) 2006
12 ** Infineon Technologies AG
13 ** Am Campeon 1-12, 85579 Neubiberg, Germany
14 **
15 ** This program is free software; you can redistribute it and/or modify
16 ** it under the terms of the GNU General Public License as published by
17 ** the Free Software Foundation; either version 2 of the License, or
18 ** (at your option) any later version.
19 **
20 ** HISTORY
21 ** $Date $Author $Comment
22 ** 07 JUL 2009 Xu Liang Init Version
23 *******************************************************************************/
24
25
26
27 /*
28 * ####################################
29 * Header Files
30 * ####################################
31 */
32
33 /*
34 * Common Header Files
35 */
36 #include <linux/version.h>
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/errno.h>
41 #include <linux/proc_fs.h>
42 #include <linux/init.h>
43 #include <linux/ioctl.h>
44 #include <linux/etherdevice.h>
45 #include <linux/interrupt.h>
46 #include <linux/netdevice.h>
47 #include <linux/platform_device.h>
48 #include <linux/of_device.h>
49 #include <asm/io.h>
50
51 /*
52 * Chip-Specific Header Files
53 */
54 #include "ifxmips_ptm_adsl.h"
55
56
57 #include <lantiq_soc.h>
58
59 /*
60 * ####################################
61 * Kernel Version Adaptation
62 * ####################################
63 */
64 #define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
65 #define MODULE_PARM(a, b) module_param(a, int, 0)
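/* the two macros above map the legacy MODULE_PARM interface onto the
 * module_param API of 2.6+ kernels */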
66
67
68
69 /*
70 * ####################################
71 * Parameters to Configure PPE
72 * ####################################
73 */
74
75 static int write_desc_delay = 0x20; /* Write descriptor delay */
76
77 static int rx_max_packet_size = ETH_MAX_FRAME_LENGTH;
78 /* Max packet size for RX */
79
80 static int dma_rx_descriptor_length = 24; /* Number of descriptors per DMA RX channel */
81 static int dma_tx_descriptor_length = 24; /* Number of descriptors per DMA TX channel */
82
83 static int eth_efmtc_crc_cfg = 0x03100710; /* default: tx_eth_crc_gen: 1, tx_tc_crc_gen: 1, tx_tc_crc_len = 16 */
84 /* rx_eth_crc_present: 1, rx_eth_crc_check: 1, rx_tc_crc_check: 1, rx_tc_crc_len = 16 */
85
86 MODULE_PARM(write_desc_delay, "i");
87 MODULE_PARM_DESC(write_desc_delay, "PPE core clock cycles between a descriptor write and it taking effect in external RAM");
88
89 MODULE_PARM(rx_max_packet_size, "i");
90 MODULE_PARM_DESC(rx_max_packet_size, "Max packet size in bytes for downstream ethernet frames");
91
92 MODULE_PARM(dma_rx_descriptor_length, "i");
93 MODULE_PARM_DESC(dma_rx_descriptor_length, "Number of descriptors assigned to the DMA RX channel (>16)");
94 MODULE_PARM(dma_tx_descriptor_length, "i");
95 MODULE_PARM_DESC(dma_tx_descriptor_length, "Number of descriptors assigned to the DMA TX channel (>16)");
96
97 MODULE_PARM(eth_efmtc_crc_cfg, "i");
98 MODULE_PARM_DESC(eth_efmtc_crc_cfg, "Configuration for PTM TX/RX ethernet/efm-tc CRC");
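/* these parameters can be overridden when the module is loaded, e.g.
 * (module file name and values below are illustrative only):
 *   insmod ptm.ko dma_rx_descriptor_length=32 dma_tx_descriptor_length=32 */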
99
100
101
102 /*
103 * ####################################
104 * Definition
105 * ####################################
106 */
107
108
109 #define DUMP_SKB_LEN ~0
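/* pass DUMP_SKB_LEN to dump an entire skb; dump_skb() caps the length at skb->len */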
110
111
112
113 /*
114 * ####################################
115 * Declaration
116 * ####################################
117 */
118
119 /*
120 * Network Operations
121 */
122 static void ptm_setup(struct net_device *, int);
123 static struct net_device_stats *ptm_get_stats(struct net_device *);
124 static int ptm_open(struct net_device *);
125 static int ptm_stop(struct net_device *);
126 static unsigned int ptm_poll(int, unsigned int);
127 static int ptm_napi_poll(struct napi_struct *, int);
128 static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
129 static int ptm_ioctl(struct net_device *, struct ifreq *, int);
130 static void ptm_tx_timeout(struct net_device *);
131
132 /*
133 * DSL Data LED
134 */
135 static INLINE void adsl_led_flash(void);
136
137 /*
138 * Buffer management functions
139 */
140 static INLINE struct sk_buff* alloc_skb_rx(void);
141 //static INLINE struct sk_buff* alloc_skb_tx(unsigned int);
142 static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int);
143 static INLINE int get_tx_desc(unsigned int, unsigned int *);
144
145 /*
146 * Mailbox handler and signal function
147 */
148 static INLINE int mailbox_rx_irq_handler(unsigned int);
149 static irqreturn_t mailbox_irq_handler(int, void *);
150 static INLINE void mailbox_signal(unsigned int, int);
151 #ifdef CONFIG_IFX_PTM_RX_TASKLET
152 static void do_ptm_tasklet(unsigned long);
153 #endif
154
155 /*
156 * Debug Functions
157 */
158 #if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
159 static void dump_skb(struct sk_buff *, u32, char *, int, int, int);
160 #else
161 #define dump_skb(skb, len, title, port, ch, is_tx) do {} while (0)
162 #endif
163 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
164 static void skb_swap(struct sk_buff *);
165 #else
166 #define skb_swap(skb) do {} while (0)
167 #endif
168
169 /*
170 * Proc File Functions
171 */
172 static INLINE void proc_file_create(void);
173 static INLINE void proc_file_delete(void);
174 static int proc_read_version(char *, char **, off_t, int, int *, void *);
175 static int proc_read_wanmib(char *, char **, off_t, int, int *, void *);
176 static int proc_write_wanmib(struct file *, const char *, unsigned long, void *);
177 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
178 static int proc_read_genconf(char *, char **, off_t, int, int *, void *);
179 #endif
180 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
181 static int proc_read_dbg(char *, char **, off_t, int, int *, void *);
182 static int proc_write_dbg(struct file *, const char *, unsigned long, void *);
183 #endif
184
185 /*
186 * Proc Help Functions
187 */
188 static INLINE int stricmp(const char *, const char *);
189 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
190 static INLINE int strincmp(const char *, const char *, int);
191 #endif
192 static INLINE int ifx_ptm_version(char *);
193
194 /*
195 * Init & clean-up functions
196 */
197 static INLINE void check_parameters(void);
198 static INLINE int init_priv_data(void);
199 static INLINE void clear_priv_data(void);
200 static INLINE void init_tables(void);
201
202 /*
203 * External Functions
204 */
205 #if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
206 extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
207 #else
208 static inline int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr)
209 {
210 if ( is_showtime != NULL )
211 *is_showtime = 0;
212 return 0;
213 }
214 #endif
215
216 /*
217 * External variables
218 */
219 #if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
220 extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
221 extern int (*ifx_mei_atm_showtime_exit)(void);
222 #else
223 int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *) = NULL;
224 EXPORT_SYMBOL(ifx_mei_atm_showtime_enter);
225 int (*ifx_mei_atm_showtime_exit)(void) = NULL;
226 EXPORT_SYMBOL(ifx_mei_atm_showtime_exit);
227 #endif
228
229
230
231 /*
232 * ####################################
233 * Local Variable
234 * ####################################
235 */
236
237 static struct ptm_priv_data g_ptm_priv_data;
238
239 static struct net_device_ops g_ptm_netdev_ops = {
240 .ndo_get_stats = ptm_get_stats,
241 .ndo_open = ptm_open,
242 .ndo_stop = ptm_stop,
243 .ndo_start_xmit = ptm_hard_start_xmit,
244 .ndo_validate_addr = eth_validate_addr,
245 .ndo_set_mac_address = eth_mac_addr,
246 .ndo_do_ioctl = ptm_ioctl,
247 .ndo_tx_timeout = ptm_tx_timeout,
248 };
249
250 static struct net_device *g_net_dev[2] = {0};
251 static char *g_net_dev_name[2] = {"dsl0", "dslfast0"};
252
253 #ifdef CONFIG_IFX_PTM_RX_TASKLET
254 static struct tasklet_struct g_ptm_tasklet[] = {
255 {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 0},
256 {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 1},
257 };
258 #endif
259
260 unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR;
261
262 static struct proc_dir_entry* g_ptm_dir = NULL;
263
264 static int g_showtime = 0;
265
266
267
268 /*
269 * ####################################
270 * Local Function
271 * ####################################
272 */
273
274 static void ptm_setup(struct net_device *dev, int ndev)
275 {
276 #if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
277 netif_carrier_off(dev);
278 #endif
279
280 /* hook network operations */
281 dev->netdev_ops = &g_ptm_netdev_ops;
282 /* Allow up to 1508 bytes, for RFC4638 */
283 dev->max_mtu = ETH_DATA_LEN + 8;
284 netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 25);
285 dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;
286
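/* hard-coded default MAC address; only the last byte differs per interface */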
287 dev->dev_addr[0] = 0x00;
288 dev->dev_addr[1] = 0x20;
289 dev->dev_addr[2] = 0xda;
290 dev->dev_addr[3] = 0x86;
291 dev->dev_addr[4] = 0x23;
292 dev->dev_addr[5] = 0x75 + ndev;
293 }
294
295 static struct net_device_stats *ptm_get_stats(struct net_device *dev)
296 {
297 int ndev;
298
299 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
300 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
301
302 g_ptm_priv_data.itf[ndev].stats.rx_errors = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu + WAN_MIB_TABLE[ndev].wrx_ethcrc_err_pdu;
303 g_ptm_priv_data.itf[ndev].stats.rx_dropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu + (WAN_MIB_TABLE[ndev].wrx_correct_pdu - g_ptm_priv_data.itf[ndev].stats.rx_packets);
304
305 return &g_ptm_priv_data.itf[ndev].stats;
306 }
307
308 static int ptm_open(struct net_device *dev)
309 {
310 int ndev;
311
312 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
313 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
314
315 napi_enable(&g_ptm_priv_data.itf[ndev].napi);
316
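/* unmask the RX mailbox interrupt of this interface (bit ndev of IGU1 IER) */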
317 IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);
318
319 netif_start_queue(dev);
320
321 return 0;
322 }
323
324 static int ptm_stop(struct net_device *dev)
325 {
326 int ndev;
327
328 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
329 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
330
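/* mask both the RX (bit ndev) and TX (bit ndev + 16) mailbox interrupts */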
331 IFX_REG_W32_MASK((1 << ndev) | (1 << (ndev + 16)), 0, MBOX_IGU1_IER);
332
333 napi_disable(&g_ptm_priv_data.itf[ndev].napi);
334
335 netif_stop_queue(dev);
336
337 return 0;
338 }
339
340 static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
341 {
342 unsigned int work_done = 0;
343
344 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
345
346 while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes > 0 ) {
347 if ( mailbox_rx_irq_handler(ndev) < 0 )
348 break;
349
350 work_done++;
351 }
352
353 return work_done;
354 }
355 static int ptm_napi_poll(struct napi_struct *napi, int budget)
356 {
357 int ndev;
358 unsigned int work_done;
359
360 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != napi->dev; ndev++ );
361
362 work_done = ptm_poll(ndev, budget);
363
364 // interface down
365 if ( !netif_running(napi->dev) ) {
366 napi_complete(napi);
367 return work_done;
368 }
369
370 // no more traffic
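// ack the interrupt and re-check: a descriptor may have arrived in between,
// in which case NAPI must keep polling instead of completing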
371 if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
372 // clear interrupt
373 IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_ISRC);
374 // double check
375 if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
376 napi_complete(napi);
377 IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);
378 return work_done;
379 }
380 }
381
382 // next round
383 return work_done;
384 }
385
386 static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
387 {
388 int ndev;
389 unsigned int f_full;
390 int desc_base;
391 register struct tx_descriptor reg_desc = {0};
392
393 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
394 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
395
396 if ( !g_showtime ) {
397 err("not in showtime");
398 goto PTM_HARD_START_XMIT_FAIL;
399 }
400
401 /* allocate descriptor */
402 desc_base = get_tx_desc(ndev, &f_full);
403 if ( f_full ) {
404 netif_trans_update(dev);
405 netif_stop_queue(dev);
406
407 IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_ISRC);
408 IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_IER);
409 }
410 if ( desc_base < 0 )
411 goto PTM_HARD_START_XMIT_FAIL;
412
413 if ( g_ptm_priv_data.itf[ndev].tx_skb[desc_base] != NULL )
414 dev_kfree_skb_any(g_ptm_priv_data.itf[ndev].tx_skb[desc_base]);
415 g_ptm_priv_data.itf[ndev].tx_skb[desc_base] = skb;
416
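/* fill the TX descriptor: dataptr is a word (4-byte) address and byteoff the
 * offset within the burst-aligned buffer; own = 1 hands the descriptor to
 * the PPE, sop/eop mark a single-buffer frame */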
417 reg_desc.dataptr = (unsigned int)skb->data >> 2;
418 reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
419 reg_desc.byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
420 reg_desc.own = 1;
421 reg_desc.c = 1;
422 reg_desc.sop = reg_desc.eop = 1;
423
424 /* write descriptor to memory and write back cache */
425 g_ptm_priv_data.itf[ndev].tx_desc[desc_base] = reg_desc;
426 dma_cache_wback((unsigned long)skb->data, skb->len);
427 wmb();
428
429 dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 1);
430
431 if ( (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ) {
432 skb_swap(skb);
433 }
434
435 g_ptm_priv_data.itf[ndev].stats.tx_packets++;
436 g_ptm_priv_data.itf[ndev].stats.tx_bytes += reg_desc.datalen;
437
438 netif_trans_update(dev);
439 mailbox_signal(ndev, 1);
440
441 adsl_led_flash();
442
443 return NETDEV_TX_OK;
444
445 PTM_HARD_START_XMIT_FAIL:
446 dev_kfree_skb_any(skb);
447 g_ptm_priv_data.itf[ndev].stats.tx_dropped++;
448 return NETDEV_TX_OK;
449 }
450
451 static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
452 {
453 int ndev;
454
455 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
456 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
457
458 switch ( cmd )
459 {
460 case IFX_PTM_MIB_CW_GET:
461 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = WAN_MIB_TABLE[ndev].wrx_nonidle_cw;
462 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords = WAN_MIB_TABLE[ndev].wrx_idle_cw;
463 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = WAN_MIB_TABLE[ndev].wrx_err_cw;
464 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = 0;
465 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords = 0;
466 break;
467 case IFX_PTM_MIB_FRAME_GET:
468 ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxCorrect = WAN_MIB_TABLE[ndev].wrx_correct_pdu;
469 ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TC_CrcError = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu;
470 ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxDropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu;
471 ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TxSend = WAN_MIB_TABLE[ndev].wtx_total_pdu;
472 break;
473 case IFX_PTM_CFG_GET:
474 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = CFG_ETH_EFMTC_CRC->rx_eth_crc_present;
475 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck = CFG_ETH_EFMTC_CRC->rx_eth_crc_check;
476 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck = CFG_ETH_EFMTC_CRC->rx_tc_crc_check;
477 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen = CFG_ETH_EFMTC_CRC->rx_tc_crc_len;
478 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen = CFG_ETH_EFMTC_CRC->tx_eth_crc_gen;
479 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen = CFG_ETH_EFMTC_CRC->tx_tc_crc_gen;
480 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen = CFG_ETH_EFMTC_CRC->tx_tc_crc_len;
481 break;
482 case IFX_PTM_CFG_SET:
483 CFG_ETH_EFMTC_CRC->rx_eth_crc_present = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent ? 1 : 0;
484 CFG_ETH_EFMTC_CRC->rx_eth_crc_check = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 1 : 0;
485 if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck && (((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 32) )
486 {
487 CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 1;
488 CFG_ETH_EFMTC_CRC->rx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen;
489 }
490 else
491 {
492 CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 0;
493 CFG_ETH_EFMTC_CRC->rx_tc_crc_len = 0;
494 }
495 CFG_ETH_EFMTC_CRC->tx_eth_crc_gen = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 1 : 0;
496 if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen && (((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 32) )
497 {
498 CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 1;
499 CFG_ETH_EFMTC_CRC->tx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen;
500 }
501 else
502 {
503 CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 0;
504 CFG_ETH_EFMTC_CRC->tx_tc_crc_len = 0;
505 }
506 break;
507 default:
508 return -EOPNOTSUPP;
509 }
510
511 return 0;
512 }
513
514 static void ptm_tx_timeout(struct net_device *dev)
515 {
516 int ndev;
517
518 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
519 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
520
521 /* disable TX irq, release skb when sending new packet */
522 IFX_REG_W32_MASK(1 << (ndev + 16), 0, MBOX_IGU1_IER);
523
524 /* wake up TX queue */
525 netif_wake_queue(dev);
526
527 return;
528 }
529
530 static INLINE void adsl_led_flash(void)
531 {
532 }
533
534 static INLINE struct sk_buff* alloc_skb_rx(void)
535 {
536 struct sk_buff *skb;
537
538 /* allocate memory including trailer and padding */
539 skb = dev_alloc_skb(rx_max_packet_size + RX_HEAD_MAC_ADDR_ALIGNMENT + DATA_BUFFER_ALIGNMENT);
540 if ( skb != NULL ) {
541 /* the buffer must be aligned to the DMA burst length; two more bytes are reserved for MAC address alignment */
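/* "~(addr + (align - 1)) & (align - 1)" equals "-addr & (align - 1)",
 * i.e. the number of bytes up to the next aligned boundary */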
542 if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
543 skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
544 /* put the skb pointer in the reserved area "skb->data - 4" */
545 *((struct sk_buff **)skb->data - 1) = skb;
546 wmb();
547 /* write back and invalidate cache */
548 dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
549 /* invalidate cache */
550 dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
551 }
552
553 return skb;
554 }
555
556 #if 0
557 static INLINE struct sk_buff* alloc_skb_tx(unsigned int size)
558 {
559 struct sk_buff *skb;
560
561 /* allocate memory including padding */
562 size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
563 skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
564 /* must be burst length alignment */
565 if ( skb != NULL )
566 skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
567 return skb;
568 }
569 #endif
570
571 static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int dataptr)
572 {
573 unsigned int skb_dataptr;
574 struct sk_buff *skb;
575
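/* dataptr is a word (4-byte) address; alloc_skb_rx() stored the skb pointer
 * in the 4 bytes just before skb->data, so step back one word and read it
 * through uncached KSEG1 */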
576 skb_dataptr = ((dataptr - 1) << 2) | KSEG1;
577 skb = *(struct sk_buff **)skb_dataptr;
578
579 ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
580 ASSERT(((unsigned int)skb->data | KSEG1) == ((dataptr << 2) | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);
581
582 return skb;
583 }
584
585 static INLINE int get_tx_desc(unsigned int itf, unsigned int *f_full)
586 {
587 int desc_base = -1;
588 struct ptm_itf *p_itf = &g_ptm_priv_data.itf[itf];
589
590 // TX is assumed to be a serialized operation,
591 // so no locking protection is provided
592
593 *f_full = 1;
594
595 if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 ) {
596 desc_base = p_itf->tx_desc_pos;
597 if ( ++(p_itf->tx_desc_pos) == dma_tx_descriptor_length )
598 p_itf->tx_desc_pos = 0;
599 if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 )
600 *f_full = 0;
601 }
602
603 return desc_base;
604 }
605
606 static INLINE int mailbox_rx_irq_handler(unsigned int ch) // return: < 0 - descriptor not available, 0 - received one packet
607 {
608 unsigned int ndev = ch;
609 struct sk_buff *skb;
610 struct sk_buff *new_skb;
611 volatile struct rx_descriptor *desc;
612 struct rx_descriptor reg_desc;
613 int netif_rx_ret;
614
615 desc = &g_ptm_priv_data.itf[ndev].rx_desc[g_ptm_priv_data.itf[ndev].rx_desc_pos];
616 if ( desc->own || !desc->c ) // PP32 still holds the descriptor, or it is not yet complete
617 return -EAGAIN;
618 if ( ++g_ptm_priv_data.itf[ndev].rx_desc_pos == dma_rx_descriptor_length )
619 g_ptm_priv_data.itf[ndev].rx_desc_pos = 0;
620
621 reg_desc = *desc;
622 skb = get_skb_rx_pointer(reg_desc.dataptr);
623
624 if ( !reg_desc.err ) {
625 new_skb = alloc_skb_rx();
626 if ( new_skb != NULL ) {
627 skb_reserve(skb, reg_desc.byteoff);
628 skb_put(skb, reg_desc.datalen);
629
630 dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 0);
631
632 // parse protocol header
633 skb->dev = g_net_dev[ndev];
634 skb->protocol = eth_type_trans(skb, skb->dev);
635
636 netif_rx_ret = netif_receive_skb(skb);
637
638 if ( netif_rx_ret != NET_RX_DROP ) {
639 g_ptm_priv_data.itf[ndev].stats.rx_packets++;
640 g_ptm_priv_data.itf[ndev].stats.rx_bytes += reg_desc.datalen;
641 }
642
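// recycle the descriptor with the new buffer; dataptr holds a 28-bit
// word address (buffer address >> 2)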
643 reg_desc.dataptr = ((unsigned int)new_skb->data >> 2) & 0x0FFFFFFF;
644 reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
645 }
646 }
647 else
648 reg_desc.err = 0;
649
650 reg_desc.datalen = rx_max_packet_size;
651 reg_desc.own = 1;
652 reg_desc.c = 0;
653
654 // update descriptor
655 *desc = reg_desc;
656 wmb();
657
658 mailbox_signal(ndev, 0);
659
660 adsl_led_flash();
661
662 return 0;
663 }
664
665 static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
666 {
667 unsigned int isr;
668 int i;
669
670 isr = IFX_REG_R32(MBOX_IGU1_ISR);
671 IFX_REG_W32(isr, MBOX_IGU1_ISRC);
672 isr &= IFX_REG_R32(MBOX_IGU1_IER);
673
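// handle pending, unmasked interrupt sources highest bit first:
// bits 16..31 are TX channels, bits 0..15 are RX channels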
674 while ( (i = __fls(isr)) >= 0 ) {
675 isr ^= 1 << i;
676
677 if ( i >= 16 ) {
678 // TX
679 IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
680 i -= 16;
681 if ( i < MAX_ITF_NUMBER )
682 netif_wake_queue(g_net_dev[i]);
683 }
684 else {
685 // RX
686 #ifdef CONFIG_IFX_PTM_RX_INTERRUPT
687 while ( WRX_DMA_CHANNEL_CONFIG(i)->vlddes > 0 )
688 mailbox_rx_irq_handler(i);
689 #else
690 IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
691 napi_schedule(&g_ptm_priv_data.itf[i].napi);
692 #endif
693 }
694 }
695
696 return IRQ_HANDLED;
697 }
698
699 static INLINE void mailbox_signal(unsigned int itf, int is_tx)
700 {
701 int count = 1000;
702
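/* busy-wait until the PP32 has consumed any previous signal on this
 * mailbox bit before raising it again */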
703 if ( is_tx ) {
704 while ( MBOX_IGU3_ISR_ISR(itf + 16) && count > 0 )
705 count--;
706 IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf + 16), MBOX_IGU3_ISRS);
707 }
708 else {
709 while ( MBOX_IGU3_ISR_ISR(itf) && count > 0 )
710 count--;
711 IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf), MBOX_IGU3_ISRS);
712 }
713
714 ASSERT(count != 0, "MBOX_IGU3_ISR = 0x%08x", IFX_REG_R32(MBOX_IGU3_ISR));
715 }
716
717 #ifdef CONFIG_IFX_PTM_RX_TASKLET
718 static void do_ptm_tasklet(unsigned long arg)
719 {
720 unsigned int work_to_do = 25;
721 unsigned int work_done = 0;
722
723 ASSERT(arg >= 0 && arg < ARRAY_SIZE(g_net_dev), "arg = %lu (wrong value)", arg);
724
725 while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(arg)->vlddes > 0 ) {
726 if ( mailbox_rx_irq_handler(arg) < 0 )
727 break;
728
729 work_done++;
730 }
731
732 // interface down
733 if ( !netif_running(g_net_dev[arg]) )
734 return;
735
736 // no more traffic
737 if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
738 // clear interrupt
739 IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_ISRC);
740 // double check
741 if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
742 IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_IER);
743 return;
744 }
745 }
746
747 // next round
748 tasklet_schedule(&g_ptm_tasklet[arg]);
749 }
750 #endif
751
752 #if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
753 static void dump_skb(struct sk_buff *skb, u32 len, char *title, int port, int ch, int is_tx)
754 {
755 int i;
756
757 if ( !(ifx_ptm_dbg_enable & (is_tx ? DBG_ENABLE_MASK_DUMP_SKB_TX : DBG_ENABLE_MASK_DUMP_SKB_RX)) )
758 return;
759
760 if ( skb->len < len )
761 len = skb->len;
762
763 if ( len > rx_max_packet_size ) {
764 printk("too big data length: skb = %08x, skb->data = %08x, skb->len = %d\n", (u32)skb, (u32)skb->data, skb->len);
765 return;
766 }
767
768 if ( ch >= 0 )
769 printk("%s (port %d, ch %d)\n", title, port, ch);
770 else
771 printk("%s\n", title);
772 printk(" skb->data = %08X, skb->tail = %08X, skb->len = %d\n", (u32)skb->data, (u32)skb->tail, (int)skb->len);
773 for ( i = 1; i <= len; i++ ) {
774 if ( i % 16 == 1 )
775 printk(" %4d:", i - 1);
776 printk(" %02X", (int)(*((char*)skb->data + i - 1) & 0xFF));
777 if ( i % 16 == 0 )
778 printk("\n");
779 }
780 if ( (i - 1) % 16 != 0 )
781 printk("\n");
782 }
783 #endif
784
785 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
786 static void skb_swap(struct sk_buff *skb)
787 {
788 unsigned char tmp[8];
789 unsigned char *p = skb->data;
790
791 if ( !(p[0] & 0x01) ) { // bypass broadcast/multicast
792 // swap MAC
793 memcpy(tmp, p, 6);
794 memcpy(p, p + 6, 6);
795 memcpy(p + 6, tmp, 6);
796 p += 12;
797
798 // bypass VLAN
799 while ( p[0] == 0x81 && p[1] == 0x00 )
800 p += 4;
801
802 // IP
803 if ( p[0] == 0x08 && p[1] == 0x00 ) {
804 p += 14;
805 memcpy(tmp, p, 4);
806 memcpy(p, p + 4, 4);
807 memcpy(p + 4, tmp, 4);
808 p += 8;
809 }
810
811 dma_cache_wback((unsigned long)skb->data, (unsigned long)p - (unsigned long)skb->data);
812 }
813 }
814 #endif
815
816 static INLINE void proc_file_create(void)
817 {
818 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
819 struct proc_dir_entry *res;
820
821 g_ptm_dir = proc_mkdir("driver/ifx_ptm", NULL);
822
823 create_proc_read_entry("version",
824 0,
825 g_ptm_dir,
826 proc_read_version,
827 NULL);
828
829 res = create_proc_entry("wanmib",
830 0,
831 g_ptm_dir);
832 if ( res != NULL ) {
833 res->read_proc = proc_read_wanmib;
834 res->write_proc = proc_write_wanmib;
835 }
836
837 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
838 create_proc_read_entry("genconf",
839 0,
840 g_ptm_dir,
841 proc_read_genconf,
842 NULL);
843
844 #ifdef CONFIG_AR9
845 create_proc_read_entry("regs",
846 0,
847 g_ptm_dir,
848 ifx_ptm_proc_read_regs,
849 NULL);
850 #endif
851 #endif
852
853 res = create_proc_entry("dbg",
854 0,
855 g_ptm_dir);
856 if ( res != NULL ) {
857 res->read_proc = proc_read_dbg;
858 res->write_proc = proc_write_dbg;
859 }
860 #endif
861 }
862
863 static INLINE void proc_file_delete(void)
864 {
865 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
866 remove_proc_entry("dbg", g_ptm_dir);
867 #endif
868
869 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
870 #ifdef CONFIG_AR9
871 remove_proc_entry("regs", g_ptm_dir);
872 #endif
873
874 remove_proc_entry("genconf", g_ptm_dir);
875 #endif
876
877 remove_proc_entry("wanmib", g_ptm_dir);
878
879 remove_proc_entry("version", g_ptm_dir);
880
881 remove_proc_entry("driver/ifx_ptm", NULL);
882 }
883
884 static int proc_read_version(char *buf, char **start, off_t offset, int count, int *eof, void *data)
885 {
886 int len = 0;
887
888 len += ifx_ptm_version(buf + len);
889
890 if ( offset >= len ) {
891 *start = buf;
892 *eof = 1;
893 return 0;
894 }
895 *start = buf + offset;
896 if ( (len -= offset) > count )
897 return count;
898 *eof = 1;
899 return len;
900 }
901
902 static int proc_read_wanmib(char *page, char **start, off_t off, int count, int *eof, void *data)
903 {
904 int len = 0;
905 int i;
906 char *title[] = {
907 "dsl0\n",
908 "dslfast0\n"
909 };
910
911 for ( i = 0; i < ARRAY_SIZE(title); i++ ) {
912 len += sprintf(page + off + len, "%s", title[i]);
913 len += sprintf(page + off + len, " wrx_correct_pdu = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu);
914 len += sprintf(page + off + len, " wrx_correct_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu_bytes);
915 len += sprintf(page + off + len, " wrx_tccrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu);
916 len += sprintf(page + off + len, " wrx_tccrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu_bytes);
917 len += sprintf(page + off + len, " wrx_ethcrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu);
918 len += sprintf(page + off + len, " wrx_ethcrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu_bytes);
919 len += sprintf(page + off + len, " wrx_nodesc_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_nodesc_drop_pdu);
920 len += sprintf(page + off + len, " wrx_len_violation_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_len_violation_drop_pdu);
921 len += sprintf(page + off + len, " wrx_idle_bytes = %d\n", WAN_MIB_TABLE[i].wrx_idle_bytes);
922 len += sprintf(page + off + len, " wrx_nonidle_cw = %d\n", WAN_MIB_TABLE[i].wrx_nonidle_cw);
923 len += sprintf(page + off + len, " wrx_idle_cw = %d\n", WAN_MIB_TABLE[i].wrx_idle_cw);
924 len += sprintf(page + off + len, " wrx_err_cw = %d\n", WAN_MIB_TABLE[i].wrx_err_cw);
925 len += sprintf(page + off + len, " wtx_total_pdu = %d\n", WAN_MIB_TABLE[i].wtx_total_pdu);
926 len += sprintf(page + off + len, " wtx_total_bytes = %d\n", WAN_MIB_TABLE[i].wtx_total_bytes);
927 }
928
929 *eof = 1;
930
931 return len;
932 }
933
934 static int proc_write_wanmib(struct file *file, const char *buf, unsigned long count, void *data)
935 {
936 char str[2048];
937 char *p;
938 int len, rlen;
939
940 int i;
941
942 len = count < sizeof(str) ? count : sizeof(str) - 1;
943 rlen = len - copy_from_user(str, buf, len);
944 while ( rlen && str[rlen - 1] <= ' ' )
945 rlen--;
946 str[rlen] = 0;
947 for ( p = str; *p && *p <= ' '; p++, rlen-- );
948 if ( !*p )
949 return count;
950
951 if ( stricmp(p, "clear") == 0 || stricmp(p, "clean") == 0 ) {
952 for ( i = 0; i < 2; i++ )
953 memset((void*)&WAN_MIB_TABLE[i], 0, sizeof(WAN_MIB_TABLE[i]));
954 }
955
956 return count;
957 }
958
959 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
960
961 static int proc_read_genconf(char *page, char **start, off_t off, int count, int *eof, void *data)
962 {
963 int len = 0;
964 int len_max = off + count;
965 char *pstr;
966 char str[2048];
967 int llen = 0;
968 int i;
969 unsigned long bit;
970
971 pstr = *start = page;
972
973 __sync();
974
975 llen += sprintf(str + llen, "CFG_WAN_WRDES_DELAY (0x%08X): %d\n", (unsigned int)CFG_WAN_WRDES_DELAY, IFX_REG_R32(CFG_WAN_WRDES_DELAY));
976 llen += sprintf(str + llen, "CFG_WRX_DMACH_ON (0x%08X):", (unsigned int)CFG_WRX_DMACH_ON);
977 for ( i = 0, bit = 1; i < MAX_RX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
978 llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WRX_DMACH_ON) & bit) ? "on " : "off");
979 llen += sprintf(str + llen, "\n");
980 llen += sprintf(str + llen, "CFG_WTX_DMACH_ON (0x%08X):", (unsigned int)CFG_WTX_DMACH_ON);
981 for ( i = 0, bit = 1; i < MAX_TX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
982 llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WTX_DMACH_ON) & bit) ? "on " : "off");
983 llen += sprintf(str + llen, "\n");
984 llen += sprintf(str + llen, "CFG_WRX_LOOK_BITTH (0x%08X): %d\n", (unsigned int)CFG_WRX_LOOK_BITTH, IFX_REG_R32(CFG_WRX_LOOK_BITTH));
985 llen += sprintf(str + llen, "CFG_ETH_EFMTC_CRC (0x%08X): rx_tc_crc_len - %2d, rx_tc_crc_check - %s\n", (unsigned int)CFG_ETH_EFMTC_CRC, CFG_ETH_EFMTC_CRC->rx_tc_crc_len, CFG_ETH_EFMTC_CRC->rx_tc_crc_check ? " on" : "off");
986 llen += sprintf(str + llen, " rx_eth_crc_check - %s, rx_eth_crc_present - %s\n", CFG_ETH_EFMTC_CRC->rx_eth_crc_check ? " on" : "off", CFG_ETH_EFMTC_CRC->rx_eth_crc_present ? " on" : "off");
987 llen += sprintf(str + llen, " tx_tc_crc_len - %2d, tx_tc_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_tc_crc_len, CFG_ETH_EFMTC_CRC->tx_tc_crc_gen ? " on" : "off");
988 llen += sprintf(str + llen, " tx_eth_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_eth_crc_gen ? " on" : "off");
989
990 llen += sprintf(str + llen, "RX Port:\n");
991 for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
992 llen += sprintf(str + llen, " %d (0x%08X). mfs - %5d, dmach - %d, local_state - %d, partner_state - %d\n", i, (unsigned int)WRX_PORT_CONFIG(i), WRX_PORT_CONFIG(i)->mfs, WRX_PORT_CONFIG(i)->dmach, WRX_PORT_CONFIG(i)->local_state, WRX_PORT_CONFIG(i)->partner_state);
993 llen += sprintf(str + llen, "RX DMA Channel:\n");
994 for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
995 llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WRX_DMA_CHANNEL_CONFIG(i), WRX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WRX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WRX_DMA_CHANNEL_CONFIG(i)->deslen, WRX_DMA_CHANNEL_CONFIG(i)->vlddes);
996
997 llen += sprintf(str + llen, "TX Port:\n");
998 for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
999 llen += sprintf(str + llen, " %d (0x%08X). tx_cwth2 - %d, tx_cwth1 - %d\n", i, (unsigned int)WTX_PORT_CONFIG(i), WTX_PORT_CONFIG(i)->tx_cwth2, WTX_PORT_CONFIG(i)->tx_cwth1);
1000 llen += sprintf(str + llen, "TX DMA Channel:\n");
1001 for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
1002 llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WTX_DMA_CHANNEL_CONFIG(i), WTX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WTX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WTX_DMA_CHANNEL_CONFIG(i)->deslen, WTX_DMA_CHANNEL_CONFIG(i)->vlddes);
1003
1004 if ( len <= off && len + llen > off )
1005 {
1006 memcpy(pstr, str + off - len, len + llen - off);
1007 pstr += len + llen - off;
1008 }
1009 else if ( len > off )
1010 {
1011 memcpy(pstr, str, llen);
1012 pstr += llen;
1013 }
1014 len += llen;
1015 if ( len >= len_max )
1016 goto PROC_READ_GENCONF_OVERRUN_END;
1017
1018 *eof = 1;
1019
1020 return len - off;
1021
1022 PROC_READ_GENCONF_OVERRUN_END:
1023 return len - llen - off;
1024 }
1025
1026 #endif // defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
1027
1028 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1029
1030 static int proc_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data)
1031 {
1032 int len = 0;
1033
1034 len += sprintf(page + off + len, "error print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ERR) ? "enabled" : "disabled");
1035 len += sprintf(page + off + len, "debug print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DEBUG_PRINT) ? "enabled" : "disabled");
1036 len += sprintf(page + off + len, "assert - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ASSERT) ? "enabled" : "disabled");
1037 len += sprintf(page + off + len, "dump rx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_RX) ? "enabled" : "disabled");
1038 len += sprintf(page + off + len, "dump tx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_TX) ? "enabled" : "disabled");
1039 len += sprintf(page + off + len, "mac swap - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ? "enabled" : "disabled");
1040
1041 *eof = 1;
1042
1043 return len;
1044 }
1045
1046 static int proc_write_dbg(struct file *file, const char *buf, unsigned long count, void *data)
1047 {
1048 static const char *dbg_enable_mask_str[] = {
1049 " error print",
1050 " err",
1051 " debug print",
1052 " dbg",
1053 " assert",
1054 " assert",
1055 " dump rx skb",
1056 " rx",
1057 " dump tx skb",
1058 " tx",
1059 " dump init",
1060 " init",
1061 " dump qos",
1062 " qos",
1063 " mac swap",
1064 " swap",
1065 " all"
1066 };
1067 static const int dbg_enable_mask_str_len[] = {
1068 12, 4,
1069 12, 4,
1070 7, 7,
1071 12, 3,
1072 12, 3,
1073 10, 5,
1074 9, 4,
1075 9, 5,
1076 4
1077 };
1078 unsigned int dbg_enable_mask[] = {
1079 DBG_ENABLE_MASK_ERR,
1080 DBG_ENABLE_MASK_DEBUG_PRINT,
1081 DBG_ENABLE_MASK_ASSERT,
1082 DBG_ENABLE_MASK_DUMP_SKB_RX,
1083 DBG_ENABLE_MASK_DUMP_SKB_TX,
1084 DBG_ENABLE_MASK_DUMP_INIT,
1085 DBG_ENABLE_MASK_DUMP_QOS,
1086 DBG_ENABLE_MASK_MAC_SWAP,
1087 DBG_ENABLE_MASK_ALL
1088 };
1089
1090 char str[2048];
1091 char *p;
1092
1093 int len, rlen;
1094
1095 int f_enable = 0;
1096 int i;
1097
1098 len = count < sizeof(str) ? count : sizeof(str) - 1;
1099 rlen = len - copy_from_user(str, buf, len);
1100 while ( rlen && str[rlen - 1] <= ' ' )
1101 rlen--;
1102 str[rlen] = 0;
1103 for ( p = str; *p && *p <= ' '; p++, rlen-- );
1104 if ( !*p )
1105 return 0;
1106
1107 // debugging feature for enter/leave showtime
1108 if ( strincmp(p, "enter", 5) == 0 && ifx_mei_atm_showtime_enter != NULL )
1109 ifx_mei_atm_showtime_enter(NULL, NULL);
1110 else if ( strincmp(p, "leave", 5) == 0 && ifx_mei_atm_showtime_exit != NULL )
1111 ifx_mei_atm_showtime_exit();
1112
1113 if ( strincmp(p, "enable", 6) == 0 ) {
1114 p += 6;
1115 f_enable = 1;
1116 }
1117 else if ( strincmp(p, "disable", 7) == 0 ) {
1118 p += 7;
1119 f_enable = -1;
1120 }
1121 else if ( strincmp(p, "help", 4) == 0 || *p == '?' ) {
1122 printk("echo <enable/disable> [err/dbg/assert/rx/tx/init/qos/swap/all] > /proc/driver/ifx_ptm/dbg\n");
1123 }
1124
1125 if ( f_enable ) {
1126 if ( *p == 0 ) {
1127 if ( f_enable > 0 )
1128 ifx_ptm_dbg_enable |= DBG_ENABLE_MASK_ALL & ~DBG_ENABLE_MASK_MAC_SWAP;
1129 else
1130 ifx_ptm_dbg_enable &= ~DBG_ENABLE_MASK_ALL | DBG_ENABLE_MASK_MAC_SWAP;
1131 }
1132 else {
1133 do {
1134 for ( i = 0; i < ARRAY_SIZE(dbg_enable_mask_str); i++ )
1135 if ( strincmp(p, dbg_enable_mask_str[i], dbg_enable_mask_str_len[i]) == 0 ) {
1136 if ( f_enable > 0 )
1137 ifx_ptm_dbg_enable |= dbg_enable_mask[i >> 1];
1138 else
1139 ifx_ptm_dbg_enable &= ~dbg_enable_mask[i >> 1];
1140 p += dbg_enable_mask_str_len[i];
1141 break;
1142 }
1143 } while ( i < ARRAY_SIZE(dbg_enable_mask_str) );
1144 }
1145 }
1146
1147 return count;
1148 }
1149
1150 #endif // defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1151
1152 static INLINE int stricmp(const char *p1, const char *p2)
1153 {
1154 int c1, c2;
1155
1156 while ( *p1 && *p2 )
1157 {
1158 c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
1159 c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
1160 if ( (c1 -= c2) )
1161 return c1;
1162 p1++;
1163 p2++;
1164 }
1165
1166 return *p1 - *p2;
1167 }
1168
1169 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1170 static INLINE int strincmp(const char *p1, const char *p2, int n)
1171 {
1172 int c1 = 0, c2;
1173
1174 while ( n && *p1 && *p2 )
1175 {
1176 c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
1177 c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
1178 if ( (c1 -= c2) )
1179 return c1;
1180 p1++;
1181 p2++;
1182 n--;
1183 }
1184
1185 return n ? *p1 - *p2 : c1;
1186 }
1187 #endif
1188
1189 static INLINE int ifx_ptm_version(char *buf)
1190 {
1191 int len = 0;
1192 unsigned int major, minor;
1193
1194 ifx_ptm_get_fw_ver(&major, &minor);
1195
1196 len += sprintf(buf + len, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
1197 len += sprintf(buf + len, " PTM (E1) firmware version %d.%d\n", major, minor);
1198
1199 return len;
1200 }
1201
1202 static INLINE void check_parameters(void)
1203 {
1204 /* There is a delay between the PPE writing a descriptor and the */
1205 /* descriptor actually being stored in memory. The host has the same */
1206 /* delay when writing descriptors, so the PPE uses this value to */
1207 /* determine whether a write operation has taken effect. */
1208 if ( write_desc_delay < 0 )
1209 write_desc_delay = 0;
1210
1211 /* Because of the limited length field in the descriptors, the packet */
1212 /* size cannot be larger than 64K minus the overhead size. */
1213 if ( rx_max_packet_size < ETH_MIN_FRAME_LENGTH )
1214 rx_max_packet_size = ETH_MIN_FRAME_LENGTH;
1215 else if ( rx_max_packet_size > 65536 - 1 )
1216 rx_max_packet_size = 65536 - 1;
1217
1218 if ( dma_rx_descriptor_length < 2 )
1219 dma_rx_descriptor_length = 2;
1220 if ( dma_tx_descriptor_length < 2 )
1221 dma_tx_descriptor_length = 2;
1222 }
1223
1224 static INLINE int init_priv_data(void)
1225 {
1226 void *p;
1227 int i;
1228 struct rx_descriptor rx_desc = {0};
1229 struct sk_buff *skb;
1230 volatile struct rx_descriptor *p_rx_desc;
1231 volatile struct tx_descriptor *p_tx_desc;
1232 struct sk_buff **ppskb;
1233
1234 // clear ptm private data structure
1235 memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));
1236
1237 // allocate memory for RX descriptors
1238 p = kzalloc(MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
1239 if ( p == NULL )
1240 return -1;
1241 dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT);
1242 g_ptm_priv_data.rx_desc_base = p;
1243 //p = (void *)((((unsigned int)p + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
1244
1245 // allocate memory for TX descriptors
1246 p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
1247 if ( p == NULL )
1248 return -1;
1249 dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT);
1250 g_ptm_priv_data.tx_desc_base = p;
1251
1252 // allocate memory for TX skb pointers
1253 p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4, GFP_KERNEL);
1254 if ( p == NULL )
1255 return -1;
1256 dma_cache_wback_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4);
1257 g_ptm_priv_data.tx_skb_base = p;
1258
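// align the descriptor bases to DESC_ALIGNMENT and access them through
// uncached KSEG1 so that CPU and PP32 see a coherent view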
1259 p_rx_desc = (volatile struct rx_descriptor *)((((unsigned int)g_ptm_priv_data.rx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
1260 p_tx_desc = (volatile struct tx_descriptor *)((((unsigned int)g_ptm_priv_data.tx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
1261 ppskb = (struct sk_buff **)(((unsigned int)g_ptm_priv_data.tx_skb_base + 3) & ~3);
1262 for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
1263 g_ptm_priv_data.itf[i].rx_desc = &p_rx_desc[i * dma_rx_descriptor_length];
1264 g_ptm_priv_data.itf[i].tx_desc = &p_tx_desc[i * dma_tx_descriptor_length];
1265 g_ptm_priv_data.itf[i].tx_skb = &ppskb[i * dma_tx_descriptor_length];
1266 }
1267
1268 rx_desc.own = 1;
1269 rx_desc.c = 0;
1270 rx_desc.sop = 1;
1271 rx_desc.eop = 1;
1272 rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
1273 rx_desc.id = 0;
1274 rx_desc.err = 0;
1275 rx_desc.datalen = rx_max_packet_size;
1276 for ( i = 0; i < MAX_ITF_NUMBER * dma_rx_descriptor_length; i++ ) {
1277 skb = alloc_skb_rx();
1278 if ( skb == NULL )
1279 return -1;
1280 rx_desc.dataptr = ((unsigned int)skb->data >> 2) & 0x0FFFFFFF;
1281 p_rx_desc[i] = rx_desc;
1282 }
1283
1284 return 0;
1285 }
1286
1287 static INLINE void clear_priv_data(void)
1288 {
1289 int i, j;
1290 struct sk_buff *skb;
1291
1292 for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
1293 if ( g_ptm_priv_data.itf[i].tx_skb != NULL ) {
1294 for ( j = 0; j < dma_tx_descriptor_length; j++ )
1295 if ( g_ptm_priv_data.itf[i].tx_skb[j] != NULL )
1296 dev_kfree_skb_any(g_ptm_priv_data.itf[i].tx_skb[j]);
1297 }
1298 if ( g_ptm_priv_data.itf[i].rx_desc != NULL ) {
1299 for ( j = 0; j < dma_rx_descriptor_length; j++ ) {
1300 if ( g_ptm_priv_data.itf[i].rx_desc[j].sop || g_ptm_priv_data.itf[i].rx_desc[j].eop ) { // descriptor initialized
1301 skb = get_skb_rx_pointer(g_ptm_priv_data.itf[i].rx_desc[j].dataptr);
1302 dev_kfree_skb_any(skb);
1303 }
1304 }
1305 }
1306 }
1307
1308 if ( g_ptm_priv_data.rx_desc_base != NULL )
1309 kfree(g_ptm_priv_data.rx_desc_base);
1310
1311 if ( g_ptm_priv_data.tx_desc_base != NULL )
1312 kfree(g_ptm_priv_data.tx_desc_base);
1313
1314 if ( g_ptm_priv_data.tx_skb_base != NULL )
1315 kfree(g_ptm_priv_data.tx_skb_base);
1316 }
1317
1318 static INLINE void init_tables(void)
1319 {
1320 int i;
1321 volatile unsigned int *p;
1322 struct wrx_dma_channel_config rx_config = {0};
1323 struct wtx_dma_channel_config tx_config = {0};
1324 struct wrx_port_cfg_status rx_port_cfg = { 0 };
1325 struct wtx_port_cfg tx_port_cfg = { 0 };
1326
1327 /*
1328 * CDM Block 1
1329 */
1330 IFX_REG_W32(CDM_CFG_RAM1_SET(0x00) | CDM_CFG_RAM0_SET(0x00), CDM_CFG); // CDM block 1 must be data memory and mapped to 0x5000 (dword addr)
1331 p = CDM_DATA_MEMORY(0, 0); // Clear CDM block 1
1332 for ( i = 0; i < CDM_DATA_MEMORY_DWLEN; i++, p++ )
1333 IFX_REG_W32(0, p);
1334
1335 /*
1336 * General Registers
1337 */
1338 IFX_REG_W32(write_desc_delay, CFG_WAN_WRDES_DELAY);
1339 IFX_REG_W32((1 << MAX_RX_DMA_CHANNEL_NUMBER) - 1, CFG_WRX_DMACH_ON);
1340 IFX_REG_W32((1 << MAX_TX_DMA_CHANNEL_NUMBER) - 1, CFG_WTX_DMACH_ON);
1341
1342 IFX_REG_W32(8, CFG_WRX_LOOK_BITTH); // WAN RX EFM-TC Looking Threshold
1343
1344 IFX_REG_W32(eth_efmtc_crc_cfg, CFG_ETH_EFMTC_CRC);
1345
1346 /*
1347 * WRX DMA Channel Configuration Table
1348 */
1349 rx_config.deslen = dma_rx_descriptor_length;
1350 rx_port_cfg.mfs = ETH_MAX_FRAME_LENGTH;
1351 rx_port_cfg.local_state = 0; // looking for sync
1352 rx_port_cfg.partner_state = 0; // partner receiver is out of sync
1353
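// desba holds the descriptor base as a 28-bit word address (address >> 2),
// the same format the descriptors themselves use for dataptr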
1354 for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ ) {
1355 rx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].rx_desc >> 2) & 0x0FFFFFFF;
1356 *WRX_DMA_CHANNEL_CONFIG(i) = rx_config;
1357
1358 rx_port_cfg.dmach = i;
1359 *WRX_PORT_CONFIG(i) = rx_port_cfg;
1360 }
1361
1362 /*
1363 * WTX DMA Channel Configuration Table
1364 */
1365 tx_config.deslen = dma_tx_descriptor_length;
1366 tx_port_cfg.tx_cwth1 = 5;
1367 tx_port_cfg.tx_cwth2 = 4;
1368
1369 for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ ) {
1370 tx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].tx_desc >> 2) & 0x0FFFFFFF;
1371 *WTX_DMA_CHANNEL_CONFIG(i) = tx_config;
1372
1373 *WTX_PORT_CONFIG(i) = tx_port_cfg;
1374 }
1375 }
1376
1377
1378
1379 /*
1380 * ####################################
1381 * Global Function
1382 * ####################################
1383 */
1384
1385 static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
1386 {
1387 int i;
1388
1389 g_showtime = 1;
1390
1391 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1392 netif_carrier_on(g_net_dev[i]);
1393
1394 printk("enter showtime\n");
1395
1396 return 0;
1397 }
1398
1399 static int ptm_showtime_exit(void)
1400 {
1401 int i;
1402
1403 if ( !g_showtime )
1404 return -1;
1405
1406 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1407 netif_carrier_off(g_net_dev[i]);
1408
1409 g_showtime = 0;
1410
1411 printk("leave showtime\n");
1412
1413 return 0;
1414 }
1415
1416
1417 static const struct of_device_id ltq_ptm_match[] = {
1418 #ifdef CONFIG_DANUBE
1419 { .compatible = "lantiq,ppe-danube", .data = NULL },
1420 #elif defined CONFIG_AMAZON_SE
1421 { .compatible = "lantiq,ppe-ase", .data = NULL },
1422 #elif defined CONFIG_AR9
1423 { .compatible = "lantiq,ppe-arx100", .data = NULL },
1424 #elif defined CONFIG_VR9
1425 { .compatible = "lantiq,ppe-xrx200", .data = NULL },
1426 #endif
1427 {},
1428 };
1429 MODULE_DEVICE_TABLE(of, ltq_ptm_match);
1430
1431 /*
1432 * ####################################
1433 * Init/Cleanup API
1434 * ####################################
1435 */
1436
1437 /*
1438 * Description:
1439 * Initialize global variables, PP32, communication structures, register IRQ
1440 * and register device.
1441 * Input:
1442 * none
1443 * Output:
1444 * 0 --- successful
1445 * else --- failure, usually a negative error code
1446 */
1447 static int ltq_ptm_probe(struct platform_device *pdev)
1448 {
1449 int ret;
1450 struct port_cell_info port_cell = {0};
1451 void *xdata_addr = NULL;
1452 int i;
1453 char ver_str[256];
1454
1455 check_parameters();
1456
1457 ret = init_priv_data();
1458 if ( ret != 0 ) {
1459 err("INIT_PRIV_DATA_FAIL");
1460 goto INIT_PRIV_DATA_FAIL;
1461 }
1462
1463 ifx_ptm_init_chip(pdev);
1464 init_tables();
1465
1466 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1467 g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], NET_NAME_UNKNOWN, ether_setup);
1468 if ( g_net_dev[i] == NULL )
1469 goto ALLOC_NETDEV_FAIL;
1470 ptm_setup(g_net_dev[i], i);
1471 }
1472
1473 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1474 ret = register_netdev(g_net_dev[i]);
1475 if ( ret != 0 )
1476 goto REGISTER_NETDEV_FAIL;
1477 }
1478
1479 /* register interrupt handler */
1480 ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, 0, "ptm_mailbox_isr", &g_ptm_priv_data);
1481 if ( ret ) {
1482 if ( ret == -EBUSY ) {
1483 err("IRQ may be occupied by another driver, please reconfigure to disable it.");
1484 }
1485 else {
1486 err("request_irq fail");
1487 }
1488 goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
1489 }
1490 disable_irq(PPE_MAILBOX_IGU1_INT);
1491
1492 ret = ifx_pp32_start(0);
1493 if ( ret ) {
1494 err("ifx_pp32_start fail!");
1495 goto PP32_START_FAIL;
1496 }
1497 IFX_REG_W32(0, MBOX_IGU1_IER);
1498 IFX_REG_W32(~0, MBOX_IGU1_ISRC);
1499
1500 enable_irq(PPE_MAILBOX_IGU1_INT);
1501
1502
1503 proc_file_create();
1504
1505 port_cell.port_num = 1;
1506 ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &xdata_addr);
1507 if ( g_showtime ) {
1508 ptm_showtime_enter(&port_cell, &xdata_addr);
1509 }
1510
1511 ifx_mei_atm_showtime_enter = ptm_showtime_enter;
1512 ifx_mei_atm_showtime_exit = ptm_showtime_exit;
1513
1514 ifx_ptm_version(ver_str);
1515 printk(KERN_INFO "%s", ver_str);
1516
1517 printk("ifxmips_ptm: PTM init succeed\n");
1518
1519 return 0;
1520
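/* error unwinding: each label below releases what was set up before the
 * failure, in reverse order of initialization */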
1521 PP32_START_FAIL:
1522 free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
1523 REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
1524 i = ARRAY_SIZE(g_net_dev);
1525 REGISTER_NETDEV_FAIL:
1526 while ( i-- )
1527 unregister_netdev(g_net_dev[i]);
1528 i = ARRAY_SIZE(g_net_dev);
1529 ALLOC_NETDEV_FAIL:
1530 while ( i-- ) {
1531 free_netdev(g_net_dev[i]);
1532 g_net_dev[i] = NULL;
1533 }
1534 INIT_PRIV_DATA_FAIL:
1535 clear_priv_data();
1536 printk("ifxmips_ptm: PTM init failed\n");
1537 return ret;
1538 }
1539
1540 /*
1541 * Description:
1542 * Release memory, free IRQ, and deregister device.
1543 * Input:
1544 * none
1545 * Output:
1546 * none
1547 */
1548 static int ltq_ptm_remove(struct platform_device *pdev)
1549 {
1550 int i;
1551
1552 ifx_mei_atm_showtime_enter = NULL;
1553 ifx_mei_atm_showtime_exit = NULL;
1554
1555 proc_file_delete();
1556
1557
1558 ifx_pp32_stop(0);
1559
1560 free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
1561
1562 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1563 unregister_netdev(g_net_dev[i]);
1564
1565 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1566 free_netdev(g_net_dev[i]);
1567 g_net_dev[i] = NULL;
1568 }
1569
1570 ifx_ptm_uninit_chip();
1571
1572 clear_priv_data();
1573
1574 return 0;
1575 }
1576
1577 static struct platform_driver ltq_ptm_driver = {
1578 .probe = ltq_ptm_probe,
1579 .remove = ltq_ptm_remove,
1580 .driver = {
1581 .name = "ptm",
1582 .owner = THIS_MODULE,
1583 .of_match_table = ltq_ptm_match,
1584 },
1585 };
1586
1587 module_platform_driver(ltq_ptm_driver);
1588
1589 MODULE_LICENSE("GPL");