layerscape: add 64b/32b target for ls1012ardb device
[openwrt/openwrt.git] / target / linux / layerscape / patches-4.4 / 7072-LS1012-Add-PPFE-driver-in-Linux.patch
1 From 0157efe2fbe2fe56c34727d326cd74284c06cbd5 Mon Sep 17 00:00:00 2001
2 From: Bhaskar Upadhaya <Bhaskar.Upadhaya@freescale.com>
3 Date: Wed, 24 Aug 2016 10:51:21 +0800
4 Subject: [PATCH 072/113] LS1012: Add PPFE driver in Linux
5
6 commit 7584b690d4c8e4e435c2e6abcdb38d6595a0c302
7 [context adjustment]
8 [don't apply fsl-ls1012a-rdb.dts and fsl-ls1012a.dtsi]
9 [Allow the PPFE driver to be selected as a module]
10
11 Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@freescale.com>
12 Integrated-by: Zhao Qiang <qiang.zhao@nxp.com>
13 Integrated-by: Yutang Jiang <yutang.jiang@nxp.com>
14 ---
15 drivers/staging/Kconfig | 2 +
16 drivers/staging/Makefile | 1 +
17 drivers/staging/fsl_ppfe/Kconfig | 5 +
18 drivers/staging/fsl_ppfe/Makefile | 44 +
19 drivers/staging/fsl_ppfe/config.h | 8 +
20 drivers/staging/fsl_ppfe/control_link.lds | 32 +
21 drivers/staging/fsl_ppfe/include/pfe/cbus.h | 88 +
22 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h | 55 +
23 .../staging/fsl_ppfe/include/pfe/cbus/class_csr.h | 242 ++
24 drivers/staging/fsl_ppfe/include/pfe/cbus/emac.h | 243 ++
25 .../staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h | 250 ++
26 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h | 78 +
27 drivers/staging/fsl_ppfe/include/pfe/cbus/gpt.h | 29 +
28 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h | 96 +
29 .../staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h | 51 +
30 .../staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h | 128 +
31 .../staging/fsl_ppfe/include/pfe/cbus/util_csr.h | 61 +
32 drivers/staging/fsl_ppfe/include/pfe/class.h | 133 +
33 drivers/staging/fsl_ppfe/include/pfe/class/ccu.h | 28 +
34 drivers/staging/fsl_ppfe/include/pfe/class/efet.h | 44 +
35 .../staging/fsl_ppfe/include/pfe/class/mac_hash.h | 55 +
36 drivers/staging/fsl_ppfe/include/pfe/class/perg.h | 39 +
37 .../staging/fsl_ppfe/include/pfe/class/vlan_hash.h | 46 +
38 drivers/staging/fsl_ppfe/include/pfe/gpt.h | 44 +
39 drivers/staging/fsl_ppfe/include/pfe/pe.h | 626 +++++
40 drivers/staging/fsl_ppfe/include/pfe/pfe.h | 444 +++
41 drivers/staging/fsl_ppfe/include/pfe/tmu.h | 68 +
42 .../staging/fsl_ppfe/include/pfe/tmu/phy_queue.h | 56 +
43 drivers/staging/fsl_ppfe/include/pfe/tmu/sched.h | 72 +
44 drivers/staging/fsl_ppfe/include/pfe/tmu/shaper.h | 37 +
45 drivers/staging/fsl_ppfe/include/pfe/uart.h | 31 +
46 drivers/staging/fsl_ppfe/include/pfe/util.h | 49 +
47 drivers/staging/fsl_ppfe/include/pfe/util/eape.h | 57 +
48 drivers/staging/fsl_ppfe/include/pfe/util/efet.h | 119 +
49 drivers/staging/fsl_ppfe/include/pfe/util/inq.h | 28 +
50 drivers/staging/fsl_ppfe/pfe_ctrl.c | 363 +++
51 drivers/staging/fsl_ppfe/pfe_ctrl.h | 111 +
52 drivers/staging/fsl_ppfe/pfe_ctrl_hal.c | 207 ++
53 drivers/staging/fsl_ppfe/pfe_ctrl_hal.h | 129 +
54 drivers/staging/fsl_ppfe/pfe_debugfs.c | 109 +
55 drivers/staging/fsl_ppfe/pfe_debugfs.h | 8 +
56 drivers/staging/fsl_ppfe/pfe_eth.c | 2956 ++++++++++++++++++++
57 drivers/staging/fsl_ppfe/pfe_eth.h | 384 +++
58 drivers/staging/fsl_ppfe/pfe_firmware.c | 322 +++
59 drivers/staging/fsl_ppfe/pfe_firmware.h | 41 +
60 drivers/staging/fsl_ppfe/pfe_hal.c | 2217 +++++++++++++++
61 drivers/staging/fsl_ppfe/pfe_hif.c | 939 +++++++
62 drivers/staging/fsl_ppfe/pfe_hif.h | 322 +++
63 drivers/staging/fsl_ppfe/pfe_hif_lib.c | 658 +++++
64 drivers/staging/fsl_ppfe/pfe_hif_lib.h | 219 ++
65 drivers/staging/fsl_ppfe/pfe_hw.c | 188 ++
66 drivers/staging/fsl_ppfe/pfe_hw.h | 32 +
67 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c | 341 +++
68 drivers/staging/fsl_ppfe/pfe_mod.c | 140 +
69 drivers/staging/fsl_ppfe/pfe_mod.h | 163 ++
70 drivers/staging/fsl_ppfe/pfe_perfmon.c | 175 ++
71 drivers/staging/fsl_ppfe/pfe_perfmon.h | 41 +
72 drivers/staging/fsl_ppfe/pfe_platform.c | 358 +++
73 drivers/staging/fsl_ppfe/pfe_sysfs.c | 855 ++++++
74 drivers/staging/fsl_ppfe/pfe_sysfs.h | 34 +
75 drivers/staging/fsl_ppfe/platform.h | 25 +
76 include/linux/skbuff.h | 11 +
77 net/core/skbuff.c | 84 +
78 63 files changed, 14821 insertions(+)
79 create mode 100644 drivers/staging/fsl_ppfe/Kconfig
80 create mode 100644 drivers/staging/fsl_ppfe/Makefile
81 create mode 100644 drivers/staging/fsl_ppfe/config.h
82 create mode 100644 drivers/staging/fsl_ppfe/control_link.lds
83 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus.h
84 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
85 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
86 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac.h
87 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
88 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
89 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpt.h
90 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
91 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
92 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
93 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
94 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/class.h
95 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/class/ccu.h
96 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/class/efet.h
97 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/class/mac_hash.h
98 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/class/perg.h
99 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/class/vlan_hash.h
100 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/gpt.h
101 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pe.h
102 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pfe.h
103 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/tmu.h
104 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/tmu/phy_queue.h
105 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/tmu/sched.h
106 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/tmu/shaper.h
107 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/uart.h
108 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/util.h
109 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/util/eape.h
110 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/util/efet.h
111 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/util/inq.h
112 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
113 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.h
114 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl_hal.c
115 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl_hal.h
116 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
117 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.h
118 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c
119 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.h
120 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c
121 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.h
122 create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c
123 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c
124 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.h
125 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c
126 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.h
127 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c
128 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.h
129 create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
130 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c
131 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.h
132 create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.c
133 create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.h
134 create mode 100644 drivers/staging/fsl_ppfe/pfe_platform.c
135 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c
136 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.h
137 create mode 100644 drivers/staging/fsl_ppfe/platform.h
138
139 --- a/drivers/staging/Kconfig
140 +++ b/drivers/staging/Kconfig
141 @@ -112,4 +112,6 @@ source "drivers/staging/wilc1000/Kconfig
142
143 source "drivers/staging/most/Kconfig"
144
145 +source "drivers/staging/fsl_ppfe/Kconfig"
146 +
147 endif # STAGING
148 --- a/drivers/staging/Makefile
149 +++ b/drivers/staging/Makefile
150 @@ -48,3 +48,4 @@ obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
151 obj-$(CONFIG_FSL_DPA) += fsl_qbman/
152 obj-$(CONFIG_WILC1000) += wilc1000/
153 obj-$(CONFIG_MOST) += most/
154 +obj-$(CONFIG_FSL_PPFE) += fsl_ppfe/
155 --- /dev/null
156 +++ b/drivers/staging/fsl_ppfe/Kconfig
157 @@ -0,0 +1,5 @@
158 +config FSL_PPFE
159 + tristate "Freescale PPFE Driver"
160 + default m
161 + help
162 + only compiled as module !
163 --- /dev/null
164 +++ b/drivers/staging/fsl_ppfe/Makefile
165 @@ -0,0 +1,44 @@
166 +#
167 +# Copyright (C) 2007 Freescale Semiconductor, Inc.
168 +#
169 +# This program is free software; you can redistribute it and/or modify
170 +# it under the terms of the GNU General Public License as published by
171 +# the Free Software Foundation; either version 2 of the License, or
172 +# (at your option) any later version.
173 +#
174 +# This program is distributed in the hope that it will be useful,
175 +# but WITHOUT ANY WARRANTY; without even the implied warranty of
176 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
177 +# GNU General Public License for more details.
178 +#
179 +# You should have received a copy of the GNU General Public License
180 +# along with this program; if not, write to the Free Software
181 +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
182 +
183 +
184 +all: modules
185 +
186 +modules clean:
187 + make CROSS_COMPILE=$(CROSS_COMPILE) ARCH=$(ARCH) -C $(KERNELDIR) M=`pwd` $@
188 +
189 +EXTRA_CFLAGS += -I$(src)/include -I$(src) -DCOMCERTO_2000 -DCONFIG_PLATFORM_LS1012A -DGEMAC_MTIP -DCONFIG_UTIL_DISABLED
190 +
191 +EXTRA_LDFLAGS += -T$(srctree)/$(src)/control_link.lds
192 +
193 +#only compiled as module !
194 +obj-$(CONFIG_FSL_PPFE) += pfe.o
195 +
196 +pfe-y += pfe_mod.o \
197 + pfe_hw.o \
198 + pfe_firmware.o \
199 + pfe_ctrl.o \
200 + pfe_ctrl_hal.o \
201 + pfe_hif.o \
202 + pfe_hif_lib.o\
203 + pfe_eth.o \
204 + pfe_perfmon.o \
205 + pfe_sysfs.o \
206 + pfe_debugfs.o \
207 + pfe_ls1012a_platform.o \
208 + pfe_hal.o \
209 +
210 --- /dev/null
211 +++ b/drivers/staging/fsl_ppfe/config.h
212 @@ -0,0 +1,8 @@
213 +#ifndef _CONFIG_H_
214 +#define _CONFIG_H_
215 +#define CFG_WIFI_OFFLOAD (1 << 1)
216 +#define CFG_ICC (1 << 11)
217 +#define CFG_RTP (1 << 14)
218 +#define CFG_ELLIPTIC (1 << 15)
219 +#define CFG_ALL (0 | CFG_WIFI_OFFLOAD | CFG_ICC | CFG_RTP | CFG_ELLIPTIC )
220 +#endif /* _CONFIG_H_ */
221 --- /dev/null
222 +++ b/drivers/staging/fsl_ppfe/control_link.lds
223 @@ -0,0 +1,32 @@
224 +/*
225 + *
226 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
227 + *
228 + * This program is free software; you can redistribute it and/or modify
229 + * it under the terms of the GNU General Public License as published by
230 + * the Free Software Foundation; either version 2 of the License, or
231 + * (at your option) any later version.
232 + *
233 + * This program is distributed in the hope that it will be useful,
234 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
235 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
236 + * GNU General Public License for more details.
237 + *
238 + * You should have received a copy of the GNU General Public License
239 + * along with this program; if not, write to the Free Software
240 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
241 + */
242 +
243 +SECTIONS
244 +{
245 + .class_dmem_sh : SUBALIGN(8) {
246 + __class_dmem_sh = .;
247 + *(SORT(.class_dmem_sh_*))
248 + }
249 +
250 + .tmu_dmem_sh : SUBALIGN(8) {
251 + __tmu_dmem_sh = .;
252 + *(SORT(.tmu_dmem_sh_*))
253 + }
254 +
255 +}
256 --- /dev/null
257 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
258 @@ -0,0 +1,88 @@
259 +/*
260 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
261 + *
262 + * This program is free software; you can redistribute it and/or
263 + * modify it under the terms of the GNU General Public License
264 + * as published by the Free Software Foundation; either version 2
265 + * of the License, or (at your option) any later version.
266 + *
267 + * This program is distributed in the hope that it will be useful,
268 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
269 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
270 + * GNU General Public License for more details.
271 + *
272 + * You should have received a copy of the GNU General Public License
273 + * along with this program; if not, write to the Free Software
274 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
275 + *
276 +*/
277 +#ifndef _CBUS_H_
278 +#define _CBUS_H_
279 +
280 +#define EMAC1_BASE_ADDR (CBUS_BASE_ADDR + 0x200000)
281 +#define EGPI1_BASE_ADDR (CBUS_BASE_ADDR + 0x210000)
282 +#define EMAC2_BASE_ADDR (CBUS_BASE_ADDR + 0x220000)
283 +#define EGPI2_BASE_ADDR (CBUS_BASE_ADDR + 0x230000)
284 +#define BMU1_BASE_ADDR (CBUS_BASE_ADDR + 0x240000)
285 +#define BMU2_BASE_ADDR (CBUS_BASE_ADDR + 0x250000)
286 +#define ARB_BASE_ADDR (CBUS_BASE_ADDR + 0x260000) /* FIXME not documented */
287 +#define DDR_CONFIG_BASE_ADDR (CBUS_BASE_ADDR + 0x270000) /* FIXME not documented */
288 +#define HIF_BASE_ADDR (CBUS_BASE_ADDR + 0x280000)
289 +#define HGPI_BASE_ADDR (CBUS_BASE_ADDR + 0x290000)
290 +#define LMEM_BASE_ADDR (CBUS_BASE_ADDR + 0x300000)
291 +#define LMEM_SIZE 0x10000
292 +#define LMEM_END (LMEM_BASE_ADDR + LMEM_SIZE)
293 +#define TMU_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x310000)
294 +#define CLASS_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x320000)
295 +#if defined(CONFIG_PLATFORM_C2000)
296 +#define EMAC3_BASE_ADDR (CBUS_BASE_ADDR + 0x330000)
297 +#define EGPI3_BASE_ADDR (CBUS_BASE_ADDR + 0x340000)
298 +#endif
299 +#define HIF_NOCPY_BASE_ADDR (CBUS_BASE_ADDR + 0x350000)
300 +#define UTIL_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x360000)
301 +#define CBUS_GPT_BASE_ADDR (CBUS_BASE_ADDR + 0x370000)
302 +
303 +#define IS_LMEM(addr, len) (((unsigned long)(addr) >= (unsigned long)LMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= (unsigned long)LMEM_END))
304 +
305 +/**
306 +* \defgroup XXX_MEM_ACCESS_ADDR PE memory access through CSR
307 +* XXX_MEM_ACCESS_ADDR register bit definitions.
308 +* @{
309 +*/
310 +#define PE_MEM_ACCESS_WRITE (1<<31) /**< Internal Memory Write. */
311 +#define PE_MEM_ACCESS_IMEM (1<<15)
312 +#define PE_MEM_ACCESS_DMEM (1<<16)
313 +#define PE_MEM_ACCESS_BYTE_ENABLE(offset,size)	(((((1 << (size)) - 1) << (4 - (offset) - (size))) & 0xf) << 24)	/**< Byte Enables of the Internal memory access. These are interpreted in BE */
314 +// @}
315 +#if defined(CONFIG_PLATFORM_LS1012A)
316 +#include "cbus/emac_mtip.h"
317 +#else
318 +#include "cbus/emac.h"
319 +#endif //CONFIG_PLATFORM_LS1012A
320 +#include "cbus/gpi.h"
321 +#include "cbus/bmu.h"
322 +#include "cbus/hif.h"
323 +#include "cbus/tmu_csr.h"
324 +#include "cbus/class_csr.h"
325 +#include "cbus/hif_nocpy.h"
326 +#include "cbus/util_csr.h"
327 +#include "cbus/gpt.h"
328 +
329 +
330 +/* PFE cores states */
331 +#define CORE_DISABLE 0x00000000
332 +#define CORE_ENABLE 0x00000001
333 +#define CORE_SW_RESET 0x00000002
334 +
335 +/* LMEM defines */
336 +#define LMEM_HDR_SIZE 0x0010
337 +#define LMEM_BUF_SIZE_LN2 0x7
338 +#define LMEM_BUF_SIZE (1 << LMEM_BUF_SIZE_LN2)
339 +
340 +/* DDR defines */
341 +#define DDR_HDR_SIZE 0x0100
342 +#define DDR_BUF_SIZE_LN2 0xb
343 +#define DDR_BUF_SIZE (1 << DDR_BUF_SIZE_LN2)
344 +
345 +
346 +#endif /* _CBUS_H_ */
347 --- /dev/null
348 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
349 @@ -0,0 +1,55 @@
350 +/*
351 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
352 + *
353 + * This program is free software; you can redistribute it and/or
354 + * modify it under the terms of the GNU General Public License
355 + * as published by the Free Software Foundation; either version 2
356 + * of the License, or (at your option) any later version.
357 + *
358 + * This program is distributed in the hope that it will be useful,
359 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
360 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
361 + * GNU General Public License for more details.
362 + *
363 + * You should have received a copy of the GNU General Public License
364 + * along with this program; if not, write to the Free Software
365 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
366 + *
367 +*/
368 +#ifndef _BMU_H_
369 +#define _BMU_H_
370 +
371 +#define BMU_VERSION 0x000
372 +#define BMU_CTRL 0x004
373 +#define BMU_UCAST_CONFIG 0x008
374 +#define BMU_UCAST_BASE_ADDR 0x00c
375 +#define BMU_BUF_SIZE 0x010
376 +#define BMU_BUF_CNT 0x014
377 +#define BMU_THRES 0x018
378 +#define BMU_INT_SRC 0x020
379 +#define BMU_INT_ENABLE 0x024
380 +#define BMU_ALLOC_CTRL 0x030
381 +#define BMU_FREE_CTRL 0x034
382 +#define BMU_FREE_ERR_ADDR 0x038
383 +#define BMU_CURR_BUF_CNT 0x03c
384 +#define BMU_MCAST_CNT 0x040
385 +#define BMU_MCAST_ALLOC_CTRL 0x044
386 +#define BMU_REM_BUF_CNT 0x048
387 +#define BMU_LOW_WATERMARK 0x050
388 +#define BMU_HIGH_WATERMARK 0x054
389 +#define BMU_INT_MEM_ACCESS 0x100
390 +
391 +typedef struct {
392 + unsigned long baseaddr;
393 + u32 count;
394 + u32 size;
395 +} BMU_CFG;
396 +
397 +
398 +#define BMU1_BUF_SIZE LMEM_BUF_SIZE_LN2
399 +#define BMU2_BUF_SIZE DDR_BUF_SIZE_LN2
400 +
401 +#define BMU2_MCAST_ALLOC_CTRL BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL
402 +
403 +#endif /* _BMU_H_ */
404 +
405 --- /dev/null
406 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
407 @@ -0,0 +1,242 @@
408 +/*
409 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
410 + *
411 + * This program is free software; you can redistribute it and/or
412 + * modify it under the terms of the GNU General Public License
413 + * as published by the Free Software Foundation; either version 2
414 + * of the License, or (at your option) any later version.
415 + *
416 + * This program is distributed in the hope that it will be useful,
417 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
418 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
419 + * GNU General Public License for more details.
420 + *
421 + * You should have received a copy of the GNU General Public License
422 + * along with this program; if not, write to the Free Software
423 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
424 + *
425 +*/
426 +#ifndef _CLASS_CSR_H_
427 +#define _CLASS_CSR_H_
428 +
429 +/** @file class_csr.h.
430 + * class_csr - block containing all the classifier control and status register. Mapped on CBUS and accessible from all PE's and ARM.
431 + */
432 +
433 +
434 +#define CLASS_VERSION (CLASS_CSR_BASE_ADDR + 0x000)
435 +#define CLASS_TX_CTRL (CLASS_CSR_BASE_ADDR + 0x004)
436 +#define CLASS_INQ_PKTPTR (CLASS_CSR_BASE_ADDR + 0x010)
437 +#define CLASS_HDR_SIZE (CLASS_CSR_BASE_ADDR + 0x014) /**< (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */
438 +#define CLASS_HDR_SIZE_LMEM(off) ((off) & 0x3f) /**< LMEM header size for the Classifier block.\ Data in the LMEM is written from this offset. */
439 +#define CLASS_HDR_SIZE_DDR(off) (((off) & 0x1ff) << 16) /**< DDR header size for the Classifier block.\ Data in the DDR is written from this offset. */
440 +
441 +#define CLASS_PE0_QB_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x020) /**< DMEM address of first [15:0] and second [31:16] buffers on QB side. */
442 +#define CLASS_PE0_QB_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x024) /**< DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */
443 +
444 +#define CLASS_PE0_RO_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x060) /**< DMEM address of first [15:0] and second [31:16] buffers on RO side. */
445 +#define CLASS_PE0_RO_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x064) /**< DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */
446 +
447 +/** @name Class PE memory access. Allows external PE's and HOST to read/write PMEM/DMEM memory ranges for each classifier PE.
448 + */
449 +//@{
450 +#define CLASS_MEM_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x100) /**< {sr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]}, See \ref XXX_MEM_ACCESS_ADDR for details. */
451 +#define CLASS_MEM_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x104) /**< Internal Memory Access Write Data [31:0] */
452 +#define CLASS_MEM_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x108) /**< Internal Memory Access Read Data [31:0] */
453 +//@}
454 +#define CLASS_TM_INQ_ADDR (CLASS_CSR_BASE_ADDR + 0x114)
455 +#define CLASS_PE_STATUS (CLASS_CSR_BASE_ADDR + 0x118)
456 +
457 +#define CLASS_PHY1_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x11c)
458 +#define CLASS_PHY1_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x120)
459 +#define CLASS_PHY1_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x124)
460 +#define CLASS_PHY1_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x128)
461 +#define CLASS_PHY1_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x12c)
462 +#define CLASS_PHY1_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x130)
463 +#define CLASS_PHY1_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x134)
464 +#define CLASS_PHY1_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x138)
465 +#define CLASS_PHY1_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x13c)
466 +#define CLASS_PHY1_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x140)
467 +#define CLASS_PHY2_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x144)
468 +#define CLASS_PHY2_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x148)
469 +#define CLASS_PHY2_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x14c)
470 +#define CLASS_PHY2_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x150)
471 +#define CLASS_PHY2_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x154)
472 +#define CLASS_PHY2_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x158)
473 +#define CLASS_PHY2_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x15c)
474 +#define CLASS_PHY2_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x160)
475 +#define CLASS_PHY2_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x164)
476 +#define CLASS_PHY2_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x168)
477 +#define CLASS_PHY3_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x16c)
478 +#define CLASS_PHY3_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x170)
479 +#define CLASS_PHY3_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x174)
480 +#define CLASS_PHY3_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x178)
481 +#define CLASS_PHY3_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x17c)
482 +#define CLASS_PHY3_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x180)
483 +#define CLASS_PHY3_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x184)
484 +#define CLASS_PHY3_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x188)
485 +#define CLASS_PHY3_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x18c)
486 +#define CLASS_PHY3_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x190)
487 +#define CLASS_PHY1_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x194)
488 +#define CLASS_PHY1_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x198)
489 +#define CLASS_PHY1_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x19c)
490 +#define CLASS_PHY1_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a0)
491 +#define CLASS_PHY2_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a4)
492 +#define CLASS_PHY2_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a8)
493 +#define CLASS_PHY2_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1ac)
494 +#define CLASS_PHY2_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b0)
495 +#define CLASS_PHY3_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b4)
496 +#define CLASS_PHY3_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b8)
497 +#define CLASS_PHY3_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1bc)
498 +#define CLASS_PHY3_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c0)
499 +#define CLASS_PHY4_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c4)
500 +#define CLASS_PHY4_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c8)
501 +#define CLASS_PHY4_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1cc)
502 +#define CLASS_PHY4_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1d0)
503 +#define CLASS_PHY4_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d4)
504 +#define CLASS_PHY4_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d8)
505 +#define CLASS_PHY4_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1dc)
506 +#define CLASS_PHY4_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e0)
507 +#define CLASS_PHY4_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x1e4)
508 +#define CLASS_PHY4_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e8)
509 +#define CLASS_PHY4_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x1ec)
510 +#define CLASS_PHY4_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x1f0)
511 +#define CLASS_PHY4_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f4)
512 +#define CLASS_PHY4_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f8)
513 +
514 +#define CLASS_PE_SYS_CLK_RATIO (CLASS_CSR_BASE_ADDR + 0x200)
515 +#define CLASS_AFULL_THRES (CLASS_CSR_BASE_ADDR + 0x204)
516 +#define CLASS_GAP_BETWEEN_READS (CLASS_CSR_BASE_ADDR + 0x208)
517 +#define CLASS_MAX_BUF_CNT (CLASS_CSR_BASE_ADDR + 0x20c)
518 +#define CLASS_TSQ_FIFO_THRES (CLASS_CSR_BASE_ADDR + 0x210)
519 +#define CLASS_TSQ_MAX_CNT (CLASS_CSR_BASE_ADDR + 0x214)
520 +#define CLASS_IRAM_DATA_0 (CLASS_CSR_BASE_ADDR + 0x218)
521 +#define CLASS_IRAM_DATA_1 (CLASS_CSR_BASE_ADDR + 0x21c)
522 +#define CLASS_IRAM_DATA_2 (CLASS_CSR_BASE_ADDR + 0x220)
523 +#define CLASS_IRAM_DATA_3 (CLASS_CSR_BASE_ADDR + 0x224)
524 +
525 +#define CLASS_BUS_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x228)
526 +
527 +#define CLASS_BUS_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x22c)
528 +#define CLASS_BUS_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x230)
529 +
530 +#define CLASS_ROUTE_HASH_ENTRY_SIZE (CLASS_CSR_BASE_ADDR + 0x234) /**< (route_entry_size[9:0], route_hash_size[23:16] (this is actually ln2(size))) */
531 +#define CLASS_ROUTE_ENTRY_SIZE(size) ((size) & 0x1ff)
532 +#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16)
533 +
534 +#define CLASS_ROUTE_TABLE_BASE (CLASS_CSR_BASE_ADDR + 0x238)
535 +
536 +#define CLASS_ROUTE_MULTI (CLASS_CSR_BASE_ADDR + 0x23c)
537 +#define CLASS_SMEM_OFFSET (CLASS_CSR_BASE_ADDR + 0x240)
538 +#define CLASS_LMEM_BUF_SIZE (CLASS_CSR_BASE_ADDR + 0x244)
539 +#define CLASS_VLAN_ID (CLASS_CSR_BASE_ADDR + 0x248)
540 +#define CLASS_BMU1_BUF_FREE (CLASS_CSR_BASE_ADDR + 0x24c)
541 +#define CLASS_USE_TMU_INQ (CLASS_CSR_BASE_ADDR + 0x250)
542 +#define CLASS_VLAN_ID1 (CLASS_CSR_BASE_ADDR + 0x254)
543 +
544 +#define CLASS_BUS_ACCESS_BASE (CLASS_CSR_BASE_ADDR + 0x258)
545 +#define CLASS_BUS_ACCESS_BASE_MASK (0xFF000000) //bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE
546 +
547 +#define CLASS_HIF_PARSE (CLASS_CSR_BASE_ADDR + 0x25c)
548 +
549 +#define CLASS_HOST_PE0_GP (CLASS_CSR_BASE_ADDR + 0x260)
550 +#define CLASS_PE0_GP (CLASS_CSR_BASE_ADDR + 0x264)
551 +#define CLASS_HOST_PE1_GP (CLASS_CSR_BASE_ADDR + 0x268)
552 +#define CLASS_PE1_GP (CLASS_CSR_BASE_ADDR + 0x26c)
553 +#define CLASS_HOST_PE2_GP (CLASS_CSR_BASE_ADDR + 0x270)
554 +#define CLASS_PE2_GP (CLASS_CSR_BASE_ADDR + 0x274)
555 +#define CLASS_HOST_PE3_GP (CLASS_CSR_BASE_ADDR + 0x278)
556 +#define CLASS_PE3_GP (CLASS_CSR_BASE_ADDR + 0x27c)
557 +#define CLASS_HOST_PE4_GP (CLASS_CSR_BASE_ADDR + 0x280)
558 +#define CLASS_PE4_GP (CLASS_CSR_BASE_ADDR + 0x284)
559 +#define CLASS_HOST_PE5_GP (CLASS_CSR_BASE_ADDR + 0x288)
560 +#define CLASS_PE5_GP (CLASS_CSR_BASE_ADDR + 0x28c)
561 +
562 +#define CLASS_PE_INT_SRC (CLASS_CSR_BASE_ADDR + 0x290)
563 +#define CLASS_PE_INT_ENABLE (CLASS_CSR_BASE_ADDR + 0x294)
564 +
565 +#define CLASS_TPID0_TPID1 (CLASS_CSR_BASE_ADDR + 0x298)
566 +#define CLASS_TPID2 (CLASS_CSR_BASE_ADDR + 0x29c)
567 +
568 +#define CLASS_L4_CHKSUM_ADDR (CLASS_CSR_BASE_ADDR + 0x2a0)
569 +
570 +#define CLASS_PE0_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a4)
571 +#define CLASS_PE1_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a8)
572 +#define CLASS_PE2_DEBUG (CLASS_CSR_BASE_ADDR + 0x2ac)
573 +#define CLASS_PE3_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b0)
574 +#define CLASS_PE4_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b4)
575 +#define CLASS_PE5_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b8)
576 +
577 +#define CLASS_STATE (CLASS_CSR_BASE_ADDR + 0x2bc)
578 +
579 +/* CLASS defines */
580 +#define CLASS_PBUF_SIZE 0x100 /* Fixed by hardware */
581 +#define CLASS_PBUF_HEADER_OFFSET 0x80 /* Can be configured */
582 +
583 +#define CLASS_PBUF0_BASE_ADDR 0x000 /* Can be configured */
584 +#define CLASS_PBUF1_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE) /* Can be configured */
585 +#define CLASS_PBUF2_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE) /* Can be configured */
586 +#define CLASS_PBUF3_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE) /* Can be configured */
587 +
588 +#define CLASS_PBUF0_HEADER_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_HEADER_OFFSET)
589 +#define CLASS_PBUF1_HEADER_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_HEADER_OFFSET)
590 +#define CLASS_PBUF2_HEADER_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_HEADER_OFFSET)
591 +#define CLASS_PBUF3_HEADER_BASE_ADDR (CLASS_PBUF3_BASE_ADDR + CLASS_PBUF_HEADER_OFFSET)
592 +
593 +#define CLASS_PE0_RO_DM_ADDR0_VAL ((CLASS_PBUF1_BASE_ADDR << 16) | CLASS_PBUF0_BASE_ADDR)
594 +#define CLASS_PE0_RO_DM_ADDR1_VAL ((CLASS_PBUF3_BASE_ADDR << 16) | CLASS_PBUF2_BASE_ADDR)
595 +
596 +#define CLASS_PE0_QB_DM_ADDR0_VAL ((CLASS_PBUF1_HEADER_BASE_ADDR << 16) | CLASS_PBUF0_HEADER_BASE_ADDR)
597 +#define CLASS_PE0_QB_DM_ADDR1_VAL ((CLASS_PBUF3_HEADER_BASE_ADDR << 16) | CLASS_PBUF2_HEADER_BASE_ADDR)
598 +
599 +#define CLASS_ROUTE_SIZE 128
600 +#define CLASS_MAX_ROUTE_SIZE 256
601 +#define CLASS_ROUTE_HASH_BITS 20
602 +#define CLASS_ROUTE_HASH_MASK ((1 << CLASS_ROUTE_HASH_BITS) - 1)
603 +
604 +#define CLASS_ROUTE0_BASE_ADDR 0x400 /* Can be configured */
605 +#define CLASS_ROUTE1_BASE_ADDR (CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE) /* Can be configured */
606 +#define CLASS_ROUTE2_BASE_ADDR (CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE) /* Can be configured */
607 +#define CLASS_ROUTE3_BASE_ADDR (CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE) /* Can be configured */
608 +
609 +#define CLASS_SA_SIZE 128
610 +#define CLASS_IPSEC_SA0_BASE_ADDR 0x600
611 +#define CLASS_IPSEC_SA1_BASE_ADDR (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE) /* not used */
612 +#define CLASS_IPSEC_SA2_BASE_ADDR (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE) /* not used */
613 +#define CLASS_IPSEC_SA3_BASE_ADDR (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE) /* not used */
614 +
615 +/* generic purpose free dmem buffer, last portion of 2K dmem pbuf */
616 +#define CLASS_GP_DMEM_BUF_SIZE (2048 - (CLASS_PBUF_SIZE*4) - (CLASS_ROUTE_SIZE*4) - (CLASS_SA_SIZE))
617 +#define CLASS_GP_DMEM_BUF ((void *)(CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE))
618 +
619 +
620 +#define TWO_LEVEL_ROUTE (1 << 0)
621 +#define PHYNO_IN_HASH (1 << 1)
622 +#define HW_ROUTE_FETCH (1 << 3)
623 +#define HW_BRIDGE_FETCH (1 << 5)
624 +#define IP_ALIGNED (1 << 6)
625 +#define ARC_HIT_CHECK_EN (1 << 7)
626 +#define CLASS_TOE (1 << 11)
627 +#define HASH_NORMAL (0 << 12)
628 +#define HASH_CRC_PORT (1 << 12)
629 +#define HASH_CRC_IP (2 << 12)
630 +#define HASH_CRC_PORT_IP (3 << 12)
631 +#define QB2BUS_LE (1 << 15)
632 +
633 +#define TCP_CHKSUM_DROP (1 << 0)
634 +#define UDP_CHKSUM_DROP (1 << 1)
635 +#define IPV4_CHKSUM_DROP (1 << 9)
636 +
637 +/*CLASS_HIF_PARSE bits*/
638 +#define HIF_PKT_CLASS_EN (1 << 0)
639 +#define HIF_PKT_OFFSET(ofst) ((ofst&0xF) << 1)
640 +
641 +typedef struct {
642 + u32 toe_mode;
643 + unsigned long route_table_baseaddr;
644 + u32 route_table_hash_bits;
645 + u32 pe_sys_clk_ratio;
646 + u32 resume;
647 +} CLASS_CFG;
648 +
649 +#endif /* _CLASS_CSR_H_ */
650 --- /dev/null
651 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac.h
652 @@ -0,0 +1,243 @@
653 +/*
654 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
655 + *
656 + * This program is free software; you can redistribute it and/or
657 + * modify it under the terms of the GNU General Public License
658 + * as published by the Free Software Foundation; either version 2
659 + * of the License, or (at your option) any later version.
660 + *
661 + * This program is distributed in the hope that it will be useful,
662 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
663 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
664 + * GNU General Public License for more details.
665 + *
666 + * You should have received a copy of the GNU General Public License
667 + * along with this program; if not, write to the Free Software
668 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
669 + *
670 +*/
671 +#ifndef _EMAC_H_
672 +#define _EMAC_H_
673 +
674 +#define EMAC_NETWORK_CONTROL 0x000
675 +#define EMAC_NETWORK_CONFIG 0x004
676 +#define EMAC_NETWORK_STATUS 0x008
677 +#define EMAC_DMA_CONFIG 0x010
678 +
679 +#define EMAC_PHY_MANAGEMENT 0x034
680 +
681 +#define EMAC_HASH_BOT 0x080
682 +#define EMAC_HASH_TOP 0x084
683 +
684 +#define EMAC_SPEC1_ADD_BOT 0x088
685 +#define EMAC_SPEC1_ADD_TOP 0x08c
686 +#define EMAC_SPEC2_ADD_BOT 0x090
687 +#define EMAC_SPEC2_ADD_TOP 0x094
688 +#define EMAC_SPEC3_ADD_BOT 0x098
689 +#define EMAC_SPEC3_ADD_TOP 0x09c
690 +#define EMAC_SPEC4_ADD_BOT 0x0a0
691 +#define EMAC_SPEC4_ADD_TOP 0x0a4
692 +#define EMAC_WOL 0x0b8
693 +
694 +#define EMAC_STACKED_VLAN_REG 0x0c0
695 +
696 +#define EMAC_SPEC1_ADD_MASK_BOT 0x0c8
697 +#define EMAC_SPEC1_ADD_MASK_TOP 0x0cc
698 +
699 +#define EMAC_RMON_BASE_OFST 0x100
700 +
701 +#define EMAC_SPEC5_ADD_BOT 0x300
702 +#define EMAC_SPEC5_ADD_TOP 0x304
703 +#define EMAC_SPEC6_ADD_BOT 0x308
704 +#define EMAC_SPEC6_ADD_TOP 0x30c
705 +#define EMAC_SPEC7_ADD_BOT 0x310
706 +#define EMAC_SPEC7_ADD_TOP 0x314
707 +#define EMAC_SPEC8_ADD_BOT 0x318
708 +#define EMAC_SPEC8_ADD_TOP 0x31c
709 +#define EMAC_SPEC9_ADD_BOT 0x320
710 +#define EMAC_SPEC9_ADD_TOP 0x324
711 +#define EMAC_SPEC10_ADD_BOT 0x328
712 +#define EMAC_SPEC10_ADD_TOP 0x32c
713 +#define EMAC_SPEC11_ADD_BOT 0x330
714 +#define EMAC_SPEC11_ADD_TOP 0x334
715 +#define EMAC_SPEC12_ADD_BOT 0x338
716 +#define EMAC_SPEC12_ADD_TOP 0x33c
717 +#define EMAC_SPEC13_ADD_BOT 0x340
718 +#define EMAC_SPEC13_ADD_TOP 0x344
719 +#define EMAC_SPEC14_ADD_BOT 0x348
720 +#define EMAC_SPEC14_ADD_TOP 0x34c
721 +#define EMAC_SPEC15_ADD_BOT 0x350
722 +#define EMAC_SPEC15_ADD_TOP 0x354
723 +#define EMAC_SPEC16_ADD_BOT 0x358
724 +#define EMAC_SPEC16_ADD_TOP 0x35c
725 +#define EMAC_SPEC17_ADD_BOT 0x360
726 +#define EMAC_SPEC17_ADD_TOP 0x364
727 +#define EMAC_SPEC18_ADD_BOT 0x368
728 +#define EMAC_SPEC18_ADD_TOP 0x36c
729 +#define EMAC_SPEC19_ADD_BOT 0x370
730 +#define EMAC_SPEC19_ADD_TOP 0x374
731 +#define EMAC_SPEC20_ADD_BOT 0x378
732 +#define EMAC_SPEC20_ADD_TOP 0x37c
733 +#define EMAC_SPEC21_ADD_BOT 0x380
734 +#define EMAC_SPEC21_ADD_TOP 0x384
735 +#define EMAC_SPEC22_ADD_BOT 0x388
736 +#define EMAC_SPEC22_ADD_TOP 0x38c
737 +#define EMAC_SPEC23_ADD_BOT 0x390
738 +#define EMAC_SPEC23_ADD_TOP 0x394
739 +#define EMAC_SPEC24_ADD_BOT 0x398
740 +#define EMAC_SPEC24_ADD_TOP 0x39c
741 +#define EMAC_SPEC25_ADD_BOT 0x3a0
742 +#define EMAC_SPEC25_ADD_TOP 0x3a4
743 +#define EMAC_SPEC26_ADD_BOT 0x3a8
744 +#define EMAC_SPEC26_ADD_TOP 0x3ac
745 +#define EMAC_SPEC27_ADD_BOT 0x3b0
746 +#define EMAC_SPEC27_ADD_TOP 0x3b4
747 +#define EMAC_SPEC28_ADD_BOT 0x3b8
748 +#define EMAC_SPEC28_ADD_TOP 0x3bc
749 +#define EMAC_SPEC29_ADD_BOT 0x3c0
750 +#define EMAC_SPEC29_ADD_TOP 0x3c4
751 +#define EMAC_SPEC30_ADD_BOT 0x3c8
752 +#define EMAC_SPEC30_ADD_TOP 0x3cc
753 +#define EMAC_SPEC31_ADD_BOT 0x3d0
754 +#define EMAC_SPEC31_ADD_TOP 0x3d4
755 +#define EMAC_SPEC32_ADD_BOT 0x3d8
756 +#define EMAC_SPEC32_ADD_TOP 0x3dc
757 +
758 +#define EMAC_SPEC_ADDR_MAX 32
759 +
760 +#define EMAC_CONTROL 0x7a0
761 +
762 +/* GEMAC definitions and settings */
763 +
764 +#define EMAC_PORT_0 0
765 +#define EMAC_PORT_1 1
766 +#define EMAC_PORT_2 2
767 +
768 +/* The possible operating speeds of the MAC, currently supporting 10, 100 and
769 + * 1000Mb modes.
770 + */
771 +typedef enum {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS} MAC_SPEED;
772 +
773 +#define GMII 1
774 +#define MII 2
775 +#define RMII 3
776 +#define RGMII 4
777 +#define SGMII 5
778 +
779 +#define DUP_HALF 0x00
780 +#define DUP_FULL 0x01
781 +
782 +/* EMAC_NETWORK_CONTROL bits definition */
783 +#define EMAC_LB_PHY (1 << 0)
784 +#define EMAC_LB_MAC (1 << 1)
785 +#define EMAC_RX_ENABLE (1 << 2)
786 +#define EMAC_TX_ENABLE (1 << 3)
787 +#define EMAC_MDIO_EN (1 << 4) /* Enable MDIO port */
788 +
789 +/* WoL (Wake on Lan bit definition) */
790 +#define EMAC_WOL_MAGIC (1 << 16)
791 +#define EMAC_WOL_ARP (1 << 17)
792 +#define EMAC_WOL_SPEC_ADDR (1 << 18)
793 +#define EMAC_WOL_MULTI (1 << 19)
794 +
795 +/* EMAC_NETWORK_CONFIG bits definition */
796 +#define EMAC_SPEED_100 (1 << 0)
797 +#define EMAC_HALF_DUP (0 << 1)
798 +#define EMAC_FULL_DUP (1 << 1)
799 +#define EMAC_DUPLEX_MASK (1 << 1)
800 +#define EMAC_ENABLE_JUMBO_FRAME (1 << 3)
801 +#define EMAC_ENABLE_COPY_ALL (1 << 4)
802 +#define EMAC_NO_BROADCAST (1 << 5)
803 +#define EMAC_ENABLE_MULTICAST (1 << 6)
804 +#define EMAC_ENABLE_UNICAST (1 << 7)
805 +#define EMAC_ENABLE_1536_RX (1 << 8)
806 +#define EMAC_SPEED_1000 (1 << 10)
807 +#define EMAC_PCS_ENABLE (1 << 11)
808 +#define EMAC_ENABLE_PAUSE_RX (1 << 13)
809 +#define EMAC_REMOVE_FCS (1 << 17)
810 +#define EMAC_ENABLE_CHKSUM_RX (1 << 24)
811 +#define EMAC_MDC_DIV_MASK (0x7 << 18) /* PCLK divisor for MDC */
812 +#define EMAC_DATA_BUS_WIDTH_SHIFT 21
813 +#define EMAC_DATA_BUS_WIDTH_MASK (0x3 << EMAC_DATA_BUS_WIDTH_SHIFT)
814 +#define EMAC_DATA_BUS_WIDTH_32 (0x00 << EMAC_DATA_BUS_WIDTH_SHIFT)
815 +#define EMAC_DATA_BUS_WIDTH_64 (0x01 << EMAC_DATA_BUS_WIDTH_SHIFT)
816 +#define EMAC_DATA_BUS_WIDTH_128 (0x10 << EMAC_DATA_BUS_WIDTH_SHIFT)
817 +#define EMAC_ENABLE_FCS_RX (1 << 26)
818 +#define EMAC_SGMII_MODE_ENABLE (1 << 27)
819 +
820 +#define EMAC_SPEED_MASK (EMAC_SPEED_100 | EMAC_SPEED_1000)
821 +
822 +/* EMAC_STACKED_VLAN_REG bits definition */
823 +#define EMAC_ENABLE_STACKED_VLAN (1 << 31)
824 +
825 +/* EMAC_CONTROL bits definition */
826 +#define EMAC_TWO_BYTES_IP_ALIGN		(1 << 0)	// two bytes IP alignment
827 +
828 +/* EMAC_NET_STATUS bits definition */
829 +#define EMAC_PHY_IDLE (1<<2) /* PHY management is idle */
830 +#define EMAC_MDIO_IN (1<<1) /* Status of mdio_in pin */
831 +#define EMAC_LINK_STATUS (1<<0) /* Status of link pin */
832 +
833 +/* EMAC_DMA_CONFIG Bit definitions */
834 +#define EMAC_ENABLE_CHKSUM_TX (1<<11)
835 +
836 +//RMII enable – bit 1 / RGMII enable – bit 2
837 +#define EMAC_RMII_MODE_ENABLE ((1 << 1) | (0 << 2))
838 +#define EMAC_RMII_MODE_DISABLE (0 << 1)
839 +#define EMAC_RGMII_MODE_ENABLE ((0 << 1) | (1 << 2))
840 +#define EMAC_RGMII_MODE_DISABLE (0 << 2)
841 +#define EMAC_MII_MODE_ENABLE (EMAC_RMII_MODE_DISABLE | EMAC_RGMII_MODE_DISABLE)
842 +#define EMAC_GMII_MODE_ENABLE (EMAC_RMII_MODE_DISABLE | EMAC_RGMII_MODE_DISABLE)
843 +#define EMAC_MODE_MASK (0x3 << 1)
844 +
845 +/* Default configuration */
846 +#define EMAC0_DEFAULT_DUPLEX_MODE FULLDUPLEX
847 +#define EMAC0_DEFAULT_EMAC_MODE RGMII
848 +#define EMAC0_DEFAULT_EMAC_SPEED SPEED_1000M
849 +
850 +#define EMAC1_DEFAULT_DUPLEX_MODE FULLDUPLEX
851 +#define EMAC1_DEFAULT_EMAC_MODE RGMII
852 +#define EMAC1_DEFAULT_EMAC_SPEED SPEED_1000M
853 +
854 +#define EMAC2_DEFAULT_DUPLEX_MODE FULLDUPLEX
855 +#define EMAC2_DEFAULT_EMAC_MODE RGMII
856 +#define EMAC2_DEFAULT_EMAC_SPEED SPEED_1000M
857 +
858 +/* EMAC Hash size */
859 +#define EMAC_HASH_REG_BITS 64
860 +
861 +/* The Address organisation for the MAC device. All addresses are split into
862 + * two 32-bit register fields. The first one (bottom) is the lower 32-bits of
863 + * the address and the other field are the high order bits - this may be 16-bits
864 + * in the case of MAC addresses, or 32-bits for the hash address.
865 + * In terms of memory storage, the first item (bottom) is assumed to be at a
866 + * lower address location than 'top'. i.e. top should be at address location of
867 + * 'bottom' + 4 bytes.
868 + */
869 +typedef struct {
870 + u32 bottom; /* Lower 32-bits of address. */
871 + u32 top; /* Upper 32-bits of address. */
872 +} MAC_ADDR;
873 +
874 +
875 +/* The following is the organisation of the address filters section of the MAC
876 + * registers. The Cadence MAC contains four possible specific address match
877 + * addresses, if an incoming frame corresponds to any one of these four
878 + * addresses then the frame will be copied to memory.
879 + * It is not necessary for all four of the address match registers to be
880 + * programmed, this is application dependant.
881 + */
882 +typedef struct {
883 + MAC_ADDR one; /* Specific address register 1. */
884 + MAC_ADDR two; /* Specific address register 2. */
885 + MAC_ADDR three; /* Specific address register 3. */
886 + MAC_ADDR four; /* Specific address register 4. */
887 +} SPEC_ADDR;
888 +
889 +typedef struct {
890 + u32 mode;
891 + u32 speed;
892 + u32 duplex;
893 +} GEMAC_CFG;
894 +
895 +#endif /* _EMAC_H_ */
896 --- /dev/null
897 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
898 @@ -0,0 +1,250 @@
899 +/*
900 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
901 + *
902 + * This program is free software; you can redistribute it and/or
903 + * modify it under the terms of the GNU General Public License
904 + * as published by the Free Software Foundation; either version 2
905 + * of the License, or (at your option) any later version.
906 + *
907 + * This program is distributed in the hope that it will be useful,
908 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
909 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
910 + * GNU General Public License for more details.
911 + *
912 + * You should have received a copy of the GNU General Public License
913 + * along with this program; if not, write to the Free Software
914 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
915 + *
916 +*/
917 +#ifndef _EMAC_H_
918 +#define _EMAC_H_
919 +
920 +#define EMAC_IEVENT_REG 0x004
921 +#define EMAC_IMASK_REG 0x008
922 +#define EMAC_R_DES_ACTIVE_REG 0x010
923 +#define EMAC_X_DES_ACTIVE_REG 0x014
924 +#define EMAC_ECNTRL_REG 0x024
925 +#define EMAC_MII_DATA_REG 0x040
926 +#define EMAC_MII_CTRL_REG 0x044
927 +#define EMAC_MIB_CTRL_STS_REG 0x064
928 +#define EMAC_RCNTRL_REG 0x084
929 +#define EMAC_TCNTRL_REG 0x0C4
930 +#define EMAC_PHY_ADDR_LOW 0x0E4
931 +#define EMAC_PHY_ADDR_HIGH 0x0E8
932 +#define EMAC_GAUR 0x120
933 +#define EMAC_GALR 0x124
934 +#define EMAC_TFWR_STR_FWD 0x144
935 +#define EMAC_RX_SECTIOM_FULL 0x190
936 +#define EMAC_TX_SECTION_EMPTY 0x1A0
937 +#define EMAC_TRUNC_FL 0x1B0
938 +
939 +#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */
940 +#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
941 +#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
942 +#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
943 +#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
944 +#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
945 +#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
946 +#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
947 +#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
948 +#define RMON_T_COL 0x224 /* RMON TX collision count */
949 +#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
950 +#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
951 +#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
952 +#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
953 +#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
954 +#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
955 +#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
956 +#define RMON_T_OCTETS 0x244 /* RMON TX octets */
957 +#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */
958 +#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
959 +#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
960 +#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
961 +#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
962 +#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
963 +#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
964 +#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
965 +#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
966 +#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
967 +#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
968 +#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
969 +#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
970 +#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
971 +#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
972 +#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
973 +#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
974 +#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
975 +#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
976 +#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
977 +#define RMON_R_RESVD_O 0x2a4 /* Reserved */
978 +#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
979 +#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
980 +#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
981 +#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
982 +#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
983 +#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
984 +#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
985 +#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
986 +#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
987 +#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
988 +#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
989 +#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
990 +#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
991 +#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
992 +#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
993 +
994 +#define EMAC_SMAC_0_0 0x500 /*Supplemental MAC Address 0 (RW).*/
995 +#define EMAC_SMAC_0_1 0x504 /*Supplemental MAC Address 0 (RW).*/
996 +
997 +/* GEMAC definitions and settings */
998 +
999 +#define EMAC_PORT_0 0
1000 +#define EMAC_PORT_1 1
1001 +
1002 +/* GEMAC Bit definitions */
1003 +#define EMAC_IEVENT_HBERR 0x80000000
1004 +#define EMAC_IEVENT_BABR 0x40000000
1005 +#define EMAC_IEVENT_BABT 0x20000000
1006 +#define EMAC_IEVENT_GRA 0x10000000
1007 +#define EMAC_IEVENT_TXF 0x08000000
1008 +#define EMAC_IEVENT_TXB 0x04000000
1009 +#define EMAC_IEVENT_RXF 0x02000000
1010 +#define EMAC_IEVENT_RXB 0x01000000
1011 +#define EMAC_IEVENT_MII 0x00800000
1012 +#define EMAC_IEVENT_EBERR 0x00400000
1013 +#define EMAC_IEVENT_LC 0x00200000
1014 +#define EMAC_IEVENT_RL 0x00100000
1015 +#define EMAC_IEVENT_UN 0x00080000
1016 +
1017 +#define EMAC_IMASK_HBERR 0x80000000
1018 +#define EMAC_IMASK_BABR 0x40000000
1019 +#define EMAC_IMASKT_BABT 0x20000000
1020 +#define EMAC_IMASK_GRA 0x10000000
1021 +#define EMAC_IMASKT_TXF 0x08000000
1022 +#define EMAC_IMASK_TXB 0x04000000
1023 +#define EMAC_IMASKT_RXF 0x02000000
1024 +#define EMAC_IMASK_RXB 0x01000000
1025 +#define EMAC_IMASK_MII 0x00800000
1026 +#define EMAC_IMASK_EBERR 0x00400000
1027 +#define EMAC_IMASK_LC 0x00200000
1028 +#define EMAC_IMASKT_RL 0x00100000
1029 +#define EMAC_IMASK_UN 0x00080000
1030 +
1031 +#define EMAC_RCNTRL_MAX_FL_SHIFT 16
1032 +#define EMAC_RCNTRL_LOOP 0x00000001
1033 +#define EMAC_RCNTRL_DRT 0x00000002
1034 +#define EMAC_RCNTRL_MII_MODE 0x00000004
1035 +#define EMAC_RCNTRL_PROM 0x00000008
1036 +#define EMAC_RCNTRL_BC_REJ 0x00000010
1037 +#define EMAC_RCNTRL_FCE 0x00000020
1038 +#define EMAC_RCNTRL_RGMII 0x00000040
1039 +#define EMAC_RCNTRL_SGMII 0x00000080
1040 +#define EMAC_RCNTRL_RMII 0x00000100
1041 +#define EMAC_RCNTRL_RMII_10T 0x00000200
1042 +#define EMAC_RCNTRL_CRC_FWD 0x00004000
1043 +
1044 +#define EMAC_TCNTRL_GTS 0x00000001
1045 +#define EMAC_TCNTRL_HBC 0x00000002
1046 +#define EMAC_TCNTRL_FDEN 0x00000004
1047 +#define EMAC_TCNTRL_TFC_PAUSE 0x00000008
1048 +#define EMAC_TCNTRL_RFC_PAUSE 0x00000010
1049 +
1050 +#define EMAC_ECNTRL_RESET 0x00000001 /* reset the EMAC */
1051 +#define EMAC_ECNTRL_ETHER_EN 0x00000002 /* enable the EMAC */
1052 +#define EMAC_ECNTRL_SPEED 0x00000020
1053 +#define EMAC_ECNTRL_DBSWAP 0x00000100
1054 +
1055 +#define EMAC_X_WMRK_STRFWD 0x00000100
1056 +
1057 +#define EMAC_X_DES_ACTIVE_TDAR 0x01000000
1058 +#define EMAC_R_DES_ACTIVE_RDAR 0x01000000
1059 +
1060 +
1061 +
1062 +/* The possible operating speeds of the MAC, currently supporting 10, 100 and
1063 + * 1000Mb modes.
1064 + */
1065 +typedef enum {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS} MAC_SPEED;
1066 +
1067 +#define GMII 1
1068 +#define MII 2
1069 +#define RMII 3
1070 +#define RGMII 4
1071 +#define SGMII 5
1072 +
1073 +#define DUPLEX_HALF 0x00
1074 +#define DUPLEX_FULL 0x01
1075 +
1076 +
1077 +/* Default configuration */
1078 +#define EMAC0_DEFAULT_DUPLEX_MODE FULLDUPLEX
1079 +#define EMAC0_DEFAULT_EMAC_MODE RGMII
1080 +#define EMAC0_DEFAULT_EMAC_SPEED SPEED_1000M
1081 +
1082 +#define EMAC1_DEFAULT_DUPLEX_MODE FULLDUPLEX
1083 +#define EMAC1_DEFAULT_EMAC_MODE SGMII
1084 +#define EMAC1_DEFAULT_EMAC_SPEED SPEED_1000M
1085 +
1086 +/* MII-related definitions */
1087 +#define EMAC_MII_DATA_ST 0x40000000 /* Start of frame delimiter */
1088 +#define EMAC_MII_DATA_OP_RD 0x20000000 /* Perform a read operation */
1089 +#define EMAC_MII_DATA_OP_WR 0x10000000 /* Perform a write operation */
1090 +#define EMAC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address field mask */
1091 +#define EMAC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register field mask */
1092 +#define EMAC_MII_DATA_TA 0x00020000 /* Turnaround */
1093 +#define EMAC_MII_DATA_DATAMSK 0x0000ffff /* PHY data field */
1094 +
1095 +#define EMAC_MII_DATA_RA_SHIFT 18 /* MII Register address bits */
1096 +#define EMAC_MII_DATA_RA_MASK 0x1F /* MII Register address mask */
1097 +#define EMAC_MII_DATA_PA_SHIFT 23 /* MII PHY address bits */
1098 +#define EMAC_MII_DATA_PA_MASK 0x1F /* MII PHY address mask */
1099 +
1100 +#define EMAC_MII_DATA_RA(v) ((v & EMAC_MII_DATA_RA_MASK) << EMAC_MII_DATA_RA_SHIFT)
1101 +#define EMAC_MII_DATA_PA(v)	((v & EMAC_MII_DATA_PA_MASK) << EMAC_MII_DATA_PA_SHIFT)
1102 +#define EMAC_MII_DATA(v) (v & 0xffff)
1103 +
1104 +#define EMAC_MII_SPEED_SHIFT 1
1105 +#define EMAC_HOLDTIME_SHIFT 8
1106 +#define EMAC_HOLDTIME_MASK 0x7
1107 +#define EMAC_HOLDTIME(v) ((v & EMAC_HOLDTIME_MASK) << EMAC_HOLDTIME_SHIFT)
1108 +
1109 +/* The Address organisation for the MAC device. All addresses are split into
1110 + * two 32-bit register fields. The first one (bottom) is the lower 32-bits of
1111 + * the address and the other field are the high order bits - this may be 16-bits
1112 + * in the case of MAC addresses, or 32-bits for the hash address.
1113 + * In terms of memory storage, the first item (bottom) is assumed to be at a
1114 + * lower address location than 'top'. i.e. top should be at address location of
1115 + * 'bottom' + 4 bytes.
1116 + */
1117 +typedef struct {
1118 + u32 bottom; /* Lower 32-bits of address. */
1119 + u32 top; /* Upper 32-bits of address. */
1120 +} MAC_ADDR;
1121 +
1122 +
1123 +/* The following is the organisation of the address filters section of the MAC
1124 + * registers. The Cadence MAC contains four possible specific address match
1125 + * addresses, if an incoming frame corresponds to any one of these four
1126 + * addresses then the frame will be copied to memory.
1127 + * It is not necessary for all four of the address match registers to be
1128 + * programmed, this is application dependant.
1129 + */
1130 +typedef struct {
1131 + MAC_ADDR one; /* Specific address register 1. */
1132 + MAC_ADDR two; /* Specific address register 2. */
1133 + MAC_ADDR three; /* Specific address register 3. */
1134 + MAC_ADDR four; /* Specific address register 4. */
1135 +} SPEC_ADDR;
1136 +
1137 +typedef struct {
1138 + u32 mode;
1139 + u32 speed;
1140 + u32 duplex;
1141 +} GEMAC_CFG;
1142 +
1143 +/* EMAC Hash size */
1144 +#define EMAC_HASH_REG_BITS 64
1145 +
1146 +#define EMAC_SPEC_ADDR_MAX 4
1147 +
1148 +#endif /* _EMAC_H_ */
1149 --- /dev/null
1150 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
1151 @@ -0,0 +1,78 @@
1152 +/*
1153 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
1154 + *
1155 + * This program is free software; you can redistribute it and/or
1156 + * modify it under the terms of the GNU General Public License
1157 + * as published by the Free Software Foundation; either version 2
1158 + * of the License, or (at your option) any later version.
1159 + *
1160 + * This program is distributed in the hope that it will be useful,
1161 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1162 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1163 + * GNU General Public License for more details.
1164 + *
1165 + * You should have received a copy of the GNU General Public License
1166 + * along with this program; if not, write to the Free Software
1167 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1168 + *
1169 +*/
1170 +#ifndef _GPI_H_
1171 +#define _GPI_H_
1172 +
1173 +#define GPI_VERSION 0x00
1174 +#define GPI_CTRL 0x04
1175 +#define GPI_RX_CONFIG 0x08
1176 +#define GPI_HDR_SIZE 0x0c
1177 +#define GPI_BUF_SIZE 0x10
1178 +#define GPI_LMEM_ALLOC_ADDR 0x14
1179 +#define GPI_LMEM_FREE_ADDR 0x18
1180 +#define GPI_DDR_ALLOC_ADDR 0x1c
1181 +#define GPI_DDR_FREE_ADDR 0x20
1182 +#define GPI_CLASS_ADDR 0x24
1183 +#define GPI_DRX_FIFO 0x28
1184 +#define GPI_TRX_FIFO 0x2c
1185 +#define GPI_INQ_PKTPTR 0x30
1186 +#define GPI_DDR_DATA_OFFSET 0x34
1187 +#define GPI_LMEM_DATA_OFFSET 0x38
1188 +#define GPI_TMLF_TX 0x4c
1189 +#define GPI_DTX_ASEQ 0x50
1190 +#define GPI_FIFO_STATUS 0x54
1191 +#define GPI_FIFO_DEBUG 0x58
1192 +#define GPI_TX_PAUSE_TIME 0x5c
1193 +#define GPI_LMEM_SEC_BUF_DATA_OFFSET 0x60
1194 +#define GPI_DDR_SEC_BUF_DATA_OFFSET 0x64
1195 +#define GPI_TOE_CHKSUM_EN 0x68
1196 +#define GPI_OVERRUN_DROPCNT 0x6c
1197 +
1198 +typedef struct {
1199 + u32 lmem_rtry_cnt;
1200 + u32 tmlf_txthres;
1201 + u32 aseq_len;
1202 +} GPI_CFG;
1203 +
1204 +
1205 +/* GPI commons defines */
1206 +#define GPI_LMEM_BUF_EN 0x1
1207 +#define GPI_DDR_BUF_EN 0x1
1208 +
1209 +/* EGPI 1 defines */
1210 +#define EGPI1_LMEM_RTRY_CNT 0x40
1211 +#define EGPI1_TMLF_TXTHRES 0xBC
1212 +#define EGPI1_ASEQ_LEN 0x50
1213 +
1214 +/* EGPI 2 defines */
1215 +#define EGPI2_LMEM_RTRY_CNT 0x40
1216 +#define EGPI2_TMLF_TXTHRES 0xBC
1217 +#define EGPI2_ASEQ_LEN 0x40
1218 +
1219 +/* EGPI 3 defines */
1220 +#define EGPI3_LMEM_RTRY_CNT 0x40
1221 +#define EGPI3_TMLF_TXTHRES 0xBC
1222 +#define EGPI3_ASEQ_LEN 0x40
1223 +
1224 +/* HGPI defines */
1225 +#define HGPI_LMEM_RTRY_CNT 0x40
1226 +#define HGPI_TMLF_TXTHRES 0xBC
1227 +#define HGPI_ASEQ_LEN 0x40
1228 +
1229 +#endif /* _GPI_H_ */
1230 --- /dev/null
1231 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpt.h
1232 @@ -0,0 +1,29 @@
1233 +/*
1234 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
1235 + *
1236 + * This program is free software; you can redistribute it and/or
1237 + * modify it under the terms of the GNU General Public License
1238 + * as published by the Free Software Foundation; either version 2
1239 + * of the License, or (at your option) any later version.
1240 + *
1241 + * This program is distributed in the hope that it will be useful,
1242 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1243 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1244 + * GNU General Public License for more details.
1245 + *
1246 + * You should have received a copy of the GNU General Public License
1247 + * along with this program; if not, write to the Free Software
1248 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1249 + *
1250 +*/
1251 +#ifndef _CBUS_GPT_H_
1252 +#define _CBUS_GPT_H_
1253 +
1254 +#define CBUS_GPT_VERSION (CBUS_GPT_BASE_ADDR + 0x00)
1255 +#define CBUS_GPT_STATUS (CBUS_GPT_BASE_ADDR + 0x04)
1256 +#define CBUS_GPT_CONFIG (CBUS_GPT_BASE_ADDR + 0x08)
1257 +#define CBUS_GPT_COUNTER (CBUS_GPT_BASE_ADDR + 0x0c)
1258 +#define CBUS_GPT_PERIOD (CBUS_GPT_BASE_ADDR + 0x10)
1259 +#define CBUS_GPT_WIDTH (CBUS_GPT_BASE_ADDR + 0x14)
1260 +
1261 +#endif /* _CBUS_GPT_H_ */
1262 --- /dev/null
1263 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
1264 @@ -0,0 +1,96 @@
1265 +/*
1266 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
1267 + *
1268 + * This program is free software; you can redistribute it and/or
1269 + * modify it under the terms of the GNU General Public License
1270 + * as published by the Free Software Foundation; either version 2
1271 + * of the License, or (at your option) any later version.
1272 + *
1273 + * This program is distributed in the hope that it will be useful,
1274 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1275 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1276 + * GNU General Public License for more details.
1277 + *
1278 + * You should have received a copy of the GNU General Public License
1279 + * along with this program; if not, write to the Free Software
1280 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1281 + *
1282 +*/
1283 +#ifndef _HIF_H_
1284 +#define _HIF_H_
1285 +
1286 +/** @file hif.h.
1287 + * hif - PFE hif block control and status register. Mapped on CBUS and accessible from all PE's and ARM.
1288 + */
1289 +#define HIF_VERSION (HIF_BASE_ADDR + 0x00)
1290 +#define HIF_TX_CTRL (HIF_BASE_ADDR + 0x04)
1291 +#define HIF_TX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x08)
1292 +#define HIF_TX_ALLOC (HIF_BASE_ADDR + 0x0c)
1293 +#define HIF_TX_BDP_ADDR (HIF_BASE_ADDR + 0x10)
1294 +#define HIF_TX_STATUS (HIF_BASE_ADDR + 0x14)
1295 +#define HIF_RX_CTRL (HIF_BASE_ADDR + 0x20)
1296 +#define HIF_RX_BDP_ADDR (HIF_BASE_ADDR + 0x24)
1297 +#define HIF_RX_STATUS (HIF_BASE_ADDR + 0x30)
1298 +#define HIF_INT_SRC (HIF_BASE_ADDR + 0x34)
1299 +#define HIF_INT_ENABLE (HIF_BASE_ADDR + 0x38)
1300 +#define HIF_POLL_CTRL (HIF_BASE_ADDR + 0x3c)
1301 +#define HIF_RX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x40)
1302 +#define HIF_RX_ALLOC (HIF_BASE_ADDR + 0x44)
1303 +#define HIF_TX_DMA_STATUS (HIF_BASE_ADDR + 0x48)
1304 +#define HIF_RX_DMA_STATUS (HIF_BASE_ADDR + 0x4c)
1305 +#define HIF_INT_COAL (HIF_BASE_ADDR + 0x50)
1306 +
1307 +/*HIF_INT_SRC/ HIF_INT_ENABLE control bits */
1308 +#define HIF_INT (1 << 0)
1309 +#define HIF_RXBD_INT (1 << 1)
1310 +#define HIF_RXPKT_INT (1 << 2)
1311 +#define HIF_TXBD_INT (1 << 3)
1312 +#define HIF_TXPKT_INT (1 << 4)
1313 +
1314 +/*HIF_TX_CTRL bits */
1315 +#define HIF_CTRL_DMA_EN (1<<0)
1316 +#define HIF_CTRL_BDP_POLL_CTRL_EN (1<<1)
1317 +#define HIF_CTRL_BDP_CH_START_WSTB (1<<2)
1318 +
1319 +/*HIF_INT_ENABLE bits */
1320 +#define HIF_INT_EN (1 << 0)
1321 +#define HIF_RXBD_INT_EN (1 << 1)
1322 +#define HIF_RXPKT_INT_EN (1 << 2)
1323 +#define HIF_TXBD_INT_EN (1 << 3)
1324 +#define HIF_TXPKT_INT_EN (1 << 4)
1325 +
1326 +/*HIF_POLL_CTRL bits*/
1327 +#define HIF_RX_POLL_CTRL_CYCLE 0x0400
1328 +#define HIF_TX_POLL_CTRL_CYCLE 0x0400
1329 +
1330 +/*HIF_INT_COAL bits*/
1331 +#define HIF_INT_COAL_ENABLE (1 << 31)
1332 +
1333 +/*Buffer descriptor control bits */
1334 +#define BD_CTRL_BUFLEN_MASK 0x3fff
1335 +#define BD_BUF_LEN(x) (x & BD_CTRL_BUFLEN_MASK)
1336 +#define BD_CTRL_CBD_INT_EN (1 << 16)
1337 +#define BD_CTRL_PKT_INT_EN (1 << 17)
1338 +#define BD_CTRL_LIFM (1 << 18)
1339 +#define BD_CTRL_LAST_BD (1 << 19)
1340 +#define BD_CTRL_DIR (1 << 20)
1341 +#define BD_CTRL_LMEM_CPY (1 << 21) /*Valid only for HIF_NOCPY*/
1342 +#define BD_CTRL_PKT_XFER (1 << 24)
1343 +#define BD_CTRL_DESC_EN (1 << 31)
1344 +#define BD_CTRL_PARSE_DISABLE (1 << 25)
1345 +#define BD_CTRL_BRFETCH_DISABLE (1 << 26)
1346 +#define BD_CTRL_RTFETCH_DISABLE (1 << 27)
1347 +
1348 +/*Buffer descriptor status bits*/
1349 +#define BD_STATUS_CONN_ID(x) ((x) & 0xffff)
1350 +#define BD_STATUS_DIR_PROC_ID (1 << 16)
1351 +#define BD_STATUS_CONN_ID_EN	(1 << 17)
1352 +#define BD_STATUS_PE2PROC_ID(x) (((x) & 7) << 18)
1353 +#define BD_STATUS_LE_DATA (1 << 21)
1354 +#define BD_STATUS_CHKSUM_EN (1 << 22)
1355 +
1356 +/*HIF Buffer descriptor status bits */
1357 +#define DIR_PROC_ID (1 << 16)
1358 +#define PROC_ID(id) ((id) << 18)
1359 +
1360 +#endif /* _HIF_H_ */
1361 --- /dev/null
1362 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
1363 @@ -0,0 +1,51 @@
1364 +/*
1365 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
1366 + *
1367 + * This program is free software; you can redistribute it and/or
1368 + * modify it under the terms of the GNU General Public License
1369 + * as published by the Free Software Foundation; either version 2
1370 + * of the License, or (at your option) any later version.
1371 + *
1372 + * This program is distributed in the hope that it will be useful,
1373 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1374 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1375 + * GNU General Public License for more details.
1376 + *
1377 + * You should have received a copy of the GNU General Public License
1378 + * along with this program; if not, write to the Free Software
1379 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1380 + *
1381 +*/
1382 +#ifndef _HIF_NOCPY_H_
1383 +#define _HIF_NOCPY_H_
1384 +
1385 +#define HIF_NOCPY_VERSION (HIF_NOCPY_BASE_ADDR + 0x00)
1386 +#define HIF_NOCPY_TX_CTRL (HIF_NOCPY_BASE_ADDR + 0x04)
1387 +#define HIF_NOCPY_TX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x08)
1388 +#define HIF_NOCPY_TX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x0c)
1389 +#define HIF_NOCPY_TX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x10)
1390 +#define HIF_NOCPY_TX_STATUS (HIF_NOCPY_BASE_ADDR + 0x14)
1391 +#define HIF_NOCPY_RX_CTRL (HIF_NOCPY_BASE_ADDR + 0x20)
1392 +#define HIF_NOCPY_RX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x24)
1393 +#define HIF_NOCPY_RX_STATUS (HIF_NOCPY_BASE_ADDR + 0x30)
1394 +#define HIF_NOCPY_INT_SRC (HIF_NOCPY_BASE_ADDR + 0x34)
1395 +#define HIF_NOCPY_INT_ENABLE (HIF_NOCPY_BASE_ADDR + 0x38)
1396 +#define HIF_NOCPY_POLL_CTRL (HIF_NOCPY_BASE_ADDR + 0x3c)
1397 +#define HIF_NOCPY_RX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x40)
1398 +#define HIF_NOCPY_RX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x44)
1399 +#define HIF_NOCPY_TX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x48)
1400 +#define HIF_NOCPY_RX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x4c)
1401 +#define HIF_NOCPY_RX_INQ0_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x50)
1402 +#define HIF_NOCPY_RX_INQ1_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x54)
1403 +#define HIF_NOCPY_TX_PORT_NO (HIF_NOCPY_BASE_ADDR + 0x60)
1404 +#define HIF_NOCPY_LMEM_ALLOC_ADDR (HIF_NOCPY_BASE_ADDR + 0x64)
1405 +#define HIF_NOCPY_CLASS_ADDR (HIF_NOCPY_BASE_ADDR + 0x68)
1406 +#define HIF_NOCPY_TMU_PORT0_ADDR (HIF_NOCPY_BASE_ADDR + 0x70)
1407 +#define HIF_NOCPY_TMU_PORT1_ADDR (HIF_NOCPY_BASE_ADDR + 0x74)
1408 +#define HIF_NOCPY_TMU_PORT2_ADDR (HIF_NOCPY_BASE_ADDR + 0x7c)
1409 +#define HIF_NOCPY_TMU_PORT3_ADDR (HIF_NOCPY_BASE_ADDR + 0x80)
1410 +#define HIF_NOCPY_TMU_PORT4_ADDR (HIF_NOCPY_BASE_ADDR + 0x84)
1411 +#define HIF_NOCPY_INT_COAL (HIF_NOCPY_BASE_ADDR + 0x90)
1412 +
1413 +
1414 +#endif /* _HIF_NOCPY_H_ */
1415 --- /dev/null
1416 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
1417 @@ -0,0 +1,128 @@
1418 +/*
1419 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
1420 + *
1421 + * This program is free software; you can redistribute it and/or
1422 + * modify it under the terms of the GNU General Public License
1423 + * as published by the Free Software Foundation; either version 2
1424 + * of the License, or (at your option) any later version.
1425 + *
1426 + * This program is distributed in the hope that it will be useful,
1427 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1428 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1429 + * GNU General Public License for more details.
1430 + *
1431 + * You should have received a copy of the GNU General Public License
1432 + * along with this program; if not, write to the Free Software
1433 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1434 + *
1435 +*/
1436 +#ifndef _TMU_CSR_H_
1437 +#define _TMU_CSR_H_
1438 +
1439 +#define TMU_VERSION (TMU_CSR_BASE_ADDR + 0x000)
1440 +#define TMU_INQ_WATERMARK (TMU_CSR_BASE_ADDR + 0x004)
1441 +#define TMU_PHY_INQ_PKTPTR (TMU_CSR_BASE_ADDR + 0x008)
1442 +#define TMU_PHY_INQ_PKTINFO (TMU_CSR_BASE_ADDR + 0x00c)
1443 +#define TMU_PHY_INQ_FIFO_CNT (TMU_CSR_BASE_ADDR + 0x010)
1444 +#define TMU_SYS_GENERIC_CONTROL (TMU_CSR_BASE_ADDR + 0x014)
1445 +#define TMU_SYS_GENERIC_STATUS (TMU_CSR_BASE_ADDR + 0x018)
1446 +#define TMU_SYS_GEN_CON0 (TMU_CSR_BASE_ADDR + 0x01c)
1447 +#define TMU_SYS_GEN_CON1 (TMU_CSR_BASE_ADDR + 0x020)
1448 +#define TMU_SYS_GEN_CON2 (TMU_CSR_BASE_ADDR + 0x024)
1449 +#define TMU_SYS_GEN_CON3 (TMU_CSR_BASE_ADDR + 0x028)
1450 +#define TMU_SYS_GEN_CON4 (TMU_CSR_BASE_ADDR + 0x02c)
1451 +#define TMU_TEQ_DISABLE_DROPCHK (TMU_CSR_BASE_ADDR + 0x030)
1452 +#define TMU_TEQ_CTRL (TMU_CSR_BASE_ADDR + 0x034)
1453 +#define TMU_TEQ_QCFG (TMU_CSR_BASE_ADDR + 0x038)
1454 +#define TMU_TEQ_DROP_STAT (TMU_CSR_BASE_ADDR + 0x03c)
1455 +#define TMU_TEQ_QAVG (TMU_CSR_BASE_ADDR + 0x040)
1456 +#define TMU_TEQ_WREG_PROB (TMU_CSR_BASE_ADDR + 0x044)
1457 +#define TMU_TEQ_TRANS_STAT (TMU_CSR_BASE_ADDR + 0x048)
1458 +#define TMU_TEQ_HW_PROB_CFG0 (TMU_CSR_BASE_ADDR + 0x04c)
1459 +#define TMU_TEQ_HW_PROB_CFG1 (TMU_CSR_BASE_ADDR + 0x050)
1460 +#define TMU_TEQ_HW_PROB_CFG2 (TMU_CSR_BASE_ADDR + 0x054)
1461 +#define TMU_TEQ_HW_PROB_CFG3 (TMU_CSR_BASE_ADDR + 0x058)
1462 +#define TMU_TEQ_HW_PROB_CFG4 (TMU_CSR_BASE_ADDR + 0x05c)
1463 +#define TMU_TEQ_HW_PROB_CFG5 (TMU_CSR_BASE_ADDR + 0x060)
1464 +#define TMU_TEQ_HW_PROB_CFG6 (TMU_CSR_BASE_ADDR + 0x064)
1465 +#define TMU_TEQ_HW_PROB_CFG7 (TMU_CSR_BASE_ADDR + 0x068)
1466 +#define TMU_TEQ_HW_PROB_CFG8 (TMU_CSR_BASE_ADDR + 0x06c)
1467 +#define TMU_TEQ_HW_PROB_CFG9 (TMU_CSR_BASE_ADDR + 0x070)
1468 +#define TMU_TEQ_HW_PROB_CFG10 (TMU_CSR_BASE_ADDR + 0x074)
1469 +#define TMU_TEQ_HW_PROB_CFG11 (TMU_CSR_BASE_ADDR + 0x078)
1470 +#define TMU_TEQ_HW_PROB_CFG12 (TMU_CSR_BASE_ADDR + 0x07c)
1471 +#define TMU_TEQ_HW_PROB_CFG13 (TMU_CSR_BASE_ADDR + 0x080)
1472 +#define TMU_TEQ_HW_PROB_CFG14 (TMU_CSR_BASE_ADDR + 0x084)
1473 +#define TMU_TEQ_HW_PROB_CFG15 (TMU_CSR_BASE_ADDR + 0x088)
1474 +#define TMU_TEQ_HW_PROB_CFG16 (TMU_CSR_BASE_ADDR + 0x08c)
1475 +#define TMU_TEQ_HW_PROB_CFG17 (TMU_CSR_BASE_ADDR + 0x090)
1476 +#define TMU_TEQ_HW_PROB_CFG18 (TMU_CSR_BASE_ADDR + 0x094)
1477 +#define TMU_TEQ_HW_PROB_CFG19 (TMU_CSR_BASE_ADDR + 0x098)
1478 +#define TMU_TEQ_HW_PROB_CFG20 (TMU_CSR_BASE_ADDR + 0x09c)
1479 +#define TMU_TEQ_HW_PROB_CFG21 (TMU_CSR_BASE_ADDR + 0x0a0)
1480 +#define TMU_TEQ_HW_PROB_CFG22 (TMU_CSR_BASE_ADDR + 0x0a4)
1481 +#define TMU_TEQ_HW_PROB_CFG23 (TMU_CSR_BASE_ADDR + 0x0a8)
1482 +#define TMU_TEQ_HW_PROB_CFG24 (TMU_CSR_BASE_ADDR + 0x0ac)
1483 +#define TMU_TEQ_HW_PROB_CFG25 (TMU_CSR_BASE_ADDR + 0x0b0)
1484 +#define TMU_TDQ_IIFG_CFG (TMU_CSR_BASE_ADDR + 0x0b4)
1485 +#define TMU_TDQ0_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x0b8) /**< [9:0] Scheduler Enable for each of the scheduler in the TDQ. This is a global Enable for all schedulers in PHY0 */
1486 +#define TMU_LLM_CTRL (TMU_CSR_BASE_ADDR + 0x0bc)
1487 +#define TMU_LLM_BASE_ADDR (TMU_CSR_BASE_ADDR + 0x0c0)
1488 +#define TMU_LLM_QUE_LEN (TMU_CSR_BASE_ADDR + 0x0c4)
1489 +#define TMU_LLM_QUE_HEADPTR (TMU_CSR_BASE_ADDR + 0x0c8)
1490 +#define TMU_LLM_QUE_TAILPTR (TMU_CSR_BASE_ADDR + 0x0cc)
1491 +#define TMU_LLM_QUE_DROPCNT (TMU_CSR_BASE_ADDR + 0x0d0)
1492 +#define TMU_INT_EN (TMU_CSR_BASE_ADDR + 0x0d4)
1493 +#define TMU_INT_SRC (TMU_CSR_BASE_ADDR + 0x0d8)
1494 +#define TMU_INQ_STAT (TMU_CSR_BASE_ADDR + 0x0dc)
1495 +#define TMU_CTRL (TMU_CSR_BASE_ADDR + 0x0e0)
1496 +
1497 +#define TMU_MEM_ACCESS_ADDR (TMU_CSR_BASE_ADDR + 0x0e4) /**< [31] Mem Access Command. 0 = Internal Memory Read, 1 = Internal memory Write [27:24] Byte Enables of the Internal memory access [23:0] Address of the internal memory. This address is used to access both the PM and DM of all the PE's */
1498 +#define TMU_MEM_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x0e8) /**< Internal Memory Access Write Data */
1499 +#define TMU_MEM_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x0ec) /**< Internal Memory Access Read Data. The commands are blocked at the mem_access only */
1500 +
1501 +#define TMU_PHY0_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f0) /**< [31:0] PHY0 in queue address (must be initialized with one of the xxx_INQ_PKTPTR cbus addresses) */
1502 +#define TMU_PHY1_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f4) /**< [31:0] PHY1 in queue address (must be initialized with one of the xxx_INQ_PKTPTR cbus addresses) */
1503 +#define TMU_PHY2_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f8) /**< [31:0] PHY2 in queue address (must be initialized with one of the xxx_INQ_PKTPTR cbus addresses) */
1504 +#define TMU_PHY3_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0fc) /**< [31:0] PHY3 in queue address (must be initialized with one of the xxx_INQ_PKTPTR cbus addresses) */
1505 +#define TMU_BMU_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x100)
1506 +#define TMU_TX_CTRL (TMU_CSR_BASE_ADDR + 0x104)
1507 +
1508 +#define TMU_BUS_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x108)
1509 +#define TMU_BUS_ACCESS (TMU_CSR_BASE_ADDR + 0x10c)
1510 +#define TMU_BUS_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x110)
1511 +
1512 +#define TMU_PE_SYS_CLK_RATIO (TMU_CSR_BASE_ADDR + 0x114)
1513 +#define TMU_PE_STATUS (TMU_CSR_BASE_ADDR + 0x118)
1514 +#define TMU_TEQ_MAX_THRESHOLD (TMU_CSR_BASE_ADDR + 0x11c)
1515 +#define TMU_PHY4_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x134) /**< [31:0] PHY4 in queue address (must be initialized with one of the xxx_INQ_PKTPTR cbus addresses) */
1516 +#define TMU_TDQ1_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x138) /**< [9:0] Scheduler Enable for each of the scheduler in the TDQ. This is a global Enable for all schedulers in PHY1 */
1517 +#define TMU_TDQ2_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x13c) /**< [9:0] Scheduler Enable for each of the scheduler in the TDQ. This is a global Enable for all schedulers in PHY2 */
1518 +#define TMU_TDQ3_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x140) /**< [9:0] Scheduler Enable for each of the scheduler in the TDQ. This is a global Enable for all schedulers in PHY3 */
1519 +#define TMU_BMU_BUF_SIZE (TMU_CSR_BASE_ADDR + 0x144)
1520 +#define TMU_PHY5_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x148) /**< [31:0] PHY5 in queue address (must be initialized with one of the xxx_INQ_PKTPTR cbus addresses) */
1521 +
1522 +#define SW_RESET (1 << 0) /**< Global software reset */
1523 +#define INQ_RESET (1 << 2)
1524 +#define TEQ_RESET (1 << 3)
1525 +#define TDQ_RESET (1 << 4)
1526 +#define PE_RESET (1 << 5)
1527 +#define MEM_INIT (1 << 6)
1528 +#define MEM_INIT_DONE (1 << 7)
1529 +#define LLM_INIT (1 << 8)
1530 +#define LLM_INIT_DONE (1 << 9)
1531 +#define ECC_MEM_INIT_DONE (1<<10)
1532 +
1533 +typedef struct {
1534 + u32 pe_sys_clk_ratio;
1535 + unsigned long llm_base_addr;
1536 + u32 llm_queue_len;
1537 +} TMU_CFG;
1538 +
1539 +/* Not HW related for pfe_ctrl / pfe common defines */
1540 +#define DEFAULT_MAX_QDEPTH 80
1541 +#define DEFAULT_Q0_QDEPTH 511 //We keep one large queue for host tx qos
1542 +#define DEFAULT_TMU3_QDEPTH 127
1543 +
1544 +
1545 +#endif /* _TMU_CSR_H_ */
1546 --- /dev/null
1547 +++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
1548 @@ -0,0 +1,61 @@
1549 +/*
1550 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
1551 + *
1552 + * This program is free software; you can redistribute it and/or
1553 + * modify it under the terms of the GNU General Public License
1554 + * as published by the Free Software Foundation; either version 2
1555 + * of the License, or (at your option) any later version.
1556 + *
1557 + * This program is distributed in the hope that it will be useful,
1558 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1559 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1560 + * GNU General Public License for more details.
1561 + *
1562 + * You should have received a copy of the GNU General Public License
1563 + * along with this program; if not, write to the Free Software
1564 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1565 + *
1566 +*/
1567 +#ifndef _UTIL_CSR_H_
1568 +#define _UTIL_CSR_H_
1569 +
1570 +#define UTIL_VERSION (UTIL_CSR_BASE_ADDR + 0x000)
1571 +#define UTIL_TX_CTRL (UTIL_CSR_BASE_ADDR + 0x004)
1572 +#define UTIL_INQ_PKTPTR (UTIL_CSR_BASE_ADDR + 0x010)
1573 +
1574 +#define UTIL_HDR_SIZE (UTIL_CSR_BASE_ADDR + 0x014)
1575 +
1576 +#define UTIL_PE0_QB_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x020)
1577 +#define UTIL_PE0_QB_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x024)
1578 +#define UTIL_PE0_RO_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x060)
1579 +#define UTIL_PE0_RO_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x064)
1580 +
1581 +#define UTIL_MEM_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x100)
1582 +#define UTIL_MEM_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x104)
1583 +#define UTIL_MEM_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x108)
1584 +
1585 +#define UTIL_TM_INQ_ADDR (UTIL_CSR_BASE_ADDR + 0x114)
1586 +#define UTIL_PE_STATUS (UTIL_CSR_BASE_ADDR + 0x118)
1587 +
1588 +#define UTIL_PE_SYS_CLK_RATIO (UTIL_CSR_BASE_ADDR + 0x200)
1589 +#define UTIL_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x204)
1590 +#define UTIL_GAP_BETWEEN_READS (UTIL_CSR_BASE_ADDR + 0x208)
1591 +#define UTIL_MAX_BUF_CNT (UTIL_CSR_BASE_ADDR + 0x20c)
1592 +#define UTIL_TSQ_FIFO_THRES (UTIL_CSR_BASE_ADDR + 0x210)
1593 +#define UTIL_TSQ_MAX_CNT (UTIL_CSR_BASE_ADDR + 0x214)
1594 +#define UTIL_IRAM_DATA_0 (UTIL_CSR_BASE_ADDR + 0x218)
1595 +#define UTIL_IRAM_DATA_1 (UTIL_CSR_BASE_ADDR + 0x21c)
1596 +#define UTIL_IRAM_DATA_2 (UTIL_CSR_BASE_ADDR + 0x220)
1597 +#define UTIL_IRAM_DATA_3 (UTIL_CSR_BASE_ADDR + 0x224)
1598 +
1599 +#define UTIL_BUS_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x228)
1600 +#define UTIL_BUS_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x22c)
1601 +#define UTIL_BUS_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x230)
1602 +
1603 +#define UTIL_INQ_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x234)
1604 +
1605 +typedef struct {
1606 + u32 pe_sys_clk_ratio;
1607 +} UTIL_CFG;
1608 +
1609 +#endif /* _UTIL_CSR_H_ */
1610 --- /dev/null
1611 +++ b/drivers/staging/fsl_ppfe/include/pfe/class.h
1612 @@ -0,0 +1,133 @@
1613 +/*
1614 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
1615 + *
1616 + * This program is free software; you can redistribute it and/or
1617 + * modify it under the terms of the GNU General Public License
1618 + * as published by the Free Software Foundation; either version 2
1619 + * of the License, or (at your option) any later version.
1620 + *
1621 + * This program is distributed in the hope that it will be useful,
1622 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1623 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1624 + * GNU General Public License for more details.
1625 + *
1626 + * You should have received a copy of the GNU General Public License
1627 + * along with this program; if not, write to the Free Software
1628 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1629 + *
1630 +*/
1631 +#ifndef _CLASS_H_
1632 +#define _CLASS_H_
1633 +
1634 +#include "pe.h"
1635 +
1636 +#define CLASS_DMEM_BASE_ADDR 0x00000000
1637 +#define CLASS_DMEM_SIZE 0x2000
1638 +#define CLASS_DMEM_END (CLASS_DMEM_BASE_ADDR + CLASS_DMEM_SIZE)
1639 +#define CLASS_PMEM_BASE_ADDR 0x00010000
1640 +
1641 +#define CBUS_BASE_ADDR 0xc0000000
1642 +#define CLASS_APB_BASE_ADDR 0xc1000000
1643 +#define CLASS_AHB1_BASE_ADDR 0xc2000000
1644 +#define CLASS_AHB2_BASE_ADDR 0xc3000000
1645 +
1646 +#include "cbus.h"
1647 +
1648 +#define GPT_BASE_ADDR (CLASS_APB_BASE_ADDR + 0x00000)
1649 +#define UART_BASE_ADDR (CLASS_APB_BASE_ADDR + 0x10000)
1650 +#define PERG_BASE_ADDR (CLASS_APB_BASE_ADDR + 0x20000)
1651 +#define EFET_BASE_ADDR (CLASS_APB_BASE_ADDR + 0x40000)
1652 +
1653 +#define MAC_HASH_BASE_ADDR (CLASS_AHB1_BASE_ADDR + 0x30000)
1654 +#define VLAN_HASH_BASE_ADDR (CLASS_AHB1_BASE_ADDR + 0x50000)
1655 +
1656 +#define PE_LMEM_BASE_ADDR (CLASS_AHB2_BASE_ADDR + 0x10000)
1657 +#define PE_LMEM_SIZE 0x8000
1658 +#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
1659 +#define CCU_BASE_ADDR (CLASS_AHB2_BASE_ADDR + 0x20000)
1660 +
1661 +#define IS_DMEM(addr, len) (((unsigned long)(addr) >= CLASS_DMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= CLASS_DMEM_END))
1662 +#define IS_PE_LMEM(addr, len) (((unsigned long)(addr) >= PE_LMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= PE_LMEM_END))
1663 +
1664 +
1665 +#include "gpt.h"
1666 +#include "uart.h"
1667 +#include "class/perg.h"
1668 +#include "class/efet.h"
1669 +#include "class/mac_hash.h"
1670 +#include "class/vlan_hash.h"
1671 +#include "class/ccu.h"
1672 +
1673 +
1674 +#define CLASS_MAX_PBUFFERS 4
1675 +
1676 +#define PBUF_HWPARSE_OFFSET 0x10 /* Fixed by hardware */
1677 +
1678 +#define PAYLOAD_DMEM_MAX_SIZE (CLASS_PBUF_SIZE - CLASS_PBUF_HEADER_OFFSET - sizeof(class_rx_hdr_t))
1679 +
1680 +
1681 +#define MIN_PKT_SIZE 56
1682 +
1683 +#define PARSE_ETH_TYPE (1 << 0)
1684 +#define PARSE_VLAN_TYPE (1 << 1)
1685 +#define PARSE_PPPOE_TYPE (1 << 2)
1686 +#define PARSE_ARP_TYPE (1 << 3)
1687 +#define PARSE_MCAST_TYPE (1 << 4)
1688 +#define PARSE_IP_TYPE (1 << 5)
1689 +#define PARSE_IPV6_TYPE (1 << 6)
1690 +#define PARSE_IPV4_TYPE (1 << 7)
1691 +
1692 +#define PARSE_IPX_TYPE (1 << 9)
1693 +
1694 +#define PARSE_UDP_FLOW (1 << 11)
1695 +#define PARSE_TCP_FLOW (1 << 12)
1696 +#define PARSE_ICMP_FLOW (1 << 13)
1697 +#define PARSE_IGMP_FLOW (1 << 14)
1698 +#define PARSE_FRAG_FLOW (1 << 15)
1699 +
1700 +#define PARSE_HIF_PKT (1 << 23)
1701 +#define PARSE_ARC_HIT (1 << 24)
1702 +#define PARSE_PKT_OVERFLOW (1 << 25)
1703 +
1704 +#define PARSE_PROTO_MISMATCH (1 << 28)
1705 +#define PARSE_L3_MISMATCH (1 << 29)
1706 +#define PARSE_L2_MISMATCH (1 << 30)
1707 +#define PARSE_INCOMPLETE (1U << 31)
1708 +
1709 +
1710 +typedef struct _hwparse_t {
1711 + u16 sid;
1712 + u16 connid;
1713 + u8 toevec;
1714 + u8 pLayer2Hdr;
1715 + u8 pLayer3Hdr;
1716 + u8 pLayer4Hdr;
1717 + u16 vlanid;
1718 + u16 ifParseFlags;
1719 + u32 parseFlags;
1720 + u16 srcport;
1721 + u16 dstport;
1722 + u32 proto:8;
1723 + u32 port:4;
1724 + u32 hash:20;
1725 + u64 rte_res_valid:1;
1726 + u64 vlan_res_valid:1;
1727 + u64 dst_res_valid:1;
1728 + u64 src_res_valid:1;
1729 + u64 vlan_lookup:20;
1730 + u64 dst_lookup:20;
1731 + u64 src_lookup:20;
1732 +} hwparse_t;
1733 +
1734 +
1735 +typedef struct {
1736 + u8 num_cpy; /* no of copies to send out from RO block, for each there must be a corresponding tx pre-header */
1737 + u8 dma_len; /* len to be DMAed to DDR mem, including all tx pre-headers */
1738 + u16 src_addr; /* class dmem source address, pointing to first tx pre-header */
1739 + u32 dst_addr; /* DDR memory destination address of first tx pre-header, must be so packet data is continuous in DDR */
1740 + u32 res1; /* reserved for software usage - queue number? */
1741 + u16 res2; /* reserved for software usage */
1742 + u16 tsv; /* time stamp val */
1743 +} class_tx_desc_t;
1744 +
1745 +#endif /* _CLASS_H_ */
1746 --- /dev/null
1747 +++ b/drivers/staging/fsl_ppfe/include/pfe/class/ccu.h
1748 @@ -0,0 +1,28 @@
1749 +/*
1750 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
1751 + *
1752 + * This program is free software; you can redistribute it and/or
1753 + * modify it under the terms of the GNU General Public License
1754 + * as published by the Free Software Foundation; either version 2
1755 + * of the License, or (at your option) any later version.
1756 + *
1757 + * This program is distributed in the hope that it will be useful,
1758 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1759 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1760 + * GNU General Public License for more details.
1761 + *
1762 + * You should have received a copy of the GNU General Public License
1763 + * along with this program; if not, write to the Free Software
1764 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1765 + *
1766 +*/
1767 +#ifndef _CCU_H_
1768 +#define _CCU_H_
1769 +
1770 +#define CCU_ADDR (CCU_BASE_ADDR + 0x00)
1771 +#define CCU_CNT (CCU_BASE_ADDR + 0x04)
1772 +#define CCU_STATUS (CCU_BASE_ADDR + 0x08)
1773 +#define CCU_VAL (CCU_BASE_ADDR + 0x0c)
1774 +
1775 +#endif /* _CCU_H_ */
1776 +
1777 --- /dev/null
1778 +++ b/drivers/staging/fsl_ppfe/include/pfe/class/efet.h
1779 @@ -0,0 +1,44 @@
1780 +/*
1781 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
1782 + *
1783 + * This program is free software; you can redistribute it and/or
1784 + * modify it under the terms of the GNU General Public License
1785 + * as published by the Free Software Foundation; either version 2
1786 + * of the License, or (at your option) any later version.
1787 + *
1788 + * This program is distributed in the hope that it will be useful,
1789 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1790 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1791 + * GNU General Public License for more details.
1792 + *
1793 + * You should have received a copy of the GNU General Public License
1794 + * along with this program; if not, write to the Free Software
1795 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1796 + *
1797 +*/
1798 +#ifndef _CLASS_EFET_H_
1799 +#define _CLASS_EFET_H_
1800 +
1801 +//#define CLASS_EFET_ASYNC 1
1802 +
1803 +#define CLASS_EFET_ENTRY_ADDR (EFET_BASE_ADDR + 0x00)
1804 +#define CLASS_EFET_ENTRY_SIZE (EFET_BASE_ADDR + 0x04)
1805 +#define CLASS_EFET_ENTRY_DMEM_ADDR (EFET_BASE_ADDR + 0x08)
1806 +#define CLASS_EFET_ENTRY_STATUS (EFET_BASE_ADDR + 0x0c)
1807 +#define CLASS_EFET_ENTRY_ENDIAN (EFET_BASE_ADDR + 0x10)
1808 +
1809 +#define CBUS2DMEM 0
1810 +#define DMEM2CBUS 1
1811 +
1812 +#define EFET2BUS_LE (1 << 0)
1813 +#define PE2BUS_LE (1 << 1)
1814 +
1815 +#ifdef CLASS_EFET_ASYNC
1816 +void class_efet_async(u32 cbus_addr, u32 dmem_addr, u32 len, u32 dir);
1817 +#endif
1818 +
1819 +void class_efet_sync(u32 cbus_addr, u32 dmem_addr, u32 len, u32 dir);
1820 +
1821 +
1822 +#endif /* _CLASS_EFET_H_ */
1823 +
1824 --- /dev/null
1825 +++ b/drivers/staging/fsl_ppfe/include/pfe/class/mac_hash.h
1826 @@ -0,0 +1,55 @@
1827 +/*
1828 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
1829 + *
1830 + * This program is free software; you can redistribute it and/or
1831 + * modify it under the terms of the GNU General Public License
1832 + * as published by the Free Software Foundation; either version 2
1833 + * of the License, or (at your option) any later version.
1834 + *
1835 + * This program is distributed in the hope that it will be useful,
1836 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1837 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1838 + * GNU General Public License for more details.
1839 + *
1840 + * You should have received a copy of the GNU General Public License
1841 + * along with this program; if not, write to the Free Software
1842 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1843 + *
1844 +*/
1845 +#ifndef _MAC_HASH_H_
1846 +#define _MAC_HASH_H_
1847 +
1848 +#define MAC_HASH_REQ1_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x000)
1849 +#define MAC_HASH_REQ2_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x020)
1850 +#define MAC_HASH_REQ3_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x040)
1851 +#define MAC_HASH_REQ4_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x060)
1852 +#define MAC_HASH_REQ5_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x080)
1853 +#define MAC_HASH_REQ6_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x0a0)
1854 +#define MAC_HASH_REQ7_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x0c0)
1855 +#define MAC_HASH_REQ8_BASE_ADDR (MAC_HASH_BASE_ADDR + 0x0e0)
1856 +
1857 +#define MAC_HASH_REQ_CMD(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x000)
1858 +#define MAC_HASH_REQ_MAC1_ADDR(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x004)
1859 +#define MAC_HASH_REQ_MAC2_ADDR(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x008)
1860 +#define MAC_HASH_REQ_MASK1_ADDR(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x00c)
1861 +#define MAC_HASH_REQ_MASK2_ADDR(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x010)
1862 +#define MAC_HASH_REQ_ENTRY(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x014)
1863 +#define MAC_HASH_REQ_STATUS(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x018)
1864 +#define MAC_HASH_REQ_ENTRY_MAYCH(i) (MAC_HASH_REQ##i##_BASE_ADDR + 0x01c)
1865 +
1866 +
1867 +#define MAC_HASH_FREELIST_PTR_HEAD (MAC_HASH_BASE_ADDR + 0x100)
1868 +#define MAC_HASH_FREELIST_PTR_TAIL (MAC_HASH_BASE_ADDR + 0x104)
1869 +#define MAC_HASH_FREELIST_ENTRIES_ADDR (MAC_HASH_BASE_ADDR + 0x108)
1870 +
1871 +
1872 +#define HASH_CMD_INIT 1
1873 +#define HASH_CMD_ADD 2
1874 +#define HASH_CMD_DELETE 3
1875 +#define HASH_CMD_UPDATE 4
1876 +#define HASH_CMD_SEARCH 5
1877 +#define HASH_CMD_MEM_READ 6
1878 +#define HASH_CMD_MEM_WRITE 7
1879 +
1880 +#endif /* _MAC_HASH_H_ */
1881 +
1882 --- /dev/null
1883 +++ b/drivers/staging/fsl_ppfe/include/pfe/class/perg.h
1884 @@ -0,0 +1,39 @@
1885 +/*
1886 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
1887 + *
1888 + * This program is free software; you can redistribute it and/or
1889 + * modify it under the terms of the GNU General Public License
1890 + * as published by the Free Software Foundation; either version 2
1891 + * of the License, or (at your option) any later version.
1892 + *
1893 + * This program is distributed in the hope that it will be useful,
1894 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1895 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1896 + * GNU General Public License for more details.
1897 + *
1898 + * You should have received a copy of the GNU General Public License
1899 + * along with this program; if not, write to the Free Software
1900 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1901 + *
1902 +*/
1903 +#ifndef _PERG_H_
1904 +#define _PERG_H_
1905 +
1906 +#define PERG_QB_BUF_STATUS (PERG_BASE_ADDR + 0x00)
1907 +#define PERG_RO_BUF_STATUS (PERG_BASE_ADDR + 0x04)
1908 +#define PERG_CLR_QB_BUF_STATUS (PERG_BASE_ADDR + 0x08)
1909 +#define PERG_SET_RO_BUF_STATUS (PERG_BASE_ADDR + 0x0c)
1910 +#define PERG_CLR_RO_ERR_PKT (PERG_BASE_ADDR + 0x10)
1911 +#define PERG_CLR_BMU2_ERR_PKT (PERG_BASE_ADDR + 0x14)
1912 +
1913 +#define PERG_ID (PERG_BASE_ADDR + 0x18)
1914 +#define PERG_TIMER1 (PERG_BASE_ADDR + 0x1c)
1915 +//FIXME #define PERG_TIMER2 (PERG_BASE_ADDR + 0x20)
1916 +#define PERG_BMU1_CURRDEPTH (PERG_BASE_ADDR + 0x20)
1917 +#define PERG_BMU2_CURRDEPTH (PERG_BASE_ADDR + 0x24)
1918 +#define PERG_HOST_GP (PERG_BASE_ADDR + 0x2c)
1919 +#define PERG_PE_GP (PERG_BASE_ADDR + 0x30)
1920 +#define PERG_INT_ENABLE (PERG_BASE_ADDR + 0x34)
1921 +#define PERG_INT_SRC (PERG_BASE_ADDR + 0x38)
1922 +
1923 +#endif /* _PERG_H_ */
1924 --- /dev/null
1925 +++ b/drivers/staging/fsl_ppfe/include/pfe/class/vlan_hash.h
1926 @@ -0,0 +1,46 @@
1927 +/*
1928 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
1929 + *
1930 + * This program is free software; you can redistribute it and/or
1931 + * modify it under the terms of the GNU General Public License
1932 + * as published by the Free Software Foundation; either version 2
1933 + * of the License, or (at your option) any later version.
1934 + *
1935 + * This program is distributed in the hope that it will be useful,
1936 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1937 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1938 + * GNU General Public License for more details.
1939 + *
1940 + * You should have received a copy of the GNU General Public License
1941 + * along with this program; if not, write to the Free Software
1942 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1943 + *
1944 +*/
1945 +#ifndef _VLAN_HASH_H_
1946 +#define _VLAN_HASH_H_
1947 +
1948 +#define VLAN_HASH_REQ1_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x000)
1949 +#define VLAN_HASH_REQ2_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x020)
1950 +#define VLAN_HASH_REQ3_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x040)
1951 +#define VLAN_HASH_REQ4_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x060)
1952 +#define VLAN_HASH_REQ5_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x080)
1953 +#define VLAN_HASH_REQ6_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x0a0)
1954 +#define VLAN_HASH_REQ7_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x0c0)
1955 +#define VLAN_HASH_REQ8_BASE_ADDR (VLAN_HASH_BASE_ADDR + 0x0e0)
1956 +
1957 +#define VLAN_HASH_REQ_CMD(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x000)
1958 +#define VLAN_HASH_REQ_MAC1_ADDR(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x004)
1959 +#define VLAN_HASH_REQ_MAC2_ADDR(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x008)
1960 +#define VLAN_HASH_REQ_MASK1_ADDR(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x00c)
1961 +#define VLAN_HASH_REQ_MASK2_ADDR(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x010)
1962 +#define VLAN_HASH_REQ_ENTRY(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x014)
1963 +#define VLAN_HASH_REQ_STATUS(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x018)
1964 +#define VLAN_HASH_REQ_ENTRY_MAYCH(i) (VLAN_HASH_REQ##i##_BASE_ADDR + 0x01c)
1965 +
1966 +
1967 +#define VLAN_HASH_FREELIST_PTR_HEAD (VLAN_HASH_BASE_ADDR + 0x100)
1968 +#define VLAN_HASH_FREELIST_PTR_TAIL (VLAN_HASH_BASE_ADDR + 0x104)
1969 +#define VLAN_HASH_FREELIST_ENTRIES_ADDR (VLAN_HASH_BASE_ADDR + 0x108)
1970 +
1971 +#endif /* _VLAN_HASH_H_ */
1972 +
1973 --- /dev/null
1974 +++ b/drivers/staging/fsl_ppfe/include/pfe/gpt.h
1975 @@ -0,0 +1,44 @@
1976 +/*
1977 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
1978 + *
1979 + * This program is free software; you can redistribute it and/or
1980 + * modify it under the terms of the GNU General Public License
1981 + * as published by the Free Software Foundation; either version 2
1982 + * of the License, or (at your option) any later version.
1983 + *
1984 + * This program is distributed in the hope that it will be useful,
1985 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1986 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1987 + * GNU General Public License for more details.
1988 + *
1989 + * You should have received a copy of the GNU General Public License
1990 + * along with this program; if not, write to the Free Software
1991 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
1992 + *
1993 +*/
1994 +#ifndef _GPT_H_
1995 +#define _GPT_H_
1996 +
1997 +#define GPT_VERSION (GPT_BASE_ADDR + 0x00)
1998 +#define GPT_STATUS (GPT_BASE_ADDR + 0x04)
1999 +#define GPT_CONFIG (GPT_BASE_ADDR + 0x08)
2000 +#define GPT_COUNTER (GPT_BASE_ADDR + 0x0c)
2001 +#define GPT_PERIOD (GPT_BASE_ADDR + 0x10)
2002 +#define GPT_WIDTH (GPT_BASE_ADDR + 0x14)
2003 +
2004 +/*** These bits are defined for GPT_STATUS register */
2005 +#define GPT_STAT_IRQ (1<<0)
2006 +#define GPT_STAT_OVERFLOW_ERR (1<<4)
2007 +#define GPT_STAT_TMR_ENABLE (1<<8)
2008 +#define GPT_STAT_TMR_DISABLE (1<<9)
2009 +
2010 +/*** These bits are defined for GPT_CONFIG register */
2011 +#define GPT_CONFIG_PWM_MODE 0x1
2012 +#define GPT_CONFIG_WCAP_MODE 0x2
2013 +#define GPT_CONFIG_CAP_PULSE_OUT (1<<2)
2014 +#define GPT_CONFIG_PERIOD_CNT (1<<3)
2015 +#define GPT_CONFIG_INTR_ENABLE (1<<4)
2016 +#define GPT_CONFIG_AUX_SEL (1<<5)
2017 +
2018 +
2019 +#endif /* _GPT_H_ */
2020 --- /dev/null
2021 +++ b/drivers/staging/fsl_ppfe/include/pfe/pe.h
2022 @@ -0,0 +1,626 @@
2023 +/*
2024 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
2025 + *
2026 + * This program is free software; you can redistribute it and/or
2027 + * modify it under the terms of the GNU General Public License
2028 + * as published by the Free Software Foundation; either version 2
2029 + * of the License, or (at your option) any later version.
2030 + *
2031 + * This program is distributed in the hope that it will be useful,
2032 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2033 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2034 + * GNU General Public License for more details.
2035 + *
2036 + * You should have received a copy of the GNU General Public License
2037 + * along with this program; if not, write to the Free Software
2038 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
2039 + *
2040 +*/
2041 +#ifndef _PE_H_
2042 +#define _PE_H_
2043 +
2044 +#include "hal.h"
2045 +
2046 +#if defined(COMCERTO_2000_CLASS)
2047 +#include "pfe/class.h"
2048 +#elif defined(COMCERTO_2000_TMU)
2049 +#include "pfe/tmu.h"
2050 +#elif defined(COMCERTO_2000_UTIL)
2051 +#include "pfe/util.h"
2052 +#endif
2053 +
2054 +enum {
2055 + CLASS0_ID = 0,
2056 + CLASS1_ID,
2057 + CLASS2_ID,
2058 + CLASS3_ID,
2059 + CLASS4_ID,
2060 + CLASS5_ID,
2061 + TMU0_ID,
2062 + TMU1_ID,
2063 + TMU2_ID,
2064 + TMU3_ID,
2065 + UTIL_ID,
2066 + MAX_PE
2067 +};
2068 +#define PE_ID_ANY MAX_PE
2069 +
2070 +/* Hardware definition of physical ports */
2071 +/* CLASS rx header phy number */
2072 +enum CLASS_RX_PHY {
2073 + RX_PHY_0 = 0x0,
2074 + RX_PHY_1,
2075 + RX_PHY_2,
2076 + RX_PHY_HIF,
2077 + RX_PHY_HIF_NOCPY,
2078 + RX_PHY_CLASS = 1 << 14, /**< Control bit (in PHYNO field) used to inform CLASS PE that packet comes from Class. */
2079 + RX_PHY_UTIL = 1 << 15 /**< Control bit (in PHYNO field) used to inform CLASS PE that packet comes from UtilPE. */
2080 +};
2081 +
2082 +#define RX_PHY_SW_INPUT_PORT_OFFSET 11 /**< Offset in PHYNO field where the original input port will be stored for packets coming directly from software (UtilPE or Class). */
2083 +
2084 +
2085 +/* CLASS/TMU tx header phy number */
2086 +enum TMU_TX_PHY {
2087 + TX_PHY_TMU0 = 0x0,
2088 + TX_PHY_TMU1,
2089 + TX_PHY_TMU2,
2090 + TX_PHY_TMU3
2091 +};
2092 +
2093 +
2094 +// NOTE: Any changes to the following drop counter definitions must also
2095 +// be reflected in the pfe/pfe.h file and in pfe_ctrl/pfe_sysfs.c.
2096 +
2097 +#if defined(COMCERTO_2000_CLASS)
2098 +
2099 +#define CLASS_DROP_ICC 0
2100 +#define CLASS_DROP_HOST_PKT_ERROR 1
2101 +#define CLASS_DROP_RX_ERROR 2
2102 +#define CLASS_DROP_IPSEC_OUT 3
2103 +#define CLASS_DROP_IPSEC_IN 4
2104 +#define CLASS_DROP_EXPT_IPSEC 5
2105 +#define CLASS_DROP_REASSEMBLY 6
2106 +#define CLASS_DROP_FRAGMENTER 7
2107 +#define CLASS_DROP_NATT 8
2108 +#define CLASS_DROP_SOCKET 9
2109 +#define CLASS_DROP_MULTICAST 10
2110 +#define CLASS_DROP_NATPT 11
2111 +#define CLASS_DROP_TX_DISABLE 12
2112 +
2113 +#define CLASS_NUM_DROP_COUNTERS 13
2114 +
2115 +extern U32 drop_counter[CLASS_NUM_DROP_COUNTERS];
2116 +#define DROP_PACKET(pmtd, counter) free_packet(pmtd, CLASS_DROP_##counter)
2117 +#define DROP_BUFFER(addr, counter) free_buffer(addr, CLASS_DROP_##counter)
2118 +
2119 +#elif defined(COMCERTO_2000_UTIL)
2120 +
2121 +#define UTIL_DROP_IPSEC_OUT 0
2122 +#define UTIL_DROP_IPSEC_IN 1
2123 +#define UTIL_DROP_IPSEC_RATE_LIMIT 2
2124 +#define UTIL_DROP_FRAGMENTER 3
2125 +#define UTIL_DROP_SOCKET 4
2126 +#define UTIL_DROP_TX_DISABLE 5
2127 +#define UTIL_DROP_RX_ERROR 6
2128 +#define UTIL_DROP_NO_MTD 7
2129 +
2130 +#define UTIL_NUM_DROP_COUNTERS 8
2131 +
2132 +extern U32 drop_counter[UTIL_NUM_DROP_COUNTERS];
2133 +#define DROP_PACKET(pmtd, counter) free_packet(pmtd, UTIL_DROP_##counter)
2134 +#define DROP_BUFFER(addr, counter) free_buffer(addr, UTIL_DROP_##counter)
2135 +
2136 +#endif
2137 +
2138 +
2139 +
2140 +#define DDR_BASE_ADDR 0x00020000
2141 +#define DDR_END 0x86000000 /* This includes ACP and IRAM areas */
2142 +#define IRAM_BASE_ADDR 0x83000000
2143 +
2144 +#define IS_DDR(addr, len) (((unsigned long)(addr) >= DDR_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= DDR_END))
2145 +/* action bits of act_phyno is defined as follows */
2146 +
2147 +#define ACT_SRC_MAC_REPLACE (1 << (4 + 0))
2148 +#define ACT_VLAN_ADD (1 << (4 + 1))
2149 +#define ACT_TCPCHKSUM_REPLACE (1 << (4 + 2))
2150 +#define ACT_VLAN_REPLACE (1 << (4 + 3))
2151 +#define ACT_DONT_FREE_BUFFER (1 << (4 + 5))
2152 +#define ACT_IPCHKSUM_REPLACE (1 << (4 + 6))
2153 +
2154 +typedef struct {
2155 + u8 start_data_off; /* packet data start offset, relative to start of this tx pre-header */
2156 + u8 start_buf_off; /* this tx pre-header start offset, relative to start of DDR buffer */
2157 + u16 pkt_length; /* total packet length */
2158 + u8 act_phyno; /* action / phy number */
2159 + u8 queueno; /* queueno */
2160 + u16 unused;
2161 +} class_tx_hdr_t;
2162 +
2163 +typedef struct {
2164 + u8 start_data_off; /* packet data start offset, relative to start of this tx pre-header */
2165 + u8 start_buf_off; /* this tx pre-header start offset, relative to start of DDR buffer */
2166 + u16 pkt_length; /* total packet length */
2167 + u8 act_phyno; /* action / phy number */
2168 + u8 queueno; /* queueno */
2169 + u16 src_mac_msb; /* indicates src_mac 47:32 */
2170 + u32 src_mac_lsb; /* indicates src_mac 31:0 */
2171 + u32 vlanid; /* vlanid */
2172 +} class_tx_hdr_mc_t;
2173 +
2174 +typedef struct {
2175 + u32 next_ptr; /* ptr to the start of the first DDR buffer */
2176 + u16 length; /* total packet length */
2177 + u16 phyno; /* input physical port number */
2178 + u32 status; /* gemac status bits bits[32:63]*/
2179 + u32 status2; /* gemac status bits bits[0:31] */
2180 +} class_rx_hdr_t;
2181 +/* class_rx_hdr status bits (status0 bits in hardware blocks)
2182 + * from hif_top/dma_dxr_dtx.v
2183 + * STATUS[9:0] is the encoding of bits in the LMEM buffer as seen by the QB block,
2184 + * NOT the encoding of bits as seen by the Class PEs in the DMEM rx header */
2185 +#define STATUS_PARSE_DISABLE (1 << 0)
2186 +#define STATUS_BRFETCH_DISABLE (1 << 1)
2187 +#define STATUS_RTFETCH_DISABLE (1 << 2)
2188 +#define STATUS_DIR_PROC_ID (1 << 3)
2189 +#define STATUS_CONN_ID_EN (1 << 4)
2190 +#define STATUS_PE2PROC_ID(x) (((x) & 7) << 5)
2191 +#define STATUS_LE_DATA (1 << 8)
2192 +#define STATUS_CHKSUM_EN (1 << 9)
2193 +
2194 +/* from gpi/gpi_rmlf.v */
2195 +#define STATUS_CUMULATIVE_ERR (1 << 16)
2196 +#define STATUS_LENGTH_ERR (1 << 17)
2197 +#define STATUS_CRC_ERR (1 << 18)
2198 +#define STATUS_TOO_SHORT_ERR (1 << 19)
2199 +#define STATUS_TOO_LONG_ERR (1 << 20)
2200 +#define STATUS_CODE_ERR (1 << 21)
2201 +#define STATUS_MC_HASH_MATCH (1 << 22)
2202 +#define STATUS_CUMULATIVE_ARC_HIT (1 << 23)
2203 +#define STATUS_UNICAST_HASH_MATCH (1 << 24)
2204 +#define STATUS_IP_CHECKSUM_CORRECT (1 << 25)
2205 +#define STATUS_TCP_CHECKSUM_CORRECT (1 << 26)
2206 +#define STATUS_UDP_CHECKSUM_CORRECT (1 << 27)
2207 +#define STATUS_OVERFLOW_ERR (1 << 28)
2208 +
2209 +#define UTIL_MAGIC_NUM 0xffd8ffe000104a46
2210 +#define UTIL_DDRC_WA
2211 +
2212 +/* The following structure is filled by class-pe when the packet
2213 + * has to be sent to util-pe, by filling the required information */
2214 +typedef struct {
2215 + u32 mtd_flags : 16;
2216 + u32 packet_type : 8;
2217 + u32 input_port : 4;
2218 + u32 data_offset : 4;
2219 + u32 word[MTD_PRIV];
2220 +#ifdef UTIL_DDRC_WA
2221 + u64 magic_num; // magic_number to verify the data validity in utilpe
2222 +#endif
2223 +} __attribute__((aligned(8))) util_rx_hdr_t; // Size must be a multiple of 64-bit to allow copies using EFET.
2224 +
2225 +#define UTIL_RX_IPS_IN_PKT EVENT_IPS_IN
2226 +#define UTIL_RX_IPS_OUT_PKT EVENT_IPS_OUT
2227 +#define UTIL_RX_RTP_PKT EVENT_RTP_RELAY
2228 +#define UTIL_RX_RTP_QOS_PKT EVENT_RTP_QOS
2229 +#define UTIL_RX_FRAG4_PKT EVENT_FRAG4
2230 +#define UTIL_RX_FRAG6_PKT EVENT_FRAG6
2231 +
2232 +/** Structure passed from UtilPE to Class, stored at the end of the LMEM buffer. Defined and used by software only.
2233 + *
2234 + */
2235 +
2236 +typedef struct
2237 +{
2238 + void *next;
2239 + u16 next_length;
2240 + u8 next_l3offset;
2241 + u8 next_l4offset;
2242 +} frag_info;
2243 +
2244 +typedef struct {
2245 + u8 packet_type : 6;
2246 + u8 padding : 2;
2247 +
2248 + u8 offset : 3;
2249 + u8 ddr_offset : 5;
2250 +
2251 + u16 mtd_flags;
2252 + union {
2253 + u16 half[6];
2254 + u8 byte[12];
2255 +
2256 + struct {
2257 + u16 sa_handle[2]; // SA_MAX_OP value should be used here instead of 2
2258 + u8 proto;
2259 + S8 sa_op;
2260 + u8 l2hdr_len;
2261 + u8 adj_dmem;
2262 + } ipsec;
2263 +
2264 + struct {
2265 + u16 l4offset;
2266 + u16 socket_id;
2267 + BOOL update;
2268 + u8 reserved;
2269 + u32 payload_diff;
2270 + } relay;
2271 +
2272 + struct {
2273 + u16 l3offset;
2274 + u16 l4offset;
2275 +
2276 + frag_info frag;
2277 + } ipv6;
2278 +
2279 + struct {
2280 + u16 l3offset;
2281 + } ipv4;
2282 +
2283 + struct {
2284 + u32 ddr_addr;
2285 + u16 length;
2286 + u8 port;
2287 + u8 queue;
2288 + u8 action;
2289 + } tx;
2290 + };
2291 +} lmem_trailer_t;
2292 +
2293 +/* The following values are defined for packet_type of lmem_trailer_t.
2294 + * These represent different types of packets sent from util to class
2295 + * for processing */
2296 +enum {
2297 + UTIL_TX_IPS_IN = 0,
2298 + UTIL_TX_IPV4_RTP_PKT,
2299 + UTIL_TX_IPV6_RTP_PKT,
2300 + UTIL_TX_IPV4_PKT,
2301 + UTIL_TX_IPV6_PKT,
2302 + UTIL_TX_EXPT_PKT,
2303 +#ifdef CFG_PCAP
2304 + UTIL_TX_PKT,
2305 +#endif
2306 + UTIL_TX_MAX_PKT
2307 +};
2308 +
2309 +
2310 +#define UTIL_TX_TRAILER_SIZE sizeof(lmem_trailer_t)
2311 +#define UTIL_TX_TRAILER(mtd) ((lmem_trailer_t *)ROUND_UP32((u32)(mtd)->rx_dmem_end))
2312 +
2313 +typedef struct {
2314 + u32 pkt_ptr;
2315 + u8 phyno;
2316 + u8 queueno;
2317 + u16 len;
2318 +} tmu_tx_hdr_t;
2319 +
2320 +struct hif_pkt_hdr {
2321 + u8 client_id;
2322 + u8 qNo;
2323 + u16 client_ctrl_le_lsw;
2324 + u16 client_ctrl_le_msw;
2325 +};
2326 +
2327 +
2328 +#if defined(CFG_WIFI_OFFLOAD)
2329 +#define MAX_WIFI_VAPS 3
2330 +#define PFE_WIFI_PKT_HEADROOM 96 /*PFE inserts this headroom for WiFi tx packets only in lro mode */
2331 +#else
2332 +#define MAX_WIFI_VAPS 0
2333 +#endif
2334 +
2335 +/* HIF header client id */
2336 +enum HIF_CLIENT_ID {
2337 + CLIENT_ID_GEM0 = 0,
2338 + CLIENT_ID_GEM1,
2339 + CLIENT_ID_GEM2,
2340 + CLIENT_ID_WIFI0,
2341 + CLIENT_ID_WIFI_LAST = MAX_WIFI_VAPS + CLIENT_ID_GEM2,
2342 + CLIENT_ID_PCAP,
2343 + CLIENT_ID_UNKNOWN = 0xff,
2344 +};
2345 +
2346 +
2347 +#define IS_WIFI_CLIENT_ID(_clid) (((_clid) >= CLIENT_ID_WIFI0) && ((_clid) <= CLIENT_ID_WIFI_LAST))
2348 +
2349 +/* These match LE definition */
2350 +#define HIF_CTRL_TX_TSO_NOCPY __cpu_to_le32(1 << 8)
2351 +#define HIF_CTRL_TX_IPSEC_OUT __cpu_to_le32(1 << 7)
2352 +#define HIF_CTRL_TX_WIFI_OWNMAC __cpu_to_le32(1 << 6)
2353 +#define HIF_CTRL_TX_TSO_END __cpu_to_le32(1 << 5)
2354 +#define HIF_CTRL_TX_TSO6 __cpu_to_le32(1 << 4)
2355 +#define HIF_CTRL_TX_TSO __cpu_to_le32(1 << 3)
2356 +#define HIF_CTRL_TX_CHECKSUM __cpu_to_le32(1 << 2)
2357 +#define HIF_CTRL_TX_CSUM_VALIDATE __cpu_to_le32(1 << 1)
2358 +#define HIF_CTRL_TX_WIFI_TXOFLD __cpu_to_le32(1 << 0)
2359 +
2360 +#define HIF_CTRL_RX_OFFSET_MASK __cpu_to_le32(0xf << 24)
2361 +#define HIF_CTRL_RX_PE_ID_MASK __cpu_to_le32(0xf << 16)
2362 +#define HIF_CTRL_RX_IPSEC_IN __cpu_to_le32(1 << 4)
2363 +#define HIF_CTRL_RX_WIFI_EXPT __cpu_to_le32(1 << 3)
2364 +#define HIF_CTRL_RX_CHECKSUMMED __cpu_to_le32(1 << 2)
2365 +#define HIF_CTRL_RX_CONTINUED __cpu_to_le32(1 << 1)
2366 +#define HIF_CTRL_RX_WIFI_HEADROOM __cpu_to_le32(1 << 0)
2367 +
2368 +#ifdef CFG_LRO
2369 +struct hif_lro_hdr {
2370 + u16 data_offset;
2371 + u16 mss;
2372 +};
2373 +#endif
2374 +
2375 +struct hif_ipsec_hdr {
2376 + u16 sa_handle[2];
2377 +};
2378 +
2379 +#define MAX_TSO_BUF_DESCS 5
2380 +struct hif_tso_buf_desc {
2381 + u32 addr;
2382 + u32 ctrl;
2383 +#define TSO_CTRL_LAST_BUFFER (1 << 31)
2384 +};
2385 +
2386 +struct hif_tso_hdr {
2387 + u16 ip_off;
2388 + u16 ip_id;
2389 + u16 ip_len;
2390 + u16 tcp_off;
2391 + u32 tcp_seq;
2392 +};
2393 +
2394 +struct hif_tso_hdr_nocpy {
2395 + u16 ip_off;
2396 + u16 ip_id;
2397 + u16 ip_len;
2398 + u16 tcp_off;
2399 + u32 tcp_seq;
2400 + struct hif_tso_buf_desc bdesc[MAX_TSO_BUF_DESCS];
2401 +};
2402 +
2403 +struct hif_pcap_hdr {
2404 + u8 ifindex;
2405 + u8 unused;
2406 + u16 seqno;
2407 + u32 timestamp;
2408 +};
2409 +
2410 +
2411 +struct pe_sync_mailbox
2412 +{
2413 + u32 stop;
2414 + u32 stopped;
2415 +};
2416 +
2417 +struct pe_msg_mailbox
2418 +{
2419 + u32 dst;
2420 + u32 src;
2421 + u32 len;
2422 + u32 request;
2423 +};
2424 +
2425 +
2426 +/** Basic busy loop delay function
2427 +*
2428 +* @param cycles Number of cycles to delay (actual cpu cycles should be close to 3 x cycles)
2429 +*
2430 +*/
2431 +static inline void delay(u32 cycles)
2432 +{
2433 + volatile int i;
2434 +
2435 + for (i = 0; i < cycles; i++);
2436 +}
2437 +
2438 +
2439 +/** Read PE id
2440 +*
2441 +* @return PE id (0 - 5 for CLASS-PE's, 6 - 9 for TMU-PE's, 10 for UTIL-PE)
2442 +*
2443 +*/
2444 +static inline u32 esi_get_mpid(void)
2445 +{
2446 + u32 mpid;
2447 +
2448 + asm ("rcsr %0, Configuration, MPID" : "=d" (mpid));
2449 +
2450 + return mpid;
2451 +}
2452 +
2453 +
2454 +#define esi_get_csr(bank, csr) \
2455 +({ \
2456 + u32 res; \
2457 + asm ("rcsr %0, " #bank ", " #csr : "=d" (res)); \
2458 + res; \
2459 +})
2460 +
2461 +#define esi_get_isa0() esi_get_csr(Configuration, ISA0)
2462 +#define esi_get_isa1() esi_get_csr(Configuration, ISA1)
2463 +#define esi_get_isa2() esi_get_csr(Configuration, ISA2)
2464 +#define esi_get_isa3() esi_get_csr(Configuration, ISA3)
2465 +#define esi_get_epc() esi_get_csr(Thread, EPC)
2466 +#define esi_get_ecas() esi_get_csr(Thread, ECAS)
2467 +#define esi_get_eid() esi_get_csr(Thread, EID)
2468 +#define esi_get_ed() esi_get_csr(Thread, ED)
2469 +
2470 +static inline void esi_pe_stop(U32 state)
2471 +{
2472 + PESTATUS_SETSTATE(state);
2473 + while (1)
2474 + {
2475 + asm("stop");
2476 + }
2477 +}
2478 +
2479 +
2480 +/** Same 64bit alignment memory copy using efet.
2481 +* Either the source or destination address must be in DMEM, the other address can be in LMEM or DDR.
2482 +* Both the source and destination must have the same 64bit alignment, length should be more than four bytes
2483 +* or dst/src must be 32bit aligned. Otherwise use efet_memcpy_any()
2484 +* Uses efet synchronous interface to copy the data.
2485 +*
2486 +* @param dst Destination address to write to (must have the same 64bit alignment as src)
2487 +* @param src Source address to read from (must have the same 64bit alignment as dst)
2488 +* @param len Number of bytes to copy
2489 +*
2490 +*/
2491 +void efet_memcpy(void *dst, void *src, unsigned int len);
2492 +
2493 +/** Same 64bit alignment memory copy using efet.
2494 +* Either the source or destination address must be in DMEM, the other address can be in LMEM or DDR.
2495 +* Both the source and destination must have the same 64bit alignment, there is no restriction on length.
2496 +* For UTIL-PE revA0, this function will still fail to handle small/unaligned writes.
2497 +* Uses efet synchronous interface to copy the data.
2498 +*
2499 +* @param dst Destination address to write to (must have the same 64bit alignment as src)
2500 +* @param src Source address to read from (must have the same 64bit alignment as dst)
2501 +* @param len Number of bytes to copy
2502 +*
2503 +*/
2504 +void efet_memcpy_any(void *dst, void *src, unsigned int len);
2505 +
2506 +/** Same 64bit alignment memory copy using efet.
2507 +* Either the source or destination address must be in DMEM, the other address can be in LMEM or DDR.
2508 +* Both the source and destination must have the same 64bit alignment, length should be more than four bytes
2509 +* or dst/src must be 32bit aligned.
2510 +* Uses efet asynchronous interface to copy the data.
2511 +*
2512 +* @param dst Destination address to write to (must have the same 64bit alignment as src)
2513 +* @param src Source address to read from (must have the same 64bit alignment as dst)
2514 +* @param len Number of bytes to copy
2515 +*
2516 +*/
2517 +void efet_memcpy_nowait(void *dst, void *src, unsigned int len);
2518 +
2519 +/** Unaligned memory copy using efet.
2520 +* Either the source or destination address must be in DMEM, the other address can be in LMEM or DDR.
2521 +* There is no restriction on the source and destination, nor on the length.
2522 +*
2523 +* @param dst Destination address to write to
2524 +* @param src Source address to read from
2525 +* @param len Number of bytes to copy
2526 +* @param dmem_buf temp dmem buffer to use, must be 64bit aligned
2527 +* @param dmem_len length of dmem buffer, must be 64bit aligned and at least 16 bytes
2528 +*
2529 +*/
2530 +void efet_memcpy_unaligned(void *dst, void *src, unsigned int len, void *dmem_buf, unsigned int dmem_len);
2531 +
2532 +/** Aligned memory copy of 4 bytes to register address.
2533 +* Register address must be 32 bit aligned.
2534 +*
2535 +* @param val value to be copied.
2536 +* @param reg_addr Register address (must be 16bit aligned)
2537 +*
2538 +*/
2539 +void __efet_writel(u32 val, void *addr);
2540 +
2541 +#ifdef REVA_WA
2542 +#define efet_writel(val, addr) __efet_writel((u32)(val), (void *) (addr))
2543 +#else
2544 +#define efet_writel(val, addr) writel((u32)(val), (void *) (addr))
2545 +#endif
2546 +
2547 +
2548 +/** 32bit aligned memory copy.
2549 +* Source and destination addresses must be 32bit aligned, there is no restriction on the length.
2550 +*
2551 +* @param dst Destination address (must be 32bit aligned)
2552 +* @param src Source address (must be 32bit aligned)
2553 +* @param len Number of bytes to copy
2554 +*
2555 +*/
2556 +void memcpy_aligned32(void *dst, void *src, unsigned int len);
2557 +
2558 +/** Aligned memory copy.
2559 +* Source and destination addresses must have the same alignment
2560 +* relative to 32bit boundaries (but otherwise may have any alignment),
2561 +* there is no restriction on the length.
2562 +*
2563 +* @param dst Destination address
2564 +* @param src Source address (must have same 32bit alignment as dst)
2565 +* @param len Number of bytes to copy
2566 +*
2567 +*/
2568 +void memcpy_aligned(void *dst, void *src, unsigned int len);
2569 +
2570 +/** Unaligned memory copy.
2571 +* Implements unaligned memory copy. We first align the destination
2572 +* to a 32bit boundary (using byte copies) then the src, and finally use a loop
2573 +* of read, shift, write
2574 +*
2575 +* @param dst Destination address
2576 +* @param src Source address (must have same 32bit alignment as dst)
2577 +* @param len Number of bytes to copy
2578 +*
2579 +*/
2580 +void memcpy_unaligned(void *dst, void *src, unsigned int len);
2581 +
2582 +/** Generic memory set.
2583 +* Implements a generic memory set. Not very optimal (uses byte writes for the entire range)
2584 +*
2585 +*
2586 +* @param dst Destination address
2587 +* @param val Value to set memory to
2588 +* @param len Number of bytes to set
2589 +*
2590 +*/
2591 +void memset(void *dst, u8 val, unsigned int len);
2592 +
2593 +/** Generic memory copy.
2594 +* Implements generic memory copy. If source and destination have the same
2595 +* alignment memcpy_aligned() is used, otherwise memcpy_unaligned()
2596 +*
2597 +* @param dst Destination address
2598 +* @param src Source address
2599 +* @param len Number of bytes to copy
2600 +*
2601 +*/
2602 +void memcpy(void *dst, void *src, unsigned int len);
2603 +
2604 +/** Generic memorymove.
2605 +* Implements generic memorymove, where copies across overlapping
2606 +* memory regions are supported.
2607 +* Uses the dmem_buf passed as a parameter as a temporary buffer.
2608 +* Includes two copies, forces one of the copies to be definitely aligned.
2609 +* The "dmem_len" being passed should be at least 3 bytes greater than "len"
2610 +* The 3 bytes here are shift bytes used to ensure one aligned copy.
2611 +*
2612 +* @param dst Destination address
2613 +* @param src Source address
2614 +* @param len Number of bytes to copy
2615 +* @param dmem_buf temp dmem buffer to use, must be 32bit aligned
2616 +* @param dmem_len length of dmem buffer, must be 32bit aligned and at least 3 bytes greater
2617 +* than @param len
2618 +*
2619 +*/
2620 +
2621 +void *memorymove(void * dst, void * src, unsigned int len, void *dmem_buf, unsigned int dmem_len);
2622 +
2623 +/** Aligned memory copy in DDR memory.
2624 + * Implements aligned memory copy between two DDR buffers using efet_memcpy64 and DMEM
2625 + * Both the source and destination must have the same 64bit alignment, there is no restriction on length.
2626 + * If start or end are not 64bit aligned, data in destination buffer before start/after end will be corrupted.
2627 + *
2628 + * @param dst DDR Destination address
2629 + * @param src DDR Source address
2630 + * @param len Number of bytes to copy
2631 + * @param dmem_buf temp dmem buffer to use, must be 64bit aligned
2632 + * @param dmem_len length of dmem buffer, must be 64bit aligned and at least 16 bytes
2633 + */
2634 +void memcpy_ddr_to_ddr(void *dst, void *src, unsigned int len, void *dmem_buf, unsigned int dmem_len);
2635 +
2636 +/** Unaligned memory copy in DDR memory.
2637 + * Implements generic memory copy between two DDR buffers using efet_memcpy and DMEM
2638 + * There is no restriction on the source, destination and length alignments.
2639 + *
2640 + * @param dst DDR Destination address
2641 + * @param src DDR Source address
2642 + * @param len Number of bytes to copy
2643 + * @param dmem_buf temp dmem buffer to use, must be 64bit aligned
2644 + * @param dmem_len length of dmem buffer, must be 64bit aligned and at least 16 bytes
2645 + */
2646 +void memcpy_ddr_to_ddr_unaligned(void *dst, void *src, unsigned int len, void *dmem_buf, unsigned int dmem_len);
2647 +
2648 +#endif /* _PE_H_ */
2649 --- /dev/null
2650 +++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
2651 @@ -0,0 +1,444 @@
2652 +/*
2653 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
2654 + *
2655 + * This program is free software; you can redistribute it and/or
2656 + * modify it under the terms of the GNU General Public License
2657 + * as published by the Free Software Foundation; either version 2
2658 + * of the License, or (at your option) any later version.
2659 + *
2660 + * This program is distributed in the hope that it will be useful,
2661 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2662 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2663 + * GNU General Public License for more details.
2664 + *
2665 + * You should have received a copy of the GNU General Public License
2666 + * along with this program; if not, write to the Free Software
2667 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
2668 + *
2669 +*/
2670 +#ifndef _PFE_H_
2671 +#define _PFE_H_
2672 +
2673 +#define CLASS_DMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
2674 +#define CLASS_IMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20)) /* Only valid for mem access register interface */
2675 +#define CLASS_DMEM_SIZE 0x00002000
2676 +#define CLASS_IMEM_SIZE 0x00008000
2677 +
2678 +#define TMU_DMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
2679 +#define TMU_IMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20)) /* Only valid for mem access register interface */
2680 +#define TMU_DMEM_SIZE 0x00000800
2681 +#define TMU_IMEM_SIZE 0x00002000
2682 +
2683 +#define UTIL_DMEM_BASE_ADDR 0x00000000
2684 +#define UTIL_DMEM_SIZE 0x00002000
2685 +
2686 +#define PE_LMEM_BASE_ADDR 0xc3010000
2687 +#define PE_LMEM_SIZE 0x8000
2688 +#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
2689 +
2690 +#define DMEM_BASE_ADDR 0x00000000
2691 +#define DMEM_SIZE 0x2000 /**< TMU has less... */
2692 +#define DMEM_END (DMEM_BASE_ADDR + DMEM_SIZE)
2693 +
2694 +#define PMEM_BASE_ADDR 0x00010000
2695 +#define PMEM_SIZE 0x8000 /**< TMU has less... */
2696 +#define PMEM_END (PMEM_BASE_ADDR + PMEM_SIZE)
2697 +
2698 +
2699 +/* These check memory ranges from PE point of view/memory map */
2700 +#define IS_DMEM(addr, len) (((unsigned long)(addr) >= DMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= DMEM_END))
2701 +#define IS_PMEM(addr, len) (((unsigned long)(addr) >= PMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= PMEM_END))
2702 +#define IS_PE_LMEM(addr, len) (((unsigned long)(addr) >= PE_LMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= PE_LMEM_END))
2703 +
2704 +#define IS_PFE_LMEM(addr, len) (((unsigned long)(addr) >= CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) && (((unsigned long)(addr) + (len)) <= CBUS_VIRT_TO_PFE(LMEM_END)))
2705 +#define __IS_PHYS_DDR(addr, len) (((unsigned long)(addr) >= DDR_PHYS_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= DDR_PHYS_END))
2706 +#define IS_PHYS_DDR(addr, len) __IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len)
2707 +
2708 +/* If using a run-time virtual address for the cbus base address use this code */
2709 +extern void *cbus_base_addr;
2710 +extern void *ddr_base_addr;
2711 +extern unsigned long ddr_phys_base_addr;
2712 +extern unsigned int ddr_size;
2713 +
2714 +#if defined(COMCERTO_2000_CONTROL)
2715 +#include <linux/version.h>
2716 +#if defined (CONFIG_PLATFORM_C2000)
2717 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
2718 +/*This is copied from arch/arm/include/asm/system_info.h */
2719 +extern unsigned int system_rev;
2720 +#endif
2721 +#endif
2722 +#endif
2723 +
2724 +#define CBUS_BASE_ADDR cbus_base_addr
2725 +#define DDR_PHYS_BASE_ADDR ddr_phys_base_addr
2726 +#define DDR_BASE_ADDR ddr_base_addr
2727 +#define DDR_SIZE ddr_size
2728 +
2729 +#define DDR_PHYS_END (DDR_PHYS_BASE_ADDR + DDR_SIZE)
2730 +
2731 +#if defined(CONFIG_PLATFORM_C2000)
2732 +#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /**< CBUS physical base address as seen by PE's. */
2733 +#define DDR_PHYS_TO_PFE(p) (p)
2734 +#define DDR_PFE_TO_PHYS(p) (p)
2735 +#define CBUS_PHYS_TO_PFE(p) (p)
2736 +#else
2737 +#define LS1012A_PFE_RESET_WA /*PFE doesn't have a global reset and re-init should take care of a few things to make PFE functional after reset */
2738 +#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /**< CBUS physical base address as seen by PE's. */
2739 +#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE 0xc0000000 /**< CBUS physical base address as seen by PE's. */
2740 +#define DDR_PHYS_TO_PFE(p) (((unsigned long int) (p)) & 0x7FFFFFFF)
2741 +#define DDR_PFE_TO_PHYS(p) (((unsigned long int) (p)) | 0x80000000)
2742 +#define CBUS_PHYS_TO_PFE(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE) /*Translates to PFE address map */
2743 +#endif
2744 +
2745 +#define DDR_PHYS_TO_VIRT(p) (((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR)
2746 +#define DDR_VIRT_TO_PHYS(v) (((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR)
2747 +#define DDR_VIRT_TO_PFE(p) (DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p)))
2748 +
2749 +#define CBUS_VIRT_TO_PFE(v) (((v) - CBUS_BASE_ADDR) + PFE_CBUS_PHYS_BASE_ADDR)
2750 +#define CBUS_PFE_TO_VIRT(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR)
2751 +
2752 +/* The below part of the code is used in QOS control driver from host */
2753 +#define TMU_APB_BASE_ADDR 0xc1000000 /** TMU base address seen by pe's */
2754 +
2755 +#define SHAPER0_BASE_ADDR (TMU_APB_BASE_ADDR + 0x020000)
2756 +#define SHAPER1_BASE_ADDR (TMU_APB_BASE_ADDR + 0x030000)
2757 +#define SHAPER2_BASE_ADDR (TMU_APB_BASE_ADDR + 0x040000)
2758 +#define SHAPER3_BASE_ADDR (TMU_APB_BASE_ADDR + 0x050000)
2759 +#define SHAPER4_BASE_ADDR (TMU_APB_BASE_ADDR + 0x060000)
2760 +#define SHAPER5_BASE_ADDR (TMU_APB_BASE_ADDR + 0x070000)
2761 +#define SHAPER6_BASE_ADDR (TMU_APB_BASE_ADDR + 0x080000)
2762 +#define SHAPER7_BASE_ADDR (TMU_APB_BASE_ADDR + 0x090000)
2763 +#define SHAPER8_BASE_ADDR (TMU_APB_BASE_ADDR + 0x0a0000)
2764 +#define SHAPER9_BASE_ADDR (TMU_APB_BASE_ADDR + 0x0b0000)
2765 +
2766 +#define SCHED0_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1c0000)
2767 +#define SCHED1_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1d0000)
2768 +#define SCHED2_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1e0000)
2769 +#define SCHED3_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1f0000)
2770 +#define SCHED4_BASE_ADDR (TMU_APB_BASE_ADDR + 0x200000)
2771 +#define SCHED5_BASE_ADDR (TMU_APB_BASE_ADDR + 0x210000)
2772 +#define SCHED6_BASE_ADDR (TMU_APB_BASE_ADDR + 0x220000)
2773 +#define SCHED7_BASE_ADDR (TMU_APB_BASE_ADDR + 0x230000)
2774 +
2775 +#define PHY_QUEUE_BASE_ADDR (TMU_APB_BASE_ADDR + 0x260000)
2776 +#define QUEUE_RESULT0 (PHY_QUEUE_BASE_ADDR + 0x48) /**< [7] set to one to indicate output PHY (TMU0->PHY0, TMU1->PHY1, TMU2->PHY2, TMU3->PHY3), [6:0] winner input queue number */
2777 +#define QUEUE_RESULT1 (PHY_QUEUE_BASE_ADDR + 0x4c) /**< [7] set to one to indicate output PHY (TMU0->PHY0, TMU1->PHY1, TMU2->PHY2, TMU3->PHY4), [6:0] winner input queue number */
2778 +#define QUEUE_RESULT2 (PHY_QUEUE_BASE_ADDR + 0x50) /**< [7] set to one to indicate output PHY (TMU0->PHY0, TMU1->PHY1, TMU2->PHY2, TMU3->PHY5), [6:0] winner input queue number */
2779 +
2780 +#define QUEUE_RESULT0_REGOFFSET (QUEUE_RESULT0 - QUEUE_RESULT0)
2781 +#define QUEUE_RESULT1_REGOFFSET (QUEUE_RESULT1 - QUEUE_RESULT0)
2782 +#define QUEUE_RESULT2_REGOFFSET (QUEUE_RESULT2 - QUEUE_RESULT0)
2783 +
2784 +
2785 +#include "cbus.h"
2786 +
2787 +enum {
2788 + CLASS0_ID = 0,
2789 + CLASS1_ID,
2790 + CLASS2_ID,
2791 + CLASS3_ID,
2792 +#if !defined(CONFIG_PLATFORM_PCI)
2793 + CLASS4_ID,
2794 + CLASS5_ID,
2795 +#endif
2796 +#if !defined(CONFIG_TMU_DUMMY)
2797 + TMU0_ID,
2798 + TMU1_ID,
2799 + TMU2_ID,
2800 + TMU3_ID,
2801 +#else
2802 + TMU0_ID,
2803 +#endif
2804 +#if !defined(CONFIG_UTIL_DISABLED)
2805 + UTIL_ID,
2806 +#endif
2807 + MAX_PE
2808 +};
2809 +
2810 +enum {
2811 + CLASS_TYPE = 0,
2812 + TMU_TYPE,
2813 + UTIL_TYPE
2814 +};
2815 +
2816 +#if !defined(CONFIG_PLATFORM_PCI)
2817 +#define CLASS_MASK ((1 << CLASS0_ID) | (1 << CLASS1_ID) | (1 << CLASS2_ID) | (1 << CLASS3_ID) | (1 << CLASS4_ID) | (1 << CLASS5_ID))
2818 +#define CLASS_MAX_ID CLASS5_ID
2819 +#else
2820 +#define CLASS_MASK ((1 << CLASS0_ID) | (1 << CLASS1_ID) | (1 << CLASS2_ID) | (1 << CLASS3_ID))
2821 +#define CLASS_MAX_ID CLASS3_ID
2822 +#endif
2823 +
2824 +#if !defined(CONFIG_TMU_DUMMY)
2825 +#if defined(CONFIG_PLATFORM_LS1012A)
2826 +#define TMU_MASK ((1 << TMU0_ID) | (1 << TMU1_ID) | (1 << TMU3_ID))
2827 +#else
2828 +#define TMU_MASK ((1 << TMU0_ID) | (1 << TMU1_ID) | (1 << TMU2_ID) | (1 << TMU3_ID))
2829 +#endif
2830 +#define TMU_MAX_ID TMU3_ID
2831 +#else
2832 +#define TMU_MASK (1 << TMU0_ID)
2833 +#define TMU_MAX_ID TMU0_ID
2834 +#endif
2835 +
2836 +#if !defined(CONFIG_UTIL_DISABLED)
2837 +#define UTIL_MASK (1 << UTIL_ID)
2838 +#endif
2839 +
2840 +typedef struct tPE_STATUS
2841 +{
2842 + u32 cpu_state;
2843 + u32 activity_counter;
2844 + u32 rx;
2845 + union {
2846 + u32 tx;
2847 + u32 tmu_qstatus;
2848 + };
2849 + u32 drop;
2850 +#if defined(CFG_PE_DEBUG)
2851 + u32 debug_indicator;
2852 + u32 debug[16];
2853 +#endif
2854 +} __attribute__((aligned(16))) PE_STATUS;
2855 +
2856 +
2857 +struct pe_sync_mailbox
2858 +{
2859 + u32 stop;
2860 + u32 stopped;
2861 +};
2862 +
2863 +struct pe_msg_mailbox
2864 +{
2865 + u32 dst;
2866 + u32 src;
2867 + u32 len;
2868 + u32 request;
2869 +};
2870 +
2871 +// Drop counter definitions
2872 +
2873 +#define CLASS_NUM_DROP_COUNTERS 13
2874 +#define UTIL_NUM_DROP_COUNTERS 8
2875 +
2876 +
2877 +/** PE information.
2878 + * Structure containing PE's specific information. It is used to create
2879 + * generic C functions common to all PE's.
2880 + * Before using the library functions this structure needs to be initialized with the different registers virtual addresses
2881 + * (according to the ARM MMU mmaping). The default initialization supports a virtual == physical mapping.
2882 + *
2883 + */
2884 +struct pe_info
2885 +{
2886 + u32 dmem_base_addr; /**< PE's dmem base address */
2887 + u32 pmem_base_addr; /**< PE's pmem base address */
2888 + u32 pmem_size; /**< PE's pmem size */
2889 +
2890 + void *mem_access_wdata; /**< PE's _MEM_ACCESS_WDATA register address */
2891 + void *mem_access_addr; /**< PE's _MEM_ACCESS_ADDR register address */
2892 + void *mem_access_rdata; /**< PE's _MEM_ACCESS_RDATA register address */
2893 +};
2894 +
2895 +
2896 +void pe_lmem_read(u32 *dst, u32 len, u32 offset);
2897 +void pe_lmem_write(u32 *src, u32 len, u32 offset);
2898 +
2899 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
2900 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
2901 +
2902 +u32 pe_pmem_read(int id, u32 addr, u8 size);
2903 +
2904 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size);
2905 +u32 pe_dmem_read(int id, u32 addr, u8 size);
2906 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len);
2907 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len);
2908 +void class_bus_write(u32 val, u32 addr, u8 size);
2909 +u32 class_bus_read(u32 addr, u8 size);
2910 +
2911 +
2912 +#define class_bus_readl(addr) class_bus_read(addr, 4)
2913 +#define class_bus_readw(addr) class_bus_read(addr, 2)
2914 +#define class_bus_readb(addr) class_bus_read(addr, 1)
2915 +
2916 +#define class_bus_writel(val, addr) class_bus_write(val, addr, 4)
2917 +#define class_bus_writew(val, addr) class_bus_write(val, addr, 2)
2918 +#define class_bus_writeb(val, addr) class_bus_write(val, addr, 1)
2919 +
2920 +#define pe_dmem_readl(id, addr) pe_dmem_read(id, addr, 4)
2921 +#define pe_dmem_readw(id, addr) pe_dmem_read(id, addr, 2)
2922 +#define pe_dmem_readb(id, addr) pe_dmem_read(id, addr, 1)
2923 +
2924 +#define pe_dmem_writel(id, val, addr) pe_dmem_write(id, val, addr, 4)
2925 +#define pe_dmem_writew(id, val, addr) pe_dmem_write(id, val, addr, 2)
2926 +#define pe_dmem_writeb(id, val, addr) pe_dmem_write(id, val, addr, 1)
2927 +
2928 +//int pe_load_elf_section(int id, const void *data, Elf32_Shdr *shdr);
2929 +int pe_load_elf_section(int id, const void *data, Elf32_Shdr *shdr, struct device *dev);
2930 +
2931 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base, unsigned int ddr_size);
2932 +void bmu_init(void *base, BMU_CFG *cfg);
2933 +void bmu_reset(void *base);
2934 +void bmu_enable(void *base);
2935 +void bmu_disable(void *base);
2936 +void bmu_set_config(void *base, BMU_CFG *cfg);
2937 +
2938 +/* An enumerated type for loopback values. This can be one of three values, no
2939 + * loopback -normal operation, local loopback with internal loopback module of
2940 + * MAC or PHY loopback which is through the external PHY.
2941 + */
2942 +#ifndef __MAC_LOOP_ENUM__
2943 +#define __MAC_LOOP_ENUM__
2944 +typedef enum {LB_NONE, LB_EXT, LB_LOCAL} MAC_LOOP;
2945 +#endif
2946 +
2947 +
2948 +void gemac_init(void *base, void *config);
2949 +void gemac_disable_rx_checksum_offload(void *base);
2950 +void gemac_enable_rx_checksum_offload(void *base);
2951 +void gemac_set_mdc_div(void *base, int mdc_div);
2952 +void gemac_set_speed(void *base, MAC_SPEED gem_speed);
2953 +void gemac_set_duplex(void *base, int duplex);
2954 +void gemac_set_mode(void *base, int mode);
2955 +void gemac_enable(void *base);
2956 +void gemac_tx_disable(void *base);
2957 +void gemac_disable(void *base);
2958 +void gemac_reset(void *base);
2959 +void gemac_set_address(void *base, SPEC_ADDR *addr);
2960 +SPEC_ADDR gemac_get_address(void *base);
2961 +void gemac_set_loop( void *base, MAC_LOOP gem_loop );
2962 +void gemac_set_laddr1(void *base, MAC_ADDR *address);
2963 +void gemac_set_laddr2(void *base, MAC_ADDR *address);
2964 +void gemac_set_laddr3(void *base, MAC_ADDR *address);
2965 +void gemac_set_laddr4(void *base, MAC_ADDR *address);
2966 +void gemac_set_laddrN(void *base, MAC_ADDR *address, unsigned int entry_index);
2967 +void gemac_clear_laddr1(void *base);
2968 +void gemac_clear_laddr2(void *base);
2969 +void gemac_clear_laddr3(void *base);
2970 +void gemac_clear_laddr4(void *base);
2971 +void gemac_clear_laddrN(void *base, unsigned int entry_index);
2972 +MAC_ADDR gemac_get_hash( void *base );
2973 +void gemac_set_hash( void *base, MAC_ADDR *hash );
2974 +MAC_ADDR gem_get_laddr1(void *base);
2975 +MAC_ADDR gem_get_laddr2(void *base);
2976 +MAC_ADDR gem_get_laddr3(void *base);
2977 +MAC_ADDR gem_get_laddr4(void *base);
2978 +MAC_ADDR gem_get_laddrN(void *base, unsigned int entry_index);
2979 +void gemac_set_config(void *base, GEMAC_CFG *cfg);
2980 +void gemac_allow_broadcast(void *base);
2981 +void gemac_no_broadcast(void *base);
2982 +void gemac_enable_unicast(void *base);
2983 +void gemac_disable_unicast(void *base);
2984 +void gemac_enable_multicast(void *base);
2985 +void gemac_disable_multicast(void *base);
2986 +void gemac_enable_fcs_rx(void *base);
2987 +void gemac_disable_fcs_rx(void *base);
2988 +void gemac_enable_1536_rx(void *base);
2989 +void gemac_disable_1536_rx(void *base);
2990 +void gemac_enable_rx_jmb(void *base);
2991 +void gemac_disable_rx_jmb(void *base);
2992 +void gemac_enable_stacked_vlan(void *base);
2993 +void gemac_disable_stacked_vlan(void *base);
2994 +void gemac_enable_pause_rx(void *base);
2995 +void gemac_disable_pause_rx(void *base);
2996 +void gemac_enable_copy_all(void *base);
2997 +void gemac_disable_copy_all(void *base);
2998 +void gemac_set_bus_width(void *base, int width);
2999 +void gemac_set_wol(void *base, u32 wol_conf);
3000 +
3001 +void gpi_init(void *base, GPI_CFG *cfg);
3002 +void gpi_reset(void *base);
3003 +void gpi_enable(void *base);
3004 +void gpi_disable(void *base);
3005 +void gpi_set_config(void *base, GPI_CFG *cfg);
3006 +
3007 +void class_init(CLASS_CFG *cfg);
3008 +void class_reset(void);
3009 +void class_enable(void);
3010 +void class_disable(void);
3011 +void class_set_config(CLASS_CFG *cfg);
3012 +
3013 +void tmu_reset(void);
3014 +void tmu_init(TMU_CFG *cfg);
3015 +void tmu_enable(u32 pe_mask);
3016 +void tmu_disable(u32 pe_mask);
3017 +u32 tmu_qstatus(u32 if_id);
3018 +u32 tmu_pkts_processed(u32 if_id);
3019 +
3020 +void util_init(UTIL_CFG *cfg);
3021 +void util_reset(void);
3022 +void util_enable(void);
3023 +void util_disable(void);
3024 +
3025 +void hif_nocpy_init(void);
3026 +void hif_nocpy_tx_enable(void);
3027 +void hif_nocpy_tx_disable(void);
3028 +void hif_nocpy_rx_enable(void);
3029 +void hif_nocpy_rx_disable(void);
3030 +
3031 +void hif_init(void);
3032 +void hif_tx_enable(void);
3033 +void hif_tx_disable(void);
3034 +void hif_rx_enable(void);
3035 +void hif_rx_disable(void);
3036 +
3037 +
3038 +/** Get Chip Revision level
3039 +*
3040 +*/
3041 +
3042 +static inline unsigned int CHIP_REVISION(void)
3043 +{
3044 +#if defined (CONFIG_PLATFORM_C2000)
3045 +#if 1
3046 + return system_rev;
3047 + //return 0;
3048 +#else
3049 + return (readl(COMCERTO_GPIO_DEVICE_ID_REG) >> 24) & 0xf;
3050 +#endif
3051 +
3052 +#else
3053 + /*For LS1012A return always 1 */
3054 + return 1;
3055 +#endif
3056 +}
3057 +
3058 +/** Start HIF rx DMA
3059 +*
3060 +*/
3061 +static inline void hif_rx_dma_start(void)
3062 +{
3063 + /*TODO not sure poll_cntrl_en is required or not */
3064 + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL);
3065 +}
3066 +
3067 +/** Start HIF tx DMA
3068 +*
3069 +*/
3070 +static inline void hif_tx_dma_start(void)
3071 +{
3072 + /*TODO not sure poll_cntrl_en is required or not */
3073 + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
3074 +}
3075 +
3076 +/** Start HIF_NOCPY rx DMA
3077 +*
3078 +*/
3079 +static inline void hif_nocpy_rx_dma_start(void)
3080 +{
3081 + /*TODO not sure poll_cntrl_en is required or not */
3082 + writel((HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB), HIF_NOCPY_RX_CTRL);
3083 +}
3084 +
3085 +/** Start HIF_NOCPY tx DMA
3086 +*
3087 +*/
3088 +static inline void hif_nocpy_tx_dma_start(void)
3089 +{
3090 + /*TODO not sure poll_cntrl_en is required or not */
3091 + writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_NOCPY_TX_CTRL);
3092 +}
3093 +
3094 +#endif /* _PFE_H_ */
3095 +
3096 --- /dev/null
3097 +++ b/drivers/staging/fsl_ppfe/include/pfe/tmu.h
3098 @@ -0,0 +1,68 @@
3099 +/*
3100 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
3101 + *
3102 + * This program is free software; you can redistribute it and/or
3103 + * modify it under the terms of the GNU General Public License
3104 + * as published by the Free Software Foundation; either version 2
3105 + * of the License, or (at your option) any later version.
3106 + *
3107 + * This program is distributed in the hope that it will be useful,
3108 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3109 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3110 + * GNU General Public License for more details.
3111 + *
3112 + * You should have received a copy of the GNU General Public License
3113 + * along with this program; if not, write to the Free Software
3114 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
3115 + *
3116 +*/
3117 +#ifndef _TMU_H_
3118 +#define _TMU_H_
3119 +
3120 +#define TMU_DMEM_BASE_ADDR 0x00000000
3121 +#define TMU_PMEM_BASE_ADDR 0x00010000
3122 +
3123 +#define CBUS_BASE_ADDR 0xc0000000
3124 +#define TMU_APB_BASE_ADDR 0xc1000000
3125 +
3126 +#if defined (COMCERTO_2000_TMU) || defined (COMCERTO_2000_CONTROL)
3127 +
3128 +#include "cbus.h"
3129 +
3130 +#define GPT_BASE_ADDR (TMU_APB_BASE_ADDR + 0x00000)
3131 +#define UART_BASE_ADDR (TMU_APB_BASE_ADDR + 0x10000)
3132 +
3133 +#define SHAPER0_BASE_ADDR (TMU_APB_BASE_ADDR + 0x020000)
3134 +#define SHAPER1_BASE_ADDR (TMU_APB_BASE_ADDR + 0x030000)
3135 +#define SHAPER2_BASE_ADDR (TMU_APB_BASE_ADDR + 0x040000)
3136 +#define SHAPER3_BASE_ADDR (TMU_APB_BASE_ADDR + 0x050000)
3137 +#define SHAPER4_BASE_ADDR (TMU_APB_BASE_ADDR + 0x060000)
3138 +#define SHAPER5_BASE_ADDR (TMU_APB_BASE_ADDR + 0x070000)
3139 +#define SHAPER6_BASE_ADDR (TMU_APB_BASE_ADDR + 0x080000)
3140 +#define SHAPER7_BASE_ADDR (TMU_APB_BASE_ADDR + 0x090000)
3141 +#define SHAPER8_BASE_ADDR (TMU_APB_BASE_ADDR + 0x0a0000)
3142 +#define SHAPER9_BASE_ADDR (TMU_APB_BASE_ADDR + 0x0b0000)
3143 +
3144 +#define SCHED0_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1c0000)
3145 +#define SCHED1_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1d0000)
3146 +#define SCHED2_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1e0000)
3147 +#define SCHED3_BASE_ADDR (TMU_APB_BASE_ADDR + 0x1f0000)
3148 +#define SCHED4_BASE_ADDR (TMU_APB_BASE_ADDR + 0x200000)
3149 +#define SCHED5_BASE_ADDR (TMU_APB_BASE_ADDR + 0x210000)
3150 +#define SCHED6_BASE_ADDR (TMU_APB_BASE_ADDR + 0x220000)
3151 +#define SCHED7_BASE_ADDR (TMU_APB_BASE_ADDR + 0x230000)
3152 +
3153 +#define SHAPER_STATUS (TMU_APB_BASE_ADDR + 0x270000) /**< [9:0] bitmask of shapers that have positive credit */
3154 +
3155 +#include "gpt.h"
3156 +#include "uart.h"
3157 +#include "tmu/shaper.h"
3158 +#include "tmu/sched.h"
3159 +
3160 +#endif
3161 +
3162 +#define PHY_QUEUE_BASE_ADDR (TMU_APB_BASE_ADDR + 0x260000)
3163 +
3164 +#include "tmu/phy_queue.h"
3165 +
3166 +#endif /* _TMU_H_ */
3167 --- /dev/null
3168 +++ b/drivers/staging/fsl_ppfe/include/pfe/tmu/phy_queue.h
3169 @@ -0,0 +1,56 @@
3170 +/*
3171 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
3172 + *
3173 + * This program is free software; you can redistribute it and/or
3174 + * modify it under the terms of the GNU General Public License
3175 + * as published by the Free Software Foundation; either version 2
3176 + * of the License, or (at your option) any later version.
3177 + *
3178 + * This program is distributed in the hope that it will be useful,
3179 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3180 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3181 + * GNU General Public License for more details.
3182 + *
3183 + * You should have received a copy of the GNU General Public License
3184 + * along with this program; if not, write to the Free Software
3185 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
3186 + *
3187 +*/
3188 +#ifndef _PHY_QUEUE_H_
3189 +#define _PHY_QUEUE_H_
3190 +
3191 +#define PHY_QUEUE_SHAPER_STATUS (PHY_QUEUE_BASE_ADDR + 0x00) /**< [28:19] same as SHAPER_STATUS, [18:3] same as QUEUE_STATUS, [2:0] must be zero before a new packet may be dequeued */
3192 +#define QUEUE_STATUS (PHY_QUEUE_BASE_ADDR + 0x04) /**< [15:0] bit mask of input queues with pending packets */
3193 +
3194 +#define QUEUE0_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x08)
3195 +#define QUEUE1_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x0c)
3196 +#define QUEUE2_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x10)
3197 +#define QUEUE3_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x14)
3198 +#define QUEUE4_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x18)
3199 +#define QUEUE5_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x1c)
3200 +#define QUEUE6_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x20)
3201 +#define QUEUE7_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x24)
3202 +#define QUEUE8_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x28)
3203 +#define QUEUE9_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x2c)
3204 +#define QUEUE10_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x30)
3205 +#define QUEUE11_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x34)
3206 +#define QUEUE12_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x38)
3207 +#define QUEUE13_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x3c)
3208 +#define QUEUE14_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x40)
3209 +#define QUEUE15_PKT_LEN (PHY_QUEUE_BASE_ADDR + 0x44)
3210 +#define QUEUE_RESULT0 (PHY_QUEUE_BASE_ADDR + 0x48) /**< [7] set to one to indicate output PHY (TMU0->PHY0, TMU1->PHY1, TMU2->PHY2, TMU3->PHY3), [6:0] winner input queue number */
3211 +#define QUEUE_RESULT1 (PHY_QUEUE_BASE_ADDR + 0x4c) /**< [7] set to one to indicate output PHY (TMU0->PHY0, TMU1->PHY1, TMU2->PHY2, TMU3->PHY4), [6:0] winner input queue number */
3212 +#define QUEUE_RESULT2 (PHY_QUEUE_BASE_ADDR + 0x50) /**< [7] set to one to indicate output PHY (TMU0->PHY0, TMU1->PHY1, TMU2->PHY2, TMU3->PHY5), [6:0] winner input queue number */
3213 +#define TMU_PE_GP_REG (PHY_QUEUE_BASE_ADDR + 0x54)
3214 +#define QUEUE_GBL_PKTLEN (PHY_QUEUE_BASE_ADDR + 0x5c)
3215 +#define QUEUE_GBL_PKTLEN_MASK (PHY_QUEUE_BASE_ADDR + 0x60)
3216 +
3217 +#define QUEUE_RESULT0_REGOFFSET (QUEUE_RESULT0 - QUEUE_RESULT0)
3218 +#define QUEUE_RESULT1_REGOFFSET (QUEUE_RESULT1 - QUEUE_RESULT0)
3219 +#define QUEUE_RESULT2_REGOFFSET (QUEUE_RESULT2 - QUEUE_RESULT0)
3220 +
3221 +#define TEQ_HTD (1 << 22)
3222 +#define TEQ_HWRED (1 << 21)
3223 +
3224 +
3225 +#endif /* _PHY_QUEUE_H_ */
3226 --- /dev/null
3227 +++ b/drivers/staging/fsl_ppfe/include/pfe/tmu/sched.h
3228 @@ -0,0 +1,72 @@
3229 +/*
3230 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
3231 + *
3232 + * This program is free software; you can redistribute it and/or
3233 + * modify it under the terms of the GNU General Public License
3234 + * as published by the Free Software Foundation; either version 2
3235 + * of the License, or (at your option) any later version.
3236 + *
3237 + * This program is distributed in the hope that it will be useful,
3238 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3239 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3240 + * GNU General Public License for more details.
3241 + *
3242 + * You should have received a copy of the GNU General Public License
3243 + * along with this program; if not, write to the Free Software
3244 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
3245 + *
3246 +*/
3247 +#ifndef _SCHED_H_
3248 +#define _SCHED_H_
3249 +
3250 +/* Offsets from SCHEDx_BASE_ADDR */
3251 +#define SCHED_CTRL 0x00
3252 +#define SCHED_SLOT_TIME 0x04
3253 +#define SCHED_RES 0x08
3254 +#define SCHED_QUEUE_ALLOC0 0x0c
3255 +#define SCHED_QUEUE_ALLOC1 0x10
3256 +#define SCHED_BW 0x14
3257 +#define SCHED_GUR_DEF_CTR 0x18
3258 +#define SCHED_AVL_CTR 0x1c
3259 +#define SCHED_QU0_WGHT 0x20
3260 +#define SCHED_QU1_WGHT 0x24
3261 +#define SCHED_QU2_WGHT 0x28
3262 +#define SCHED_QU3_WGHT 0x2c
3263 +#define SCHED_QU4_WGHT 0x30
3264 +#define SCHED_QU5_WGHT 0x34
3265 +#define SCHED_QU6_WGHT 0x38
3266 +#define SCHED_QU7_WGHT 0x3c
3267 +#define SCHED_QUE0_DEFICIT_CNT 0x40
3268 +#define SCHED_QUE1_DEFICIT_CNT 0x44
3269 +#define SCHED_QUE2_DEFICIT_CNT 0x48
3270 +#define SCHED_QUE3_DEFICIT_CNT 0x4c
3271 +#define SCHED_QUE4_DEFICIT_CNT 0x50
3272 +#define SCHED_QUE5_DEFICIT_CNT 0x54
3273 +#define SCHED_QUE6_DEFICIT_CNT 0x58
3274 +#define SCHED_QUE7_DEFICIT_CNT 0x5c
3275 +#define SCHED_PKT_LEN 0x60
3276 +
3277 +#define SCHED_CTRL_ALGOTYPE(x) (((x) & 0xf) << 0)
3278 +#define SCHED_CTRL_CALQUOTA(x) (((x) & 0x1) << 4)
3279 +#define SCHED_CTRL_ACTIVE_Q(x) (((x) & 0xff) << 8)
3280 +#define SCHED_CTRL_SHARE_BW(x) (((x) & 0xff) << 16)
3281 +#define SCHED_CTRL_BARROW_BW(x) (((x) & 0xff) << 24)
3282 +
3283 +#define SCHED_QUEUE_ALLOC(x, b) (((x) & 0x1f) << (b))
3284 +
3285 +#define SCHED_QUEUE_ALLOC0_QUEUEA(x) (((x) & 0x1f) << 0)
3286 +#define SCHED_QUEUE_ALLOC0_QUEUEB(x) (((x) & 0x1f) << 8)
3287 +#define SCHED_QUEUE_ALLOC0_QUEUEC(x) (((x) & 0x1f) << 16)
3288 +#define SCHED_QUEUE_ALLOC0_QUEUED(x) (((x) & 0x1f) << 24)
3289 +
3290 +#define SCHED_QUEUE_ALLOC0_RES0(x) (((x) & 0x7) << 5)
3291 +#define SCHED_QUEUE_ALLOC0_RES1(x) (((x) & 0x7) << 13)
3292 +#define SCHED_QUEUE_ALLOC0_RES2(x) (((x) & 0x7) << 21)
3293 +#define SCHED_QUEUE_ALLOC0_RES3(x) (((x) & 0x7) << 29)
3294 +
3295 +#define SCHED_QUEUE_ALLOC1_QUEUEA(x) (((x) & 0x1f) << 0)
3296 +#define SCHED_QUEUE_ALLOC1_QUEUEB(x) (((x) & 0x1f) << 8)
3297 +#define SCHED_QUEUE_ALLOC1_QUEUEC(x) (((x) & 0x1f) << 16)
3298 +#define SCHED_QUEUE_ALLOC1_QUEUED(x) (((x) & 0x1f) << 24)
3299 +
3300 +#endif /* _SCHED_H_ */
3301 --- /dev/null
3302 +++ b/drivers/staging/fsl_ppfe/include/pfe/tmu/shaper.h
3303 @@ -0,0 +1,37 @@
3304 +/*
3305 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
3306 + *
3307 + * This program is free software; you can redistribute it and/or
3308 + * modify it under the terms of the GNU General Public License
3309 + * as published by the Free Software Foundation; either version 2
3310 + * of the License, or (at your option) any later version.
3311 + *
3312 + * This program is distributed in the hope that it will be useful,
3313 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3314 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3315 + * GNU General Public License for more details.
3316 + *
3317 + * You should have received a copy of the GNU General Public License
3318 + * along with this program; if not, write to the Free Software
3319 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
3320 + *
3321 +*/
3322 +#ifndef _SHAPER_H_
3323 +#define _SHAPER_H_
3324 +
3325 +/* Offsets from SHAPERx_BASE_ADDR */
3326 +#define SHAPER_CTRL 0x00
3327 +#define SHAPER_WEIGHT 0x04
3328 +#define SHAPER_PKT_LEN 0x08
3329 +
3330 +#define SHAPER_CTRL_ENABLE(x) (((x) & 0x1) << 0)
3331 +#define SHAPER_CTRL_QNO(x) (((x) & 0x3f) << 1)
3332 +#define SHAPER_CTRL_CLKDIV(x) (((x) & 0xffff) << 16)
3333 +
3334 +#define SHAPER_WEIGHT_FRACWT(x) (((x) & 0xff) << 0)
3335 +#define SHAPER_WEIGHT_INTWT(x) (((x) & 0x3) << 8)
3336 +#define SHAPER_WEIGHT_MAXCREDIT(x) (((x) & 0x3fffff) << 10)
3337 +
3338 +#define PORT_SHAPER_MASK (1 << 0)
3339 +
3340 +#endif /* _SHAPER_H_ */
3341 --- /dev/null
3342 +++ b/drivers/staging/fsl_ppfe/include/pfe/uart.h
3343 @@ -0,0 +1,31 @@
3344 +/*
3345 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
3346 + *
3347 + * This program is free software; you can redistribute it and/or
3348 + * modify it under the terms of the GNU General Public License
3349 + * as published by the Free Software Foundation; either version 2
3350 + * of the License, or (at your option) any later version.
3351 + *
3352 + * This program is distributed in the hope that it will be useful,
3353 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3354 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3355 + * GNU General Public License for more details.
3356 + *
3357 + * You should have received a copy of the GNU General Public License
3358 + * along with this program; if not, write to the Free Software
3359 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
3360 + *
3361 +*/
3362 +#ifndef _UART_H_
3363 +#define _UART_H_
3364 +
3365 +#define UART_THR (UART_BASE_ADDR + 0x00)
3366 +#define UART_IER (UART_BASE_ADDR + 0x04)
3367 +#define UART_IIR (UART_BASE_ADDR + 0x08)
3368 +#define UART_LCR (UART_BASE_ADDR + 0x0c)
3369 +#define UART_MCR (UART_BASE_ADDR + 0x10)
3370 +#define UART_LSR (UART_BASE_ADDR + 0x14)
3371 +#define UART_MDR (UART_BASE_ADDR + 0x18)
3372 +#define UART_SCRATCH (UART_BASE_ADDR + 0x1c)
3373 +
3374 +#endif /* _UART_H_ */
3375 --- /dev/null
3376 +++ b/drivers/staging/fsl_ppfe/include/pfe/util.h
3377 @@ -0,0 +1,49 @@
3378 +/*
3379 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
3380 + *
3381 + * This program is free software; you can redistribute it and/or
3382 + * modify it under the terms of the GNU General Public License
3383 + * as published by the Free Software Foundation; either version 2
3384 + * of the License, or (at your option) any later version.
3385 + *
3386 + * This program is distributed in the hope that it will be useful,
3387 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3388 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3389 + * GNU General Public License for more details.
3390 + *
3391 + * You should have received a copy of the GNU General Public License
3392 + * along with this program; if not, write to the Free Software
3393 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
3394 + *
3395 +*/
3396 +#ifndef _UTIL_H_
3397 +#define _UTIL_H_
3398 +
3399 +#define UTIL_DMEM_BASE_ADDR 0x00000000
3400 +#define UTIL_DMEM_SIZE 0x00002000
3401 +#define UTIL_DMEM_END (UTIL_DMEM_BASE_ADDR + UTIL_DMEM_SIZE)
3402 +
3403 +#define IS_DMEM(addr, len) (((unsigned long)(addr) >= UTIL_DMEM_BASE_ADDR) && (((unsigned long)(addr) + (len)) <= UTIL_DMEM_END))
3404 +
3405 +#define CBUS_BASE_ADDR 0xc0000000
3406 +#define UTIL_APB_BASE_ADDR 0xc1000000
3407 +
3408 +#include "cbus.h"
3409 +
3410 +#define GPT_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x00000)
3411 +#define UART_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x10000)
3412 +#define EAPE_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x20000)
3413 +#define INQ_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x30000)
3414 +#define EFET1_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x40000)
3415 +#define EFET2_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x50000)
3416 +#define EFET3_BASE_ADDR (UTIL_APB_BASE_ADDR + 0x60000)
3417 +
3418 +
3419 +#include "gpt.h"
3420 +#include "uart.h"
3421 +#include "util/eape.h"
3422 +#include "util/inq.h"
3423 +#include "util/efet.h"
3424 +
3425 +
3426 +#endif /* _UTIL_H_ */
3427 --- /dev/null
3428 +++ b/drivers/staging/fsl_ppfe/include/pfe/util/eape.h
3429 @@ -0,0 +1,57 @@
3430 +/*
3431 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
3432 + *
3433 + * This program is free software; you can redistribute it and/or
3434 + * modify it under the terms of the GNU General Public License
3435 + * as published by the Free Software Foundation; either version 2
3436 + * of the License, or (at your option) any later version.
3437 + *
3438 + * This program is distributed in the hope that it will be useful,
3439 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3440 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3441 + * GNU General Public License for more details.
3442 + *
3443 + * You should have received a copy of the GNU General Public License
3444 + * along with this program; if not, write to the Free Software
3445 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
3446 + *
3447 +*/
3448 +#ifndef _EAPE_H_
3449 +#define _EAPE_H_
3450 +
3451 +#define EAPE_STATUS (EAPE_BASE_ADDR + 0x0)
3452 +#define EAPE_INT_ENABLE (EAPE_BASE_ADDR + 0x4)
3453 +#define EAPE_INT_SRC (EAPE_BASE_ADDR + 0x8)
3454 +#define EAPE_HOST_INT_ENABLE (EAPE_BASE_ADDR + 0xc)
3455 +
3456 +/** The following bits enable interrupts from the host to UtilPE and
3457 +* from UtilPE to the host */
3458 +
3459 +#define IRQ_EN_EFET_TO_UTIL 0x1
3460 +#define IRQ_EN_QB_TO_UTIL 0x2
3461 +#define IRQ_EN_INQ_TO_UTIL 0x4
3462 +#define IRQ_EN_EAPE_TO_UTIL 0x8
3463 +#define IRQ_EN_GPT_TMR_TO_UTIL 0x10
3464 +#define IRQ_EN_UART_TO_UTIL 0x20
3465 +#define IRQ_EN_SYSLP_TO_UTIL 0x40
3466 +#define IRQ_EN_UPEGP_TO_UTIL 0x80
3467 +
3468 +/** Out interrupts */
3469 +
3470 +#define IRQ_EN_EFET_OUT 0x100
3471 +#define IRQ_EN_QB_OUT 0x200
3472 +#define IRQ_EN_INQ_OUT 0x400
3473 +#define IRQ_EN_EAPE_OUT 0x800
3474 +#define IRQ_EN_GPT_TMR_OUT 0x1000
3475 +#define IRQ_EN_UART_OUT 0x2000
3476 +#define IRQ_EN_SYSLP_OUT 0x4000
3477 +#define IRQ_EN_UPEGP_OUT 0x8000
3478 +
3479 +/** The following bits are enabled in the status register
3480 + * which are mapped to IPSEC status register bits */
3481 +#define EAPE_IN_STAT_AVAIL 0x1
3482 +#define EAPE_OUT_STAT_AVAIL 0x2
3483 +#define EAPE_IN_CMD_AVAIL 0x4
3484 +#define EAPE_OUT_CMD_AVAIL 0x8
3485 +
3486 +#endif /* _EAPE_H_ */
3487 --- /dev/null
3488 +++ b/drivers/staging/fsl_ppfe/include/pfe/util/efet.h
3489 @@ -0,0 +1,119 @@
3490 +/*
3491 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
3492 + *
3493 + * This program is free software; you can redistribute it and/or
3494 + * modify it under the terms of the GNU General Public License
3495 + * as published by the Free Software Foundation; either version 2
3496 + * of the License, or (at your option) any later version.
3497 + *
3498 + * This program is distributed in the hope that it will be useful,
3499 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3500 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3501 + * GNU General Public License for more details.
3502 + *
3503 + * You should have received a copy of the GNU General Public License
3504 + * along with this program; if not, write to the Free Software
3505 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
3506 + *
3507 +*/
3508 +#ifndef _UTIL_EFET_H_
3509 +#define _UTIL_EFET_H_
3510 +
3511 +#define EFET_ENTRY_ADDR 0x00
3512 +#define EFET_ENTRY_SIZE 0x04
3513 +#define EFET_ENTRY_DMEM_ADDR 0x08
3514 +#define EFET_ENTRY_STATUS 0x0c
3515 +#define EFET_ENTRY_ENDIAN 0x10
3516 +
3517 +#define CBUS2DMEM 0
3518 +#define DMEM2CBUS 1
3519 +
3520 +#define EFET2BUS_LE (1 << 0)
3521 +
3522 +#define EFET1 0
3523 +#define EFET2 1
3524 +#define EFET3 2
3525 +#define MAX_UTIL_EFET_LEN 128
3526 +
3527 +extern const unsigned long util_efet_baseaddr[3];
3528 +extern u32 util_efet_status;
3529 +
3530 +/* The barrier call is an empirical work-around for an unknown bug: for some unknown reason, it solves
3531 + * a UtilPE crash observed with LRO and packet steering. Other solutions also worked (e.g. barrier,
3532 + * nop calls in other positions). However, no common pattern could be extracted from those solutions
3533 + * to narrow down the source of the crash.
3534 + */
3535 +
3536 +#define __UTIL_EFET(i, cbus_addr, dmem_addr,len,dir) do { \
3537 + __writel((len & 0x3FF) | (dir << 16), util_efet_baseaddr[i] + EFET_ENTRY_SIZE); \
3538 + __writel(dmem_addr, util_efet_baseaddr[i] + EFET_ENTRY_DMEM_ADDR);\
3539 + __writel(cbus_addr, util_efet_baseaddr[i] + EFET_ENTRY_ADDR);\
3540 + nop();\
3541 + }while(0)
3542 +
3543 +#define UTIL_EFET(i, cbus_addr, dmem_addr,len,dir) do { \
3544 + __UTIL_EFET(i, cbus_addr, dmem_addr, len, dir); \
3545 + util_efet_status |= (1 << i); \
3546 + } while(0)
3547 +
3548 +
3549 +/** Waits for the util efet to finish a transaction, blocking the caller
3550 +* (without updating the status).
3551 +* Can be called at any time.
3552 +*
3553 +* @param i Efet index
3554 +*
3555 +*
3556 +*/
3557 +static inline void __util_efet_wait(int i)
3558 +{
3559 + while (!(readl(util_efet_baseaddr[i] + EFET_ENTRY_STATUS) & 0x1)) ;
3560 +}
3561 +
3562 +/** Waits for the util efet to finish a transaction, blocking the caller.
3563 +* Can be called at any time.
3564 +*
3565 +* @param i Efet index
3566 +*
3567 +*/
3568 +static inline void util_efet_wait(int i)
3569 +{
3570 + __util_efet_wait(i);
3571 +
3572 + util_efet_status &= ~(1 << i);
3573 +}
3574 +
3575 +/** Asynchronous interface to util efet read/write functions.
3576 +* It will wait for the efet to finish previous transaction, but does not wait for the current transaction to finish.
3577 +*
3578 +* @param i Efet index
3579 +* @param cbus_addr Cbus address (must be 64bits aligned)
3580 +* @param dmem_addr DMEM address (must be 64bits aligned)
3581 +* @param len Number of bytes to copy (must be 64bits aligned size)
3582 +* @param dir Direction of the transaction (0 - cbus to dmem, 1 - dmem to cbus)
3583 +*
3584 +*/
3585 +static inline void util_efet_async(int i, u32 cbus_addr, u32 dmem_addr, u32 len, u8 dir)
3586 +{
3587 + if (util_efet_status & (1 << i))
3588 + util_efet_wait(i);
3589 +
3590 + UTIL_EFET(i, cbus_addr, dmem_addr, len, dir);
3591 +}
3592 +
3593 +
3594 +static inline void util_efet_async0( u32 cbus_addr, u32 dmem_addr, u32 len, u8 dir)
3595 +{
3596 + util_efet_async(0, cbus_addr, dmem_addr, len,dir);
3597 +}
3598 +
3599 +/* EFET 2 is always used for SYNC operations */
3600 +static inline void util_efet_sync2(u32 cbus_addr, u32 dmem_addr, u32 len, u8 dir)
3601 +{
3602 + __UTIL_EFET(2, cbus_addr, dmem_addr, len,dir);
3603 + __util_efet_wait(2);
3604 +}
3605 +
3606 +void util_efet_sync0(u32 cbus_addr, u32 dmem_addr, u32 len, u8 dir);
3607 +#endif /* _UTIL_EFET_H_ */
3608 +
3609 --- /dev/null
3610 +++ b/drivers/staging/fsl_ppfe/include/pfe/util/inq.h
3611 @@ -0,0 +1,28 @@
3612 +/*
3613 + * Copyright (c) 2011, 2014 Freescale Semiconductor, Inc.
3614 + *
3615 + * This program is free software; you can redistribute it and/or
3616 + * modify it under the terms of the GNU General Public License
3617 + * as published by the Free Software Foundation; either version 2
3618 + * of the License, or (at your option) any later version.
3619 + *
3620 + * This program is distributed in the hope that it will be useful,
3621 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3622 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3623 + * GNU General Public License for more details.
3624 + *
3625 + * You should have received a copy of the GNU General Public License
3626 + * along with this program; if not, write to the Free Software
3627 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
3628 + *
3629 +*/
3630 +#ifndef _INQ_H_
3631 +#define _INQ_H_
3632 +
3633 +#define INQ_HOST_GP (INQ_BASE_ADDR + 0x00) /* FIXME what are these for ? */
3634 +#define INQ_UPE_GP (INQ_BASE_ADDR + 0x04) /* FIXME what are these for ? */
3635 +
3636 +#define INQ_QB_PKTPTR (INQ_BASE_ADDR + 0x08)
3637 +#define INQ_FIFO_CNT (INQ_BASE_ADDR + 0x0c)
3638 +
3639 +#endif /* _INQ_H_ */
3640 --- /dev/null
3641 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
3642 @@ -0,0 +1,363 @@
3643 +/*
3644 + *
3645 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
3646 + *
3647 + * This program is free software; you can redistribute it and/or modify
3648 + * it under the terms of the GNU General Public License as published by
3649 + * the Free Software Foundation; either version 2 of the License, or
3650 + * (at your option) any later version.
3651 + *
3652 + * This program is distributed in the hope that it will be useful,
3653 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3654 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3655 + * GNU General Public License for more details.
3656 + *
3657 + * You should have received a copy of the GNU General Public License
3658 + * along with this program; if not, write to the Free Software
3659 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
3660 + */
3661 +
3662 +#ifdef __KERNEL__
3663 +#include <linux/kernel.h>
3664 +#include <linux/sched.h>
3665 +#include <linux/module.h>
3666 +#include <linux/list.h>
3667 +#include <linux/kthread.h>
3668 +#else
3669 +#include "platform.h"
3670 +#endif
3671 +
3672 +#include "pfe_mod.h"
3673 +#include "pfe_ctrl.h"
3674 +
3675 +#include "pfe_ctrl_hal.h"
3676 +
3677 +static struct pe_sync_mailbox CLASS_DMEM_SH2(sync_mailbox);
3678 +static struct pe_sync_mailbox TMU_DMEM_SH2(sync_mailbox);
3679 +
3680 +static struct pe_msg_mailbox CLASS_DMEM_SH2(msg_mailbox);
3681 +static struct pe_msg_mailbox TMU_DMEM_SH2(msg_mailbox);
3682 +
3683 +#if !defined(CONFIG_PLATFORM_LS1012A)
3684 +static u32 CLASS_DMEM_SH2(resume);
3685 +static u32 TMU_DMEM_SH2(resume);
3686 +#endif
3687 +
3688 +#if !defined(CONFIG_UTIL_DISABLED)
3689 +static struct pe_sync_mailbox UTIL_DMEM_SH2(sync_mailbox);
3690 +static struct pe_msg_mailbox UTIL_DMEM_SH2(msg_mailbox);
3691 +static u32 UTIL_DMEM_SH2(resume);
3692 +#endif
3693 +
3694 +static int pfe_ctrl_timer(void *data);
3695 +
3696 +static int initialized = 0;
3697 +
3698 +#define TIMEOUT_MS 1000
3699 +
3700 +int relax(unsigned long end)
3701 +{
3702 +#ifdef __KERNEL__
3703 + if (time_after(jiffies, end)) {
3704 + if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000)) {
3705 + return -1;
3706 + }
3707 +
3708 + if (need_resched())
3709 + schedule();
3710 + }
3711 +#else
3712 + udelay(1);
3713 +#endif
3714 +
3715 + return 0;
3716 +}
3717 +
3718 +#if !defined(CONFIG_PLATFORM_LS1012A)
3719 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
3720 +{
3721 + int id;
3722 +
3723 + kthread_stop(ctrl->timer_thread);
3724 +
3725 + mutex_lock(&ctrl->mutex);
3726 +
3727 + initialized = 0;
3728 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
3729 + pe_dmem_write(id, cpu_to_be32(0x1), (unsigned long)virt_to_class_dmem(&class_resume), 4);
3730 +
3731 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
3732 +#if defined(CONFIG_PLATFORM_LS1012A)
3733 + if(id == TMU2_ID) continue;
3734 +#endif
3735 + pe_dmem_write(id, cpu_to_be32(0x1), (unsigned long)virt_to_class_dmem(&tmu_resume), 4);
3736 + }
3737 +
3738 +#if !defined(CONFIG_UTIL_DISABLED)
3739 + pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), (unsigned long)virt_to_class_dmem(&util_resume), 4);
3740 +#endif
3741 +
3742 + pe_sync_stop(&pfe->ctrl, 0xFF);
3743 +
3744 + mutex_unlock(&ctrl->mutex);
3745 +}
3746 +
3747 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
3748 +{
3749 + mutex_lock(&ctrl->mutex);
3750 + initialized = 1;
3751 + pe_start(&pfe->ctrl, 0xFF);
3752 + mutex_unlock(&ctrl->mutex);
3753 +
3754 + ctrl->timer_thread = kthread_create(pfe_ctrl_timer, ctrl, "pfe_ctrl_timer");
3755 +
3756 + wake_up_process(ctrl->timer_thread);
3757 +}
3758 +#endif
3759 +
3760 +/** PE sync stop.
3761 +* Stops packet processing for a list of PE's (specified using a bitmask).
3762 +* The caller must hold ctrl->mutex.
3763 +*
3764 +* @param ctrl Control context
3765 +* @param pe_mask Mask of PE id's to stop
3766 +*
3767 +*/
3768 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
3769 +{
3770 + struct pe_sync_mailbox *mbox;
3771 + int pe_stopped = 0;
3772 + unsigned long end = jiffies + 2;
3773 + int i;
3774 +
3775 +#if defined(CONFIG_PLATFORM_LS1012A)
3776 + //TODO Util should be removed after IPSec is ported
3777 + pe_mask &= 0x2FF; //Exclude Util + TMU2
3778 +#endif
3779 + for (i = 0; i < MAX_PE; i++)
3780 + if (pe_mask & (1 << i)) {
3781 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
3782 +
3783 + pe_dmem_write(i, cpu_to_be32(0x1), (unsigned long)&mbox->stop, 4);
3784 + }
3785 +
3786 + while (pe_stopped != pe_mask) {
3787 + for (i = 0; i < MAX_PE; i++)
3788 + if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
3789 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
3790 +
3791 + if (pe_dmem_read(i, (unsigned long)&mbox->stopped, 4) & cpu_to_be32(0x1))
3792 + pe_stopped |= (1 << i);
3793 + }
3794 +
3795 + if (relax(end) < 0)
3796 + goto err;
3797 + }
3798 +
3799 + return 0;
3800 +
3801 +err:
3802 + printk(KERN_ERR "%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
3803 +
3804 + for (i = 0; i < MAX_PE; i++)
3805 + if (pe_mask & (1 << i)) {
3806 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
3807 +
3808 + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned long)&mbox->stop, 4);
3809 + }
3810 +
3811 + return -EIO;
3812 +}
3813 +
3814 +/** PE start.
3815 +* Starts packet processing for a list of PE's (specified using a bitmask).
3816 +* The caller must hold ctrl->mutex.
3817 +*
3818 +* @param ctrl Control context
3819 +* @param pe_mask Mask of PE id's to start
3820 +*
3821 +*/
3822 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
3823 +{
3824 + struct pe_sync_mailbox *mbox;
3825 + int i;
3826 +
3827 +#if defined(CONFIG_PLATFORM_LS1012A)
3828 + /* TODO: drop this mask once the Util PE IPSec code is ported */
3829 + pe_mask &= 0x2FF; /* exclude Util PE and TMU2 from the start request */
3830 +#endif
3831 + for (i = 0; i < MAX_PE; i++)
3832 + if (pe_mask & (1 << i)) {
3833 +
3834 + mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
3835 +
3836 + pe_dmem_write(i, cpu_to_be32(0x0), (unsigned long)&mbox->stop, 4);
3837 + }
3838 +}
3839 +
3840 +
3841 +/** Sends a control request to a given PE (to copy data to/from internal memory from/to DDR).
3842 +* The caller must hold ctrl->mutex.
3843 +*
3844 +* @param ctrl Control context
3845 +* @param id PE id
3846 +* @param dst Physical destination address of data
3847 +* @param src Physical source address of data
3848 +* @param len Data length
3849 +* @param cmd_type Command type, placed in the upper 16 bits of the mailbox request word
3850 +*/
3851 +int pe_request(struct pfe_ctrl *ctrl, int id, unsigned short cmd_type, unsigned long dst, unsigned long src, int len)
3852 +{
3853 + struct pe_msg_mailbox mbox = {
3854 + .dst = cpu_to_be32(dst),
3855 + .src = cpu_to_be32(src),
3856 + .len = cpu_to_be32(len),
3857 + .request = cpu_to_be32((cmd_type << 16) | 0x1),
3858 + };
3859 + struct pe_msg_mailbox *pmbox = (void *)ctrl->msg_mailbox_baseaddr[id];
3860 + unsigned long end = jiffies + 2;
3861 + u32 rc;
3862 +
3863 + /* This works because .request is written last */
3864 + pe_dmem_memcpy_to32(id, (unsigned long)pmbox, &mbox, sizeof(mbox));
3865 +
3866 + while ((rc = pe_dmem_read(id, (unsigned long)&pmbox->request, 4)) & cpu_to_be32(0xffff)) {
3867 + if (relax(end) < 0)
3868 + goto err;
3869 + }
3870 +
3871 + rc = be32_to_cpu(rc);
3872 +
3873 + return rc >> 16;
3874 +
3875 +err:
3876 + printk(KERN_ERR "%s: timeout, %x\n", __func__, be32_to_cpu(rc));
3877 + pe_dmem_write(id, cpu_to_be32(0), (unsigned long)&pmbox->request, 4);
3878 + return -EIO;
3879 +}
3880 +
3881 +
3882 +/** Control code timer thread.
3883 +*
3884 +* A kernel thread is used so that the timer code can be run under the control path mutex.
3885 +* The thread wakes up regularly and checks if any timer in the timer list has expired.
3886 +* The timers are re-started automatically.
3887 +* The code tries to keep the number of times a timer runs per unit time constant on average,
3888 +* if the thread scheduling is delayed, it's possible for a particular timer to be scheduled in
3889 +* quick succession to make up for the lost time.
3890 +*
3891 +* @param data Pointer to the control context structure
3892 +*
3893 +* @return 0 on success, a negative value on error
3894 +*
3895 +*/
3896 +static int pfe_ctrl_timer(void *data)
3897 +{
3898 + struct pfe_ctrl *ctrl = data;
3899 + TIMER_ENTRY *timer, *next;
3900 +
3901 + printk(KERN_INFO "%s\n", __func__);
3902 +
3903 + while (1)
3904 + {
3905 + schedule_timeout_uninterruptible(ctrl->timer_period);
3906 +
3907 + mutex_lock(&ctrl->mutex);
3908 +
3909 + list_for_each_entry_safe(timer, next, &ctrl->timer_list, list)
3910 + {
3911 + if (time_after(jiffies, timer->timeout))
3912 + {
3913 + timer->timeout += timer->period;
3914 +
3915 + timer->handler();
3916 + }
3917 + }
3918 +
3919 + mutex_unlock(&ctrl->mutex);
3920 +
3921 + if (kthread_should_stop())
3922 + break;
3923 + }
3924 +
3925 + printk(KERN_INFO "%s exiting\n", __func__);
3926 +
3927 + return 0;
3928 +}
3929 +
3930 +
3931 +int pfe_ctrl_init(struct pfe *pfe)
3932 +{
3933 + struct pfe_ctrl *ctrl = &pfe->ctrl;
3934 + int id;
3935 + int rc;
3936 +
3937 + printk(KERN_INFO "%s\n", __func__);
3938 +
3939 + mutex_init(&ctrl->mutex);
3940 + spin_lock_init(&ctrl->lock);
3941 +
3942 + ctrl->timer_period = HZ / TIMER_TICKS_PER_SEC;
3943 +
3944 + INIT_LIST_HEAD(&ctrl->timer_list);
3945 +
3946 + /*INIT_WORK(&ctrl->work, comcerto_fpp_workqueue);*/
3947 +
3948 + INIT_LIST_HEAD(&ctrl->msg_list);
3949 +
3950 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
3951 + ctrl->sync_mailbox_baseaddr[id] = virt_to_class_dmem(&class_sync_mailbox);
3952 + ctrl->msg_mailbox_baseaddr[id] = virt_to_class_dmem(&class_msg_mailbox);
3953 + }
3954 +
3955 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
3956 +#if defined(CONFIG_PLATFORM_LS1012A)
3957 + if(id == TMU2_ID) continue;
3958 +#endif
3959 + ctrl->sync_mailbox_baseaddr[id] = virt_to_tmu_dmem(&tmu_sync_mailbox);
3960 + ctrl->msg_mailbox_baseaddr[id] = virt_to_tmu_dmem(&tmu_msg_mailbox);
3961 + }
3962 +
3963 +#if !defined(CONFIG_UTIL_DISABLED)
3964 + ctrl->sync_mailbox_baseaddr[UTIL_ID] = virt_to_util_dmem(&util_sync_mailbox);
3965 + ctrl->msg_mailbox_baseaddr[UTIL_ID] = virt_to_util_dmem(&util_msg_mailbox);
3966 +#endif
3967 +
3968 + ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
3969 + ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr + ROUTE_TABLE_BASEADDR;
3970 + ctrl->ipsec_lmem_phys_baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR + IPSEC_LMEM_BASEADDR);
3971 + ctrl->ipsec_lmem_baseaddr = (LMEM_BASE_ADDR + IPSEC_LMEM_BASEADDR);
3972 +
3973 + ctrl->timer_thread = kthread_create(pfe_ctrl_timer, ctrl, "pfe_ctrl_timer");
3974 + if (IS_ERR(ctrl->timer_thread))
3975 + {
3976 + printk (KERN_ERR "%s: kthread_create() failed\n", __func__);
3977 + rc = PTR_ERR(ctrl->timer_thread);
3978 + goto err0;
3979 + }
3980 +
3981 + ctrl->dev = pfe->dev;
3982 +
3983 + wake_up_process(ctrl->timer_thread);
3984 +
3985 + printk(KERN_INFO "%s finished\n", __func__);
3986 +
3987 + initialized = 1;
3988 +
3989 + return 0;
3990 +
3991 +err0:
3992 + return rc;
3993 +}
3994 +
3995 +
3996 +void pfe_ctrl_exit(struct pfe *pfe)
3997 +{
3998 + struct pfe_ctrl *ctrl = &pfe->ctrl;
3999 +
4000 + printk(KERN_INFO "%s\n", __func__);
4001 +
4002 + initialized = 0;
4003 +
4004 + kthread_stop(ctrl->timer_thread);
4005 +}
4006 --- /dev/null
4007 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h
4008 @@ -0,0 +1,111 @@
4009 +/*
4010 + *
4011 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
4012 + *
4013 + * This program is free software; you can redistribute it and/or modify
4014 + * it under the terms of the GNU General Public License as published by
4015 + * the Free Software Foundation; either version 2 of the License, or
4016 + * (at your option) any later version.
4017 + *
4018 + * This program is distributed in the hope that it will be useful,
4019 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4020 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4021 + * GNU General Public License for more details.
4022 + *
4023 + * You should have received a copy of the GNU General Public License
4024 + * along with this program; if not, write to the Free Software
4025 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
4026 + */
4027 +
4028 +#ifndef _PFE_CTRL_H_
4029 +#define _PFE_CTRL_H_
4030 +
4031 +#include <linux/dmapool.h>
4032 +
4033 +#include "pfe_mod.h"
4034 +#include "pfe/pfe.h"
4035 +
4036 +#define DMA_BUF_SIZE_128 0x80 /* enough for 1 conntracks */
4037 +#define DMA_BUF_SIZE_256 0x100 /* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */
4038 +#define DMA_BUF_SIZE_512 0x200 /* 512bytes dma allocated buffers used by rtp relay feature */
4039 +#define DMA_BUF_MIN_ALIGNMENT 8
4040 +#define DMA_BUF_BOUNDARY (4 * 1024) /* bursts can not cross 4k boundary */
4041 +
4042 +#define CMD_TX_ENABLE 0x0501
4043 +#define CMD_TX_DISABLE 0x0502
4044 +
4045 +#define CMD_RX_LRO 0x0011
4046 +#define CMD_PKTCAP_ENABLE 0x0d01
4047 +#define CMD_QM_EXPT_RATE 0x020c
4048 +
4049 +#define EXPT_TYPE_PCAP 0x3
4050 +
4051 +struct pfe_ctrl {
4052 + struct mutex mutex;
4053 + spinlock_t lock;
4054 +
4055 + void *dma_pool;
4056 + void *dma_pool_512;
4057 + void *dma_pool_128;
4058 +
4059 + struct device *dev;
4060 +
4061 + void *hash_array_baseaddr; /** Virtual base address of the conntrack hash array */
4062 + unsigned long hash_array_phys_baseaddr; /** Physical base address of the conntrack hash array */
4063 +
4064 + struct task_struct *timer_thread;
4065 + struct list_head timer_list;
4066 + unsigned long timer_period;
4067 +
4068 + int (*event_cb)(u16, u16, u16*);
4069 +
4070 + unsigned long sync_mailbox_baseaddr[MAX_PE]; /* Sync mailbox PFE internal address, initialized when parsing elf images */
4071 + unsigned long msg_mailbox_baseaddr[MAX_PE]; /* Msg mailbox PFE internal address, initialized when parsing elf images */
4072 +
4073 + unsigned long class_dmem_sh;
4074 + unsigned long class_pe_lmem_sh;
4075 + unsigned long tmu_dmem_sh;
4076 + unsigned long util_dmem_sh;
4077 + unsigned long util_ddr_sh;
4078 + struct clk *clk_axi;
4079 + unsigned int sys_clk; // AXI clock value, in KHz
4080 + void *ipsec_lmem_baseaddr;
4081 + unsigned long ipsec_lmem_phys_baseaddr;
4082 +
4083 + /* used for asynchronous message transfer to PFE */
4084 + struct list_head msg_list;
4085 + struct work_struct work;
4086 +};
4087 +
4088 +int pfe_ctrl_init(struct pfe *pfe);
4089 +void pfe_ctrl_exit(struct pfe *pfe);
4090 +
4091 +int pe_send_cmd(unsigned short cmd_type, unsigned short action, unsigned long param1, unsigned long param2);
4092 +int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask);
4093 +void pe_start(struct pfe_ctrl *ctrl, int pe_mask);
4094 +int pe_request(struct pfe_ctrl *ctrl, int id,unsigned short cmd_type, unsigned long dst, unsigned long src, int len);
4095 +int pe_read(struct pfe_ctrl *ctrl, int id, u32 *dst, unsigned long src, int len, int clear_flag);
4096 +int tmu_pe_request(struct pfe_ctrl *ctrl, int id, unsigned int tmu_cmd_bitmask);
4097 +
4098 +int pfe_ctrl_set_eth_state(int id, unsigned int state, unsigned char *mac_addr);
4099 +int pfe_ctrl_set_lro(char enable);
4100 +#ifdef CFG_PCAP
4101 +int pfe_ctrl_set_pcap(char enable);
4102 +int pfe_ctrl_set_pcap_ratelimit(u32 pkts_per_msec);
4103 +#endif
4104 +void pfe_ctrl_suspend(struct pfe_ctrl *ctrl);
4105 +void pfe_ctrl_resume(struct pfe_ctrl *ctrl);
4106 +int relax(unsigned long end);
4107 +
4108 +/* used for asynchronous message transfer to PFE */
4109 +#define FPP_MAX_MSG_LENGTH 256 /* expressed in U8 -> 256 bytes*/
4110 +struct fpp_msg {
4111 + struct list_head list;
4112 + void (*callback)(unsigned long, int, u16, u16 *);
4113 + unsigned long data;
4114 + u16 fcode;
4115 + u16 length;
4116 + u16 *payload;
4117 +};
4118 +
4119 +#endif /* _PFE_CTRL_H_ */
4120 --- /dev/null
4121 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl_hal.c
4122 @@ -0,0 +1,207 @@
4123 +/*
4124 + *
4125 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
4126 + *
4127 + * This program is free software; you can redistribute it and/or modify
4128 + * it under the terms of the GNU General Public License as published by
4129 + * the Free Software Foundation; either version 2 of the License, or
4130 + * (at your option) any later version.
4131 + *
4132 + * This program is distributed in the hope that it will be useful,
4133 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4134 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4135 + * GNU General Public License for more details.
4136 + *
4137 + * You should have received a copy of the GNU General Public License
4138 + * along with this program; if not, write to the Free Software
4139 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
4140 + */
4141 +
4142 +/* OS abstraction functions used by PFE control code */
4143 +
4144 +#include <linux/slab.h>
4145 +
4146 +#include "pfe_ctrl_hal.h"
4147 +
4148 +#include "pfe_mod.h"
4149 +
4150 +extern char *__class_dmem_sh;
4151 +extern char *__tmu_dmem_sh;
4152 +#if !defined(CONFIG_UTIL_DISABLED)
4153 +extern char *__util_dmem_sh;
4154 +extern char *__util_ddr_sh;
4155 +#endif
4156 +
4157 +HostMessage msg_buf;
4158 +static int msg_buf_used = 0;
4159 +unsigned long virt_to_class_dmem(void *p)
4160 +{
4161 + struct pfe_ctrl *ctrl = &pfe->ctrl;
4162 +
4163 + if (p)
4164 + return (unsigned long)p - (unsigned long)&__class_dmem_sh + ctrl->class_dmem_sh;
4165 + else
4166 + return 0;
4167 +}
4168 +unsigned long virt_to_tmu_dmem(void *p)
4169 +{
4170 + struct pfe_ctrl *ctrl = &pfe->ctrl;
4171 +
4172 + if (p)
4173 + return (unsigned long)p - (unsigned long)&__tmu_dmem_sh + ctrl->tmu_dmem_sh;
4174 + else
4175 + return 0;
4176 +}
4177 +
4178 +
4179 +#if !defined(CONFIG_UTIL_DISABLED)
4180 +unsigned long virt_to_util_dmem(void *p)
4181 +{
4182 + struct pfe_ctrl *ctrl = &pfe->ctrl;
4183 +
4184 + if (p)
4185 + return (unsigned long)p - (unsigned long)&__util_dmem_sh + ctrl->util_dmem_sh;
4186 + else
4187 + return 0;
4188 +}
4189 +
4190 +/** Returns the DDR physical address of a Util PE shared DDR variable.
4191 + *
4192 + * @param p pointer (kernel space, virtual) to be converted to a physical address.
4193 + */
4194 +unsigned long virt_to_util_ddr(void *p)
4195 +{
4196 + struct pfe_ctrl *ctrl = &pfe->ctrl;
4197 +
4198 + if (p)
4199 + return (unsigned long)p - (unsigned long)&__util_ddr_sh + ctrl->util_ddr_sh;
4200 + else
4201 + return 0;
4202 +}
4203 +/** Returns the virtual address of a Util PE shared DDR variable.
4204 + *
4205 + * @param p pointer (kernel space, virtual) to be converted to a pointer (usable in kernel space)
4206 + * pointing to the actual data.
4207 + */
4208 +
4209 +void * virt_to_util_virt(void *p)
4210 +{
4211 + if (p)
4212 + return DDR_PHYS_TO_VIRT(virt_to_util_ddr(p));
4213 + else
4214 + return NULL;
4215 +}
4216 +#endif
4217 +unsigned long virt_to_phys_iram(void *p)
4218 +{
4219 + if (p)
4220 + return (p - pfe->iram_baseaddr) + pfe->iram_phys_baseaddr;
4221 + else
4222 + return 0;
4223 +}
4224 +
4225 +unsigned long virt_to_phys_ipsec_lmem(void *p)
4226 +{
4227 + struct pfe_ctrl *ctrl = &pfe->ctrl;
4228 +
4229 + if (p)
4230 + return (p - ctrl->ipsec_lmem_baseaddr) + ctrl->ipsec_lmem_phys_baseaddr;
4231 + else
4232 + return 0;
4233 +}
4234 +
4235 +unsigned long virt_to_phys_ipsec_axi(void *p)
4236 +{
4237 + if (p)
4238 + return (p - pfe->ipsec_baseaddr) + pfe->ipsec_phys_baseaddr;
4239 + else
4240 + return 0;
4241 +}
4242 +
4243 +
4244 +HostMessage *msg_alloc(void)
4245 +{
4246 + if (msg_buf_used)
4247 + {
4248 + printk(KERN_ERR "%s: failed\n", __func__);
4249 + return NULL;
4250 + }
4251 +
4252 + msg_buf_used = 1;
4253 +
4254 + return &msg_buf;
4255 +}
4256 +
4257 +void msg_free(HostMessage *msg)
4258 +{
4259 + if (!msg_buf_used)
4260 + printk(KERN_ERR "%s: freeing already free msg buffer\n", __func__); /* double free: caller bug */
4261 + /* unconditionally mark free so the next msg_alloc() can proceed */
4262 + msg_buf_used = 0;
4263 +}
4264 +
4265 +int msg_send(HostMessage *msg)
4266 +{
4267 + struct pfe_ctrl *ctrl = &pfe->ctrl;
4268 + int rc = -1;
4269 +
4270 + if (!ctrl->event_cb)
4271 + goto out;
4272 +
4273 + if (ctrl->event_cb(msg->code, msg->length, msg->data) < 0)
4274 + goto out;
4275 +
4276 + rc = 0;
4277 +
4278 +out:
4279 + msg_free(msg);
4280 +
4281 + return rc;
4282 +}
4283 +
4284 +
4285 +void timer_init(TIMER_ENTRY *timer, TIMER_HANDLER handler)
4286 +{
4287 + timer->handler = handler;
4288 + timer->running = 0;
4289 +}
4290 +
4291 +
4292 +void timer_add(TIMER_ENTRY *timer, u16 granularity)
4293 +{
4294 + struct pfe_ctrl *ctrl = &pfe->ctrl;
4295 +
4296 +
4297 + timer->period = granularity;
4298 + timer->timeout = jiffies + timer->period;
4299 +
4300 + if (!timer->running)
4301 + {
4302 + list_add(&timer->list, &ctrl->timer_list);
4303 + timer->running = 1;
4304 + }
4305 +}
4306 +
4307 +
4308 +void timer_del(TIMER_ENTRY *timer)
4309 +{
4310 +
4311 + if (timer->running)
4312 + {
4313 + list_del(&timer->list);
4314 + timer->running = 0;
4315 + }
4316 +}
4317 +
4318 +
4319 +void *Heap_Alloc(int size)
4320 +{
4321 + /* FIXME we may want to use dma API's and use non cacheable memory */
4322 + return pfe_kmalloc(size, GFP_KERNEL);
4323 +}
4324 +
4325 +
4326 +void Heap_Free(void *p)
4327 +{
4328 + pfe_kfree(p);
4329 +}
4330 --- /dev/null
4331 +++ b/drivers/staging/fsl_ppfe/pfe_ctrl_hal.h
4332 @@ -0,0 +1,129 @@
4333 +/*
4334 + *
4335 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
4336 + *
4337 + * This program is free software; you can redistribute it and/or modify
4338 + * it under the terms of the GNU General Public License as published by
4339 + * the Free Software Foundation; either version 2 of the License, or
4340 + * (at your option) any later version.
4341 + *
4342 + * This program is distributed in the hope that it will be useful,
4343 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4344 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4345 + * GNU General Public License for more details.
4346 + *
4347 + * You should have received a copy of the GNU General Public License
4348 + * along with this program; if not, write to the Free Software
4349 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
4350 + */
4351 +
4352 +#ifndef _PFE_CTRL_HAL_H_
4353 +#define _PFE_CTRL_HAL_H_
4354 +
4355 +#include <linux/timer.h>
4356 +#include <linux/jiffies.h>
4357 +#include <linux/string.h>
4358 +#include <linux/elf.h>
4359 +#include <linux/slab.h>
4360 +#include <asm/byteorder.h>
4361 +#include <asm/io.h>
4362 +
4363 +#include "pfe_mod.h"
4364 +
4365 +#define CLASS_DMEM_SH(var) __attribute__((section(".class_dmem_sh_" #var))) var
4366 +#define CLASS_PE_LMEM_SH(var) __attribute__((section(".class_pe_lmem_sh_" #var))) var
4367 +#define TMU_DMEM_SH(var) __attribute__((section(".tmu_dmem_sh_" #var))) var
4368 +#define UTIL_DMEM_SH(var) __attribute__((section(".util_dmem_sh_" #var))) var
4369 +#define UTIL_DDR_SH(var) __attribute__((section(".util_ddr_sh_" #var))) var
4370 +
4371 +#define CLASS_DMEM_SH2(var) __attribute__((section(".class_dmem_sh_" #var))) class_##var
4372 +#define CLASS_PE_LMEM_SH2(var) __attribute__((section(".class_pe_lmem_sh_" #var))) class_##var
4373 +#define TMU_DMEM_SH2(var) __attribute__((section(".tmu_dmem_sh_" #var))) tmu_##var
4374 +#define UTIL_DMEM_SH2(var) __attribute__((section(".util_dmem_sh_" #var))) util_##var
4375 +
4376 +/** Translate the name of a shared variable to its PFE counterpart.
4377 + * Those macros may be used to determine the address of a shared variable,
4378 + * and will work even if the variable is accessed through a macro, as is the case
4379 + * with most fields of gFppGlobals.
4380 + */
4381 +#define CONCAT(str, var) str##var
4382 +#define CLASS_VARNAME2(var) CONCAT(class_, var)
4383 +#define UTIL_VARNAME2(var) CONCAT(util_, var)
4384 +#define TMU_VARNAME2(var) CONCAT(tmu_, var)
4385 +
4386 +typedef struct tHostMessage {
4387 + u16 length;
4388 + u16 code;
4389 + u16 data[128];
4390 +} HostMessage;
4391 +
4392 +HostMessage *msg_alloc(void);
4393 +void msg_free(HostMessage *msg);
4394 +int msg_send(HostMessage *msg);
4395 +
4396 +
4397 +unsigned long virt_to_class(void *p);
4398 +unsigned long virt_to_class_dmem(void *p);
4399 +unsigned long virt_to_class_pe_lmem(void *p);
4400 +unsigned long virt_to_tmu_dmem(void *p);
4401 +unsigned long virt_to_util_dmem(void *p);
4402 +unsigned long virt_to_util_ddr(void *p);
4403 +void * virt_to_util_virt(void *p);
4404 +unsigned long virt_to_phys_iram(void *p);
4405 +unsigned long virt_to_phys_ipsec_lmem(void *p);
4406 +unsigned long virt_to_phys_ipsec_axi(void *p);
4407 +
4408 +
4409 +#define TIMER_TICKS_PER_SEC 100
4410 +
4411 +#if TIMER_TICKS_PER_SEC > HZ
4412 +#error TIMER_TICKS_PER_SEC is too high
4413 +#endif
4414 +
4415 +
4416 +typedef void (* TIMER_HANDLER)(void);
4417 +
4418 +typedef struct {
4419 + struct list_head list;
4420 + unsigned long timeout;
4421 + unsigned long period;
4422 + TIMER_HANDLER handler;
4423 + char running;
4424 +} TIMER_ENTRY;
4425 +
4426 +
4427 +/** Initializes a timer structure.
4428 +* Must be called once for each TIMER_ENTRY structure.
4429 +* The caller must be holding the ctrl->mutex.
4430 +*
4431 +* @param timer pointer to the timer to be initialized
4432 +* @param handler timer handler function pointer
4433 +*
4434 +*/
4435 +void timer_init(TIMER_ENTRY *timer, TIMER_HANDLER handler);
4436 +
4437 +/** Adds a timer to the running timer list.
4438 +* It's safe to call even if the timer was already running. In this case we just update the granularity.
4439 +* The caller must be holding the ctrl->mutex.
4440 +*
4441 +* @param timer pointer to the timer to be added
4442 +* @param granularity granularity of the timer (in timer tick units)
4443 +*
4444 +*/
4445 +void timer_add(TIMER_ENTRY *timer, u16 granularity);
4446 +
4447 +/** Deletes a timer from the running timer list.
4448 +* It's safe to call even if the timer is no longer running.
4449 +* The caller must be holding the ctrl->mutex.
4450 +*
4451 +* @param timer pointer to the timer to be removed
4452 +*/
4453 +void timer_del(TIMER_ENTRY *timer);
4454 +
4455 +void *Heap_Alloc(int size);
4456 +
4457 +#define Heap_Alloc_ARAM(s) Heap_Alloc(s)
4458 +#define __Heap_Alloc(h, s) Heap_Alloc(s)
4459 +void Heap_Free(void *p);
4460 +
4461 +#endif /* _PFE_CTRL_HAL_H_ */
4462 --- /dev/null
4463 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
4464 @@ -0,0 +1,109 @@
4465 +/*
4466 + * (C) Copyright 2013
4467 + * Author : Freescale Technologies
4468 + *
4469 + * See file CREDITS for list of people who contributed to this
4470 + * project.
4471 + *
4472 + * This program is free software; you can redistribute it and/or
4473 + * modify it under the terms of the GNU General Public License as
4474 + * published by the Free Software Foundation; either version 2 of
4475 + * the License, or (at your option) any later version.
4476 + *
4477 + * This program is distributed in the hope that it will be useful,
4478 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4479 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4480 + * GNU General Public License for more details.
4481 + *
4482 + * You should have received a copy of the GNU General Public License
4483 + * along with this program; if not, write to the Free Software
4484 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
4485 + * MA 02111-1307 USA
4486 + * */
4487 +
4488 +#include <linux/module.h>
4489 +#include <linux/debugfs.h>
4490 +#include <linux/platform_device.h>
4491 +
4492 +#include "pfe_mod.h"
4493 +
4494 +static int dmem_show(struct seq_file *s, void *unused)
4495 +{
4496 + u32 dmem_addr, val;
4497 + int id = (long int)s->private;
4498 + int i;
4499 +
4500 + for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
4501 + seq_printf(s, "%04x:", dmem_addr);
4502 +
4503 + for (i = 0; i < 8; i++) {
4504 + val = pe_dmem_read(id, dmem_addr + i * 4, 4);
4505 + seq_printf(s, " %02x %02x %02x %02x", val & 0xff, (val >> 8) & 0xff, (val >> 16) & 0xff, (val >> 24) & 0xff);
4506 + }
4507 +
4508 + seq_printf(s, "\n");
4509 + }
4510 +
4511 + return 0;
4512 +}
4513 +
4514 +static int dmem_open(struct inode *inode, struct file *file)
4515 +{
4516 + return single_open(file, dmem_show, inode->i_private);
4517 +}
4518 +
4519 +static const struct file_operations dmem_fops = {
4520 + .open = dmem_open,
4521 + .read = seq_read,
4522 + .llseek = seq_lseek,
4523 + .release = single_release,
4524 +};
4525 +
4526 +int pfe_debugfs_init(struct pfe *pfe)
4527 +{
4528 + struct dentry *d;
4529 +
4530 + printk(KERN_INFO "%s\n", __func__);
4531 +
4532 + pfe->dentry = debugfs_create_dir("pfe", NULL);
4533 + if (IS_ERR_OR_NULL(pfe->dentry))
4534 + goto err_dir;
4535 +
4536 + d = debugfs_create_file("pe0_dmem", S_IRUGO, pfe->dentry, (void *)0, &dmem_fops);
4537 + if (IS_ERR_OR_NULL(d))
4538 + goto err_pe;
4539 +
4540 + d = debugfs_create_file("pe1_dmem", S_IRUGO, pfe->dentry, (void *)1, &dmem_fops);
4541 + if (IS_ERR_OR_NULL(d))
4542 + goto err_pe;
4543 +
4544 + d = debugfs_create_file("pe2_dmem", S_IRUGO, pfe->dentry, (void *)2, &dmem_fops);
4545 + if (IS_ERR_OR_NULL(d))
4546 + goto err_pe;
4547 +
4548 + d = debugfs_create_file("pe3_dmem", S_IRUGO, pfe->dentry, (void *)3, &dmem_fops);
4549 + if (IS_ERR_OR_NULL(d))
4550 + goto err_pe;
4551 +
4552 + d = debugfs_create_file("pe4_dmem", S_IRUGO, pfe->dentry, (void *)4, &dmem_fops);
4553 + if (IS_ERR_OR_NULL(d))
4554 + goto err_pe;
4555 +
4556 + d = debugfs_create_file("pe5_dmem", S_IRUGO, pfe->dentry, (void *)5, &dmem_fops);
4557 + if (IS_ERR_OR_NULL(d))
4558 + goto err_pe;
4559 +
4560 + return 0;
4561 +
4562 +err_pe:
4563 + debugfs_remove_recursive(pfe->dentry);
4564 +
4565 +err_dir:
4566 + return -1;
4567 +}
4568 +
4569 +void pfe_debugfs_exit(struct pfe *pfe)
4570 +{
4571 + debugfs_remove_recursive(pfe->dentry);
4572 +}
4573 +
4574 --- /dev/null
4575 +++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
4576 @@ -0,0 +1,8 @@
4577 +#ifndef _PFE_DEBUGFS_H_
4578 +#define _PFE_DEBUGFS_H_
4579 +
4580 +int pfe_debugfs_init(struct pfe *pfe);
4581 +void pfe_debugfs_exit(struct pfe *pfe);
4582 +#endif /* _PFE_DEBUGFS_H_ */
4583 +
4584 +
4585 --- /dev/null
4586 +++ b/drivers/staging/fsl_ppfe/pfe_eth.c
4587 @@ -0,0 +1,2956 @@
4588 +/*
4589 + *
4590 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
4591 + *
4592 + * This program is free software; you can redistribute it and/or modify
4593 + * it under the terms of the GNU General Public License as published by
4594 + * the Free Software Foundation; either version 2 of the License, or
4595 + * (at your option) any later version.
4596 + *
4597 + * This program is distributed in the hope that it will be useful,
4598 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4599 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4600 + * GNU General Public License for more details.
4601 + *
4602 + * You should have received a copy of the GNU General Public License
4603 + * along with this program; if not, write to the Free Software
4604 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
4605 + */
4606 +
4607 +/** @pfe_eth.c.
4608 + * Ethernet driver for to handle exception path for PFE.
4609 + * - uses HIF functions to send/receive packets.
4610 + * - uses ctrl function to start/stop interfaces.
4611 + * - uses direct register accesses to control phy operation.
4612 + */
4613 +#include <linux/version.h>
4614 +#include <linux/kernel.h>
4615 +#include <linux/interrupt.h>
4616 +#include <linux/dma-mapping.h>
4617 +#include <linux/dmapool.h>
4618 +#include <linux/netdevice.h>
4619 +#include <linux/etherdevice.h>
4620 +#include <linux/ethtool.h>
4621 +#include <linux/mii.h>
4622 +#include <linux/phy.h>
4623 +#include <linux/timer.h>
4624 +#include <linux/hrtimer.h>
4625 +#include <linux/platform_device.h>
4626 +
4627 +#include <net/ip.h>
4628 +#include <net/sock.h>
4629 +
4630 +#include <asm/io.h>
4631 +#include <asm/irq.h>
4632 +#include <asm/delay.h>
4633 +#include <linux/regmap.h>
4634 +#include <linux/i2c.h>
4635 +
4636 +#if defined(CONFIG_NF_CONNTRACK_MARK)
4637 +#include <net/netfilter/nf_conntrack.h>
4638 +#endif
4639 +
4640 +#include "pfe_mod.h"
4641 +#include "pfe_eth.h"
4642 +
4643 +const char comcerto_eth_driver_version[]="1.0";
4644 +static void *cbus_emac_base[3];
4645 +static void *cbus_gpi_base[3];
4646 +
4647 +/* Forward Declaration */
4648 +static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
4649 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv, int force);
4650 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int txQ_num, int from_tx, int n_desc);
4651 +
4652 +#if defined(CONFIG_PLATFORM_C2000)
4653 +static void pfe_eth_set_device_wakeup(struct pfe *pfe);
4654 +
4655 +
4656 +
4657 +unsigned int gemac_regs[] = {
4658 + 0x0000, /* Network control */
4659 + 0x0004, /* Network configuration */
4660 + 0x0008, /* Network status */
4661 + 0x0010, /* DMA configuration */
4662 + 0x0014, /* Transmit status */
4663 + 0x0020, /* Receive status */
4664 + 0x0024, /* Interrupt status */
4665 + 0x0030, /* Interrupt mask */
4666 + 0x0038, /* Received pause quantum */
4667 + 0x003c, /* Transmit pause quantum */
4668 + 0x0080, /* Hash register bottom [31:0] */
4669 + 0x0084, /* Hash register bottom [63:32] */
4670 + 0x0088, /* Specific address 1 bottom [31:0] */
4671 + 0x008c, /* Specific address 1 top [47:32] */
4672 + 0x0090, /* Specific address 2 bottom [31:0] */
4673 + 0x0094, /* Specific address 2 top [47:32] */
4674 + 0x0098, /* Specific address 3 bottom [31:0] */
4675 + 0x009c, /* Specific address 3 top [47:32] */
4676 + 0x00a0, /* Specific address 4 bottom [31:0] */
4677 + 0x00a4, /* Specific address 4 top [47:32] */
4678 + 0x00a8, /* Type ID Match 1 */
4679 + 0x00ac, /* Type ID Match 2 */
4680 + 0x00b0, /* Type ID Match 3 */
4681 + 0x00b4, /* Type ID Match 4 */
4682 + 0x00b8, /* Wake Up ON LAN */
4683 + 0x00bc, /* IPG stretch register */
4684 + 0x00c0, /* Stacked VLAN Register */
4685 + 0x00fc, /* Module ID */
4686 + 0x07a0 /* EMAC Control register */
4687 +};
4688 +#else
4689 +unsigned int gemac_regs[] = {
4690 + 0x0004, /*Interrupt event */
4691 + 0x0008, /*Interrupt mask */
4692 + 0x0024, /*Ethernet control */
4693 + 0x0064, /*MIB Control/Status */
4694 + 0x0084, /*Receive control/status */
4695 + 0x00C4, /*Transmit control */
4696 + 0x00E4, /*Physical address low */
4697 + 0x00E8, /*Physical address high */
4698 + 0x0144, /*Transmit FIFO Watermark and Store and Forward Control*/
4699 + 0x0190, /* Receive FIFO Section Full Threshold */
4700 + 0x01A0, /* Transmit FIFO Section Empty Threshold */
4701 + 0x01B0, /* Frame Truncation Length */
4702 +};
4703 +#endif
4704 +/********************************************************************/
4705 +/* SYSFS INTERFACE */
4706 +/********************************************************************/
4707 +
4708 +
4709 +
4710 +#ifdef PFE_ETH_NAPI_STATS
4711 +/*
4712 + * pfe_eth_show_napi_stats - sysfs read handler dumping the NAPI event counters
4713 + */
4714 +static ssize_t pfe_eth_show_napi_stats(struct device *dev,
4715 + struct device_attribute *attr, char *buf)
4716 +{
4717 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
4718 + ssize_t len = 0;
4719 +
4720 + len += sprintf(buf + len, "sched: %u\n", priv->napi_counters[NAPI_SCHED_COUNT]);
4721 + len += sprintf(buf + len, "poll: %u\n", priv->napi_counters[NAPI_POLL_COUNT]);
4722 + len += sprintf(buf + len, "packet: %u\n", priv->napi_counters[NAPI_PACKET_COUNT]);
4723 + len += sprintf(buf + len, "budget: %u\n", priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
4724 + len += sprintf(buf + len, "desc: %u\n", priv->napi_counters[NAPI_DESC_COUNT]);
4725 +
4726 + return len;
4727 +}
4728 +
4729 +/*
4730 + * pfe_eth_set_napi_stats - sysfs write handler; any write clears the NAPI counters
4731 + */
4732 +static ssize_t pfe_eth_set_napi_stats(struct device *dev,
4733 + struct device_attribute *attr, const char *buf, size_t count)
4734 +{
4735 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
4736 +
4737 + memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
4738 +
4739 + return count;
4740 +}
4741 +#endif
4742 +#ifdef PFE_ETH_TX_STATS
4743 +/** pfe_eth_show_tx_stats
4744 + * Sysfs read handler: print per-TX-queue credit and stall counters into buf.
4745 + */
4746 +static ssize_t pfe_eth_show_tx_stats(struct device *dev,
4747 + struct device_attribute *attr, char *buf)
4748 +{
4749 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
4750 + ssize_t len = 0;
4751 + int i;
4752 +
4753 + len += sprintf(buf + len, "TX queues stats:\n");
4754 +
4755 + for (i = 0; i < emac_txq_cnt; i++) {
4756 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, i);
4757 +
4758 + len += sprintf(buf + len, "\n");
4759 + __netif_tx_lock_bh(tx_queue);
4760 +
4761 + hif_tx_lock(&pfe->hif);
4762 + len += sprintf(buf + len, "Queue %2d : credits = %10d\n", i, hif_lib_tx_credit_avail(pfe, priv->id, i));
4763 + len += sprintf(buf + len, " tx packets = %10d\n", pfe->tmu_credit.tx_packets[priv->id][i]);
4764 + hif_tx_unlock(&pfe->hif);
4765 +
4766 + /* Don't output additional stats if queue never used */
4767 + if (!pfe->tmu_credit.tx_packets[priv->id][i])
4768 + goto skip;
4769 +
4770 + len += sprintf(buf + len, " clean_fail = %10d\n", priv->clean_fail[i]);
4771 + len += sprintf(buf + len, " stop_queue = %10d\n", priv->stop_queue_total[i]);
4772 + len += sprintf(buf + len, " stop_queue_hif = %10d\n", priv->stop_queue_hif[i]);
4773 + len += sprintf(buf + len, " stop_queue_hif_client = %10d\n", priv->stop_queue_hif_client[i]);
4774 + len += sprintf(buf + len, " stop_queue_credit = %10d\n", priv->stop_queue_credit[i]);
4775 +skip:
4776 + __netif_tx_unlock_bh(tx_queue);
4777 + }
4778 + return len;
4779 +}
4780 +
4781 +/** pfe_eth_set_tx_stats
4782 + * Sysfs write handler: any write resets the per-queue TX stall counters.
4783 + */
4784 +static ssize_t pfe_eth_set_tx_stats(struct device *dev,
4785 + struct device_attribute *attr, const char *buf, size_t count)
4786 +{
4787 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
4788 + int i;
4789 +
4790 + for (i = 0; i < emac_txq_cnt; i++) {
4791 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, i);
4792 +
4793 + __netif_tx_lock_bh(tx_queue);
4794 + priv->clean_fail[i] = 0;
4795 + priv->stop_queue_total[i] = 0;
4796 + priv->stop_queue_hif[i] = 0;
4797 + priv->stop_queue_hif_client[i]= 0;
4798 + priv->stop_queue_credit[i] = 0;
4799 + __netif_tx_unlock_bh(tx_queue);
4800 + }
4801 +
4802 + return count;
4803 +}
4804 +#endif
4805 +/** pfe_eth_show_txavail
4806 + *
4807 + */
4808 +static ssize_t pfe_eth_show_txavail(struct device *dev,
4809 + struct device_attribute *attr, char *buf)
4810 +{
4811 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
4812 + ssize_t len = 0;
4813 + int i;
4814 +
4815 + for (i = 0; i < emac_txq_cnt; i++) {
4816 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, i);
4817 +
4818 + __netif_tx_lock_bh(tx_queue);
4819 +
4820 + len += sprintf(buf + len, "%d", hif_lib_tx_avail(&priv->client, i));
4821 +
4822 + __netif_tx_unlock_bh(tx_queue);
4823 +
4824 + if (i == (emac_txq_cnt - 1))
4825 + len += sprintf(buf + len, "\n");
4826 + else
4827 + len += sprintf(buf + len, " ");
4828 + }
4829 +
4830 + return len;
4831 +}
4832 +
4833 +
4834 +/** pfe_eth_show_default_priority
4835 + *
4836 + */
4837 +static ssize_t pfe_eth_show_default_priority(struct device *dev,
4838 + struct device_attribute *attr,
4839 + char *buf)
4840 +{
4841 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
4842 + unsigned long flags;
4843 + int rc;
4844 +
4845 + spin_lock_irqsave(&priv->lock, flags);
4846 + rc = sprintf(buf, "%d\n", priv->default_priority);
4847 + spin_unlock_irqrestore(&priv->lock, flags);
4848 +
4849 + return rc;
4850 +}
4851 +
4852 +/** pfe_eth_set_default_priority
4853 + *
4854 + */
4855 +
4856 +static ssize_t pfe_eth_set_default_priority(struct device *dev,
4857 + struct device_attribute *attr,
4858 + const char *buf, size_t count)
4859 +{
4860 + struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
4861 + unsigned long flags;
4862 +
4863 + spin_lock_irqsave(&priv->lock, flags);
4864 + priv->default_priority = simple_strtoul(buf, NULL, 0);
4865 + spin_unlock_irqrestore(&priv->lock, flags);
4866 +
4867 + return count;
4868 +}
4869 +
4870 +static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
4871 +static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority, pfe_eth_set_default_priority);
4872 +
4873 +#ifdef PFE_ETH_NAPI_STATS
4874 +static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats, pfe_eth_set_napi_stats);
4875 +#endif
4876 +
4877 +#ifdef PFE_ETH_TX_STATS
4878 +static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats, pfe_eth_set_tx_stats);
4879 +#endif
4880 +
4881 +
4882 +/** pfe_eth_sysfs_init
4883 + *
4884 + */
4885 +static int pfe_eth_sysfs_init(struct net_device *dev)
4886 +{
4887 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
4888 + int err;
4889 +
4890 + /* Initialize the default values */
4891 + /* By default, packets without conntrack will use this default high priority queue */
4892 + priv->default_priority = 15;
4893 +
4894 + /* Create our sysfs files */
4895 + err = device_create_file(&dev->dev, &dev_attr_default_priority);
4896 + if (err) {
4897 + netdev_err(dev, "failed to create default_priority sysfs files\n");
4898 + goto err_priority;
4899 + }
4900 +
4901 + err = device_create_file(&dev->dev, &dev_attr_txavail);
4902 + if (err) {
4903 + netdev_err(dev, "failed to create default_priority sysfs files\n");
4904 + goto err_txavail;
4905 + }
4906 +
4907 +#ifdef PFE_ETH_NAPI_STATS
4908 + err = device_create_file(&dev->dev, &dev_attr_napi_stats);
4909 + if (err) {
4910 + netdev_err(dev, "failed to create napi stats sysfs files\n");
4911 + goto err_napi;
4912 + }
4913 +#endif
4914 +
4915 +#ifdef PFE_ETH_TX_STATS
4916 + err = device_create_file(&dev->dev, &dev_attr_tx_stats);
4917 + if (err) {
4918 + netdev_err(dev, "failed to create tx stats sysfs files\n");
4919 + goto err_tx;
4920 + }
4921 +#endif
4922 +
4923 + return 0;
4924 +
4925 +#ifdef PFE_ETH_TX_STATS
4926 +err_tx:
4927 +#endif
4928 +#ifdef PFE_ETH_NAPI_STATS
4929 + device_remove_file(&dev->dev, &dev_attr_napi_stats);
4930 +
4931 +err_napi:
4932 +#endif
4933 + device_remove_file(&dev->dev, &dev_attr_txavail);
4934 +
4935 +err_txavail:
4936 + device_remove_file(&dev->dev, &dev_attr_default_priority);
4937 +
4938 +err_priority:
4939 + return -1;
4940 +}
4941 +
4942 +/** pfe_eth_sysfs_exit
4943 + *
4944 + */
4945 +void pfe_eth_sysfs_exit(struct net_device *dev)
4946 +{
4947 +#ifdef PFE_ETH_TX_STATS
4948 + device_remove_file(&dev->dev, &dev_attr_tx_stats);
4949 +#endif
4950 +
4951 +#ifdef PFE_ETH_NAPI_STATS
4952 + device_remove_file(&dev->dev, &dev_attr_napi_stats);
4953 +#endif
4954 + device_remove_file(&dev->dev, &dev_attr_txavail);
4955 + device_remove_file(&dev->dev, &dev_attr_default_priority);
4956 +}
4957 +
4958 +/*************************************************************************/
4959 +/* ETHTOOL INTERFACE */
4960 +/*************************************************************************/
4961 +
4962 +#if defined(CONFIG_PLATFORM_C2000)
4963 +static char stat_gstrings[][ETH_GSTRING_LEN] = {
4964 + "tx- octets",
4965 + "tx- packets",
4966 + "tx- broadcast",
4967 + "tx- multicast",
4968 + "tx- pause",
4969 + "tx- 64 bytes packets",
4970 + "tx- 64 - 127 bytes packets",
4971 + "tx- 128 - 255 bytes packets",
4972 + "tx- 256 - 511 bytes packets",
4973 + "tx- 512 - 1023 bytes packets",
4974 + "tx- 1024 - 1518 bytes packets",
4975 + "tx- > 1518 bytes packets",
4976 + "tx- underruns - errors",
4977 + "tx- single collision",
4978 + "tx- multi collision",
4979 + "tx- exces. collision - errors",
4980 + "tx- late collision - errors",
4981 + "tx- deferred",
4982 + "tx- carrier sense - errors",
4983 + "rx- octets",
4984 + "rx- packets",
4985 + "rx- broadcast",
4986 + "rx- multicast",
4987 + "rx- pause",
4988 + "rx- 64 bytes packets",
4989 + "rx- 64 - 127 bytes packets",
4990 + "rx- 128 - 255 bytes packets",
4991 + "rx- 256 - 511 bytes packets",
4992 + "rx- 512 - 1023 bytes packets",
4993 + "rx- 1024 - 1518 bytes packets",
4994 + "rx- > 1518 bytes packets",
4995 + "rx- undersize -errors",
4996 + "rx- oversize - errors ",
4997 + "rx- jabbers - errors",
4998 + "rx- fcs - errors",
4999 + "rx- length - errors",
5000 + "rx- symbol - errors",
5001 + "rx- align - errors",
5002 + "rx- ressource - errors",
5003 + "rx- overrun - errors",
5004 + "rx- IP cksum - errors",
5005 + "rx- TCP cksum - errors",
5006 + "rx- UDP cksum - errors"
5007 +};
5008 +
5009 +
5010 +/**
5011 + * pfe_eth_gstrings - Fill in a buffer with the strings which correspond to
5012 + * the stats.
5013 + *
5014 + */
5015 +static void pfe_eth_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
5016 +{
5017 + switch (stringset) {
5018 + case ETH_SS_STATS:
5019 + memcpy(buf, stat_gstrings, (EMAC_RMON_LEN - 2) * ETH_GSTRING_LEN);
5020 + break;
5021 +
5022 + default:
5023 + WARN_ON(1);
5024 + break;
5025 + }
5026 +}
5027 +
5028 +/**
5029 + * pfe_eth_fill_stats - Fill in an array of 64-bit statistics from
5030 + * various sources. This array will be appended
5031 + * to the end of the ethtool_stats* structure, and
5032 + * returned to user space
5033 + */
5034 +static void pfe_eth_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
5035 +{
5036 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5037 + int i;
5038 + for (i=0;i<EMAC_RMON_LEN;i++, buf++) {
5039 + *buf = readl(priv->EMAC_baseaddr + EMAC_RMON_BASE_OFST + (i << 2));
5040 + if ( ( i == EMAC_RMON_TXBYTES_POS ) || ( i == EMAC_RMON_RXBYTES_POS ) ){
5041 + i++;
5042 + *buf |= (u64)readl(priv->EMAC_baseaddr + EMAC_RMON_BASE_OFST + (i << 2)) << 32;
5043 + }
5044 + }
5045 +
5046 +}
5047 +
5048 +/**
5049 + * pfe_eth_stats_count - Returns the number of stats (and their corresponding strings)
5050 + *
5051 + */
5052 +static int pfe_eth_stats_count(struct net_device *dev, int sset)
5053 +{
5054 + switch (sset) {
5055 + case ETH_SS_STATS:
5056 + return EMAC_RMON_LEN - 2;
5057 + default:
5058 + return -EOPNOTSUPP;
5059 + }
5060 +}
5061 +
5062 +#if defined(CONFIG_PLATFORM_C2000)
5063 +/**
5064 + * pfe_eth_set_wol - Set the magic packet option, in WoL register.
5065 + *
5066 + */
5067 +static int pfe_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5068 +{
5069 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5070 +
5071 + if (wol->wolopts & ~(WAKE_MAGIC | WAKE_ARP | WAKE_MCAST | WAKE_UCAST))
5072 + return -EOPNOTSUPP;
5073 +
5074 + priv->wol = 0;
5075 +
5076 + if (wol->wolopts & WAKE_MAGIC)
5077 + priv->wol |= EMAC_WOL_MAGIC;
5078 + if (wol->wolopts & WAKE_ARP)
5079 + priv->wol |= EMAC_WOL_ARP;
5080 + if (wol->wolopts & WAKE_MCAST)
5081 + priv->wol |= EMAC_WOL_MULTI;
5082 + if (wol->wolopts & WAKE_UCAST)
5083 + priv->wol |= EMAC_WOL_SPEC_ADDR;
5084 +
5085 + pfe_eth_set_device_wakeup(priv->pfe);
5086 +
5087 + return 0;
5088 +}
5089 +
5090 +/**
5091 + *
5092 + * pfe_eth_get_wol - Get the WoL options.
5093 + *
5094 + */
5095 +static void pfe_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5096 +{
5097 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5098 +
5099 + wol->supported = (WAKE_MAGIC | WAKE_ARP | WAKE_MCAST | WAKE_UCAST);
5100 + wol->wolopts = 0;
5101 +
5102 + if(priv->wol & EMAC_WOL_MAGIC)
5103 + wol->wolopts |= WAKE_MAGIC;
5104 + if(priv->wol & EMAC_WOL_ARP)
5105 + wol->wolopts |= WAKE_ARP;
5106 + if(priv->wol & EMAC_WOL_MULTI)
5107 + wol->wolopts |= WAKE_UCAST;
5108 + if(priv->wol & EMAC_WOL_SPEC_ADDR)
5109 + wol->wolopts |= WAKE_UCAST;
5110 +
5111 + memset(&wol->sopass, 0, sizeof(wol->sopass));
5112 +}
5113 +#endif
5114 +/**
5115 + * pfe_eth_gemac_reglen - Return the length of the register structure.
5116 + *
5117 + */
5118 +static int pfe_eth_gemac_reglen(struct net_device *dev)
5119 +{
5120 + return (sizeof (gemac_regs)/ sizeof(u32)) + (( MAX_UC_SPEC_ADDR_REG - 3 ) * 2);
5121 +}
5122 +
5123 +/**
5124 + * pfe_eth_gemac_get_regs - Return the gemac register structure.
5125 + *
5126 + */
5127 +static void pfe_eth_gemac_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
5128 +{
5129 + int i,j;
5130 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5131 + u32 *buf = (u32 *) regbuf;
5132 +
5133 + for (i = 0; i < sizeof (gemac_regs) / sizeof (u32); i++)
5134 + buf[i] = readl( priv->EMAC_baseaddr + gemac_regs[i] );
5135 +
5136 + for (j = 0; j < (( MAX_UC_SPEC_ADDR_REG - 3 ) * 2); j++,i++)
5137 + buf[i] = readl( priv->EMAC_baseaddr + EMAC_SPEC5_ADD_BOT + (j<<2) );
5138 +
5139 +}
5140 +
5141 +
5142 +#else //if defined(CONFIG_PLATFORM_C2000)
5143 +/*MTIP GEMAC */
5144 +static const struct fec_stat {
5145 + char name[ETH_GSTRING_LEN];
5146 + u16 offset;
5147 +} fec_stats[] = {
5148 + /* RMON TX */
5149 + { "tx_dropped", RMON_T_DROP },
5150 + { "tx_packets", RMON_T_PACKETS },
5151 + { "tx_broadcast", RMON_T_BC_PKT },
5152 + { "tx_multicast", RMON_T_MC_PKT },
5153 + { "tx_crc_errors", RMON_T_CRC_ALIGN },
5154 + { "tx_undersize", RMON_T_UNDERSIZE },
5155 + { "tx_oversize", RMON_T_OVERSIZE },
5156 + { "tx_fragment", RMON_T_FRAG },
5157 + { "tx_jabber", RMON_T_JAB },
5158 + { "tx_collision", RMON_T_COL },
5159 + { "tx_64byte", RMON_T_P64 },
5160 + { "tx_65to127byte", RMON_T_P65TO127 },
5161 + { "tx_128to255byte", RMON_T_P128TO255 },
5162 + { "tx_256to511byte", RMON_T_P256TO511 },
5163 + { "tx_512to1023byte", RMON_T_P512TO1023 },
5164 + { "tx_1024to2047byte", RMON_T_P1024TO2047 },
5165 + { "tx_GTE2048byte", RMON_T_P_GTE2048 },
5166 + { "tx_octets", RMON_T_OCTETS },
5167 +
5168 + /* IEEE TX */
5169 + { "IEEE_tx_drop", IEEE_T_DROP },
5170 + { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
5171 + { "IEEE_tx_1col", IEEE_T_1COL },
5172 + { "IEEE_tx_mcol", IEEE_T_MCOL },
5173 + { "IEEE_tx_def", IEEE_T_DEF },
5174 + { "IEEE_tx_lcol", IEEE_T_LCOL },
5175 + { "IEEE_tx_excol", IEEE_T_EXCOL },
5176 + { "IEEE_tx_macerr", IEEE_T_MACERR },
5177 + { "IEEE_tx_cserr", IEEE_T_CSERR },
5178 + { "IEEE_tx_sqe", IEEE_T_SQE },
5179 + { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
5180 + { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
5181 +
5182 + /* RMON RX */
5183 + { "rx_packets", RMON_R_PACKETS },
5184 + { "rx_broadcast", RMON_R_BC_PKT },
5185 + { "rx_multicast", RMON_R_MC_PKT },
5186 + { "rx_crc_errors", RMON_R_CRC_ALIGN },
5187 + { "rx_undersize", RMON_R_UNDERSIZE },
5188 + { "rx_oversize", RMON_R_OVERSIZE },
5189 + { "rx_fragment", RMON_R_FRAG },
5190 + { "rx_jabber", RMON_R_JAB },
5191 + { "rx_64byte", RMON_R_P64 },
5192 + { "rx_65to127byte", RMON_R_P65TO127 },
5193 + { "rx_128to255byte", RMON_R_P128TO255 },
5194 + { "rx_256to511byte", RMON_R_P256TO511 },
5195 + { "rx_512to1023byte", RMON_R_P512TO1023 },
5196 + { "rx_1024to2047byte", RMON_R_P1024TO2047 },
5197 + { "rx_GTE2048byte", RMON_R_P_GTE2048 },
5198 + { "rx_octets", RMON_R_OCTETS },
5199 +
5200 + /* IEEE RX */
5201 + { "IEEE_rx_drop", IEEE_R_DROP },
5202 + { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
5203 + { "IEEE_rx_crc", IEEE_R_CRC },
5204 + { "IEEE_rx_align", IEEE_R_ALIGN },
5205 + { "IEEE_rx_macerr", IEEE_R_MACERR },
5206 + { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
5207 + { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
5208 +};
5209 +
5210 +static void pfe_eth_fill_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
5211 +{
5212 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5213 + int i;
5214 +
5215 + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
5216 + data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
5217 +}
5218 +
5219 +static void pfe_eth_gstrings(struct net_device *netdev,
5220 + u32 stringset, u8 *data)
5221 +{
5222 + int i;
5223 + switch (stringset) {
5224 + case ETH_SS_STATS:
5225 + for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
5226 + memcpy(data + i * ETH_GSTRING_LEN,
5227 + fec_stats[i].name, ETH_GSTRING_LEN);
5228 + break;
5229 + }
5230 +}
5231 +
5232 +static int pfe_eth_stats_count(struct net_device *dev, int sset)
5233 +{
5234 + switch (sset) {
5235 + case ETH_SS_STATS:
5236 + return ARRAY_SIZE(fec_stats);
5237 + default:
5238 + return -EOPNOTSUPP;
5239 + }
5240 +}
5241 +
5242 +/**
5243 + * pfe_eth_gemac_reglen - Return the length of the register structure.
5244 + *
5245 + */
5246 +static int pfe_eth_gemac_reglen(struct net_device *dev)
5247 +{
5248 + printk("%s() \n", __func__);
5249 + return (sizeof (gemac_regs)/ sizeof(u32)) ;
5250 +}
5251 +
5252 +/**
5253 + * pfe_eth_gemac_get_regs - Return the gemac register structure.
5254 + *
5255 + */
5256 +static void pfe_eth_gemac_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
5257 +{
5258 + int i;
5259 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5260 + u32 *buf = (u32 *) regbuf;
5261 +
5262 + printk("%s() \n", __func__);
5263 + for (i = 0; i < sizeof (gemac_regs) / sizeof (u32); i++)
5264 + buf[i] = readl( priv->EMAC_baseaddr + gemac_regs[i] );
5265 +
5266 +}
5267 +
5268 +
5269 +#endif
5270 +
5271 +/**
5272 + * pfe_eth_get_drvinfo - Fills in the drvinfo structure with some basic info
5273 + *
5274 + */
5275 +static void pfe_eth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
5276 +{
5277 + strncpy(drvinfo->driver, DRV_NAME, COMCERTO_INFOSTR_LEN);
5278 + strncpy(drvinfo->version, comcerto_eth_driver_version, COMCERTO_INFOSTR_LEN);
5279 + strncpy(drvinfo->fw_version, "N/A", COMCERTO_INFOSTR_LEN);
5280 + strncpy(drvinfo->bus_info, "N/A", COMCERTO_INFOSTR_LEN);
5281 + drvinfo->testinfo_len = 0;
5282 + drvinfo->regdump_len = 0;
5283 + drvinfo->eedump_len = 0;
5284 +}
5285 +
5286 +/**
5287 + * pfe_eth_set_settings - Used to send commands to PHY.
5288 + *
5289 + */
5290 +
5291 +static int pfe_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5292 +{
5293 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5294 + struct phy_device *phydev = priv->phydev;
5295 +
5296 + if (NULL == phydev)
5297 + return -ENODEV;
5298 +
5299 + return phy_ethtool_sset(phydev, cmd);
5300 +}
5301 +
5302 +
5303 +/**
5304 + * pfe_eth_getsettings - Return the current settings in the ethtool_cmd structure.
5305 + *
5306 + */
5307 +static int pfe_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5308 +{
5309 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5310 + struct phy_device *phydev = priv->phydev;
5311 +
5312 + if (NULL == phydev)
5313 + return -ENODEV;
5314 +
5315 + return phy_ethtool_gset(phydev, cmd);
5316 +}
5317 +
5318 +
5319 +/**
5320 + * pfe_eth_get_msglevel - Gets the debug message mask.
5321 + *
5322 + */
5323 +static uint32_t pfe_eth_get_msglevel(struct net_device *dev)
5324 +{
5325 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5326 +
5327 + return priv->msg_enable;
5328 +}
5329 +
5330 +/**
5331 + * pfe_eth_set_msglevel - Sets the debug message mask.
5332 + *
5333 + */
5334 +static void pfe_eth_set_msglevel(struct net_device *dev, uint32_t data)
5335 +{
5336 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5337 +
5338 + priv->msg_enable = data;
5339 +}
5340 +
5341 +#define HIF_RX_COAL_MAX_CLKS (~(1<<31))
5342 +#define HIF_RX_COAL_CLKS_PER_USEC (pfe->ctrl.sys_clk/1000)
5343 +#define HIF_RX_COAL_MAX_USECS (HIF_RX_COAL_MAX_CLKS/HIF_RX_COAL_CLKS_PER_USEC)
5344 +
5345 +/**
5346 + * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
5347 + *
5348 + */
5349 +static int pfe_eth_set_coalesce(struct net_device *dev,
5350 + struct ethtool_coalesce *ec)
5351 +{
5352 + if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
5353 + return -EINVAL;
5354 +
5355 + if (!ec->rx_coalesce_usecs) {
5356 + writel(0, HIF_INT_COAL);
5357 + return 0;
5358 + }
5359 +
5360 + writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) | HIF_INT_COAL_ENABLE, HIF_INT_COAL);
5361 +
5362 + return 0;
5363 +}
5364 +
5365 +/**
5366 + * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
5367 + *
5368 + */
5369 +static int pfe_eth_get_coalesce(struct net_device *dev,
5370 + struct ethtool_coalesce *ec)
5371 +{
5372 + int reg_val = readl(HIF_INT_COAL);
5373 +
5374 + if (reg_val & HIF_INT_COAL_ENABLE)
5375 + ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) / HIF_RX_COAL_CLKS_PER_USEC;
5376 + else
5377 + ec->rx_coalesce_usecs = 0;
5378 +
5379 + return 0;
5380 +}
5381 +
5382 +#if defined(CONFIG_PLATFORM_C2000)
5383 +/**
5384 + * pfe_eth_pause_rx_enabled - Tests if pause rx is enabled on GEM
5385 + *
5386 + */
5387 +static int pfe_eth_pause_rx_enabled(struct pfe_eth_priv_s *priv)
5388 +{
5389 + return (readl(priv->EMAC_baseaddr + EMAC_NETWORK_CONFIG) & EMAC_ENABLE_PAUSE_RX) != 0;
5390 +}
5391 +
5392 +/**
5393 + * pfe_eth_set_pauseparam - Sets pause parameters
5394 + *
5395 + */
5396 +static int pfe_eth_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5397 +{
5398 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5399 +
5400 + if (epause->rx_pause)
5401 + {
5402 + gemac_enable_pause_rx(priv->EMAC_baseaddr);
5403 + if (priv->phydev)
5404 + priv->phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
5405 + }
5406 + else
5407 + {
5408 + gemac_disable_pause_rx(priv->EMAC_baseaddr);
5409 + if (priv->phydev)
5410 + priv->phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
5411 + }
5412 +
5413 + return 0;
5414 +}
5415 +
5416 +/**
5417 + * pfe_eth_get_pauseparam - Gets pause parameters
5418 + *
5419 + */
5420 +static void pfe_eth_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5421 +{
5422 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5423 +
5424 + epause->autoneg = 0;
5425 + epause->tx_pause = 0;
5426 + epause->rx_pause = pfe_eth_pause_rx_enabled(priv);
5427 +}
5428 +
5429 +/** pfe_eth_get_hash
5430 + */
5431 +static int pfe_eth_get_hash(u8 * addr)
5432 +{
5433 + u8 temp1,temp2,temp3,temp4,temp5,temp6,temp7,temp8;
5434 + temp1 = addr[0] & 0x3F ;
5435 + temp2 = ((addr[0] & 0xC0) >> 6)| ((addr[1] & 0x0F) << 2);
5436 + temp3 = ((addr[1] & 0xF0) >> 4) | ((addr[2] & 0x03) << 4);
5437 + temp4 = (addr[2] & 0xFC) >> 2;
5438 + temp5 = addr[3] & 0x3F;
5439 + temp6 = ((addr[3] & 0xC0) >> 6) | ((addr[4] & 0x0F) << 2);
5440 + temp7 = ((addr[4] & 0xF0) >>4 ) | ((addr[5] & 0x03) << 4);
5441 + temp8 = ((addr[5] &0xFC) >> 2);
5442 + return (temp1 ^ temp2 ^ temp3 ^ temp4 ^ temp5 ^ temp6 ^ temp7 ^ temp8);
5443 +}
5444 +
5445 +#else
5446 + /*TODO Add pause frame support for LS1012A */
5447 +
5448 +/** pfe_eth_get_hash
5449 + */
5450 +#define HASH_BITS 6 /* #bits in hash */
5451 +#define CRC32_POLY 0xEDB88320
5452 +
5453 +static int pfe_eth_get_hash(u8 * addr)
5454 +{
5455 + unsigned int i, bit, data, crc, hash;
5456 +
5457 + /* calculate crc32 value of mac address */
5458 + crc = 0xffffffff;
5459 +
5460 + for (i = 0; i < 6; i++) {
5461 + data = addr[i];
5462 + for (bit = 0; bit < 8; bit++, data >>= 1) {
5463 + crc = (crc >> 1) ^
5464 + (((crc ^ data) & 1) ? CRC32_POLY : 0);
5465 + }
5466 + }
5467 +
5468 + /* only upper 6 bits (HASH_BITS) are used
5469 +	 * which point to specific bit in the hash registers
5470 + */
5471 + hash = (crc >> (32 - HASH_BITS)) & 0x3f;
5472 +
5473 + return hash;
5474 +}
5475 +
5476 +#endif
5477 +
5478 +struct ethtool_ops pfe_ethtool_ops = {
5479 + .get_settings = pfe_eth_get_settings,
5480 + .set_settings = pfe_eth_set_settings,
5481 + .get_drvinfo = pfe_eth_get_drvinfo,
5482 + .get_regs_len = pfe_eth_gemac_reglen,
5483 + .get_regs = pfe_eth_gemac_get_regs,
5484 + .get_link = ethtool_op_get_link,
5485 +#if defined(CONFIG_PLATFORM_C2000)
5486 + .get_wol = pfe_eth_get_wol,
5487 + .set_wol = pfe_eth_set_wol,
5488 + .set_pauseparam = pfe_eth_set_pauseparam,
5489 + .get_pauseparam = pfe_eth_get_pauseparam,
5490 +#endif
5491 + .get_strings = pfe_eth_gstrings,
5492 + .get_sset_count = pfe_eth_stats_count,
5493 + .get_ethtool_stats = pfe_eth_fill_stats,
5494 + .get_msglevel = pfe_eth_get_msglevel,
5495 + .set_msglevel = pfe_eth_set_msglevel,
5496 + .set_coalesce = pfe_eth_set_coalesce,
5497 + .get_coalesce = pfe_eth_get_coalesce,
5498 +};
5499 +
5500 +
5501 +
5502 +#if defined(CONFIG_PLATFORM_C2000)
5503 +/** pfe_eth_mdio_reset
5504 + */
5505 +int pfe_eth_mdio_reset(struct mii_bus *bus)
5506 +{
5507 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
5508 +
5509 + netif_info(priv, hw, priv->dev, "%s\n", __func__);
5510 +
5511 +#if !defined(CONFIG_PLATFORM_EMULATION)
5512 + mutex_lock(&bus->mdio_lock);
5513 +
5514 + /* Setup the MII Mgmt clock speed */
5515 + if (priv->mii_bus)
5516 + gemac_set_mdc_div(priv->EMAC_baseaddr, priv->mdc_div);
5517 +
5518 + /* Reset the management interface */
5519 + __raw_writel(__raw_readl(priv->EMAC_baseaddr + EMAC_NETWORK_CONTROL) | EMAC_MDIO_EN,
5520 + priv->EMAC_baseaddr + EMAC_NETWORK_CONTROL);
5521 +
5522 + /* Wait until the bus is free */
5523 + while(!(__raw_readl(priv->EMAC_baseaddr + EMAC_NETWORK_STATUS) & EMAC_PHY_IDLE));
5524 +
5525 + mutex_unlock(&bus->mdio_lock);
5526 +#endif
5527 +
5528 + return 0;
5529 +}
5530 +
5531 +
5532 +/** pfe_eth_gemac_phy_timeout
5533 + *
5534 + */
5535 +static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout)
5536 +{
5537 + while(!(__raw_readl(priv->EMAC_baseaddr + EMAC_NETWORK_STATUS) & EMAC_PHY_IDLE)) {
5538 +
5539 + if (timeout-- <= 0) {
5540 + return -1;
5541 + }
5542 +
5543 + udelay(10);
5544 + }
5545 +
5546 + return 0;
5547 +}
5548 +
5549 +
5550 +/** pfe_eth_mdio_write
5551 + */
5552 +static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
5553 +{
5554 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
5555 + u32 write_data;
5556 +
5557 +#if !defined(CONFIG_PLATFORM_EMULATION)
5558 +
5559 + netif_info(priv, hw, priv->dev, "%s: phy %d\n", __func__, mii_id);
5560 +
5561 +// netif_info(priv, hw, priv->dev, "%s %d %d %x\n", bus->id, mii_id, regnum, value);
5562 +
5563 + write_data = 0x50020000;
5564 + write_data |= ((mii_id << 23) | (regnum << 18) | value);
5565 + __raw_writel(write_data, priv->EMAC_baseaddr + EMAC_PHY_MANAGEMENT);
5566 +
5567 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)){
5568 + netdev_err(priv->dev, "%s: phy MDIO write timeout\n", __func__);
5569 + return -1;
5570 + }
5571 +
5572 +#endif
5573 +
5574 + return 0;
5575 +}
5576 +
5577 +
5578 +/** pfe_eth_mdio_read
5579 + */
5580 +static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
5581 +{
5582 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
5583 + u16 value = 0;
5584 + u32 write_data;
5585 +
5586 +#if !defined(CONFIG_PLATFORM_EMULATION)
5587 + netif_info(priv, hw, priv->dev, "%s: phy %d\n", __func__, mii_id);
5588 +
5589 + write_data = 0x60020000;
5590 + write_data |= ((mii_id << 23) | (regnum << 18));
5591 +
5592 + __raw_writel(write_data, priv->EMAC_baseaddr + EMAC_PHY_MANAGEMENT);
5593 +
5594 + if (pfe_eth_gemac_phy_timeout( priv, EMAC_MDIO_TIMEOUT)) {
5595 + netdev_err(priv->dev, "%s: phy MDIO read timeout\n", __func__);
5596 + return -1;
5597 + }
5598 +
5599 + value = __raw_readl(priv->EMAC_baseaddr + EMAC_PHY_MANAGEMENT) & 0xFFFF;
5600 +#endif
5601 +
5602 +// netif_info(priv, hw, priv->dev, "%s %d %d %x\n", bus->id, mii_id, regnum, value);
5603 +
5604 + return value;
5605 +}
5606 +
5607 +#else
5608 +/** pfe_eth_mdio_reset
5609 + */
5610 +int pfe_eth_mdio_reset(struct mii_bus *bus)
5611 +{
5612 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
5613 + u32 phy_speed, pclk = 250000000; /*TODO this needs to be checked read from the correct source*/
5614 +
5615 + netif_info(priv, hw, priv->dev, "%s\n", __func__);
5616 +
5617 + mutex_lock(&bus->mdio_lock);
5618 +
5619 + /*
5620 + * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
5621 + *
5622 + * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
5623 + * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.
5624 + */
5625 + phy_speed = (DIV_ROUND_UP(pclk, 4000000) << EMAC_MII_SPEED_SHIFT);
5626 + phy_speed |= EMAC_HOLDTIME(0x5);
5627 + __raw_writel(phy_speed, priv->PHY_baseaddr + EMAC_MII_CTRL_REG);
5628 +
5629 + mutex_unlock(&bus->mdio_lock);
5630 +
5631 + return 0;
5632 +}
5633 +
5634 +/** pfe_eth_gemac_phy_timeout
5635 + *
5636 + */
5637 +static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout)
5638 +{
5639 + while(!(__raw_readl(priv->PHY_baseaddr + EMAC_IEVENT_REG) & EMAC_IEVENT_MII)) {
5640 +
5641 + if (timeout-- <= 0) {
5642 + return -1;
5643 + }
5644 +
5645 + udelay(10);
5646 + }
5647 + __raw_writel(EMAC_IEVENT_MII, priv->PHY_baseaddr + EMAC_IEVENT_REG);
5648 +
5649 + return 0;
5650 +}
5651 +
5652 +static int pfe_eth_mdio_mux(u8 muxval)
5653 +{
5654 + struct i2c_adapter *a;
5655 + struct i2c_msg msg;
5656 + unsigned char buf[2];
5657 + int ret;
5658 +
5659 + a = i2c_get_adapter(0);
5660 + if (!a)
5661 + return -ENODEV;
5662 +
5663 + /* set bit 1 (the second bit) of chip at 0x09, register 0x13 */
5664 + buf[0] = 0x54; //reg number
5665 + buf[1] = (muxval << 6)| 0x3; //data
5666 + msg.addr = 0x66;
5667 + msg.buf = buf;
5668 + msg.len = 2;
5669 + msg.flags = 0;
5670 + ret = i2c_transfer(a, &msg, 1);
5671 + i2c_put_adapter(a);
5672 + if (ret != 1)
5673 + return -ENODEV;
5674 + return 0;
5675 +
5676 +
5677 +}
5678 +
5679 +static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
5680 +{
5681 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
5682 +
5683 + /*FIXME Dirty hack to configure mux */
5684 + if(priv->mdio_muxval) {
5685 + if(mii_id == 0x1)
5686 + pfe_eth_mdio_mux(0x1);
5687 + else
5688 + pfe_eth_mdio_mux(0x2);
5689 + }
5690 +
5691 + /* start a write op */
5692 + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
5693 + EMAC_MII_DATA_PA(mii_id) | EMAC_MII_DATA_RA(regnum) |
5694 + EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
5695 + priv->PHY_baseaddr + EMAC_MII_DATA_REG);
5696 +
5697 + if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)){
5698 + netdev_err(priv->dev, "%s: phy MDIO write timeout\n", __func__);
5699 + return -1;
5700 + }
5701 + netif_info(priv, hw, priv->dev, "%s: phy %x reg %x val %x\n", __func__, mii_id, regnum, value);
5702 +
5703 + return 0;
5704 +
5705 +
5706 +}
5707 +static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
5708 +{
5709 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
5710 + u16 value = 0;
5711 +
5712 + /*FIXME Dirty hack to configure mux */
5713 + if(priv->mdio_muxval){
5714 + if(mii_id == 0x1)
5715 + pfe_eth_mdio_mux(0x1);
5716 + else
5717 + pfe_eth_mdio_mux(0x2);
5718 + }
5719 +
5720 + /* start a read op */
5721 + __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
5722 + EMAC_MII_DATA_PA(mii_id) | EMAC_MII_DATA_RA(regnum) |
5723 + EMAC_MII_DATA_TA, priv->PHY_baseaddr + EMAC_MII_DATA_REG);
5724 +
5725 + if (pfe_eth_gemac_phy_timeout( priv, EMAC_MDIO_TIMEOUT)) {
5726 + netdev_err(priv->dev, "%s: phy MDIO read timeout\n", __func__);
5727 + return -1;
5728 + }
5729 +
5730 + value = EMAC_MII_DATA(__raw_readl(priv->PHY_baseaddr + EMAC_MII_DATA_REG));
5731 + netif_info(priv, hw, priv->dev, "%s: phy %x reg %x val %x\n", __func__, mii_id, regnum, value);
5732 + return value;
5733 +}
5734 +#endif
5735 +static int pfe_eth_mdio_init(struct pfe_eth_priv_s *priv, struct comcerto_mdio_platform_data *minfo)
5736 +{
5737 + struct mii_bus *bus;
5738 + int rc;
5739 +
5740 + netif_info(priv, drv, priv->dev, "%s\n", __func__);
5741 + printk( "%s\n", __func__);
5742 +
5743 +#if !defined(CONFIG_PLATFORM_EMULATION)
5744 + bus = mdiobus_alloc();
5745 + if (!bus) {
5746 + netdev_err(priv->dev, "mdiobus_alloc() failed\n");
5747 + rc = -ENOMEM;
5748 + goto err0;
5749 + }
5750 +
5751 + bus->name = "Comcerto MDIO Bus";
5752 + bus->read = &pfe_eth_mdio_read;
5753 + bus->write = &pfe_eth_mdio_write;
5754 + bus->reset = &pfe_eth_mdio_reset;
5755 + snprintf(bus->id, MII_BUS_ID_SIZE, "comcerto-%x", priv->id);
5756 + bus->priv = priv;
5757 +
5758 + bus->phy_mask = minfo->phy_mask;
5759 + priv->mdc_div = minfo->mdc_div;
5760 +
5761 + if (!priv->mdc_div)
5762 + priv->mdc_div = 64;
5763 +
5764 + bus->irq = minfo->irq;
5765 +
5766 + bus->parent = priv->pfe->dev;
5767 +
5768 + netif_info(priv, drv, priv->dev, "%s: mdc_div: %d, phy_mask: %x \n", __func__, priv->mdc_div, bus->phy_mask);
5769 + rc = mdiobus_register(bus);
5770 + if (rc) {
5771 + netdev_err(priv->dev, "mdiobus_register(%s) failed\n", bus->name);
5772 + goto err1;
5773 + }
5774 +
5775 + priv->mii_bus = bus;
5776 + pfe_eth_mdio_reset(bus);
5777 +
5778 + return 0;
5779 +
5780 +err1:
5781 + mdiobus_free(bus);
5782 +err0:
5783 + return rc;
5784 +#else
5785 + return 0;
5786 +#endif
5787 +
5788 +}
5789 +
5790 +/** pfe_eth_mdio_exit
5791 + */
5792 +static void pfe_eth_mdio_exit(struct mii_bus *bus)
5793 +{
5794 + if (!bus)
5795 + return;
5796 +
5797 + netif_info((struct pfe_eth_priv_s *)bus->priv, drv, ((struct pfe_eth_priv_s *)(bus->priv))->dev, "%s\n", __func__);
5798 +
5799 + mdiobus_unregister(bus);
5800 + mdiobus_free(bus);
5801 +}
5802 +
5803 +#if defined(CONFIG_PLATFORM_C2000)
5804 +/** pfe_get_interface
5805 + */
5806 +static phy_interface_t pfe_get_interface(struct net_device *dev)
5807 +{
5808 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5809 + u32 mii_mode = priv->einfo->mii_config;
5810 +
5811 + netif_info(priv, drv, dev, "%s\n", __func__);
5812 +
5813 + if (priv->einfo->gemac_mode & (GEMAC_SW_CONF)) {
5814 + switch (mii_mode) {
5815 + case CONFIG_COMCERTO_USE_GMII:
5816 + return PHY_INTERFACE_MODE_GMII;
5817 + break;
5818 + case CONFIG_COMCERTO_USE_RGMII:
5819 + return PHY_INTERFACE_MODE_RGMII;
5820 + break;
5821 + case CONFIG_COMCERTO_USE_RMII:
5822 + return PHY_INTERFACE_MODE_RMII;
5823 + break;
5824 + case CONFIG_COMCERTO_USE_SGMII:
5825 + return PHY_INTERFACE_MODE_SGMII;
5826 + break;
5827 +
5828 + default :
5829 + case CONFIG_COMCERTO_USE_MII:
5830 + return PHY_INTERFACE_MODE_MII;
5831 + break;
5832 +
5833 + }
5834 + } else {
5835 + // Bootstrap config read from controller
5836 + BUG();
5837 + return 0;
5838 + }
5839 +}
5840 +#endif
5841 +
5842 +/** pfe_get_phydev_speed
5843 + */
5844 +static int pfe_get_phydev_speed(struct phy_device *phydev)
5845 +{
5846 + switch (phydev->speed) {
5847 + case 10:
5848 + return SPEED_10M;
5849 + case 100:
5850 + return SPEED_100M;
5851 + case 1000:
5852 + default:
5853 + return SPEED_1000M;
5854 + }
5855 +
5856 +}
5857 +
5858 +/** pfe_set_rgmii_speed
5859 + */
5860 +#define RGMIIPCR 0x434
5861 +/* RGMIIPCR bit definitions*/
5862 +#define SCFG_RGMIIPCR_EN_AUTO (0x00000008)
5863 +#define SCFG_RGMIIPCR_SETSP_1000M (0x00000004)
5864 +#define SCFG_RGMIIPCR_SETSP_100M (0x00000000)
5865 +#define SCFG_RGMIIPCR_SETSP_10M (0x00000002)
5866 +#define SCFG_RGMIIPCR_SETFD (0x00000001)
5867 +
5868 +static void pfe_set_rgmii_speed(struct phy_device *phydev)
5869 +{
5870 + u32 rgmii_pcr;
5871 +
5872 + regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
5873 + rgmii_pcr &= ~(SCFG_RGMIIPCR_SETSP_1000M|SCFG_RGMIIPCR_SETSP_10M);
5874 +
5875 + switch (phydev->speed) {
5876 + case 10:
5877 + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
5878 + break;
5879 + case 1000:
5880 + rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
5881 + break;
5882 + case 100:
5883 + default:
5884 + /* Default is 100M */
5885 + break;
5886 + }
5887 + regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
5888 +
5889 +
5890 +}
5891 +/** pfe_get_phydev_duplex
5892 + */
5893 +static int pfe_get_phydev_duplex(struct phy_device *phydev)
5894 +{
5895 + //return ( phydev->duplex == DUPLEX_HALF ) ? DUP_HALF:DUP_FULL ;
5896 + return DUPLEX_FULL;
5897 +}
5898 +
5899 +/** pfe_eth_adjust_link
5900 + */
5901 +static void pfe_eth_adjust_link(struct net_device *dev)
5902 +{
5903 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5904 + unsigned long flags;
5905 + struct phy_device *phydev = priv->phydev;
5906 + int new_state = 0;
5907 +
5908 + netif_info(priv, drv, dev, "%s\n", __func__);
5909 +
5910 + spin_lock_irqsave(&priv->lock, flags);
5911 + if (phydev->link) {
5912 + /* Now we make sure that we can be in full duplex mode.
5913 + * If not, we operate in half-duplex mode. */
5914 + if (phydev->duplex != priv->oldduplex) {
5915 + new_state = 1;
5916 + gemac_set_duplex(priv->EMAC_baseaddr, pfe_get_phydev_duplex(phydev));
5917 + priv->oldduplex = phydev->duplex;
5918 + }
5919 +
5920 + if (phydev->speed != priv->oldspeed) {
5921 + new_state = 1;
5922 + gemac_set_speed(priv->EMAC_baseaddr, pfe_get_phydev_speed(phydev));
5923 + if(priv->einfo->mii_config == PHY_INTERFACE_MODE_RGMII)
5924 + pfe_set_rgmii_speed(phydev);
5925 + priv->oldspeed = phydev->speed;
5926 + }
5927 +
5928 + if (!priv->oldlink) {
5929 + new_state = 1;
5930 + priv->oldlink = 1;
5931 + }
5932 +
5933 + } else if (priv->oldlink) {
5934 + new_state = 1;
5935 + priv->oldlink = 0;
5936 + priv->oldspeed = 0;
5937 + priv->oldduplex = -1;
5938 + }
5939 +
5940 + if (new_state && netif_msg_link(priv))
5941 + phy_print_status(phydev);
5942 +
5943 + spin_unlock_irqrestore(&priv->lock, flags);
5944 +}
5945 +
5946 +
5947 +/** pfe_phy_exit
5948 + */
5949 +static void pfe_phy_exit(struct net_device *dev)
5950 +{
5951 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5952 +
5953 + netif_info(priv, drv, dev, "%s\n", __func__);
5954 +
5955 + phy_disconnect(priv->phydev);
5956 + priv->phydev = NULL;
5957 +}
5958 +
5959 +/** pfe_eth_stop
5960 + */
5961 +static void pfe_eth_stop( struct net_device *dev , int wake)
5962 +{
5963 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
5964 +
5965 + netif_info(priv, drv, dev, "%s\n", __func__);
5966 +
5967 + if (wake)
5968 + gemac_tx_disable(priv->EMAC_baseaddr);
5969 + else {
5970 + gemac_disable(priv->EMAC_baseaddr);
5971 + gpi_disable(priv->GPI_baseaddr);
5972 +
5973 + if (priv->phydev)
5974 + phy_stop(priv->phydev);
5975 + }
5976 +}
5977 +
5978 +/** pfe_eth_start
5979 + */
5980 +static int pfe_eth_start( struct pfe_eth_priv_s *priv )
5981 +{
5982 + netif_info(priv, drv, priv->dev, "%s\n", __func__);
5983 +
5984 + if (priv->phydev)
5985 + phy_start(priv->phydev);
5986 +
5987 + gpi_enable(priv->GPI_baseaddr);
5988 + gemac_enable(priv->EMAC_baseaddr);
5989 +
5990 + return 0;
5991 +}
5992 +
5993 +/*Configure on chip serdes through mdio
5994 + * Is there any better way to do this? */
5995 +static void ls1012a_configure_serdes(struct net_device *dev)
5996 +{
5997 + struct pfe_eth_priv_s *priv = pfe->eth.eth_priv[0]; // FIXME This will not work for EMAC2 as SGMII
5998 + /*int value,sgmii_2500=0; */
5999 + struct mii_bus *bus = priv->mii_bus;
6000 +
6001 + netif_info(priv, drv, dev, "%s\n", __func__);
6002 + /* PCS configuration done with corresponding GEMAC */
6003 +
6004 + pfe_eth_mdio_read(bus, 0, 0);
6005 + pfe_eth_mdio_read(bus, 0, 1);
6006 +#if 1
6007 +	/*These settings taken from validation team */
6008 + pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
6009 + pfe_eth_mdio_write(bus, 0, 0x14, 0xb);
6010 + pfe_eth_mdio_write(bus, 0, 0x4, 0x1a1);
6011 + pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
6012 + pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
6013 + pfe_eth_mdio_write(bus, 0, 0x0, 0x1140);
6014 + return;
6015 +#else
6016 + /*Reset serdes */
6017 + pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
6018 +
6019 + /* SGMII IF mode + AN enable only for 1G SGMII, not for 2.5G */
6020 + value = PHY_SGMII_IF_MODE_SGMII;
6021 + if (!sgmii_2500)
6022 + value |= PHY_SGMII_IF_MODE_AN;
6023 +
6024 + pfe_eth_mdio_write(bus, 0, 0x14, value);
6025 +
6026 + /* Dev ability according to SGMII specification */
6027 + value = PHY_SGMII_DEV_ABILITY_SGMII;
6028 + pfe_eth_mdio_write(bus, 0, 0x4, value);
6029 +
6030 + //These values taken from validation team
6031 + pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
6032 + pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
6033 +
6034 + /* Restart AN */
6035 + value = PHY_SGMII_CR_DEF_VAL;
6036 + if (!sgmii_2500)
6037 + value |= PHY_SGMII_CR_RESET_AN;
6038 + pfe_eth_mdio_write(bus, 0, 0, value);
6039 +
6040 +#endif
6041 +}
6042 +
6043 +/** pfe_phy_init
6044 + *
6045 + */
6046 +static int pfe_phy_init(struct net_device *dev)
6047 +{
6048 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
6049 + struct phy_device *phydev;
6050 + char phy_id[MII_BUS_ID_SIZE + 3];
6051 + char bus_id[MII_BUS_ID_SIZE];
6052 + phy_interface_t interface;
6053 +
6054 + priv->oldlink = 0;
6055 + priv->oldspeed = 0;
6056 + priv->oldduplex = -1;
6057 +
6058 + snprintf(bus_id, MII_BUS_ID_SIZE, "comcerto-%d", 0);
6059 + snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, priv->einfo->phy_id);
6060 +
6061 + netif_info(priv, drv, dev, "%s: %s\n", __func__, phy_id);
6062 +#if defined(CONFIG_PLATFORM_C2000)
6063 + interface = pfe_get_interface(dev);
6064 +#else
6065 + interface = priv->einfo->mii_config;
6066 + if(interface == PHY_INTERFACE_MODE_SGMII) {
6067 + /*Configure SGMII PCS */
6068 + if(pfe->scfg) {
6069 + /*Config MDIO from serdes */
6070 + regmap_write(pfe->scfg, 0x484, 0x00000000);
6071 + }
6072 + ls1012a_configure_serdes(dev);
6073 + }
6074 +
6075 + if(pfe->scfg) {
6076 + /*Config MDIO from PAD */
6077 + regmap_write(pfe->scfg, 0x484, 0x80000000);
6078 + }
6079 +#endif
6080 +
6081 +
6082 + priv->oldlink = 0;
6083 + priv->oldspeed = 0;
6084 + priv->oldduplex = -1;
6085 +
6086 + printk("%s interface %x \n", __func__, interface);
6087 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
6088 + phydev = phy_connect(dev, phy_id, &pfe_eth_adjust_link, interface);
6089 +#else
6090 + phydev = phy_connect(dev, phy_id, &pfe_eth_adjust_link, 0, interface);
6091 +#endif
6092 +
6093 + if (IS_ERR(phydev)) {
6094 + netdev_err(dev, "phy_connect() failed\n");
6095 + return PTR_ERR(phydev);
6096 + }
6097 +
6098 + priv->phydev = phydev;
6099 + phydev->irq = PHY_POLL;
6100 +
6101 +#if defined(CONFIG_PLATFORM_C2000)
6102 + /* Pause frame support */
6103 + phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
6104 + if (pfe_eth_pause_rx_enabled(priv))
6105 + phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
6106 + else
6107 + phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
6108 +#else
6109 + /*TODO Add pause frame support for LS1012A */
6110 +#endif
6111 +
6112 + return 0;
6113 +}
6114 +
6115 +/** pfe_gemac_init
6116 + */
6117 +static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
6118 +{
6119 + GEMAC_CFG cfg;
6120 +
6121 + netif_info(priv, ifup, priv->dev, "%s\n", __func__);
6122 +
6123 + /* software config */
6124 + /* MII interface mode selection */
6125 + switch (priv->einfo->mii_config) {
6126 + case CONFIG_COMCERTO_USE_GMII:
6127 + cfg.mode = GMII;
6128 + break;
6129 +
6130 + case CONFIG_COMCERTO_USE_MII:
6131 + cfg.mode = MII;
6132 + break;
6133 +
6134 + case CONFIG_COMCERTO_USE_RGMII:
6135 + cfg.mode = RGMII;
6136 + break;
6137 +
6138 + case CONFIG_COMCERTO_USE_RMII:
6139 + cfg.mode = RMII;
6140 + break;
6141 +
6142 + case CONFIG_COMCERTO_USE_SGMII:
6143 + cfg.mode = SGMII;
6144 + break;
6145 +
6146 + default:
6147 + cfg.mode = RGMII;
6148 + }
6149 +
6150 + /* Speed selection */
6151 + switch (priv->einfo->gemac_mode & GEMAC_SW_SPEED_1G ) {
6152 + case GEMAC_SW_SPEED_1G:
6153 + cfg.speed = SPEED_1000M;
6154 + break;
6155 +
6156 + case GEMAC_SW_SPEED_100M:
6157 + cfg.speed = SPEED_100M;
6158 + break;
6159 +
6160 + case GEMAC_SW_SPEED_10M:
6161 + cfg.speed = SPEED_10M;
6162 + break;
6163 +
6164 + default:
6165 + cfg.speed = SPEED_1000M;
6166 + }
6167 +
6168 + /* Duplex selection */
6169 + cfg.duplex = ( priv->einfo->gemac_mode & GEMAC_SW_FULL_DUPLEX ) ? DUPLEX_FULL : DUPLEX_HALF;
6170 +
6171 + gemac_set_config( priv->EMAC_baseaddr, &cfg);
6172 + gemac_allow_broadcast( priv->EMAC_baseaddr );
6173 + gemac_disable_unicast( priv->EMAC_baseaddr );
6174 + gemac_disable_multicast( priv->EMAC_baseaddr );
6175 + gemac_disable_fcs_rx( priv->EMAC_baseaddr );
6176 + gemac_enable_1536_rx( priv->EMAC_baseaddr );
6177 + gemac_enable_rx_jmb( priv->EMAC_baseaddr );
6178 + gemac_enable_stacked_vlan( priv->EMAC_baseaddr );
6179 + gemac_enable_pause_rx( priv->EMAC_baseaddr );
6180 + gemac_set_bus_width(priv->EMAC_baseaddr, 64);
6181 + /*TODO just for testing remove it later */
6182 + gemac_enable_copy_all(priv->EMAC_baseaddr);
6183 +
6184 + /*GEM will perform checksum verifications*/
6185 + if (priv->dev->features & NETIF_F_RXCSUM)
6186 + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
6187 + else
6188 + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
6189 +
6190 + return 0;
6191 +}
6192 +
6193 +/** pfe_eth_event_handler
6194 + */
6195 +static int pfe_eth_event_handler(void *data, int event, int qno)
6196 +{
6197 + struct pfe_eth_priv_s *priv = data;
6198 +
6199 + switch (event) {
6200 + case EVENT_RX_PKT_IND:
6201 +
6202 + if (qno == 0) {
6203 + if (napi_schedule_prep(&priv->high_napi)) {
6204 + netif_info(priv, intr, priv->dev, "%s: schedule high prio poll\n", __func__);
6205 +
6206 +#ifdef PFE_ETH_NAPI_STATS
6207 + priv->napi_counters[NAPI_SCHED_COUNT]++;
6208 +#endif
6209 +
6210 + __napi_schedule(&priv->high_napi);
6211 + }
6212 + }
6213 + else if (qno == 1) {
6214 + if (napi_schedule_prep(&priv->low_napi)) {
6215 + netif_info(priv, intr, priv->dev, "%s: schedule low prio poll\n", __func__);
6216 +
6217 +#ifdef PFE_ETH_NAPI_STATS
6218 + priv->napi_counters[NAPI_SCHED_COUNT]++;
6219 +#endif
6220 + __napi_schedule(&priv->low_napi);
6221 + }
6222 + }
6223 + else if (qno == 2) {
6224 + if (napi_schedule_prep(&priv->lro_napi)) {
6225 + netif_info(priv, intr, priv->dev, "%s: schedule lro prio poll\n", __func__);
6226 +
6227 +#ifdef PFE_ETH_NAPI_STATS
6228 + priv->napi_counters[NAPI_SCHED_COUNT]++;
6229 +#endif
6230 + __napi_schedule(&priv->lro_napi);
6231 + }
6232 + }
6233 +
6234 + break;
6235 +
6236 + case EVENT_TXDONE_IND:
6237 + case EVENT_HIGH_RX_WM:
6238 + default:
6239 + break;
6240 + }
6241 +
6242 + return 0;
6243 +}
6244 +
6245 +/** pfe_eth_open
6246 + */
6247 +static int pfe_eth_open(struct net_device *dev)
6248 +{
6249 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
6250 + struct hif_client_s *client;
6251 + int rc;
6252 +
6253 + netif_info(priv, ifup, dev, "%s\n", __func__);
6254 +
6255 + /* Register client driver with HIF */
6256 + client = &priv->client;
6257 + memset(client, 0, sizeof(*client));
6258 + client->id = PFE_CL_GEM0 + priv->id;
6259 + client->tx_qn = emac_txq_cnt;
6260 + client->rx_qn = EMAC_RXQ_CNT;
6261 + client->priv = priv;
6262 + client->pfe = priv->pfe;
6263 + client->event_handler = pfe_eth_event_handler;
6264 +
6265 + /* FIXME : For now hif lib sets all tx and rx queues to same size */
6266 + client->tx_qsize = EMAC_TXQ_DEPTH;
6267 + client->rx_qsize = EMAC_RXQ_DEPTH;
6268 +
6269 + if ((rc = hif_lib_client_register(client))) {
6270 + netdev_err(dev, "%s: hif_lib_client_register(%d) failed\n", __func__, client->id);
6271 + goto err0;
6272 + }
6273 +
6274 + netif_info(priv, drv, dev, "%s: registered client: %p\n", __func__, client);
6275 +
6276 +#if defined(CONFIG_PLATFORM_C2000)
6277 + /* Enable gemac tx clock */
6278 + clk_enable(priv->gemtx_clk);
6279 +#endif
6280 +
6281 + pfe_gemac_init(priv);
6282 +
6283 + if (!is_valid_ether_addr(dev->dev_addr)) {
6284 + netdev_err(dev, "%s: invalid MAC address\n", __func__);
6285 + rc = -EADDRNOTAVAIL;
6286 + goto err1;
6287 + }
6288 +
6289 + gemac_set_laddrN( priv->EMAC_baseaddr, ( MAC_ADDR *)dev->dev_addr, 1 );
6290 +
6291 + napi_enable(&priv->high_napi);
6292 + napi_enable(&priv->low_napi);
6293 + napi_enable(&priv->lro_napi);
6294 +
6295 + rc = pfe_eth_start(priv);
6296 +
6297 + netif_tx_wake_all_queues(dev);
6298 +
6299 + //pfe_ctrl_set_eth_state(priv->id, 1, dev->dev_addr);
6300 +
6301 + priv->tx_timer.expires = jiffies + ( COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ )/1000;
6302 + add_timer(&priv->tx_timer);
6303 +
6304 + return rc;
6305 +
6306 +err1:
6307 + hif_lib_client_unregister(&priv->client);
6308 +#if defined(CONFIG_PLATFORM_C2000)
6309 + clk_disable(priv->gemtx_clk);
6310 +#endif
6311 +
6312 +err0:
6313 + return rc;
6314 +}
6315 +/*
6316 + * pfe_eth_shutdown
6317 + */
6318 +int pfe_eth_shutdown( struct net_device *dev, int wake)
6319 +{
6320 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
6321 + int i, qstatus;
6322 + unsigned long next_poll = jiffies + 1, end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
6323 + int tx_pkts, prv_tx_pkts;
6324 +
6325 + netif_info(priv, ifdown, dev, "%s\n", __func__);
6326 +
6327 + del_timer_sync(&priv->tx_timer);
6328 +
6329 + for(i = 0; i < emac_txq_cnt; i++)
6330 + hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
6331 +
6332 + netif_tx_stop_all_queues(dev);
6333 +
6334 + do {
6335 + tx_pkts = 0;
6336 + pfe_eth_flush_tx(priv, 1);
6337 +
6338 + for (i = 0; i < emac_txq_cnt; i++)
6339 + tx_pkts += hif_lib_tx_pending(&priv->client, i);
6340 +
6341 + if (tx_pkts) {
6342 + /*Don't wait forever, break if we cross max timeout */
6343 + if (time_after(jiffies, end)) {
6344 + printk(KERN_ERR "(%s)Tx is not complete after %dmsec\n", dev->name, TX_POLL_TIMEOUT_MS);
6345 + break;
6346 + }
6347 +
6348 + printk("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n", __func__, dev->name, tx_pkts);
6349 + if (need_resched())
6350 + schedule();
6351 + }
6352 +
6353 + } while(tx_pkts);
6354 +
6355 + end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
6356 + /*Disable transmit in PFE before disabling GEMAC */
6357 + //pfe_ctrl_set_eth_state(priv->id, 0, NULL);
6358 +
6359 + prv_tx_pkts = tmu_pkts_processed(priv->id);
6360 + /*Wait till TMU transmits all pending packets
6361 + * poll tmu_qstatus and pkts processed by TMU for every 10ms
6362 + * Consider TMU is busy, if we see TMU queue pending or any packets processed by TMU
6363 + */
6364 + while(1) {
6365 +
6366 + if (time_after(jiffies, next_poll)) {
6367 +
6368 + tx_pkts = tmu_pkts_processed(priv->id);
6369 + qstatus = tmu_qstatus(priv->id) & 0x7ffff;
6370 +
6371 + if(!qstatus && (tx_pkts == prv_tx_pkts)) {
6372 + break;
6373 + }
6374 + /*Don't wait forever, break if we cross max timeout(TX_POLL_TIMEOUT_MS) */
6375 + if (time_after(jiffies, end)) {
6376 + printk(KERN_ERR "TMU%d is busy after %dmsec\n", priv->id, TX_POLL_TIMEOUT_MS);
6377 + break;
6378 + }
6379 + prv_tx_pkts = tx_pkts;
6380 + next_poll++;
6381 + }
6382 + if (need_resched())
6383 + schedule();
6384 +
6385 +
6386 + }
6387 + /* Wait for some more time to complete transmitting packet if any */
6388 + next_poll = jiffies + 1;
6389 + while(1) {
6390 + if (time_after(jiffies, next_poll))
6391 + break;
6392 + if (need_resched())
6393 + schedule();
6394 + }
6395 +
6396 + pfe_eth_stop(dev, wake);
6397 +
6398 + napi_disable(&priv->lro_napi);
6399 + napi_disable(&priv->low_napi);
6400 + napi_disable(&priv->high_napi);
6401 +
6402 +#if defined(CONFIG_PLATFORM_C2000)
6403 + /* Disable gemac tx clock */
6404 + clk_disable(priv->gemtx_clk);
6405 +#endif
6406 +
6407 + hif_lib_client_unregister(&priv->client);
6408 +
6409 + return 0;
6410 +}
6411 +
6412 +/* pfe_eth_close
6413 + *
6414 + */
6415 +static int pfe_eth_close( struct net_device *dev )
6416 +{
6417 + pfe_eth_shutdown(dev, 0);
6418 +
6419 + return 0;
6420 +}
6421 +
6422 +/* pfe_eth_suspend
6423 + *
6424 + * return value : 1 if netdevice is configured to wakeup system
6425 + * 0 otherwise
6426 + */
6427 +int pfe_eth_suspend(struct net_device *dev)
6428 +{
6429 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
6430 + int retval = 0;
6431 +
6432 + if (priv->wol) {
6433 + gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
6434 + retval = 1;
6435 + }
6436 + pfe_eth_shutdown(dev, priv->wol);
6437 +
6438 + return retval;
6439 +}
6440 +
6441 +/** pfe_eth_resume
6442 + *
6443 + */
6444 +int pfe_eth_resume(struct net_device *dev)
6445 +{
6446 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
6447 +
6448 + if (priv->wol)
6449 + gemac_set_wol(priv->EMAC_baseaddr, 0);
6450 +
6451 + return pfe_eth_open(dev);
6452 +}
6453 +
6454 +#if defined(CONFIG_PLATFORM_C2000)
6455 +/** pfe_eth_set_device_wakeup
6456 + *
6457 + * Called when a netdevice changes its wol status.
6458 + * Scans state of all interfaces and update PFE device
6459 + * wakeable state
6460 + */
6461 +static void pfe_eth_set_device_wakeup(struct pfe *pfe)
6462 +{
6463 + int i;
6464 + int wake = 0;
6465 +
6466 + for(i = 0; i < NUM_GEMAC_SUPPORT; i++)
6467 + wake |= pfe->eth.eth_priv[i]->wol;
6468 +
6469 + device_set_wakeup_enable(pfe->dev, wake);
6470 + //TODO Find correct IRQ mapping.
6471 + //TODO interface with PMU
6472 + //int irq_set_irq_wake(unsigned int irq, unsigned int on)
6473 +}
6474 +#endif
6475 +/** pfe_eth_get_queuenum
6476 + *
6477 + */
6478 +static int pfe_eth_get_queuenum( struct pfe_eth_priv_s *priv, struct sk_buff *skb )
6479 +{
6480 + int queuenum = 0;
6481 + unsigned long flags;
6482 +
6483 + /* Get the Fast Path queue number */
6484 + /* Use conntrack mark (if conntrack exists), then packet mark (if any), then fallback to default */
6485 +#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
6486 + if (skb->nfct) {
6487 + enum ip_conntrack_info cinfo;
6488 + struct nf_conn *ct;
6489 + ct = nf_ct_get(skb, &cinfo);
6490 +
6491 + if (ct) {
6492 + u_int32_t connmark;
6493 + connmark = ct->mark;
6494 +
6495 + if ((connmark & 0x80000000) && priv->id != 0)
6496 + connmark >>= 16;
6497 +
6498 + queuenum = connmark & EMAC_QUEUENUM_MASK;
6499 + }
6500 + }
6501 + else /* continued after #endif ... */
6502 +#endif
6503 + if (skb->mark)
6504 + queuenum = skb->mark & EMAC_QUEUENUM_MASK;
6505 + else {
6506 + spin_lock_irqsave(&priv->lock, flags);
6507 + queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
6508 + spin_unlock_irqrestore(&priv->lock, flags);
6509 + }
6510 +
6511 + return queuenum;
6512 +}
6513 +
6514 +
6515 +
6516 +/** pfe_eth_might_stop_tx
6517 + *
6518 + */
6519 +static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum, struct netdev_queue *tx_queue, unsigned int n_desc, unsigned int n_segs)
6520 +{
6521 + int tried = 0;
6522 + ktime_t kt;
6523 +
6524 +try_again:
6525 + if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc)
6526 + || (hif_lib_tx_avail(&priv->client, queuenum) < n_desc)
6527 + || (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
6528 +
6529 + if (!tried) {
6530 + hif_tx_unlock(&pfe->hif);
6531 + pfe_eth_flush_txQ(priv, queuenum, 1, n_desc);
6532 + hif_lib_update_credit(&priv->client, queuenum);
6533 + tried = 1;
6534 + hif_tx_lock(&pfe->hif);
6535 + goto try_again;
6536 + }
6537 +#ifdef PFE_ETH_TX_STATS
6538 + if (__hif_tx_avail(&pfe->hif) < n_desc)
6539 + priv->stop_queue_hif[queuenum]++;
6540 + else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
6541 + priv->stop_queue_hif_client[queuenum]++;
6542 + }
6543 + else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs) {
6544 + priv->stop_queue_credit[queuenum]++;
6545 + }
6546 + priv->stop_queue_total[queuenum]++;
6547 +#endif
6548 + netif_tx_stop_queue(tx_queue);
6549 +
6550 + kt = ktime_set(0, COMCERTO_TX_FAST_RECOVERY_TIMEOUT_MS * NSEC_PER_MSEC);
6551 + hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt, HRTIMER_MODE_REL);
6552 + return -1;
6553 + }
6554 + else {
6555 + return 0;
6556 + }
6557 +}
6558 +
6559 +#define SA_MAX_OP 2
6560 +/** pfe_hif_send_packet
6561 + *
6562 + * At this level if TX fails we drop the packet
6563 + */
6564 +static void pfe_hif_send_packet( struct sk_buff *skb, struct pfe_eth_priv_s *priv, int queuenum)
6565 +{
6566 + struct skb_shared_info *sh = skb_shinfo(skb);
6567 + unsigned int nr_frags;
6568 + u32 ctrl = 0;
6569 +
6570 + netif_info(priv, tx_queued, priv->dev, "%s\n", __func__);
6571 +
6572 + if (skb_is_gso(skb)) {
6573 + priv->stats.tx_dropped++;
6574 + return;
6575 + }
6576 +
6577 + if (skb->ip_summed == CHECKSUM_PARTIAL) {
6578 + if (skb->len > 1522) {
6579 + skb->ip_summed = 0;
6580 + ctrl = 0;
6581 +
6582 + if (pfe_compute_csum(skb)){
6583 + kfree_skb(skb);
6584 + return;
6585 + }
6586 + }
6587 + else
6588 + ctrl = HIF_CTRL_TX_CHECKSUM;
6589 + }
6590 +
6591 + nr_frags = sh->nr_frags;
6592 +
6593 + if (nr_frags) {
6594 + skb_frag_t *f;
6595 + int i;
6596 +
6597 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data, skb_headlen(skb), ctrl, HIF_FIRST_BUFFER, skb);
6598 +
6599 + for (i = 0; i < nr_frags - 1; i++) {
6600 + f = &sh->frags[i];
6601 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb_frag_address(f), skb_frag_size(f), 0x0, 0x0, skb);
6602 + }
6603 +
6604 + f = &sh->frags[i];
6605 +
6606 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb_frag_address(f), skb_frag_size(f), 0x0, HIF_LAST_BUFFER|HIF_DATA_VALID, skb);
6607 +
6608 + netif_info(priv, tx_queued, priv->dev, "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n", __func__, skb, nr_frags, skb->len);
6609 + }
6610 + else {
6611 + __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data, skb->len, ctrl, HIF_FIRST_BUFFER | HIF_LAST_BUFFER | HIF_DATA_VALID, skb);
6612 + netif_info(priv, tx_queued, priv->dev, "%s: pkt sent successfully skb:%p len:%d\n", __func__, skb, skb->len);
6613 + }
6614 + hif_tx_dma_start();
6615 + priv->stats.tx_packets++;
6616 + priv->stats.tx_bytes += skb->len;
6617 + hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
6618 +}
6619 +
6620 +/** pfe_eth_flush_txQ
6621 + */
6622 +static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int txQ_num, int from_tx, int n_desc)
6623 +{
6624 + struct sk_buff *skb;
6625 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, txQ_num);
6626 + int count = max(TX_FREE_MAX_COUNT, n_desc);
6627 + unsigned int flags;
6628 +
6629 + netif_info(priv, tx_done, priv->dev, "%s\n", __func__);
6630 +
6631 + if (!from_tx)
6632 + __netif_tx_lock_bh(tx_queue);
6633 +
6634 + /* Clean HIF and client queue */
6635 + while (count && (skb = hif_lib_tx_get_next_complete(&priv->client, txQ_num, &flags, count))) {
6636 +
6637 + /* FIXME : Invalid data can be skipped in hif_lib itself */
6638 + if (flags & HIF_DATA_VALID) {
6639 + dev_kfree_skb_any(skb);
6640 +
6641 + }
6642 + // When called from the timer, flush all descriptors
6643 + if (from_tx)
6644 + count--;
6645 + }
6646 +
6647 + if (!from_tx)
6648 + __netif_tx_unlock_bh(tx_queue);
6649 +}
6650 +
6651 +/** pfe_eth_flush_tx
6652 + */
6653 +static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv, int force)
6654 +{
6655 + int ii;
6656 +
6657 + netif_info(priv, tx_done, priv->dev, "%s\n", __func__);
6658 +
6659 + for (ii = 0; ii < emac_txq_cnt; ii++) {
6660 + if (force || (time_after(jiffies, priv->client.tx_q[ii].jiffies_last_packet + (COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ)/1000))) {
6661 + pfe_eth_flush_txQ(priv, ii, 0, 0); //We will release everything we can based on from_tx param, so the count param can be set to any value
6662 + hif_lib_update_credit(&priv->client, ii);
6663 + }
6664 + }
6665 +}
6666 +
6667 +void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int *n_segs)
6668 +{
6669 + struct skb_shared_info *sh = skb_shinfo(skb);
6670 +
6671 + // Scattered data
6672 + if (sh->nr_frags) {
6673 + *n_desc = sh->nr_frags + 1;
6674 + *n_segs = 1;
6675 + }
6676 + // Regular case
6677 + else {
6678 + *n_desc = 1;
6679 + *n_segs = 1;
6680 + }
6681 + return;
6682 +}
6683 +
6684 +/** pfe_eth_send_packet
6685 + */
6686 +static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *dev)
6687 +{
6688 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
6689 + int txQ_num = skb_get_queue_mapping(skb);
6690 + int n_desc, n_segs, count;
6691 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, txQ_num);
6692 +
6693 + netif_info(priv, tx_queued, dev, "%s\n", __func__);
6694 +
6695 + if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ + sizeof(unsigned long)))) {
6696 +
6697 + netif_warn(priv, tx_err, priv->dev, "%s: copying skb\n", __func__);
6698 +
6699 + if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned long)), 0, GFP_ATOMIC)) {
6700 + /* No need to re-transmit, no way to recover*/
6701 + kfree_skb(skb);
6702 + priv->stats.tx_dropped++;
6703 + return NETDEV_TX_OK;
6704 + }
6705 + }
6706 +
6707 + pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
6708 +
6709 + hif_tx_lock(&pfe->hif);
6710 + if(unlikely(pfe_eth_might_stop_tx(priv, txQ_num, tx_queue, n_desc, n_segs))) {
6711 +#ifdef PFE_ETH_TX_STATS
6712 + if(priv->was_stopped[txQ_num]) {
6713 + priv->clean_fail[txQ_num]++;
6714 + priv->was_stopped[txQ_num] = 0;
6715 + }
6716 +#endif
6717 + hif_tx_unlock(&pfe->hif);
6718 + return NETDEV_TX_BUSY;
6719 + }
6720 +
6721 + pfe_hif_send_packet(skb, priv, txQ_num);
6722 +
6723 + hif_tx_unlock(&pfe->hif);
6724 +
6725 + dev->trans_start = jiffies;
6726 +
6727 + // Recycle buffers if a socket's send buffer becomes half full or if the HIF client queue starts filling up
6728 + if (((count = (hif_lib_tx_pending(&priv->client, txQ_num) - HIF_CL_TX_FLUSH_MARK)) > 0)
6729 + || (skb->sk && ((sk_wmem_alloc_get(skb->sk) << 1) > skb->sk->sk_sndbuf)))
6730 + pfe_eth_flush_txQ(priv, txQ_num, 1, count);
6731 +
6732 +#ifdef PFE_ETH_TX_STATS
6733 + priv->was_stopped[txQ_num] = 0;
6734 +#endif
6735 +
6736 + return NETDEV_TX_OK;
6737 +}
6738 +
6739 +/** pfe_eth_select_queue
6740 + *
6741 + */
6742 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
6743 +static u16 pfe_eth_select_queue( struct net_device *dev, struct sk_buff *skb,
6744 + void *accel_priv, select_queue_fallback_t fallback)
6745 +#else
6746 +static u16 pfe_eth_select_queue( struct net_device *dev, struct sk_buff *skb )
6747 +#endif
6748 +{
6749 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
6750 +
6751 + return pfe_eth_get_queuenum(priv, skb);
6752 +}
6753 +
6754 +
6755 +/** pfe_eth_get_stats
6756 + */
6757 +static struct net_device_stats *pfe_eth_get_stats(struct net_device *dev)
6758 +{
6759 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
6760 +
6761 + netif_info(priv, drv, dev, "%s\n", __func__);
6762 +
6763 + return &priv->stats;
6764 +}
6765 +
6766 +
6767 +/** pfe_eth_change_mtu
6768 + */
6769 +static int pfe_eth_change_mtu(struct net_device *dev, int new_mtu)
6770 +{
6771 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
6772 + int oldsize = dev->mtu ;
6773 + int frame_size = new_mtu + ETH_HLEN +4;
6774 +
6775 + netif_info(priv, drv, dev, "%s\n", __func__);
6776 +
6777 + if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
6778 + netif_err(priv, drv, dev, "Invalid MTU setting\n");
6779 + return -EINVAL;
6780 + }
6781 +
6782 + if ((new_mtu > 1500) && (dev->features & NETIF_F_TSO))
6783 + {
6784 + priv->usr_features = dev->features;
6785 + if (dev->features & NETIF_F_TSO)
6786 + {
6787 + netdev_err(dev, "MTU cannot be set to more than 1500 while TSO is enabled. disabling TSO.\n");
6788 + dev->features &= ~(NETIF_F_TSO);
6789 + }
6790 + }
6791 + else if ((dev->mtu > 1500) && (new_mtu <= 1500))
6792 + {
6793 + if (priv->usr_features & NETIF_F_TSO)
6794 + {
6795 + priv->usr_features &= ~(NETIF_F_TSO);
6796 + dev->features |= NETIF_F_TSO;
6797 + netdev_err(dev, "MTU is <= 1500, Enabling TSO feature.\n");
6798 + }
6799 + }
6800 +
6801 + /* Only stop and start the controller if it isn't already
6802 + * stopped, and we changed something */
6803 + if ((oldsize != new_mtu) && (dev->flags & IFF_UP)){
6804 + netdev_err(dev, "Can not change MTU - fast_path must be disabled and ifconfig down must be issued first\n");
6805 +
6806 + return -EINVAL;
6807 + }
6808 +
6809 + dev->mtu = new_mtu;
6810 +
6811 + return 0;
6812 +}
6813 +
6814 +/** pfe_eth_set_mac_address
6815 + */
6816 +static int pfe_eth_set_mac_address(struct net_device *dev, void *addr)
6817 +{
6818 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
6819 + struct sockaddr *sa = addr;
6820 +
6821 + netif_info(priv, drv, dev, "%s\n", __func__);
6822 +
6823 + if (!is_valid_ether_addr(sa->sa_data))
6824 + return -EADDRNOTAVAIL;
6825 +
6826 + memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
6827 +
6828 + gemac_set_laddrN(priv->EMAC_baseaddr, (MAC_ADDR *)dev->dev_addr, 1);
6829 +
6830 + return 0;
6831 +
6832 +}
6833 +
6834 +/** pfe_eth_enet_addr_byte_mac
6835 + */
6836 +int pfe_eth_enet_addr_byte_mac(u8 * enet_byte_addr, MAC_ADDR *enet_addr)
6837 +{
6838 + if ((enet_byte_addr == NULL) || (enet_addr == NULL))
6839 + {
6840 + return -1;
6841 + }
6842 + else
6843 + {
6844 + enet_addr->bottom = enet_byte_addr[0] |
6845 + (enet_byte_addr[1] << 8) |
6846 + (enet_byte_addr[2] << 16) |
6847 + (enet_byte_addr[3] << 24);
6848 + enet_addr->top = enet_byte_addr[4] |
6849 + (enet_byte_addr[5] << 8);
6850 + return 0;
6851 + }
6852 +}
6853 +
6854 +
6855 +
6856 +/** pfe_eth_set_multi
6857 + */
6858 +static void pfe_eth_set_multi(struct net_device *dev)
6859 +{
6860 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
6861 + MAC_ADDR hash_addr; /* hash register structure */
6862 + MAC_ADDR spec_addr; /* specific mac address register structure */
6863 + int result; /* index into hash register to set.. */
6864 + int uc_count = 0;
6865 + struct netdev_hw_addr *ha;
6866 +
6867 + if (dev->flags & IFF_PROMISC) {
6868 + netif_info(priv, drv, dev, "entering promiscuous mode\n");
6869 +
6870 + priv->promisc = 1;
6871 + gemac_enable_copy_all(priv->EMAC_baseaddr);
6872 + } else {
6873 + priv->promisc = 0;
6874 + gemac_disable_copy_all(priv->EMAC_baseaddr);
6875 + }
6876 +
6877 + /* Enable broadcast frame reception if required. */
6878 + if (dev->flags & IFF_BROADCAST) {
6879 + gemac_allow_broadcast(priv->EMAC_baseaddr);
6880 + } else {
6881 + netif_info(priv, drv, dev, "disabling broadcast frame reception\n");
6882 +
6883 + gemac_no_broadcast(priv->EMAC_baseaddr);
6884 + }
6885 +
6886 + if (dev->flags & IFF_ALLMULTI) {
6887 + /* Set the hash to rx all multicast frames */
6888 + hash_addr.bottom = 0xFFFFFFFF;
6889 + hash_addr.top = 0xFFFFFFFF;
6890 + gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
6891 + gemac_enable_multicast(priv->EMAC_baseaddr);
6892 + netdev_for_each_uc_addr(ha, dev) {
6893 + if(uc_count >= MAX_UC_SPEC_ADDR_REG) break;
6894 + pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
6895 + gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr, uc_count + 2);
6896 + uc_count++;
6897 + }
6898 + } else if ((netdev_mc_count(dev) > 0) || (netdev_uc_count(dev))) {
6899 + u8 *addr;
6900 +
6901 + hash_addr.bottom = 0;
6902 + hash_addr.top = 0;
6903 +
6904 + netdev_for_each_mc_addr(ha, dev) {
6905 + addr = ha->addr;
6906 +
6907 + netif_info(priv, drv, dev, "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n",
6908 + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
6909 +
6910 + result = pfe_eth_get_hash(addr);
6911 +
6912 + if (result >= EMAC_HASH_REG_BITS) {
6913 + break;
6914 + } else {
6915 + if (result < 32) {
6916 + hash_addr.bottom |= (1 << result);
6917 + } else {
6918 + hash_addr.top |= (1 << (result - 32));
6919 + }
6920 + }
6921 +
6922 + }
6923 +
6924 + uc_count = -1;
6925 + netdev_for_each_uc_addr(ha, dev) {
6926 + addr = ha->addr;
6927 +
6928 + if(++uc_count < MAX_UC_SPEC_ADDR_REG)
6929 + {
6930 + netdev_info(dev, "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
6931 + addr[0], addr[1], addr[2],
6932 + addr[3], addr[4], addr[5]);
6933 +
6934 + pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
6935 + gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr, uc_count + 2);
6936 + }
6937 + else
6938 + {
6939 + netif_info(priv, drv, dev, "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
6940 + addr[0], addr[1], addr[2],
6941 + addr[3], addr[4], addr[5]);
6942 +
6943 + result = pfe_eth_get_hash(addr);
6944 + if (result >= EMAC_HASH_REG_BITS) {
6945 + break;
6946 + } else {
6947 + if (result < 32)
6948 + hash_addr.bottom |= (1 << result);
6949 + else
6950 + hash_addr.top |= (1 << (result - 32));
6951 + }
6952 +
6953 +
6954 + }
6955 + }
6956 +
6957 + gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
6958 + if(netdev_mc_count(dev))
6959 + gemac_enable_multicast(priv->EMAC_baseaddr);
6960 + else
6961 + gemac_disable_multicast(priv->EMAC_baseaddr);
6962 + }
6963 +
6964 + if(netdev_uc_count(dev) >= MAX_UC_SPEC_ADDR_REG)
6965 + gemac_enable_unicast(priv->EMAC_baseaddr);
6966 + else
6967 + {
6968 + /* Check if there are any specific address HW registers that need
6969 + * to be flushed
6970 + * */
6971 + for(uc_count = netdev_uc_count(dev); uc_count < MAX_UC_SPEC_ADDR_REG; uc_count++)
6972 + gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
6973 +
6974 + gemac_disable_unicast(priv->EMAC_baseaddr);
6975 + }
6976 +
6977 + if (dev->flags & IFF_LOOPBACK) {
6978 + gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
6979 + }
6980 +
6981 + return;
6982 +}
6983 +
6984 +/** pfe_eth_set_features
6985 + */
6986 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
6987 +static int pfe_eth_set_features(struct net_device *dev, netdev_features_t features)
6988 +#else
6989 +static int pfe_eth_set_features(struct net_device *dev, u32 features)
6990 +#endif
6991 +{
6992 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
6993 + int rc = 0;
6994 +
6995 + if (features & NETIF_F_RXCSUM)
6996 + gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
6997 + else
6998 + gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
6999 + return rc;
7000 +}
7001 +
7002 +/** pfe_eth_fix_features
7003 + */
7004 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
7005 +static netdev_features_t pfe_eth_fix_features(struct net_device *dev, netdev_features_t features)
7006 +#else
7007 +static unsigned int pfe_eth_fix_features(struct net_device *dev,u32 features)
7008 +#endif
7009 +{
7010 + struct pfe_eth_priv_s *priv = netdev_priv(dev);
7011 +
7012 + if (dev->mtu > 1500)
7013 + {
7014 + if (features & (NETIF_F_TSO))
7015 + {
7016 + priv->usr_features |= NETIF_F_TSO;
7017 + features &= ~(NETIF_F_TSO);
7018 + netdev_err(dev, "TSO cannot be enabled when the MTU is larger than 1500. Please set the MTU to 1500 or lower first.\n");
7019 + }
7020 + }
7021 +
7022 + return features;
7023 +}
7024 +
7025 +/** pfe_eth_tx_timeout
7026 + */
7027 +void pfe_eth_tx_timeout(unsigned long data )
7028 +{
7029 + struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)data;
7030 +
7031 + netif_info(priv, timer, priv->dev, "%s\n", __func__);
7032 +
7033 + pfe_eth_flush_tx(priv, 0);
7034 +
7035 + priv->tx_timer.expires = jiffies + ( COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ )/1000;
7036 + add_timer(&priv->tx_timer);
7037 +}
7038 +
7039 +/** pfe_eth_fast_tx_timeout
7040 + */
7041 +static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
7042 +{
7043 + struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct pfe_eth_fast_timer, timer);
7044 + struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base, struct pfe_eth_priv_s, fast_tx_timeout);
7045 + struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, fast_tx_timeout->queuenum);
7046 +
7047 + if(netif_tx_queue_stopped(tx_queue)) {
7048 +#ifdef PFE_ETH_TX_STATS
7049 + priv->was_stopped[fast_tx_timeout->queuenum] = 1;
7050 +#endif
7051 + netif_tx_wake_queue(tx_queue);
7052 + }
7053 +
7054 + return HRTIMER_NORESTART;
7055 +}
7056 +
7057 +/** pfe_eth_fast_tx_timeout_init
7058 + */
7059 +static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
7060 +{
7061 + int i;
7062 + for (i = 0; i < emac_txq_cnt; i++) {
7063 + priv->fast_tx_timeout[i].queuenum = i;
7064 + hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7065 + priv->fast_tx_timeout[i].timer.function = pfe_eth_fast_tx_timeout;
7066 + priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
7067 + }
7068 +}
7069 +
/** pfe_eth_rx_skb - reassemble one full packet from HIF RX descriptors.
 * @dev:  owning net device
 * @priv: driver private data
 * @qno:  client RX queue index
 *
 * Pulls descriptors from the HIF library until one flagged CL_DESC_LAST is
 * seen. The first fragment becomes the head skb; subsequent fragments are
 * chained onto skb_shinfo(skb)->frag_list. A partially reassembled packet
 * is parked in priv->skb_inflight[qno] and resumed on the next call.
 *
 * Returns the completed skb, or NULL if the packet is incomplete or was
 * dropped (drop bumps stats.rx_errors).
 */
static struct sk_buff *pfe_eth_rx_skb(struct net_device *dev, struct pfe_eth_priv_s *priv, unsigned int qno)
{
	void *buf_addr;
	unsigned int rx_ctrl;
	unsigned int desc_ctrl = 0;
	struct hif_ipsec_hdr *ipsec_hdr = NULL;
	struct sk_buff *skb;
	struct sk_buff *skb_frag, *skb_frag_last = NULL;
	int length = 0, offset;

	/* Resume a packet left half-assembled by a previous poll, if any. */
	skb = priv->skb_inflight[qno];

	/* Find the current tail of the frag_list so new frags append in order. */
	if (skb && (skb_frag_last = skb_shinfo(skb)->frag_list)) {
		while (skb_frag_last->next)
			skb_frag_last = skb_frag_last->next;
	}

	while (!(desc_ctrl & CL_DESC_LAST)) {

		buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length, &offset, &rx_ctrl, &desc_ctrl, (void **)&ipsec_hdr);
		if (!buf_addr)
			goto incomplete;

#ifdef PFE_ETH_NAPI_STATS
		priv->napi_counters[NAPI_DESC_COUNT]++;
#endif

		/* First frag */
		if (desc_ctrl & CL_DESC_FIRST) {
#if defined(CONFIG_PLATFORM_EMULATION) || defined(CONFIG_PLATFORM_PCI)
			/* Emulation/PCI: copy into a freshly allocated skb. */
			skb = dev_alloc_skb(PFE_BUF_SIZE);
			if (unlikely(!skb)) {
				goto pkt_drop;
			}

			skb_copy_to_linear_data(skb, buf_addr, length + offset);
			kfree(buf_addr);
#else
#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB)
			skb = alloc_skb(length + offset + 32, GFP_ATOMIC);
#else
			/* Zero-copy: wrap the HIF buffer in an skb header. */
			skb = alloc_skb_header(PFE_BUF_SIZE, buf_addr, GFP_ATOMIC);
#endif
			if (unlikely(!skb)) {
				goto pkt_drop;
			}
#endif
			skb_reserve(skb, offset);
#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB)
			__memcpy(skb->data, buf_addr + offset, length);
			if (ipsec_hdr) {
				/* NOTE(review): sah_local appears to be a global
				 * defined elsewhere in this file - verify. */
				sah_local = *(unsigned int *)&ipsec_hdr->sa_handle[0];
			}
			kfree(buf_addr);
#endif
			skb_put(skb, length);
			skb->dev = dev;

			/* Trust the hardware checksum only when the feature is on
			 * and the descriptor says the frame was checksummed. */
			if ((dev->features & NETIF_F_RXCSUM) && (rx_ctrl & HIF_CTRL_RX_CHECKSUMMED))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb_checksum_none_assert(skb);

		} else {

			/* Next frags */
			if (unlikely(!skb)) {
				/* A continuation frag with no head skb means the
				 * HW/driver state is inconsistent; drop it. */
				printk(KERN_ERR "%s: NULL skb_inflight\n", __func__);
				goto pkt_drop;
			}

#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB)
			skb_frag = alloc_skb(length + offset + 32, GFP_ATOMIC);
#else
			skb_frag = alloc_skb_header(PFE_BUF_SIZE, buf_addr, GFP_ATOMIC);
#endif
			if (unlikely(!skb_frag)) {
				kfree(buf_addr);
				goto pkt_drop;
			}

			skb_reserve(skb_frag, offset);
#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB)
			__memcpy(skb_frag->data, buf_addr + offset, length);
			kfree(buf_addr);
#endif
			skb_put(skb_frag, length);

			skb_frag->dev = dev;

			/* Append to the frag list (create it on the first frag). */
			if (skb_shinfo(skb)->frag_list)
				skb_frag_last->next = skb_frag;
			else
				skb_shinfo(skb)->frag_list = skb_frag;

			/* Keep the head skb's accounting consistent with the chain. */
			skb->truesize += skb_frag->truesize;
			skb->data_len += length;
			skb->len += length;
			skb_frag_last = skb_frag;
		}
	}

	/* Whole packet assembled; clear the inflight slot and hand it up. */
	priv->skb_inflight[qno] = NULL;
	return skb;

incomplete:
	/* Ran out of descriptors mid-packet; park what we have so far. */
	priv->skb_inflight[qno] = skb;
	return NULL;

pkt_drop:
	priv->skb_inflight[qno] = NULL;

	/* If a head skb exists it owns all attached buffers; otherwise only
	 * the raw HIF buffer needs freeing. */
	if (skb) {
		kfree_skb(skb);
	} else {
		kfree(buf_addr);
	}

	priv->stats.rx_errors++;

	return NULL;
}
7192 +
7193 +
/** pfe_eth_poll - common NAPI poll body shared by the three per-priority
 * poll handlers.
 * @priv:   driver private data
 * @napi:   the napi_struct being polled
 * @qno:    client RX queue to drain
 * @budget: NAPI budget (max packets to deliver this poll)
 *
 * Delivers up to @budget reassembled packets to the stack. If fewer than
 * @budget packets were processed the queue is considered drained: NAPI is
 * completed and the HIF RX interrupt for this queue is re-enabled.
 *
 * Returns the number of packets processed.
 */
static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi, unsigned int qno, int budget)
{
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;
	int work_done = 0;
	unsigned int len;

	netif_info(priv, intr, priv->dev, "%s\n", __func__);

#ifdef PFE_ETH_NAPI_STATS
	priv->napi_counters[NAPI_POLL_COUNT]++;
#endif

	do {
		skb = pfe_eth_rx_skb(dev, priv, qno);

		if (!skb)
			break;

		/* Capture the length before the stack consumes the skb. */
		len = skb->len;

		/* Packet will be processed */
		skb->protocol = eth_type_trans(skb, dev);

		netif_receive_skb(skb);

		priv->stats.rx_packets++;
		priv->stats.rx_bytes += len;

		dev->last_rx = jiffies;

		work_done++;

#ifdef PFE_ETH_NAPI_STATS
		priv->napi_counters[NAPI_PACKET_COUNT]++;
#endif

	} while (work_done < budget);

	/* If no Rx receive nor cleanup work was done, exit polling mode.
	 * No more netif_running(dev) check is required here , as this is checked in
	 * net/core/dev.c ( 2.6.33.5 kernel specific).
	 */
	if (work_done < budget) {
		napi_complete(napi);

		/* Re-enable RX event delivery for this queue. */
		hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND, qno);
	}
#ifdef PFE_ETH_NAPI_STATS
	else
		priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
#endif

	return work_done;
}
7251 +
7252 +/** pfe_eth_lro_poll
7253 + */
7254 +static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
7255 +{
7256 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s, lro_napi);
7257 +
7258 + netif_info(priv, intr, priv->dev, "%s\n", __func__);
7259 +
7260 + return pfe_eth_poll(priv, napi, 2, budget);
7261 +}
7262 +
7263 +
7264 +/** pfe_eth_low_poll
7265 + */
7266 +static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
7267 +{
7268 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s, low_napi);
7269 +
7270 + netif_info(priv, intr, priv->dev, "%s\n", __func__);
7271 +
7272 + return pfe_eth_poll(priv, napi, 1, budget);
7273 +}
7274 +
7275 +/** pfe_eth_high_poll
7276 + */
7277 +static int pfe_eth_high_poll(struct napi_struct *napi, int budget )
7278 +{
7279 + struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s, high_napi);
7280 +
7281 + netif_info(priv, intr, priv->dev, "%s\n", __func__);
7282 +
7283 + return pfe_eth_poll(priv, napi, 0, budget);
7284 +}
7285 +
/* net_device callbacks for PPFE interfaces; installed on each device via
 * dev->netdev_ops in pfe_eth_init_one(). Handlers not listed here fall
 * back to kernel defaults.
 */
static const struct net_device_ops pfe_netdev_ops = {
	.ndo_open = pfe_eth_open,
	.ndo_stop = pfe_eth_close,
	.ndo_start_xmit = pfe_eth_send_packet,
	.ndo_select_queue = pfe_eth_select_queue,
	.ndo_get_stats = pfe_eth_get_stats,
	.ndo_change_mtu = pfe_eth_change_mtu,
	.ndo_set_mac_address = pfe_eth_set_mac_address,
	.ndo_set_rx_mode = pfe_eth_set_multi,
	.ndo_set_features = pfe_eth_set_features,
	.ndo_fix_features = pfe_eth_fix_features,
	.ndo_validate_addr = eth_validate_addr,
};
7299 +
7300 +
/** pfe_eth_init_one - create and register one GEMAC network interface.
 * @pfe: global PFE context
 * @id:  index into the platform data's GEMAC/MDIO arrays
 *
 * Allocates the net_device, wires up per-GEMAC register bases, timers,
 * MDIO, NAPI contexts and sysfs entries, then registers the device.
 * On failure, everything initialized so far is unwound via the err0..err4
 * labels (in reverse order of acquisition) and a negative errno returned.
 */
static int pfe_eth_init_one( struct pfe *pfe, int id )
{
	struct net_device *dev = NULL;
	struct pfe_eth_priv_s *priv = NULL;
	struct comcerto_eth_platform_data *einfo;
	struct comcerto_mdio_platform_data *minfo;
	struct comcerto_pfe_platform_data *pfe_info;
	int err;

	/* Extract platform data */
#if defined(CONFIG_PLATFORM_EMULATION) || defined(CONFIG_PLATFORM_PCI)
	pfe_info = (struct comcerto_pfe_platform_data *) &comcerto_pfe_pdata;
#else
	pfe_info = (struct comcerto_pfe_platform_data *) pfe->dev->platform_data;
#endif
	if (!pfe_info) {
		printk(KERN_ERR "%s: pfe missing additional platform data\n", __func__);
		err = -ENODEV;
		goto err0;
	}

	einfo = (struct comcerto_eth_platform_data *) pfe_info->comcerto_eth_pdata;

	/* einfo should never be NULL, but no harm in having this check */
	if (!einfo) {
		printk(KERN_ERR "%s: pfe missing additional gemacs platform data\n", __func__);
		err = -ENODEV;
		goto err0;
	}

	minfo = (struct comcerto_mdio_platform_data *) pfe_info->comcerto_mdio_pdata;

	/* minfo should never be NULL, but no harm in having this check */
	if (!minfo) {
		printk(KERN_ERR "%s: pfe missing additional mdios platform data\n", __func__);
		err = -ENODEV;
		goto err0;
	}

	/*
	 * FIXME: Need to check some flag in "einfo" to know whether
	 * GEMAC is enabled Or not.
	 */

	/* Create an ethernet device instance */
	dev = alloc_etherdev_mq(sizeof (*priv), emac_txq_cnt);

	if (!dev) {
		printk(KERN_ERR "%s: gemac %d device allocation failed\n", __func__, einfo[id].gem_id);
		err = -ENOMEM;
		goto err0;
	}

	priv = netdev_priv(dev);
	priv->dev = dev;
	priv->id = einfo[id].gem_id;
	priv->pfe = pfe;

#if defined(CONFIG_PLATFORM_C2000)
	/* get gemac tx clock */
	priv->gemtx_clk = clk_get(NULL, "gemtx");

	if (IS_ERR(priv->gemtx_clk)) {
		printk(KERN_ERR "%s: Unable to get the clock for gemac %d\n", __func__, priv->id);
		err = -ENODEV;
		goto err1;
	}
#endif

	pfe->eth.eth_priv[id] = priv;

	/* Set the info in the priv to the current info */
	priv->einfo = &einfo[id];
	priv->EMAC_baseaddr = cbus_emac_base[id];
	/* PHY accesses always go through EMAC0's MDIO block. */
	priv->PHY_baseaddr = cbus_emac_base[0];
	priv->mdio_muxval = einfo[id].mdio_muxval;
	priv->GPI_baseaddr = cbus_gpi_base[id];

	/* FIXME : For now TMU queue numbers hardcoded, later should be taken from pfe.h */
#define HIF_GEMAC_TMUQ_BASE	6
	/* Each GEMAC owns a consecutive low/high TMU queue pair. */
	priv->low_tmuQ	=  HIF_GEMAC_TMUQ_BASE + (id * 2);
	priv->high_tmuQ	=  priv->low_tmuQ + 1;

	spin_lock_init(&priv->lock);
	/* NOTE(review): fields are assigned before init_timer(); verify
	 * init_timer() preserves .data/.function/.expires on this kernel. */
	priv->tx_timer.data = (unsigned long)priv;
	priv->tx_timer.function = pfe_eth_tx_timeout;
	priv->tx_timer.expires = jiffies + ( COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ )/1000;
	init_timer(&priv->tx_timer);

	pfe_eth_fast_tx_timeout_init(priv);

	/* Copy the station address into the dev structure, */
	memcpy(dev->dev_addr, einfo[id].mac_addr, ETH_ALEN);

	/* Initialize mdio */
	if (minfo[id].enabled) {
		if ((err = pfe_eth_mdio_init(priv, &minfo[id]))) {
			netdev_err(dev, "%s: pfe_eth_mdio_init() failed\n", __func__);
			goto err2;
		}
	}

	dev->mtu = 1500;

	/* supported features */
	dev->hw_features = NETIF_F_SG;
	/* Enable after checksum offload is validated
	dev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |  NETIF_F_IPV6_CSUM |
		NETIF_F_SG; */

	/* enabled by default */
	dev->features = dev->hw_features;

	priv->usr_features = dev->features;

	dev->netdev_ops = &pfe_netdev_ops;

	dev->ethtool_ops = &pfe_ethtool_ops;

	/* Enable basic messages by default */
	priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK | NETIF_MSG_PROBE;

	netif_napi_add(dev, &priv->low_napi, pfe_eth_low_poll, HIF_RX_POLL_WEIGHT - 16);
	netif_napi_add(dev, &priv->high_napi, pfe_eth_high_poll, HIF_RX_POLL_WEIGHT - 16);
	netif_napi_add(dev, &priv->lro_napi, pfe_eth_lro_poll, HIF_RX_POLL_WEIGHT - 16);

	err = register_netdev(dev);

	if (err) {
		netdev_err(dev, "register_netdev() failed\n");
		goto err3;
	}

	if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) {
		err = pfe_phy_init(dev);
		if (err) {
			netdev_err(dev, "%s: pfe_phy_init() failed\n", __func__);
			goto err4;
		}
	}


	/* Create all the sysfs files */
	if(pfe_eth_sysfs_init(dev))
		goto err4;

	netif_info(priv, probe, dev, "%s: created interface, baseaddr: %p\n", __func__, priv->EMAC_baseaddr);

	return 0;
err4:
	unregister_netdev(dev);
err3:
	/* NOTE(review): reached even when mdio was not enabled for this id;
	 * confirm pfe_eth_mdio_exit() tolerates a NULL mii_bus. */
	pfe_eth_mdio_exit(priv->mii_bus);
err2:
#if defined(CONFIG_PLATFORM_C2000)
	clk_put(priv->gemtx_clk);
err1:
#endif
	free_netdev(priv->dev);

err0:
	return err;
}
7467 +
7468 +/** pfe_eth_init
7469 + */
7470 +int pfe_eth_init(struct pfe *pfe)
7471 +{
7472 + int ii = 0;
7473 + int err;
7474 +
7475 + printk(KERN_INFO "%s\n", __func__);
7476 +
7477 + cbus_emac_base[0] = EMAC1_BASE_ADDR;
7478 + cbus_emac_base[1] = EMAC2_BASE_ADDR;
7479 +
7480 + cbus_gpi_base[0] = EGPI1_BASE_ADDR;
7481 + cbus_gpi_base[1] = EGPI2_BASE_ADDR;
7482 +
7483 +#if !defined(CONFIG_PLATFORM_LS1012A)
7484 + cbus_emac_base[2] = EMAC3_BASE_ADDR;
7485 + cbus_gpi_base[2] = EGPI3_BASE_ADDR;
7486 +#endif
7487 +
7488 + for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
7489 + if ((err = pfe_eth_init_one(pfe, ii)))
7490 + goto err0;
7491 + }
7492 +
7493 + return 0;
7494 +
7495 +err0:
7496 + while(ii--){
7497 + pfe_eth_exit_one( pfe->eth.eth_priv[ii] );
7498 + }
7499 +
7500 + /* Register three network devices in the kernel */
7501 + return err;
7502 +}
7503 +
/** pfe_eth_exit_one - tear down one GEMAC interface.
 * @priv: private data of the interface to destroy
 *
 * Reverses pfe_eth_init_one(): removes sysfs entries, releases the TX
 * clock (C2000 only), unregisters the net_device, shuts down MDIO and
 * PHY, and finally frees the device. The order mirrors initialization
 * in reverse; do not reorder.
 */
static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
{
	netif_info(priv, probe, priv->dev, "%s\n", __func__);

	pfe_eth_sysfs_exit(priv->dev);

#if defined(CONFIG_PLATFORM_C2000)
	clk_put(priv->gemtx_clk);
#endif

	unregister_netdev(priv->dev);

	pfe_eth_mdio_exit(priv->mii_bus);

	/* Only detach the PHY if one was attached at init time. */
	if (!(priv->einfo->phy_flags & GEMAC_NO_PHY))
		pfe_phy_exit(priv->dev);

	free_netdev(priv->dev);
}
7525 +
7526 +/** pfe_eth_exit
7527 + */
7528 +void pfe_eth_exit(struct pfe *pfe)
7529 +{
7530 + int ii;
7531 +
7532 + printk(KERN_INFO "%s\n", __func__);
7533 +
7534 + for(ii = 0; ii < NUM_GEMAC_SUPPORT; ii++ ) {
7535 + /*
7536 + * FIXME: Need to check some flag in "einfo" to know whether
7537 + * GEMAC is enabled Or not.
7538 + */
7539 +
7540 + pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
7541 + }
7542 +}
7543 +
7544 --- /dev/null
7545 +++ b/drivers/staging/fsl_ppfe/pfe_eth.h
7546 @@ -0,0 +1,384 @@
7547 +/*
7548 + *
7549 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
7550 + *
7551 + * This program is free software; you can redistribute it and/or modify
7552 + * it under the terms of the GNU General Public License as published by
7553 + * the Free Software Foundation; either version 2 of the License, or
7554 + * (at your option) any later version.
7555 + *
7556 + * This program is distributed in the hope that it will be useful,
7557 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
7558 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7559 + * GNU General Public License for more details.
7560 + *
7561 + * You should have received a copy of the GNU General Public License
7562 + * along with this program; if not, write to the Free Software
7563 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
7564 + */
7565 +
7566 +#ifndef _PFE_ETH_H_
7567 +#define _PFE_ETH_H_
7568 +#include <linux/kernel.h>
7569 +#include <linux/netdevice.h>
7570 +#include <linux/etherdevice.h>
7571 +#include <linux/ethtool.h>
7572 +#include <linux/mii.h>
7573 +#include <linux/phy.h>
7574 +#include <linux/clk.h>
7575 +#include <linux/interrupt.h>
7576 +#include <linux/time.h>
7577 +
7578 +#define PFE_ETH_NAPI_STATS
7579 +#define PFE_ETH_TX_STATS
7580 +
7581 +#define PFE_ETH_FRAGS_MAX (65536/HIF_RX_PKT_MIN_SIZE)
7582 +#define LRO_LEN_COUNT_MAX 32
7583 +#define LRO_NB_COUNT_MAX 32
7584 +
7585 +#if defined(CONFIG_PLATFORM_PCI) || defined(CONFIG_PLATFORM_EMULATION) || defined(CONFIG_PLATFORM_LS1012A)
7586 +
7587 +#define CONFIG_COMCERTO_GEMAC 1
7588 +
7589 +#define CONFIG_COMCERTO_USE_MII 1
7590 +#define CONFIG_COMCERTO_USE_RMII 2
7591 +#define CONFIG_COMCERTO_USE_GMII 4
7592 +#define CONFIG_COMCERTO_USE_RGMII 8
7593 +#define CONFIG_COMCERTO_USE_SGMII 16
7594 +
7595 +#define GEMAC_SW_CONF (1 << 8) | (1 << 11) // GEMAC configured by SW
7596 +#define GEMAC_PHY_CONF 0 // GEMAC configured by phy lines (not for MII/GMII)
7597 +#define GEMAC_SW_FULL_DUPLEX (1 << 9)
7598 +#define GEMAC_SW_SPEED_10M (0 << 12)
7599 +#define GEMAC_SW_SPEED_100M (1 << 12)
7600 +#define GEMAC_SW_SPEED_1G (2 << 12)
7601 +
7602 +#define GEMAC_NO_PHY (1 << 0) // set if no phy connected to MAC (ex ethernet switch). In this case use MAC fixed configuration
7603 +#define GEMAC_PHY_RGMII_ADD_DELAY (1 << 1)
7604 +
7605 +/* gemac to interface name assignment */
7606 +#define GEMAC0_ITF_NAME "eth5"
7607 +#define GEMAC1_ITF_NAME "eth6"
7608 +#define GEMAC2_ITF_NAME "eth7"
7609 +
7610 +#define GEMAC0_MAC { 0x00, 0xED, 0xCD, 0xEF, 0xAA, 0xCC }
7611 +#define GEMAC1_MAC { 0x00, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E }
7612 +
/* Per-GEMAC board/platform description consumed by pfe_eth_init_one(). */
struct comcerto_eth_platform_data {
	/* device specific information */
	u32 device_flags;	/* CONFIG_COMCERTO_GEMAC etc. */
	char name[16];		/* interface name, e.g. "eth5" */


	/* board specific information */
	u32 mii_config;		/* CONFIG_COMCERTO_USE_* MII mode selector */
	u32 gemac_mode;		/* GEMAC_SW_* speed/duplex configuration */
	u32 phy_flags;		/* GEMAC_NO_PHY / GEMAC_PHY_RGMII_ADD_DELAY */
	u32 gem_id;		/* hardware GEMAC index */
	u32 bus_id;		/* MDIO bus id of the attached PHY */
	u32 phy_id;		/* PHY address on that bus */
	u32 mdio_muxval;	/* MDIO mux setting for this GEMAC */
	u8 mac_addr[ETH_ALEN];	/* station MAC address */
};
7629 +
/* Per-MDIO-bus platform description consumed by pfe_eth_mdio_init(). */
struct comcerto_mdio_platform_data {
	int enabled;	/* non-zero: register this MDIO bus */
	int irq[32];	/* per-PHY interrupt numbers (PHY_POLL to poll) */
	u32 phy_mask;	/* bitmask of PHY addresses to skip during scan */
	int mdc_div;	/* MDC clock divider */
};
7636 +
/* Aggregate platform data: one entry per possible GEMAC / MDIO bus. */
struct comcerto_pfe_platform_data
{
	struct comcerto_eth_platform_data comcerto_eth_pdata[3];
	struct comcerto_mdio_platform_data comcerto_mdio_pdata[3];
};
7642 +#if !defined(CONFIG_PLATFORM_LS1012A)
/* Built-in fallback platform data used when no platform_data is supplied
 * by the board (emulation/PCI builds); not used on LS1012A.
 */
static struct comcerto_pfe_platform_data comcerto_pfe_pdata = {
	.comcerto_eth_pdata[0] = {
		.name = GEMAC0_ITF_NAME,
		.device_flags = CONFIG_COMCERTO_GEMAC,
		.mii_config = CONFIG_COMCERTO_USE_MII,
		.gemac_mode = GEMAC_SW_CONF | GEMAC_SW_FULL_DUPLEX | GEMAC_SW_SPEED_100M,
#if defined(CONFIG_PLATFORM_EMULATION) || defined(CONFIG_PLATFORM_PCI)
		.phy_flags = GEMAC_NO_PHY,
#else
		.phy_flags = GEMAC_PHY_RGMII_ADD_DELAY,
#endif
		.bus_id = 0,
		.phy_id = 0,
		.gem_id = 0,
		.mac_addr = (u8[])GEMAC0_MAC,
	},

	.comcerto_eth_pdata[1] = {
		.name = GEMAC1_ITF_NAME,
		.device_flags = CONFIG_COMCERTO_GEMAC,
		.mii_config = CONFIG_COMCERTO_USE_RGMII,
		.gemac_mode = GEMAC_SW_CONF | GEMAC_SW_FULL_DUPLEX | GEMAC_SW_SPEED_1G,
		.phy_flags = GEMAC_NO_PHY,
		.gem_id = 1,
		.mac_addr = (u8[])GEMAC1_MAC,
	},

	/* GEMAC2 left mostly unconfigured here. */
	.comcerto_eth_pdata[2] = {
		.name = GEMAC2_ITF_NAME,
	},

	/* Single MDIO bus, polled PHY at address 0 (all others masked). */
	.comcerto_mdio_pdata[0] = {
		.enabled = 1,
		.phy_mask = 0xFFFFFFFE,
		.mdc_div = 96,
		.irq = {
			[0] = PHY_POLL,
		},
	},
};
7683 +#endif
7684 +#endif
7685 +
7686 +#if defined(CONFIG_PLATFORM_LS1012A)
7687 +#define NUM_GEMAC_SUPPORT 2
7688 +#define DRV_NAME "ls1012a-geth"
7689 +#else
7690 +#define NUM_GEMAC_SUPPORT 3
7691 +#define DRV_NAME "c2000-geth"
7692 +#endif
7693 +#define COMCERTO_INFOSTR_LEN 32
7694 +#define COMCERTO_TX_RECOVERY_TIMEOUT_MS 500
7695 +#define COMCERTO_TX_FAST_RECOVERY_TIMEOUT_MS 3
7696 +#define TX_POLL_TIMEOUT_MS 1000
7697 +
7698 +#define EMAC_TXQ_CNT 16
7699 +#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT)
7700 +
7701 +#define JUMBO_FRAME_SIZE 10258
7702 +/**
7703 + * Client Tx queue threshold, for txQ flush condition.
7704 + * It must be smaller than the queue size (in case we ever change it in the future).
7705 + */
7706 +#define HIF_CL_TX_FLUSH_MARK 32
7707 +
7708 +/**
7709 + * Max number of TX resources (HIF descriptors or skbs) that will be released
7710 + * in a single go during batch recycling.
7711 + * Should be lower than the flush mark so the SW can provide the HW with a
7712 + * continuous stream of packets instead of bursts.
7713 + */
7714 +#define TX_FREE_MAX_COUNT 16
7715 +#define EMAC_RXQ_CNT 3
7716 +#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT /* make sure clients can receive a full burst of packets */
7717 +#define EMAC_RMON_TXBYTES_POS 0x00
7718 +#define EMAC_RMON_RXBYTES_POS 0x14
7719 +
7720 +#define EMAC_QUEUENUM_MASK (emac_txq_cnt - 1)
7721 +#define EMAC_MDIO_TIMEOUT 1000
7722 +#define MAX_UC_SPEC_ADDR_REG 31
7723 +
7724 +
/* The set of statistics registers implemented in the Cadence MAC.
 * The statistics registers implemented are a subset of all the statistics
 * available, but contains all the compulsory ones.
 * For full descriptions on the registers, refer to the Cadence MAC programmers
 * guide or the IEEE 802.3 specifications.
 *
 * Field order mirrors the hardware register layout (see EMAC_RMON_*_POS);
 * do not reorder fields.
 */
struct gemac_stats{
	u32 octets_tx_bot;	/* Lower 32-bits for number of octets tx'd */
	u32 octets_tx_top;	/* Upper 16-bits for number of octets tx'd */
	u32 frames_tx;		/* Number of frames transmitted OK */
	u32 broadcast_tx;	/* Number of broadcast frames transmitted */
	u32 multicast_tx;	/* Number of multicast frames transmitted */
	u32 pause_tx;		/* Number of pause frames transmitted. */
	u32 frame64_tx;		/* Number of 64byte frames transmitted */
	u32 frame65_127_tx;	/* Number of 65-127 byte frames transmitted */
	u32 frame128_255_tx;	/* Number of 128-255 byte frames transmitted */
	u32 frame256_511_tx;	/* Number of 256-511 byte frames transmitted */
	u32 frame512_1023_tx;	/* Number of 512-1023 byte frames transmitted */
	u32 frame1024_1518_tx;	/* Number of 1024-1518 byte frames transmitted*/
	u32 frame1519_tx;	/* Number of frames greater than 1518 bytes tx*/
	u32 tx_urun;		/* Transmit underrun errors due to DMA */
	u32 single_col;		/* Number of single collision frames */
	u32 multi_col;		/* Number of multi collision frames */
	u32 excess_col;		/* Number of excessive collision frames. */
	u32 late_col;		/* Collisions occurring after slot time */
	u32 def_tx;		/* Frames deferred due to crs */
	u32 crs_errors;		/* Errors caused by crs not being asserted. */
	u32 octets_rx_bot;	/* Lower 32-bits for number of octets rx'd */
	u32 octets_rx_top;	/* Upper 16-bits for number of octets rx'd */
	u32 frames_rx;		/* Number of frames received OK */
	u32 broadcast_rx;	/* Number of broadcast frames received */
	u32 multicast_rx;	/* Number of multicast frames received */
	u32 pause_rx;		/* Number of pause frames received. */
	u32 frame64_rx;		/* Number of 64byte frames received */
	u32 frame65_127_rx;	/* Number of 65-127 byte frames received */
	u32 frame128_255_rx;	/* Number of 128-255 byte frames received */
	u32 frame256_511_rx;	/* Number of 256-511 byte frames received */
	u32 frame512_1023_rx;	/* Number of 512-1023 byte frames received */
	u32 frame1024_1518_rx;	/* Number of 1024-1518 byte frames received*/
	u32 frame1519_rx;	/* Number of frames greater than 1518 bytes rx*/
	u32 usize_frames;	/* Frames received less than min of 64 bytes */
	u32 excess_length;	/* Number of excessive length frames rx */
	u32 jabbers;		/* Excessive length + crc or align errors. */
	u32 fcs_errors;		/* Number of frames received with crc errors */
	u32 length_check_errors;/* Number of frames with incorrect length */
	u32 rx_symbol_errors;	/* Number of times rx_er asserted during rx */
	u32 align_errors;	/* Frames received without integer no. bytes */
	u32 rx_res_errors;	/* Number of times buffers ran out during rx */
	u32 rx_orun;		/* Receive overrun errors due to DMA */
	u32 ip_cksum;		/* IP header checksum errors */
	u32 tcp_cksum;		/* TCP checksum errors */
	u32 udp_cksum;		/* UDP checksum errors */
};
7778 +
7779 +#define EMAC_REG_SPACE sizeof(struct gemac_reg)
7780 +#define EMAC_RMON_LEN (sizeof(struct gemac_stats)/sizeof(u32))
7781 +
7782 +
/* Per-TX-queue fast wakeup timer; the callback recovers the owning
 * pfe_eth_priv_s through the 'base' pointer (see pfe_eth_fast_tx_timeout).
 */
struct pfe_eth_fast_timer {
	int queuenum;		/* TX queue this timer serves */
	struct hrtimer timer;	/* the hrtimer itself */
	void * base;		/* points at priv->fast_tx_timeout array */
};
7788 +
/* Per-interface private state, stored in netdev_priv() of each GEMAC's
 * net_device.
 */
typedef struct  pfe_eth_priv_s
{
	struct pfe 		*pfe;		/* back-pointer to global PFE context */
	struct hif_client_s	client;		/* HIF client handle for RX/TX */
	struct napi_struct	lro_napi;	/* NAPI context, client queue 2 */
	struct napi_struct	low_napi;	/* NAPI context, client queue 1 */
	struct napi_struct	high_napi;	/* NAPI context, client queue 0 */
	int			low_tmuQ;	/* low-priority TMU queue number */
	int			high_tmuQ;	/* high-priority TMU queue number */
	struct net_device_stats stats;
	struct net_device	*dev;
	int			id;		/* hardware GEMAC id */
	int			promisc;	/* promiscuous mode currently on */
	unsigned int		msg_enable;	/* netif message level bitmask */
	unsigned int 		usr_features;	/* features requested by the user */

	spinlock_t 		lock;
	unsigned int 		event_status;
	int 			irq;
	void*			EMAC_baseaddr;	/* this GEMAC's register base */
	void*			PHY_baseaddr; /* This points to the EMAC base from where we access PHY */
	void*			GPI_baseaddr;	/* this GEMAC's GPI register base */
	int			mdio_muxval;
	/* PHY stuff */
	struct phy_device	*phydev;
	int 			oldspeed;	/* cached link state for change detection */
	int 			oldduplex;
	int 			oldlink;
	/* mdio info */
	int 			mdc_div;
	struct mii_bus		*mii_bus;
	struct clk		*gemtx_clk;	/* GEMAC TX clock (C2000 only) */
	int			wol;		/* wake-on-LAN configuration */

	int 			default_priority;
	struct timer_list	tx_timer;	/* slow TX recovery timer */
	struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT];

	struct comcerto_eth_platform_data *einfo;
	/* partially reassembled RX packet per client queue */
	struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6];

#ifdef PFE_ETH_LRO_STATS
	unsigned int lro_len_counters[LRO_LEN_COUNT_MAX];
	/* TODO: change to exact max number when RX scatter done */
	unsigned int lro_nb_counters[LRO_NB_COUNT_MAX];
#endif


#ifdef PFE_ETH_TX_STATS
	unsigned int stop_queue_total[EMAC_TXQ_CNT];
	unsigned int stop_queue_hif[EMAC_TXQ_CNT];
	unsigned int stop_queue_hif_client[EMAC_TXQ_CNT];
	unsigned int stop_queue_credit[EMAC_TXQ_CNT];
	unsigned int clean_fail[EMAC_TXQ_CNT];
	unsigned int was_stopped[EMAC_TXQ_CNT];
#endif

#ifdef PFE_ETH_NAPI_STATS
	unsigned int napi_counters[NAPI_MAX_COUNT];
#endif
	unsigned int frags_inflight[EMAC_RXQ_CNT + 6];

}pfe_eth_priv_t;
7851 +
/* Container for all GEMAC private-data pointers, embedded in struct pfe. */
struct pfe_eth {
	struct pfe_eth_priv_s *eth_priv[3];	/* indexed by GEMAC id */
};
7855 +
7856 +int pfe_eth_init(struct pfe *pfe);
7857 +void pfe_eth_exit(struct pfe *pfe);
7858 +int pfe_eth_suspend(struct net_device *dev);
7859 +int pfe_eth_resume(struct net_device *dev);
7860 +int pfe_eth_mdio_reset(struct mii_bus *bus);
7861 +
7862 +/** pfe_compute_csum
7863 + *
7864 + */
7865 +static int inline pfe_compute_csum(struct sk_buff *skb)
7866 +{
7867 + struct skb_shared_info *sh;
7868 + unsigned int nr_frags;
7869 + skb_frag_t *f;
7870 + u32 csum = 0;
7871 + int i;
7872 + int len;
7873 +
7874 + /* Make sure that no intermediate buffers/fragments are odd byte aligned */
7875 + if (skb_is_nonlinear(skb)) {
7876 + int linearize = 0;
7877 +
7878 + sh = skb_shinfo(skb);
7879 + nr_frags = sh->nr_frags;
7880 + len = skb_headlen(skb) - skb_transport_offset(skb);
7881 +
7882 + if (len & 0x1) {
7883 + linearize = 1;
7884 + //printk("#1 Odd length %d\n", len);
7885 + }
7886 + else {
7887 + for (i = 0; i < nr_frags - 1; i++) {
7888 + f = &sh->frags[i];
7889 + len = skb_frag_size(f);
7890 +
7891 + if (len & 0x1) {
7892 + linearize = 1;
7893 + //printk("#2 %d Odd length %d\n", i, len);
7894 + break;
7895 + }
7896 + }
7897 + }
7898 +
7899 + if (linearize)
7900 + if (skb_linearize(skb))
7901 + return -1;
7902 + }
7903 +
7904 + /* Compute checksum */
7905 + if (!skb_is_nonlinear(skb)) {
7906 + *(u16*)(skb_transport_header(skb) + skb->csum_offset) = csum_fold(csum_partial(skb_transport_header(skb), skb->len - skb_transport_offset(skb), 0));
7907 + }
7908 + else {
7909 + sh = skb_shinfo(skb);
7910 + nr_frags = sh->nr_frags;
7911 +
7912 + if (nr_frags) {
7913 + csum = csum_partial(skb_transport_header(skb), skb_headlen(skb) - skb_transport_offset(skb), 0);
7914 +
7915 + for (i = 0; i < nr_frags - 1; i++) {
7916 + f = &sh->frags[i];
7917 + csum = csum_partial(skb_frag_address(f), skb_frag_size(f), csum);
7918 + }
7919 +
7920 + f = &sh->frags[i];
7921 + *(u16*)(skb_transport_header(skb) + skb->csum_offset) = csum_fold(csum_partial(skb_frag_address(f), skb_frag_size(f), csum));
7922 + }
7923 + }
7924 +
7925 + return 0;
7926 +}
7927 +
7928 +
7929 +
7930 +#endif /* _PFE_ETH_H_ */
7931 --- /dev/null
7932 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
7933 @@ -0,0 +1,322 @@
7934 +/*
7935 + *
7936 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
7937 + *
7938 + * This program is free software; you can redistribute it and/or modify
7939 + * it under the terms of the GNU General Public License as published by
7940 + * the Free Software Foundation; either version 2 of the License, or
7941 + * (at your option) any later version.
7942 + *
7943 + * This program is distributed in the hope that it will be useful,
7944 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
7945 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7946 + * GNU General Public License for more details.
7947 + *
7948 + * You should have received a copy of the GNU General Public License
7949 + * along with this program; if not, write to the Free Software
7950 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
7951 + */
7952 +
7953 +/** @file
7954 + * Contains all the functions to handle parsing and loading of PE firmware files.
7955 + */
7956 +#include <linux/firmware.h>
7957 +
7958 +#include "pfe_mod.h"
7959 +#include "pfe_firmware.h"
7960 +#include "pfe/pfe.h"
7961 +
7962 +static Elf32_Shdr * get_elf_section_header(const struct firmware *fw, const char *section)
7963 +{
7964 + Elf32_Ehdr *elf_hdr = (Elf32_Ehdr *)fw->data;
7965 + Elf32_Shdr *shdr, *shdr_shstr;
7966 + Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
7967 + Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
7968 + Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
7969 + Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
7970 + Elf32_Off shstr_offset;
7971 + Elf32_Word sh_name;
7972 + const char *name;
7973 + int i;
7974 +
7975 + /* Section header strings */
7976 + shdr_shstr = (Elf32_Shdr *)(fw->data + e_shoff + e_shstrndx * e_shentsize);
7977 + shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
7978 +
7979 + for (i = 0; i < e_shnum; i++) {
7980 + shdr = (Elf32_Shdr *)(fw->data + e_shoff + i * e_shentsize);
7981 +
7982 + sh_name = be32_to_cpu(shdr->sh_name);
7983 +
7984 + name = (const char *)(fw->data + shstr_offset + sh_name);
7985 +
7986 + if (!strcmp(name, section))
7987 + return shdr;
7988 + }
7989 +
7990 + printk(KERN_ERR "%s: didn't find section %s\n", __func__, section);
7991 +
7992 + return NULL;
7993 +}
7994 +
7995 +static unsigned long get_elf_section(const struct firmware *fw, const char *section)
7996 +{
7997 + Elf32_Shdr *shdr = get_elf_section_header(fw, section);
7998 +
7999 + if (shdr)
8000 + return be32_to_cpu(shdr->sh_addr);
8001 + else
8002 + return -1;
8003 +}
8004 +
8005 +#if defined(CFG_DIAGS)
8006 +static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info *diags_info)
8007 +{
8008 + Elf32_Shdr *shdr;
8009 + unsigned long offset, size;
8010 +
8011 + shdr = get_elf_section_header(fw, ".pfe_diags_str");
8012 + if (shdr)
8013 + {
8014 + offset = be32_to_cpu(shdr->sh_offset);
8015 + size = be32_to_cpu(shdr->sh_size);
8016 + diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
8017 + diags_info->diags_str_size = size;
8018 + diags_info->diags_str_array = pfe_kmalloc(size, GFP_KERNEL);
8019 + memcpy(diags_info->diags_str_array, fw->data+offset, size);
8020 +
8021 + return 0;
8022 + } else
8023 + {
8024 + return -1;
8025 + }
8026 +}
8027 +#endif
8028 +
8029 +static void pfe_check_version_info(const struct firmware *fw)
8030 +{
8031 + static char *version = NULL;
8032 +
8033 + Elf32_Shdr *shdr = get_elf_section_header(fw, ".version");
8034 +
8035 + if (shdr)
8036 + {
8037 + if(!version)
8038 + {
8039 + /* this is the first fw we load, use its version string as reference (whatever it is) */
8040 + version = (char *)(fw->data + be32_to_cpu(shdr->sh_offset));
8041 +
8042 + printk(KERN_INFO "PFE binary version: %s\n", version);
8043 + }
8044 + else
8045 + {
8046 + /* already have loaded at least one firmware, check sequence can start now */
8047 + if(strcmp(version, (char *)(fw->data + be32_to_cpu(shdr->sh_offset))))
8048 + {
8049 + printk(KERN_INFO "WARNING: PFE firmware binaries from incompatible version\n");
8050 + }
8051 + }
8052 + }
8053 + else
8054 + {
8055 + /* version cannot be verified, a potential issue that should be reported */
8056 + printk(KERN_INFO "WARNING: PFE firmware binaries from incompatible version\n");
8057 + }
8058 +}
8059 +
8060 +/** PFE elf firmware loader.
8061 +* Loads an elf firmware image into a list of PE's (specified using a bitmask)
8062 +*
8063 +* @param pe_mask Mask of PE id's to load firmware to
8064 +* @param fw Pointer to the firmware image
8065 +*
8066 +* @return 0 on success, a negative value on error
8067 +*
8068 +*/
8069 +int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
8070 +{
8071 + Elf32_Ehdr *elf_hdr = (Elf32_Ehdr *)fw->data;
8072 + Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
8073 + Elf32_Shdr *shdr = (Elf32_Shdr *) (fw->data + be32_to_cpu(elf_hdr->e_shoff));
8074 + int id, section;
8075 + int rc;
8076 +
8077 + printk(KERN_INFO "%s\n", __func__);
8078 +
8079 + /* Some sanity checks */
8080 + if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG))
8081 + {
8082 + printk(KERN_ERR "%s: incorrect elf magic number\n", __func__);
8083 + return -EINVAL;
8084 + }
8085 +
8086 + if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32)
8087 + {
8088 + printk(KERN_ERR "%s: incorrect elf class(%x)\n", __func__, elf_hdr->e_ident[EI_CLASS]);
8089 + return -EINVAL;
8090 + }
8091 +
8092 + if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB)
8093 + {
8094 + printk(KERN_ERR "%s: incorrect elf data(%x)\n", __func__, elf_hdr->e_ident[EI_DATA]);
8095 + return -EINVAL;
8096 + }
8097 +
8098 + if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC)
8099 + {
8100 + printk(KERN_ERR "%s: incorrect elf file type(%x)\n", __func__, be16_to_cpu(elf_hdr->e_type));
8101 + return -EINVAL;
8102 + }
8103 +
8104 + for (section = 0; section < sections; section++, shdr++)
8105 + {
8106 + if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC | SHF_EXECINSTR)))
8107 + continue;
8108 +
8109 + for (id = 0; id < MAX_PE; id++)
8110 + if (pe_mask & (1 << id))
8111 + {
8112 + rc = pe_load_elf_section(id, fw->data, shdr, pfe->dev);
8113 + if (rc < 0)
8114 + goto err;
8115 + }
8116 + }
8117 +
8118 + pfe_check_version_info(fw);
8119 +
8120 + return 0;
8121 +
8122 +err:
8123 + return rc;
8124 +}
8125 +
8126 +
8127 +/** PFE firmware initialization.
8128 +* Loads different firmware files from filesystem.
8129 +* Initializes PE IMEM/DMEM and UTIL-PE DDR
8130 +* Initializes control path symbol addresses (by looking them up in the elf firmware files)
8131 +* Takes PE's out of reset
8132 +*
8133 +* @return 0 on success, a negative value on error
8134 +*
8135 +*/
8136 +int pfe_firmware_init(struct pfe *pfe)
8137 +{
8138 + const struct firmware *class_fw, *tmu_fw;
8139 + int rc = 0;
8140 +#if !defined(CONFIG_UTIL_DISABLED)
8141 + const char* util_fw_name;
8142 + const struct firmware *util_fw;
8143 +#endif
8144 +
8145 + printk(KERN_INFO "%s\n", __func__);
8146 +
8147 + if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
8148 + printk(KERN_ERR "%s: request firmware %s failed\n", __func__, CLASS_FIRMWARE_FILENAME);
8149 + rc = -ETIMEDOUT;
8150 + goto err0;
8151 + }
8152 +
8153 + if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
8154 + printk(KERN_ERR "%s: request firmware %s failed\n", __func__, TMU_FIRMWARE_FILENAME);
8155 + rc = -ETIMEDOUT;
8156 + goto err1;
8157 + }
8158 +#if !defined(CONFIG_UTIL_DISABLED)
8159 +#if defined(CONFIG_PLATFORM_C2000)
8160 + util_fw_name = (system_rev == 0) ? UTIL_REVA0_FIRMWARE_FILENAME : UTIL_FIRMWARE_FILENAME;
8161 +#else
8162 + util_fw_name = UTIL_FIRMWARE_FILENAME;
8163 +#endif
8164 +
8165 + if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
8166 + printk(KERN_ERR "%s: request firmware %s failed\n", __func__, util_fw_name);
8167 + rc = -ETIMEDOUT;
8168 + goto err2;
8169 + }
8170 +#endif
8171 + rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
8172 + if (rc < 0) {
8173 + printk(KERN_ERR "%s: class firmware load failed\n", __func__);
8174 + goto err3;
8175 + }
8176 +
8177 + pfe->ctrl.class_dmem_sh = get_elf_section(class_fw, ".dmem_sh");
8178 + pfe->ctrl.class_pe_lmem_sh = get_elf_section(class_fw, ".pe_lmem_sh");
8179 +
8180 +#if defined(CFG_DIAGS)
8181 + rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
8182 + if (rc < 0) {
8183 + printk (KERN_WARNING "PFE diags won't be available for class PEs\n");
8184 + rc = 0;
8185 + }
8186 +#endif
8187 +
8188 + printk(KERN_INFO "%s: class firmware loaded %#lx %#lx\n", __func__, pfe->ctrl.class_dmem_sh, pfe->ctrl.class_pe_lmem_sh);
8189 +
8190 + rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
8191 + if (rc < 0) {
8192 + printk(KERN_ERR "%s: tmu firmware load failed\n", __func__);
8193 + goto err3;
8194 + }
8195 +
8196 + pfe->ctrl.tmu_dmem_sh = get_elf_section(tmu_fw, ".dmem_sh");
8197 +
8198 + printk(KERN_INFO "%s: tmu firmware loaded %#lx\n", __func__, pfe->ctrl.tmu_dmem_sh);
8199 +
8200 +#if !defined(CONFIG_UTIL_DISABLED)
8201 + rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
8202 + if (rc < 0) {
8203 + printk(KERN_ERR "%s: util firmware load failed\n", __func__);
8204 + goto err3;
8205 + }
8206 +
8207 + pfe->ctrl.util_dmem_sh = get_elf_section(util_fw, ".dmem_sh");
8208 + pfe->ctrl.util_ddr_sh = get_elf_section(util_fw, ".ddr_sh");
8209 +
8210 +#if defined(CFG_DIAGS)
8211 + rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
8212 + if (rc < 0) {
8213 + printk(KERN_WARNING "PFE diags won't be available for util PE\n");
8214 + rc = 0;
8215 + }
8216 +#endif
8217 +
8218 + printk(KERN_INFO "%s: util firmware loaded %#lx\n", __func__, pfe->ctrl.util_dmem_sh);
8219 +
8220 + util_enable();
8221 +#endif
8222 +
8223 + tmu_enable(0xf);
8224 + class_enable();
8225 +
8226 +err3:
8227 +#if !defined(CONFIG_UTIL_DISABLED)
8228 + release_firmware(util_fw);
8229 +
8230 +err2:
8231 +#endif
8232 + release_firmware(tmu_fw);
8233 +
8234 +err1:
8235 + release_firmware(class_fw);
8236 +
8237 +err0:
8238 + return rc;
8239 +}
8240 +
8241 +/** PFE firmware cleanup
8242 +* Puts PE's in reset
8243 +*
8244 +*
8245 +*/
8246 +void pfe_firmware_exit(struct pfe *pfe)
8247 +{
8248 + printk(KERN_INFO "%s\n", __func__);
8249 +
8250 + class_disable();
8251 + tmu_disable(0xf);
8252 +#if !defined(CONFIG_UTIL_DISABLED)
8253 + util_disable();
8254 +#endif
8255 +}
8256 --- /dev/null
8257 +++ b/drivers/staging/fsl_ppfe/pfe_firmware.h
8258 @@ -0,0 +1,41 @@
8259 +/*
8260 + *
8261 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
8262 + *
8263 + * This program is free software; you can redistribute it and/or modify
8264 + * it under the terms of the GNU General Public License as published by
8265 + * the Free Software Foundation; either version 2 of the License, or
8266 + * (at your option) any later version.
8267 + *
8268 + * This program is distributed in the hope that it will be useful,
8269 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8270 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8271 + * GNU General Public License for more details.
8272 + *
8273 + * You should have received a copy of the GNU General Public License
8274 + * along with this program; if not, write to the Free Software
8275 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
8276 + */
8277 +
8278 +#ifndef _PFE_FIRMWARE_H_
8279 +#define _PFE_FIRMWARE_H_
8280 +
8281 +#if defined(CONFIG_PLATFORM_C2000)
8282 +#define CLASS_FIRMWARE_FILENAME "class_c2000.elf"
8283 +#define TMU_FIRMWARE_FILENAME "tmu_c2000.elf"
8284 +#define UTIL_FIRMWARE_FILENAME "util_c2000.elf"
8285 +#define UTIL_REVA0_FIRMWARE_FILENAME "util_c2000_revA0.elf"
8286 +#else
8287 +#define CLASS_FIRMWARE_FILENAME "ppfe_class_ls1012a.elf"
8288 +#define TMU_FIRMWARE_FILENAME "ppfe_tmu_ls1012a.elf"
8289 +#endif
8290 +
8291 +#define PFE_FW_CHECK_PASS 0
8292 +#define PFE_FW_CHECK_FAIL 1
8293 +#define NUM_PFE_FW 3
8294 +
8295 +int pfe_firmware_init(struct pfe *pfe);
8296 +void pfe_firmware_exit(struct pfe *pfe);
8297 +
8298 +#endif /* _PFE_FIRMWARE_H_ */
8299 +
8300 --- /dev/null
8301 +++ b/drivers/staging/fsl_ppfe/pfe_hal.c
8302 @@ -0,0 +1,2217 @@
8303 +/*
8304 + *
8305 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
8306 + *
8307 + * This program is free software; you can redistribute it and/or modify
8308 + * it under the terms of the GNU General Public License as published by
8309 + * the Free Software Foundation; either version 2 of the License, or
8310 + * (at your option) any later version.
8311 + *
8312 + * This program is distributed in the hope that it will be useful,
8313 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8314 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8315 + * GNU General Public License for more details.
8316 + *
8317 + * You should have received a copy of the GNU General Public License
8318 + * along with this program; if not, write to the Free Software
8319 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
8320 + */
8321 +
8322 +
8323 +#include "pfe_ctrl_hal.h"
8324 +#include "pfe/pfe.h"
8325 +
8326 +void *cbus_base_addr;
8327 +void *ddr_base_addr;
8328 +unsigned long ddr_phys_base_addr;
8329 +unsigned int ddr_size;
8330 +
8331 +static struct pe_info pe[MAX_PE];
8332 +
8333 +/** Initializes the PFE library.
8334 +* Must be called before using any of the library functions.
8335 +*
8336 +* @param[in] cbus_base CBUS virtual base address (as mapped in the host CPU address space)
8337 +* @param[in] ddr_base PFE DDR range virtual base address (as mapped in the host CPU address space)
8338 +* @param[in] ddr_phys_base PFE DDR range physical base address (as mapped in platform)
8339 +* @param[in] size PFE DDR range size (as defined by the host software)
8340 +*/
8341 +void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base, unsigned int size)
8342 +{
8343 + cbus_base_addr = cbus_base;
8344 + ddr_base_addr = ddr_base;
8345 + ddr_phys_base_addr = ddr_phys_base;
8346 + ddr_size = size;
8347 +
8348 + pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
8349 + pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
8350 + pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
8351 + pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
8352 + pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
8353 + pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
8354 +
8355 + pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
8356 + pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
8357 + pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
8358 + pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
8359 + pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
8360 + pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
8361 +
8362 + pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
8363 + pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
8364 + pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
8365 + pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
8366 + pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
8367 + pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
8368 +
8369 + pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
8370 + pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
8371 + pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
8372 + pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
8373 + pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
8374 + pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
8375 +
8376 +#if !defined(CONFIG_PLATFORM_PCI)
8377 + pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
8378 + pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
8379 + pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
8380 + pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
8381 + pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
8382 + pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
8383 +
8384 + pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
8385 + pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
8386 + pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
8387 + pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
8388 + pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
8389 + pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
8390 +#endif
8391 + pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
8392 + pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
8393 + pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
8394 + pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
8395 + pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
8396 + pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
8397 +
8398 +#if !defined(CONFIG_TMU_DUMMY)
8399 + pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
8400 + pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
8401 + pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
8402 + pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
8403 + pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
8404 + pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
8405 +
8406 +#if !defined(CONFIG_PLATFORM_LS1012A)
8407 + pe[TMU2_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(2);
8408 + pe[TMU2_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(2);
8409 + pe[TMU2_ID].pmem_size = TMU_IMEM_SIZE;
8410 + pe[TMU2_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
8411 + pe[TMU2_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
8412 + pe[TMU2_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
8413 +#endif
8414 +
8415 + pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
8416 + pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
8417 + pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
8418 + pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
8419 + pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
8420 + pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
8421 +#endif
8422 +
8423 +#if !defined(CONFIG_UTIL_DISABLED)
8424 + pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
8425 + pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
8426 + pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
8427 + pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
8428 +#endif
8429 +}
8430 +
8431 +
8432 +/** Writes a buffer to PE internal memory from the host
8433 + * through indirect access registers.
8434 + *
8435 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID)
8436 + * @param[in] src Buffer source address
8437 + * @param[in] mem_access_addr DMEM destination address (must be 32bit aligned)
8438 + * @param[in] len Number of bytes to copy
8439 + */
8440 +void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src, unsigned int len)
8441 +{
8442 + u32 offset = 0, val, addr;
8443 + unsigned int len32 = len >> 2;
8444 + int i;
8445 +
8446 + addr = mem_access_addr | PE_MEM_ACCESS_WRITE | PE_MEM_ACCESS_BYTE_ENABLE(0, 4);
8447 +
8448 + for (i = 0; i < len32; i++, offset += 4, src += 4) {
8449 + val = *(u32 *)src;
8450 + writel(cpu_to_be32(val), pe[id].mem_access_wdata);
8451 + writel(addr + offset, pe[id].mem_access_addr);
8452 + }
8453 +
8454 + if ((len = (len & 0x3))) {
8455 + val = 0;
8456 +
8457 + addr = (mem_access_addr | PE_MEM_ACCESS_WRITE | PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;
8458 +
8459 + for (i = 0; i < len; i++, src++)
8460 + val |= (*(u8 *)src) << (8 * i);
8461 +
8462 + writel(cpu_to_be32(val), pe[id].mem_access_wdata);
8463 + writel(addr, pe[id].mem_access_addr);
8464 + }
8465 +}
8466 +
8467 +/** Writes a buffer to PE internal data memory (DMEM) from the host
8468 + * through indirect access registers.
8469 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID)
8470 + * @param[in] src Buffer source address
8471 + * @param[in] dst DMEM destination address (must be 32bit aligned)
8472 + * @param[in] len Number of bytes to copy
8473 + */
8474 +void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
8475 +{
8476 + pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst | PE_MEM_ACCESS_DMEM, src, len);
8477 +}
8478 +
8479 +
8480 +/** Writes a buffer to PE internal program memory (PMEM) from the host
8481 + * through indirect access registers.
8482 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., TMU3_ID)
8483 + * @param[in] src Buffer source address
8484 + * @param[in] dst PMEM destination address (must be 32bit aligned)
8485 + * @param[in] len Number of bytes to copy
8486 + */
8487 +void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
8488 +{
8489 + pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size - 1)) | PE_MEM_ACCESS_IMEM, src, len);
8490 +}
8491 +
8492 +
8493 +/** Reads PE internal program memory (IMEM) from the host
8494 + * through indirect access registers.
8495 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., TMU3_ID)
8496 + * @param[in] addr PMEM read address (must be aligned on size)
8497 + * @param[in] size Number of bytes to read (maximum 4, must not cross 32bit boundaries)
8498 + * @return the data read (in PE endianness, i.e. BE).
8499 + */
8500 +u32 pe_pmem_read(int id, u32 addr, u8 size)
8501 +{
8502 + u32 offset = addr & 0x3;
8503 + u32 mask = 0xffffffff >> ((4 - size) << 3);
8504 + u32 val;
8505 +
8506 + addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1)) | PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
8507 +
8508 + writel(addr, pe[id].mem_access_addr);
8509 + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
8510 +
8511 + return (val >> (offset << 3)) & mask;
8512 +}
8513 +
8514 +
8515 +/** Writes PE internal data memory (DMEM) from the host
8516 + * through indirect access registers.
8517 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID)
8518 + * @param[in] addr DMEM write address (must be aligned on size)
8519 + * @param[in] val		Value to write (in PE endianness, i.e. BE)
8520 + * @param[in] size Number of bytes to write (maximum 4, must not cross 32bit boundaries)
8521 + */
8522 +void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
8523 +{
8524 + u32 offset = addr & 0x3;
8525 +
8526 + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE | PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
8527 +
8528 + /* Indirect access interface is byte swapping data being written */
8529 + writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
8530 + writel(addr, pe[id].mem_access_addr);
8531 +}
8532 +
8533 +
8534 +/** Reads PE internal data memory (DMEM) from the host
8535 + * through indirect access registers.
8536 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID)
8537 + * @param[in] addr DMEM read address (must be aligned on size)
8538 + * @param[in] size Number of bytes to read (maximum 4, must not cross 32bit boundaries)
8539 + * @return the data read (in PE endianness, i.e. BE).
8540 + */
8541 +u32 pe_dmem_read(int id, u32 addr, u8 size)
8542 +{
8543 + u32 offset = addr & 0x3;
8544 + u32 mask = 0xffffffff >> ((4 - size) << 3);
8545 + u32 val;
8546 +
8547 + addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
8548 +
8549 + writel(addr, pe[id].mem_access_addr);
8550 +
8551 + /* Indirect access interface is byte swapping data being read */
8552 + val = be32_to_cpu(readl(pe[id].mem_access_rdata));
8553 +
8554 + return (val >> (offset << 3)) & mask;
8555 +}
8556 +
8557 +
8558 +/** This function is used to write to CLASS internal bus peripherals (ccu, pe-lem) from the host
8559 +* through indirect access registers.
8560 +* @param[in] val value to write
8561 +* @param[in] addr Address to write to (must be aligned on size)
8562 +* @param[in] size Number of bytes to write (1, 2 or 4)
8563 +*
8564 +*/
8565 +void class_bus_write(u32 val, u32 addr, u8 size)
8566 +{
8567 + u32 offset = addr & 0x3;
8568 +
8569 + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
8570 +
8571 + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE | (size << 24);
8572 +
8573 + writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
8574 + writel(addr, CLASS_BUS_ACCESS_ADDR);
8575 +}
8576 +
8577 +
8578 +/** Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
8579 +* through indirect access registers.
8580 +* @param[in] addr Address to read from (must be aligned on size)
8581 +* @param[in] size Number of bytes to read (1, 2 or 4)
8582 +* @return the read data
8583 +*
8584 +*/
8585 +u32 class_bus_read(u32 addr, u8 size)
8586 +{
8587 + u32 offset = addr & 0x3;
8588 + u32 mask = 0xffffffff >> ((4 - size) << 3);
8589 + u32 val;
8590 +
8591 + writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
8592 +
8593 + addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
8594 +
8595 + writel(addr, CLASS_BUS_ACCESS_ADDR);
8596 + val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
8597 +
8598 + return (val >> (offset << 3)) & mask;
8599 +}
8600 +
8601 +
8602 +/** Writes data to the cluster memory (PE_LMEM)
8603 +* @param[in] dst PE LMEM destination address (must be 32bit aligned)
8604 +* @param[in] src Buffer source address
8605 +* @param[in] len Number of bytes to copy
8606 +*/
8607 +void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
8608 +{
8609 + u32 len32 = len >> 2;
8610 + int i;
8611 +
8612 + for (i = 0; i < len32; i++, src += 4, dst += 4)
8613 + class_bus_write(*(u32 *)src, dst, 4);
8614 +
8615 + if (len & 0x2)
8616 + {
8617 + class_bus_write(*(u16 *)src, dst, 2);
8618 + src += 2;
8619 + dst += 2;
8620 + }
8621 +
8622 + if (len & 0x1)
8623 + {
8624 + class_bus_write(*(u8 *)src, dst, 1);
8625 + src++;
8626 + dst++;
8627 + }
8628 +}
8629 +
8630 +/** Writes value to the cluster memory (PE_LMEM)
8631 +* @param[in] dst PE LMEM destination address (must be 32bit aligned)
8632 +* @param[in] val Value to write
8633 +* @param[in] len Number of bytes to write
8634 +*/
8635 +void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
8636 +{
8637 + u32 len32 = len >> 2;
8638 + int i;
8639 +
8640 + val = val | (val << 8) | (val << 16) | (val << 24);
8641 +
8642 + for (i = 0; i < len32; i++, dst += 4)
8643 + class_bus_write(val, dst, 4);
8644 +
8645 + if (len & 0x2)
8646 + {
8647 + class_bus_write(val, dst, 2);
8648 + dst += 2;
8649 + }
8650 +
8651 + if (len & 0x1)
8652 + {
8653 + class_bus_write(val, dst, 1);
8654 + dst++;
8655 + }
8656 +}
8657 +
8658 +#if !defined(CONFIG_UTIL_DISABLED)
8659 +
8660 +/** Writes UTIL program memory (DDR) from the host.
8661 + *
8662 + * @param[in] addr Address to write (virtual, must be aligned on size)
8663 + * @param[in] val		Value to write (in PE endianness, i.e. BE)
8664 + * @param[in] size Number of bytes to write (2 or 4)
8665 + */
8666 +static void util_pmem_write(u32 val, void *addr, u8 size)
8667 +{
8668 + void *addr64 = (void *)((unsigned long)addr & ~0x7);
8669 + unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;
8670 +
8671 + //IMEM should be loaded as a 64bit swapped value in a 64bit aligned location
8672 + if (size == 4)
8673 + writel(be32_to_cpu(val), addr64 + off);
8674 + else
8675 + writew(be16_to_cpu((u16)val), addr64 + off);
8676 +}
8677 +
8678 +
8679 +/** Writes a buffer to UTIL program memory (DDR) from the host.
8680 + *
8681 + * @param[in] dst Address to write (virtual, must be at least 16bit aligned)
8682 + * @param[in] src		Buffer to write (in PE endianness, i.e. BE, must have same alignment as dst)
8683 + * @param[in] len Number of bytes to write (must be at least 16bit aligned)
8684 + */
8685 +static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
8686 +{
8687 + unsigned int len32;
8688 + int i;
8689 +
8690 + if ((unsigned long)src & 0x2) {
8691 + util_pmem_write(*(u16 *)src, dst, 2);
8692 + src += 2;
8693 + dst += 2;
8694 + len -= 2;
8695 + }
8696 +
8697 + len32 = len >> 2;
8698 +
8699 + for (i = 0; i < len32; i++, dst += 4, src += 4)
8700 + util_pmem_write(*(u32 *)src, dst, 4);
8701 +
8702 + if (len & 0x2)
8703 + util_pmem_write(*(u16 *)src, dst, len & 0x2);
8704 +}
8705 +#endif
8706 +
8707 +/** Loads an elf section into pmem
8708 + * Code needs to be at least 16bit aligned and only PROGBITS sections are supported
8709 + *
8710 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., TMU3_ID)
8711 + * @param[in] data pointer to the elf firmware
8712 + * @param[in] shdr pointer to the elf section header
8713 + *
8714 + */
8715 +static int pe_load_pmem_section(int id, const void *data, Elf32_Shdr *shdr)
8716 +{
8717 + u32 offset = be32_to_cpu(shdr->sh_offset);
8718 + u32 addr = be32_to_cpu(shdr->sh_addr);
8719 + u32 size = be32_to_cpu(shdr->sh_size);
8720 + u32 type = be32_to_cpu(shdr->sh_type);
8721 +
8722 +#if !defined(CONFIG_UTIL_DISABLED)
8723 + if (id == UTIL_ID)
8724 + {
8725 + printk(KERN_ERR "%s: unsuported pmem section for UTIL\n", __func__);
8726 + return -EINVAL;
8727 + }
8728 +#endif
8729 +
8730 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3))
8731 + {
8732 + printk(KERN_ERR "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
8733 + __func__, addr, (unsigned long) data + offset);
8734 +
8735 + return -EINVAL;
8736 + }
8737 +
8738 + if (addr & 0x1)
8739 + {
8740 + printk(KERN_ERR "%s: load address(%x) is not 16bit aligned\n", __func__, addr);
8741 + return -EINVAL;
8742 + }
8743 +
8744 + if (size & 0x1)
8745 + {
8746 + printk(KERN_ERR "%s: load size(%x) is not 16bit aligned\n", __func__, size);
8747 + return -EINVAL;
8748 + }
8749 +
8750 + switch (type)
8751 + {
8752 + case SHT_PROGBITS:
8753 + pe_pmem_memcpy_to32(id, addr, data + offset, size);
8754 +
8755 + break;
8756 +
8757 + default:
8758 + printk(KERN_ERR "%s: unsuported section type(%x)\n", __func__, type);
8759 + return -EINVAL;
8760 + break;
8761 + }
8762 +
8763 + return 0;
8764 +}
8765 +
8766 +
8767 +/** Loads an elf section into dmem
8768 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly initialized to 0
8769 + *
8770 + * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID)
8771 + * @param[in] data pointer to the elf firmware
8772 + * @param[in] shdr pointer to the elf section header
8773 + *
8774 + */
8775 +static int pe_load_dmem_section(int id, const void *data, Elf32_Shdr *shdr)
8776 +{
8777 + u32 offset = be32_to_cpu(shdr->sh_offset);
8778 + u32 addr = be32_to_cpu(shdr->sh_addr);
8779 + u32 size = be32_to_cpu(shdr->sh_size);
8780 + u32 type = be32_to_cpu(shdr->sh_type);
8781 + u32 size32 = size >> 2;
8782 + int i;
8783 +
8784 + if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3))
8785 + {
8786 + printk(KERN_ERR "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
8787 + __func__, addr, (unsigned long)data + offset);
8788 +
8789 + return -EINVAL;
8790 + }
8791 +
8792 + if (addr & 0x3)
8793 + {
8794 + printk(KERN_ERR "%s: load address(%x) is not 32bit aligned\n", __func__, addr);
8795 + return -EINVAL;
8796 + }
8797 +
8798 + switch (type)
8799 + {
8800 + case SHT_PROGBITS:
8801 + pe_dmem_memcpy_to32(id, addr, data + offset, size);
8802 + break;
8803 +
8804 + case SHT_NOBITS:
8805 + for (i = 0; i < size32; i++, addr += 4)
8806 + pe_dmem_write(id, 0, addr, 4);
8807 +
8808 + if (size & 0x3)
8809 + pe_dmem_write(id, 0, addr, size & 0x3);
8810 +
8811 + break;
8812 +
8813 + default:
8814 + printk(KERN_ERR "%s: unsuported section type(%x)\n", __func__, type);
8815 + return -EINVAL;
8816 + break;
8817 + }
8818 +
8819 + return 0;
8820 +}
8821 +
8822 +
8823 +/** Loads an elf section into DDR
8824 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly initialized to 0
8825 + *
8826 + * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID)
8827 + * @param[in] data	pointer to the elf firmware
8828 + * @param[in] shdr	pointer to the elf section header
8829 + *
8830 + */
8831 +static int pe_load_ddr_section(int id, const void *data, Elf32_Shdr *shdr, struct device *dev)
8832 +{
8833 +	u32 offset = be32_to_cpu(shdr->sh_offset);
8834 +	u32 addr = be32_to_cpu(shdr->sh_addr);
8835 +	u32 size = be32_to_cpu(shdr->sh_size);
8836 +	u32 type = be32_to_cpu(shdr->sh_type);
8837 +	u32 flags = be32_to_cpu(shdr->sh_flags);
8838 +
8839 +	switch (type)
8840 +	{
8841 +	case SHT_PROGBITS:
8842 +		if (flags & SHF_EXECINSTR)
8843 +		{
8844 +			if (id <= CLASS_MAX_ID)
8845 +			{
8846 +				/* DO the loading only once in DDR */
8847 +				if (id == CLASS0_ID)
8848 +				{
8849 +					printk(KERN_INFO "%s: load address(%x) and elf file address(%lx) rcvd\n", __func__, addr, (unsigned long)data + offset);
8850 +					if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3))
8851 +					{
8852 +						printk(KERN_ERR "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
8853 +							__func__, addr, (unsigned long)data + offset);
8854 +
8855 +						return -EINVAL;
8856 +					}
8857 +
8858 +					if (addr & 0x1)
8859 +					{
8860 +						printk(KERN_ERR "%s: load address(%x) is not 16bit aligned\n", __func__, addr);
8861 +						return -EINVAL;
8862 +					}
8863 +
8864 +					if (size & 0x1)
8865 +					{
8866 +						printk(KERN_ERR "%s: load length(%x) is not 16bit aligned\n", __func__, size);
8867 +						return -EINVAL;
8868 +					}
8869 +					memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data + offset, size);
8870 +				}
8871 +			}
8872 +#if !defined(CONFIG_UTIL_DISABLED)
8873 +			else if (id == UTIL_ID)
8874 +			{
8875 +				if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3))
8876 +				{
8877 +					printk(KERN_ERR "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
8878 +						__func__, addr, (unsigned long)data + offset);
8879 +
8880 +					return -EINVAL;
8881 +				}
8882 +
8883 +				if (addr & 0x1)
8884 +				{
8885 +					printk(KERN_ERR "%s: load address(%x) is not 16bit aligned\n", __func__, addr);
8886 +					return -EINVAL;
8887 +				}
8888 +
8889 +				if (size & 0x1)
8890 +				{
8891 +					printk(KERN_ERR "%s: load length(%x) is not 16bit aligned\n", __func__, size);
8892 +					return -EINVAL;
8893 +				}
8894 +
8895 +				util_pmem_memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data + offset, size);
8896 +			}
8897 +#endif
8898 +			else
8899 +			{
8900 +				printk(KERN_ERR "%s: unsupported ddr section type(%x) for PE(%d)\n", __func__, type, id);
8901 +				return -EINVAL;
8902 +			}
8903 +
8904 +		}
8905 +		else
8906 +		{
8907 +			memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data + offset, size);
8908 +		}
8909 +
8910 +		break;
8911 +
8912 +	case SHT_NOBITS:
8913 +		memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);
8914 +
8915 +		break;
8916 +
8917 +	default:
8918 +		printk(KERN_ERR "%s: unsupported section type(%x)\n", __func__, type);
8919 +		return -EINVAL;
8920 +		break;
8921 +	}
8922 +
8923 +	return 0;
8924 +}
8925 +
8926 +
8927 +/** Loads an elf section into pe lmem
8928 + * Data needs to be at least 32bit aligned, NOBITS sections are correctly initialized to 0
8929 + *
8930 + * @param[in] id	PE identification (CLASS0_ID,..., CLASS5_ID)
8931 + * @param[in] data	pointer to the elf firmware
8932 + * @param[in] shdr	pointer to the elf section header
8933 + *
8934 + */
8935 +static int pe_load_pe_lmem_section(int id, const void *data, Elf32_Shdr *shdr)
8936 +{
8937 +	u32 offset = be32_to_cpu(shdr->sh_offset);
8938 +	u32 addr = be32_to_cpu(shdr->sh_addr);
8939 +	u32 size = be32_to_cpu(shdr->sh_size);
8940 +	u32 type = be32_to_cpu(shdr->sh_type);
8941 +
8942 +	if (id > CLASS_MAX_ID)
8943 +	{
8944 +		printk(KERN_ERR "%s: unsupported pe-lmem section type(%x) for PE(%d)\n", __func__, type, id);
8945 +		return -EINVAL;
8946 +	}
8947 +
8948 +	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3))
8949 +	{
8950 +		printk(KERN_ERR "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
8951 +				__func__, addr, (unsigned long)data + offset);
8952 +
8953 +		return -EINVAL;
8954 +	}
8955 +
8956 +	if (addr & 0x3)
8957 +	{
8958 +		printk(KERN_ERR "%s: load address(%x) is not 32bit aligned\n", __func__, addr);
8959 +		return -EINVAL;
8960 +	}
8961 +
8962 +	switch (type)
8963 +	{
8964 +	case SHT_PROGBITS:
8965 +		class_pe_lmem_memcpy_to32(addr, data + offset, size);
8966 +		break;
8967 +
8968 +	case SHT_NOBITS:
8969 +		class_pe_lmem_memset(addr, 0, size);
8970 +		break;
8971 +
8972 +	default:
8973 +		printk(KERN_ERR "%s: unsupported section type(%x)\n", __func__, type);
8974 +		return -EINVAL;
8975 +		break;
8976 +	}
8977 +
8978 +	return 0;
8979 +}
8980 +
8981 +
8982 +/** Loads an elf section into a PE
8983 + * For now only supports loading a section to dmem (all PE's), pmem (class and tmu PE's),
8984 + * DDR (util PE code)
8985 + *
8986 + * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID, ..., UTIL_ID)
8987 + * @param[in] data	pointer to the elf firmware
8988 + * @param[in] shdr	pointer to the elf section header
8989 + *
8990 + */
8991 +int pe_load_elf_section(int id, const void *data, Elf32_Shdr *shdr, struct device *dev)
8992 +{
8993 +	u32 addr = be32_to_cpu(shdr->sh_addr);
8994 +	u32 size = be32_to_cpu(shdr->sh_size);
8995 +
8996 +	if (IS_DMEM(addr, size))
8997 +		return pe_load_dmem_section(id, data, shdr);
8998 +	else if (IS_PMEM(addr, size))
8999 +		return pe_load_pmem_section(id, data, shdr);
9000 +	else if (IS_PFE_LMEM(addr, size))
9001 +		return 0; /* FIXME */
9002 +	else if (IS_PHYS_DDR(addr, size))
9003 +		return pe_load_ddr_section(id, data, shdr, dev);
9004 +	else if (IS_PE_LMEM(addr, size))
9005 +		return pe_load_pe_lmem_section(id, data, shdr);
9006 +	else {
9007 +		printk(KERN_ERR "%s: unsupported memory range(%x)\n", __func__, addr);
9008 +//		return -EINVAL;
9009 +	}
9010 +
9011 +	return 0;
9012 +}
9013 +
9014 +
9015 +/**************************** BMU ***************************/
9016 +
9017 +/** Initializes a BMU block.
9018 +* @param[in] base BMU block base address
9019 +* @param[in] cfg BMU configuration
9020 +*/
9021 +void bmu_init(void *base, BMU_CFG *cfg)
9022 +{
9023 + bmu_disable(base);
9024 +
9025 + bmu_set_config(base, cfg);
9026 +
9027 + bmu_reset(base);
9028 +}
9029 +
9030 +/** Resets a BMU block.
9031 +* @param[in] base BMU block base address
9032 +*/
9033 +void bmu_reset(void *base)
9034 +{
9035 + writel(CORE_SW_RESET, base + BMU_CTRL);
9036 +
9037 + /* Wait for self clear */
9038 + while (readl(base + BMU_CTRL) & CORE_SW_RESET) ;
9039 +}
9040 +
9041 +/** Enabled a BMU block.
9042 +* @param[in] base BMU block base address
9043 +*/
9044 +void bmu_enable(void *base)
9045 +{
9046 + writel (CORE_ENABLE, base + BMU_CTRL);
9047 +}
9048 +
9049 +/** Disables a BMU block.
9050 +* @param[in] base BMU block base address
9051 +*/
9052 +void bmu_disable(void *base)
9053 +{
9054 + writel (CORE_DISABLE, base + BMU_CTRL);
9055 +}
9056 +
9057 +/** Sets the configuration of a BMU block.
9058 +* @param[in] base BMU block base address
9059 +* @param[in] cfg BMU configuration
9060 +*/
9061 +void bmu_set_config(void *base, BMU_CFG *cfg)
9062 +{
9063 + writel (cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
9064 + writel (cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
9065 + writel (cfg->size & 0xffff, base + BMU_BUF_SIZE);
9066 +// writel (BMU1_THRES_CNT, base + BMU_THRES);
9067 +
9068 + /* Interrupts are never used */
9069 +// writel (0x0, base + BMU_INT_SRC);
9070 + writel (0x0, base + BMU_INT_ENABLE);
9071 +}
9072 +#if defined(CONFIG_PLATFORM_C2000)
9073 +/**************************** GEMAC ***************************/
9074 +
9075 +/** Enable Rx Checksum Engine. With this enabled, Frame with bad IP,
9076 + * TCP or UDP checksums are discarded
9077 + *
9078 + * @param[in] base GEMAC base address.
9079 + */
9080 +void gemac_enable_rx_checksum_offload(void *base)
9081 +{
9082 + writel(readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_CHKSUM_RX, base + EMAC_NETWORK_CONFIG);
9083 + writel(readl(CLASS_L4_CHKSUM_ADDR) | IPV4_CHKSUM_DROP, CLASS_L4_CHKSUM_ADDR);
9084 +}
9085 +
9086 +/** Disable Rx Checksum Engine.
9087 + *
9088 + * @param[in] base GEMAC base address.
9089 + */
9090 +void gemac_disable_rx_checksum_offload(void *base)
9091 +{
9092 + writel(readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_CHKSUM_RX, base + EMAC_NETWORK_CONFIG);
9093 + writel(readl(CLASS_L4_CHKSUM_ADDR) & ~IPV4_CHKSUM_DROP, CLASS_L4_CHKSUM_ADDR);
9094 +}
9095 +
9096 +/** Setup the MII Mgmt clock speed.
9097 + * @param[in] base GEMAC base address (GEMAC0, GEMAC1, GEMAC2)
9098 + * @param[in] mdc_div	MII clock divisor
9099 + */
9100 +void gemac_set_mdc_div(void *base, int mdc_div)
9101 +{
9102 + u32 val = readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_MDC_DIV_MASK;
9103 + u32 div;
9104 +
9105 + switch (mdc_div) {
9106 + case 8:
9107 + div = 0;
9108 + break;
9109 +
9110 + case 16:
9111 + div = 1;
9112 + break;
9113 +
9114 + case 32:
9115 + div = 2;
9116 + break;
9117 +
9118 + case 48:
9119 + div = 3;
9120 + break;
9121 +
9122 + default:
9123 + case 64:
9124 + div = 4;
9125 + break;
9126 +
9127 + case 96:
9128 + div = 5;
9129 + break;
9130 +
9131 + case 128:
9132 + div = 6;
9133 + break;
9134 +
9135 + case 224:
9136 + div = 7;
9137 + break;
9138 + }
9139 +
9140 + val |= div << 18;
9141 +
9142 + writel(val, base + EMAC_NETWORK_CONFIG);
9143 +}
9144 +
9145 +/** GEMAC set speed.
9146 +* @param[in] base GEMAC base address
9147 +* @param[in] speed GEMAC speed (10, 100 or 1000 Mbps)
9148 +*/
9149 +void gemac_set_speed(void *base, MAC_SPEED gem_speed)
9150 +{
9151 + u32 val = readl(base + EMAC_NETWORK_CONFIG);
9152 +
9153 + val = val & ~EMAC_SPEED_MASK;
9154 +
9155 + switch (gem_speed)
9156 + {
9157 + case SPEED_10M:
9158 + val &= (~EMAC_PCS_ENABLE);
9159 + break;
9160 +
9161 + case SPEED_100M:
9162 + val = val | EMAC_SPEED_100;
9163 + val &= (~EMAC_PCS_ENABLE);
9164 + break;
9165 +
9166 + case SPEED_1000M:
9167 + val = val | EMAC_SPEED_1000;
9168 + val &= (~EMAC_PCS_ENABLE);
9169 + break;
9170 +
9171 + case SPEED_1000M_PCS:
9172 + val = val | EMAC_SPEED_1000;
9173 + val |= EMAC_PCS_ENABLE;
9174 + break;
9175 +
9176 + default:
9177 + val = val | EMAC_SPEED_100;
9178 + val &= (~EMAC_PCS_ENABLE);
9179 + break;
9180 + }
9181 +
9182 + writel (val, base + EMAC_NETWORK_CONFIG);
9183 +}
9184 +
9185 +/** GEMAC set duplex.
9186 +* @param[in] base GEMAC base address
9187 +* @param[in] duplex GEMAC duplex mode (Full, Half)
9188 +*/
9189 +void gemac_set_duplex(void *base, int duplex)
9190 +{
9191 + u32 val = readl(base + EMAC_NETWORK_CONFIG);
9192 +
9193 + if (duplex == DUPLEX_HALF)
9194 + val = (val & ~EMAC_DUPLEX_MASK) | EMAC_HALF_DUP;
9195 + else
9196 + val = (val & ~EMAC_DUPLEX_MASK) | EMAC_FULL_DUP;
9197 +
9198 + writel (val, base + EMAC_NETWORK_CONFIG);
9199 +}
9200 +
9201 +/** GEMAC set mode.
9202 +* @param[in] base GEMAC base address
9203 +* @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII)
9204 +*/
9205 +
9206 +#if defined(CONFIG_IP_ALIGNED)
9207 +#define IP_ALIGNED_BITVAL EMAC_TWO_BYTES_IP_ALIGN
9208 +#else
9209 +#define IP_ALIGNED_BITVAL 0
9210 +#endif
9211 +
9212 +void gemac_set_mode(void *base, int mode)
9213 +{
9214 + switch (mode)
9215 + {
9216 + case GMII:
9217 + writel ((readl(base + EMAC_CONTROL) & ~EMAC_MODE_MASK) | EMAC_GMII_MODE_ENABLE | IP_ALIGNED_BITVAL, base + EMAC_CONTROL);
9218 + writel (readl(base + EMAC_NETWORK_CONFIG) & (~EMAC_SGMII_MODE_ENABLE), base + EMAC_NETWORK_CONFIG);
9219 + break;
9220 +
9221 + case RGMII:
9222 + writel ((readl(base + EMAC_CONTROL) & ~EMAC_MODE_MASK) | EMAC_RGMII_MODE_ENABLE | IP_ALIGNED_BITVAL, base + EMAC_CONTROL);
9223 + writel (readl(base + EMAC_NETWORK_CONFIG) & (~EMAC_SGMII_MODE_ENABLE), base + EMAC_NETWORK_CONFIG);
9224 + break;
9225 +
9226 + case RMII:
9227 + writel ((readl(base + EMAC_CONTROL) & ~EMAC_MODE_MASK) | EMAC_RMII_MODE_ENABLE | IP_ALIGNED_BITVAL, base + EMAC_CONTROL);
9228 + writel (readl(base + EMAC_NETWORK_CONFIG) & (~EMAC_SGMII_MODE_ENABLE), base + EMAC_NETWORK_CONFIG);
9229 + break;
9230 +
9231 + case MII:
9232 + writel ((readl(base + EMAC_CONTROL) & ~EMAC_MODE_MASK) | EMAC_MII_MODE_ENABLE | IP_ALIGNED_BITVAL, base + EMAC_CONTROL);
9233 + writel (readl(base + EMAC_NETWORK_CONFIG) & (~EMAC_SGMII_MODE_ENABLE), base + EMAC_NETWORK_CONFIG);
9234 + break;
9235 +
9236 + case SGMII:
9237 + writel ((readl(base + EMAC_CONTROL) & ~EMAC_MODE_MASK) | (EMAC_RMII_MODE_DISABLE | EMAC_RGMII_MODE_DISABLE) | IP_ALIGNED_BITVAL, base + EMAC_CONTROL);
9238 + writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_SGMII_MODE_ENABLE, base + EMAC_NETWORK_CONFIG);
9239 + break;
9240 +
9241 + default:
9242 + writel ((readl(base + EMAC_CONTROL) & ~EMAC_MODE_MASK) | EMAC_MII_MODE_ENABLE | IP_ALIGNED_BITVAL, base + EMAC_CONTROL);
9243 + writel (readl(base + EMAC_NETWORK_CONFIG) & (~EMAC_SGMII_MODE_ENABLE), base + EMAC_NETWORK_CONFIG);
9244 + break;
9245 + }
9246 +}
9247 +/** GEMAC Enable MDIO: Activate the Management interface. This is required to program the PHY
9248 + * @param[in] base GEMAC base address
9249 + */
9250 +void gemac_enable_mdio(void *base)
9251 +{
9252 + u32 data;
9253 +
9254 + data = readl(base + EMAC_NETWORK_CONTROL);
9255 + data |= EMAC_MDIO_EN;
9256 + writel(data, base + EMAC_NETWORK_CONTROL);
9257 +}
9258 +
9259 +/** GEMAC Disable MDIO: Disable the Management interface.
9260 + * @param[in] base GEMAC base address
9261 + */
9262 +void gemac_disable_mdio(void *base)
9263 +{
9264 + u32 data;
9265 +
9266 + data = readl(base + EMAC_NETWORK_CONTROL);
9267 + data &= ~EMAC_MDIO_EN;
9268 + writel(data, base + EMAC_NETWORK_CONTROL);
9269 +}
9270 +
9271 +
9272 +/** GEMAC reset function.
9273 +* @param[in] base GEMAC base address
9274 +*/
9275 +void gemac_reset(void *base)
9276 +{
9277 +}
9278 +
9279 +/** GEMAC enable function.
9280 +* @param[in] base GEMAC base address
9281 +*/
9282 +void gemac_enable(void *base)
9283 +{
9284 + writel (readl(base + EMAC_NETWORK_CONTROL) | EMAC_TX_ENABLE | EMAC_RX_ENABLE, base + EMAC_NETWORK_CONTROL);
9285 +}
9286 +
9287 +/** GEMAC disable function.
9288 +* @param[in] base GEMAC base address
9289 +*/
9290 +void gemac_disable(void *base)
9291 +{
9292 + writel (readl(base + EMAC_NETWORK_CONTROL) & ~(EMAC_TX_ENABLE | EMAC_RX_ENABLE), base + EMAC_NETWORK_CONTROL);
9293 +}
9294 +
9295 +/** GEMAC TX disable function.
9296 +* @param[in] base GEMAC base address
9297 +*/
9298 +void gemac_tx_disable(void *base)
9299 +{
9300 + writel (readl(base + EMAC_NETWORK_CONTROL) & ~(EMAC_TX_ENABLE), base + EMAC_NETWORK_CONTROL);
9301 +}
9302 +
9303 +/** GEMAC set mac address configuration.
9304 +* @param[in] base GEMAC base address
9305 +* @param[in] addr MAC address to be configured
9306 +*/
9307 +void gemac_set_address(void *base, SPEC_ADDR *addr)
9308 +{
9309 + writel(addr->one.bottom, base + EMAC_SPEC1_ADD_BOT);
9310 + writel(addr->one.top, base + EMAC_SPEC1_ADD_TOP);
9311 + writel(addr->two.bottom, base + EMAC_SPEC2_ADD_BOT);
9312 + writel(addr->two.top, base + EMAC_SPEC2_ADD_TOP);
9313 + writel(addr->three.bottom, base + EMAC_SPEC3_ADD_BOT);
9314 + writel(addr->three.top, base + EMAC_SPEC3_ADD_TOP);
9315 + writel(addr->four.bottom, base + EMAC_SPEC4_ADD_BOT);
9316 + writel(addr->four.top, base + EMAC_SPEC4_ADD_TOP);
9317 +}
9318 +
9319 +/** GEMAC get mac address configuration.
9320 +* @param[in] base GEMAC base address
9321 +*
9322 +* @return MAC addresses configured
9323 +*/
9324 +SPEC_ADDR gemac_get_address(void *base)
9325 +{
9326 + SPEC_ADDR addr;
9327 +
9328 + addr.one.bottom = readl(base + EMAC_SPEC1_ADD_BOT);
9329 + addr.one.top = readl(base + EMAC_SPEC1_ADD_TOP);
9330 + addr.two.bottom = readl(base + EMAC_SPEC2_ADD_BOT);
9331 + addr.two.top = readl(base + EMAC_SPEC2_ADD_TOP);
9332 + addr.three.bottom = readl(base + EMAC_SPEC3_ADD_BOT);
9333 + addr.three.top = readl(base + EMAC_SPEC3_ADD_TOP);
9334 + addr.four.bottom = readl(base + EMAC_SPEC4_ADD_BOT);
9335 + addr.four.top = readl(base + EMAC_SPEC4_ADD_TOP);
9336 +
9337 + return addr;
9338 +}
9339 +
9340 +/** Sets the hash register of the MAC.
9341 + * This register is used for matching unicast and multicast frames.
9342 + *
9343 + * @param[in] base GEMAC base address.
9344 + * @param[in] hash 64-bit hash to be configured.
9345 + */
9346 +void gemac_set_hash( void *base, MAC_ADDR *hash )
9347 +{
9348 + writel(hash->bottom, base + EMAC_HASH_BOT);
9349 + writel(hash->top, base + EMAC_HASH_TOP);
9350 +}
9351 +
9352 +/** Get the current value hash register of the MAC.
9353 + * This register is used for matching unicast and multicast frames.
9354 + *
9355 + * @param[in] base GEMAC base address
9356 +
9357 + * @returns 64-bit hash.
9358 + */
9359 +MAC_ADDR gemac_get_hash( void *base )
9360 +{
9361 + MAC_ADDR hash;
9362 +
9363 + hash.bottom = readl(base + EMAC_HASH_BOT);
9364 + hash.top = readl(base + EMAC_HASH_TOP);
9365 +
9366 + return hash;
9367 +}
9368 +
9369 +/** GEMAC set specific local addresses of the MAC.
9370 +* Rather than setting up all four specific addresses, this function sets them up individually.
9371 +*
9372 +* @param[in] base GEMAC base address
9373 +* @param[in] addr MAC address to be configured
9374 +*/
9375 +void gemac_set_laddr1(void *base, MAC_ADDR *address)
9376 +{
9377 + writel(address->bottom, base + EMAC_SPEC1_ADD_BOT);
9378 + writel(address->top, base + EMAC_SPEC1_ADD_TOP);
9379 +}
9380 +
9381 +
9382 +void gemac_set_laddr2(void *base, MAC_ADDR *address)
9383 +{
9384 + writel(address->bottom, base + EMAC_SPEC2_ADD_BOT);
9385 + writel(address->top, base + EMAC_SPEC2_ADD_TOP);
9386 +}
9387 +
9388 +
9389 +void gemac_set_laddr3(void *base, MAC_ADDR *address)
9390 +{
9391 + writel(address->bottom, base + EMAC_SPEC3_ADD_BOT);
9392 + writel(address->top, base + EMAC_SPEC3_ADD_TOP);
9393 +}
9394 +
9395 +
9396 +void gemac_set_laddr4(void *base, MAC_ADDR *address)
9397 +{
9398 + writel(address->bottom, base + EMAC_SPEC4_ADD_BOT);
9399 + writel(address->top, base + EMAC_SPEC4_ADD_TOP);
9400 +}
9401 +
9402 +void gemac_set_laddrN(void *base, MAC_ADDR *address, unsigned int entry_index)
9403 +{
9404 + if( (entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX) )
9405 + return;
9406 +
9407 + entry_index = entry_index - 1;
9408 +
9409 + if (entry_index < 4)
9410 + {
9411 + writel(address->bottom, base + (entry_index * 8) + EMAC_SPEC1_ADD_BOT);
9412 + writel(address->top, base + (entry_index * 8) + EMAC_SPEC1_ADD_TOP);
9413 + }
9414 + else
9415 + {
9416 + writel(address->bottom, base + ((entry_index - 4) * 8) + EMAC_SPEC5_ADD_BOT);
9417 + writel(address->top, base + ((entry_index - 4) * 8) + EMAC_SPEC5_ADD_TOP);
9418 + }
9419 +}
9420 +
9421 +/** Get specific local addresses of the MAC.
9422 +* This allows returning of a single specific address stored in the MAC.
9423 +* @param[in] base GEMAC base address
9424 +*
9425 +* @return Specific MAC address 1
9426 +*
9427 +*/
9428 +MAC_ADDR gem_get_laddr1(void *base)
9429 +{
9430 + MAC_ADDR addr;
9431 + addr.bottom = readl(base + EMAC_SPEC1_ADD_BOT);
9432 + addr.top = readl(base + EMAC_SPEC1_ADD_TOP);
9433 + return addr;
9434 +}
9435 +
9436 +
9437 +MAC_ADDR gem_get_laddr2(void *base)
9438 +{
9439 + MAC_ADDR addr;
9440 + addr.bottom = readl(base + EMAC_SPEC2_ADD_BOT);
9441 + addr.top = readl(base + EMAC_SPEC2_ADD_TOP);
9442 + return addr;
9443 +}
9444 +
9445 +
9446 +MAC_ADDR gem_get_laddr3(void *base)
9447 +{
9448 + MAC_ADDR addr;
9449 + addr.bottom = readl(base + EMAC_SPEC3_ADD_BOT);
9450 + addr.top = readl(base + EMAC_SPEC3_ADD_TOP);
9451 + return addr;
9452 +}
9453 +
9454 +
9455 +MAC_ADDR gem_get_laddr4(void *base)
9456 +{
9457 + MAC_ADDR addr;
9458 + addr.bottom = readl(base + EMAC_SPEC4_ADD_BOT);
9459 + addr.top = readl(base + EMAC_SPEC4_ADD_TOP);
9460 + return addr;
9461 +}
9462 +
9463 +MAC_ADDR gem_get_laddrN(void *base, unsigned int entry_index)
9464 +{
9465 + MAC_ADDR addr = {0xffffffff, 0xffffffff};
9466 +
9467 + if( (entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX) )
9468 + return addr;
9469 +
9470 + entry_index = entry_index - 1;
9471 +
9472 + if (entry_index < 4)
9473 + {
9474 + addr.bottom = readl(base + (entry_index * 8) + EMAC_SPEC1_ADD_BOT);
9475 + addr.top = readl(base + (entry_index * 8) + EMAC_SPEC1_ADD_TOP);
9476 + }
9477 + else
9478 + {
9479 + addr.bottom = readl(base + ((entry_index - 4) * 8) + EMAC_SPEC5_ADD_BOT);
9480 + addr.top = readl(base + ((entry_index - 4) * 8) + EMAC_SPEC5_ADD_TOP);
9481 + }
9482 +
9483 + return addr;
9484 +}
9485 +
9486 +/** Clear specific local addresses of the MAC.
9487 + * @param[in] base GEMAC base address
9488 + */
9489 +
9490 +void gemac_clear_laddr1(void *base)
9491 +{
9492 + writel(0, base + EMAC_SPEC1_ADD_BOT);
9493 +}
9494 +
9495 +void gemac_clear_laddr2(void *base)
9496 +{
9497 + writel(0, base + EMAC_SPEC2_ADD_BOT);
9498 +}
9499 +
9500 +void gemac_clear_laddr3(void *base)
9501 +{
9502 + writel(0, base + EMAC_SPEC3_ADD_BOT);
9503 +}
9504 +
9505 +void gemac_clear_laddr4(void *base)
9506 +{
9507 + writel(0, base + EMAC_SPEC4_ADD_BOT);
9508 +}
9509 +
9510 +void gemac_clear_laddrN(void *base, unsigned int entry_index)
9511 +{
9512 + if( (entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX) )
9513 + return;
9514 +
9515 + entry_index = entry_index - 1;
9516 +
9517 + if ( entry_index < 4 )
9518 + writel(0, base + (entry_index * 8) + EMAC_SPEC1_ADD_BOT);
9519 + else
9520 + writel(0, base + ((entry_index - 4) * 8) + EMAC_SPEC5_ADD_BOT);
9521 +}
9522 +
9523 +/** Set the loopback mode of the MAC. This can be either no loopback for normal
9524 + * operation, local loopback through MAC internal loopback module or PHY
9525 + * loopback for external loopback through a PHY. This asserts the external loop
9526 + * pin.
9527 + *
9528 + * @param[in] base GEMAC base address.
9529 + * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC Loopback,
9530 + * LB_EXT - PHY Loopback.
9531 + */
9532 +void gemac_set_loop( void *base, MAC_LOOP gem_loop )
9533 +{
9534 + switch (gem_loop) {
9535 + case LB_LOCAL:
9536 + writel(readl(base + EMAC_NETWORK_CONTROL) & (~EMAC_LB_PHY),
9537 + base + EMAC_NETWORK_CONTROL);
9538 + writel(readl(base + EMAC_NETWORK_CONTROL) | (EMAC_LB_MAC),
9539 + base + EMAC_NETWORK_CONTROL);
9540 + break;
9541 + case LB_EXT:
9542 + writel(readl(base + EMAC_NETWORK_CONTROL) & (~EMAC_LB_MAC),
9543 + base + EMAC_NETWORK_CONTROL);
9544 + writel(readl(base + EMAC_NETWORK_CONTROL) | (EMAC_LB_PHY),
9545 + base + EMAC_NETWORK_CONTROL);
9546 + break;
9547 + default:
9548 + writel(readl(base + EMAC_NETWORK_CONTROL) & (~(EMAC_LB_MAC | EMAC_LB_PHY)),
9549 + base + EMAC_NETWORK_CONTROL);
9550 + }
9551 +}
9552 +
9553 +/** GEMAC allow frames
9554 + * @param[in] base GEMAC base address
9555 + */
9556 +void gemac_enable_copy_all(void *base)
9557 +{
9558 + writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_COPY_ALL, base + EMAC_NETWORK_CONFIG);
9559 +}
9560 +
9561 +/** GEMAC do not allow frames
9562 + * @param[in] base GEMAC base address
9563 +*/
9564 +void gemac_disable_copy_all(void *base)
9565 +{
9566 + writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_COPY_ALL, base + EMAC_NETWORK_CONFIG);
9567 +}
9568 +
9569 +/** GEMAC allow broadcast function.
9570 +* @param[in] base GEMAC base address
9571 +*/
9572 +void gemac_allow_broadcast(void *base)
9573 +{
9574 + writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_NO_BROADCAST, base + EMAC_NETWORK_CONFIG);
9575 +}
9576 +
9577 +/** GEMAC no broadcast function.
9578 +* @param[in] base GEMAC base address
9579 +*/
9580 +void gemac_no_broadcast(void *base)
9581 +{
9582 + writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_NO_BROADCAST, base + EMAC_NETWORK_CONFIG);
9583 +}
9584 +
9585 +/** GEMAC enable unicast function.
9586 +* @param[in] base GEMAC base address
9587 +*/
9588 +void gemac_enable_unicast(void *base)
9589 +{
9590 + writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_UNICAST, base + EMAC_NETWORK_CONFIG);
9591 +}
9592 +
9593 +/** GEMAC disable unicast function.
9594 +* @param[in] base GEMAC base address
9595 +*/
9596 +void gemac_disable_unicast(void *base)
9597 +{
9598 + writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_UNICAST, base + EMAC_NETWORK_CONFIG);
9599 +}
9600 +
9601 +/** GEMAC enable multicast function.
9602 +* @param[in] base GEMAC base address
9603 +*/
9604 +void gemac_enable_multicast(void *base)
9605 +{
9606 + writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_MULTICAST, base + EMAC_NETWORK_CONFIG);
9607 +}
9608 +
9609 +/** GEMAC disable multicast function.
9610 +* @param[in] base GEMAC base address
9611 +*/
9612 +void gemac_disable_multicast(void *base)
9613 +{
9614 + writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_MULTICAST, base + EMAC_NETWORK_CONFIG);
9615 +}
9616 +
9617 +/** GEMAC enable fcs rx function.
9618 +* @param[in] base GEMAC base address
9619 +*/
9620 +void gemac_enable_fcs_rx(void *base)
9621 +{
9622 + writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_FCS_RX, base + EMAC_NETWORK_CONFIG);
9623 +}
9624 +
9625 +/** GEMAC disable fcs rx function.
9626 +* @param[in] base GEMAC base address
9627 +*/
9628 +void gemac_disable_fcs_rx(void *base)
9629 +{
9630 + writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_FCS_RX, base + EMAC_NETWORK_CONFIG);
9631 +}
9632 +
9633 +/** GEMAC enable 1536 rx function.
9634 +* @param[in] base GEMAC base address
9635 +*/
9636 +void gemac_enable_1536_rx(void *base)
9637 +{
9638 + writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_1536_RX, base + EMAC_NETWORK_CONFIG);
9639 +}
9640 +
9641 +/** GEMAC disable 1536 rx function.
9642 +* @param[in] base GEMAC base address
9643 +*/
9644 +void gemac_disable_1536_rx(void *base)
9645 +{
9646 + writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_1536_RX, base + EMAC_NETWORK_CONFIG);
9647 +}
9648 +
9649 +/** GEMAC enable jumbo function.
9650 +* @param[in] base GEMAC base address
9651 +*/
9652 +void gemac_enable_rx_jmb(void *base)
9653 +{
9654 + writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_JUMBO_FRAME, base + EMAC_NETWORK_CONFIG);
9655 +}
9656 +
9657 +/** GEMAC disable jumbo function.
9658 +* @param[in] base GEMAC base address
9659 +*/
9660 +void gemac_disable_rx_jmb(void *base)
9661 +{
9662 + writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_JUMBO_FRAME, base + EMAC_NETWORK_CONFIG);
9663 +}
9664 +
9665 +/** GEMAC enable stacked vlan function.
9666 +* @param[in] base GEMAC base address
9667 +*/
9668 +void gemac_enable_stacked_vlan(void *base)
9669 +{
9670 + writel (readl(base + EMAC_STACKED_VLAN_REG) | EMAC_ENABLE_STACKED_VLAN, base + EMAC_STACKED_VLAN_REG);
9671 +}
9672 +
9673 +/** GEMAC enable stacked vlan function.
9674 +* @param[in] base GEMAC base address
9675 +*/
9676 +void gemac_disable_stacked_vlan(void *base)
9677 +{
9678 + writel (readl(base + EMAC_STACKED_VLAN_REG) & ~EMAC_ENABLE_STACKED_VLAN, base + EMAC_STACKED_VLAN_REG);
9679 +}
9680 +
9681 +/** GEMAC enable pause rx function.
9682 +* @param[in] base GEMAC base address
9683 +*/
9684 +void gemac_enable_pause_rx(void *base)
9685 +{
9686 + writel (readl(base + EMAC_NETWORK_CONFIG) | EMAC_ENABLE_PAUSE_RX, base + EMAC_NETWORK_CONFIG);
9687 +}
9688 +
9689 +/** GEMAC disable pause rx function.
9690 +* @param[in] base GEMAC base address
9691 +*/
9692 +void gemac_disable_pause_rx(void *base)
9693 +{
9694 + writel (readl(base + EMAC_NETWORK_CONFIG) & ~EMAC_ENABLE_PAUSE_RX, base + EMAC_NETWORK_CONFIG);
9695 +}
9696 +
9697 +/** GEMAC wol configuration
9698 +* @param[in] base GEMAC base address
9699 +* @param[in] wol_conf WoL register configuration
9700 +*/
9701 +void gemac_set_wol(void *base, u32 wol_conf)
9702 +{
9703 + writel(wol_conf, base + EMAC_WOL);
9704 +}
9705 +
9706 +/** Sets Gemac data bus width
9707 + * @param[in] base	GEMAC base address
9708 + * @param[in] width	gemac bus width to be set possible values are 32/64/128
9709 + * */
9710 +void gemac_set_bus_width(void *base, int width)
9711 +{
9712 +	u32 val = readl(base + EMAC_NETWORK_CONFIG);
9713 +	switch(width)
9714 +	{
9715 +	case 32:
9716 +		val = (val & ~EMAC_DATA_BUS_WIDTH_MASK) | EMAC_DATA_BUS_WIDTH_32; break;
9717 +	case 128:
9718 +		val = (val & ~EMAC_DATA_BUS_WIDTH_MASK) | EMAC_DATA_BUS_WIDTH_128; break;
9719 +	case 64:
9720 +	default:
9721 +		val = (val & ~EMAC_DATA_BUS_WIDTH_MASK) | EMAC_DATA_BUS_WIDTH_64;
9722 +		break; /* previously cases 32/128 fell through, always programming 64-bit */
9723 +	}
9724 +	writel (val, base + EMAC_NETWORK_CONFIG);
9725 +}
9726 +
9727 +/** Sets Gemac configuration.
9728 +* @param[in] base GEMAC base address
9729 +* @param[in] cfg GEMAC configuration
9730 +*/
9731 +void gemac_set_config(void *base, GEMAC_CFG *cfg)
9732 +{
9733 + gemac_set_mode(base, cfg->mode);
9734 +
9735 + gemac_set_speed(base, cfg->speed);
9736 +
9737 + gemac_set_duplex(base,cfg->duplex);
9738 +}
9739 +#elif defined(CONFIG_PLATFORM_LS1012A)
9740 +/**************************** MTIP GEMAC ***************************/
9741 +
9742 +/** Enable Rx Checksum Engine. With this enabled, Frame with bad IP,
9743 + * TCP or UDP checksums are discarded
9744 + *
9745 + * @param[in] base GEMAC base address.
9746 + */
9747 +void gemac_enable_rx_checksum_offload(void *base)
9748 +{
9749 +	/* No register configuration found to do this on MTIP GEMAC */
9750 +}
9751 +
9752 +/** Disable Rx Checksum Engine.
9753 + *
9754 + * @param[in] base GEMAC base address.
9755 + */
9756 +void gemac_disable_rx_checksum_offload(void *base)
9757 +{
9758 +	/* No register configuration found to do this on MTIP GEMAC */
9759 +}
9760 +
9761 +/** GEMAC set speed.
9762 +* @param[in] base GEMAC base address
9763 +* @param[in] speed GEMAC speed (10, 100 or 1000 Mbps)
9764 +*/
9765 +void gemac_set_speed(void *base, MAC_SPEED gem_speed)
9766 +{
9767 + u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
9768 + u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
9769 +
9770 + switch (gem_speed)
9771 + {
9772 + case SPEED_10M:
9773 + rcr |= EMAC_RCNTRL_RMII_10T;
9774 + break;
9775 +
9776 +
9777 + case SPEED_1000M:
9778 + ecr |= EMAC_ECNTRL_SPEED;
9779 + break;
9780 +
9781 + case SPEED_100M:
9782 + default:
9783 + /*It is in 100M mode */
9784 + break;
9785 + }
9786 + writel(ecr, (base + EMAC_ECNTRL_REG));
9787 + writel(rcr, (base + EMAC_RCNTRL_REG));
9788 +}
9789 +
9790 +/** GEMAC set duplex.
9791 +* @param[in] base GEMAC base address
9792 +* @param[in] duplex GEMAC duplex mode (Full, Half)
9793 +*/
9794 +void gemac_set_duplex(void *base, int duplex)
9795 +{
9796 +
9797 + if (duplex == DUPLEX_HALF) {
9798 + printk("%s() TODO\n", __func__);
9799 + writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base + EMAC_TCNTRL_REG);
9800 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base + EMAC_RCNTRL_REG));
9801 + }else{
9802 + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base + EMAC_TCNTRL_REG);
9803 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base + EMAC_RCNTRL_REG));
9804 + }
9805 +}
9806 +
9807 +/** GEMAC set mode.
9808 +* @param[in] base GEMAC base address
9809 +* @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII)
9810 +*/
9811 +void gemac_set_mode(void *base, int mode)
9812 +{
9813 + u32 val = readl(base + EMAC_RCNTRL_REG);
9814 +
9815 +	/* Remove loopback */
9816 + val &= ~EMAC_RCNTRL_LOOP;
9817 +
9818 + /*Enable flow control and MII mode*/
9819 + val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE);
9820 +
9821 + writel(val, base + EMAC_RCNTRL_REG);
9822 +}
9823 +
9824 +/** GEMAC enable function.
9825 +* @param[in] base GEMAC base address
9826 +*/
9827 +void gemac_enable(void *base)
9828 +{
9829 + writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base + EMAC_ECNTRL_REG);
9830 +}
9831 +
9832 +/** GEMAC disable function.
9833 +* @param[in] base GEMAC base address
9834 +*/
9835 +void gemac_disable(void *base)
9836 +{
9837 + writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base + EMAC_ECNTRL_REG);
9838 +}
9839 +
9840 +/** GEMAC TX disable function.
9841 +* @param[in] base GEMAC base address
9842 +*/
9843 +void gemac_tx_disable(void *base)
9844 +{
9845 + writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base + EMAC_TCNTRL_REG);
9846 +}
9847 +
9848 +/** Sets the hash register of the MAC.
9849 + * This register is used for matching unicast and multicast frames.
9850 + *
9851 + * @param[in] base GEMAC base address.
9852 + * @param[in] hash 64-bit hash to be configured.
9853 + */
9854 +void gemac_set_hash( void *base, MAC_ADDR *hash )
9855 +{
9856 + writel(hash->bottom, base + EMAC_GALR);
9857 + writel(hash->top, base + EMAC_GAUR);
9858 +}
9859 +
9860 +void gemac_set_laddrN(void *base, MAC_ADDR *address, unsigned int entry_index)
9861 +{
9862 + if( (entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX) )
9863 + return;
9864 +
9865 + entry_index = entry_index - 1;
9866 + if (entry_index < 1) {
9867 + writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW);
9868 + writel((htonl(address->top) | 0x8808), base + EMAC_PHY_ADDR_HIGH);
9869 + }
9870 + else
9871 + {
9872 + /* TODO for other entry_index */
9873 + /*printk("%s for entry_index %d \n",__func__, entry_index); */
9874 + writel(htonl(address->bottom), base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
9875 + writel((htonl(address->top) | 0x8808), base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
9876 + }
9877 +
9878 +}
9879 +
9880 +void gemac_clear_laddrN(void *base, unsigned int entry_index)
9881 +{
9882 + if( (entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX) )
9883 + return;
9884 +
9885 + entry_index = entry_index - 1;
9886 + if (entry_index < 1) {
9887 + writel(0, base + EMAC_PHY_ADDR_LOW);
9888 + writel(0, base + EMAC_PHY_ADDR_HIGH);
9889 + }
9890 + else
9891 + {
9892 + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
9893 + writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
9894 + }
9895 +
9896 +
9897 +}
9898 +
9899 +/** Set the loopback mode of the MAC. This can be either no loopback for normal
9900 + * operation, local loopback through MAC internal loopback module or PHY
9901 + * loopback for external loopback through a PHY. This asserts the external loop
9902 + * pin.
9903 + *
9904 + * @param[in] base GEMAC base address.
9905 + * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC Loopback,
9906 + * LB_EXT - PHY Loopback.
9907 + */
9908 +void gemac_set_loop( void *base, MAC_LOOP gem_loop )
9909 +{
9910 + printk("%s()\n", __func__);
9911 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base + EMAC_RCNTRL_REG));
9912 +}
9913 +
9914 +
9915 +/** GEMAC allow frames
9916 + * @param[in] base GEMAC base address
9917 + */
9918 +void gemac_enable_copy_all(void *base)
9919 +{
9920 + writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base + EMAC_RCNTRL_REG));
9921 +}
9922 +
9923 +/** GEMAC do not allow frames
9924 + * @param[in] base GEMAC base address
9925 +*/
9926 +void gemac_disable_copy_all(void *base)
9927 +{
9928 + writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base + EMAC_RCNTRL_REG));
9929 +}
9930 +
9931 +/** GEMAC allow broadcast function.
9932 +* @param[in] base GEMAC base address
9933 +*/
9934 +void gemac_allow_broadcast(void *base)
9935 +{
9936 + writel (readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base + EMAC_RCNTRL_REG);
9937 +}
9938 +
9939 +/** GEMAC no broadcast function.
9940 +* @param[in] base GEMAC base address
9941 +*/
9942 +void gemac_no_broadcast(void *base)
9943 +{
9944 + writel (readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base + EMAC_RCNTRL_REG);
9945 +}
9946 +
9947 +/** GEMAC enable unicast function.
9948 +* @param[in] base GEMAC base address
9949 +*/
9950 +void gemac_enable_unicast(void *base)
9951 +{
9952 + return;
9953 +}
9954 +
9955 +/** GEMAC disable unicast function.
9956 +* @param[in] base GEMAC base address
9957 +*/
9958 +void gemac_disable_unicast(void *base)
9959 +{
9960 + return;
9961 +}
9962 +
9963 +/** GEMAC enable multicast function.
9964 +* @param[in] base GEMAC base address
9965 +*/
9966 +void gemac_enable_multicast(void *base)
9967 +{
9968 + return;
9969 +}
9970 +
9971 +/** GEMAC disable multicast function.
9972 +* @param[in] base GEMAC base address
9973 +*/
9974 +void gemac_disable_multicast(void *base)
9975 +{
9976 + /* TODO how to disable multicast? */
9977 + return;
9978 +}
9979 +
9980 +/** GEMAC enable fcs rx function.
9981 +* @param[in] base GEMAC base address
9982 +*/
9983 +void gemac_enable_fcs_rx(void *base)
9984 +{
9985 +	/* No configuration found to do this */
9986 +}
9987 +
9988 +/** GEMAC disable fcs rx function.
9989 +* @param[in] base GEMAC base address
9990 +*/
9991 +void gemac_disable_fcs_rx(void *base)
9992 +{
9993 +	/* No configuration found to do this */
9994 +}
9995 +
9996 +
9997 +/** GEMAC enable 1536 rx function.
9998 +* @param[in] base GEMAC base address
9999 +*/
10000 +void gemac_enable_1536_rx(void *base)
10001 +{
10002 + /* Set 1536 as Maximum frame length */
10003 + writel (readl(base + EMAC_RCNTRL_REG) | (1536 << 16), base + EMAC_RCNTRL_REG);
10004 +}
10005 +
10006 +/** GEMAC enable jumbo function.
10007 +* @param[in] base GEMAC base address
10008 +*/
10009 +void gemac_enable_rx_jmb(void *base)
10010 +{
10011 + /*TODO what is the jumbo size supported by MTIP */
10012 + writel (readl(base + EMAC_RCNTRL_REG) | (JUMBO_FRAME_SIZE << 16), base + EMAC_RCNTRL_REG);
10013 +}
10014 +
10015 +/** GEMAC enable stacked vlan function.
10016 +* @param[in] base GEMAC base address
10017 +*/
10018 +void gemac_enable_stacked_vlan(void *base)
10019 +{
10020 + /* MTIP doesn't support stacked vlan */
10021 + return;
10022 +}
10023 +
10024 +/** GEMAC enable pause rx function.
10025 +* @param[in] base GEMAC base address
10026 +*/
10027 +void gemac_enable_pause_rx(void *base)
10028 +{
10029 + writel (readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE, base + EMAC_RCNTRL_REG);
10030 +}
10031 +
10032 +/** GEMAC disable pause rx function.
10033 +* @param[in] base GEMAC base address
10034 +*/
10035 +void gemac_disable_pause_rx(void *base)
10036 +{
10037 + writel (readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE, base + EMAC_RCNTRL_REG);
10038 +}
10039 +
10040 +/** GEMAC wol configuration
10041 +* @param[in] base GEMAC base address
10042 +* @param[in] wol_conf WoL register configuration
10043 +*/
10044 +void gemac_set_wol(void *base, u32 wol_conf)
10045 +{
10046 + printk("%s() TODO\n", __func__);
10047 +}
10048 +
10049 +/** Sets Gemac bus width to 64bit
10050 + * @param[in] base GEMAC base address
10051 + * @param[in] width gemac bus width to be set possible values are 32/64/128
10052 + * */
10053 +void gemac_set_bus_width(void *base, int width)
10054 +{
10055 +}
10056 +
10057 +/** Sets Gemac configuration.
10058 +* @param[in] base GEMAC base address
10059 +* @param[in] cfg GEMAC configuration
10060 +*/
10061 +void gemac_set_config(void *base, GEMAC_CFG *cfg)
10062 +{
10063 +
10064 + /*GEMAC config taken from VLSI */
10065 + writel(0x00000004, base + EMAC_TFWR_STR_FWD);
10066 + writel(0x00000005, base + EMAC_RX_SECTIOM_FULL);
10067 + writel(0x00003fff, base + EMAC_TRUNC_FL);
10068 + writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
10069 + writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
10070 +
10071 + gemac_set_mode(base, cfg->mode);
10072 +
10073 + gemac_set_speed(base, cfg->speed);
10074 +
10075 + gemac_set_duplex(base,cfg->duplex);
10076 +}
10077 +
10078 +
10079 +#endif //CONFIG_PLATFORM_LS1012A)
10080 +
10081 +
10082 +
10083 +/**************************** GPI ***************************/
10084 +
10085 +/** Initializes a GPI block.
10086 +* @param[in] base GPI base address
10087 +* @param[in] cfg GPI configuration
10088 +*/
10089 +void gpi_init(void *base, GPI_CFG *cfg)
10090 +{
10091 + gpi_reset(base);
10092 +
10093 + gpi_disable(base);
10094 +
10095 + gpi_set_config(base, cfg);
10096 +}
10097 +
10098 +/** Resets a GPI block.
10099 +* @param[in] base GPI base address
10100 +*/
10101 +void gpi_reset(void *base)
10102 +{
10103 + writel (CORE_SW_RESET, base + GPI_CTRL);
10104 +}
10105 +
10106 +/** Enables a GPI block.
10107 +* @param[in] base GPI base address
10108 +*/
10109 +void gpi_enable(void *base)
10110 +{
10111 + writel (CORE_ENABLE, base + GPI_CTRL);
10112 +}
10113 +
10114 +/** Disables a GPI block.
10115 +* @param[in] base GPI base address
10116 +*/
10117 +void gpi_disable(void *base)
10118 +{
10119 + writel (CORE_DISABLE, base + GPI_CTRL);
10120 +}
10121 +
10122 +
10123 +/** Sets the configuration of a GPI block.
10124 +* @param[in] base GPI base address
10125 +* @param[in] cfg GPI configuration
10126 +*/
10127 +void gpi_set_config(void *base, GPI_CFG *cfg)
10128 +{
10129 + writel (CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base + GPI_LMEM_ALLOC_ADDR);
10130 + writel (CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base + GPI_LMEM_FREE_ADDR);
10131 + writel (CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base + GPI_DDR_ALLOC_ADDR);
10132 + writel (CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base + GPI_DDR_FREE_ADDR);
10133 + writel (CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
10134 + writel (DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
10135 + writel (LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
10136 + writel (0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
10137 + writel (0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
10138 + writel ((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
10139 + writel ((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);
10140 +
10141 + writel (((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) | GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
10142 + writel (cfg->tmlf_txthres, base + GPI_TMLF_TX);
10143 + writel (cfg->aseq_len, base + GPI_DTX_ASEQ);
10144 + writel (1, base + GPI_TOE_CHKSUM_EN);
10145 +}
10146 +
10147 +/**************************** CLASSIFIER ***************************/
10148 +
10149 +/** Initializes CLASSIFIER block.
10150 +* @param[in] cfg CLASSIFIER configuration
10151 +*/
10152 +void class_init(CLASS_CFG *cfg)
10153 +{
10154 + class_reset();
10155 +
10156 + class_disable();
10157 +
10158 + class_set_config(cfg);
10159 +}
10160 +
10161 +/** Resets CLASSIFIER block.
10162 +*
10163 +*/
10164 +void class_reset(void)
10165 +{
10166 + writel(CORE_SW_RESET, CLASS_TX_CTRL);
10167 +}
10168 +
10169 +/** Enables all CLASS-PE's cores.
10170 +*
10171 +*/
10172 +void class_enable(void)
10173 +{
10174 + writel(CORE_ENABLE, CLASS_TX_CTRL);
10175 +}
10176 +
10177 +/** Disables all CLASS-PE's cores.
10178 +*
10179 +*/
10180 +void class_disable(void)
10181 +{
10182 + writel(CORE_DISABLE, CLASS_TX_CTRL);
10183 +}
10184 +
10185 +/** Sets the configuration of the CLASSIFIER block.
10186 +* @param[in] cfg CLASSIFIER configuration
10187 +*/
10188 +void class_set_config(CLASS_CFG *cfg)
10189 +{
10190 + u32 val;
10191 +
10192 + /* Initialize route table */
10193 + if (!cfg->resume)
10194 + memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 << cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);
10195 +
10196 +#if !defined(LS1012A_PFE_RESET_WA)
10197 + writel(cfg->pe_sys_clk_ratio, CLASS_PE_SYS_CLK_RATIO);
10198 +#endif
10199 +
10200 + writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
10201 + writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
10202 + writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) | CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits), CLASS_ROUTE_HASH_ENTRY_SIZE);
10203 + writel(HIF_PKT_CLASS_EN| HIF_PKT_OFFSET(sizeof(struct hif_hdr)), CLASS_HIF_PARSE);
10204 +
10205 + val = HASH_CRC_PORT_IP | QB2BUS_LE;
10206 +
10207 +#if defined(CONFIG_IP_ALIGNED)
10208 + val |= IP_ALIGNED;
10209 +#endif
10210 +
10211 + /* Class PE packet steering will only work if TOE mode, bridge fetch or
10212 + * route fetch are enabled (see class/qb_fet.v). Route fetch would trigger
10213 + * additional memory copies (likely from DDR because of hash table size, which
10214 + * cannot be reduced because PE software still relies on hash value computed
10215 + * in HW), so when not in TOE mode we simply enable HW bridge fetch even
10216 + * though we don't use it.
10217 + */
10218 + if (cfg->toe_mode)
10219 + val |= CLASS_TOE;
10220 + else
10221 + val |= HW_BRIDGE_FETCH;
10222 +
10223 + writel(val, CLASS_ROUTE_MULTI);
10224 +
10225 + writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr), CLASS_ROUTE_TABLE_BASE);
10226 + writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
10227 + writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
10228 + writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
10229 + writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
10230 + writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);
10231 +
10232 + writel(23, CLASS_AFULL_THRES);
10233 + writel(23, CLASS_TSQ_FIFO_THRES);
10234 +
10235 + writel(24, CLASS_MAX_BUF_CNT);
10236 + writel(24, CLASS_TSQ_MAX_CNT);
10237 +}
10238 +
10239 +/**************************** TMU ***************************/
10240 +
10241 +void tmu_reset(void)
10242 +{
10243 + writel(SW_RESET, TMU_CTRL);
10244 +}
10245 +
10246 +/** Initializes TMU block.
10247 +* @param[in] cfg TMU configuration
10248 +*/
10249 +void tmu_init(TMU_CFG *cfg)
10250 +{
10251 + int q, phyno;
10252 +
10253 + tmu_disable(0xF);
10254 + mdelay(10);
10255 +
10256 +#if !defined(LS1012A_PFE_RESET_WA)
10257 + /* keep in soft reset */
10258 + writel(SW_RESET, TMU_CTRL);
10259 +#endif
10260 + writel(0x3, TMU_SYS_GENERIC_CONTROL);
10261 + writel(750, TMU_INQ_WATERMARK);
10262 + writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR + GPI_INQ_PKTPTR), TMU_PHY0_INQ_ADDR);
10263 + writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR + GPI_INQ_PKTPTR), TMU_PHY1_INQ_ADDR);
10264 +#if !defined(CONFIG_PLATFORM_LS1012A)
10265 + writel(CBUS_VIRT_TO_PFE(EGPI3_BASE_ADDR + GPI_INQ_PKTPTR), TMU_PHY2_INQ_ADDR);
10266 +#endif
10267 + writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR + GPI_INQ_PKTPTR), TMU_PHY3_INQ_ADDR);
10268 + writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
10269 + writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
10270 + writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), TMU_BMU_INQ_ADDR);
10271 +
10272 + writel(0x3FF, TMU_TDQ0_SCH_CTRL); // enabling all 10 schedulers [9:0] of each TDQ
10273 + writel(0x3FF, TMU_TDQ1_SCH_CTRL);
10274 +#if !defined(CONFIG_PLATFORM_LS1012A)
10275 + writel(0x3FF, TMU_TDQ2_SCH_CTRL);
10276 +#endif
10277 + writel(0x3FF, TMU_TDQ3_SCH_CTRL);
10278 +
10279 +#if !defined(LS1012A_PFE_RESET_WA)
10280 + writel(cfg->pe_sys_clk_ratio, TMU_PE_SYS_CLK_RATIO);
10281 +#endif
10282 +
10283 +#if !defined(LS1012A_PFE_RESET_WA)
10284 + writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr), TMU_LLM_BASE_ADDR); // Extra packet pointers will be stored from this address onwards
10285 +
10286 + writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);
10287 + writel(5, TMU_TDQ_IIFG_CFG);
10288 + writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);
10289 +
10290 + writel(0x0, TMU_CTRL);
10291 +
10292 + /* MEM init */
10293 + printk(KERN_INFO "%s: mem init\n", __func__);
10294 + writel(MEM_INIT, TMU_CTRL);
10295 +
10296 + while(!(readl(TMU_CTRL) & MEM_INIT_DONE)) ;
10297 +
10298 + /* LLM init */
10299 + printk(KERN_INFO "%s: lmem init\n", __func__);
10300 + writel(LLM_INIT, TMU_CTRL);
10301 +
10302 + while(!(readl(TMU_CTRL) & LLM_INIT_DONE)) ;
10303 +#endif
10304 + // set up each queue for tail drop
10305 + for (phyno = 0; phyno < 4; phyno++)
10306 + {
10307 +#if defined(CONFIG_PLATFORM_LS1012A)
10308 + if(phyno == 2) continue;
10309 +#endif
10310 + for (q = 0; q < 16; q++)
10311 + {
10312 + u32 qdepth;
10313 + writel((phyno << 8) | q, TMU_TEQ_CTRL);
10314 + writel(1 << 22, TMU_TEQ_QCFG); //Enable tail drop
10315 +
10316 + if (phyno == 3)
10317 + qdepth = DEFAULT_TMU3_QDEPTH;
10318 + else
10319 + qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
10320 +
10321 + // LOG: 68855
10322 + // The following is a workaround for the reordered packet and BMU2 buffer leakage issue.
10323 + if (CHIP_REVISION() == 0)
10324 + qdepth = 31;
10325 +
10326 + writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
10327 + writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
10328 + }
10329 + }
10330 +
10331 +#ifdef CFG_LRO
10332 + /* Set TMU-3 queue 5 (LRO) in no-drop mode */
10333 + writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
10334 + writel(0, TMU_TEQ_QCFG);
10335 +#endif
10336 +
10337 + writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
10338 +
10339 + writel(0x0, TMU_CTRL);
10340 +}
10341 +
10342 +/** Enables TMU-PE cores.
10343 +* @param[in] pe_mask TMU PE mask
10344 +*/
10345 +void tmu_enable(u32 pe_mask)
10346 +{
10347 + writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
10348 +}
10349 +
10350 +/** Disables TMU cores.
10351 +* @param[in] pe_mask TMU PE mask
10352 +*/
10353 +void tmu_disable(u32 pe_mask)
10354 +{
10355 + writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
10356 +}
10357 +/** This will return the tmu queue status
10358 + * @param[in] if_id gem interface id or TMU index
10359 + * @return returns the bit mask of busy queues, zero means all queues are empty
10360 + */
10361 +u32 tmu_qstatus(u32 if_id)
10362 +{
10363 + return cpu_to_be32(pe_dmem_read(TMU0_ID+if_id, PESTATUS_ADDR_TMU + offsetof(PE_STATUS, tmu_qstatus), 4));
10364 +}
10365 +
10366 +u32 tmu_pkts_processed(u32 if_id)
10367 +{
10368 + return cpu_to_be32(pe_dmem_read(TMU0_ID+if_id, PESTATUS_ADDR_TMU + offsetof(PE_STATUS, rx), 4));
10369 +}
10370 +/**************************** UTIL ***************************/
10371 +
10372 +/** Resets UTIL block.
10373 +*/
10374 +void util_reset(void)
10375 +{
10376 + writel(CORE_SW_RESET, UTIL_TX_CTRL);
10377 +}
10378 +
10379 +/** Initializes UTIL block.
10380 +* @param[in] cfg UTIL configuration
10381 +*/
10382 +void util_init(UTIL_CFG *cfg)
10383 +{
10384 + writel(cfg->pe_sys_clk_ratio, UTIL_PE_SYS_CLK_RATIO);
10385 +}
10386 +
10387 +/** Enables UTIL-PE core.
10388 +*
10389 +*/
10390 +void util_enable(void)
10391 +{
10392 + writel(CORE_ENABLE, UTIL_TX_CTRL);
10393 +}
10394 +
10395 +/** Disables UTIL-PE core.
10396 +*
10397 +*/
10398 +void util_disable(void)
10399 +{
10400 + writel(CORE_DISABLE, UTIL_TX_CTRL);
10401 +}
10402 +
10403 +/**************************** HIF ***************************/
10404 +
10405 +/** Initializes HIF no copy block.
10406 +*
10407 +*/
10408 +void hif_nocpy_init(void)
10409 +{
10410 + writel(4, HIF_NOCPY_TX_PORT_NO);
10411 + writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), HIF_NOCPY_LMEM_ALLOC_ADDR);
10412 + writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), HIF_NOCPY_CLASS_ADDR);
10413 + writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), HIF_NOCPY_TMU_PORT0_ADDR);
10414 + writel(HIF_RX_POLL_CTRL_CYCLE<<16|HIF_TX_POLL_CTRL_CYCLE, HIF_NOCPY_POLL_CTRL);
10415 +}
10416 +
10417 +/** Enable hif_nocpy tx DMA and interrupt
10418 +*
10419 +*/
10420 +void hif_nocpy_tx_enable(void)
10421 +{
10422 + /*TODO not sure poll_cntrl_en is required or not */
10423 + writel( HIF_CTRL_DMA_EN, HIF_NOCPY_TX_CTRL);
10424 + //writel((readl(HIF_NOCPY_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN), HIF_NOCPY_INT_ENABLE);
10425 +}
10426 +
10427 +/** Disable hif_nocpy tx DMA and interrupt
10428 +*
10429 +*/
10430 +void hif_nocpy_tx_disable(void)
10431 +{
10432 + u32 hif_int;
10433 +
10434 + writel(0, HIF_NOCPY_TX_CTRL);
10435 +
10436 + hif_int = readl(HIF_NOCPY_INT_ENABLE);
10437 + hif_int &= HIF_TXPKT_INT_EN;
10438 + writel(hif_int, HIF_NOCPY_INT_ENABLE);
10439 +}
10440 +
10441 +/** Enable hif rx DMA and interrupt
10442 +*
10443 +*/
10444 +void hif_nocpy_rx_enable(void)
10445 +{
10446 + hif_nocpy_rx_dma_start();
10447 + writel((readl(HIF_NOCPY_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN), HIF_NOCPY_INT_ENABLE);
10448 +}
10449 +
10450 +/** Disable hif_nocpy rx DMA and interrupt
10451 +*
10452 +*/
10453 +void hif_nocpy_rx_disable(void)
10454 +{
10455 + u32 hif_int;
10456 +
10457 + writel(0, HIF_NOCPY_RX_CTRL);
10458 +
10459 + hif_int = readl(HIF_NOCPY_INT_ENABLE);
10460 + hif_int &= HIF_RXPKT_INT_EN;
10461 + writel(hif_int, HIF_NOCPY_INT_ENABLE);
10462 +
10463 +}
10464 +/** Initializes HIF copy block.
10465 +*
10466 +*/
10467 +void hif_init(void)
10468 +{
10469 + /*Initialize HIF registers*/
10470 + writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE, HIF_POLL_CTRL);
10471 +}
10472 +
10473 +/** Enable hif tx DMA and interrupt
10474 +*
10475 +*/
10476 +void hif_tx_enable(void)
10477 +{
10478 + /*TODO not sure poll_cntrl_en is required or not */
10479 + writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
10480 + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN), HIF_INT_ENABLE);
10481 +}
10482 +
10483 +/** Disable hif tx DMA and interrupt
10484 +*
10485 +*/
10486 +void hif_tx_disable(void)
10487 +{
10488 + u32 hif_int;
10489 +
10490 + writel(0, HIF_TX_CTRL);
10491 +
10492 + hif_int = readl(HIF_INT_ENABLE);
10493 + hif_int &= HIF_TXPKT_INT_EN;
10494 + writel(hif_int, HIF_INT_ENABLE);
10495 +}
10496 +
10497 +/** Enable hif rx DMA and interrupt
10498 +*
10499 +*/
10500 +void hif_rx_enable(void)
10501 +{
10502 + hif_rx_dma_start();
10503 + writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN), HIF_INT_ENABLE);
10504 +}
10505 +
10506 +/** Disable hif rx DMA and interrupt
10507 +*
10508 +*/
10509 +void hif_rx_disable(void)
10510 +{
10511 + u32 hif_int;
10512 +
10513 + writel(0, HIF_RX_CTRL);
10514 +
10515 + hif_int = readl(HIF_INT_ENABLE);
10516 + hif_int &= HIF_RXPKT_INT_EN;
10517 + writel(hif_int, HIF_INT_ENABLE);
10518 +
10519 +}
10520 --- /dev/null
10521 +++ b/drivers/staging/fsl_ppfe/pfe_hif.c
10522 @@ -0,0 +1,939 @@
10523 +/*
10524 + *
10525 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
10526 + *
10527 + * This program is free software; you can redistribute it and/or modify
10528 + * it under the terms of the GNU General Public License as published by
10529 + * the Free Software Foundation; either version 2 of the License, or
10530 + * (at your option) any later version.
10531 + *
10532 + * This program is distributed in the hope that it will be useful,
10533 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
10534 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10535 + * GNU General Public License for more details.
10536 + *
10537 + * You should have received a copy of the GNU General Public License
10538 + * along with this program; if not, write to the Free Software
10539 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
10540 + */
10541 +
10542 +#ifdef __KERNEL__
10543 +#include <linux/kernel.h>
10544 +#include <linux/interrupt.h>
10545 +#include <linux/dma-mapping.h>
10546 +#include <linux/dmapool.h>
10547 +#include <linux/sched.h>
10548 +#include <linux/module.h>
10549 +#include <linux/list.h>
10550 +#include <linux/kthread.h>
10551 +#include <linux/slab.h>
10552 +
10553 +#include <asm/io.h>
10554 +#include <asm/irq.h>
10555 +#else
10556 +#include "platform.h"
10557 +#endif
10558 +
10559 +
10560 +#include "pfe_mod.h"
10561 +#if 0
10562 +#define DMA_MAP_SINGLE(dev, vaddr, size, direction) dma_map_single(dev, vaddr, size, direction)
10563 +#define DMA_UNMAP_SINGLE(dev, vaddr, size, direction) dma_unmap_single(dev, vaddr, size, direction)
10564 +void ct_flush(void *addr, u32 size)
10565 +{
10566 + dma_map_single(pfe->dev, addr, size, DMA_TO_DEVICE);
10567 +}
10568 +#else
10569 +#define DMA_UNMAP_SINGLE(dev, vaddr, size, direction)
10570 +#define DMA_MAP_SINGLE(dev, vaddr, size, direction) virt_to_phys(vaddr)
10571 +#define ct_flush(addr, sz)
10572 +#endif
10573 +
10574 +#define HIF_INT_MASK (HIF_INT | HIF_RXPKT_INT)
10575 +
10576 +#define inc_cl_idx(idxname) idxname = (idxname+1) & (queue->size-1)
10577 +#define inc_hif_rxidx(idxname) idxname = (idxname+1) & (hif->RxRingSize-1)
10578 +#define inc_hif_txidx(idxname) idxname = (idxname+1) & (hif->TxRingSize-1)
10579 +
10580 +unsigned char napi_first_batch = 0;
10581 +
10582 +static int pfe_hif_alloc_descr(struct pfe_hif *hif)
10583 +{
10584 +#if !defined(CONFIG_PLATFORM_PCI)
10585 + void *addr;
10586 + dma_addr_t dma_addr;
10587 + int err = 0;
10588 +
10589 + printk(KERN_INFO "%s\n", __func__);
10590 + addr = dma_alloc_coherent(pfe->dev,
10591 + HIF_RX_DESC_NT * sizeof(struct hif_desc) + HIF_TX_DESC_NT * sizeof(struct hif_desc),
10592 + &dma_addr, GFP_KERNEL);
10593 +
10594 + if (!addr) {
10595 + printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n", __func__);
10596 + err = -ENOMEM;
10597 + goto err0;
10598 + }
10599 +
10600 + hif->descr_baseaddr_p = dma_addr;
10601 + hif->descr_baseaddr_v = addr;
10602 +#else
10603 + hif->descr_baseaddr_p = pfe->ddr_phys_baseaddr + HIF_DESC_BASEADDR;
10604 + hif->descr_baseaddr_v = pfe->ddr_baseaddr + HIF_DESC_BASEADDR;
10605 +#endif
10606 + hif->RxRingSize = HIF_RX_DESC_NT;
10607 + hif->TxRingSize = HIF_TX_DESC_NT;
10608 +
10609 + return 0;
10610 +
10611 +err0:
10612 + return err;
10613 +}
10614 +
10615 +static void pfe_hif_free_descr(struct pfe_hif *hif)
10616 +{
10617 + printk(KERN_INFO "%s\n", __func__);
10618 +#if !defined(CONFIG_PLATFORM_PCI)
10619 + dma_free_coherent(pfe->dev,
10620 + hif->RxRingSize * sizeof(struct hif_desc) + hif->TxRingSize * sizeof(struct hif_desc),
10621 + hif->descr_baseaddr_v, hif->descr_baseaddr_p);
10622 +#endif
10623 +}
10624 +void pfe_hif_desc_dump(struct pfe_hif *hif)
10625 +{
10626 + struct hif_desc *desc;
10627 + unsigned long desc_p;
10628 + int ii=0;
10629 +
10630 + printk(KERN_INFO "%s\n", __func__);
10631 +
10632 + desc = hif->RxBase;
10633 + desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v + hif->descr_baseaddr_p);
10634 +
10635 + printk("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
10636 + for (ii = 0; ii < hif->RxRingSize; ii++) {
10637 + printk(KERN_INFO "status: %08x, ctrl: %08x, data: %08x, next: %x\n",
10638 + desc->status, desc->ctrl, desc->data, desc->next);
10639 + desc++;
10640 + }
10641 +
10642 + desc = hif->TxBase;
10643 + desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v + hif->descr_baseaddr_p);
10644 +
10645 + printk("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
10646 + for (ii = 0; ii < hif->TxRingSize; ii++) {
10647 + printk(KERN_INFO "status: %08x, ctrl: %08x, data: %08x, next: %x\n",
10648 + desc->status, desc->ctrl, desc->data, desc->next);
10649 + desc++;
10650 + }
10651 +
10652 +}
10653 +
10654 +/* pfe_hif_release_buffers
10655 + *
10656 + */
10657 +static void pfe_hif_release_buffers(struct pfe_hif *hif)
10658 +{
10659 + struct hif_desc *desc;
10660 + int i = 0;
10661 +
10662 + hif->RxBase = hif->descr_baseaddr_v;
10663 +
10664 + printk(KERN_INFO "%s\n", __func__);
10665 + /*Free Rx buffers */
10666 +#if !defined(CONFIG_PLATFORM_PCI)
10667 + desc = hif->RxBase;
10668 + for (i = 0; i < hif->RxRingSize; i++) {
10669 + if (desc->data) {
10670 + if ((i < hif->shm->rx_buf_pool_cnt) && (hif->shm->rx_buf_pool[i] == NULL)) {
10671 + //dma_unmap_single(hif->dev, desc->data, hif->rx_buf_len[i], DMA_FROM_DEVICE);
10672 + DMA_UNMAP_SINGLE(hif->dev, desc->data, hif->rx_buf_len[i], DMA_FROM_DEVICE);
10673 + hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
10674 + }
10675 + else {
10676 + /*TODO This should not happen*/
10677 + printk(KERN_ERR "%s: buffer pool already full\n", __func__);
10678 + }
10679 + }
10680 +
10681 + desc->data = 0;
10682 + desc->status = 0;
10683 + desc->ctrl = 0;
10684 + desc++;
10685 + }
10686 +#endif
10687 +}
10688 +
10689 +
10690 +/*
10691 + * pfe_hif_init_buffers
10692 + * This function initializes the HIF Rx/Tx ring descriptors and
10693 + * initialize Rx queue with buffers.
10694 + */
10695 +static int pfe_hif_init_buffers(struct pfe_hif *hif)
10696 +{
10697 + struct hif_desc *desc, *first_desc_p;
10698 + u32 data;
10699 + int i = 0;
10700 +
10701 + printk(KERN_INFO "%s\n", __func__);
10702 +
10703 + /* Check enough Rx buffers available in the shared memory */
10704 + if (hif->shm->rx_buf_pool_cnt < hif->RxRingSize)
10705 + return -ENOMEM;
10706 +
10707 + hif->RxBase = hif->descr_baseaddr_v;
10708 + memset(hif->RxBase, 0, hif->RxRingSize * sizeof(struct hif_desc));
10709 +
10710 + /*Initialize Rx descriptors */
10711 + desc = hif->RxBase;
10712 + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
10713 +
10714 + for (i = 0; i < hif->RxRingSize; i++) {
10715 + /* Initialize Rx buffers from the shared memory */
10716 +
10717 +#if defined(CONFIG_PLATFORM_PCI)
10718 + data = pfe->ddr_phys_baseaddr + HIF_RX_PKT_DDR_BASEADDR + i * DDR_BUF_SIZE;
10719 +#else
10720 + data = (u32)DMA_MAP_SINGLE(hif->dev, hif->shm->rx_buf_pool[i], pfe_pkt_size, DMA_FROM_DEVICE);
10721 + hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
10722 + hif->rx_buf_len[i] = pfe_pkt_size;
10723 + // printk("#%d %p %p %d\n", i, data, hif->rx_buf_addr[i], hif->rx_buf_len[i]);
10724 + hif->shm->rx_buf_pool[i] = NULL;
10725 +#endif
10726 + if (likely(dma_mapping_error(hif->dev, data) == 0)) {
10727 + desc->data = DDR_PHYS_TO_PFE(data);
10728 + } else {
10729 + printk(KERN_ERR "%s : low on mem\n", __func__);
10730 +
10731 + goto err;
10732 + }
10733 +
10734 + desc->status = 0;
10735 + wmb();
10736 + desc->ctrl = BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
10737 + BD_CTRL_DESC_EN | BD_BUF_LEN(pfe_pkt_size);
10738 + /* Chain descriptors */
10739 + desc->next = (u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1);
10740 + desc++;
10741 + }
10742 +
10743 + /* Overwrite last descriptor to chain it to first one*/
10744 + desc--;
10745 + desc->next = (u32)DDR_PHYS_TO_PFE(first_desc_p);
10746 +
10747 + hif->RxtocleanIndex = 0;
10748 +
10749 + /*Initialize Rx buffer descriptor ring base address */
10750 + writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
10751 +
10752 + hif->TxBase = hif->RxBase + hif->RxRingSize;
10753 + first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p + hif->RxRingSize;
10754 + memset(hif->TxBase, 0, hif->TxRingSize * sizeof(struct hif_desc));
10755 +
10756 + /*Initialize tx descriptors */
10757 + desc = hif->TxBase;
10758 +
10759 + for (i = 0; i < hif->TxRingSize; i++) {
10760 + /* Chain descriptors */
10761 + desc->next = (u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1);
10762 +#if defined(CONFIG_PLATFORM_PCI)
10763 + desc->data = pfe->ddr_phys_baseaddr + HIF_TX_PKT_DDR_BASEADDR + i * DDR_BUF_SIZE;
10764 +#endif
10765 + desc->ctrl = 0;
10766 + desc++;
10767 + }
10768 +
10769 + /* Overwrite last descriptor to chain it to first one */
10770 + desc--;
10771 + desc->next = (u32)DDR_PHYS_TO_PFE(first_desc_p);
10772 + hif->TxAvail = hif->TxRingSize;
10773 + hif->Txtosend = 0;
10774 + hif->Txtoclean = 0;
10775 + hif->Txtoflush = 0;
10776 +
10777 + /*Initialize Tx buffer descriptor ring base address */
10778 + writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
10779 +
10780 + return 0;
10781 +
10782 +err:
10783 + pfe_hif_release_buffers(hif);
10784 + return -ENOMEM;
10785 +}
10786 +
10787 +/* pfe_hif_client_register
10788 + *
10789 + * This function used to register a client driver with the HIF driver.
10790 + *
10791 + * Return value:
10792 + * 0 - on Successful registration
10793 + */
10794 +static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id, struct hif_client_shm *client_shm)
10795 +{
10796 + struct hif_client *client = &hif->client[client_id];
10797 + u32 i, cnt;
10798 + struct rx_queue_desc *rx_qbase;
10799 + struct tx_queue_desc *tx_qbase;
10800 + struct hif_rx_queue *rx_queue;
10801 + struct hif_tx_queue *tx_queue;
10802 + int err = 0;
10803 +
10804 + printk(KERN_INFO "%s\n", __func__);
10805 +
10806 + spin_lock_bh(&hif->tx_lock);
10807 +
10808 + if (test_bit(client_id, &hif->shm->gClient_status[0])) {
10809 + printk(KERN_ERR "%s: client %d already registered\n", __func__, client_id);
10810 + err = -1;
10811 + goto unlock;
10812 + }
10813 +
10814 + memset(client, 0, sizeof(struct hif_client));
10815 +
10816 + /*Initialize client Rx queues baseaddr, size */
10817 +
10818 + cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
10819 + /*Check if client is requesting for more queues than supported */
10820 + if (cnt > HIF_CLIENT_QUEUES_MAX)
10821 + cnt = HIF_CLIENT_QUEUES_MAX;
10822 +
10823 + client->rx_qn = cnt;
10824 + rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
10825 + for (i = 0; i < cnt; i++)
10826 + {
10827 + rx_queue = &client->rx_q[i];
10828 + rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
10829 + rx_queue->size = client_shm->rx_qsize;
10830 + rx_queue->write_idx = 0;
10831 + }
10832 +
10833 + /*Initialize client Tx queues baseaddr, size */
10834 + cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
10835 +
10836 + /*Check if client is requesting for more queues than supported */
10837 + if (cnt > HIF_CLIENT_QUEUES_MAX)
10838 + cnt = HIF_CLIENT_QUEUES_MAX;
10839 +
10840 + client->tx_qn = cnt;
10841 + tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
10842 + for (i = 0; i < cnt; i++)
10843 + {
10844 + tx_queue = &client->tx_q[i];
10845 + tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
10846 + tx_queue->size = client_shm->tx_qsize;
10847 + tx_queue->ack_idx = 0;
10848 + }
10849 +
10850 + set_bit(client_id, &hif->shm->gClient_status[0]);
10851 +
10852 +unlock:
10853 + spin_unlock_bh(&hif->tx_lock);
10854 +
10855 + return err;
10856 +}
10857 +
10858 +
10859 +/* pfe_hif_client_unregister
10860 + *
10861 + * This function used to unregister a client from the HIF driver.
10862 + *
10863 + */
10864 +static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
10865 +{
10866 + printk(KERN_INFO "%s\n", __func__);
10867 +
10868 + /* Mark client as no longer available (which prevents further packet receive for this client) */
10869 + spin_lock_bh(&hif->tx_lock);
10870 +
10871 + if (!test_bit(client_id, &hif->shm->gClient_status[0])) {
10872 + printk(KERN_ERR "%s: client %d not registered\n", __func__, client_id);
10873 +
10874 + spin_unlock_bh(&hif->tx_lock);
10875 + return;
10876 + }
10877 +
10878 + clear_bit(client_id, &hif->shm->gClient_status[0]);
10879 +
10880 + spin_unlock_bh(&hif->tx_lock);
10881 +}
10882 +
10883 +/* client_put_rxpacket-
10884 + * This functions puts the Rx pkt in the given client Rx queue.
10885 + * It actually swap the Rx pkt in the client Rx descriptor buffer
10886 + * and returns the free buffer from it.
10887 + *
10888 + * If the function returns NULL, it means the client Rx queue is full and
10889 + * the packet couldn't be sent to the client queue.
10890 + */
10891 +static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len, u32 flags, u32 client_ctrl, u32 *rem_len)
10892 +{
10893 + void *free_pkt = NULL;
10894 + struct rx_queue_desc *desc = queue->base + queue->write_idx;
10895 +
10896 + if (desc->ctrl & CL_DESC_OWN) {
10897 +#if defined(CONFIG_PLATFORM_PCI)
10898 + memcpy(desc->data, pkt, len);
10899 + free_pkt = PFE_HOST_TO_PCI(pkt);
10900 + smp_wmb();
10901 + desc->ctrl = CL_DESC_BUF_LEN(len) | flags;
10902 + inc_cl_idx(queue->write_idx);
10903 +#else
10904 + //TODO: move allocations after Rx loop to improve instruction cache locality
10905 + if (page_mode) {
10906 + int rem_page_size = PAGE_SIZE - PRESENT_OFST_IN_PAGE(pkt);
10907 + int cur_pkt_size = ROUND_MIN_RX_SIZE(len + pfe_pkt_headroom);
10908 + *rem_len = (rem_page_size - cur_pkt_size);
10909 + //printk("%p rem_len %d cur_len %d buf_len %d\n", pkt, rem_page_size, cur_pkt_size, *rem_len);
10910 + if (*rem_len)
10911 + {
10912 + free_pkt = pkt + cur_pkt_size;
10913 + get_page(virt_to_page(free_pkt));
10914 + } else {
10915 + free_pkt = (void *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
10916 + *rem_len = pfe_pkt_size;
10917 + }
10918 + } else {
10919 + free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC | GFP_DMA_PFE);
10920 + *rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
10921 + }
10922 +
10923 + if (free_pkt) {
10924 + desc->data = pkt;
10925 + desc->client_ctrl = client_ctrl;
10926 + smp_wmb();
10927 + desc->ctrl = CL_DESC_BUF_LEN(len) | flags;
10928 + inc_cl_idx(queue->write_idx);
10929 + free_pkt += pfe_pkt_headroom;
10930 + }
10931 +#endif
10932 + }
10933 +
10934 + return free_pkt;
10935 +}
10936 +
10937 +
10938 +/* pfe_hif_rx_process-
10939 + * This function does pfe hif rx queue processing.
10940 + * Dequeue packet from Rx queue and send it to corresponding client queue
10941 + */
10942 +static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
10943 +{
10944 + struct hif_desc *desc;
10945 + struct hif_hdr *pkt_hdr;
10946 + struct __hif_hdr hif_hdr;
10947 + void *free_buf;
10948 + int rtc, len, rx_processed = 0;
10949 + struct __hif_desc local_desc;
10950 + int flags;
10951 + unsigned int desc_p;
10952 + unsigned int buf_size = 0;
10953 +
10954 + spin_lock_bh(&hif->lock);
10955 +
10956 + rtc = hif->RxtocleanIndex;
10957 +
10958 + while (rx_processed < budget)
10959 + {
10960 + /*TODO may need to implement rx process budget */
10961 + desc = hif->RxBase + rtc;
10962 +
10963 + __memcpy12(&local_desc, desc);
10964 +
10965 + /* ACK pending Rx interrupt */
10966 + if (local_desc.ctrl & BD_CTRL_DESC_EN) {
10967 + writel(HIF_INT_MASK, HIF_INT_SRC);
10968 +
10969 + if(rx_processed == 0)
10970 + {
10971 + if(napi_first_batch == 1)
10972 + {
10973 + desc_p = hif->descr_baseaddr_p + ((unsigned long int)(desc) - (unsigned long int)hif->descr_baseaddr_v);
10974 +#if defined(CONFIG_PLATFORM_C2000)
10975 + outer_inv_range(desc_p, (desc_p + 16));
10976 +#endif
10977 + napi_first_batch = 0;
10978 + }
10979 + }
10980 +
10981 + __memcpy12(&local_desc, desc);
10982 +
10983 + if (local_desc.ctrl & BD_CTRL_DESC_EN)
10984 + break;
10985 + }
10986 +
10987 + napi_first_batch = 0;
10988 +
10989 +#ifdef HIF_NAPI_STATS
10990 + hif->napi_counters[NAPI_DESC_COUNT]++;
10991 +#endif
10992 + len = BD_BUF_LEN(local_desc.ctrl);
10993 +#if defined(CONFIG_PLATFORM_PCI)
10994 + pkt_hdr = &hif_hdr;
10995 + memcpy(pkt_hdr, (void *)PFE_PCI_TO_HOST(local_desc.data), sizeof(struct hif_hdr));
10996 +#else
10997 + //dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data), hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
10998 + DMA_UNMAP_SINGLE(hif->dev, DDR_PFE_TO_PHYS(local_desc.data), hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
10999 +
11000 + pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
11001 +
11002 + /* Track last HIF header received */
11003 + if (!hif->started) {
11004 + hif->started = 1;
11005 +
11006 + __memcpy8(&hif_hdr, pkt_hdr);
11007 +
11008 + hif->qno = hif_hdr.hdr.qNo;
11009 + hif->client_id = hif_hdr.hdr.client_id;
11010 + hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) | hif_hdr.hdr.client_ctrl;
11011 + flags = CL_DESC_FIRST;
11012 +
11013 +// printk(KERN_INFO "start of packet: id %d, q %d, len %d, flags %x %x\n", hif->client_id, hif->qno, len, local_desc.ctrl, hif->client_ctrl);
11014 + }
11015 + else {
11016 +// printk(KERN_INFO "continuation: id %d, q %d, len %d, flags %x\n", hif->client_id, hif->qno, len, local_desc.ctrl);
11017 + flags = 0;
11018 + }
11019 +
11020 + if (local_desc.ctrl & BD_CTRL_LIFM)
11021 + flags |= CL_DESC_LAST;
11022 +#endif
11023 + /* Check for valid client id and still registered */
11024 + if ((hif->client_id >= HIF_CLIENTS_MAX) || !(test_bit(hif->client_id, &hif->shm->gClient_status[0]))) {
11025 + if (printk_ratelimit())
11026 + printk(KERN_ERR "%s: packet with invalid client id %d qNo %d\n", __func__, hif->client_id, hif->qno);
11027 +
11028 +#if defined(CONFIG_PLATFORM_PCI)
11029 + free_buf = local_desc.data;
11030 +#else
11031 + free_buf = pkt_hdr;
11032 +#endif
11033 + goto pkt_drop;
11034 + }
11035 +
11036 + /* Check to valid queue number */
11037 + if (hif->client[hif->client_id].rx_qn <= hif->qno) {
11038 + printk(KERN_INFO "%s: packet with invalid queue: %d\n", __func__, hif->qno);
11039 + hif->qno = 0;
11040 + }
11041 +
11042 +#if defined(CONFIG_PLATFORM_PCI)
11043 +	free_buf = client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
11044 +						(void *)PFE_PCI_TO_HOST(desc->data), len, flags, hif->client_ctrl, &buf_size);
11045 +#else
11046 +	free_buf = client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
11047 +						(void *)pkt_hdr, len, flags, hif->client_ctrl, &buf_size);
11048 +#endif
11049 +
11050 + hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND, hif->qno);
11051 +
11052 + if (unlikely(!free_buf)) {
11053 +#ifdef HIF_NAPI_STATS
11054 + hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
11055 +#endif
11056 + /* If we want to keep in polling mode to retry later, we need to tell napi that we consumed
11057 + the full budget or we will hit a livelock scenario. The core code keeps this napi instance
11058 + at the head of the list and none of the other instances get to run */
11059 + rx_processed = budget;
11060 +
11061 + if (flags & CL_DESC_FIRST)
11062 + hif->started = 0;
11063 +
11064 + break;
11065 + }
11066 +
11067 + pkt_drop:
11068 +#if defined(CONFIG_PLATFORM_PCI)
11069 + desc->data = (u32)free_buf;
11070 +#else
11071 + /*Fill free buffer in the descriptor */
11072 + hif->rx_buf_addr[rtc] = free_buf;
11073 + hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
11074 + desc->data = DDR_PHYS_TO_PFE((u32)DMA_MAP_SINGLE(hif->dev, free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE));
11075 + //printk("#%p %p %d\n", desc->data, hif->rx_buf_addr[rtc], hif->rx_buf_len[rtc]);
11076 +#endif
11077 + wmb();
11078 + desc->ctrl = BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
11079 + BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc]);
11080 +
11081 + inc_hif_rxidx(rtc);
11082 +
11083 + if (local_desc.ctrl & BD_CTRL_LIFM) {
11084 + if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
11085 + rx_processed++;
11086 +
11087 +#ifdef HIF_NAPI_STATS
11088 + hif->napi_counters[NAPI_PACKET_COUNT]++;
11089 +#endif
11090 + }
11091 + hif->started = 0;
11092 + }
11093 + }
11094 +
11095 + hif->RxtocleanIndex = rtc;
11096 + spin_unlock_bh(&hif->lock);
11097 +
11098 + /* we made some progress, re-start rx dma in case it stopped */
11099 + hif_rx_dma_start();
11100 +
11101 + return rx_processed;
11102 +}
11103 +
11104 +
11105 +/* client_ack_txpacket-
11106 + * This function ack the Tx packet in the give client Tx queue by resetting
11107 + * ownership bit in the descriptor.
11108 + */
11109 +static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no)
11110 +{
11111 + struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
11112 + struct tx_queue_desc *desc = queue->base + queue->ack_idx;
11113 +
11114 + if (desc->ctrl & CL_DESC_OWN) {
11115 + /*TODO Do we need to match the pkt address also? */
11116 + desc->ctrl &= ~CL_DESC_OWN;
11117 + inc_cl_idx(queue->ack_idx);
11118 +
11119 + return 0;
11120 + }
11121 + else {
11122 + /*This should not happen */
11123 + printk(KERN_ERR "%s: %d %d %d %d %d %p %d\n", __func__, hif->Txtosend, hif->Txtoclean, hif->TxAvail, client_id, q_no, queue, queue->ack_idx);
11124 + BUG();
11125 + return 1;
11126 + }
11127 +}
11128 +
11129 +void __hif_tx_done_process(struct pfe_hif *hif, int count)
11130 +{
11131 + struct hif_desc *desc;
11132 + struct hif_desc_sw *desc_sw;
11133 + int ttc, tx_avl;
11134 +
11135 + ttc = hif->Txtoclean;
11136 + tx_avl = hif->TxAvail;
11137 +
11138 + while ((tx_avl < hif->TxRingSize) && count--) {
11139 + desc = hif->TxBase + ttc;
11140 +
11141 + if (desc->ctrl & BD_CTRL_DESC_EN)
11142 + break;
11143 +
11144 + desc_sw = &hif->tx_sw_queue[ttc];
11145 +
11146 + if (desc_sw->data) {
11147 +#if !defined(CONFIG_PLATFORM_PCI)
11148 + //dmap_unmap_single(hif->dev, desc_sw->data, desc_sw->len, DMA_TO_DEVICE);
11149 + DMA_UNMAP_SINGLE(hif->dev, desc_sw->data, desc_sw->len, DMA_TO_DEVICE);
11150 +#endif
11151 + }
11152 + client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
11153 +
11154 + inc_hif_txidx(ttc);
11155 + tx_avl++;
11156 + }
11157 +
11158 + hif->Txtoclean = ttc;
11159 + hif->TxAvail = tx_avl;
11160 +}
11161 +
11162 +
11163 +/* __hif_xmit_pkt -
11164 + * This function puts one packet in the HIF Tx queue
11165 + */
11166 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no, void *data, u32 len, unsigned int flags)
11167 +{
11168 + struct hif_desc *desc;
11169 + struct hif_desc_sw *desc_sw;
11170 +
11171 +#if defined(CONFIG_PLATFORM_EMULATION)
11172 + {
11173 + struct hif_queue *queue = &hif->client[client_id].rx_q[0];
11174 + struct queue_desc *qdesc = queue->base + queue->write_idx;
11175 + void *buf;
11176 +
11177 + printk("%s: packet loop backed client_id:%d qno:%d data : %p len:%d\n", __func__, client_id, q_no, data, len);
11178 +#if 1
11179 +	if (qdesc->ctrl & CL_DESC_OWN) {
11180 +		buf = (void *)qdesc->data;
11181 +		memcpy(buf, data, len);
11182 +		wmb();
11183 +		qdesc->ctrl = CL_DESC_BUF_LEN(len);
11184 +		inc_cl_idx(queue->write_idx);
11185 +		printk("%s: packet loop backed..\n", __func__);
11186 +		hif_lib_indicate_client(client_id, EVENT_RX_PKT_IND, q_no);
11187 +		client_ack_txpacket(hif, client_id, q_no);
11188 +	}
11189 +#endif
11190 + }
11191 +
11192 +#else
11193 + desc = hif->TxBase + hif->Txtosend;
11194 + desc_sw = &hif->tx_sw_queue[hif->Txtosend];
11195 +
11196 + desc_sw->len = len;
11197 + desc_sw->client_id = client_id;
11198 + desc_sw->q_no = q_no;
11199 + desc_sw->flags = flags;
11200 +
11201 +#if !defined(CONFIG_PLATFORM_PCI)
11202 + if (flags & HIF_DONT_DMA_MAP) {
11203 + desc_sw->data = 0;
11204 + desc->data = (u32)DDR_PHYS_TO_PFE(data);
11205 + } else {
11206 + desc_sw->data = DMA_MAP_SINGLE(hif->dev, data, len, DMA_TO_DEVICE);
11207 + desc->data = (u32)DDR_PHYS_TO_PFE(desc_sw->data);
11208 + }
11209 +#else
11210 +#define ALIGN32(x) ((x) & ~0x3)
11211 + memcpy(PFE_PCI_TO_HOST(desc->data), data, ALIGN32(len+0x3));
11212 +#endif
11213 +
11214 + inc_hif_txidx(hif->Txtosend);
11215 + hif->TxAvail--;
11216 +
11217 + /* For TSO we skip actual TX until the last descriptor */
11218 + /* This reduce the number of required wmb() */
11219 + if ((flags & HIF_TSO) && (!((flags & HIF_DATA_VALID) && (flags & HIF_LAST_BUFFER))))
11220 + goto skip_tx;
11221 +
11222 + wmb();
11223 +
11224 + do {
11225 + desc_sw = &hif->tx_sw_queue[hif->Txtoflush];
11226 + desc = hif->TxBase + hif->Txtoflush;
11227 +
11228 + if (desc_sw->flags & HIF_LAST_BUFFER) {
11229 + if ((desc_sw->client_id < PFE_CL_VWD0) || (desc_sw->client_id > (PFE_CL_VWD0 + MAX_VAP_SUPPORT)))
11230 + desc->ctrl = BD_CTRL_LIFM | BD_CTRL_BRFETCH_DISABLE |
11231 + BD_CTRL_RTFETCH_DISABLE | BD_CTRL_PARSE_DISABLE |
11232 + BD_CTRL_DESC_EN | BD_BUF_LEN(desc_sw->len);
11233 + else {
11234 +
11235 + desc->ctrl = BD_CTRL_LIFM | BD_CTRL_DESC_EN | BD_BUF_LEN(desc_sw->len);
11236 + }
11237 + }
11238 + else
11239 + desc->ctrl = BD_CTRL_DESC_EN | BD_BUF_LEN(desc_sw->len);
11240 +
11241 + inc_hif_txidx(hif->Txtoflush);
11242 + }
11243 + while (hif->Txtoflush != hif->Txtosend);
11244 +
11245 +skip_tx:
11246 + return;
11247 +
11248 +#endif
11249 +}
11250 +
11251 +
11252 +int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no, void *data, unsigned int len)
11253 +{
11254 + int rc = 0;
11255 +
11256 + spin_lock_bh(&hif->tx_lock);
11257 +
11258 + if (!hif->TxAvail)
11259 + rc = 1;
11260 + else {
11261 + __hif_xmit_pkt(hif, client_id, q_no, data, len, HIF_FIRST_BUFFER | HIF_LAST_BUFFER);
11262 + hif_tx_dma_start();
11263 + }
11264 + if (hif->TxAvail < (hif->TxRingSize >> 1))
11265 + __hif_tx_done_process(hif, TX_FREE_MAX_COUNT);
11266 +
11267 + spin_unlock_bh(&hif->tx_lock);
11268 +
11269 + return rc;
11270 +}
11271 +
11272 +/* hif_isr-
11273 + * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
11274 + */
11275 +static irqreturn_t hif_isr(int irq, void *dev_id)
11276 +{
11277 + struct pfe_hif *hif = (struct pfe_hif *) dev_id;
11278 + int int_status;
11279 +
11280 + /*Read hif interrupt source register */
11281 + int_status = readl_relaxed(HIF_INT_SRC);
11282 +
11283 + if ((int_status & HIF_INT) == 0)
11284 + return(IRQ_NONE);
11285 +
11286 + int_status &= ~(HIF_INT);
11287 +
11288 + if (int_status & HIF_RXPKT_INT) {
11289 + int_status &= ~(HIF_RXPKT_INT);
11290 +
11291 + /* Disable interrupts */
11292 + writel_relaxed(0, HIF_INT_ENABLE);
11293 +
11294 + napi_first_batch = 1;
11295 +
11296 + if (napi_schedule_prep(&hif->napi))
11297 + {
11298 +#ifdef HIF_NAPI_STATS
11299 + hif->napi_counters[NAPI_SCHED_COUNT]++;
11300 +#endif
11301 + __napi_schedule(&hif->napi);
11302 + }
11303 + }
11304 +
11305 + if (int_status) {
11306 + printk(KERN_INFO "%s : Invalid interrupt : %d\n", __func__, int_status);
11307 + writel(int_status, HIF_INT_SRC);
11308 + }
11309 +
11310 + return IRQ_HANDLED;
11311 +}
11312 +
11313 +
11314 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
11315 +{
11316 + unsigned int client_id = data1;
11317 +
11318 + if (client_id >= HIF_CLIENTS_MAX)
11319 + {
11320 + printk(KERN_ERR "%s: client id %d out of bounds\n", __func__, client_id);
11321 + return;
11322 + }
11323 +
11324 + switch (req) {
11325 + case REQUEST_CL_REGISTER:
11326 + /* Request for register a client */
11327 + printk(KERN_INFO "%s: register client_id %d\n", __func__, client_id);
11328 + pfe_hif_client_register(hif, client_id, (struct hif_client_shm *)&hif->shm->client[client_id]);
11329 + break;
11330 +
11331 + case REQUEST_CL_UNREGISTER:
11332 + printk(KERN_INFO "%s: unregister client_id %d\n", __func__, client_id);
11333 +
11334 + /* Request for unregister a client */
11335 + pfe_hif_client_unregister(hif, client_id);
11336 +
11337 + break;
11338 +
11339 + default:
11340 + printk(KERN_ERR "%s: unsupported request %d\n", __func__, req);
11341 + break;
11342 + }
11343 +
11344 + /*TODO check for TMU queue resume request */
11345 +
11346 + /*Process client Tx queues
11347 + * Currently we don't have checking for tx pending*/
11348 +}
11349 +
11350 +/** pfe_hif_rx_poll
11351 + * This function is NAPI poll function to process HIF Rx queue.
11352 + */
11353 +static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
11354 +{
11355 + struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
11356 + int work_done;
11357 +
11358 +#ifdef HIF_NAPI_STATS
11359 + hif->napi_counters[NAPI_POLL_COUNT]++;
11360 +#endif
11361 +
11362 + work_done = pfe_hif_rx_process(hif, budget);
11363 +
11364 + if (work_done < budget)
11365 + {
11366 + napi_complete(napi);
11367 + writel_relaxed(HIF_INT_MASK, HIF_INT_ENABLE);
11368 + }
11369 +#ifdef HIF_NAPI_STATS
11370 + else
11371 + hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
11372 +#endif
11373 +
11374 + return work_done;
11375 +}
11376 +
11377 +/* pfe_hif_init
11378 + * This function initializes the baseaddresses and irq, etc.
11379 + */
11380 +int pfe_hif_init(struct pfe *pfe)
11381 +{
11382 + struct pfe_hif *hif = &pfe->hif;
11383 + int err;
11384 +
11385 + printk(KERN_INFO "%s\n", __func__);
11386 +
11387 + hif->dev = pfe->dev;
11388 + hif->irq = pfe->hif_irq;
11389 +
11390 + if ((err = pfe_hif_alloc_descr(hif))) {
11391 + goto err0;
11392 + }
11393 +
11394 + if (pfe_hif_init_buffers(hif)) {
11395 + printk(KERN_ERR "%s: Could not initialize buffer descriptors\n", __func__);
11396 + err = -ENOMEM;
11397 + goto err1;
11398 + }
11399 +
11400 +	/* Initialize NAPI for Rx processing */
11401 + init_dummy_netdev(&hif->dummy_dev);
11402 + netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll, HIF_RX_POLL_WEIGHT);
11403 + napi_enable(&hif->napi);
11404 +
11405 + spin_lock_init(&hif->tx_lock);
11406 + spin_lock_init(&hif->lock);
11407 +
11408 + hif_init();
11409 + hif_rx_enable();
11410 + hif_tx_enable();
11411 +
11412 + /* Disable tx done interrupt */
11413 + writel(HIF_INT_MASK, HIF_INT_ENABLE);
11414 +
11415 + gpi_enable(HGPI_BASE_ADDR);
11416 +
11417 +#ifdef __KERNEL__
11418 + err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
11419 + if (err) {
11420 + printk(KERN_ERR "%s: failed to get the hif IRQ = %d\n", __func__, hif->irq);
11421 + goto err1;
11422 + }
11423 +#else
11424 + /*TODO register interrupts */
11425 +#endif
11426 +
11427 + return 0;
11428 +err1:
11429 + pfe_hif_free_descr(hif);
11430 +err0:
11431 + return err;
11432 +}
11433 +
11434 +/* pfe_hif_exit-
11435 + */
11436 +void pfe_hif_exit(struct pfe *pfe)
11437 +{
11438 + struct pfe_hif *hif = &pfe->hif;
11439 +
11440 + printk(KERN_INFO "%s\n", __func__);
11441 +
11442 + spin_lock_bh(&hif->lock);
11443 + hif->shm->gClient_status[0] = 0;
11444 + hif->shm->gClient_status[1] = 0; /* Make sure all clients are disabled */
11445 +
11446 + spin_unlock_bh(&hif->lock);
11447 +
11448 + /*Disable Rx/Tx */
11449 + gpi_disable(HGPI_BASE_ADDR);
11450 + hif_rx_disable();
11451 + hif_tx_disable();
11452 +
11453 + napi_disable(&hif->napi);
11454 + netif_napi_del(&hif->napi);
11455 +
11456 +#ifdef __KERNEL__
11457 + free_irq(hif->irq, hif);
11458 +#endif
11459 + pfe_hif_release_buffers(hif);
11460 + pfe_hif_free_descr(hif);
11461 +}
11462 --- /dev/null
11463 +++ b/drivers/staging/fsl_ppfe/pfe_hif.h
11464 @@ -0,0 +1,322 @@
11465 +/*
11466 + *
11467 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
11468 + *
11469 + * This program is free software; you can redistribute it and/or modify
11470 + * it under the terms of the GNU General Public License as published by
11471 + * the Free Software Foundation; either version 2 of the License, or
11472 + * (at your option) any later version.
11473 + *
11474 + * This program is distributed in the hope that it will be useful,
11475 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
11476 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11477 + * GNU General Public License for more details.
11478 + *
11479 + * You should have received a copy of the GNU General Public License
11480 + * along with this program; if not, write to the Free Software
11481 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
11482 + */
11483 +
11484 +#ifndef _PFE_HIF_H_
11485 +#define _PFE_HIF_H_
11486 +
11487 +#include <linux/netdevice.h>
11488 +
11489 +#define HIF_NAPI_STATS
11490 +
11491 +#define HIF_CLIENT_QUEUES_MAX 16
11492 +#define HIF_RX_POLL_WEIGHT 64
11493 +
11494 +#define HIF_RX_PKT_MIN_SIZE 0x800 /* 2KB */
11495 +#define HIF_RX_PKT_MIN_SIZE_MASK (~(HIF_RX_PKT_MIN_SIZE - 1))
11496 +#define ROUND_MIN_RX_SIZE(_sz) (((_sz) + (HIF_RX_PKT_MIN_SIZE - 1)) & HIF_RX_PKT_MIN_SIZE_MASK)
11497 +#define PRESENT_OFST_IN_PAGE(_buf) (((unsigned long int)(_buf) & (PAGE_SIZE - 1)) & HIF_RX_PKT_MIN_SIZE_MASK)
11498 +
11499 +enum {
11500 + NAPI_SCHED_COUNT = 0,
11501 + NAPI_POLL_COUNT,
11502 + NAPI_PACKET_COUNT,
11503 + NAPI_DESC_COUNT,
11504 + NAPI_FULL_BUDGET_COUNT,
11505 + NAPI_CLIENT_FULL_COUNT,
11506 + NAPI_MAX_COUNT
11507 +};
11508 +
11509 +
11510 +/* XXX HIF_TX_DESC_NT value should be always greter than 4,
11511 + * Otherwise HIF_TX_POLL_MARK will become zero.
11512 + */
11513 +#if defined(CONFIG_PLATFORM_PCI)
11514 +#define HIF_RX_DESC_NT 4
11515 +#define HIF_TX_DESC_NT 4
11516 +#else
11517 +#if defined(CONFIG_COMCERTO_64K_PAGES)
11518 +#define HIF_RX_DESC_NT 64
11519 +#else
11520 +#define HIF_RX_DESC_NT 256
11521 +#endif
11522 +#define HIF_TX_DESC_NT 2048
11523 +#endif
11524 +
11525 +#define HIF_FIRST_BUFFER (1 << 0)
11526 +#define HIF_LAST_BUFFER (1 << 1)
11527 +#define HIF_DONT_DMA_MAP (1 << 2) //TODO merge it with TSO
11528 +#define HIF_DATA_VALID (1 << 3)
11529 +#define HIF_TSO (1 << 4)
11530 +
11531 +#define MAX_VAP_SUPPORT 3
11532 +#define MAX_WIFI_VAPS MAX_VAP_SUPPORT
11533 +
11534 +enum {
11535 + PFE_CL_GEM0 = 0,
11536 + PFE_CL_GEM1,
11537 + PFE_CL_GEM2,
11538 + PFE_CL_VWD0,
11539 + PFE_CL_VWD_LAST = PFE_CL_VWD0 + MAX_VAP_SUPPORT,
11540 + PFE_CL_PCAP0,
11541 + HIF_CLIENTS_MAX
11542 +};
11543 +
11544 +/*structure to store client queue info */
11545 +struct hif_rx_queue {
11546 + struct rx_queue_desc *base;
11547 + u32 size;
11548 + u32 write_idx;
11549 +};
11550 +
11551 +struct hif_tx_queue {
11552 + struct tx_queue_desc *base;
11553 + u32 size;
11554 + u32 ack_idx;
11555 +};
11556 +
11557 +/*Structure to store the client info */
11558 +struct hif_client {
11559 + int rx_qn;
11560 + struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
11561 + int tx_qn;
11562 + struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
11563 +};
11564 +
11565 +/*HIF hardware buffer descriptor */
11566 +struct hif_desc {
11567 + volatile u32 ctrl;
11568 + volatile u32 status;
11569 + volatile u32 data;
11570 + volatile u32 next;
11571 +};
11572 +
11573 +struct __hif_desc {
11574 + u32 ctrl;
11575 + u32 status;
11576 + u32 data;
11577 +};
11578 +
11579 +struct hif_desc_sw {
11580 + dma_addr_t data;
11581 + u16 len;
11582 + u8 client_id;
11583 + u8 q_no;
11584 + u16 flags;
11585 +};
11586 +
11587 +struct hif_hdr {
11588 + u8 client_id;
11589 + u8 qNo;
11590 + u16 client_ctrl;
11591 + u16 client_ctrl1;
11592 +};
11593 +
11594 +struct __hif_hdr {
11595 + union {
11596 + struct hif_hdr hdr;
11597 + u32 word[2];
11598 + };
11599 +};
11600 +
11601 +struct hif_lro_hdr {
11602 + u16 data_offset;
11603 + u16 mss;
11604 +};
11605 +
11606 +struct hif_ipsec_hdr {
11607 + u16 sa_handle[2];
11608 +}__attribute__((packed));
11609 +
11610 +#define MAX_TSO_BUF_DESCS 5
11611 +struct hif_tso_buf_desc {
11612 + u32 addr;
11613 + u32 ctrl;
11614 +#define TSO_CTRL_LAST_BUFFER (1 << 31)
11615 +};
11616 +
11617 +struct hif_tso_hdr {
11618 + struct hif_hdr pkt_hdr;
11619 + u16 ip_off;
11620 + u16 ip_id;
11621 + u16 ip_len;
11622 + u16 tcp_off;
11623 + u32 tcp_seq;
11624 +} __attribute__((packed));
11625 +
11626 +struct hif_tso_hdr_nocpy {
11627 + struct hif_tso_hdr tso_hdr;
11628 + struct hif_tso_buf_desc bdesc[MAX_TSO_BUF_DESCS];
11629 +} __attribute__((packed));
11630 +
11631 +struct hif_pcap_hdr {
11632 + u8 ifindex;
11633 + u8 unused;
11634 + u16 seqno;
11635 + u32 timestamp;
11636 +}__attribute__((packed));
11637 +
11638 +/* HIF_CTRL_TX... defines */
11639 +#define HIF_CTRL_TX_TSO_NOCPY (1 << 8)
11640 +#define HIF_CTRL_TX_IPSEC_OUT (1 << 7)
11641 +#define HIF_CTRL_TX_OWN_MAC (1 << 6)
11642 +#define HIF_CTRL_TX_TSO_END (1 << 5)
11643 +#define HIF_CTRL_TX_TSO6 (1 << 4)
11644 +#define HIF_CTRL_TX_TSO (1 << 3)
11645 +#define HIF_CTRL_TX_CHECKSUM (1 << 2)
11646 +#define HIF_CTRL_TX_CSUM_VALIDATE (1 << 1)
11647 +#define HIF_CTRL_TX_WIFI (1 << 0)
11648 +
11649 +/* HIF_CTRL_RX... defines */
11650 +#define HIF_CTRL_RX_OFFSET_OFST (24)
11651 +#define HIF_CTRL_RX_PE_ID_OFST (16)
11652 +#define HIF_CTRL_RX_IPSEC_IN (1 << 4)
11653 +#define HIF_CTRL_RX_WIFI_EXPT (1 << 3)
11654 +#define HIF_CTRL_RX_CHECKSUMMED (1 << 2)
11655 +#define HIF_CTRL_RX_CONTINUED (1 << 1)
11656 +#define HIF_CTRL_RX_WIFI_HEADROOM (1 << 0)
11657 +
11658 +#define HIF_CTRL_VAPID_OFST (8)
11659 +
11660 +struct pfe_hif {
11661 + /* To store registered clients in hif layer */
11662 + struct hif_client client[HIF_CLIENTS_MAX];
11663 + struct hif_shm *shm;
11664 + int irq;
11665 +
11666 + void *descr_baseaddr_v;
11667 + unsigned long descr_baseaddr_p;
11668 +
11669 + struct hif_desc *RxBase;
11670 + u32 RxRingSize;
11671 + u32 RxtocleanIndex;
11672 + void *rx_buf_addr[HIF_RX_DESC_NT];
11673 + int rx_buf_len[HIF_RX_DESC_NT];
11674 + unsigned int qno;
11675 + unsigned int client_id;
11676 + unsigned int client_ctrl;
11677 + unsigned int started;
11678 +
11679 + struct hif_desc *TxBase;
11680 + u32 TxRingSize;
11681 + u32 Txtosend;
11682 + u32 Txtoclean;
11683 + u32 TxAvail;
11684 + u32 Txtoflush;
11685 + struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
11686 + struct hif_tso_hdr_nocpy *tso_hdr_v;
11687 + dma_addr_t tso_hdr_p;
11688 +
11689 + spinlock_t tx_lock;
11690 + spinlock_t lock;
11691 + struct net_device dummy_dev;
11692 + struct napi_struct napi;
11693 + struct device *dev;
11694 +
11695 +#ifdef CONFIG_HOTPLUG_CPU
11696 + struct notifier_block cpu_notify;
11697 +#endif
11698 +
11699 +#ifdef HIF_NAPI_STATS
11700 + unsigned int napi_counters[NAPI_MAX_COUNT];
11701 +#endif
11702 +};
11703 +
11704 +void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no, void *data, u32 len, unsigned int flags);
11705 +int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no, void *data, unsigned int len);
11706 +void __hif_tx_done_process(struct pfe_hif *hif, int count);
11707 +void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2);
11708 +int pfe_hif_init(struct pfe *pfe);
11709 +void pfe_hif_exit(struct pfe *pfe);
11710 +
11711 +static inline void hif_tx_done_process(struct pfe_hif *hif, int count)
11712 +{
11713 + spin_lock_bh(&hif->tx_lock);
11714 + __hif_tx_done_process(hif, count);
11715 + spin_unlock_bh(&hif->tx_lock);
11716 +}
11717 +
11718 +static inline void hif_tx_lock(struct pfe_hif *hif)
11719 +{
11720 + spin_lock_bh(&hif->tx_lock);
11721 +}
11722 +
11723 +static inline void hif_tx_unlock(struct pfe_hif *hif)
11724 +{
11725 + spin_unlock_bh(&hif->tx_lock);
11726 +}
11727 +
11728 +static inline int __hif_tx_avail(struct pfe_hif *hif)
11729 +{
11730 + return hif->TxAvail;
11731 +}
11732 +
11733 +#if defined(CONFIG_PLATFORM_C2000)
11734 +static inline void __memcpy8(void *dst, void *src)
11735 +{
11736 + asm volatile ( "ldm %1, {r9, r10}\n\t"
11737 + "stm %0, {r9, r10}\n\t"
11738 + :
11739 + : "r" (dst), "r" (src)
11740 + : "r9", "r10", "memory"
11741 + );
11742 +}
11743 +
11744 +static inline void __memcpy12(void *dst, void *src)
11745 +{
11746 + asm volatile ( "ldm %1, {r8, r9, r10}\n\t"
11747 + "stm %0, {r8, r9, r10}\n\t"
11748 + :
11749 + : "r" (dst), "r" (src)
11750 + : "r8", "r9", "r10", "memory"
11751 + );
11752 +}
11753 +
11754 +static inline void __memcpy16(void *dst, void *src)
11755 +{
11756 + asm volatile ( "ldm %1, {r7, r8, r9, r10}\n\t"
11757 + "stm %0, {r7, r8, r9, r10}\n\t"
11758 + :
11759 + : "r"(dst), "r"(src)
11760 + : "r7", "r8", "r9", "r10", "memory"
11761 + );
11762 +}
11763 +
11764 +#define HIF_MEMCPY_BURSTSIZE 32 /*__memcpy copy 32byte in a burst*/
11765 +static inline void __memcpy(void *dst, void *src, unsigned int len)
11766 +{
11767 + void *end = src + len;
11768 +
11769 + dst = (void *)((unsigned long)dst & ~0x3);
11770 + src = (void *)((unsigned long)src & ~0x3);
11771 +
11772 + while (src < end) {
11773 + asm volatile ( "ldm %1!, {r3, r4, r5, r6, r7, r8, r9, r10}\n\t"
11774 + "stm %0!, {r3, r4, r5, r6, r7, r8, r9, r10}\n\t"
11775 + : "+r"(dst), "+r"(src)
11776 + :
11777 + : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "memory"
11778 + );
11779 + }
11780 +}
11781 +#else
11782 +#define __memcpy8(dst, src) memcpy(dst, src, 8)
11783 +#define __memcpy12(dst, src) memcpy(dst, src, 12)
11784 +#define __memcpy(dst, src, len) memcpy(dst, src, len)
11785 +#endif
11786 +#endif /* _PFE_HIF_H_ */
11787 --- /dev/null
11788 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
11789 @@ -0,0 +1,658 @@
11790 +/*
11791 + *
11792 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
11793 + *
11794 + * This program is free software; you can redistribute it and/or modify
11795 + * it under the terms of the GNU General Public License as published by
11796 + * the Free Software Foundation; either version 2 of the License, or
11797 + * (at your option) any later version.
11798 + *
11799 + * This program is distributed in the hope that it will be useful,
11800 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
11801 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11802 + * GNU General Public License for more details.
11803 + *
11804 + * You should have received a copy of the GNU General Public License
11805 + * along with this program; if not, write to the Free Software
11806 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
11807 + */
11808 +
11809 +#include <linux/version.h>
11810 +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
11811 +#include <asm/system.h>
11812 +#endif
11813 +#include <linux/kernel.h>
11814 +#include <linux/slab.h>
11815 +#include <linux/interrupt.h>
11816 +#include <linux/workqueue.h>
11817 +#include <linux/dma-mapping.h>
11818 +#include <linux/dmapool.h>
11819 +#include <linux/sched.h>
11820 +#include <linux/skbuff.h>
11821 +#include <linux/moduleparam.h>
11822 +#include <linux/cpu.h>
11823 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
11824 +//#include <asm/system_info.h>
11825 +#endif
11826 +
11827 +#include "pfe_mod.h"
11828 +#include "pfe_hif.h"
11829 +#include "pfe_hif_lib.h"
11830 +#include "pfe_ctrl_hal.h"
11831 +
11832 +
11833 +unsigned int lro_mode = 0;
11834 +unsigned int page_mode = 0;
11835 +unsigned int tx_qos = 0;
11836 +unsigned int pfe_pkt_size;
11837 +unsigned int pfe_pkt_headroom;
11838 +unsigned int emac_txq_cnt;
11839 +
11840 +/** @pfe_hal_lib.c.
11841 + * Common functions used by HIF client drivers
11842 + */
11843 +
11844 +/*HIF shared memory Global variable */
11845 +struct hif_shm ghif_shm;
11846 +
11847 +/* TMU tx transmitted packets counter, 1 per TMU */
11848 +unsigned int TMU_DMEM_SH(tx_trans)[EMAC_TXQ_CNT];
11849 +
11850 +/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
11851 + * This function should be called after pfe_hif_exit
11852 + *
11853 + * @param[in] hif_shm Shared memory address location in DDR
11854 + */
11855 +static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
11856 +{
11857 + int i;
11858 + void *pkt;
11859 +
11860 + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
11861 + pkt = hif_shm->rx_buf_pool[i];
11862 + if (pkt) {
11863 + hif_shm->rx_buf_pool[i] = NULL;
11864 + pkt -= pfe_pkt_headroom;
11865 +
11866 + if (page_mode) {
11867 + put_page(virt_to_page(pkt));
11868 + } else
11869 + kfree(pkt);
11870 + }
11871 + }
11872 +}
11873 +
11874 +/* Initialize shared memory used between HIF driver and clients,
11875 + * allocate rx_buffer_pool required for HIF Rx descriptors.
11876 + * This function should be called before initializing HIF driver.
11877 + *
11878 + * @param[in] hif_shm Shared memory address location in DDR
11879 + * @return 0 - on success, <0 on fail to initialize
11880 + */
11881 +static int pfe_hif_shm_init(struct hif_shm *hif_shm)
11882 +{
11883 + int i;
11884 + void *pkt;
11885 +
11886 + memset(hif_shm, 0, sizeof(struct hif_shm));
11887 + hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
11888 +
11889 + for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
11890 + if (page_mode) {
11891 + pkt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA_PFE);
11892 + } else
11893 + pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
11894 +
11895 + if (pkt)
11896 + hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
11897 + else
11898 + goto err0;
11899 + }
11900 +
11901 + return 0;
11902 +
11903 +err0:
11904 + printk(KERN_ERR "%s Low memory\n", __func__);
11905 + pfe_hif_shm_clean(hif_shm);
11906 + return -ENOMEM;
11907 +}
11908 +
11909 +/*This function sends indication to HIF driver
11910 + *
11911 + * @param[in] hif hif context
11912 + **/
11913 +static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int data2)
11914 +{
11915 + //TODO : If we separate HIF and HIF LIB, then send req and data through shared memory.
11916 +
11917 + hif_process_client_req(hif, req, data1, data2);
11918 +}
11919 +
11920 +void hif_lib_indicate_client(int client_id, int event_type, int qno)
11921 +{
11922 + struct hif_client_s *client = pfe->hif_client[client_id];
11923 +
11924 + /*
11925 + * TODO : Right now, all events are queue number based. So we are masking events per queue
11926 + * basis. Later if we add any events those do not depend on queue number, then we may
11927 + * need to add masking per event.
11928 + */
11929 + if (!client || (event_type >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX) )
11930 + return;
11931 +
11932 + if (!test_and_set_bit(qno, &client->queue_mask[event_type])) {
11933 + client->event_handler(client->priv, event_type, qno);
11934 + }
11935 +
11936 +}
11937 +
11938 +
11939 +/*This function releases Rx queue descriptors memory and pre-filled buffers
11940 + *
11941 + * @param[in] client hif_client context
11942 + */
11943 +static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
11944 +{
11945 + struct rx_queue_desc *desc;
11946 + int qno, ii;
11947 + void *buf;
11948 +
11949 + for (qno = 0; qno < client->rx_qn; qno++) {
11950 + desc = client->rx_q[qno].base;
11951 +
11952 + for (ii = 0; ii < client->rx_q[qno].size; ii++) {
11953 + buf = (void *)desc->data;
11954 + if (buf) {
11955 + buf -= pfe_pkt_headroom;
11956 +
11957 + if (page_mode)
11958 + free_page((unsigned long)buf);
11959 + else
11960 + kfree(buf);
11961 +
11962 + desc->ctrl = 0;
11963 + }
11964 +
11965 + desc++;
11966 + }
11967 + }
11968 +
11969 + kfree(client->rx_qbase);
11970 +}
11971 +
11972 +
11973 +/*This function allocates memory for the rxq descriptors and pre-fill rx queues
11974 + * with buffers.
11975 + * @param[in] client client context
11976 + * @param[in] q_size size of the rxQ, all queues are of same size
11977 + */
11978 +static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int q_size)
11979 +{
11980 + struct rx_queue_desc *desc;
11981 + struct hif_client_rx_queue *queue;
11982 + int ii, qno;
11983 +
11984 + /*Allocate memory for the client queues */
11985 + client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct rx_queue_desc), GFP_KERNEL);
11986 + if (!client->rx_qbase){
11987 + goto err;
11988 + }
11989 +
11990 + for (qno = 0; qno < client->rx_qn; qno++) {
11991 + queue = &client->rx_q[qno];
11992 +
11993 + queue->base = client->rx_qbase + qno * q_size * sizeof(struct rx_queue_desc);
11994 + queue->size = q_size;
11995 + queue->read_idx = 0;
11996 + queue->write_idx = 0;
11997 +
11998 + dbg_print_info("rx queue: %d, base: %p, size: %d \n", qno, queue->base, queue->size);
11999 + }
12000 +
12001 + for (qno = 0; qno < client->rx_qn; qno++) {
12002 + queue = &client->rx_q[qno];
12003 + desc = queue->base;
12004 +
12005 + for (ii = 0; ii < queue->size; ii++) {
12006 + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
12007 + desc++;
12008 + }
12009 + }
12010 +
12011 + return 0;
12012 +
12013 +err:
12014 + return 1;
12015 +}
12016 +
12017 +#define inc_cl_idx(idxname) idxname = (idxname+1) & (queue->size-1)
12018 +
12019 +static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
12020 +{
12021 + dbg_print_info( "%s\n", __func__);
12022 +
12023 + /* Check if there are any pending packets. Client must flush the tx queues
12024 + before unregistering, by calling hif_lib_tx_get_next_complete() */
12025 + /* Hif no longer calls since we are no longer registered */
12026 +
12027 + if (queue->tx_pending)
12028 + printk(KERN_ERR "%s: pending transmit packets\n", __func__);
12029 +}
12030 +
12031 +static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
12032 +{
12033 + int qno;
12034 +
12035 + dbg_print_info("%s\n", __func__);
12036 +
12037 + for (qno = 0; qno < client->tx_qn; qno++) {
12038 + hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
12039 + }
12040 +
12041 + kfree(client->tx_qbase);
12042 +}
12043 +
12044 +static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int q_size)
12045 +{
12046 + struct hif_client_tx_queue *queue;
12047 + int qno;
12048 +
12049 + client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct tx_queue_desc), GFP_KERNEL);
12050 + if (!client->tx_qbase) {
12051 + return 1;
12052 + }
12053 +
12054 + for (qno = 0; qno < client->tx_qn; qno++) {
12055 + queue = &client->tx_q[qno];
12056 +
12057 + queue->base = client->tx_qbase + qno * q_size * sizeof(struct tx_queue_desc);
12058 + queue->size = q_size;
12059 + queue->read_idx = 0;
12060 + queue->write_idx = 0;
12061 + queue->tx_pending = 0;
12062 + queue->nocpy_flag = 0;
12063 + queue->prev_tmu_tx_pkts = 0;
12064 + queue->done_tmu_tx_pkts = 0;
12065 +
12066 + dbg_print_info("tx queue: %d, base: %p, size: %d \n", qno, queue->base, queue->size);
12067 + }
12068 +
12069 + return 0;
12070 +}
12071 +
12072 +static int hif_lib_event_dummy( void *priv, int event_type, int qno)
12073 +{
12074 + return 0;
12075 +}
12076 +
12077 +int hif_lib_client_register(struct hif_client_s *client)
12078 +{
12079 + struct hif_shm *hif_shm;
12080 + struct hif_client_shm *client_shm;
12081 + int err, i;
12082 +// int loop_cnt = 0;
12083 +
12084 + dbg_print_info("%s\n", __func__);
12085 +
12086 + /*Allocate memory before spin_lock*/
12087 + if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
12088 + err = -ENOMEM;
12089 + goto err_rx;
12090 + }
12091 +
12092 + if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
12093 + err = -ENOMEM;
12094 + goto err_tx;
12095 + }
12096 +
12097 + spin_lock_bh(&pfe->hif.lock);
12098 + if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) || (pfe->hif_client[client->id])) {
12099 + err = -EINVAL;
12100 + goto err;
12101 + }
12102 +
12103 + hif_shm = client->pfe->hif.shm;
12104 +
12105 + if (!client->event_handler)
12106 + client->event_handler = hif_lib_event_dummy;
12107 +
12108 + /*Initialize client specific shared memory */
12109 + client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
12110 + client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
12111 + client_shm->rx_qsize = client->rx_qsize;
12112 + client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
12113 + client_shm->tx_qsize = client->tx_qsize;
12114 + client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) | (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
12115 +// spin_lock_init(&client->rx_lock);
12116 +
12117 + for (i = 0; i < HIF_EVENT_MAX; i++) {
12118 + client->queue_mask[i] = 0; /* By default all events are unmasked */
12119 + }
12120 +
12121 + /*Indicate to HIF driver*/
12122 + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
12123 +
12124 + dbg_print_info("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
12125 + __func__, client, client->id, client->tx_qsize, client->rx_qsize);
12126 +
12127 + client->cpu_id = -1;
12128 +
12129 + pfe->hif_client[client->id] = client;
12130 + spin_unlock_bh(&pfe->hif.lock);
12131 +
12132 + return 0;
12133 +
12134 +err:
12135 + spin_unlock_bh(&pfe->hif.lock);
12136 + hif_lib_client_release_tx_buffers(client);
12137 +
12138 +err_tx:
12139 + hif_lib_client_release_rx_buffers(client);
12140 +
12141 +err_rx:
12142 + return err;
12143 +}
12144 +
12145 +int hif_lib_client_unregister(struct hif_client_s *client)
12146 +{
12147 + struct pfe *pfe = client->pfe;
12148 + u32 client_id = client->id;
12149 +
12150 + printk(KERN_INFO "%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n",
12151 + __func__, client, client->id, client->tx_qsize, client->rx_qsize);
12152 +
12153 +
12154 + spin_lock_bh(&pfe->hif.lock);
12155 + hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
12156 +
12157 + hif_lib_client_release_tx_buffers(client);
12158 + hif_lib_client_release_rx_buffers(client);
12159 + pfe->hif_client[client_id] = NULL;
12160 + spin_unlock_bh(&pfe->hif.lock);
12161 +
12162 + return 0;
12163 +}
12164 +
12165 +int hif_lib_event_handler_start(struct hif_client_s *client, int event, int qno)
12166 +{
12167 + struct hif_client_rx_queue *queue = &client->rx_q[qno];
12168 + struct rx_queue_desc *desc = queue->base + queue->read_idx;
12169 +
12170 + if ((event >= HIF_EVENT_MAX) || ( qno >= HIF_CLIENT_QUEUES_MAX)) {
12171 + dbg_print_info("%s: Unsupported event : %d queue number : %d\n", __func__, event, qno);
12172 + return -1;
12173 + }
12174 +
12175 + test_and_clear_bit(qno, &client->queue_mask[event]);
12176 +
12177 + switch (event) {
12178 + case EVENT_RX_PKT_IND:
12179 + if (!(desc->ctrl & CL_DESC_OWN))
12180 + hif_lib_indicate_client(client->id, EVENT_RX_PKT_IND, qno);
12181 + break;
12182 +
12183 + case EVENT_HIGH_RX_WM:
12184 + case EVENT_TXDONE_IND:
12185 + default:
12186 + break;
12187 + }
12188 +
12189 + return 0;
12190 +}
12191 +
12192 +
12193 +/*This function gets one packet from the specified client queue
12194 + * It also refill the rx buffer */
12195 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int *ofst, unsigned int *rx_ctrl, unsigned int *desc_ctrl, void **priv_data)
12196 +{
12197 + struct hif_client_rx_queue *queue = &client->rx_q[qno];
12198 + struct rx_queue_desc *desc;
12199 + void *pkt = NULL;
12200 +
12201 + //printk(KERN_INFO "%s\n", __func__);
12202 +#if defined(CONFIG_PLATFORM_EMULATION)
12203 + printk(KERN_INFO "%s:qno:%d cid:%d desc:%p rdidx:%d \n",
12204 + __func__, qno, client->id, desc,
12205 + queue->read_idx);
12206 +#endif
12207 +
12208 + /* Following lock is to protect rx queue access from, hif_lib_event_handler_start.
12209 + * In general below lock is not required, because hif_lib_xmit_pkt and
12210 + * hif_lib_event_handler_start are called from napi poll and which is not
12211 + * re-entrant. But if some client use in different way this lock is required.
12212 + */
12213 + //spin_lock_irqsave(&client->rx_lock, flags);
12214 + desc = queue->base + queue->read_idx;
12215 + if (!(desc->ctrl & CL_DESC_OWN)) {
12216 + pkt = desc->data - pfe_pkt_headroom;
12217 +
12218 + *rx_ctrl = desc->client_ctrl;
12219 + *desc_ctrl = desc->ctrl;
12220 +
12221 + if (desc->ctrl & CL_DESC_FIRST) {
12222 + u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
12223 +
12224 + if (size) {
12225 + *len = CL_DESC_BUF_LEN(desc->ctrl) - PFE_PKT_HEADER_SZ - size;
12226 + *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ + size;
12227 + *priv_data = desc->data + PFE_PKT_HEADER_SZ;
12228 + } else {
12229 + *len = CL_DESC_BUF_LEN(desc->ctrl) - PFE_PKT_HEADER_SZ;
12230 + *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ;
12231 + *priv_data = NULL;
12232 + }
12233 +
12234 + } else {
12235 + *len = CL_DESC_BUF_LEN(desc->ctrl);
12236 + *ofst = pfe_pkt_headroom;
12237 + }
12238 +
12239 + desc->data = NULL; // Needed so we don't free a buffer/page twice on module_exit
12240 + smp_wmb();
12241 +
12242 + desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
12243 + inc_cl_idx(queue->read_idx);
12244 + }
12245 +
12246 + //spin_unlock_irqrestore(&client->rx_lock, flags);
12247 + return pkt;
12248 +}
12249 +
12250 +static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int client_id, unsigned int qno, u32 client_ctrl)
12251 +{
12252 + /* Optimize the write since the destination may be non-cacheable */
12253 + if (!((unsigned long)pkt_hdr & 0x3)) {
12254 + ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) | client_id;
12255 + } else {
12256 + ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
12257 + ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
12258 + }
12259 +}
12260 +
12261 +/*This function puts the given packet in the specific client queue */
12262 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data, unsigned int len, u32 client_ctrl, unsigned int flags, void *client_data)
12263 +{
12264 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
12265 + struct tx_queue_desc *desc = queue->base + queue->write_idx;
12266 +
12267 + //printk(KERN_INFO "%s\n",__func__);
12268 +
12269 + /* First buffer */
12270 + if (flags & HIF_FIRST_BUFFER)
12271 + {
12272 + data -= sizeof(struct hif_hdr);
12273 + len += sizeof(struct hif_hdr);
12274 +
12275 + hif_hdr_write(data, client->id, qno, client_ctrl);
12276 + }
12277 +
12278 + desc->data = client_data;
12279 + desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
12280 +
12281 + __hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
12282 +
12283 + inc_cl_idx(queue->write_idx);
12284 + queue->tx_pending++;
12285 + queue->jiffies_last_packet = jiffies;
12286 +
12287 +}
12288 +
12289 +/*This function puts the given packet in the specific client queue */
12290 +int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data, unsigned int len, u32 client_ctrl, void *client_data)
12291 +{
12292 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
12293 + struct tx_queue_desc *desc = queue->base + queue->write_idx;
12294 +
12295 + //printk(KERN_INFO "%s\n",__func__);
12296 +
12297 + if (queue->tx_pending < queue->size) {
12298 + /*Construct pkt header */
12299 +
12300 + data -= sizeof(struct hif_hdr);
12301 + len += sizeof(struct hif_hdr);
12302 +
12303 + hif_hdr_write(data, client->id, qno, client_ctrl);
12304 +
12305 + desc->data = client_data;
12306 + desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(HIF_FIRST_BUFFER | HIF_LAST_BUFFER | HIF_DATA_VALID);
12307 +
12308 + if (hif_xmit_pkt(&pfe->hif, client->id, qno, data, len))
12309 + return 1;
12310 +
12311 + inc_cl_idx(queue->write_idx);
12312 + queue->tx_pending++;
12313 + queue->jiffies_last_packet = jiffies;
12314 +
12315 + return 0;
12316 + }
12317 +
12318 + dbg_print_info("%s Tx client %d qno %d is full\n",__func__, client->id, qno);
12319 + return 1;
12320 +}
12321 +
12322 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno, unsigned int *flags, int count)
12323 +{
12324 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
12325 + struct tx_queue_desc *desc = queue->base + queue->read_idx;
12326 +
12327 + dbg_print_info("%s: qno : %d rd_indx: %d pending:%d\n",__func__, qno, queue->read_idx, queue->tx_pending);
12328 +
12329 + if (!queue->tx_pending )
12330 + return NULL;
12331 +
12332 + if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
12333 + u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID + client->id, virt_to_tmu_dmem(&tx_trans[qno]), 4));
12334 +
12335 + if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
12336 + queue->done_tmu_tx_pkts = UINT_MAX - queue->prev_tmu_tx_pkts + tmu_tx_pkts;
12337 + else
12338 + queue->done_tmu_tx_pkts = tmu_tx_pkts - queue->prev_tmu_tx_pkts;
12339 +
12340 + queue->prev_tmu_tx_pkts = tmu_tx_pkts;
12341 +
12342 + if (!queue->done_tmu_tx_pkts) {
12343 + return NULL;
12344 + }
12345 + }
12346 +
12347 + if (desc->ctrl & CL_DESC_OWN) {
12348 + hif_tx_done_process(&pfe->hif, count);
12349 +
12350 + //Check again, if packets done in tx queue.
12351 + if (desc->ctrl & CL_DESC_OWN)
12352 + return NULL;
12353 + }
12354 +
12355 + inc_cl_idx(queue->read_idx);
12356 + queue->tx_pending--;
12357 +
12358 + *flags = CL_DESC_GET_FLAGS(desc->ctrl);
12359 +
12360 + if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
12361 + queue->done_tmu_tx_pkts--;
12362 +
12363 +
12364 + return desc->data;
12365 +}
12366 +
12367 +//FIXME: TMU queues length mapping needs to be declared in shared PFE/PFE_CTRL header
12368 +static void hif_lib_tmu_credit_init(struct pfe *pfe)
12369 +{
12370 + int i, q;
12371 +
12372 + for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
12373 + for (q = 0; q < emac_txq_cnt; q++) {
12374 + pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ? DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
12375 + pfe->tmu_credit.tx_credit[i][q] = pfe->tmu_credit.tx_credit_max[i][q];
12376 + }
12377 +}
12378 +/** __hif_lib_update_credit
12379 + *
12380 + * @param[in] client hif client context
12381 + * @param[in] queue queue number in match with TMU
12382 + */
12383 +void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue)
12384 +{
12385 + unsigned int tmu_tx_packets, tmp;
12386 +
12387 + if (tx_qos) {
12388 + tmu_tx_packets = be32_to_cpu(pe_dmem_read(TMU0_ID + client->id, virt_to_tmu_dmem(&tx_trans[queue]), 4));
12389 +
12390 + // tx_packets counter overflowed
12391 + if (tmu_tx_packets > pfe->tmu_credit.tx_packets[client->id][queue]) {
12392 + tmp = UINT_MAX - tmu_tx_packets + pfe->tmu_credit.tx_packets[client->id][queue];
12393 + pfe->tmu_credit.tx_credit[client->id][queue] = pfe->tmu_credit.tx_credit_max[client->id][queue] - tmp;
12394 + }
12395 + // TMU tx <= pfe_eth tx, normal case or both OF since last time
12396 + else
12397 + pfe->tmu_credit.tx_credit[client->id][queue] = pfe->tmu_credit.tx_credit_max[client->id][queue] - (pfe->tmu_credit.tx_packets[client->id][queue] - tmu_tx_packets);
12398 + }
12399 +}
12400 +
12401 +/** hif_lib_update_credit
12402 + *
12403 + * @param[in] client hif client context
12404 + * @param[in] queue queue number in match with TMU
12405 + */
12406 +void hif_lib_update_credit(struct hif_client_s *client, unsigned int queue)
12407 +{
12408 + spin_lock_bh(&pfe->hif.tx_lock);
12409 + __hif_lib_update_credit(client, queue);
12410 + spin_unlock_bh(&pfe->hif.tx_lock);
12411 +}
12412 +
12413 +int pfe_hif_lib_init(struct pfe *pfe)
12414 +{
12415 + int rc;
12416 +
12417 + printk(KERN_INFO "%s\n", __func__);
12418 +
12419 + if (lro_mode) {
12420 + page_mode = 1;
12421 + pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
12422 + pfe_pkt_headroom = 0;
12423 + } else {
12424 + page_mode = 0;
12425 + pfe_pkt_size = PFE_PKT_SIZE;
12426 + pfe_pkt_headroom = PFE_PKT_HEADROOM;
12427 + }
12428 +
12429 + if (tx_qos)
12430 + emac_txq_cnt = EMAC_TXQ_CNT / 2;
12431 + else
12432 + emac_txq_cnt = EMAC_TXQ_CNT;
12433 +
12434 + hif_lib_tmu_credit_init(pfe);
12435 + pfe->hif.shm = &ghif_shm;
12436 + rc = pfe_hif_shm_init(pfe->hif.shm);
12437 +
12438 + return rc;
12439 +}
12440 +
12441 +
12442 +void pfe_hif_lib_exit(struct pfe *pfe)
12443 +{
12444 + printk(KERN_INFO "%s\n", __func__);
12445 +
12446 + pfe_hif_shm_clean(pfe->hif.shm);
12447 +}
12448 --- /dev/null
12449 +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
12450 @@ -0,0 +1,219 @@
12451 +/*
12452 + *
12453 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
12454 + *
12455 + * This program is free software; you can redistribute it and/or modify
12456 + * it under the terms of the GNU General Public License as published by
12457 + * the Free Software Foundation; either version 2 of the License, or
12458 + * (at your option) any later version.
12459 + *
12460 + * This program is distributed in the hope that it will be useful,
12461 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
12462 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12463 + * GNU General Public License for more details.
12464 + *
12465 + * You should have received a copy of the GNU General Public License
12466 + * along with this program; if not, write to the Free Software
12467 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
12468 + */
12469 +
12470 +#ifndef _PFE_HIF_LIB_H_
12471 +#define _PFE_HIF_LIB_H_
12472 +
12473 +#include "pfe_hif.h"
12474 +
12475 +#ifdef HIF_LIB_DEBUG
12476 +#define dbg_print_info( fmt, args...) \
12477 + printk(KERN_INFO fmt, ##args)
12478 +#else
12479 +#define dbg_print_info( fmt, args...)
12480 +#endif
12481 +
12482 +#define HIF_CL_REQ_TIMEOUT 10
12483 +
12484 +#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB)
12485 +#define GFP_DMA_PFE (GFP_DMA_NCNB | __GFP_NOWARN)
12486 +#else
12487 +#define GFP_DMA_PFE 0
12488 +#endif
12489 +
12490 +enum {
12491 + REQUEST_CL_REGISTER = 0,
12492 + REQUEST_CL_UNREGISTER,
12493 + HIF_REQUEST_MAX
12494 +};
12495 +
12496 +enum {
12497 + EVENT_HIGH_RX_WM = 0, /* Event to indicate that client rx queue is reached water mark level */
12498 + EVENT_RX_PKT_IND, /* Event to indicate that, packet received for client */
12499 + EVENT_TXDONE_IND, /* Event to indicate that, packet tx done for client */
12500 + HIF_EVENT_MAX
12501 +};
12502 +
12503 +/*structure to store client queue info */
12504 +
12505 +/*structure to store client queue info */
12506 +struct hif_client_rx_queue {
12507 + struct rx_queue_desc *base;
12508 + u32 size;
12509 + u32 read_idx;
12510 + u32 write_idx;
12511 +};
12512 +
12513 +struct hif_client_tx_queue {
12514 + struct tx_queue_desc *base;
12515 + u32 size;
12516 + u32 read_idx;
12517 + u32 write_idx;
12518 + u32 tx_pending;
12519 + unsigned long jiffies_last_packet;
12520 + u32 nocpy_flag;
12521 + u32 prev_tmu_tx_pkts;
12522 + u32 done_tmu_tx_pkts;
12523 + u32 cur_tso_hdr_p;
12524 + int tso_buf_cnt;
12525 +};
12526 +
12527 +struct hif_client_s
12528 +{
12529 + int id;
12530 + int tx_qn;
12531 + int rx_qn;
12532 + void *rx_qbase;
12533 + void *tx_qbase;
12534 + /* FIXME tx/rx_qsize fields can be removed after per queue depth is supported*/
12535 + int tx_qsize;
12536 + int rx_qsize;
12537 + int cpu_id;
12538 +
12539 +// spinlock_t rx_lock;
12540 + struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
12541 + struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
12542 + int (*event_handler)(void *priv, int event, int data);
12543 + unsigned long queue_mask[HIF_EVENT_MAX];
12544 + struct pfe *pfe;
12545 + void *priv;
12546 +};
12547 +
12548 +
12549 +/* Client specific shared memory
12550 + * It contains number of Rx/Tx queues, base addresses and queue sizes */
12551 +struct hif_client_shm {
12552 + u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */
12553 + unsigned long rx_qbase; /*Rx queue base address */
12554 + u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */
12555 + unsigned long tx_qbase; /* Tx queue base address */
12556 + u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */
12557 +};
12558 +
12559 +/*Client shared memory ctrl bit description */
12560 +#define CLIENT_CTRL_RX_Q_CNT_OFST 0
12561 +#define CLIENT_CTRL_TX_Q_CNT_OFST 8
12562 +#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) & 0xFF)
12563 +#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) & 0xFF)
12564 +
12565 +
12566 +
12567 +/*Shared memory used to communicate between HIF driver and host/client drivers
12568 + * Before starting the hif driver rx_buf_pool and rx_buf_pool_cnt should be
12569 + * initialized with host buffers and buffers count in the pool.
12570 + * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
12571 + *
12572 + */
12573 +struct hif_shm {
12574 + u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
12575 + void *rx_buf_pool[HIF_RX_DESC_NT];/*Rx buffers required to initialize HIF rx descriptors */
12576 + unsigned long gClient_status[2]; /*Global client status bit mask */
12577 + u32 hif_qfull; /*TODO Client-id that caused for the TMU3 queue stop */
12578 + u32 hif_qresume; /*TODO */
12579 + struct hif_client_shm client[HIF_CLIENTS_MAX]; /* Client specific shared memory */
12580 +};
12581 +
12582 +
12583 +#define CL_DESC_OWN (1 << 31) /* This sets ownership to HIF driver */
12584 +#define CL_DESC_LAST (1 << 30) /* This indicates last packet for multi buffers handling */
12585 +#define CL_DESC_FIRST (1 << 29) /* This indicates first packet for multi buffers handling */
12586 +#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF)
12587 +#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16)
12588 +#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF)
12589 +
12590 +struct rx_queue_desc {
12591 + void *data;
12592 + u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
12593 + u32 client_ctrl;
12594 +};
12595 +
12596 +struct tx_queue_desc {
12597 + void *data;
12598 + u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
12599 +};
12600 +
12601 +/* HIF Rx is not working properly for 2-byte aligned buffers and
12602 + * ip_header should be 4byte aligned for better performance.
12603 + * "ip_header = 64 + 6(hif_header) + 14 (MAC Header)" will be 4byte aligned.
12604 + */
12605 +#define PFE_PKT_HEADER_SZ sizeof(struct hif_hdr)
12606 +#define PFE_BUF_SIZE 2048 /* must be big enough for headroom, pkt size and skb shared info */
12607 +#define PFE_PKT_HEADROOM 128
12608 +#define SKB_SHARED_INFO_SIZE 256 /* At least sizeof(struct skb_shared_info) bytes */
12609 +
12610 +//#define PFE_PKT_SIZE 1544 /* maximum ethernet packet size */
12611 +#define PFE_PKT_SIZE (PFE_BUF_SIZE - PFE_PKT_HEADROOM - SKB_SHARED_INFO_SIZE) /* maximum ethernet packet size after reassembly offload*/
12612 +#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */
12613 +#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */
12614 +#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */
12615 +#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE + MAX_L4_HDR_SIZE)
12616 +#define MAX_WIFI_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE + 6)
12617 +#define MAX_PFE_PKT_SIZE 16380UL /* Used in page mode to clamp packet size to the maximum supported by the hif hw interface (<16KiB) */
12618 +
12619 +extern unsigned int pfe_pkt_size;
12620 +extern unsigned int pfe_pkt_headroom;
12621 +extern unsigned int page_mode;
12622 +extern unsigned int lro_mode;
12623 +extern unsigned int tx_qos;
12624 +extern unsigned int emac_txq_cnt;
12625 +
12626 +int pfe_hif_lib_init(struct pfe *pfe);
12627 +void pfe_hif_lib_exit(struct pfe *pfe);
12628 +int hif_lib_client_register(struct hif_client_s *client);
12629 +int hif_lib_client_unregister(struct hif_client_s *client);
12630 +void __hif_lib_xmit_tso_hdr(struct hif_client_s *client, unsigned int qno, u32 client_ctrl, unsigned int ip_off, unsigned int ip_id, unsigned int ip_len, unsigned int tcp_off, unsigned int tcp_seq);
12631 +void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data, unsigned int len, u32 client_ctrl, unsigned int flags, void *client_data);
12632 +int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data, unsigned int len, u32 client_ctrl, void *client_data);
12633 +void hif_lib_indicate_client(int cl_id, int event, int data);
12634 +int hif_lib_event_handler_start( struct hif_client_s *client, int event, int data );
12635 +int hif_lib_tmu_queue_start( struct hif_client_s *client, int qno );
12636 +int hif_lib_tmu_queue_stop( struct hif_client_s *client, int qno );
12637 +void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno, unsigned int *flags, int count);
12638 +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int *ofst, unsigned int *rx_ctrl, unsigned int *desc_ctrl, void **priv_data);
12639 +void hif_lib_update_credit(struct hif_client_s *client, unsigned int qno);
12640 +void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue);
12641 +void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id);
12642 +void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int enable);
12643 +static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int qno)
12644 +{
12645 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
12646 +
12647 + return (queue->size - queue->tx_pending);
12648 +}
12649 +
12650 +static inline int hif_lib_get_tx_wrIndex(struct hif_client_s *client, unsigned int qno)
12651 +{
12652 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
12653 +
12654 + return queue->write_idx;
12655 +}
12656 +
12657 +
12658 +static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int qno)
12659 +{
12660 + struct hif_client_tx_queue *queue = &client->tx_q[qno];
12661 +
12662 + return queue->tx_pending;
12663 +}
12664 +
12665 +#define hif_lib_tx_credit_avail(pfe, id, qno) pfe->tmu_credit.tx_credit[id][qno]
12666 +#define hif_lib_tx_credit_max(pfe, id, qno) pfe->tmu_credit.tx_credit_max[id][qno]
12667 +#define hif_lib_tx_credit_use(pfe, id, qno, credit) do {if (tx_qos) {pfe->tmu_credit.tx_credit[id][qno]-= credit; pfe->tmu_credit.tx_packets[id][qno]+=credit;}} while (0)
12668 +
12669 +#endif /* _PFE_HIF_LIB_H_ */
12670 --- /dev/null
12671 +++ b/drivers/staging/fsl_ppfe/pfe_hw.c
12672 @@ -0,0 +1,188 @@
12673 +/*
12674 + *
12675 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
12676 + *
12677 + * This program is free software; you can redistribute it and/or modify
12678 + * it under the terms of the GNU General Public License as published by
12679 + * the Free Software Foundation; either version 2 of the License, or
12680 + * (at your option) any later version.
12681 + *
12682 + * This program is distributed in the hope that it will be useful,
12683 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
12684 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12685 + * GNU General Public License for more details.
12686 + *
12687 + * You should have received a copy of the GNU General Public License
12688 + * along with this program; if not, write to the Free Software
12689 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
12690 + */
12691 +
12692 +#include "pfe_mod.h"
12693 +#include "pfe_hw.h"
12694 +
12695 +/* Functions to handle most of pfe hw register initialization */
12696 +
12697 +int pfe_hw_init(struct pfe *pfe, int resume)
12698 +{
12699 + CLASS_CFG class_cfg = {
12700 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
12701 + .route_table_baseaddr = pfe->ddr_phys_baseaddr + ROUTE_TABLE_BASEADDR,
12702 + .route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
12703 + };
12704 +
12705 + TMU_CFG tmu_cfg = {
12706 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
12707 + .llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
12708 + .llm_queue_len = TMU_LLM_QUEUE_LEN,
12709 + };
12710 +
12711 +#if !defined(CONFIG_UTIL_DISABLED)
12712 + UTIL_CFG util_cfg = {
12713 + .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
12714 + };
12715 +#endif
12716 +
12717 + BMU_CFG bmu1_cfg = {
12718 + .baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR + BMU1_LMEM_BASEADDR),
12719 + .count = BMU1_BUF_COUNT,
12720 + .size = BMU1_BUF_SIZE,
12721 + };
12722 +
12723 + BMU_CFG bmu2_cfg = {
12724 + .baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR),
12725 + .count = BMU2_BUF_COUNT,
12726 + .size = BMU2_BUF_SIZE,
12727 + };
12728 +
12729 + GPI_CFG egpi1_cfg = {
12730 + .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
12731 + .tmlf_txthres = EGPI1_TMLF_TXTHRES,
12732 + .aseq_len = EGPI1_ASEQ_LEN,
12733 + };
12734 +
12735 + GPI_CFG egpi2_cfg = {
12736 + .lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
12737 + .tmlf_txthres = EGPI2_TMLF_TXTHRES,
12738 + .aseq_len = EGPI2_ASEQ_LEN,
12739 + };
12740 +
12741 +#if defined(CONFIG_PLATFORM_C2000)
12742 + GPI_CFG egpi3_cfg = {
12743 + .lmem_rtry_cnt = EGPI3_LMEM_RTRY_CNT,
12744 + .tmlf_txthres = EGPI3_TMLF_TXTHRES,
12745 + .aseq_len = EGPI3_ASEQ_LEN,
12746 + };
12747 +#endif
12748 +
12749 + GPI_CFG hgpi_cfg = {
12750 + .lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
12751 + .tmlf_txthres = HGPI_TMLF_TXTHRES,
12752 + .aseq_len = HGPI_ASEQ_LEN,
12753 + };
12754 +
12755 + printk(KERN_INFO "%s\n", __func__);
12756 +
12757 +#if defined(CONFIG_PLATFORM_LS1012A) && !defined(LS1012A_PFE_RESET_WA)
12758 + /* LS1012A needs this to make PE work correctly */
12759 + writel(0x3, CLASS_PE_SYS_CLK_RATIO);
12760 + writel(0x3, TMU_PE_SYS_CLK_RATIO);
12761 + writel(0x3, UTIL_PE_SYS_CLK_RATIO);
12762 + udelay(10);
12763 +#endif
12764 +
12765 + printk(KERN_INFO "CLASS version: %x\n", readl(CLASS_VERSION));
12766 + printk(KERN_INFO "TMU version: %x\n", readl(TMU_VERSION));
12767 +
12768 + printk(KERN_INFO "BMU1 version: %x\n", readl(BMU1_BASE_ADDR + BMU_VERSION));
12769 + printk(KERN_INFO "BMU2 version: %x\n", readl(BMU2_BASE_ADDR + BMU_VERSION));
12770 +#if defined(CONFIG_PLATFORM_C2000)
12771 + printk(KERN_INFO "EMAC1 network cfg: %x\n", readl(EMAC1_BASE_ADDR + EMAC_NETWORK_CONFIG));
12772 + printk(KERN_INFO "EMAC2 network cfg: %x\n", readl(EMAC2_BASE_ADDR + EMAC_NETWORK_CONFIG));
12773 +#if !defined(CONFIG_PLATFORM_PCI)
12774 + printk(KERN_INFO "EMAC3 network cfg: %x\n", readl(EMAC3_BASE_ADDR + EMAC_NETWORK_CONFIG));
12775 +#endif
12776 +#else
12777 + //TODO print MTIP config
12778 +#endif
12779 +
12780 + printk(KERN_INFO "EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR + GPI_VERSION));
12781 + printk(KERN_INFO "EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR + GPI_VERSION));
12782 +#if !defined(CONFIG_PLATFORM_PCI) && !defined(CONFIG_PLATFORM_LS1012A)
12783 + printk(KERN_INFO "EGPI3 version: %x\n", readl(EGPI3_BASE_ADDR + GPI_VERSION));
12784 +#endif
12785 + printk(KERN_INFO "HGPI version: %x\n", readl(HGPI_BASE_ADDR + GPI_VERSION));
12786 +
12787 +#if !defined(CONFIG_PLATFORM_PCI)
12788 + printk(KERN_INFO "GPT version: %x\n", readl(CBUS_GPT_VERSION));
12789 +#endif
12790 +
12791 + printk(KERN_INFO "HIF version: %x\n", readl(HIF_VERSION));
12792 + printk(KERN_INFO "HIF NOCPY version: %x\n", readl(HIF_NOCPY_VERSION));
12793 +
12794 +#if !defined(CONFIG_UTIL_DISABLED)
12795 + printk(KERN_INFO "UTIL version: %x\n", readl(UTIL_VERSION));
12796 +#endif
12797 + while(!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE)) ;
12798 +
12799 + hif_rx_disable();
12800 + hif_tx_disable();
12801 +
12802 + bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
12803 +
12804 + printk(KERN_INFO "bmu_init(1) done\n");
12805 +
12806 + bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
12807 +
12808 + printk(KERN_INFO "bmu_init(2) done\n");
12809 +
12810 + class_cfg.resume = resume ? 1 : 0;
12811 +
12812 + class_init(&class_cfg);
12813 +
12814 + printk(KERN_INFO "class_init() done\n");
12815 +
12816 + tmu_init(&tmu_cfg);
12817 +
12818 + printk(KERN_INFO "tmu_init() done\n");
12819 +#if !defined(CONFIG_UTIL_DISABLED)
12820 + util_init(&util_cfg);
12821 +
12822 + printk(KERN_INFO "util_init() done\n");
12823 +#endif
12824 + gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
12825 +
12826 + printk(KERN_INFO "gpi_init(1) done\n");
12827 +
12828 + gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
12829 +
12830 + printk(KERN_INFO "gpi_init(2) done\n");
12831 +#if !defined(CONFIG_PLATFORM_PCI) && !defined(CONFIG_PLATFORM_LS1012A)
12832 + gpi_init(EGPI3_BASE_ADDR, &egpi3_cfg);
12833 +
12834 + printk(KERN_INFO "gpi_init(3) done\n");
12835 +#endif
12836 + gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
12837 +
12838 + printk(KERN_INFO "gpi_init(hif) done\n");
12839 +
12840 + bmu_enable(BMU1_BASE_ADDR);
12841 +
12842 + printk(KERN_INFO "bmu_enable(1) done\n");
12843 +
12844 + bmu_enable(BMU2_BASE_ADDR);
12845 +
12846 + printk(KERN_INFO "bmu_enable(2) done\n");
12847 +
12848 + return 0;
12849 +}
12850 +
12851 +void pfe_hw_exit(struct pfe *pfe)
12852 +{
12853 + printk(KERN_INFO "%s\n", __func__);
12854 +
12855 + bmu_disable(BMU1_BASE_ADDR);
12856 + bmu_reset(BMU1_BASE_ADDR);
12857 +
12858 + bmu_disable(BMU2_BASE_ADDR);
12859 + bmu_reset(BMU2_BASE_ADDR);
12860 +}
12861 --- /dev/null
12862 +++ b/drivers/staging/fsl_ppfe/pfe_hw.h
12863 @@ -0,0 +1,32 @@
12864 +/*
12865 + *
12866 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
12867 + *
12868 + * This program is free software; you can redistribute it and/or modify
12869 + * it under the terms of the GNU General Public License as published by
12870 + * the Free Software Foundation; either version 2 of the License, or
12871 + * (at your option) any later version.
12872 + *
12873 + * This program is distributed in the hope that it will be useful,
12874 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
12875 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12876 + * GNU General Public License for more details.
12877 + *
12878 + * You should have received a copy of the GNU General Public License
12879 + * along with this program; if not, write to the Free Software
12880 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
12881 + */
12882 +
12883 +#ifndef _PFE_HW_H_
12884 +#define _PFE_HW_H_
12885 +
12886 +#if !defined(CONFIG_PLATFORM_PCI)
12887 +#define PE_SYS_CLK_RATIO 1 /* SYS/AXI = 250MHz, HFE = 500MHz */
12888 +#else
12889 +#define PE_SYS_CLK_RATIO 0 /* SYS = 40MHz, HFE = 40MHz */
12890 +#endif
12891 +
12892 +int pfe_hw_init(struct pfe *pfe, int resume);
12893 +void pfe_hw_exit(struct pfe *pfe);
12894 +
12895 +#endif /* _PFE_HW_H_ */
12896 --- /dev/null
12897 +++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
12898 @@ -0,0 +1,341 @@
12899 +/*
12900 + *
12901 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
12902 + *
12903 + * This program is free software; you can redistribute it and/or modify
12904 + * it under the terms of the GNU General Public License as published by
12905 + * the Free Software Foundation; either version 2 of the License, or
12906 + * (at your option) any later version.
12907 + *
12908 + * This program is distributed in the hope that it will be useful,
12909 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
12910 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12911 + * GNU General Public License for more details.
12912 + *
12913 + * You should have received a copy of the GNU General Public License
12914 + * along with this program; if not, write to the Free Software
12915 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
12916 + */
12917 +
12918 +#include <linux/module.h>
12919 +#include <linux/device.h>
12920 +#include <linux/of_net.h>
12921 +#include <linux/of_address.h>
12922 +#include <linux/platform_device.h>
12923 +#include <linux/slab.h>
12924 +#include <linux/clk.h>
12925 +#include <linux/mfd/syscon.h>
12926 +#include <linux/regmap.h>
12927 +
12928 +
12929 +#include "pfe_mod.h"
12930 +
12931 +struct comcerto_pfe_platform_data pfe_platform_data;
12932 +
12933 +
12934 +
12935 +static int pfe_get_gemac_if_proprties(struct device_node *parent, int port, int if_cnt,
12936 + struct comcerto_pfe_platform_data *pdata)
12937 +{
12938 + struct device_node *gem = NULL, *phy = NULL;
12939 + int size;
12940 + int ii = 0, phy_id = 0;
12941 + const u32 *addr;
12942 + const void *mac_addr;
12943 +
12944 + for (ii = 0; ii < if_cnt; ii++) {
12945 + gem = of_get_next_child(parent, gem);
12946 + if (!gem)
12947 + goto err;
12948 + addr = of_get_property(gem, "reg", &size);
12949 + if (addr && (be32_to_cpup(addr) == port))
12950 + break;
12951 + }
12952 +
12953 + if (ii >= if_cnt) {
12954 + printk(KERN_ERR "%s:%d Failed to find interface = %d\n", __func__, __LINE__, if_cnt);
12955 + goto err;
12956 + }
12957 +
12958 + pdata->comcerto_eth_pdata[port].gem_id = port;
12959 +
12960 + mac_addr = of_get_mac_address(gem);
12961 +
12962 + if (mac_addr) {
12963 + memcpy(pdata->comcerto_eth_pdata[port].mac_addr, mac_addr, ETH_ALEN);
12964 + }
12965 +
12966 + if ((pdata->comcerto_eth_pdata[port].mii_config = of_get_phy_mode(gem)) < 0)
12967 + printk(KERN_ERR "%s:%d Incorrect Phy mode....\n", __func__, __LINE__);
12968 +
12969 +
12970 + addr = of_get_property(gem, "fsl,gemac-bus-id", &size);
12971 + if (!addr)
12972 + printk(KERN_ERR "%s:%d Invalid gemac-bus-id....\n", __func__, __LINE__);
12973 + else
12974 + pdata->comcerto_eth_pdata[port].bus_id = be32_to_cpup(addr);
12975 +
12976 + addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
12977 + if (!addr)
12978 + printk(KERN_ERR "%s:%d Invalid gemac-phy-id....\n", __func__, __LINE__);
12979 + else
12980 + phy_id = pdata->comcerto_eth_pdata[port].phy_id = be32_to_cpup(addr);
12981 +
12982 + addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
12983 + if (!addr)
12984 + printk(KERN_ERR "%s: Invalid mdio-mux-val....\n", __func__);
12985 + else
12986 + phy_id = pdata->comcerto_eth_pdata[port].mdio_muxval= be32_to_cpup(addr);
12987 +
12988 +
12989 + addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
12990 + if (!addr)
12991 + printk(KERN_ERR "%s:%d Invalid pfe-phy-if-flags....\n", __func__, __LINE__);
12992 + else
12993 + pdata->comcerto_eth_pdata[port].phy_flags = be32_to_cpup(addr);
12994 +
12995 + addr = of_get_property(gem, "fsl,pfe-gemac-mode", &size);
12996 + if (!addr)
12997 + printk(KERN_ERR "%s:%d Invalid pfe-gemac-mode....\n", __func__, __LINE__);
12998 + else
12999 + pdata->comcerto_eth_pdata[port].gemac_mode = be32_to_cpup(addr);
13000 +
13001 +
13002 + /* If PHY is enabled, read mdio properties */
13003 + if (pdata->comcerto_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
13004 + goto done;
13005 +
13006 + phy = of_get_next_child(gem, NULL);
13007 +
13008 + addr = of_get_property(phy, "reg", &size);
13009 +
13010 + if (!addr)
13011 + printk(KERN_ERR "%s:%d Invalid phy enable flag....\n", __func__, __LINE__);
13012 + else
13013 + pdata->comcerto_mdio_pdata[port].enabled = be32_to_cpup(addr);
13014 +
13015 + addr = of_get_property (phy, "fsl,mdio-phy-mask", &size);
13016 + if (!addr)
13017 + printk(KERN_ERR "%s:%d Unable to read mdio-phy-mask....\n", __func__, __LINE__);
13018 + else
13019 + pdata->comcerto_mdio_pdata[port].phy_mask= be32_to_cpup(addr);
13020 + pdata->comcerto_mdio_pdata[port].irq[0] = PHY_POLL;
13021 +
13022 +done:
13023 +
13024 + return 0;
13025 +
13026 +err:
13027 + return -1;
13028 +}
13029 +/**
13030 + * pfe_platform_probe -
13031 + *
13032 + *
13033 + */
13034 +static int pfe_platform_probe(struct platform_device *pdev)
13035 +{
13036 + struct resource res;
13037 + int ii, rc, interface_count = 0, size = 0;
13038 + const u32 *prop;
13039 + struct device_node *np;
13040 +
13041 + np = pdev->dev.of_node;
13042 +
13043 + if (!np) {
13044 + printk(KERN_ERR "Invalid device node\n");
13045 + return -EINVAL;
13046 + }
13047 +
13048 + pfe = kzalloc(sizeof(struct pfe), GFP_KERNEL);
13049 + if (!pfe) {
13050 + rc = -ENOMEM;
13051 + goto err_alloc;
13052 + }
13053 +
13054 + platform_set_drvdata(pdev, pfe);
13055 +
13056 + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
13057 +
13058 + if (of_address_to_resource(np, 2, &res))
13059 + {
13060 + rc = -ENOMEM;
13061 + printk(KERN_ERR "failed to get ddr resource\n");
13062 + goto err_ddr;
13063 + }
13064 +
13065 +
13066 + pfe->ddr_phys_baseaddr = res.start;
13067 + pfe->ddr_size = resource_size(&res);
13068 +
13069 + //pfe->ddr_baseaddr = ioremap(res.start, resource_size(&res));
13070 + pfe->ddr_baseaddr = phys_to_virt(res.start);
13071 + if (!pfe->ddr_baseaddr) {
13072 + printk(KERN_ERR "ioremap() ddr failed\n");
13073 + rc = -ENOMEM;
13074 + goto err_ddr;
13075 + }
13076 +
13077 + /*printk("%s:%d : DDR Res : Phy addr:len = %x:%x Mapped addr : %x\n", __func__, __LINE__,
13078 + pfe->ddr_phys_baseaddr, pfe->ddr_size, pfe->ddr_baseaddr);*/
13079 +
13080 + pfe->scfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,"fsl,pfe-scfg");
13081 + if (IS_ERR(pfe->scfg)) {
13082 + dev_err(&pdev->dev, "No syscfg phandle specified\n");
13083 + return PTR_ERR(pfe->scfg);
13084 + }
13085 + /*printk("%s scfg %p\n",__func__,pfe->scfg);*/
13086 +
13087 +
13088 +#if 1
13089 + if (!(pfe->cbus_baseaddr = of_iomap(np, 1)))
13090 + {
13091 + rc = -ENOMEM;
13092 + printk(KERN_ERR "failed to get axi resource\n");
13093 + goto err_axi;
13094 + }
13095 +
13096 + /*printk("%s:%d : AXI Mapped addr : %lx\n", __func__, __LINE__, pfe->cbus_baseaddr);
13097 + printk("%s:%d : AXI Mapped addr : phys %lx\n", __func__, __LINE__, virt_to_phys(pfe->cbus_baseaddr));*/
13098 +#else
13099 +
13100 + if (of_address_to_resource(np, 1, &res))
13101 + {
13102 + rc = -ENOMEM;
13103 + printk(KERN_ERR "failed to get AXI resource\n");
13104 + goto err_iram;
13105 + }
13106 + pfe->cbus_baseaddr = ioremap(res.start, resource_size(&res));
13107 + if (!pfe->cbus_baseaddr) {
13108 + printk(KERN_INFO "ioremap() AXI failed %lx %x\n", res.start, resource_size(&res));
13109 + rc = -ENOMEM;
13110 + goto err_iram;
13111 + }
13112 + printk("%s:%d : AXI Mapped addr : %x PHY addr = %x\n", __func__, __LINE__, pfe->cbus_baseaddr, res.start);
13113 +#endif
13114 +
13115 + pfe->hif_irq = platform_get_irq(pdev, 0);
13116 + if (pfe->hif_irq < 0) {
13117 + printk(KERN_ERR "platform_get_irq(0) (hif) failed\n");
13118 + rc = pfe->hif_irq;
13119 + goto err_hif_irq;
13120 + }
13121 + /*printk("hif_irq: %d \n", pfe->hif_irq);*/
13122 +
13123 + /* Read interface count */
13124 + prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
13125 + if (!prop) {
13126 + printk(KERN_ERR "Failed to read number of interfaces\n");
13127 + rc = -ENXIO;
13128 + goto err_prop;
13129 + }
13130 +
13131 + interface_count = be32_to_cpup(prop);
13132 + /*printk(KERN_INFO "%s:%d Number of interfaces : %d\n", __func__, __LINE__, interface_count);*/
13133 + if (interface_count <= 0) {
13134 + printk(KERN_ERR "No ethernet interface count : %d\n", interface_count);
13135 + rc = -ENXIO;
13136 + goto err_prop;
13137 + }
13138 +
13139 + for (ii = 0; ii < interface_count; ii++) {
13140 + pfe_get_gemac_if_proprties(np, ii, interface_count, &pfe_platform_data);
13141 + }
13142 +
13143 +
13144 + pfe->dev = &pdev->dev;
13145 +
13146 + pfe->dev->platform_data = &pfe_platform_data;
13147 +
13148 + //FIXME get the correct clock from dts
13149 + pfe->ctrl.sys_clk = 250000; // save sys_clk value as KHz
13150 +
13151 + rc = pfe_probe(pfe);
13152 + if (rc < 0)
13153 + goto err_probe;
13154 +
13155 + return 0;
13156 +
13157 +err_probe:
13158 +err_prop:
13159 + /*TODO complete the code */
13160 +err_hif_irq:
13161 + iounmap(pfe->cbus_baseaddr);
13162 +
13163 +err_axi:
13164 + iounmap(pfe->ddr_baseaddr);
13165 +
13166 +err_ddr:
13167 + platform_set_drvdata(pdev, NULL);
13168 +
13169 + kfree(pfe);
13170 +
13171 +err_alloc:
13172 + return rc;
13173 +}
13174 +
13175 +
13176 +/**
13177 + * pfe_platform_remove -
13178 + *
13179 + *
13180 + */
13181 +static int pfe_platform_remove(struct platform_device *pdev)
13182 +{
13183 + struct pfe *pfe = platform_get_drvdata(pdev);
13184 + int rc;
13185 +
13186 + printk(KERN_INFO "%s\n", __func__);
13187 +
13188 + rc = pfe_remove(pfe);
13189 +
13190 + iounmap(pfe->cbus_baseaddr);
13191 + iounmap(pfe->ddr_baseaddr);
13192 +
13193 + platform_set_drvdata(pdev, NULL);
13194 +
13195 + kfree(pfe);
13196 +
13197 + return rc;
13198 +}
13199 +
13200 +static struct of_device_id pfe_match[] = {
13201 + {
13202 + .compatible = "fsl,pfe",
13203 + },
13204 + {},
13205 +};
13206 +MODULE_DEVICE_TABLE(of, pfe_match);
13207 +
13208 +static struct platform_driver pfe_platform_driver = {
13209 + .probe = pfe_platform_probe,
13210 + .remove = pfe_platform_remove,
13211 + .driver = {
13212 + .name = "pfe",
13213 + .of_match_table = pfe_match,
13214 + },
13215 +};
13216 +
13217 +#if 0
13218 +static int __init pfe_module_init(void)
13219 +{
13220 + printk(KERN_INFO "%s\n", __func__);
13221 +
13222 + return platform_driver_register(&pfe_platform_driver);
13223 +}
13224 +
13225 +
13226 +static void __exit pfe_module_exit(void)
13227 +{
13228 + platform_driver_unregister(&pfe_platform_driver);
13229 +
13230 + printk(KERN_INFO "%s\n", __func__);
13231 +}
13232 +module_init(pfe_module_init);
13233 +module_exit(pfe_module_exit);
13234 +#endif
13235 +
13236 +module_platform_driver(pfe_platform_driver);
13237 +MODULE_LICENSE("GPL");
13238 +MODULE_DESCRIPTION("PFE Ethernet driver");
13239 +MODULE_AUTHOR("NXP DNCPE");
13240 --- /dev/null
13241 +++ b/drivers/staging/fsl_ppfe/pfe_mod.c
13242 @@ -0,0 +1,140 @@
13243 +/*
13244 + *
13245 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
13246 + *
13247 + * This program is free software; you can redistribute it and/or modify
13248 + * it under the terms of the GNU General Public License as published by
13249 + * the Free Software Foundation; either version 2 of the License, or
13250 + * (at your option) any later version.
13251 + *
13252 + * This program is distributed in the hope that it will be useful,
13253 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
13254 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13255 + * GNU General Public License for more details.
13256 + *
13257 + * You should have received a copy of the GNU General Public License
13258 + * along with this program; if not, write to the Free Software
13259 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13260 + */
13261 +
13262 +#include <linux/dma-mapping.h>
13263 +#include "pfe_mod.h"
13264 +
13265 +struct pfe *pfe;
13266 +
13267 +/**
13268 + * pfe_probe -
13269 + *
13270 + *
13271 + */
13272 +int pfe_probe(struct pfe *pfe)
13273 +{
13274 + int rc;
13275 +
13276 +
13277 + if (DDR_MAX_SIZE > pfe->ddr_size) {
13278 + printk(KERN_ERR "%s: required DDR memory (%x) above platform ddr memory (%x)\n", __func__, DDR_MAX_SIZE, pfe->ddr_size);
13279 + rc = -ENOMEM;
13280 + goto err_hw;
13281 + }
13282 +
13283 + if (((int) (pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) & (8*SZ_1M - 1)) != 0) {
13284 + printk(KERN_ERR "%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n", __func__, (int) pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR);
13285 + rc = -ENOMEM;
13286 + goto err_hw;
13287 + }
13288 +
13289 +
13290 + printk(KERN_INFO "cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
13291 + (unsigned long)pfe->cbus_baseaddr, (unsigned long)pfe->ddr_baseaddr,
13292 + pfe->ddr_phys_baseaddr, pfe->ddr_size);
13293 +
13294 + pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr, pfe->ddr_phys_baseaddr, pfe->ddr_size);
13295 +
13296 + rc = pfe_hw_init(pfe, 0);
13297 + if (rc < 0)
13298 + goto err_hw;
13299 +
13300 + rc = pfe_hif_lib_init(pfe);
13301 + if (rc < 0)
13302 + goto err_hif_lib;
13303 +
13304 + rc = pfe_hif_init(pfe);
13305 + if (rc < 0)
13306 + goto err_hif;
13307 +
13308 + rc = pfe_firmware_init(pfe);
13309 + if (rc < 0)
13310 + goto err_firmware;
13311 +
13312 + rc = pfe_ctrl_init(pfe);
13313 + if (rc < 0)
13314 + goto err_ctrl;
13315 +
13316 + rc = pfe_eth_init(pfe);
13317 + if (rc < 0)
13318 + goto err_eth;
13319 +
13320 + rc = pfe_sysfs_init(pfe);
13321 + if(rc < 0)
13322 + goto err_sysfs;
13323 +
13324 + rc = pfe_debugfs_init(pfe);
13325 + if (rc < 0)
13326 + goto err_debugfs;
13327 +
13328 + return 0;
13329 +
13330 +err_debugfs:
13331 + pfe_sysfs_exit(pfe);
13332 +
13333 +err_sysfs:
13334 + pfe_eth_exit(pfe);
13335 +
13336 +err_eth:
13337 + pfe_ctrl_exit(pfe);
13338 +
13339 +err_ctrl:
13340 + pfe_firmware_exit(pfe);
13341 +
13342 +err_firmware:
13343 + pfe_hif_exit(pfe);
13344 +
13345 +err_hif:
13346 + pfe_hif_lib_exit(pfe);
13347 +
13348 +err_hif_lib:
13349 + pfe_hw_exit(pfe);
13350 +
13351 +err_hw:
13352 + return rc;
13353 +}
13354 +
13355 +
13356 +/**
13357 + * pfe_remove -
13358 + *
13359 + *
13360 + */
13361 +int pfe_remove(struct pfe *pfe)
13362 +{
13363 + printk(KERN_INFO "%s\n", __func__);
13364 +
13365 + pfe_debugfs_exit(pfe);
13366 +
13367 + pfe_sysfs_exit(pfe);
13368 +
13369 + pfe_eth_exit(pfe);
13370 +
13371 + pfe_ctrl_exit(pfe);
13372 +
13373 + pfe_firmware_exit(pfe);
13374 +
13375 + pfe_hif_exit(pfe);
13376 +
13377 + pfe_hif_lib_exit(pfe);
13378 +
13379 + pfe_hw_exit(pfe);
13380 +
13381 + return 0;
13382 +}
13383 --- /dev/null
13384 +++ b/drivers/staging/fsl_ppfe/pfe_mod.h
13385 @@ -0,0 +1,163 @@
13386 +/*
13387 + *
13388 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
13389 + *
13390 + * This program is free software; you can redistribute it and/or modify
13391 + * it under the terms of the GNU General Public License as published by
13392 + * the Free Software Foundation; either version 2 of the License, or
13393 + * (at your option) any later version.
13394 + *
13395 + * This program is distributed in the hope that it will be useful,
13396 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
13397 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13398 + * GNU General Public License for more details.
13399 + *
13400 + * You should have received a copy of the GNU General Public License
13401 + * along with this program; if not, write to the Free Software
13402 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13403 + */
13404 +
13405 +#ifndef _PFE_MOD_H_
13406 +#define _PFE_MOD_H_
13407 +
13408 +#include <linux/device.h>
13409 +#include <linux/elf.h>
13410 +
13411 +struct pfe;
13412 +
13413 +#include "config.h"
13414 +#include "pfe_hw.h"
13415 +#include "pfe_firmware.h"
13416 +#include "pfe_ctrl.h"
13417 +#include "pfe_hif.h"
13418 +#include "pfe_hif_lib.h"
13419 +#include "pfe_eth.h"
13420 +#include "pfe_sysfs.h"
13421 +#include "pfe_perfmon.h"
13422 +#include "pfe_debugfs.h"
13423 +
13424 +struct pfe_tmu_credit {
13425 + /* Number of allowed TX packet in-flight, matches TMU queue size */
13426 + unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
13427 + unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
13428 + unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
13429 +};
13430 +
13431 +struct pfe {
13432 + struct regmap *scfg;
13433 + unsigned long ddr_phys_baseaddr;
13434 + void *ddr_baseaddr;
13435 + unsigned int ddr_size;
13436 + void *cbus_baseaddr;
13437 + void *apb_baseaddr;
13438 + unsigned long iram_phys_baseaddr;
13439 + void *iram_baseaddr;
13440 + unsigned long ipsec_phys_baseaddr;
13441 + void *ipsec_baseaddr;
13442 + int hif_irq;
13443 + int hif_client_irq;
13444 + struct device *dev;
13445 + struct dentry *dentry;
13446 + struct pfe_ctrl ctrl;
13447 + struct pfe_hif hif;
13448 + struct pfe_eth eth;
13449 + struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
13450 +#if defined(CFG_DIAGS)
13451 + struct pfe_diags diags;
13452 +#endif
13453 + struct pfe_tmu_credit tmu_credit;
13454 + struct pfe_cpumon cpumon;
13455 + struct pfe_memmon memmon;
13456 + int wake;
13457 + struct clk * hfe_clock;
13458 +};
13459 +
13460 +extern struct pfe *pfe;
13461 +
13462 +int pfe_probe(struct pfe *pfe);
13463 +int pfe_remove(struct pfe *pfe);
13464 +
13465 +#ifndef SZ_1K
13466 +#define SZ_1K 1024
13467 +#endif
13468 +
13469 +#ifndef SZ_1M
13470 +#define SZ_1M (1024 * 1024)
13471 +#endif
13472 +
13473 +/* DDR Mapping */
13474 +#if !defined(CONFIG_PLATFORM_PCI)
13475 +#define ROUTE_TABLE_BASEADDR 0
13476 +#define ROUTE_TABLE_HASH_BITS 15 /**< 32K entries */
13477 +#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) * CLASS_ROUTE_SIZE)
13478 +#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
13479 +#define BMU2_BUF_COUNT (4096 - 256) /**< This is to get a total DDR size of 12MiB */
13480 +#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT)
13481 +#define UTIL_CODE_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
13482 +#define UTIL_CODE_SIZE (128 * SZ_1K)
13483 +#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
13484 +#define UTIL_DDR_DATA_SIZE (64 * SZ_1K)
13485 +#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
13486 +#define CLASS_DDR_DATA_SIZE (32 * SZ_1K)
13487 +#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
13488 +#define TMU_DDR_DATA_SIZE (32 * SZ_1K)
13489 +#define TMU_LLM_BASEADDR (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
13490 +#define TMU_LLM_QUEUE_LEN (8 * 512) /**< Must be power of two and at least 16 * 8 = 128 bytes */
13491 +#define TMU_LLM_SIZE (4 * 16 * TMU_LLM_QUEUE_LEN) /**< (4 TMU's x 16 queues x queue_len) */
13492 +
13493 +#define DDR_MAX_SIZE (TMU_LLM_BASEADDR + TMU_LLM_SIZE)
13494 +
13495 +#else
13496 +
13497 +#define UTIL_CODE_BASEADDR 0
13498 +#if defined(CONFIG_UTIL_DISABLED)
13499 +#define UTIL_CODE_SIZE (0 * SZ_1K)
13500 +#else
13501 +#define UTIL_CODE_SIZE (8 * SZ_1K)
13502 +#endif
13503 +#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
13504 +#define UTIL_DDR_DATA_SIZE (0 * SZ_1K)
13505 +#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
13506 +#define CLASS_DDR_DATA_SIZE (0 * SZ_1K)
13507 +#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
13508 +#define TMU_DDR_DATA_SIZE (0 * SZ_1K)
13509 +#define ROUTE_TABLE_BASEADDR (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
13510 +#define ROUTE_TABLE_HASH_BITS 5 /**< 32 entries */
13511 +#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) * CLASS_ROUTE_SIZE)
13512 +#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
13513 +#define BMU2_BUF_COUNT 16
13514 +#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT)
13515 +#define TMU_LLM_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
13516 +#define TMU_LLM_QUEUE_LEN (16 * 8) /**< Must be power of two and at least 16 * 8 = 128 bytes */
13517 +#define TMU_LLM_SIZE (4 * 16 * TMU_LLM_QUEUE_LEN) /**< (4 TMU's x 16 queues x queue_len) */
13518 +#define HIF_DESC_BASEADDR (TMU_LLM_BASEADDR + TMU_LLM_SIZE)
13519 +#define HIF_RX_DESC_SIZE (16*HIF_RX_DESC_NT)
13520 +#define HIF_TX_DESC_SIZE (16*HIF_TX_DESC_NT)
13521 +#define HIF_DESC_SIZE (HIF_RX_DESC_SIZE + HIF_TX_DESC_SIZE)
13522 +#define HIF_RX_PKT_DDR_BASEADDR (HIF_DESC_BASEADDR + HIF_DESC_SIZE)
13523 +#define HIF_RX_PKT_DDR_SIZE (HIF_RX_DESC_NT * DDR_BUF_SIZE)
13524 +#define HIF_TX_PKT_DDR_BASEADDR (HIF_RX_PKT_DDR_BASEADDR + HIF_RX_PKT_DDR_SIZE)
13525 +#define HIF_TX_PKT_DDR_SIZE (HIF_TX_DESC_NT * DDR_BUF_SIZE)
13526 +#define ROUTE_BASEADDR (HIF_TX_PKT_DDR_BASEADDR + HIF_TX_PKT_DDR_SIZE)
13527 +#define ROUTE_SIZE (2 * CLASS_ROUTE_SIZE)
13528 +
13529 +#define DDR_MAX_SIZE (ROUTE_BASEADDR + ROUTE_SIZE)
13530 +
13531 +#define PFE_HOST_TO_PCI(addr) (((u32)addr)- ((u32)DDR_BASE_ADDR))
13532 +#define PFE_PCI_TO_HOST(addr) (((u32)addr)+ ((u32)DDR_BASE_ADDR))
13533 +#endif
13534 +
13535 +/* IRAM Mapping */
13536 +#define IPSEC_IRAM_BASEADDR 0
13537 +#define IPSEC_IRAM_SIZE 0x2000
13538 +
13539 +/* LMEM Mapping */
13540 +#define BMU1_LMEM_BASEADDR 0
13541 +#define BMU1_BUF_COUNT 256
13542 +#define BMU1_LMEM_SIZE (LMEM_BUF_SIZE * BMU1_BUF_COUNT)
13543 +#define IPSEC_LMEM_BASEADDR (BMU1_LMEM_BASEADDR + BMU1_LMEM_SIZE)
13544 +#define IPSEC_LMEM_SIZE (30 * 1024)
13545 +
13546 +
13547 +
13548 +#endif /* _PFE_MOD_H_ */
13549 --- /dev/null
13550 +++ b/drivers/staging/fsl_ppfe/pfe_perfmon.c
13551 @@ -0,0 +1,175 @@
13552 +/*
13553 + *
13554 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
13555 + *
13556 + * This program is free software; you can redistribute it and/or modify
13557 + * it under the terms of the GNU General Public License as published by
13558 + * the Free Software Foundation; either version 2 of the License, or
13559 + * (at your option) any later version.
13560 + *
13561 + * This program is distributed in the hope that it will be useful,
13562 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
13563 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13564 + * GNU General Public License for more details.
13565 + *
13566 + * You should have received a copy of the GNU General Public License
13567 + * along with this program; if not, write to the Free Software
13568 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13569 + */
13570 +
13571 +/* PFE performance monitoring functions */
13572 +
13573 +#include "pfe_ctrl_hal.h"
13574 +#include "pfe_perfmon.h"
13575 +
13576 +static TIMER_ENTRY cpumon_timer;
13577 +
13578 +u32 CLASS_DMEM_SH2(cpu_ticks[2]);
13579 +u32 TMU_DMEM_SH2(cpu_ticks[2]);
13580 +#if !defined(CONFIG_UTIL_DISABLED)
13581 +u32 UTIL_DMEM_SH2(cpu_ticks[2]);
13582 +#endif
13583 +
13584 +#define compute_active_pct(total_ticks, active_ticks) ((active_ticks * 100 + (total_ticks >> 1)) / total_ticks)
13585 +
13586 +static void cpumon_timer_handler(void)
13587 +{
13588 + int id;
13589 + u32 dmem_addr;
13590 + u32 ticks[2];
13591 + u32 total, active;
13592 + struct pfe_ctrl *ctrl = &pfe->ctrl;
13593 + struct pfe_cpumon *cpumon = &pfe->cpumon;
13594 +
13595 + // Process class PE's
13596 + total = active = 0;
13597 + dmem_addr = virt_to_class_dmem(&class_cpu_ticks[0]);
13598 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
13599 + {
13600 + cpumon->cpu_usage_pct[id] = 0;
13601 + if (pe_sync_stop(ctrl, (1 << id)) < 0)
13602 + continue;
13603 + ticks[0] = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
13604 + ticks[1] = be32_to_cpu(pe_dmem_read(id, dmem_addr + 4, 4));
13605 + pe_dmem_write(id, 0, dmem_addr, 4);
13606 + pe_dmem_write(id, 0, dmem_addr + 4, 4);
13607 + pe_start(ctrl, (1 << id));
13608 + ticks[0] >>= 8; // divide both values by 256, so multiply by 100 won't overflow
13609 + ticks[1] >>= 8;
13610 + total += ticks[0];
13611 + active += ticks[1];
13612 + if (ticks[0] != 0)
13613 + cpumon->cpu_usage_pct[id] = compute_active_pct(ticks[0], ticks[1]);
13614 + }
13615 + if (total != 0)
13616 + cpumon->class_usage_pct = compute_active_pct(total, active);
13617 + else
13618 + cpumon->class_usage_pct = 0;
13619 +
13620 + // Process TMU PE's
13621 + total = active = 0;
13622 + dmem_addr = virt_to_tmu_dmem(&tmu_cpu_ticks[0]);
13623 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++)
13624 + {
13625 +#if defined(CONFIG_PLATFORM_LS1012A)
13626 + if(id == TMU2_ID) continue;
13627 +#endif
13628 + cpumon->cpu_usage_pct[id] = 0;
13629 + if (pe_sync_stop(ctrl, (1 << id)) < 0)
13630 + continue;
13631 + ticks[0] = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
13632 + ticks[1] = be32_to_cpu(pe_dmem_read(id, dmem_addr + 4, 4));
13633 + pe_dmem_write(id, 0, dmem_addr, 4);
13634 + pe_dmem_write(id, 0, dmem_addr + 4, 4);
13635 + pe_start(ctrl, (1 << id));
13636 + ticks[0] >>= 8; // divide both values by 256, so multiply by 100 won't overflow
13637 + ticks[1] >>= 8;
13638 + if (ticks[0] != 0)
13639 + cpumon->cpu_usage_pct[id] = compute_active_pct(ticks[0], ticks[1]);
13640 + }
13641 +#if !defined(CONFIG_UTIL_DISABLED)
13642 + // Process Util PE
13643 + dmem_addr = virt_to_util_dmem(&util_cpu_ticks[0]);
13644 + cpumon->cpu_usage_pct[UTIL_ID] = 0;
13645 + if (pe_sync_stop(ctrl, (1 << UTIL_ID)) < 0)
13646 + return;
13647 + ticks[0] = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
13648 + ticks[1] = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr + 4, 4));
13649 + pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
13650 + pe_dmem_write(UTIL_ID, 0, dmem_addr + 4, 4);
13651 + pe_start(ctrl, (1 << UTIL_ID));
13652 + ticks[0] >>= 8; // divide both values by 256, so multiply by 100 won't overflow
13653 + ticks[1] >>= 8;
13654 + if (ticks[0] != 0)
13655 + cpumon->cpu_usage_pct[UTIL_ID] = compute_active_pct(ticks[0], ticks[1]);
13656 +#endif
13657 +}
13658 +
13659 +static int pfe_cpumon_init(struct pfe *pfe)
13660 +{
13661 + timer_init(&cpumon_timer, cpumon_timer_handler);
13662 + timer_add(&cpumon_timer, CT_CPUMON_INTERVAL);
13663 + return 0;
13664 +}
13665 +
13666 +static void pfe_cpumon_exit(struct pfe *pfe)
13667 +{
13668 + timer_del(&cpumon_timer);
13669 +}
13670 +
13671 +
13672 +/*********************************************************************************/
13673 +
13674 +// Memory monitor functions
13675 +
13676 +void * pfe_kmalloc(size_t size, int flags)
13677 +{
13678 + struct pfe_memmon *memmon = &pfe->memmon;
13679 + void *ptr;
13680 + ptr = kmalloc(size, flags);
13681 + if (ptr)
13682 + memmon->kernel_memory_allocated += ksize(ptr);
13683 + return ptr;
13684 +}
13685 +
13686 +void * pfe_kzalloc(size_t size, int flags)
13687 +{
13688 + struct pfe_memmon *memmon = &pfe->memmon;
13689 + void *ptr;
13690 + ptr = kzalloc(size, flags);
13691 + if (ptr)
13692 + memmon->kernel_memory_allocated += ksize(ptr);
13693 + return ptr;
13694 +}
13695 +
13696 +void pfe_kfree(void *ptr)
13697 +{
13698 + struct pfe_memmon *memmon = &pfe->memmon;
13699 + memmon->kernel_memory_allocated -= ksize(ptr);
13700 + kfree(ptr);
13701 +}
13702 +
13703 +static int pfe_memmon_init(struct pfe *pfe)
13704 +{
13705 + return 0;
13706 +}
13707 +
13708 +static void pfe_memmon_exit(struct pfe *pfe)
13709 +{
13710 +}
13711 +
13712 +/*********************************************************************************/
13713 +
13714 +
13715 +int pfe_perfmon_init(struct pfe *pfe)
13716 +{
13717 + pfe_cpumon_init(pfe);
13718 + pfe_memmon_init(pfe);
13719 + return 0;
13720 +}
13721 +
13722 +void pfe_perfmon_exit(struct pfe *pfe)
13723 +{
13724 + pfe_cpumon_exit(pfe);
13725 + pfe_memmon_exit(pfe);
13726 +}
13727 --- /dev/null
13728 +++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h
13729 @@ -0,0 +1,41 @@
13730 +/*
13731 + *
13732 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
13733 + *
13734 + * This program is free software; you can redistribute it and/or modify
13735 + * it under the terms of the GNU General Public License as published by
13736 + * the Free Software Foundation; either version 2 of the License, or
13737 + * (at your option) any later version.
13738 + *
13739 + * This program is distributed in the hope that it will be useful,
13740 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
13741 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13742 + * GNU General Public License for more details.
13743 + *
13744 + * You should have received a copy of the GNU General Public License
13745 + * along with this program; if not, write to the Free Software
13746 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13747 + */
13748 +
13749 +#ifndef _PFE_PERFMON_H_
13750 +#define _PFE_PERFMON_H_
13751 +
13752 +#define CT_CPUMON_INTERVAL (1 * TIMER_TICKS_PER_SEC)
13753 +
13754 +struct pfe_cpumon {
13755 + u32 cpu_usage_pct[MAX_PE];
13756 + u32 class_usage_pct;
13757 +};
13758 +
13759 +struct pfe_memmon {
13760 + u32 kernel_memory_allocated;
13761 +};
13762 +
13763 +void * pfe_kmalloc(size_t size, int flags);
13764 +void * pfe_kzalloc(size_t size, int flags);
13765 +void pfe_kfree(void *ptr);
13766 +
13767 +int pfe_perfmon_init(struct pfe *pfe);
13768 +void pfe_perfmon_exit(struct pfe *pfe);
13769 +
13770 +#endif /* _PFE_PERFMON_H_ */
13771 --- /dev/null
13772 +++ b/drivers/staging/fsl_ppfe/pfe_platform.c
13773 @@ -0,0 +1,358 @@
13774 +/*
13775 + *
13776 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
13777 + *
13778 + * This program is free software; you can redistribute it and/or modify
13779 + * it under the terms of the GNU General Public License as published by
13780 + * the Free Software Foundation; either version 2 of the License, or
13781 + * (at your option) any later version.
13782 + *
13783 + * This program is distributed in the hope that it will be useful,
13784 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
13785 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13786 + * GNU General Public License for more details.
13787 + *
13788 + * You should have received a copy of the GNU General Public License
13789 + * along with this program; if not, write to the Free Software
13790 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13791 + */
13792 +
13793 +#include <linux/module.h>
13794 +#include <linux/platform_device.h>
13795 +#include <linux/slab.h>
13796 +#include <linux/clk.h>
13797 +
13798 +#include "pfe_mod.h"
13799 +
13800 +/**
13801 + * pfe_platform_probe -
13802 + *
13803 + *
13804 + */
13805 +static int pfe_platform_probe(struct platform_device *pdev)
13806 +{
13807 + struct resource *r;
13808 + int rc;
13809 + struct clk *clk_axi;
13810 +
13811 + printk(KERN_INFO "%s\n", __func__);
13812 +
13813 + pfe = kzalloc(sizeof(struct pfe), GFP_KERNEL);
13814 + if (!pfe) {
13815 + rc = -ENOMEM;
13816 + goto err_alloc;
13817 + }
13818 +
13819 + platform_set_drvdata(pdev, pfe);
13820 +
13821 + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ddr");
13822 + if (!r) {
13823 + printk(KERN_INFO "platform_get_resource_byname(ddr) failed\n");
13824 + rc = -ENXIO;
13825 + goto err_ddr;
13826 + }
13827 +
13828 + pfe->ddr_phys_baseaddr = r->start;
13829 + pfe->ddr_size = resource_size(r);
13830 +
13831 + pfe->ddr_baseaddr = ioremap(r->start, resource_size(r));
13832 + if (!pfe->ddr_baseaddr) {
13833 + printk(KERN_INFO "ioremap() ddr failed\n");
13834 + rc = -ENOMEM;
13835 + goto err_ddr;
13836 + }
13837 +
13838 + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "axi");
13839 + if (!r) {
13840 + printk(KERN_INFO "platform_get_resource_byname(axi) failed\n");
13841 + rc = -ENXIO;
13842 + goto err_axi;
13843 + }
13844 +
13845 + pfe->cbus_baseaddr = ioremap(r->start, resource_size(r));
13846 + if (!pfe->cbus_baseaddr) {
13847 + printk(KERN_INFO "ioremap() axi failed\n");
13848 + rc = -ENOMEM;
13849 + goto err_axi;
13850 + }
13851 +
13852 + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
13853 + if (!r) {
13854 + printk(KERN_INFO "platform_get_resource_byname(apb) failed\n");
13855 + rc = -ENXIO;
13856 + goto err_apb;
13857 + }
13858 +
13859 + pfe->apb_baseaddr = ioremap(r->start, resource_size(r));
13860 + if (!pfe->apb_baseaddr) {
13861 + printk(KERN_INFO "ioremap() apb failed\n");
13862 + rc = -ENOMEM;
13863 + goto err_apb;
13864 + }
13865 +
13866 + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iram");
13867 + if (!r) {
13868 + printk(KERN_INFO "platform_get_resource_byname(iram) failed\n");
13869 + rc = -ENXIO;
13870 + goto err_iram;
13871 + }
13872 +
13873 + pfe->iram_phys_baseaddr = r->start;
13874 + pfe->iram_baseaddr = ioremap(r->start, resource_size(r));
13875 + if (!pfe->iram_baseaddr) {
13876 + printk(KERN_INFO "ioremap() iram failed\n");
13877 + rc = -ENOMEM;
13878 + goto err_iram;
13879 + }
13880 +
13881 + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipsec");
13882 + if (!r) {
13883 + printk(KERN_INFO "platform_get_resource_byname(ipsec) failed\n");
13884 + rc = -ENXIO;
13885 + goto err_ipsec;
13886 + }
13887 +
13888 + pfe->ipsec_phys_baseaddr = r->start;
13889 +	/* Just map only initial 1MB, as it's enough to access espah engine
13890 + */
13891 + //pfe->ipsec_baseaddr = ioremap(r->start, resource_size(r));
13892 + pfe->ipsec_baseaddr = ioremap(r->start, 1*1024*1024);
13893 + if (!pfe->ipsec_baseaddr) {
13894 + printk(KERN_INFO "ioremap() ipsec failed\n");
13895 + rc = -ENOMEM;
13896 + goto err_ipsec;
13897 + }
13898 +
13899 + printk(KERN_INFO "ipsec: baseaddr :%x --- %x\n", (u32)pfe->ipsec_phys_baseaddr, (u32)pfe->ipsec_baseaddr);
13900 +
13901 + pfe->hif_irq = platform_get_irq_byname(pdev, "hif");
13902 + if (pfe->hif_irq < 0) {
13903 + printk(KERN_INFO "platform_get_irq_byname(hif) failed\n");
13904 + rc = pfe->hif_irq;
13905 + goto err_hif_irq;
13906 + }
13907 +
13908 +#if 0
13909 + pfe->hif_client_irq = platform_get_irq_byname(pdev, "hif_client");
13910 + if (pfe->hif_client_irq < 0) {
13911 + printk(KERN_INFO "platform_get_irq_byname(hif_client) failed\n");
13912 + rc = pfe->hif_client_irq;
13913 + goto err_hif_irq;
13914 + }
13915 +#endif
13916 +
13917 + pfe->dev = &pdev->dev;
13918 +
13919 +
13920 + /* Get the system clock */
13921 + clk_axi = clk_get(NULL,"axi");
13922 + if (IS_ERR(clk_axi)) {
13923 + printk(KERN_INFO "clk_get call failed\n");
13924 + rc = -ENXIO;
13925 + goto err_clk;
13926 + }
13927 +
13928 + /* HFE core clock */
13929 + pfe->hfe_clock = clk_get(NULL, "hfe_core");
13930 + if (IS_ERR(pfe->hfe_clock)) {
13931 + printk(KERN_INFO "clk_get call failed\n");
13932 + rc = -ENXIO;
13933 + goto err_hfe_clock;
13934 + }
13935 +
13936 + clk_disable(pfe->hfe_clock);
13937 + c2000_block_reset(COMPONENT_PFE_SYS, 1);
13938 + mdelay(1);
13939 + c2000_block_reset(COMPONENT_PFE_SYS, 0);
13940 + clk_enable(pfe->hfe_clock);
13941 +
13942 + pfe->ctrl.clk_axi = clk_axi;
13943 + pfe->ctrl.sys_clk = clk_get_rate(clk_axi) / 1000; // save sys_clk value as KHz
13944 +
13945 + rc = pfe_probe(pfe);
13946 + if (rc < 0)
13947 + goto err_probe;
13948 +
13949 + return 0;
13950 +
13951 +err_probe:
13952 + clk_put(pfe->hfe_clock);
13953 +err_hfe_clock:
13954 + clk_put(clk_axi);
13955 +err_clk:
13956 +err_hif_irq:
13957 + iounmap(pfe->ipsec_baseaddr);
13958 +err_ipsec:
13959 + iounmap(pfe->iram_baseaddr);
13960 +err_iram:
13961 + iounmap(pfe->apb_baseaddr);
13962 +
13963 +err_apb:
13964 + iounmap(pfe->cbus_baseaddr);
13965 +
13966 +err_axi:
13967 + iounmap(pfe->ddr_baseaddr);
13968 +
13969 +err_ddr:
13970 + platform_set_drvdata(pdev, NULL);
13971 +
13972 + kfree(pfe);
13973 +
13974 +err_alloc:
13975 + return rc;
13976 +}
13977 +
13978 +
13979 +/**
13980 + * pfe_platform_remove -
13981 + *
13982 + *
13983 + */
13984 +static int pfe_platform_remove(struct platform_device *pdev)
13985 +{
13986 + struct pfe *pfe = platform_get_drvdata(pdev);
13987 + int rc;
13988 +
13989 + printk(KERN_INFO "%s\n", __func__);
13990 +
13991 + rc = pfe_remove(pfe);
13992 +
13993 + c2000_block_reset(COMPONENT_PFE_SYS, 1);
13994 + clk_disable(pfe->hfe_clock);
13995 + clk_put(pfe->hfe_clock);
13996 + clk_put(pfe->ctrl.clk_axi);
13997 + iounmap(pfe->ipsec_baseaddr);
13998 + iounmap(pfe->iram_baseaddr);
13999 + iounmap(pfe->apb_baseaddr);
14000 + iounmap(pfe->cbus_baseaddr);
14001 + iounmap(pfe->ddr_baseaddr);
14002 +
14003 + platform_set_drvdata(pdev, NULL);
14004 +
14005 + kfree(pfe);
14006 +
14007 + return rc;
14008 +}
14009 +
14010 +#ifdef CONFIG_PM
14011 +
14012 +#ifdef CONFIG_PM_SLEEP
14013 +static int pfe_platform_suspend(struct device *dev)
14014 +{
14015 + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
14016 + struct net_device *netdev;
14017 + int i;
14018 +
14019 + printk(KERN_INFO "%s\n", __func__);
14020 +
14021 + pfe->wake = 0;
14022 +
14023 + for (i = 0; i < (NUM_GEMAC_SUPPORT - 1); i++ ) {
14024 + netdev = pfe->eth.eth_priv[i]->dev;
14025 +
14026 + netif_device_detach(netdev);
14027 +
14028 + if (netif_running(netdev))
14029 + if(pfe_eth_suspend(netdev))
14030 + pfe->wake =1;
14031 + }
14032 +
14033 + /* Shutdown PFE only if we're not waking up the system */
14034 + if (!pfe->wake) {
14035 + pfe_ctrl_suspend(&pfe->ctrl);
14036 + pfe_hif_exit(pfe);
14037 + pfe_hif_lib_exit(pfe);
14038 +
14039 + class_disable();
14040 + tmu_disable(0xf);
14041 +#if !defined(CONFIG_UTIL_DISABLED)
14042 + util_disable();
14043 +#endif
14044 + pfe_hw_exit(pfe);
14045 + c2000_block_reset(COMPONENT_PFE_SYS, 1);
14046 + clk_disable(pfe->hfe_clock);
14047 + }
14048 +
14049 + return 0;
14050 +}
14051 +
14052 +static int pfe_platform_resume(struct device *dev)
14053 +{
14054 + struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
14055 + struct net_device *netdev;
14056 + int i;
14057 +
14058 + printk(KERN_INFO "%s\n", __func__);
14059 +
14060 + if (!pfe->wake) {
14061 + /* Sequence follows VLSI recommendation (bug 71927) */
14062 + c2000_block_reset(COMPONENT_PFE_SYS, 1);
14063 + mdelay(1);
14064 + c2000_block_reset(COMPONENT_PFE_SYS, 0);
14065 + clk_enable(pfe->hfe_clock);
14066 +
14067 + pfe_hw_init(pfe, 1);
14068 + pfe_hif_lib_init(pfe);
14069 + pfe_hif_init(pfe);
14070 +#if !defined(CONFIG_UTIL_DISABLED)
14071 + util_enable();
14072 +#endif
14073 + tmu_enable(0xf);
14074 + class_enable();
14075 + pfe_ctrl_resume(&pfe->ctrl);
14076 + }
14077 +
14078 + for(i = 0; i < (NUM_GEMAC_SUPPORT - 1); i++) {
14079 + netdev = pfe->eth.eth_priv[i]->dev;
14080 +
14081 + if (pfe->eth.eth_priv[i]->mii_bus)
14082 + pfe_eth_mdio_reset(pfe->eth.eth_priv[i]->mii_bus);
14083 +
14084 + if (netif_running(netdev))
14085 + pfe_eth_resume(netdev);
14086 +
14087 + netif_device_attach(netdev);
14088 + }
14089 + return 0;
14090 +}
14091 +#else
14092 +#define pfe_platform_suspend NULL
14093 +#define pfe_platform_resume NULL
14094 +#endif
14095 +
14096 +static const struct dev_pm_ops pfe_platform_pm_ops = {
14097 + SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
14098 +};
14099 +
14100 +#endif
14101 +
14102 +static struct platform_driver pfe_platform_driver = {
14103 + .probe = pfe_platform_probe,
14104 + .remove = pfe_platform_remove,
14105 + .driver = {
14106 + .name = "pfe",
14107 +#ifdef CONFIG_PM
14108 + .pm = &pfe_platform_pm_ops,
14109 +#endif
14110 + },
14111 +};
14112 +
14113 +
14114 +static int __init pfe_module_init(void)
14115 +{
14116 + printk(KERN_INFO "%s\n", __func__);
14117 +
14118 + return platform_driver_register(&pfe_platform_driver);
14119 +}
14120 +
14121 +
14122 +static void __exit pfe_module_exit(void)
14123 +{
14124 + platform_driver_unregister(&pfe_platform_driver);
14125 +
14126 + printk(KERN_INFO "%s\n", __func__);
14127 +}
14128 +
14129 +MODULE_LICENSE("GPL");
14130 +module_init(pfe_module_init);
14131 +module_exit(pfe_module_exit);
14132 --- /dev/null
14133 +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
14134 @@ -0,0 +1,855 @@
14135 +/*
14136 + * (C) Copyright 2011
14137 + * Author : Freescale Semiconductor, Inc.
14138 + *
14139 + * See file CREDITS for list of people who contributed to this
14140 + * project.
14141 + *
14142 + * This program is free software; you can redistribute it and/or
14143 + * modify it under the terms of the GNU General Public License as
14144 + * published by the Free Software Foundation; either version 2 of
14145 + * the License, or (at your option) any later version.
14146 + *
14147 + * This program is distributed in the hope that it will be useful,
14148 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
14149 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14150 + * GNU General Public License for more details.
14151 + *
14152 + * You should have received a copy of the GNU General Public License
14153 + * along with this program; if not, write to the Free Software
14154 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
14155 + * MA 02111-1307 USA
14156 + * */
14157 +
14158 +#include <linux/module.h>
14159 +#include <linux/platform_device.h>
14160 +
14161 +#include "pfe_mod.h"
14162 +#include "pfe_ctrl_hal.h"
14163 +
14164 +#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
14165 +#define NUM_QUEUES 16
14166 +
14167 +static char register_name[20][5] = {
14168 + "EPC", "ECAS", "EID", "ED",
14169 + "r0", "r1", "r2", "r3",
14170 + "r4", "r5", "r6", "r7",
14171 + "r8", "r9", "r10", "r11",
14172 + "r12", "r13", "r14", "r15",
14173 +};
14174 +
14175 +static char exception_name[14][20] = {
14176 + "Reset",
14177 + "HardwareFailure",
14178 + "NMI",
14179 + "InstBreakpoint",
14180 + "DataBreakpoint",
14181 + "Unsupported",
14182 + "PrivilegeViolation",
14183 + "InstBusError",
14184 + "DataBusError",
14185 + "AlignmentError",
14186 + "ArithmeticError",
14187 + "SystemCall",
14188 + "MemoryManagement",
14189 + "Interrupt",
14190 +};
14191 +
14192 +static unsigned long class_do_clear = 0;
14193 +static unsigned long tmu_do_clear = 0;
14194 +#if !defined(CONFIG_UTIL_DISABLED)
14195 +static unsigned long util_do_clear = 0;
14196 +#endif
14197 +
14198 +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset);
14199 +
14200 +static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr, unsigned long do_clear)
14201 +{
14202 + ssize_t len = 0;
14203 + u32 val;
14204 + char statebuf[5];
14205 + struct pfe_cpumon *cpumon = &pfe->cpumon;
14206 + u32 debug_indicator;
14207 + u32 debug[20];
14208 +
14209 + *(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
14210 + dmem_addr += 4;
14211 +
14212 + statebuf[4] = '\0';
14213 + len += sprintf(buf + len, "state=%4s ", statebuf);
14214 +
14215 + val = pe_dmem_read(id, dmem_addr, 4);
14216 + dmem_addr += 4;
14217 + len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
14218 +
14219 + val = pe_dmem_read(id, dmem_addr, 4);
14220 + if (do_clear && val)
14221 + pe_dmem_write(id, 0, dmem_addr, 4);
14222 + dmem_addr += 4;
14223 + len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
14224 +
14225 + val = pe_dmem_read(id, dmem_addr, 4);
14226 + if (do_clear && val)
14227 + pe_dmem_write(id, 0, dmem_addr, 4);
14228 + dmem_addr += 4;
14229 + if (id >= TMU0_ID && id <= TMU_MAX_ID)
14230 + len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
14231 + else
14232 + len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
14233 +
14234 + val = pe_dmem_read(id, dmem_addr, 4);
14235 + if (do_clear && val)
14236 + pe_dmem_write(id, 0, dmem_addr, 4);
14237 + dmem_addr += 4;
14238 + if (val)
14239 + len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
14240 +
14241 + len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
14242 +
14243 + len += sprintf(buf + len, "\n");
14244 +
14245 + debug_indicator = pe_dmem_read(id, dmem_addr, 4);
14246 + dmem_addr += 4;
14247 + if (!strncmp((char *)&debug_indicator, "DBUG", 4))
14248 + {
14249 + int j, last = 0;
14250 + for (j = 0; j < 16; j++)
14251 + {
14252 + debug[j] = pe_dmem_read(id, dmem_addr, 4);
14253 + if (debug[j])
14254 + {
14255 + if (do_clear)
14256 + pe_dmem_write(id, 0, dmem_addr, 4);
14257 + last = j + 1;
14258 + }
14259 + dmem_addr += 4;
14260 + }
14261 + for (j = 0; j < last; j++)
14262 + {
14263 + len += sprintf(buf + len, "%08x%s", cpu_to_be32(debug[j]),
14264 + (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
14265 + }
14266 + }
14267 +
14268 + if (!strncmp(statebuf, "DEAD", 4))
14269 + {
14270 + u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
14271 +
14272 + len += sprintf(buf + len, "Exception details:\n");
14273 + for (i = 0; i < 20; i++) {
14274 + debug[i] = pe_dmem_read(id, dump, 4);
14275 + dump +=4;
14276 + if (i == 2)
14277 + len += sprintf(buf + len, "%4s = %08x (=%s) ", register_name[i], cpu_to_be32(debug[i]), exception_name[min((u32) cpu_to_be32(debug[i]), (u32)13)]);
14278 + else
14279 + len += sprintf(buf + len, "%4s = %08x%s", register_name[i], cpu_to_be32(debug[i]),
14280 + (i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
14281 + }
14282 + }
14283 +
14284 + return len;
14285 +}
14286 +
14287 +static ssize_t class_phy_stats(char *buf, int phy)
14288 +{
14289 + ssize_t len = 0;
14290 + int off1 = phy * 0x28;
14291 + int off2 = phy * 0x10;
14292 +
14293 + if (phy == 3)
14294 + off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
14295 +
14296 + len += sprintf(buf + len, "phy: %d\n", phy);
14297 + len += sprintf(buf + len, " rx: %10u, tx: %10u, intf: %10u, ipv4: %10u, ipv6: %10u\n",
14298 + readl(CLASS_PHY1_RX_PKTS + off1), readl(CLASS_PHY1_TX_PKTS + off1),
14299 + readl(CLASS_PHY1_INTF_MATCH_PKTS + off1), readl(CLASS_PHY1_V4_PKTS + off1),
14300 + readl(CLASS_PHY1_V6_PKTS + off1));
14301 +
14302 + len += sprintf(buf + len, " icmp: %10u, igmp: %10u, tcp: %10u, udp: %10u\n",
14303 + readl(CLASS_PHY1_ICMP_PKTS + off2), readl(CLASS_PHY1_IGMP_PKTS + off2),
14304 + readl(CLASS_PHY1_TCP_PKTS + off2), readl(CLASS_PHY1_UDP_PKTS + off2));
14305 +
14306 + len += sprintf(buf + len, " err\n");
14307 + len += sprintf(buf + len, " lp: %10u, intf: %10u, l3: %10u, chcksum: %10u, ttl: %10u\n",
14308 + readl(CLASS_PHY1_LP_FAIL_PKTS + off1), readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
14309 + readl(CLASS_PHY1_L3_FAIL_PKTS + off1), readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
14310 + readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
14311 +
14312 + return len;
14313 +}
14314 +
14315 +/** qm_read_drop_stat
14316 + * This function is used to read the drop statistics from the TMU
14317 + * hw drop counter. Since the hw counter is always cleared after
14318 + * reading, this function maintains the previous drop count, and
14319 + * adds the new value to it. That value can be retrieved by
14320 + * passing a pointer to it with the total_drops arg.
14321 + *
14322 + * @param tmu TMU number (0 - 3)
14323 + * @param queue queue number (0 - 15)
14324 + * @param total_drops pointer to location to store total drops (or NULL)
14325 + * @param do_reset if TRUE, clear total drops after updating
14326 + *
14327 + */
14328 +
14329 +u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
14330 +{
14331 + static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
14332 + u32 val;
14333 + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
14334 + writel((tmu << 8) | queue, TMU_LLM_CTRL);
14335 + val = readl(TMU_TEQ_DROP_STAT);
14336 + qtotal[tmu][queue] += val;
14337 + if (total_drops)
14338 + *total_drops = qtotal[tmu][queue];
14339 + if (do_reset)
14340 + qtotal[tmu][queue] = 0;
14341 + return val;
14342 +}
14343 +
14344 +static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
14345 +{
14346 + ssize_t len = 0;
14347 + u32 drops;
14348 +
14349 + len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
14350 +
14351 + drops = qm_read_drop_stat(tmu, queue, NULL, 0);
14352 +
14353 + /* Select queue */
14354 + writel((tmu << 8) | queue, TMU_TEQ_CTRL);
14355 + writel((tmu << 8) | queue, TMU_LLM_CTRL);
14356 +
14357 + len += sprintf(buf + len, "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
14358 + drops, readl(TMU_TEQ_TRANS_STAT),
14359 + readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
14360 + readl(TMU_LLM_QUE_DROPCNT));
14361 +
14362 + return len;
14363 +}
14364 +
14365 +
14366 +static ssize_t tmu_queues(char *buf, int tmu)
14367 +{
14368 + ssize_t len = 0;
14369 + int queue;
14370 +
14371 + for (queue = 0; queue < 16; queue++)
14372 + len += tmu_queue_stats(buf + len, tmu, queue);
14373 +
14374 + return len;
14375 +}
14376 +
14377 +static ssize_t tmu_ctx(char *buf, int tmu)
14378 +{
14379 + ssize_t len = 0;
14380 + int i;
14381 + u32 val, tmu_context_addr = TMU_CONTEXT_ADDR;
14382 +
14383 + len += sprintf(buf+len, " TMU %d \n", TMU0_ID+tmu);
14384 + for (i = 1; i <= 160 ; i++, tmu_context_addr += 4)
14385 + {
14386 + val = pe_dmem_read(TMU0_ID+tmu, tmu_context_addr , 4);
14387 + if (i == 5)
14388 + len += sprintf(buf+len, "\nShapers: Each shaper structure is 8 bytes and there are 10 shapers\n");
14389 +
14390 + if (i == 25)
14391 + len += sprintf(buf+len, "\nScheduler: Each scheduler structure is 48 bytes and there are 8 schedulers\n");
14392 + if (i == 121)
14393 + len += sprintf(buf+len, "\nQueue: Each queue structure is 2 bytes and there are 16 queues\n");
14394 +
14395 + if (i == 129)
14396 + len += sprintf(buf+len, "\nqlenmasks array for 16 queues\n");
14397 + if (i == 145)
14398 + len += sprintf(buf+len, "\nqresultmap array for 16 queues\n");
14399 + if (i%8 == 0)
14400 + len += sprintf(buf+len, "%08x \n", cpu_to_be32(val));
14401 + else
14402 + len += sprintf(buf+len, "%08x ", cpu_to_be32(val));
14403 + }
14404 +
14405 + len += sprintf(buf+len, "\n");
14406 +
14407 + return len;
14408 +}
14409 +
14410 +static ssize_t block_version(char *buf, void *addr)
14411 +{
14412 + ssize_t len = 0;
14413 + u32 val;
14414 +
14415 + val = readl(addr);
14416 + len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n", (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
14417 +
14418 + return len;
14419 +}
14420 +
14421 +static ssize_t bmu(char *buf, int id, void *base)
14422 +{
14423 + ssize_t len = 0;
14424 +
14425 + len += sprintf(buf + len, "bmu: %d\n ", id);
14426 +
14427 + len += block_version(buf + len, base + BMU_VERSION);
14428 +
14429 + len += sprintf(buf + len, " buf size: %x\n", (1 << readl(base + BMU_BUF_SIZE)));
14430 + len += sprintf(buf + len, " buf count: %x\n", readl(base + BMU_BUF_CNT));
14431 + len += sprintf(buf + len, " buf rem: %x\n", readl(base + BMU_REM_BUF_CNT));
14432 + len += sprintf(buf + len, " buf curr: %x\n", readl(base + BMU_CURR_BUF_CNT));
14433 + len += sprintf(buf + len, " free err: %x\n", readl(base + BMU_FREE_ERR_ADDR));
14434 +
14435 + return len;
14436 +}
14437 +
14438 +static ssize_t gpi(char *buf, int id, void *base)
14439 +{
14440 + ssize_t len = 0;
14441 + u32 val;
14442 +
14443 + len += sprintf(buf + len, "gpi%d:\n ", id);
14444 + len += block_version(buf + len, base + GPI_VERSION);
14445 +
14446 + len += sprintf(buf + len, " tx under stick: %x\n", readl(base + GPI_FIFO_STATUS));
14447 + val = readl(base + GPI_FIFO_DEBUG);
14448 + len += sprintf(buf + len, " tx pkts: %x\n", (val >> 23) & 0x3f);
14449 + len += sprintf(buf + len, " rx pkts: %x\n", (val >> 18) & 0x3f);
14450 + len += sprintf(buf + len, " tx bytes: %x\n", (val >> 9) & 0x1ff);
14451 + len += sprintf(buf + len, " rx bytes: %x\n", (val >> 0) & 0x1ff);
14452 + len += sprintf(buf + len, " overrun: %x\n", readl(base + GPI_OVERRUN_DROPCNT));
14453 +
14454 + return len;
14455 +}
14456 +
14457 +static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
14458 +{
14459 + class_do_clear = simple_strtoul(buf, NULL, 0);
14460 + return count;
14461 +}
14462 +
14463 +static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr, char *buf)
14464 +{
14465 + ssize_t len = 0;
14466 + int id;
14467 + u32 val;
14468 + struct pfe_cpumon *cpumon = &pfe->cpumon;
14469 +
14470 + len += block_version(buf + len, CLASS_VERSION);
14471 +
14472 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
14473 + {
14474 + len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
14475 +
14476 + val = readl(CLASS_PE0_DEBUG + id * 4);
14477 + len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
14478 +
14479 + len += display_pe_status(buf + len, id, PESTATUS_ADDR_CLASS, class_do_clear);
14480 + }
14481 + len += sprintf(buf + len, "aggregate load=%d%%\n\n", cpumon->class_usage_pct);
14482 +
14483 + len += sprintf(buf + len, "pe status: 0x%x\n", readl(CLASS_PE_STATUS));
14484 + len += sprintf(buf + len, "max buf cnt: 0x%x afull thres: 0x%x\n", readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
14485 + len += sprintf(buf + len, "tsq max cnt: 0x%x tsq fifo thres: 0x%x\n", readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
14486 + len += sprintf(buf + len, "state: 0x%x\n", readl(CLASS_STATE));
14487 +
14488 + len += class_phy_stats(buf + len, 0);
14489 + len += class_phy_stats(buf + len, 1);
14490 + len += class_phy_stats(buf + len, 2);
14491 + len += class_phy_stats(buf + len, 3);
14492 +
14493 + return len;
14494 +}
14495 +
14496 +static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
14497 +{
14498 + tmu_do_clear = simple_strtoul(buf, NULL, 0);
14499 + return count;
14500 +}
14501 +
14502 +static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr, char *buf)
14503 +{
14504 + ssize_t len = 0;
14505 + int id;
14506 + u32 val;
14507 +
14508 + len += block_version(buf + len, TMU_VERSION);
14509 +
14510 + for (id = TMU0_ID; id <= TMU_MAX_ID; id++)
14511 + {
14512 +#if defined(CONFIG_PLATFORM_LS1012A)
14513 + if(id == TMU2_ID) continue;
14514 +#endif
14515 + len += sprintf(buf + len, "%d: ", id - TMU0_ID);
14516 +
14517 + len += display_pe_status(buf + len, id, PESTATUS_ADDR_TMU, tmu_do_clear);
14518 + }
14519 +
14520 + len += sprintf(buf + len, "pe status: %x\n", readl(TMU_PE_STATUS));
14521 + len += sprintf(buf + len, "inq fifo cnt: %x\n", readl(TMU_PHY_INQ_FIFO_CNT));
14522 + val = readl(TMU_INQ_STAT);
14523 + len += sprintf(buf + len, "inq wr ptr: %x\n", val & 0x3ff);
14524 + len += sprintf(buf + len, "inq rd ptr: %x\n", val >> 10);
14525 +
14526 +
14527 + return len;
14528 +}
14529 +
14530 +
14531 +static unsigned long drops_do_clear = 0;
14532 +static u32 CLASS_DMEM_SH2(drop_counter)[CLASS_NUM_DROP_COUNTERS];
14533 +#if !defined(CONFIG_UTIL_DISABLED)
14534 +static u32 UTIL_DMEM_SH2(drop_counter)[UTIL_NUM_DROP_COUNTERS];
14535 +#endif
14536 +
14537 +char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
14538 + "ICC",
14539 + "Host Pkt Error",
14540 + "Rx Error",
14541 + "IPsec Outbound",
14542 + "IPsec Inbound",
14543 + "EXPT IPsec Error",
14544 + "Reassembly",
14545 + "Fragmenter",
14546 + "NAT-T",
14547 + "Socket",
14548 + "Multicast",
14549 + "NAT-PT",
14550 + "Tx Disabled",
14551 +};
14552 +
14553 +#if !defined(CONFIG_UTIL_DISABLED)
14554 +char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
14555 + "IPsec Outbound",
14556 + "IPsec Inbound",
14557 + "IPsec Rate Limiter",
14558 + "Fragmenter",
14559 + "Socket",
14560 + "Tx Disabled",
14561 + "Rx Error",
14562 +};
14563 +#endif
14564 +
14565 +static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
14566 +{
14567 + drops_do_clear = simple_strtoul(buf, NULL, 0);
14568 + return count;
14569 +}
14570 +
14571 +static u32 tmu_drops[4][16];
14572 +static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr, char *buf)
14573 +{
14574 + ssize_t len = 0;
14575 + int id, dropnum;
14576 + int tmu, queue;
14577 + u32 val;
14578 + u32 dmem_addr;
14579 + int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
14580 + struct pfe_ctrl *ctrl = &pfe->ctrl;
14581 +
14582 + memset(class_drop_counter, 0, sizeof(class_drop_counter));
14583 + for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
14584 + {
14585 + if (drops_do_clear)
14586 + pe_sync_stop(ctrl, (1 << id));
14587 + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS; dropnum++)
14588 + {
14589 + dmem_addr = virt_to_class_dmem(&class_drop_counter[dropnum]);
14590 + val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
14591 + class_drop_counter[dropnum] += val;
14592 + num_class_drops += val;
14593 + if (drops_do_clear)
14594 + pe_dmem_write(id, 0, dmem_addr, 4);
14595 + }
14596 + if (drops_do_clear)
14597 + pe_start(ctrl, (1 << id));
14598 + }
14599 +
14600 +#if !defined(CONFIG_UTIL_DISABLED)
14601 + if (drops_do_clear)
14602 + pe_sync_stop(ctrl, (1 << UTIL_ID));
14603 + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++)
14604 + {
14605 + dmem_addr = virt_to_util_dmem(&util_drop_counter[dropnum]);
14606 + val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
14607 + util_drop_counter[dropnum] = val;
14608 + num_util_drops += val;
14609 + if (drops_do_clear)
14610 + pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
14611 + }
14612 + if (drops_do_clear)
14613 + pe_start(ctrl, (1 << UTIL_ID));
14614 +#endif
14615 + for (tmu = 0; tmu < 4; tmu++)
14616 + {
14617 + for (queue = 0; queue < 16; queue++)
14618 + {
14619 + qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue], drops_do_clear);
14620 + num_tmu_drops += tmu_drops[tmu][queue];
14621 + }
14622 + }
14623 +
14624 + if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
14625 + len += sprintf(buf + len, "No PE drops\n\n");
14626 +
14627 + if (num_class_drops > 0)
14628 + {
14629 + len += sprintf(buf + len, "Class PE drops --\n");
14630 + for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS; dropnum++)
14631 + {
14632 + if (class_drop_counter[dropnum] > 0)
14633 + len += sprintf(buf + len, " %s: %d\n", class_drop_description[dropnum], class_drop_counter[dropnum]);
14634 + }
14635 + len += sprintf(buf + len, "\n");
14636 + }
14637 +
14638 +#if !defined(CONFIG_UTIL_DISABLED)
14639 + if (num_util_drops > 0)
14640 + {
14641 + len += sprintf(buf + len, "Util PE drops --\n");
14642 + for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++)
14643 + {
14644 + if (util_drop_counter[dropnum] > 0)
14645 + len += sprintf(buf + len, " %s: %d\n", util_drop_description[dropnum], util_drop_counter[dropnum]);
14646 + }
14647 + len += sprintf(buf + len, "\n");
14648 + }
14649 +#endif
14650 + if (num_tmu_drops > 0)
14651 + {
14652 + len += sprintf(buf + len, "TMU drops --\n");
14653 + for (tmu = 0; tmu < 4; tmu++)
14654 + {
14655 + for (queue = 0; queue < 16; queue++)
14656 + {
14657 + if (tmu_drops[tmu][queue] > 0)
14658 + len += sprintf(buf + len, " TMU%d-Q%d: %d\n", tmu, queue, tmu_drops[tmu][queue]);
14659 + }
14660 + }
14661 + len += sprintf(buf + len, "\n");
14662 + }
14663 +
14664 + return len;
14665 +}
14666 +
14667 +static ssize_t pfe_show_tmu0_queues(struct device *dev, struct device_attribute *attr, char *buf)
14668 +{
14669 + return tmu_queues(buf, 0);
14670 +}
14671 +
14672 +static ssize_t pfe_show_tmu1_queues(struct device *dev, struct device_attribute *attr, char *buf)
14673 +{
14674 + return tmu_queues(buf, 1);
14675 +}
14676 +
14677 +static ssize_t pfe_show_tmu2_queues(struct device *dev, struct device_attribute *attr, char *buf)
14678 +{
14679 + return tmu_queues(buf, 2);
14680 +}
14681 +
14682 +static ssize_t pfe_show_tmu3_queues(struct device *dev, struct device_attribute *attr, char *buf)
14683 +{
14684 + return tmu_queues(buf, 3);
14685 +}
14686 +
14687 +static ssize_t pfe_show_tmu0_ctx(struct device *dev, struct device_attribute *attr, char *buf)
14688 +{
14689 + return tmu_ctx(buf, 0);
14690 +}
14691 +static ssize_t pfe_show_tmu1_ctx(struct device *dev, struct device_attribute *attr, char *buf)
14692 +{
14693 + return tmu_ctx(buf, 1);
14694 +}
14695 +static ssize_t pfe_show_tmu2_ctx(struct device *dev, struct device_attribute *attr, char *buf)
14696 +{
14697 + return tmu_ctx(buf, 2);
14698 +}
14699 +
14700 +static ssize_t pfe_show_tmu3_ctx(struct device *dev, struct device_attribute *attr, char *buf)
14701 +{
14702 + return tmu_ctx(buf, 3);
14703 +}
14704 +
14705 +
14706 +#if !defined(CONFIG_UTIL_DISABLED)
14707 +static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
14708 +{
14709 + util_do_clear = simple_strtoul(buf, NULL, 0);
14710 + return count;
14711 +}
14712 +
14713 +static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr, char *buf)
14714 +{
14715 + ssize_t len = 0;
14716 + struct pfe_ctrl *ctrl = &pfe->ctrl;
14717 +
14718 +
14719 + len += block_version(buf + len, UTIL_VERSION);
14720 +
14721 + pe_sync_stop(ctrl, (1 << UTIL_ID));
14722 + len += display_pe_status(buf + len, UTIL_ID, PESTATUS_ADDR_UTIL, util_do_clear);
14723 + pe_start(ctrl, (1 << UTIL_ID));
14724 +
14725 + len += sprintf(buf + len, "pe status: %x\n", readl(UTIL_PE_STATUS));
14726 + len += sprintf(buf + len, "max buf cnt: %x\n", readl(UTIL_MAX_BUF_CNT));
14727 + len += sprintf(buf + len, "tsq max cnt: %x\n", readl(UTIL_TSQ_MAX_CNT));
14728 +
14729 + return len;
14730 +}
14731 +#endif
14732 +
14733 +static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr, char *buf)
14734 +{
14735 + ssize_t len = 0;
14736 +
14737 + len += bmu(buf + len, 1, BMU1_BASE_ADDR);
14738 + len += bmu(buf + len, 2, BMU2_BASE_ADDR);
14739 +
14740 + return len;
14741 +}
14742 +
14743 +static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr, char *buf)
14744 +{
14745 + ssize_t len = 0;
14746 +
14747 + len += sprintf(buf + len, "hif:\n ");
14748 + len += block_version(buf + len, HIF_VERSION);
14749 +
14750 + len += sprintf(buf + len, " tx curr bd: %x\n", readl(HIF_TX_CURR_BD_ADDR));
14751 + len += sprintf(buf + len, " tx status: %x\n", readl(HIF_TX_STATUS));
14752 + len += sprintf(buf + len, " tx dma status: %x\n", readl(HIF_TX_DMA_STATUS));
14753 +
14754 + len += sprintf(buf + len, " rx curr bd: %x\n", readl(HIF_RX_CURR_BD_ADDR));
14755 + len += sprintf(buf + len, " rx status: %x\n", readl(HIF_RX_STATUS));
14756 + len += sprintf(buf + len, " rx dma status: %x\n", readl(HIF_RX_DMA_STATUS));
14757 +
14758 + len += sprintf(buf + len, "hif nocopy:\n ");
14759 + len += block_version(buf + len, HIF_NOCPY_VERSION);
14760 +
14761 + len += sprintf(buf + len, " tx curr bd: %x\n", readl(HIF_NOCPY_TX_CURR_BD_ADDR));
14762 + len += sprintf(buf + len, " tx status: %x\n", readl(HIF_NOCPY_TX_STATUS));
14763 + len += sprintf(buf + len, " tx dma status: %x\n", readl(HIF_NOCPY_TX_DMA_STATUS));
14764 +
14765 + len += sprintf(buf + len, " rx curr bd: %x\n", readl(HIF_NOCPY_RX_CURR_BD_ADDR));
14766 + len += sprintf(buf + len, " rx status: %x\n", readl(HIF_NOCPY_RX_STATUS));
14767 + len += sprintf(buf + len, " rx dma status: %x\n", readl(HIF_NOCPY_RX_DMA_STATUS));
14768 +
14769 + return len;
14770 +}
14771 +
14772 +
14773 +static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr, char *buf)
14774 +{
14775 + ssize_t len = 0;
14776 +
14777 + len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
14778 + len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
14779 +#if !defined(CONFIG_PLATFORM_LS1012A)
14780 + len += gpi(buf + len, 2, EGPI3_BASE_ADDR);
14781 +#endif
14782 + len += gpi(buf + len, 3, HGPI_BASE_ADDR);
14783 +
14784 + return len;
14785 +}
14786 +
14787 +static ssize_t pfe_show_pfemem(struct device *dev, struct device_attribute *attr, char *buf)
14788 +{
14789 + ssize_t len = 0;
14790 + struct pfe_memmon *memmon = &pfe->memmon;
14791 +
14792 + len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n", memmon->kernel_memory_allocated, (memmon->kernel_memory_allocated + 1023) / 1024);
14793 +
14794 + return len;
14795 +}
14796 +
14797 +#ifdef HIF_NAPI_STATS
14798 +static ssize_t pfe_show_hif_napi_stats(struct device *dev, struct device_attribute *attr, char *buf)
14799 +{
14800 + struct platform_device *pdev = to_platform_device(dev);
14801 + struct pfe *pfe = platform_get_drvdata(pdev);
14802 + ssize_t len = 0;
14803 +
14804 + len += sprintf(buf + len, "sched: %u\n", pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
14805 + len += sprintf(buf + len, "poll: %u\n", pfe->hif.napi_counters[NAPI_POLL_COUNT]);
14806 + len += sprintf(buf + len, "packet: %u\n", pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
14807 + len += sprintf(buf + len, "budget: %u\n", pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
14808 + len += sprintf(buf + len, "desc: %u\n", pfe->hif.napi_counters[NAPI_DESC_COUNT]);
14809 + len += sprintf(buf + len, "full: %u\n", pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
14810 +
14811 + return len;
14812 +}
14813 +
14814 +static ssize_t pfe_set_hif_napi_stats(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
14815 +{
14816 + struct platform_device *pdev = to_platform_device(dev);
14817 + struct pfe *pfe = platform_get_drvdata(pdev);
14818 +
14819 + memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
14820 +
14821 + return count;
14822 +}
14823 +
14824 +static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats, pfe_set_hif_napi_stats);
14825 +#endif
14826 +
14827 +
14828 +static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
14829 +static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
14830 +#if !defined(CONFIG_UTIL_DISABLED)
14831 +static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
14832 +#endif
14833 +static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
14834 +static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
14835 +static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
14836 +static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
14837 +static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
14838 +static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
14839 +static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
14840 +static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
14841 +static DEVICE_ATTR(tmu0_ctx, 0444, pfe_show_tmu0_ctx, NULL);
14842 +static DEVICE_ATTR(tmu1_ctx, 0444, pfe_show_tmu1_ctx, NULL);
14843 +static DEVICE_ATTR(tmu2_ctx, 0444, pfe_show_tmu2_ctx, NULL);
14844 +static DEVICE_ATTR(tmu3_ctx, 0444, pfe_show_tmu3_ctx, NULL);
14845 +static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
14846 +
14847 +
14848 +int pfe_sysfs_init(struct pfe *pfe)
14849 +{
14850 + if (device_create_file(pfe->dev, &dev_attr_class))
14851 + goto err_class;
14852 +
14853 + if (device_create_file(pfe->dev, &dev_attr_tmu))
14854 + goto err_tmu;
14855 +
14856 +#if !defined(CONFIG_UTIL_DISABLED)
14857 + if (device_create_file(pfe->dev, &dev_attr_util))
14858 + goto err_util;
14859 +#endif
14860 +
14861 + if (device_create_file(pfe->dev, &dev_attr_bmu))
14862 + goto err_bmu;
14863 +
14864 + if (device_create_file(pfe->dev, &dev_attr_hif))
14865 + goto err_hif;
14866 +
14867 + if (device_create_file(pfe->dev, &dev_attr_gpi))
14868 + goto err_gpi;
14869 +
14870 + if (device_create_file(pfe->dev, &dev_attr_drops))
14871 + goto err_drops;
14872 +
14873 + if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
14874 + goto err_tmu0_queues;
14875 +
14876 + if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
14877 + goto err_tmu1_queues;
14878 +
14879 + if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
14880 + goto err_tmu2_queues;
14881 +
14882 + if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
14883 + goto err_tmu3_queues;
14884 +
14885 + if (device_create_file(pfe->dev, &dev_attr_tmu0_ctx))
14886 + goto err_tmu0_ctx;
14887 +
14888 + if (device_create_file(pfe->dev, &dev_attr_tmu1_ctx))
14889 + goto err_tmu1_ctx;
14890 +
14891 + if (device_create_file(pfe->dev, &dev_attr_tmu2_ctx))
14892 + goto err_tmu2_ctx;
14893 +
14894 + if (device_create_file(pfe->dev, &dev_attr_tmu3_ctx))
14895 + goto err_tmu3_ctx;
14896 +
14897 + if (device_create_file(pfe->dev, &dev_attr_pfemem))
14898 + goto err_pfemem;
14899 +
14900 +#ifdef HIF_NAPI_STATS
14901 + if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
14902 + goto err_hif_napi_stats;
14903 +#endif
14904 +
14905 + return 0;
14906 +
14907 +#ifdef HIF_NAPI_STATS
14908 +err_hif_napi_stats:
14909 + device_remove_file(pfe->dev, &dev_attr_pfemem);
14910 +#endif
14911 +
14912 +err_pfemem:
14913 + device_remove_file(pfe->dev, &dev_attr_tmu3_ctx);
14914 +
14915 +err_tmu3_ctx:
14916 + device_remove_file(pfe->dev, &dev_attr_tmu2_ctx);
14917 +
14918 +err_tmu2_ctx:
14919 + device_remove_file(pfe->dev, &dev_attr_tmu1_ctx);
14920 +
14921 +err_tmu1_ctx:
14922 + device_remove_file(pfe->dev, &dev_attr_tmu0_ctx);
14923 +
14924 +err_tmu0_ctx:
14925 + device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
14926 +
14927 +err_tmu3_queues:
14928 + device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
14929 +
14930 +err_tmu2_queues:
14931 + device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
14932 +
14933 +err_tmu1_queues:
14934 + device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
14935 +
14936 +err_tmu0_queues:
14937 + device_remove_file(pfe->dev, &dev_attr_drops);
14938 +
14939 +err_drops:
14940 + device_remove_file(pfe->dev, &dev_attr_gpi);
14941 +
14942 +err_gpi:
14943 + device_remove_file(pfe->dev, &dev_attr_hif);
14944 +
14945 +err_hif:
14946 + device_remove_file(pfe->dev, &dev_attr_bmu);
14947 +
14948 +err_bmu:
14949 +#if !defined(CONFIG_UTIL_DISABLED)
14950 + device_remove_file(pfe->dev, &dev_attr_util);
14951 +
14952 +err_util:
14953 +#endif
14954 + device_remove_file(pfe->dev, &dev_attr_tmu);
14955 +
14956 +err_tmu:
14957 + device_remove_file(pfe->dev, &dev_attr_class);
14958 +
14959 +err_class:
14960 + return -1;
14961 +}
14962 +
14963 +
14964 +void pfe_sysfs_exit(struct pfe *pfe)
14965 +{
14966 +#ifdef HIF_NAPI_STATS
14967 + device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
14968 +#endif
14969 +
14970 + device_remove_file(pfe->dev, &dev_attr_pfemem);
14971 + device_remove_file(pfe->dev, &dev_attr_tmu3_ctx);
14972 + device_remove_file(pfe->dev, &dev_attr_tmu2_ctx);
14973 + device_remove_file(pfe->dev, &dev_attr_tmu1_ctx);
14974 + device_remove_file(pfe->dev, &dev_attr_tmu0_ctx);
14975 + device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
14976 + device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
14977 + device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
14978 + device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
14979 + device_remove_file(pfe->dev, &dev_attr_drops);
14980 + device_remove_file(pfe->dev, &dev_attr_gpi);
14981 + device_remove_file(pfe->dev, &dev_attr_hif);
14982 + device_remove_file(pfe->dev, &dev_attr_bmu);
14983 +#if !defined(CONFIG_UTIL_DISABLED)
14984 + device_remove_file(pfe->dev, &dev_attr_util);
14985 +#endif
14986 + device_remove_file(pfe->dev, &dev_attr_tmu);
14987 + device_remove_file(pfe->dev, &dev_attr_class);
14988 +}
14989 +
14990 --- /dev/null
14991 +++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h
14992 @@ -0,0 +1,34 @@
14993 +/*
14994 + *
14995 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
14996 + *
14997 + * This program is free software; you can redistribute it and/or modify
14998 + * it under the terms of the GNU General Public License as published by
14999 + * the Free Software Foundation; either version 2 of the License, or
15000 + * (at your option) any later version.
15001 + *
15002 + * This program is distributed in the hope that it will be useful,
15003 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
15004 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15005 + * GNU General Public License for more details.
15006 + *
15007 + * You should have received a copy of the GNU General Public License
15008 + * along with this program; if not, write to the Free Software
15009 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15010 + */
15011 +
15012 +#ifndef _PFE_SYSFS_H_
15013 +#define _PFE_SYSFS_H_
15014 +
15015 +#include <linux/proc_fs.h>
15016 +
15017 +#define PESTATUS_ADDR_CLASS 0x800
15018 +#define PESTATUS_ADDR_TMU 0x80
15019 +#define PESTATUS_ADDR_UTIL 0x0
15020 +
15021 +#define TMU_CONTEXT_ADDR 0x3c8
15022 +#define IPSEC_CNTRS_ADDR 0x840
15023 +
15024 +int pfe_sysfs_init(struct pfe *pfe);
15025 +void pfe_sysfs_exit(struct pfe *pfe);
15026 +#endif /* _PFE_SYSFS_H_ */
15027 --- /dev/null
15028 +++ b/drivers/staging/fsl_ppfe/platform.h
15029 @@ -0,0 +1,25 @@
15030 +/*
15031 + *
15032 + * Copyright (C) 2007 Freescale Semiconductor, Inc.
15033 + *
15034 + * This program is free software; you can redistribute it and/or modify
15035 + * it under the terms of the GNU General Public License as published by
15036 + * the Free Software Foundation; either version 2 of the License, or
15037 + * (at your option) any later version.
15038 + *
15039 + * This program is distributed in the hope that it will be useful,
15040 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
15041 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15042 + * GNU General Public License for more details.
15043 + *
15044 + * You should have received a copy of the GNU General Public License
15045 + * along with this program; if not, write to the Free Software
15046 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15047 + */
15048 +
15049 +#ifndef _PLATFORM_H_
15050 +#define _PLATFORM_H_
15051 +
15052 +#define virt_to_phys(virt) ((unsigned long)virt)
15053 +
15054 +#endif /* _PLATFORM_H_ */
15055 --- a/include/linux/skbuff.h
15056 +++ b/include/linux/skbuff.h
15057 @@ -858,6 +858,17 @@ static inline struct sk_buff *alloc_skb_
15058 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
15059 }
15060
15061 +extern struct sk_buff *__alloc_skb_header(unsigned int size, void *data,
15062 + gfp_t gfp_mask,
15063 + int fclone,
15064 + int node);
15065 +static inline struct sk_buff *alloc_skb_header(unsigned int size,
15066 + u8 *data,
15067 + gfp_t priority)
15068 +{
15069 + return __alloc_skb_header(size, data, priority, 0, -1);
15070 +}
15071 +
15072 struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
15073 static inline struct sk_buff *alloc_skb_head(gfp_t priority)
15074 {
15075 --- a/net/core/skbuff.c
15076 +++ b/net/core/skbuff.c
15077 @@ -283,6 +283,90 @@ nodata:
15078 EXPORT_SYMBOL(__alloc_skb);
15079
15080 /**
15081 + * __alloc_skb_header - allocate a network buffer
15082 + * @size: size to allocate
15083 + * @data: caller-provided (kmalloc'd) buffer to use as the skb data area
15084 + * @gfp_mask: allocation mask
15085 + * @fclone: allocate from fclone cache and set up a cloned (child) skb
15086 + *
15087 + * Allocate a new &sk_buff. The returned buffer has no headroom and a
15088 + * tail room of @size bytes. The object has a reference count of one.
15089 + * The return is the buffer. On a failure the return is %NULL.
15090 + *
15091 + * Buffers may only be allocated from interrupts using a @gfp_mask of
15092 + * %GFP_ATOMIC.
15093 + */
15094 +struct sk_buff *__alloc_skb_header(unsigned int size, void *data,
15095 + gfp_t gfp_mask, int fclone, int node)
15096 +{
15097 + struct kmem_cache *cache;
15098 + struct skb_shared_info *shinfo;
15099 + struct sk_buff *skb;
15100 +
15101 + cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
15102 +
15103 + if (size <= SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) {
15104 + skb = NULL;
15105 + goto out;
15106 + }
15107 +
15108 + /* Get the HEAD */
15109 + skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
15110 + if (!skb)
15111 + goto out;
15112 + prefetchw(skb);
15113 +
15114 +	/* The caller-provided data buffer must come from kmalloc: ksize()
15115 +	 * is used to find the true allocation size, and skb_shared_info is
15116 +	 * placed exactly at the end of that zone to allow max filling.
15117 +	 */
15118 + size = SKB_WITH_OVERHEAD(ksize(data));
15119 + prefetchw(data + size);
15120 +
15121 + /* Only clear those fields we need to clear, not those that we will
15122 + * actually initialise below. Hence, don't put any more fields after
15123 + * the tail pointer in struct sk_buff!
15124 + */
15125 + memset(skb, 0, offsetof(struct sk_buff, tail));
15126 + /* Account for allocated memory : skb + skb->head */
15127 + skb->truesize = SKB_TRUESIZE(size);
15128 + atomic_set(&skb->users, 1);
15129 + skb->head = data;
15130 + skb->data = data;
15131 + skb_reset_tail_pointer(skb);
15132 + skb->end = skb->tail + size;
15133 +#ifdef NET_SKBUFF_DATA_USES_OFFSET
15134 + skb->mac_header = ~0U;
15135 +#endif
15136 +
15137 +#if defined(CONFIG_COMCERTO_CUSTOM_SKB_LAYOUT)
15138 + skb->mspd_data = NULL;
15139 + skb->mspd_len = 0;
15140 +#endif
15141 +
15142 + /* make sure we initialize shinfo sequentially */
15143 + shinfo = skb_shinfo(skb);
15144 + memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
15145 + atomic_set(&shinfo->dataref, 1);
15146 + kmemcheck_annotate_variable(shinfo->destructor_arg);
15147 +
15148 + if (fclone) {
15149 + struct sk_buff *child = skb + 1;
15150 + atomic_t *fclone_ref = (atomic_t *)(child + 1);
15151 +
15152 + kmemcheck_annotate_bitfield(child, flags1);
15153 + kmemcheck_annotate_bitfield(child, flags2);
15154 + skb->fclone = SKB_FCLONE_ORIG;
15155 + atomic_set(fclone_ref, 1);
15156 +
15157 + child->fclone = SKB_FCLONE_UNAVAILABLE;
15158 + }
15159 +out:
15160 + return skb;
15161 +}
15162 +EXPORT_SYMBOL(__alloc_skb_header);
15163 +
15164 +/**
15165 * __build_skb - build a network buffer
15166 * @data: data buffer provided by caller
15167 * @frag_size: size of data, or 0 if head was kmalloced