kernel: add Intel/Lantiq VRX518 EP driver
[openwrt/staging/hauke.git] / package/kernel/lantiq/vrx518_ep/src/test/ep_test.c
/*******************************************************************************

  Intel SmartPHY DSL PCIe Endpoint/ACA Linux Test driver
  Copyright(c) 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <net/dc_ep.h>

#include "ep_test.h"

#define DRV_VERSION "1.0.0"
static const char ep_test_driver_version[] = DRV_VERSION;
static struct dc_ep_dev pcie_dev[DC_EP_MAX_PEER + 1];
static int ppe_irq_num;

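/*
 * Register access helpers. These macros expand against a local
 * "struct dc_ep_dev *dev" that must be in scope at the call site; all
 * accesses are MMIO reads/writes relative to dev->membase, and
 * ep_wr32_mask() performs a non-atomic read-modify-write.
 */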
#define ep_wr32(value, reg) (writel(value, dev->membase + reg))
#define ep_rd32(reg) (readl(dev->membase + reg))

#define ep_wr32_mask(clr, set, reg) \
	ep_wr32(((ep_rd32(reg) & ~(clr)) | (set)), (reg))

struct aca_hd_desc {
	void *base;
	dma_addr_t phy_base;
	size_t size; /* in bytes */
};

struct aca_hd_desc_cfg {
	struct aca_hd_desc txin;
	struct aca_hd_desc txout;
	struct aca_hd_desc rxout;
};

static struct aca_hd_desc_cfg aca_soc_hd_desc[DC_EP_MAX_PEER + 1];

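/*
 * Copy a buffer into endpoint memory using 32-bit MMIO writes. The length
 * is expected to be a multiple of 4 bytes; any trailing remainder is not
 * copied.
 */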
static void ep_mem_write(u8 __iomem *dst, const void *src, size_t len)
{
	int i;
	const u32 *src_addr = src;

	if (len % 4)
68 pr_info("Warning!!: Copy len is not multiple of 4\n");

	len = len >> 2;

	for (i = 0; i < len; i++)
		writel(src_addr[i], (dst + (i << 2)));
}

static irqreturn_t dc_ep_ppe_intr(int irq, void *dev_id)
{
	struct dc_ep_dev *dev = dev_id;

	ppe_irq_num++;
	if (ep_rd32(MBOX_IGU0_ISR) == 0) {
82 pr_err("Fatal error, dummy interrupt\n");
		return IRQ_NONE;
	}

	ep_wr32(PPE_MBOX_TEST_BIT, MBOX_IGU0_ISRC);
	ep_rd32(MBOX_IGU0_ISR);
	return IRQ_HANDLED;
}

static void dc_ep_ppe_mbox_reg_dump(struct dc_ep_dev *dev)
{
	pr_info("MBOX_IGU0_ISRS addr %p data 0x%08x\n",
		dev->membase + MBOX_IGU0_ISRS,
		ep_rd32(MBOX_IGU0_ISRS));
	pr_info("MBOX_IGU0_ISRC addr %p data 0x%08x\n",
		dev->membase + MBOX_IGU0_ISRC,
		ep_rd32(MBOX_IGU0_ISRC));
	pr_info("MBOX_IGU0_ISR addr %p data 0x%08x\n",
		dev->membase + MBOX_IGU0_ISR,
		ep_rd32(MBOX_IGU0_ISR));
	pr_info("MBOX_IGU0_IER addr %p data 0x%08x\n",
		dev->membase + MBOX_IGU0_IER,
		ep_rd32(MBOX_IGU0_IER));
}

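/*
 * Mailbox interrupt stress test: unmask PPE_MBOX_TEST_BIT in MBOX_IGU0_IER,
 * then repeatedly set the bit via MBOX_IGU0_ISRS to raise the interrupt.
 * dc_ep_ppe_intr() clears it again through MBOX_IGU0_ISRC, and the number
 * of interrupts actually taken is compared against PPE_MBOX_IRQ_TEST_NUM.
 */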
#define PPE_INT_TIMEOUT 10
static int dc_ep_ppe_mbox_int_stress_test(struct dc_ep_dev *dev)
{
	int i;
	int j;
	int ret;

	/* Clear it first */
	ep_wr32(PPE_MBOX_TEST_BIT, MBOX_IGU0_ISRC);

	ret = request_irq(dev->irq, dc_ep_ppe_intr, 0, "PPE_MSI", dev);
	if (ret) {
		pr_err("%s request irq %d failed\n", __func__, dev->irq);
		return -1;
	}
	pr_info("PPE test\n");
	ep_wr32(PPE_MBOX_TEST_BIT, MBOX_IGU0_IER);
	ppe_irq_num = 0;
	/* Purposely trigger interrupt */
	for (i = 0; i < PPE_MBOX_IRQ_TEST_NUM; i++) {
		j = 0;
		while ((ep_rd32(MBOX_IGU0_ISR) & PPE_MBOX_TEST_BIT)) {
			j++;
			if (j > PPE_INT_TIMEOUT)
				break;
		}
		ep_wr32(PPE_MBOX_TEST_BIT, MBOX_IGU0_ISRS);
		/* Write flush */
		ep_rd32(MBOX_IGU0_ISR);
	}
	mdelay(10);
	pr_info("irq triggered %d expected %d\n", ppe_irq_num,
		PPE_MBOX_IRQ_TEST_NUM);
	dc_ep_ppe_mbox_reg_dump(dev);
	ppe_irq_num = 0;
	return 0;
}

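/*
 * The helpers below fill a handful of dummy DMA descriptors (the data
 * pointers are fixed test addresses in the 0x26xxxxxx range) and then bump
 * the corresponding ACA accumulate-add register so the hardware picks them
 * up: TXIN from SoC-side descriptor memory, TXOUT/RXOUT from the PD rings
 * inside the endpoint.
 */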
static void umt_txin_send(struct dc_ep_dev *dev,
	u8 __iomem *soc_dbase, int num)
{
	int i;
	struct aca_dma_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.own = 0;
	desc.sop = 1;
	desc.eop = 1;
	desc.dic = 1;
	desc.pdu_type = 1;
	desc.data_len = 127;
	desc.data_pointer = 0x26000000;
	desc.dw1 = 0x700;
	desc.dw0 = 0x0000007f;

	for (i = 0; i < num; i++) {
		desc.data_pointer += roundup(desc.data_len, 4);
		ep_mem_write(soc_dbase + i * sizeof(desc),
			(void *)&desc, sizeof(desc));
	}

	ep_wr32(num, TXIN_HD_ACCUM_ADD);
}

static void ppe_txout_send(struct dc_ep_dev *dev,
	u8 __iomem *ppe_sb_base, int num)
{
	int i;
	struct aca_dma_desc_2dw desc;

	memset(&desc, 0, sizeof(desc));
	desc.status.field.own = 1;
	desc.status.field.sop = 1;
	desc.status.field.eop = 1;
	desc.status.field.data_len = 127;
	desc.data_pointer = 0x26100000;

	for (i = 0; i < num; i++) {
		desc.data_pointer += roundup(desc.status.field.data_len, 4);
		ep_mem_write(ppe_sb_base + i * sizeof(desc),
			(void *)&desc, sizeof(desc));
	}

	ep_wr32(num, TXOUT_ACA_ACCUM_ADD);
}

static void ppe_rxout_send(struct dc_ep_dev *dev,
	u8 __iomem *ppe_sb_base, int num)
{
	int i;
	struct aca_dma_desc_2dw desc;

	memset(&desc, 0, sizeof(desc));
	desc.status.field.own = 0;
	desc.status.field.sop = 1;
	desc.status.field.eop = 1;
	desc.status.field.meta_data0 = 0x3;
	desc.status.field.meta_data1 = 0x7f;
	desc.status.field.data_len = 127;
	desc.data_pointer = 0x26200000;

	for (i = 0; i < num; i++) {
		desc.data_pointer += roundup(desc.status.field.data_len, 4);
		ep_mem_write(ppe_sb_base + i * sizeof(desc),
			(void *)&desc, sizeof(desc));
	}

	ep_wr32(num, RXOUT_ACA_ACCUM_ADD);
}

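/*
 * Kick one burst of eight descriptors in each ACA direction: TXIN from the
 * SoC descriptor ring, TXOUT/RXOUT from the PD rings in endpoint memory.
 */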
static void dc_aca_test_init(struct dc_ep_dev *dev, void *soc_base)
{
	umt_txin_send(dev, (u8 __iomem *)soc_base, 8);
	ppe_txout_send(dev, (TXOUT_PD_DBASE + dev->membase), 8);
	ppe_rxout_send(dev, (RXOUT_PD_DBASE + dev->membase), 8);
}

static const char *sysclk_str[SYS_CLK_MAX] = {
	"36MHz",
	"288MHz",
};

static const char *ppeclk_str[PPE_CLK_MAX] = {
	"36MHz",
	"576MHz",
	"494MHz",
	"432MHz",
	"288MHz",
};

#define ACA_PMU_CTRL 0x11C
#define ACA_PMU_DMA BIT(2)
#define ACA_PMU_EMA BIT(22)

enum {
	DMA_ENDIAN_TYPE0 = 0,
	DMA_ENDIAN_TYPE1, /*!< Byte Swap (B0B1B2B3 => B1B0B3B2) */
	DMA_ENDIAN_TYPE2, /*!< Word Swap (B0B1B2B3 => B2B3B0B1) */
	DMA_ENDIAN_TYPE3, /*!< DWord Swap (B0B1B2B3 => B3B2B1B0) */
	DMA_ENDIAN_MAX,
};

#ifdef CONFIG_CPU_BIG_ENDIAN
#define DMA_ENDIAN_DEFAULT DMA_ENDIAN_TYPE3
#else
#define DMA_ENDIAN_DEFAULT DMA_ENDIAN_TYPE0
#endif

enum {
	DMA_BURSTL_2DW = 1, /*!< 2 DWORD DMA burst length */
	DMA_BURSTL_4DW = 2, /*!< 4 DWORD DMA burst length */
	DMA_BURSTL_8DW = 3, /*!< 8 DWORD DMA burst length */
	DMA_BURSTL_16DW = 16,
};

#define DMA_BURSTL_DEFAULT DMA_BURSTL_16DW

#define DMA_TX_PORT_DEFAULT_WEIGHT 1
/** Default Port Transmit weight value */
#define DMA_TX_CHAN_DEFAULT_WEIGHT 1

enum {
	DMA_RX_CH = 0, /*!< Rx channel */
	DMA_TX_CH = 1, /*!< Tx channel */
};

enum {
	DMA_PKT_DROP_DISABLE = 0,
	DMA_PKT_DROP_ENABLE,
};

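/*
 * Hardware view of the 2-DW DMA descriptors. The bitfield order is mirrored
 * between the big- and little-endian definitions below so that own, c, sop,
 * eop and friends land on the same bits of the status word either way.
 */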
#ifdef CONFIG_CPU_BIG_ENDIAN
/* 2 DWs format descriptor */
struct rx_desc_2dw {
	u32 data_pointer; /* Descriptor data pointer */
	union {
		struct {
			u32 own:1;
			u32 c:1;
			u32 sop:1;
			u32 eop:1;
			u32 meta:2;
			u32 byte_offset:3;
			u32 meta_data:7;
			u32 data_len:16;
		} __packed field;
		u32 word;
	} __packed status;
} __packed __aligned(8);

struct tx_desc_2dw {
	u32 data_pointer; /* Descriptor data pointer */
	union {
		struct {
			u32 own:1;
			u32 c:1;
			u32 sop:1;
			u32 eop:1;
			u32 meta:2;
			u32 byte_offset:3;
			u32 meta_data:7;
			u32 data_len:16;
		} __packed field;
		u32 word;
	} __packed status;
} __packed __aligned(8);
#else
/* 2 DWs format descriptor */
struct rx_desc_2dw {
	u32 data_pointer; /* Descriptor data pointer */
	union {
		struct {
			u32 data_len:16;
			u32 meta_data:7;
			u32 byte_offset:3;
			u32 meta:2;
			u32 eop:1;
			u32 sop:1;
			u32 c:1;
			u32 own:1;
		} __packed field;
		u32 word;
	} __packed status;
} __packed __aligned(8);

struct tx_desc_2dw {
	u32 data_pointer; /* Descriptor data pointer */
	union {
		struct {
			u32 data_len:16;
			u32 meta_data:7;
			u32 byte_offset:3;
			u32 meta:2;
			u32 eop:1;
			u32 sop:1;
			u32 c:1;
			u32 own:1;
		} __packed field;
		u32 word;
	} __packed status;
} __packed __aligned(8);
#endif

enum {
	SOC_TO_EP = 0,
	EP_TO_SOC,
};

static int dma_pkt_size = 1024;
static int dma_mode = SOC_TO_EP;
static int dma_burst = 16;
static int desc_num = 32;

module_param(dma_pkt_size, int, 0);
MODULE_PARM_DESC(dma_pkt_size, "Single packet length");

module_param(dma_mode, int, 0);
MODULE_PARM_DESC(dma_mode, "Copy direction: 0 -- SoC->EP, 1 -- EP->SoC");
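/*
 * Example (assuming the module is built as ep_test.ko; adjust the name to
 * whatever the build actually produces):
 *   insmod ep_test.ko dma_mode=1 dma_pkt_size=256
 */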
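/*
 * Bring-up sequence for the endpoint's central DMA: clear the ACA_PMU_DMA
 * and ACA_PMU_EMA gate bits in ACA_PMU_CTRL (assumed to power the blocks
 * up), set bit 0 of DMA_CTRL (presumably the soft-reset bit), clear the
 * clock gating register, then reset and configure the individual channels
 * before switching them on.
 */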
static void dma_ctrl_rst(struct dc_ep_dev *dev)
{
	ep_wr32_mask(ACA_PMU_DMA | ACA_PMU_EMA, 0, ACA_PMU_CTRL);

	udelay(10);
	ep_wr32_mask(0, 1, DMA_CTRL);
	udelay(10);
	ep_wr32(0, DMA_CLC);
}

static void dma_chan_rst(struct dc_ep_dev *dev, int cn)
{
	ep_wr32(cn, DMA_CS);
	ep_wr32(0x2, DMA_CCTRL);
	while (ep_rd32(DMA_CCTRL) & 0x01)
		udelay(10);
}

static void dma_port_cfg(struct dc_ep_dev *dev)
{
	u32 reg = 0;

	reg |= (DMA_TX_PORT_DEFAULT_WEIGHT << 12);
	reg |= (DMA_ENDIAN_TYPE0 << 10);
	reg |= (DMA_ENDIAN_TYPE0 << 8);
	reg |= (DMA_PKT_DROP_DISABLE << 6);
	reg |= 0x3;
	ep_wr32(0, DMA_PS);
	ep_wr32(reg, DMA_PCTRL);
}

static void dma_byte_enable(struct dc_ep_dev *dev, int enable)
{
	if (enable)
		ep_wr32_mask(0, BIT(9), DMA_CTRL);
	else
		ep_wr32_mask(BIT(9), 0, DMA_CTRL);
}

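/*
 * Descriptor ring setup for one TX/RX channel: each 2-DW descriptor points
 * into a contiguous data buffer (dma_pkt_size bytes apart, 8-byte aligned),
 * ownership is handed to the DMA engine, and the ring base/length is
 * programmed through DMA_CS/DMA_CDBA/DMA_CDLEN with channel interrupts left
 * disabled (DMA_CIE written to 0).
 */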
static void dma_tx_ch_cfg(struct dc_ep_dev *dev, int ch, u32 desc_base,
	u32 desc_phys, dma_addr_t data_base, int desc_num)
{
	int i;
	struct tx_desc_2dw *tx_desc;

	for (i = 0; i < desc_num; i++) {
		tx_desc = (struct tx_desc_2dw *)
			(desc_base + (i * sizeof(*tx_desc)));
		tx_desc->data_pointer = (((u32)(data_base +
			(i * dma_pkt_size))) & 0xfffffff8);
		tx_desc->status.word = 0;
		tx_desc->status.field.byte_offset = 0;
		tx_desc->status.field.data_len = dma_pkt_size;

		tx_desc->status.field.sop = 1;
		tx_desc->status.field.eop = 1;
		tx_desc->status.field.own = 1;
		wmb();
#if 0
		pr_info("Tx desc num %d word 0x%08x data pointer 0x%08x\n",
			i, tx_desc->status.word, tx_desc->data_pointer);
#endif
	}
	ep_wr32(ch, DMA_CS);
	ep_wr32(desc_phys, DMA_CDBA);
	ep_wr32(desc_num, DMA_CDLEN);
	ep_wr32(0, DMA_CIE);
}

static void dma_rx_ch_cfg(struct dc_ep_dev *dev, int ch, u32 desc_base,
	u32 desc_phys, dma_addr_t data_base, int desc_num)
{
	int i;
	struct rx_desc_2dw *rx_desc;

	for (i = 0; i < desc_num; i++) {
		rx_desc = (struct rx_desc_2dw *)(desc_base
			+ (i * sizeof(*rx_desc)));
		rx_desc->data_pointer = (((u32)(data_base +
			(i * dma_pkt_size))) & 0xfffffff8);

		rx_desc->status.word = 0;
		rx_desc->status.field.sop = 1;
		rx_desc->status.field.eop = 1;
		rx_desc->status.field.byte_offset = 0;
		rx_desc->status.field.data_len = dma_pkt_size;
		rx_desc->status.field.own = 1; /* DMA own the descriptor */
		wmb();
#if 0
		pr_info("Rx desc num %d word 0x%08x data pointer 0x%08x\n",
			i, rx_desc->status.word, rx_desc->data_pointer);
#endif
	}

	ep_wr32(ch, DMA_CS);
	ep_wr32(desc_phys, DMA_CDBA);
	ep_wr32(desc_num, DMA_CDLEN);
	ep_wr32(0, DMA_CIE);
}

static void dma_chan_on(struct dc_ep_dev *dev, u8 cn)
{
	ep_wr32(cn, DMA_CS);
	ep_wr32_mask(0, BIT(0), DMA_CCTRL);
}

static void dma_chan_off(struct dc_ep_dev *dev, u8 cn)
{
	ep_wr32(cn, DMA_CS);
	ep_wr32_mask(BIT(0), 0, DMA_CCTRL);
	udelay(10);
}

#define DEFAULT_TEST_PATTERN 0x12345678

#define REG32(addr) (*((volatile u32*)(addr)))

#ifdef CONFIG_CPU_BIG_ENDIAN
#define ___swab32(x) ((u32)( \
	(((u32)(x) & (u32)0x000000ffUL) << 24) | \
	(((u32)(x) & (u32)0x0000ff00UL) << 8) | \
	(((u32)(x) & (u32)0x00ff0000UL) >> 8) | \
	(((u32)(x) & (u32)0xff000000UL) >> 24)))
#else
#define ___swab32(x) (x)
#endif

static void dma_sdram_preload(void *sdram_data_tx_ptr, void *sdram_data_rx_ptr)
{
	int i;
	int j;

	u32 testaddr = (u32)sdram_data_tx_ptr;

	for (i = 0; i < desc_num; i++) {
		for (j = 0; j < dma_pkt_size; j = j + 4) {
			REG32(testaddr + i * dma_pkt_size + j)
				= DEFAULT_TEST_PATTERN;
		}
	}
	pr_info("SDR preload of TX locations with pattern 0x%08x done\n",
		DEFAULT_TEST_PATTERN);

	testaddr = (u32)sdram_data_rx_ptr;
	pr_info("RX Preload start address:0x%08x\n", (u32)(testaddr));

	for (i = 0; i < desc_num; i++) {
		for (j = 0; j < roundup(dma_pkt_size,
			dma_burst << 2); j = j + 4)
			REG32(testaddr + i * dma_pkt_size + j) = 0xcccccccc;
	}
	pr_info("SDR locations for Memcopy RX preset to 0xcccccccc done\n");
}

static void memcopy_data_check(u32 rx_data_addr)
{
	int i, j;
	u32 read_data;

	for (i = 0; i < desc_num; i++) {
		for (j = 0; j < dma_pkt_size; j = j + 4) {
			read_data = REG32(rx_data_addr + i * dma_pkt_size + j);
			if (read_data != ___swab32(DEFAULT_TEST_PATTERN))
				pr_err("Memcopy ERROR at addr 0x%08x data 0x%08x\n",
					(rx_data_addr + j), read_data);
		}
	}
}

static u32 plat_throughput_calc(u32 payload, int cycles)
{
	return (u32)((payload * 300) / cycles);
}

#define DMA_CPOLL_CNT_MASK 0xFFF0u

static void dma_ctrl_global_polling_enable(struct dc_ep_dev *dev, int interval)
{
	u32 reg = 0;

	reg |= (1 << 31);
	reg |= (interval << 4);

	ep_wr32_mask(DMA_CPOLL_CNT_MASK,
		reg, DMA_CPOLL);
}

static void dma_controller_cfg(struct dc_ep_dev *dev)
{
	ep_wr32_mask(0, BIT(31), DMA_CTRL);
	ep_wr32_mask(BIT(30), 0, DMA_CTRL);
	ep_wr32_mask(0, BIT(1), DMA_CTRL);
	ep_wr32_mask(0, BIT(13), DMA_CTRL);
}

#define PDRAM_OFFSET		0x200200
#define PDRAM_TX_DESC_OFFSET	0x200000
#define PDRAM_RX_DESC_OFFSET	0x200100
#define ACA_SRAM_OFFSET		0x100000
#define PPE_SB_TX_DESC_OFFSET	0x280000
#define PPE_SB_RX_DESC_OFFSET	0x281000

#define PPE_FPI_TX_DESC_OFFSET	0x320000
#define PPE_FPI_RX_DESC_OFFSET	0x321000

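/*
 * Memcopy-style test for the central DMA. The descriptor rings live in the
 * endpoint PDBRAM; depending on the direction, the data source or sink is a
 * coherent buffer in SoC DDR while the other end stays in PDBRAM. The test
 * preloads a known pattern, starts both channels, busy-waits until the last
 * TX/RX descriptors are handed back, then derives a rough throughput figure
 * from the cycle count (plat_throughput_calc() appears to assume a 300 MHz
 * cycle counter).
 */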
static void dma_test(struct dc_ep_dev *dev, int mode, int rcn, int tcn)
{
	u32 loop = 0;
	void *tx_data;
	void *rx_data;
	dma_addr_t tx_data_phys = 0;
	dma_addr_t rx_data_phys = 0;
	u32 start, end;
	u32 cycles;
	struct rx_desc_2dw *rx_desc;
	struct tx_desc_2dw *tx_desc;
	struct tx_desc_2dw *last_tx_desc;
	struct rx_desc_2dw *last_rx_desc;
	dma_addr_t tx_desc_phys;
	dma_addr_t rx_desc_phys;
	u32 membase = (u32)(dev->membase);

	rx_desc = (struct rx_desc_2dw *)(membase + PDRAM_RX_DESC_OFFSET);
	rx_desc_phys = (dev->phy_membase + PDRAM_RX_DESC_OFFSET);
	tx_desc = (struct tx_desc_2dw *)(membase + PDRAM_TX_DESC_OFFSET);
	tx_desc_phys = (dev->phy_membase + PDRAM_TX_DESC_OFFSET);
	last_rx_desc = rx_desc + (desc_num - 1);
	last_tx_desc = tx_desc + (desc_num - 1);

	if (mode == SOC_TO_EP) { /* Read from SoC DDR to local PDBRAM */
		tx_data = dma_alloc_coherent(NULL,
			desc_num * dma_pkt_size, &tx_data_phys, GFP_DMA);
		rx_data_phys = (dma_addr_t)(dev->phy_membase + PDRAM_OFFSET);
		rx_data = (void *)(membase + PDRAM_OFFSET);
	} else { /* Write from local PDBRAM to remote DDR */
		tx_data_phys = (dma_addr_t)(dev->phy_membase + PDRAM_OFFSET);
		tx_data = (void *)(membase + PDRAM_OFFSET);
		rx_data = dma_alloc_coherent(NULL, desc_num * dma_pkt_size,
			&rx_data_phys, GFP_DMA);
	}

	pr_info("tx_desc_base %p tx_desc_phys 0x%08x tx_data %p tx_data_phys 0x%08x\n",
		tx_desc, (u32)tx_desc_phys, tx_data, (u32)tx_data_phys);

	pr_info("rx_desc_base %p rx_desc_phys 0x%08x rx_data %p rx_data_phys 0x%08x\n",
		rx_desc, (u32)rx_desc_phys, rx_data, (u32)rx_data_phys);

	pr_info("dma burst %d desc number %d packet size %d\n",
		dma_burst, desc_num, dma_pkt_size);

	dma_ctrl_rst(dev);
	dma_chan_rst(dev, rcn);
	dma_chan_rst(dev, tcn);
	dma_port_cfg(dev);
	dma_controller_cfg(dev);
	dma_byte_enable(dev, 1);

	dma_ctrl_global_polling_enable(dev, 24);

	dma_sdram_preload(tx_data, rx_data);

	dma_tx_ch_cfg(dev, tcn, (u32)tx_desc, tx_desc_phys,
		tx_data_phys, desc_num);
	dma_rx_ch_cfg(dev, rcn, (u32)rx_desc, rx_desc_phys,
		rx_data_phys, desc_num);

	udelay(5); /* Make sure that RX descriptor prefetched */

	start = get_cycles();
	dma_chan_on(dev, rcn);
	dma_chan_on(dev, tcn);

	/* wait till tx chan desc own is 0 */
	while (last_tx_desc->status.field.own == 1) {
		loop++;
		udelay(1);
	}
	end = get_cycles();
	cycles = end - start;
645 pr_info("cylces %d throughput %dMb\n", cycles,
646 plat_throughput_calc(desc_num * dma_pkt_size * 8, cycles));
647 pr_info("loop times %d\n", loop);
648 while (last_rx_desc->status.field.own == 1) {
649 loop++;
650 udelay(1);
651 }
652
653 memcopy_data_check((u32)rx_data);
654 dma_chan_off(dev, rcn);
655 dma_chan_off(dev, tcn);
656 if (mode == SOC_TO_EP) {
657 dma_free_coherent(NULL, desc_num * dma_pkt_size,
658 tx_data, tx_data_phys);
659 } else {
660 dma_free_coherent(NULL, desc_num * dma_pkt_size,
661 rx_data, rx_data_phys);
662 }
663 }
664
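/*
 * Allocate the SoC-side head-descriptor rings (TXIN/TXOUT/RXOUT) for one
 * endpoint from coherent DMA memory; each ring holds <ring>_SOC_DES_NUM
 * entries of <ring>_HD_DES_SIZE dwords. On failure, the rings allocated so
 * far are released again.
 */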
static int aca_soc_desc_alloc(int dev)
{
	dma_addr_t phy_addr;
	void *base;
	u32 size;

	if (dev < 0 || dev > DC_EP_MAX_PEER)
		return -EINVAL;

	/* TXIN */
	size = TXIN_SOC_DES_NUM * TXIN_HD_DES_SIZE * 4;
	base = dma_alloc_coherent(NULL, size, &phy_addr, GFP_DMA);
	if (!base)
		goto txin;
	aca_soc_hd_desc[dev].txin.base = base;
	aca_soc_hd_desc[dev].txin.phy_base = phy_addr;
	aca_soc_hd_desc[dev].txin.size = size;
	pr_info("txin soc desc base %p phy 0x%08x size 0x%08x\n",
		base, (u32)phy_addr, size);

	/* TXOUT */
	size = TXOUT_SOC_DES_NUM * TXOUT_HD_DES_SIZE * 4;
	base = dma_alloc_coherent(NULL, size, &phy_addr, GFP_DMA);
	if (!base)
		goto txout;
	aca_soc_hd_desc[dev].txout.base = base;
	aca_soc_hd_desc[dev].txout.phy_base = phy_addr;
	aca_soc_hd_desc[dev].txout.size = size;
	pr_info("txout soc desc base %p phy 0x%08x size 0x%08x\n",
		base, (u32)phy_addr, size);
	/* RXOUT */
	size = RXOUT_SOC_DES_NUM * RXOUT_HD_DES_SIZE * 4;
	base = dma_alloc_coherent(NULL, size, &phy_addr, GFP_DMA);
	if (!base)
		goto rxout;
	aca_soc_hd_desc[dev].rxout.base = base;
	aca_soc_hd_desc[dev].rxout.phy_base = phy_addr;
	aca_soc_hd_desc[dev].rxout.size = size;
	pr_info("rxout soc desc base %p phy 0x%08x size 0x%08x\n",
		base, (u32)phy_addr, size);
	return 0;
rxout:
	dma_free_coherent(NULL, aca_soc_hd_desc[dev].txout.size,
		aca_soc_hd_desc[dev].txout.base,
		aca_soc_hd_desc[dev].txout.phy_base);
txout:
	dma_free_coherent(NULL, aca_soc_hd_desc[dev].txin.size,
		aca_soc_hd_desc[dev].txin.base,
		aca_soc_hd_desc[dev].txin.phy_base);
txin:
	return -ENOMEM;
}

static int aca_soc_desc_free(int dev)
{
	dma_addr_t phy_addr;
	void *base;
	size_t size;

	if (dev < 0 || dev > DC_EP_MAX_PEER)
		return -EINVAL;

	/* TXIN */
	base = aca_soc_hd_desc[dev].txin.base;
	phy_addr = aca_soc_hd_desc[dev].txin.phy_base;
	size = aca_soc_hd_desc[dev].txin.size;
	dma_free_coherent(NULL, size, base, phy_addr);

	/* TXOUT */
	base = aca_soc_hd_desc[dev].txout.base;
	phy_addr = aca_soc_hd_desc[dev].txout.phy_base;
	size = aca_soc_hd_desc[dev].txout.size;
	dma_free_coherent(NULL, size, base, phy_addr);

	/* RXOUT */
	base = aca_soc_hd_desc[dev].rxout.base;
	phy_addr = aca_soc_hd_desc[dev].rxout.phy_base;
	size = aca_soc_hd_desc[dev].rxout.size;
	dma_free_coherent(NULL, size, base, phy_addr);
	return 0;
}

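/*
 * Module init: discover the endpoints, allocate the SoC-side descriptor
 * rings, then for each endpoint request its PPE interrupt, program clocks,
 * pinmux and GPIOs through the hw_ops callbacks, initialise and start the
 * ACA engine and finally run the mailbox, ACA and DMA self-tests.
 */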
static int __init dc_ep_test_init(void)
{
	int i, j;
	int dev_num;
	struct dc_ep_dev dev;
	int func = 0;
	u32 sysclk = 0;
	u32 ppeclk = 0;

	if (dc_ep_dev_num_get(&dev_num)) {
		pr_err("%s failed to get total device number\n", __func__);
		return -EIO;
	}

	pr_info("%s: total %d EPs found\n", __func__, dev_num);

	for (i = 0; i < dev_num; i++)
		aca_soc_desc_alloc(i);

	for (i = 0; i < dev_num; i++) {
		struct aca_param aca_cfg = {
			.aca_txin = {
				.soc_desc_base
					= aca_soc_hd_desc[i].txin.phy_base,
				.soc_desc_num = TXIN_SOC_DES_NUM,
				.pp_buf_desc_num = 32,
				.pd_desc_base = TXIN_PD_DBASE,
				.pd_desc_num = TXIN_PD_DES_NUM,
				.hd_size_in_dw = TXIN_HD_DES_SIZE,
				.pd_size_in_dw = TXIN_PD_DES_SIZE,
				.byteswap = 1,
			},
			.aca_txout = {
				.soc_desc_base
					= aca_soc_hd_desc[i].txout.phy_base,
				.soc_desc_num = TXOUT_SOC_DES_NUM,
				.pp_buf_desc_num = 32,
				.pd_desc_base = TXOUT_PD_DBASE,
				.pd_desc_num = TXOUT_PD_DES_NUM,
				.hd_size_in_dw = TXOUT_HD_DES_SIZE,
				.pd_size_in_dw = TXOUT_PD_DES_SIZE,
				.byteswap = 1,
			},
			.aca_rxout = {
				.soc_desc_base
					= aca_soc_hd_desc[i].rxout.phy_base,
				.soc_desc_num = RXOUT_SOC_DES_NUM,
				.pp_buf_desc_num = 32,
				.pd_desc_base = RXOUT_PD_DBASE,
				.pd_desc_num = RXOUT_PD_DES_NUM,
				.hd_size_in_dw = RXOUT_HD_DES_SIZE,
				.pd_size_in_dw = RXOUT_PD_DES_SIZE,
				.byteswap = 1,
			},
		};
		struct aca_modem_param modem_cfg = {
			.mdm_txout = {
				.stat = SB_XBAR_ADDR(__TX_OUT_ACA_ACCUM_STATUS),
				.pd = SB_XBAR_ADDR(__TX_OUT_QUEUE_PD_BASE_ADDR_OFFSET),
				.acc_cnt = SB_XBAR_ADDR(__TX_OUT_ACA_ACCUM_COUNT),
			},
			.mdm_rxout = {
				.stat = SB_XBAR_ADDR(__RX_OUT_ACA_ACCUM_STATUS),
				.pd = SB_XBAR_ADDR(__RX_OUT_QUEUE_PD_BASE_ADDR_OFFSET),
				.acc_cnt = SB_XBAR_ADDR(__RX_OUT_ACA_ACCUM_COUNT),
			},
			.mdm_rxin = {
				.stat = SB_XBAR_ADDR(__RX_IN_ACA_ACCUM_STATUS),
				.pd = SB_XBAR_ADDR(__RX_IN_QUEUE_PD_BASE_ADDR_OFFSET),
				.acc_cnt = SB_XBAR_ADDR(__RX_IN_ACA_ACCUM_COUNT),
			},
		};
		if (dc_ep_dev_info_req(i, DC_EP_INT_PPE, &dev))
			pr_info("%s failed to get pcie ep %d information\n",
				__func__, i);
		pr_info("irq %d\n", dev.irq);
823 pr_info("phyiscal membase 0x%08x virtual membase 0x%p\n",
824 dev.phy_membase, dev.membase);
825 if (dev_num > 1) {
826 for (j = 0; j < dev.peer_num; j++) {
827 pr_info("phyiscal peer membase 0x%08x virtual peer membase 0x%p\n",
828 dev.peer_phy_membase[j], dev.peer_membase[j]);
829 }
830 }
831 /* For module unload perpose */
832 memcpy(&pcie_dev[i], &dev, sizeof(struct dc_ep_dev));
833 dc_ep_ppe_mbox_int_stress_test(&pcie_dev[i]);
834 dev.hw_ops->clk_on(&dev, PMU_CDMA | PMU_EMA | PMU_PPM2);
835 dev.hw_ops->clk_set(&dev, SYS_CLK_288MHZ, PPE_CLK_576MHZ);
836 dev.hw_ops->pinmux_set(&dev, 14, MUX_FUNC_ALT1);
837 dev.hw_ops->pinmux_set(&dev, 15, MUX_FUNC_ALT2);
838 dev.hw_ops->pinmux_get(&dev, 15, &func);
839 pr_info("gpio 15 func %d\n", func);
840 dev.hw_ops->pinmux_set(&dev, 13, MUX_FUNC_GPIO);
841 dev.hw_ops->gpio_dir(&dev, 13, GPIO_DIR_OUT);
842 dev.hw_ops->gpio_set(&dev, 13, 1);
843 dev.hw_ops->gpio_get(&dev, 13, &func);
844 pr_info("gpio 13 value %d\n", func);
845 dev.hw_ops->gpio_pupd_set(&dev, 14, GPIO_PULL_DOWN);
846 dev.hw_ops->gpio_od_set(&dev, 0, 1);
847 dev.hw_ops->gpio_src_set(&dev, 0, GPIO_SLEW_RATE_FAST);
848 dev.hw_ops->gpio_dcc_set(&dev, 0, GPIO_DRV_CUR_8MA);
849 dev.hw_ops->clk_get(&dev, &sysclk, &ppeclk);
850 pr_info("ppe clk %s sys clk %s\n", ppeclk_str[ppeclk],
851 sysclk_str[sysclk]);
852 dev.hw_ops->aca_init(&dev, &aca_cfg, &modem_cfg);
853 dev.hw_ops->aca_start(&dev, ACA_ALL_EN, 1);
854
855 pr_info("ACA test\n");
856 dc_aca_test_init(&dev, aca_soc_hd_desc[i].txin.base);
857
858 pr_info("DMA test\n");
859 dma_pkt_size = 64;
860 dma_test(&dev, dma_mode, 0, 1);
861 #if 0
862 dma_pkt_size = 128;
863 dma_test(&dev, dma_mode, 0, 1);
864 dma_pkt_size = 256;
865 dma_test(&dev, dma_mode, 0, 1);
866 dma_pkt_size = 512;
867 dma_test(&dev, dma_mode, 0, 1);
868 dma_pkt_size = 1024;
869 dma_test(&dev, dma_mode, 0, 1);
870 dma_pkt_size = 2048;
871 dma_test(&dev, dma_mode, 0, 1);
872
873 dma_mode = EP_TO_SOC;
874 dma_pkt_size = 64;
875 dma_test(&dev, dma_mode, 0, 1);
876 dma_pkt_size = 128;
877 dma_test(&dev, dma_mode, 0, 1);
878 dma_pkt_size = 256;
879 dma_test(&dev, dma_mode, 0, 1);
880 dma_pkt_size = 512;
881 dma_test(&dev, dma_mode, 0, 1);
882 dma_pkt_size = 1024;
883 dma_test(&dev, dma_mode, 0, 1);
884 dma_pkt_size = 2048;
885 dma_test(&dev, dma_mode, 0, 1);
886 #endif
887 }
888
889 pr_info("Intel(R) SmartPHY DSL(VRX518) PCIe EP Test Driver - %s\n",
890 ep_test_driver_version);
891 return 0;
892 }
893
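/*
 * Teardown mirrors the init path: the PPE interrupt is freed, the ACA
 * engine stopped, the EMA clock gated again, the endpoint handle released
 * and the SoC-side descriptor rings freed.
 */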
static void __exit dc_ep_test_exit(void)
{
	int i;
	int dev_num;
	u32 func = ACA_ALL_EN;
	struct dc_ep_dev *dev;

	if (dc_ep_dev_num_get(&dev_num)) {
		pr_err("%s failed to get total device number\n", __func__);
		return;
	}
	pr_info("%s: total %d EPs found\n", __func__, dev_num);
	for (i = 0; i < dev_num; i++) {
		dev = &pcie_dev[i];
		free_irq(dev->irq, dev);
		dev->hw_ops->aca_stop(dev, &func, 1);
		dev->hw_ops->clk_off(dev, PMU_EMA);
		if (dc_ep_dev_info_release(i)) {
			pr_info("%s failed to release pcie ep %d information\n",
				__func__, i);
		}
		aca_soc_desc_free(i);
	}
}

module_init(dc_ep_test_init);
module_exit(dc_ep_test_exit);

MODULE_AUTHOR("Intel Corporation, <Chuanhua.lei@intel.com>");
MODULE_DESCRIPTION("Intel(R) SmartPHY (VRX518) PCIe EP/ACA test driver");
MODULE_LICENSE("GPL");