/*******************************************************************************

  Intel SmartPHY DSL PCIe Endpoint/ACA Linux Test driver
  Copyright(c) 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

*******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/timex.h>

#include <net/dc_ep.h>
#define DRV_VERSION	"1.0.0"
static const char ep_test_driver_version[] = DRV_VERSION;
static struct dc_ep_dev pcie_dev[DC_EP_MAX_PEER + 1];
static int ppe_irq_num;

#define ep_wr32(value, reg)	(writel(value, dev->membase + reg))
#define ep_rd32(reg)		(readl(dev->membase + reg))

#define ep_wr32_mask(clr, set, reg) \
	ep_wr32(((ep_rd32(reg) & ~(clr)) | (set)), (reg))
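
/*
 * Note: these register accessors implicitly use a "struct dc_ep_dev *dev"
 * that must be in scope in the calling function; all register offsets are
 * relative to the endpoint window mapped at dev->membase.
 */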
struct aca_hd_desc {
	void *base;
	dma_addr_t phy_base;
	size_t size; /* in bytes */
};

struct aca_hd_desc_cfg {
	struct aca_hd_desc txin;
	struct aca_hd_desc txout;
	struct aca_hd_desc rxout;
};

static struct aca_hd_desc_cfg aca_soc_hd_desc[DC_EP_MAX_PEER + 1];
static void ep_mem_write(u8 __iomem *dst, const void *src, size_t len)
{
	size_t i;
	const u32 *src_addr = src;

	if (len % 4)
		pr_info("Warning: copy len is not a multiple of 4\n");

	/* Copy the buffer to I/O memory one 32-bit word at a time */
	for (i = 0; i < (len >> 2); i++)
		writel(src_addr[i], dst + (i << 2));
}
static irqreturn_t dc_ep_ppe_intr(int irq, void *dev_id)
{
	struct dc_ep_dev *dev = dev_id;

	if (ep_rd32(MBOX_IGU0_ISR) == 0) {
		pr_err("Fatal error, dummy interrupt\n");
		return IRQ_NONE;
	}

	ppe_irq_num++;
	/* Acknowledge the test interrupt and flush the posted write */
	ep_wr32(PPE_MBOX_TEST_BIT, MBOX_IGU0_ISRC);
	ep_rd32(MBOX_IGU0_ISR);

	return IRQ_HANDLED;
}
static void dc_ep_ppe_mbox_reg_dump(struct dc_ep_dev *dev)
{
	pr_info("MBOX_IGU0_ISRS addr %p data 0x%08x\n",
		dev->membase + MBOX_IGU0_ISRS, ep_rd32(MBOX_IGU0_ISRS));
	pr_info("MBOX_IGU0_ISRC addr %p data 0x%08x\n",
		dev->membase + MBOX_IGU0_ISRC, ep_rd32(MBOX_IGU0_ISRC));
	pr_info("MBOX_IGU0_ISR addr %p data 0x%08x\n",
		dev->membase + MBOX_IGU0_ISR, ep_rd32(MBOX_IGU0_ISR));
	pr_info("MBOX_IGU0_IER addr %p data 0x%08x\n",
		dev->membase + MBOX_IGU0_IER, ep_rd32(MBOX_IGU0_IER));
}
#define PPE_INT_TIMEOUT	10
static int dc_ep_ppe_mbox_int_stress_test(struct dc_ep_dev *dev)
{
	int i, j;
	int ret;

	/* Clear any pending test interrupt before installing the handler */
	ep_wr32(PPE_MBOX_TEST_BIT, MBOX_IGU0_ISRC);

	ret = request_irq(dev->irq, dc_ep_ppe_intr, 0, "PPE_MSI", dev);
	if (ret) {
		pr_err("%s request irq %d failed\n", __func__, dev->irq);
		return ret;
	}

	pr_info("PPE test\n");
	ep_wr32(PPE_MBOX_TEST_BIT, MBOX_IGU0_IER);

	/* Purposely trigger interrupts */
	for (i = 0; i < PPE_MBOX_IRQ_TEST_NUM; i++) {
		j = 0;
		/* Wait until the previous test interrupt has been serviced */
		while ((ep_rd32(MBOX_IGU0_ISR) & PPE_MBOX_TEST_BIT)) {
			j++;
			if (j > PPE_INT_TIMEOUT)
				break;
		}
		ep_wr32(PPE_MBOX_TEST_BIT, MBOX_IGU0_ISRS);
		ep_rd32(MBOX_IGU0_ISR);
	}

	pr_info("irq triggered %d expected %d\n", ppe_irq_num,
		PPE_MBOX_IRQ_TEST_NUM);
	dc_ep_ppe_mbox_reg_dump(dev);

	return 0;
}
static void umt_txin_send(struct dc_ep_dev *dev,
	u8 __iomem *soc_dbase, int num)
{
	int i;
	struct aca_dma_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.data_pointer = 0x26000000;
	desc.dw0 = 0x0000007f;

	for (i = 0; i < num; i++) {
		desc.data_pointer += roundup(desc.data_len, 4);
		ep_mem_write(soc_dbase + i * sizeof(desc),
			(void *)&desc, sizeof(desc));
	}

	ep_wr32(num, TXIN_HD_ACCUM_ADD);
}
static void ppe_txout_send(struct dc_ep_dev *dev,
	u8 __iomem *ppe_sb_base, int num)
{
	int i;
	struct aca_dma_desc_2dw desc;

	memset(&desc, 0, sizeof(desc));
	desc.status.field.own = 1;
	desc.status.field.sop = 1;
	desc.status.field.eop = 1;
	desc.status.field.data_len = 127;
	desc.data_pointer = 0x26100000;

	for (i = 0; i < num; i++) {
		desc.data_pointer += roundup(desc.status.field.data_len, 4);
		ep_mem_write(ppe_sb_base + i * sizeof(desc),
			(void *)&desc, sizeof(desc));
	}

	ep_wr32(num, TXOUT_ACA_ACCUM_ADD);
}
static void ppe_rxout_send(struct dc_ep_dev *dev,
	u8 __iomem *ppe_sb_base, int num)
{
	int i;
	struct aca_dma_desc_2dw desc;

	memset(&desc, 0, sizeof(desc));
	desc.status.field.own = 0;
	desc.status.field.sop = 1;
	desc.status.field.eop = 1;
	desc.status.field.meta_data0 = 0x3;
	desc.status.field.meta_data1 = 0x7f;
	desc.status.field.data_len = 127;
	desc.data_pointer = 0x26200000;

	for (i = 0; i < num; i++) {
		desc.data_pointer += roundup(desc.status.field.data_len, 4);
		ep_mem_write(ppe_sb_base + i * sizeof(desc),
			(void *)&desc, sizeof(desc));
	}

	ep_wr32(num, RXOUT_ACA_ACCUM_ADD);
}
static void dc_aca_test_init(struct dc_ep_dev *dev, void *soc_base)
{
	umt_txin_send(dev, (u8 __iomem *)soc_base, 8);
	ppe_txout_send(dev, (TXOUT_PD_DBASE + dev->membase), 8);
	ppe_rxout_send(dev, (RXOUT_PD_DBASE + dev->membase), 8);
}
static const char *sysclk_str[SYS_CLK_MAX] = {
	/* clock-name strings indexed by the SYS_CLK_* enum */
};

static const char *ppeclk_str[PPE_CLK_MAX] = {
	/* clock-name strings indexed by the PPE_CLK_* enum */
};
#define ACA_PMU_CTRL	0x11C
#define ACA_PMU_DMA	BIT(2)
#define ACA_PMU_EMA	BIT(22)

/* DMA endianness swap modes */
enum {
	DMA_ENDIAN_TYPE0 = 0,	/*!< No swap (B0B1B2B3 => B0B1B2B3) */
	DMA_ENDIAN_TYPE1,	/*!< Byte Swap (B0B1B2B3 => B1B0B3B2) */
	DMA_ENDIAN_TYPE2,	/*!< Word Swap (B0B1B2B3 => B2B3B0B1) */
	DMA_ENDIAN_TYPE3,	/*!< DWord Swap (B0B1B2B3 => B3B2B1B0) */
};

#ifdef CONFIG_CPU_BIG_ENDIAN
#define DMA_ENDIAN_DEFAULT	DMA_ENDIAN_TYPE3
#else
#define DMA_ENDIAN_DEFAULT	DMA_ENDIAN_TYPE0
#endif

/* DMA burst length encodings */
enum {
	DMA_BURSTL_2DW = 1,	/*!< 2 DWORD DMA burst length */
	DMA_BURSTL_4DW = 2,	/*!< 4 DWORD DMA burst length */
	DMA_BURSTL_8DW = 3,	/*!< 8 DWORD DMA burst length */
	DMA_BURSTL_16DW = 16,
};
#define DMA_BURSTL_DEFAULT	DMA_BURSTL_16DW

/** Default Port Transmit weight value */
#define DMA_TX_PORT_DEFAULT_WEIGHT	1
#define DMA_TX_CHAN_DEFAULT_WEIGHT	1

/* DMA channel direction */
enum {
	DMA_RX_CH = 0,	/*!< Rx channel */
	DMA_TX_CH = 1,	/*!< Tx channel */
};

enum {
	DMA_PKT_DROP_DISABLE = 0,
};
#ifdef CONFIG_CPU_BIG_ENDIAN
/* 2 DWs format descriptor (big-endian bit order) */
struct rx_desc_2dw {
	u32 data_pointer; /* Descriptor data pointer */
	union {
		struct {
			/* own, c, sop, eop, byte_offset, data_len, ... */
		} field;
		u32 word;
	} status;
} __packed __aligned(8);

struct tx_desc_2dw {
	u32 data_pointer; /* Descriptor data pointer */
	union {
		struct {
			/* own, c, sop, eop, byte_offset, data_len, ... */
		} field;
		u32 word;
	} status;
} __packed __aligned(8);
#else
/* 2 DWs format descriptor (little-endian bit order) */
struct rx_desc_2dw {
	u32 data_pointer; /* Descriptor data pointer */
	union {
		struct {
			/* own, c, sop, eop, byte_offset, data_len, ... */
		} field;
		u32 word;
	} status;
} __packed __aligned(8);

struct tx_desc_2dw {
	u32 data_pointer; /* Descriptor data pointer */
	union {
		struct {
			/* own, c, sop, eop, byte_offset, data_len, ... */
		} field;
		u32 word;
	} status;
} __packed __aligned(8);
#endif
static int dma_pkt_size = 1024;
static int dma_mode = SOC_TO_EP;
static int dma_burst = 16;
static int desc_num = 32;

module_param(dma_pkt_size, int, 0);
MODULE_PARM_DESC(dma_pkt_size, "Single packet length in bytes");

module_param(dma_mode, int, 0);
MODULE_PARM_DESC(dma_mode, "mode 0 -- SoC->EP, mode 1 -- EP->SoC");
static void dma_ctrl_rst(struct dc_ep_dev *dev)
{
	/* Clear the DMA and EMA bits in the ACA PMU control register */
	ep_wr32_mask(ACA_PMU_DMA | ACA_PMU_EMA, 0, ACA_PMU_CTRL);
	/* Reset the DMA controller */
	ep_wr32_mask(0, 1, DMA_CTRL);
}
static void dma_chan_rst(struct dc_ep_dev *dev, int cn)
{
	ep_wr32(0x2, DMA_CCTRL);
	while (ep_rd32(DMA_CCTRL) & 0x01)
		;
}
static void dma_port_cfg(struct dc_ep_dev *dev)
{
	u32 reg = 0;

	reg |= (DMA_TX_PORT_DEFAULT_WEIGHT << 12);
	reg |= (DMA_ENDIAN_TYPE0 << 10);
	reg |= (DMA_ENDIAN_TYPE0 << 8);
	reg |= (DMA_PKT_DROP_DISABLE << 6);

	ep_wr32(reg, DMA_PCTRL);
}
static void dma_byte_enable(struct dc_ep_dev *dev, int enable)
{
	if (enable)
		ep_wr32_mask(0, BIT(9), DMA_CTRL);
	else
		ep_wr32_mask(BIT(9), 0, DMA_CTRL);
}
static void dma_tx_ch_cfg(struct dc_ep_dev *dev, int ch, u32 desc_base,
	u32 desc_phys, dma_addr_t data_base, int desc_num)
{
	int i;
	struct tx_desc_2dw *tx_desc;

	for (i = 0; i < desc_num; i++) {
		tx_desc = (struct tx_desc_2dw *)
			(desc_base + (i * sizeof(*tx_desc)));
		tx_desc->data_pointer = (((u32)(data_base +
			(i * dma_pkt_size))) & 0xfffffff8);
		tx_desc->status.word = 0;
		tx_desc->status.field.byte_offset = 0;
		tx_desc->status.field.data_len = dma_pkt_size;
		tx_desc->status.field.sop = 1;
		tx_desc->status.field.eop = 1;
		tx_desc->status.field.own = 1;

		pr_info("Tx desc num %d word 0x%08x data pointer 0x%08x\n",
			i, tx_desc->status.word, tx_desc->data_pointer);
	}

	ep_wr32(desc_phys, DMA_CDBA);
	ep_wr32(desc_num, DMA_CDLEN);
}
static void dma_rx_ch_cfg(struct dc_ep_dev *dev, int ch, u32 desc_base,
	u32 desc_phys, dma_addr_t data_base, int desc_num)
{
	int i;
	struct rx_desc_2dw *rx_desc;

	for (i = 0; i < desc_num; i++) {
		rx_desc = (struct rx_desc_2dw *)(desc_base
			+ (i * sizeof(*rx_desc)));
		rx_desc->data_pointer = (((u32)(data_base +
			(i * dma_pkt_size))) & 0xfffffff8);
		rx_desc->status.word = 0;
		rx_desc->status.field.sop = 1;
		rx_desc->status.field.eop = 1;
		rx_desc->status.field.byte_offset = 0;
		rx_desc->status.field.data_len = dma_pkt_size;
		rx_desc->status.field.own = 1; /* DMA owns the descriptor */

		pr_info("Rx desc num %d word 0x%08x data pointer 0x%08x\n",
			i, rx_desc->status.word, rx_desc->data_pointer);
	}

	ep_wr32(desc_phys, DMA_CDBA);
	ep_wr32(desc_num, DMA_CDLEN);
}
static void dma_chan_on(struct dc_ep_dev *dev, u8 cn)
{
	ep_wr32_mask(0, BIT(0), DMA_CCTRL);
}

static void dma_chan_off(struct dc_ep_dev *dev, u8 cn)
{
	ep_wr32_mask(BIT(0), 0, DMA_CCTRL);
}
#define DEFAULT_TEST_PATTEN	0x12345678

#define REG32(addr)	(*((volatile u32 *)(addr)))

#ifdef CONFIG_CPU_BIG_ENDIAN
#define ___swab32(x) ((u32)(				\
	(((u32)(x) & (u32)0x000000ffUL) << 24) |	\
	(((u32)(x) & (u32)0x0000ff00UL) << 8) |		\
	(((u32)(x) & (u32)0x00ff0000UL) >> 8) |		\
	(((u32)(x) & (u32)0xff000000UL) >> 24)))
#else
#define ___swab32(x) (x)
#endif
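
/*
 * memcopy_data_check() below compares received words against
 * ___swab32(DEFAULT_TEST_PATTEN); on big-endian CPUs the reference pattern
 * is byte-swapped, presumably to match the byte order the DMA engine uses.
 */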
static void dma_sdram_preload(void *sdram_data_tx_ptr, void *sdram_data_rx_ptr)
{
	int i, j;
	u32 testaddr = (u32)sdram_data_tx_ptr;

	for (i = 0; i < desc_num; i++) {
		for (j = 0; j < dma_pkt_size; j = j + 4) {
			REG32(testaddr + i * dma_pkt_size + j)
				= DEFAULT_TEST_PATTEN;
		}
	}
	pr_info("SDR preload of TX locations with test pattern done\n");

	testaddr = (u32)sdram_data_rx_ptr;
	pr_info("RX preload start address: 0x%08x\n", (u32)(testaddr));

	for (i = 0; i < desc_num; i++) {
		for (j = 0; j < roundup(dma_pkt_size,
			dma_burst << 2); j = j + 4)
			REG32(testaddr + i * dma_pkt_size + j) = 0xcccccccc;
	}
	pr_info("SDR locations for memcopy RX preset to 0xcccccccc done\n");
}
static void memcopy_data_check(u32 rx_data_addr)
{
	int i, j;
	u32 read_data;

	for (i = 0; i < desc_num; i++) {
		for (j = 0; j < dma_pkt_size; j = j + 4) {
			read_data = REG32(rx_data_addr + i * dma_pkt_size + j);
			if (read_data != ___swab32(DEFAULT_TEST_PATTEN))
				pr_info("Memcopy ERROR at addr 0x%08x data 0x%08x\n",
					(rx_data_addr + i * dma_pkt_size + j),
					read_data);
		}
	}
}
static u32 plat_throughput_calc(u32 payload, int cycles)
{
	/* payload is in bits; the factor of 300 assumes a 300 MHz cycle
	 * counter, so the result is in Mbps
	 */
	return (u32)((payload * 300) / cycles);
}
#define DMA_CPOLL_CNT_MASK	0xFFF0u

static void dma_ctrl_global_polling_enable(struct dc_ep_dev *dev, int interval)
{
	u32 reg = 0;

	reg |= BIT(31);		/* global polling enable (assumed bit) */
	reg |= (interval << 4);	/* polling counter, DMA_CPOLL_CNT_MASK field */

	/* Target register name (DMA_CPOLL) assumed from the mask name */
	ep_wr32_mask(DMA_CPOLL_CNT_MASK, reg, DMA_CPOLL);
}
static void dma_controller_cfg(struct dc_ep_dev *dev)
{
	ep_wr32_mask(0, BIT(31), DMA_CTRL);
	ep_wr32_mask(BIT(30), 0, DMA_CTRL);
	ep_wr32_mask(0, BIT(1), DMA_CTRL);
	ep_wr32_mask(0, BIT(13), DMA_CTRL);
}
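
/*
 * Offsets into the EP's memory window; they are added to dev->membase
 * (virtual) or dev->phy_membase (physical) before use.
 */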
#define PDRAM_OFFSET		0x200200
#define PDRAM_TX_DESC_OFFSET	0x200000
#define PDRAM_RX_DESC_OFFSET	0x200100
#define ACA_SRAM_OFFSET		0x100000
#define PPE_SB_TX_DESC_OFFSET	0x280000
#define PPE_SB_RX_DESC_OFFSET	0x281000

#define PPE_FPI_TX_DESC_OFFSET	0x320000
#define PPE_FPI_RX_DESC_OFFSET	0x321000
static void dma_test(struct dc_ep_dev *dev, int mode, int rcn, int tcn)
{
	int loop = 0;
	u32 start, end, cycles;
	void *tx_data = NULL;
	void *rx_data = NULL;
	dma_addr_t tx_data_phys = 0;
	dma_addr_t rx_data_phys = 0;

	struct rx_desc_2dw *rx_desc;
	struct tx_desc_2dw *tx_desc;
	struct tx_desc_2dw *last_tx_desc;
	struct rx_desc_2dw *last_rx_desc;
	dma_addr_t tx_desc_phys;
	dma_addr_t rx_desc_phys;
	u32 membase = (u32)(dev->membase);

	rx_desc = (struct rx_desc_2dw *)(membase + PDRAM_RX_DESC_OFFSET);
	rx_desc_phys = (dev->phy_membase + PDRAM_RX_DESC_OFFSET);
	tx_desc = (struct tx_desc_2dw *)(membase + PDRAM_TX_DESC_OFFSET);
	tx_desc_phys = (dev->phy_membase + PDRAM_TX_DESC_OFFSET);
	last_rx_desc = rx_desc + (desc_num - 1);
	last_tx_desc = tx_desc + (desc_num - 1);

	if (mode == SOC_TO_EP) { /* Read from SoC DDR to local PDBRAM */
		tx_data = dma_alloc_coherent(NULL,
			desc_num * dma_pkt_size, &tx_data_phys, GFP_DMA);
		rx_data_phys = (dma_addr_t)(dev->phy_membase + PDRAM_OFFSET);
		rx_data = (void *)(membase + PDRAM_OFFSET);
	} else { /* Write from local PDBRAM to remote DDR */
		tx_data_phys = (dma_addr_t)(dev->phy_membase + PDRAM_OFFSET);
		tx_data = (void *)(membase + PDRAM_OFFSET);
		rx_data = dma_alloc_coherent(NULL, desc_num * dma_pkt_size,
			&rx_data_phys, GFP_DMA);
	}

	pr_info("tx_desc_base %p tx_desc_phys 0x%08x tx_data %p tx_data_phys 0x%08x\n",
		tx_desc, (u32)tx_desc_phys, tx_data, (u32)tx_data_phys);

	pr_info("rx_desc_base %p rx_desc_phys 0x%08x rx_data %p rx_data_phys 0x%08x\n",
		rx_desc, (u32)rx_desc_phys, rx_data, (u32)rx_data_phys);

	pr_info("dma burst %d desc number %d packet size %d\n",
		dma_burst, desc_num, dma_pkt_size);

	dma_chan_rst(dev, rcn);
	dma_chan_rst(dev, tcn);

	dma_controller_cfg(dev);
	dma_byte_enable(dev, 1);

	dma_ctrl_global_polling_enable(dev, 24);

	dma_sdram_preload(tx_data, rx_data);

	dma_tx_ch_cfg(dev, tcn, (u32)tx_desc, tx_desc_phys,
		tx_data_phys, desc_num);
	dma_rx_ch_cfg(dev, rcn, (u32)rx_desc, rx_desc_phys,
		rx_data_phys, desc_num);

	udelay(5); /* Make sure that RX descriptors are prefetched */

	start = get_cycles();
	dma_chan_on(dev, rcn);
	dma_chan_on(dev, tcn);

	/* Wait till the TX channel descriptor OWN bit is 0 */
	while (last_tx_desc->status.field.own == 1) {
		loop++;
	}
	end = get_cycles();

	cycles = end - start;
	pr_info("cycles %d throughput %dMb\n", cycles,
		plat_throughput_calc(desc_num * dma_pkt_size * 8, cycles));
	pr_info("loop times %d\n", loop);

	while (last_rx_desc->status.field.own == 1)
		;

	memcopy_data_check((u32)rx_data);
	dma_chan_off(dev, rcn);
	dma_chan_off(dev, tcn);
	if (mode == SOC_TO_EP) {
		dma_free_coherent(NULL, desc_num * dma_pkt_size,
			tx_data, tx_data_phys);
	} else {
		dma_free_coherent(NULL, desc_num * dma_pkt_size,
			rx_data, rx_data_phys);
	}
}
static int aca_soc_desc_alloc(int dev)
{
	int size;
	void *base;
	dma_addr_t phy_addr;

	if (dev < 0 || dev >= (DC_EP_MAX_PEER + 1))
		return -EINVAL;

	/* TXIN */
	size = TXIN_SOC_DES_NUM * TXIN_HD_DES_SIZE * 4;
	base = dma_alloc_coherent(NULL, size, &phy_addr, GFP_DMA);
	if (!base)
		return -ENOMEM;
	aca_soc_hd_desc[dev].txin.base = base;
	aca_soc_hd_desc[dev].txin.phy_base = phy_addr;
	aca_soc_hd_desc[dev].txin.size = size;
	pr_info("txin soc desc base %p phy 0x%08x size 0x%08x\n",
		base, (u32)phy_addr, size);

	/* TXOUT */
	size = TXOUT_SOC_DES_NUM * TXOUT_HD_DES_SIZE * 4;
	base = dma_alloc_coherent(NULL, size, &phy_addr, GFP_DMA);
	if (!base)
		goto err_txin;
	aca_soc_hd_desc[dev].txout.base = base;
	aca_soc_hd_desc[dev].txout.phy_base = phy_addr;
	aca_soc_hd_desc[dev].txout.size = size;
	pr_info("txout soc desc base %p phy 0x%08x size 0x%08x\n",
		base, (u32)phy_addr, size);

	/* RXOUT */
	size = RXOUT_SOC_DES_NUM * RXOUT_HD_DES_SIZE * 4;
	base = dma_alloc_coherent(NULL, size, &phy_addr, GFP_DMA);
	if (!base)
		goto err_txout;
	aca_soc_hd_desc[dev].rxout.base = base;
	aca_soc_hd_desc[dev].rxout.phy_base = phy_addr;
	aca_soc_hd_desc[dev].rxout.size = size;
	pr_info("rxout soc desc base %p phy 0x%08x size 0x%08x\n",
		base, (u32)phy_addr, size);

	return 0;

err_txout:
	dma_free_coherent(NULL, aca_soc_hd_desc[dev].txout.size,
		aca_soc_hd_desc[dev].txout.base,
		aca_soc_hd_desc[dev].txout.phy_base);
err_txin:
	dma_free_coherent(NULL, aca_soc_hd_desc[dev].txin.size,
		aca_soc_hd_desc[dev].txin.base,
		aca_soc_hd_desc[dev].txin.phy_base);

	return -ENOMEM;
}
static int aca_soc_desc_free(int dev)
{
	size_t size;
	void *base;
	dma_addr_t phy_addr;

	if (dev < 0 || dev >= (DC_EP_MAX_PEER + 1))
		return -EINVAL;

	/* TXIN */
	base = aca_soc_hd_desc[dev].txin.base;
	phy_addr = aca_soc_hd_desc[dev].txin.phy_base;
	size = aca_soc_hd_desc[dev].txin.size;
	dma_free_coherent(NULL, size, base, phy_addr);

	/* TXOUT */
	base = aca_soc_hd_desc[dev].txout.base;
	phy_addr = aca_soc_hd_desc[dev].txout.phy_base;
	size = aca_soc_hd_desc[dev].txout.size;
	dma_free_coherent(NULL, size, base, phy_addr);

	/* RXOUT */
	base = aca_soc_hd_desc[dev].rxout.base;
	phy_addr = aca_soc_hd_desc[dev].rxout.phy_base;
	size = aca_soc_hd_desc[dev].rxout.size;
	dma_free_coherent(NULL, size, base, phy_addr);

	return 0;
}
static int __init dc_ep_test_init(void)
{
	int i, j;
	int dev_num;
	u32 sysclk, ppeclk;
	u32 func;
	struct dc_ep_dev dev;

	if (dc_ep_dev_num_get(&dev_num)) {
		pr_err("%s failed to get total device number\n", __func__);
		return -EIO;
	}

	pr_info("%s: total %d EPs found\n", __func__, dev_num);

	for (i = 0; i < dev_num; i++)
		aca_soc_desc_alloc(i);

	for (i = 0; i < dev_num; i++) {
		/* member names for the per-ring blocks are assumed here */
		struct aca_param aca_cfg = {
			.aca_txin = {
				.soc_desc_base
					= aca_soc_hd_desc[i].txin.phy_base,
				.soc_desc_num = TXIN_SOC_DES_NUM,
				.pp_buf_desc_num = 32,
				.pd_desc_base = TXIN_PD_DBASE,
				.pd_desc_num = TXIN_PD_DES_NUM,
				.hd_size_in_dw = TXIN_HD_DES_SIZE,
				.pd_size_in_dw = TXIN_PD_DES_SIZE,
			},
			.aca_txout = {
				.soc_desc_base
					= aca_soc_hd_desc[i].txout.phy_base,
				.soc_desc_num = TXOUT_SOC_DES_NUM,
				.pp_buf_desc_num = 32,
				.pd_desc_base = TXOUT_PD_DBASE,
				.pd_desc_num = TXOUT_PD_DES_NUM,
				.hd_size_in_dw = TXOUT_HD_DES_SIZE,
				.pd_size_in_dw = TXOUT_PD_DES_SIZE,
			},
			.aca_rxout = {
				.soc_desc_base
					= aca_soc_hd_desc[i].rxout.phy_base,
				.soc_desc_num = RXOUT_SOC_DES_NUM,
				.pp_buf_desc_num = 32,
				.pd_desc_base = RXOUT_PD_DBASE,
				.pd_desc_num = RXOUT_PD_DES_NUM,
				.hd_size_in_dw = RXOUT_HD_DES_SIZE,
				.pd_size_in_dw = RXOUT_PD_DES_SIZE,
			},
		};
		/* the .mdm_* member names are assumed here */
		struct aca_modem_param modem_cfg = {
			.mdm_txout = {
				.stat = SB_XBAR_ADDR(__TX_OUT_ACA_ACCUM_STATUS),
				.pd = SB_XBAR_ADDR(__TX_OUT_QUEUE_PD_BASE_ADDR_OFFSET),
				.acc_cnt = SB_XBAR_ADDR(__TX_OUT_ACA_ACCUM_COUNT),
			},
			.mdm_rxout = {
				.stat = SB_XBAR_ADDR(__RX_OUT_ACA_ACCUM_STATUS),
				.pd = SB_XBAR_ADDR(__RX_OUT_QUEUE_PD_BASE_ADDR_OFFSET),
				.acc_cnt = SB_XBAR_ADDR(__RX_OUT_ACA_ACCUM_COUNT),
			},
			.mdm_rxin = {
				.stat = SB_XBAR_ADDR(__RX_IN_ACA_ACCUM_STATUS),
				.pd = SB_XBAR_ADDR(__RX_IN_QUEUE_PD_BASE_ADDR_OFFSET),
				.acc_cnt = SB_XBAR_ADDR(__RX_IN_ACA_ACCUM_COUNT),
			},
		};
		if (dc_ep_dev_info_req(i, DC_EP_INT_PPE, &dev))
			pr_info("%s failed to get pcie ep %d information\n",
				__func__, i);

		pr_info("irq %d\n", dev.irq);
		pr_info("physical membase 0x%08x virtual membase 0x%p\n",
			dev.phy_membase, dev.membase);

		for (j = 0; j < dev.peer_num; j++) {
			pr_info("physical peer membase 0x%08x virtual peer membase 0x%p\n",
				dev.peer_phy_membase[j], dev.peer_membase[j]);
		}

		/* For module unload purposes */
		memcpy(&pcie_dev[i], &dev, sizeof(struct dc_ep_dev));
		dc_ep_ppe_mbox_int_stress_test(&pcie_dev[i]);
		dev.hw_ops->clk_on(&dev, PMU_CDMA | PMU_EMA | PMU_PPM2);
		dev.hw_ops->clk_set(&dev, SYS_CLK_288MHZ, PPE_CLK_576MHZ);
		dev.hw_ops->pinmux_set(&dev, 14, MUX_FUNC_ALT1);
		dev.hw_ops->pinmux_set(&dev, 15, MUX_FUNC_ALT2);
		dev.hw_ops->pinmux_get(&dev, 15, &func);
		pr_info("gpio 15 func %d\n", func);
		dev.hw_ops->pinmux_set(&dev, 13, MUX_FUNC_GPIO);
		dev.hw_ops->gpio_dir(&dev, 13, GPIO_DIR_OUT);
		dev.hw_ops->gpio_set(&dev, 13, 1);
		dev.hw_ops->gpio_get(&dev, 13, &func);
		pr_info("gpio 13 value %d\n", func);
		dev.hw_ops->gpio_pupd_set(&dev, 14, GPIO_PULL_DOWN);
		dev.hw_ops->gpio_od_set(&dev, 0, 1);
		dev.hw_ops->gpio_src_set(&dev, 0, GPIO_SLEW_RATE_FAST);
		dev.hw_ops->gpio_dcc_set(&dev, 0, GPIO_DRV_CUR_8MA);
		dev.hw_ops->clk_get(&dev, &sysclk, &ppeclk);
		pr_info("ppe clk %s sys clk %s\n", ppeclk_str[ppeclk],
			sysclk_str[sysclk]);

		dev.hw_ops->aca_init(&dev, &aca_cfg, &modem_cfg);
		dev.hw_ops->aca_start(&dev, ACA_ALL_EN, 1);

		pr_info("ACA test\n");
		dc_aca_test_init(&dev, aca_soc_hd_desc[i].txin.base);

		pr_info("DMA test\n");
		dma_test(&dev, dma_mode, 0, 1);
		dma_test(&dev, dma_mode, 0, 1);
		dma_test(&dev, dma_mode, 0, 1);
		dma_test(&dev, dma_mode, 0, 1);
		dma_test(&dev, dma_mode, 0, 1);
		dma_test(&dev, dma_mode, 0, 1);

		dma_mode = EP_TO_SOC;
		dma_test(&dev, dma_mode, 0, 1);
		dma_test(&dev, dma_mode, 0, 1);
		dma_test(&dev, dma_mode, 0, 1);
		dma_test(&dev, dma_mode, 0, 1);
		dma_test(&dev, dma_mode, 0, 1);
		dma_test(&dev, dma_mode, 0, 1);
	}

	pr_info("Intel(R) SmartPHY DSL(VRX518) PCIe EP Test Driver - %s\n",
		ep_test_driver_version);

	return 0;
}
static void __exit dc_ep_test_exit(void)
{
	int i;
	int dev_num;
	u32 func = ACA_ALL_EN;
	struct dc_ep_dev *dev;

	if (dc_ep_dev_num_get(&dev_num)) {
		pr_err("%s failed to get total device number\n", __func__);
		return;
	}

	pr_info("%s: total %d EPs found\n", __func__, dev_num);
	for (i = 0; i < dev_num; i++) {
		dev = &pcie_dev[i];
		free_irq(dev->irq, dev);
		dev->hw_ops->aca_stop(dev, &func, 1);
		dev->hw_ops->clk_off(dev, PMU_EMA);
		if (dc_ep_dev_info_release(i)) {
			pr_info("%s failed to release pcie ep %d information\n",
				__func__, i);
		}
		aca_soc_desc_free(i);
	}
}
);
920 module_exit(dc_ep_test_exit
);
922 MODULE_AUTHOR("Intel Corporation, <Chuanhua.lei@intel.com>");
923 MODULE_DESCRIPTION("Intel(R) SmartPHY (VRX518) PCIe EP/ACA test driver");
924 MODULE_LICENSE("GPL");