1 Index: linux-4.9.20/drivers/pci/host/pcie-qcom.c
2 ===================================================================
3 --- linux-4.9.20.orig/drivers/pci/host/pcie-qcom.c
4 +++ linux-4.9.20/drivers/pci/host/pcie-qcom.c
7 #include "pcie-designware.h"
10 -#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
11 -#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
13 -#define PCIE20_PLR_IATU_VIEWPORT 0x900
14 -#define PCIE20_PLR_IATU_REGION_OUTBOUND (0x0 << 31)
15 -#define PCIE20_PLR_IATU_REGION_INDEX(x) (x << 0)
17 -#define PCIE20_PLR_IATU_CTRL1 0x904
18 -#define PCIE20_PLR_IATU_TYPE_CFG0 (0x4 << 0)
19 -#define PCIE20_PLR_IATU_TYPE_MEM (0x0 << 0)
21 -#define PCIE20_PLR_IATU_CTRL2 0x908
22 -#define PCIE20_PLR_IATU_ENABLE BIT(31)
24 -#define PCIE20_PLR_IATU_LBAR 0x90C
25 -#define PCIE20_PLR_IATU_UBAR 0x910
26 -#define PCIE20_PLR_IATU_LAR 0x914
27 -#define PCIE20_PLR_IATU_LTAR 0x918
28 -#define PCIE20_PLR_IATU_UTAR 0x91c
30 -#define MSM_PCIE_DEV_CFG_ADDR 0x01000000
33 -#define PCIE20_PARF_PCS_DEEMPH 0x34
34 -#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) (x << 16)
35 -#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) (x << 8)
36 -#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) (x << 0)
38 -#define PCIE20_PARF_PCS_SWING 0x38
39 -#define PCS_SWING_TX_SWING_FULL(x) (x << 8)
40 -#define PCS_SWING_TX_SWING_LOW(x) (x << 0)
42 +#define PCIE20_PARF_SYS_CTRL 0x00
43 #define PCIE20_PARF_PHY_CTRL 0x40
44 -#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK (0x1f << 16)
45 -#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) (x << 16)
47 #define PCIE20_PARF_PHY_REFCLK 0x4C
48 -#define REF_SSP_EN BIT(16)
49 -#define REF_USE_PAD BIT(12)
51 -#define PCIE20_PARF_CONFIG_BITS 0x50
52 -#define PHY_RX0_EQ(x) (x << 24)
54 #define PCIE20_PARF_DBI_BASE_ADDR 0x168
55 -#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16c
56 +#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
57 +#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
58 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
59 +#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
60 +#define PCIE20_PARF_LTSSM 0x1B0
61 +#define PCIE20_PARF_SID_OFFSET 0x234
62 +#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
64 #define PCIE20_ELBI_SYS_CTRL 0x04
65 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
66 @@ -95,18 +59,14 @@ struct qcom_pcie_resources_v0 {
67 struct clk *iface_clk;
70 - struct clk *aux_clk;
71 - struct clk *ref_clk;
72 struct reset_control *pci_reset;
73 struct reset_control *axi_reset;
74 struct reset_control *ahb_reset;
75 struct reset_control *por_reset;
76 struct reset_control *phy_reset;
77 - struct reset_control *ext_reset;
78 struct regulator *vdda;
79 struct regulator *vdda_phy;
80 struct regulator *vdda_refclk;
81 - uint8_t phy_tx0_term_offset;
84 struct qcom_pcie_resources_v1 {
85 @@ -118,9 +78,40 @@ struct qcom_pcie_resources_v1 {
86 struct regulator *vdda;
89 +struct qcom_pcie_resources_v2 {
90 + struct clk *aux_clk;
91 + struct clk *master_clk;
92 + struct clk *slave_clk;
93 + struct clk *cfg_clk;
94 + struct clk *pipe_clk;
97 +struct qcom_pcie_resources_v3 {
98 + struct clk *ahb_clk;
99 + struct clk *axi_m_clk;
100 + struct clk *axi_s_clk;
101 + struct reset_control *axi_m_reset;
102 + struct reset_control *axi_s_reset;
103 + struct reset_control *pipe_reset;
104 + struct reset_control *axi_m_vmid_reset;
105 + struct reset_control *axi_s_xpu_reset;
106 + struct reset_control *parf_reset;
107 + struct reset_control *phy_reset;
108 + struct reset_control *axi_m_sticky_reset;
109 + struct reset_control *pipe_sticky_reset;
110 + struct reset_control *pwr_reset;
111 + struct reset_control *ahb_reset;
112 + struct reset_control *phy_ahb_reset;
113 + struct regulator *vdda;
114 + struct regulator *vdda_phy;
115 + struct regulator *vdda_refclk;
118 union qcom_pcie_resources {
119 struct qcom_pcie_resources_v0 v0;
120 struct qcom_pcie_resources_v1 v1;
121 + struct qcom_pcie_resources_v2 v2;
122 + struct qcom_pcie_resources_v3 v3;
126 @@ -128,8 +119,9 @@ struct qcom_pcie;
127 struct qcom_pcie_ops {
128 int (*get_resources)(struct qcom_pcie *pcie);
129 int (*init)(struct qcom_pcie *pcie);
130 - void (*configure)(struct qcom_pcie *pcie);
131 + int (*post_init)(struct qcom_pcie *pcie);
132 void (*deinit)(struct qcom_pcie *pcie);
133 + void (*ltssm_enable)(struct qcom_pcie *pcie);
137 @@ -163,17 +155,35 @@ static irqreturn_t qcom_pcie_msi_irq_han
138 return dw_handle_msi_irq(pp);
141 -static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
142 +static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
146 - if (dw_pcie_link_up(&pcie->pp))
149 /* enable link training */
150 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
151 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
152 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
155 +static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
159 + /* enable link training */
160 + val = readl(pcie->parf + PCIE20_PARF_LTSSM);
162 + writel(val, pcie->parf + PCIE20_PARF_LTSSM);
165 +static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
168 + if (dw_pcie_link_up(&pcie->pp))
171 + /* Enable Link Training state machine */
172 + if (pcie->ops->ltssm_enable)
173 + pcie->ops->ltssm_enable(pcie);
175 return dw_pcie_wait_for_link(&pcie->pp);
177 @@ -207,14 +217,6 @@ static int qcom_pcie_get_resources_v0(st
178 if (IS_ERR(res->phy_clk))
179 return PTR_ERR(res->phy_clk);
181 - res->aux_clk = devm_clk_get(dev, "aux");
182 - if (IS_ERR(res->aux_clk))
183 - return PTR_ERR(res->aux_clk);
185 - res->ref_clk = devm_clk_get(dev, "ref");
186 - if (IS_ERR(res->ref_clk))
187 - return PTR_ERR(res->ref_clk);
189 res->pci_reset = devm_reset_control_get(dev, "pci");
190 if (IS_ERR(res->pci_reset))
191 return PTR_ERR(res->pci_reset);
192 @@ -235,14 +237,6 @@ static int qcom_pcie_get_resources_v0(st
193 if (IS_ERR(res->phy_reset))
194 return PTR_ERR(res->phy_reset);
196 - res->ext_reset = devm_reset_control_get(dev, "ext");
197 - if (IS_ERR(res->ext_reset))
198 - return PTR_ERR(res->ext_reset);
200 - if (of_property_read_u8(dev->of_node, "phy-tx0-term-offset",
201 - &res->phy_tx0_term_offset))
202 - res->phy_tx0_term_offset = 0;
207 @@ -286,69 +280,15 @@ static void qcom_pcie_deinit_v0(struct q
208 reset_control_assert(res->axi_reset);
209 reset_control_assert(res->ahb_reset);
210 reset_control_assert(res->por_reset);
211 - reset_control_assert(res->phy_reset);
212 - reset_control_assert(res->ext_reset);
213 + reset_control_assert(res->pci_reset);
214 clk_disable_unprepare(res->iface_clk);
215 clk_disable_unprepare(res->core_clk);
216 clk_disable_unprepare(res->phy_clk);
217 - clk_disable_unprepare(res->aux_clk);
218 - clk_disable_unprepare(res->ref_clk);
219 regulator_disable(res->vdda);
220 regulator_disable(res->vdda_phy);
221 regulator_disable(res->vdda_refclk);
224 -static void qcom_pcie_prog_viewport_cfg0(struct qcom_pcie *pcie, u32 busdev)
226 - struct pcie_port *pp = &pcie->pp;
229 - * program and enable address translation region 0 (device config
230 - * address space); region type config;
231 - * axi config address range to device config address range
233 - writel(PCIE20_PLR_IATU_REGION_OUTBOUND |
234 - PCIE20_PLR_IATU_REGION_INDEX(0),
235 - pcie->pp.dbi_base + PCIE20_PLR_IATU_VIEWPORT);
237 - writel(PCIE20_PLR_IATU_TYPE_CFG0, pcie->pp.dbi_base + PCIE20_PLR_IATU_CTRL1);
238 - writel(PCIE20_PLR_IATU_ENABLE, pcie->pp.dbi_base + PCIE20_PLR_IATU_CTRL2);
239 - writel(pp->cfg0_base, pcie->pp.dbi_base + PCIE20_PLR_IATU_LBAR);
240 - writel((pp->cfg0_base >> 32), pcie->pp.dbi_base + PCIE20_PLR_IATU_UBAR);
241 - writel((pp->cfg0_base + pp->cfg0_size - 1),
242 - pcie->pp.dbi_base + PCIE20_PLR_IATU_LAR);
243 - writel(busdev, pcie->pp.dbi_base + PCIE20_PLR_IATU_LTAR);
244 - writel(0, pcie->pp.dbi_base + PCIE20_PLR_IATU_UTAR);
247 -static void qcom_pcie_prog_viewport_mem2_outbound(struct qcom_pcie *pcie)
249 - struct pcie_port *pp = &pcie->pp;
252 - * program and enable address translation region 2 (device resource
253 - * address space); region type memory;
254 - * axi device bar address range to device bar address range
256 - writel(PCIE20_PLR_IATU_REGION_OUTBOUND |
257 - PCIE20_PLR_IATU_REGION_INDEX(2),
258 - pcie->pp.dbi_base + PCIE20_PLR_IATU_VIEWPORT);
260 - writel(PCIE20_PLR_IATU_TYPE_MEM, pcie->pp.dbi_base + PCIE20_PLR_IATU_CTRL1);
261 - writel(PCIE20_PLR_IATU_ENABLE, pcie->pp.dbi_base + PCIE20_PLR_IATU_CTRL2);
262 - writel(pp->mem_base, pcie->pp.dbi_base + PCIE20_PLR_IATU_LBAR);
263 - writel((pp->mem_base >> 32), pcie->pp.dbi_base + PCIE20_PLR_IATU_UBAR);
264 - writel(pp->mem_base + pp->mem_size - 1,
265 - pcie->pp.dbi_base + PCIE20_PLR_IATU_LAR);
266 - writel(pp->mem_bus_addr, pcie->pp.dbi_base + PCIE20_PLR_IATU_LTAR);
267 - writel(upper_32_bits(pp->mem_bus_addr),
268 - pcie->pp.dbi_base + PCIE20_PLR_IATU_UTAR);
270 - /* 256B PCIE buffer setting */
271 - writel(0x1, pcie->pp.dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
272 - writel(0x1, pcie->pp.dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
275 static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
277 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
278 @@ -377,19 +317,13 @@ static int qcom_pcie_init_v0(struct qcom
279 ret = reset_control_assert(res->ahb_reset);
281 dev_err(dev, "cannot assert ahb reset\n");
282 - goto err_assert_reset;
285 - ret = reset_control_deassert(res->ext_reset);
287 - dev_err(dev, "cannot deassert ext reset\n");
288 - goto err_assert_reset;
289 + goto err_assert_ahb;
292 ret = clk_prepare_enable(res->iface_clk);
294 dev_err(dev, "cannot prepare/enable iface clock\n");
295 - goto err_assert_reset;
296 + goto err_assert_ahb;
299 ret = clk_prepare_enable(res->phy_clk);
300 @@ -404,53 +338,22 @@ static int qcom_pcie_init_v0(struct qcom
304 - ret = clk_prepare_enable(res->aux_clk);
306 - dev_err(dev, "cannot prepare/enable aux clock\n");
310 - ret = clk_prepare_enable(res->ref_clk);
312 - dev_err(dev, "cannot prepare/enable ref clock\n");
316 ret = reset_control_deassert(res->ahb_reset);
318 dev_err(dev, "cannot deassert ahb reset\n");
319 goto err_deassert_ahb;
323 /* enable PCIe clocks and resets */
324 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
326 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
328 - /* Set Tx termination offset */
329 - val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
330 - val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
331 - val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(res->phy_tx0_term_offset);
332 - writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
334 - /* PARF programming */
335 - writel(PCS_DEEMPH_TX_DEEMPH_GEN1(0x18) |
336 - PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(0x18) |
337 - PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(0x22),
338 - pcie->parf + PCIE20_PARF_PCS_DEEMPH);
339 - writel(PCS_SWING_TX_SWING_FULL(0x78) |
340 - PCS_SWING_TX_SWING_LOW(0x78),
341 - pcie->parf + PCIE20_PARF_PCS_SWING);
342 - writel(PHY_RX0_EQ(0x4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
344 - /* Enable reference clock */
345 + /* enable external reference clock */
346 val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
347 - val &= ~REF_USE_PAD;
350 writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
352 - /* De-assert PHY, PCIe, POR and AXI resets */
353 ret = reset_control_deassert(res->phy_reset);
355 dev_err(dev, "cannot deassert phy reset\n");
356 @@ -481,16 +384,12 @@ static int qcom_pcie_init_v0(struct qcom
360 - clk_disable_unprepare(res->ref_clk);
362 - clk_disable_unprepare(res->aux_clk);
364 clk_disable_unprepare(res->core_clk);
366 clk_disable_unprepare(res->phy_clk);
368 clk_disable_unprepare(res->iface_clk);
371 regulator_disable(res->vdda_phy);
373 regulator_disable(res->vdda_refclk);
374 @@ -500,12 +399,6 @@ err_refclk:
378 -static void qcom_pcie_configure_v0(struct qcom_pcie *pcie)
380 - qcom_pcie_prog_viewport_cfg0(pcie, MSM_PCIE_DEV_CFG_ADDR);
381 - qcom_pcie_prog_viewport_mem2_outbound(pcie);
384 static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
386 struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
387 @@ -585,6 +478,429 @@ err_res:
391 +static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
393 + struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
394 + struct device *dev = pcie->pp.dev;
396 + res->aux_clk = devm_clk_get(dev, "aux");
397 + if (IS_ERR(res->aux_clk))
398 + return PTR_ERR(res->aux_clk);
400 + res->cfg_clk = devm_clk_get(dev, "cfg");
401 + if (IS_ERR(res->cfg_clk))
402 + return PTR_ERR(res->cfg_clk);
404 + res->master_clk = devm_clk_get(dev, "bus_master");
405 + if (IS_ERR(res->master_clk))
406 + return PTR_ERR(res->master_clk);
408 + res->slave_clk = devm_clk_get(dev, "bus_slave");
409 + if (IS_ERR(res->slave_clk))
410 + return PTR_ERR(res->slave_clk);
412 + res->pipe_clk = devm_clk_get(dev, "pipe");
413 + if (IS_ERR(res->pipe_clk))
414 + return PTR_ERR(res->pipe_clk);
419 +static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
421 + struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
422 + struct device *dev = pcie->pp.dev;
426 + ret = clk_prepare_enable(res->aux_clk);
428 + dev_err(dev, "cannot prepare/enable aux clock\n");
432 + ret = clk_prepare_enable(res->cfg_clk);
434 + dev_err(dev, "cannot prepare/enable cfg clock\n");
438 + ret = clk_prepare_enable(res->master_clk);
440 + dev_err(dev, "cannot prepare/enable master clock\n");
441 + goto err_master_clk;
444 + ret = clk_prepare_enable(res->slave_clk);
446 + dev_err(dev, "cannot prepare/enable slave clock\n");
447 + goto err_slave_clk;
450 + /* enable PCIe clocks and resets */
451 + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
453 + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
455 + /* change DBI base address */
456 + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
458 + /* MAC PHY_POWERDOWN MUX DISABLE */
459 + val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
461 + writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
463 + val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
465 + writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
467 + val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
469 + writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
474 + clk_disable_unprepare(res->master_clk);
476 + clk_disable_unprepare(res->cfg_clk);
478 + clk_disable_unprepare(res->aux_clk);
483 +static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
485 + struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
486 + struct device *dev = pcie->pp.dev;
489 + ret = clk_prepare_enable(res->pipe_clk);
491 + dev_err(dev, "cannot prepare/enable pipe clock\n");
498 +static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie)
500 + struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
501 + struct device *dev = pcie->pp.dev;
503 + res->vdda = devm_regulator_get(dev, "vdda");
504 + if (IS_ERR(res->vdda))
505 + return PTR_ERR(res->vdda);
507 + res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
508 + if (IS_ERR(res->vdda_phy))
509 + return PTR_ERR(res->vdda_phy);
511 + res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
512 + if (IS_ERR(res->vdda_refclk))
513 + return PTR_ERR(res->vdda_refclk);
515 + res->ahb_clk = devm_clk_get(dev, "ahb");
516 + if (IS_ERR(res->ahb_clk))
517 + return PTR_ERR(res->ahb_clk);
519 + res->axi_m_clk = devm_clk_get(dev, "axi_m");
520 + if (IS_ERR(res->axi_m_clk))
521 + return PTR_ERR(res->axi_m_clk);
523 + res->axi_s_clk = devm_clk_get(dev, "axi_s");
524 + if (IS_ERR(res->axi_s_clk))
525 + return PTR_ERR(res->axi_s_clk);
527 + res->axi_m_reset = devm_reset_control_get(dev, "axi_m");
528 + if (IS_ERR(res->axi_m_reset))
529 + return PTR_ERR(res->axi_m_reset);
531 + res->axi_s_reset = devm_reset_control_get(dev, "axi_s");
532 + if (IS_ERR(res->axi_s_reset))
533 + return PTR_ERR(res->axi_s_reset);
535 + res->pipe_reset = devm_reset_control_get(dev, "pipe");
536 + if (IS_ERR(res->pipe_reset))
537 + return PTR_ERR(res->pipe_reset);
539 + res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid");
540 + if (IS_ERR(res->axi_m_vmid_reset))
541 + return PTR_ERR(res->axi_m_vmid_reset);
543 + res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu");
544 + if (IS_ERR(res->axi_s_xpu_reset))
545 + return PTR_ERR(res->axi_s_xpu_reset);
547 + res->parf_reset = devm_reset_control_get(dev, "parf");
548 + if (IS_ERR(res->parf_reset))
549 + return PTR_ERR(res->parf_reset);
551 + res->phy_reset = devm_reset_control_get(dev, "phy");
552 + if (IS_ERR(res->phy_reset))
553 + return PTR_ERR(res->phy_reset);
555 + res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky");
556 + if (IS_ERR(res->axi_m_sticky_reset))
557 + return PTR_ERR(res->axi_m_sticky_reset);
559 + res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky");
560 + if (IS_ERR(res->pipe_sticky_reset))
561 + return PTR_ERR(res->pipe_sticky_reset);
563 + res->pwr_reset = devm_reset_control_get(dev, "pwr");
564 + if (IS_ERR(res->pwr_reset))
565 + return PTR_ERR(res->pwr_reset);
567 + res->ahb_reset = devm_reset_control_get(dev, "ahb");
568 + if (IS_ERR(res->ahb_reset))
569 + return PTR_ERR(res->ahb_reset);
571 + res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb");
572 + if (IS_ERR(res->phy_ahb_reset))
573 + return PTR_ERR(res->phy_ahb_reset);
578 +static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie)
580 + struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
582 + reset_control_assert(res->axi_m_reset);
583 + reset_control_assert(res->axi_s_reset);
584 + reset_control_assert(res->pipe_reset);
585 + reset_control_assert(res->pipe_sticky_reset);
586 + reset_control_assert(res->phy_reset);
587 + reset_control_assert(res->phy_ahb_reset);
588 + reset_control_assert(res->axi_m_sticky_reset);
589 + reset_control_assert(res->pwr_reset);
590 + reset_control_assert(res->ahb_reset);
591 + clk_disable_unprepare(res->ahb_clk);
592 + clk_disable_unprepare(res->axi_m_clk);
593 + clk_disable_unprepare(res->axi_s_clk);
594 + regulator_disable(res->vdda);
595 + regulator_disable(res->vdda_phy);
596 + regulator_disable(res->vdda_refclk);
599 +static int qcom_pcie_init_v3(struct qcom_pcie *pcie)
601 + struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
602 + struct device *dev = pcie->pp.dev;
606 + ret = reset_control_assert(res->axi_m_reset);
608 + dev_err(dev, "cannot assert axi master reset\n");
612 + ret = reset_control_assert(res->axi_s_reset);
614 + dev_err(dev, "cannot assert axi slave reset\n");
618 + usleep_range(10000, 12000);
620 + ret = reset_control_assert(res->pipe_reset);
622 + dev_err(dev, "cannot assert pipe reset\n");
626 + ret = reset_control_assert(res->pipe_sticky_reset);
628 + dev_err(dev, "cannot assert pipe sticky reset\n");
632 + ret = reset_control_assert(res->phy_reset);
634 + dev_err(dev, "cannot assert phy reset\n");
638 + ret = reset_control_assert(res->phy_ahb_reset);
640 + dev_err(dev, "cannot assert phy ahb reset\n");
644 + usleep_range(10000, 12000);
646 + ret = reset_control_assert(res->axi_m_sticky_reset);
648 + dev_err(dev, "cannot assert axi master sticky reset\n");
652 + ret = reset_control_assert(res->pwr_reset);
654 + dev_err(dev, "cannot assert power reset\n");
658 + ret = reset_control_assert(res->ahb_reset);
660 + dev_err(dev, "cannot assert ahb reset\n");
664 + usleep_range(10000, 12000);
666 + ret = reset_control_deassert(res->phy_ahb_reset);
668 + dev_err(dev, "cannot deassert phy ahb reset\n");
672 + ret = reset_control_deassert(res->phy_reset);
674 + dev_err(dev, "cannot deassert phy reset\n");
678 + ret = reset_control_deassert(res->pipe_reset);
680 + dev_err(dev, "cannot deassert pipe reset\n");
684 + ret = reset_control_deassert(res->pipe_sticky_reset);
686 + dev_err(dev, "cannot deassert pipe sticky reset\n");
687 + goto err_rst_pipe_sticky;
690 + usleep_range(10000, 12000);
692 + ret = reset_control_deassert(res->axi_m_reset);
694 + dev_err(dev, "cannot deassert axi master reset\n");
695 + goto err_rst_axi_m;
698 + ret = reset_control_deassert(res->axi_m_sticky_reset);
700 + dev_err(dev, "cannot deassert axi master sticky reset\n");
701 + goto err_rst_axi_m_sticky;
704 + ret = reset_control_deassert(res->axi_s_reset);
706 + dev_err(dev, "cannot deassert axi slave reset\n");
707 + goto err_rst_axi_s;
710 + ret = reset_control_deassert(res->pwr_reset);
712 + dev_err(dev, "cannot deassert power reset\n");
716 + ret = reset_control_deassert(res->ahb_reset);
718 + dev_err(dev, "cannot deassert ahb reset\n");
722 + usleep_range(10000, 12000);
724 + ret = regulator_enable(res->vdda);
726 + dev_err(dev, "cannot enable vdda regulator\n");
730 + ret = regulator_enable(res->vdda_refclk);
732 + dev_err(dev, "cannot enable vdda_refclk regulator\n");
736 + ret = regulator_enable(res->vdda_phy);
738 + dev_err(dev, "cannot enable vdda_phy regulator\n");
742 + ret = clk_prepare_enable(res->ahb_clk);
744 + dev_err(dev, "cannot prepare/enable ahb clock\n");
748 + ret = clk_prepare_enable(res->axi_m_clk);
750 + dev_err(dev, "cannot prepare/enable axi master clock\n");
751 + goto err_clk_axi_m;
754 + ret = clk_prepare_enable(res->axi_s_clk);
756 + dev_err(dev, "cannot prepare/enable axi slave clock\n");
757 + goto err_clk_axi_s;
760 + /* enable PCIe clocks and resets */
761 + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
763 + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
765 + /* change DBI base address */
766 + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
768 + /* MAC PHY_POWERDOWN MUX DISABLE */
769 + val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
771 + writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
773 + val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
775 + writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
777 + val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
779 + writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
784 + clk_disable_unprepare(res->axi_m_clk);
786 + clk_disable_unprepare(res->ahb_clk);
788 + regulator_disable(res->vdda_phy);
790 + regulator_disable(res->vdda_refclk);
792 + regulator_disable(res->vdda);
794 + reset_control_assert(res->ahb_reset);
796 + reset_control_assert(res->pwr_reset);
798 + reset_control_assert(res->axi_s_reset);
800 + reset_control_assert(res->axi_m_sticky_reset);
801 +err_rst_axi_m_sticky:
802 + reset_control_assert(res->axi_m_reset);
804 + reset_control_assert(res->pipe_sticky_reset);
805 +err_rst_pipe_sticky:
806 + reset_control_assert(res->pipe_reset);
808 + reset_control_assert(res->phy_reset);
810 + reset_control_assert(res->phy_ahb_reset);
814 static int qcom_pcie_link_up(struct pcie_port *pp)
816 struct qcom_pcie *pcie = to_qcom_pcie(pp);
817 @@ -593,6 +909,17 @@ static int qcom_pcie_link_up(struct pcie
818 return !!(val & PCI_EXP_LNKSTA_DLLLA);
821 +static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
823 + struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
825 + clk_disable_unprepare(res->pipe_clk);
826 + clk_disable_unprepare(res->slave_clk);
827 + clk_disable_unprepare(res->master_clk);
828 + clk_disable_unprepare(res->cfg_clk);
829 + clk_disable_unprepare(res->aux_clk);
832 static void qcom_pcie_host_init(struct pcie_port *pp)
834 struct qcom_pcie *pcie = to_qcom_pcie(pp);
835 @@ -608,6 +935,9 @@ static void qcom_pcie_host_init(struct p
839 + if (pcie->ops->post_init)
840 + pcie->ops->post_init(pcie);
842 dw_pcie_setup_rc(pp);
844 if (IS_ENABLED(CONFIG_PCI_MSI))
845 @@ -619,9 +949,6 @@ static void qcom_pcie_host_init(struct p
849 - if (pcie->ops->init)
850 - pcie->ops->init(pcie);
854 qcom_ep_reset_assert(pcie);
855 @@ -653,14 +980,30 @@ static struct pcie_host_ops qcom_pcie_dw
856 static const struct qcom_pcie_ops ops_v0 = {
857 .get_resources = qcom_pcie_get_resources_v0,
858 .init = qcom_pcie_init_v0,
859 - .configure = qcom_pcie_configure_v0,
860 .deinit = qcom_pcie_deinit_v0,
861 + .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
864 static const struct qcom_pcie_ops ops_v1 = {
865 .get_resources = qcom_pcie_get_resources_v1,
866 .init = qcom_pcie_init_v1,
867 .deinit = qcom_pcie_deinit_v1,
868 + .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
871 +static const struct qcom_pcie_ops ops_v2 = {
872 + .get_resources = qcom_pcie_get_resources_v2,
873 + .init = qcom_pcie_init_v2,
874 + .post_init = qcom_pcie_post_init_v2,
875 + .deinit = qcom_pcie_deinit_v2,
876 + .ltssm_enable = qcom_pcie_v2_ltssm_enable,
879 +static const struct qcom_pcie_ops ops_v3 = {
880 + .get_resources = qcom_pcie_get_resources_v3,
881 + .init = qcom_pcie_init_v3,
882 + .deinit = qcom_pcie_deinit_v3,
883 + .ltssm_enable = qcom_pcie_v2_ltssm_enable,
886 static int qcom_pcie_probe(struct platform_device *pdev)
887 @@ -740,6 +1083,8 @@ static const struct of_device_id qcom_pc
888 { .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
889 { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
890 { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
891 + { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
892 + { .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 },