ipq806x: update ipq pcie driver
[openwrt/staging/blogic.git] / target / linux / ipq806x / patches-4.9 / 311-ipq4019-pcie.patch
1 Index: linux-4.9.20/drivers/pci/host/pcie-qcom.c
2 ===================================================================
3 --- linux-4.9.20.orig/drivers/pci/host/pcie-qcom.c
4 +++ linux-4.9.20/drivers/pci/host/pcie-qcom.c
5 @@ -36,53 +36,17 @@
6
7 #include "pcie-designware.h"
8
9 -/* DBI registers */
10 -#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
11 -#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
12 -
13 -#define PCIE20_PLR_IATU_VIEWPORT 0x900
14 -#define PCIE20_PLR_IATU_REGION_OUTBOUND (0x0 << 31)
15 -#define PCIE20_PLR_IATU_REGION_INDEX(x) (x << 0)
16 -
17 -#define PCIE20_PLR_IATU_CTRL1 0x904
18 -#define PCIE20_PLR_IATU_TYPE_CFG0 (0x4 << 0)
19 -#define PCIE20_PLR_IATU_TYPE_MEM (0x0 << 0)
20 -
21 -#define PCIE20_PLR_IATU_CTRL2 0x908
22 -#define PCIE20_PLR_IATU_ENABLE BIT(31)
23 -
24 -#define PCIE20_PLR_IATU_LBAR 0x90C
25 -#define PCIE20_PLR_IATU_UBAR 0x910
26 -#define PCIE20_PLR_IATU_LAR 0x914
27 -#define PCIE20_PLR_IATU_LTAR 0x918
28 -#define PCIE20_PLR_IATU_UTAR 0x91c
29 -
30 -#define MSM_PCIE_DEV_CFG_ADDR 0x01000000
31 -
32 -/* PARF registers */
33 -#define PCIE20_PARF_PCS_DEEMPH 0x34
34 -#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) (x << 16)
35 -#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) (x << 8)
36 -#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) (x << 0)
37 -
38 -#define PCIE20_PARF_PCS_SWING 0x38
39 -#define PCS_SWING_TX_SWING_FULL(x) (x << 8)
40 -#define PCS_SWING_TX_SWING_LOW(x) (x << 0)
41 -
42 +#define PCIE20_PARF_SYS_CTRL 0x00
43 #define PCIE20_PARF_PHY_CTRL 0x40
44 -#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK (0x1f << 16)
45 -#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) (x << 16)
46 -
47 #define PCIE20_PARF_PHY_REFCLK 0x4C
48 -#define REF_SSP_EN BIT(16)
49 -#define REF_USE_PAD BIT(12)
50 -
51 -#define PCIE20_PARF_CONFIG_BITS 0x50
52 -#define PHY_RX0_EQ(x) (x << 24)
53 -
54 #define PCIE20_PARF_DBI_BASE_ADDR 0x168
55 -#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16c
56 +#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
57 +#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
58 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
59 +#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
60 +#define PCIE20_PARF_LTSSM 0x1B0
61 +#define PCIE20_PARF_SID_OFFSET 0x234
62 +#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
63
64 #define PCIE20_ELBI_SYS_CTRL 0x04
65 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
66 @@ -95,18 +59,14 @@ struct qcom_pcie_resources_v0 {
67 struct clk *iface_clk;
68 struct clk *core_clk;
69 struct clk *phy_clk;
70 - struct clk *aux_clk;
71 - struct clk *ref_clk;
72 struct reset_control *pci_reset;
73 struct reset_control *axi_reset;
74 struct reset_control *ahb_reset;
75 struct reset_control *por_reset;
76 struct reset_control *phy_reset;
77 - struct reset_control *ext_reset;
78 struct regulator *vdda;
79 struct regulator *vdda_phy;
80 struct regulator *vdda_refclk;
81 - uint8_t phy_tx0_term_offset;
82 };
83
84 struct qcom_pcie_resources_v1 {
85 @@ -118,9 +78,40 @@ struct qcom_pcie_resources_v1 {
86 struct regulator *vdda;
87 };
88
89 +struct qcom_pcie_resources_v2 {
90 + struct clk *aux_clk;
91 + struct clk *master_clk;
92 + struct clk *slave_clk;
93 + struct clk *cfg_clk;
94 + struct clk *pipe_clk;
95 +};
96 +
97 +struct qcom_pcie_resources_v3 {
98 + struct clk *ahb_clk;
99 + struct clk *axi_m_clk;
100 + struct clk *axi_s_clk;
101 + struct reset_control *axi_m_reset;
102 + struct reset_control *axi_s_reset;
103 + struct reset_control *pipe_reset;
104 + struct reset_control *axi_m_vmid_reset;
105 + struct reset_control *axi_s_xpu_reset;
106 + struct reset_control *parf_reset;
107 + struct reset_control *phy_reset;
108 + struct reset_control *axi_m_sticky_reset;
109 + struct reset_control *pipe_sticky_reset;
110 + struct reset_control *pwr_reset;
111 + struct reset_control *ahb_reset;
112 + struct reset_control *phy_ahb_reset;
113 + struct regulator *vdda;
114 + struct regulator *vdda_phy;
115 + struct regulator *vdda_refclk;
116 +};
117 +
118 union qcom_pcie_resources {
119 struct qcom_pcie_resources_v0 v0;
120 struct qcom_pcie_resources_v1 v1;
121 + struct qcom_pcie_resources_v2 v2;
122 + struct qcom_pcie_resources_v3 v3;
123 };
124
125 struct qcom_pcie;
126 @@ -128,8 +119,9 @@ struct qcom_pcie;
127 struct qcom_pcie_ops {
128 int (*get_resources)(struct qcom_pcie *pcie);
129 int (*init)(struct qcom_pcie *pcie);
130 - void (*configure)(struct qcom_pcie *pcie);
131 + int (*post_init)(struct qcom_pcie *pcie);
132 void (*deinit)(struct qcom_pcie *pcie);
133 + void (*ltssm_enable)(struct qcom_pcie *pcie);
134 };
135
136 struct qcom_pcie {
137 @@ -163,17 +155,35 @@ static irqreturn_t qcom_pcie_msi_irq_han
138 return dw_handle_msi_irq(pp);
139 }
140
141 -static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
142 +static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
143 {
144 u32 val;
145
146 - if (dw_pcie_link_up(&pcie->pp))
147 - return 0;
148 -
149 /* enable link training */
150 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
151 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
152 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
153 +}
154 +
155 +static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
156 +{
157 + u32 val;
158 +
159 + /* enable link training */
160 + val = readl(pcie->parf + PCIE20_PARF_LTSSM);
161 + val |= BIT(8);
162 + writel(val, pcie->parf + PCIE20_PARF_LTSSM);
163 +}
164 +
165 +static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
166 +{
167 +
168 + if (dw_pcie_link_up(&pcie->pp))
169 + return 0;
170 +
171 + /* Enable Link Training state machine */
172 + if (pcie->ops->ltssm_enable)
173 + pcie->ops->ltssm_enable(pcie);
174
175 return dw_pcie_wait_for_link(&pcie->pp);
176 }
177 @@ -207,14 +217,6 @@ static int qcom_pcie_get_resources_v0(st
178 if (IS_ERR(res->phy_clk))
179 return PTR_ERR(res->phy_clk);
180
181 - res->aux_clk = devm_clk_get(dev, "aux");
182 - if (IS_ERR(res->aux_clk))
183 - return PTR_ERR(res->aux_clk);
184 -
185 - res->ref_clk = devm_clk_get(dev, "ref");
186 - if (IS_ERR(res->ref_clk))
187 - return PTR_ERR(res->ref_clk);
188 -
189 res->pci_reset = devm_reset_control_get(dev, "pci");
190 if (IS_ERR(res->pci_reset))
191 return PTR_ERR(res->pci_reset);
192 @@ -235,14 +237,6 @@ static int qcom_pcie_get_resources_v0(st
193 if (IS_ERR(res->phy_reset))
194 return PTR_ERR(res->phy_reset);
195
196 - res->ext_reset = devm_reset_control_get(dev, "ext");
197 - if (IS_ERR(res->ext_reset))
198 - return PTR_ERR(res->ext_reset);
199 -
200 - if (of_property_read_u8(dev->of_node, "phy-tx0-term-offset",
201 - &res->phy_tx0_term_offset))
202 - res->phy_tx0_term_offset = 0;
203 -
204 return 0;
205 }
206
207 @@ -286,69 +280,15 @@ static void qcom_pcie_deinit_v0(struct q
208 reset_control_assert(res->axi_reset);
209 reset_control_assert(res->ahb_reset);
210 reset_control_assert(res->por_reset);
211 - reset_control_assert(res->phy_reset);
212 - reset_control_assert(res->ext_reset);
213 + reset_control_assert(res->pci_reset);
214 clk_disable_unprepare(res->iface_clk);
215 clk_disable_unprepare(res->core_clk);
216 clk_disable_unprepare(res->phy_clk);
217 - clk_disable_unprepare(res->aux_clk);
218 - clk_disable_unprepare(res->ref_clk);
219 regulator_disable(res->vdda);
220 regulator_disable(res->vdda_phy);
221 regulator_disable(res->vdda_refclk);
222 }
223
224 -static void qcom_pcie_prog_viewport_cfg0(struct qcom_pcie *pcie, u32 busdev)
225 -{
226 - struct pcie_port *pp = &pcie->pp;
227 -
228 - /*
229 - * program and enable address translation region 0 (device config
230 - * address space); region type config;
231 - * axi config address range to device config address range
232 - */
233 - writel(PCIE20_PLR_IATU_REGION_OUTBOUND |
234 - PCIE20_PLR_IATU_REGION_INDEX(0),
235 - pcie->pp.dbi_base + PCIE20_PLR_IATU_VIEWPORT);
236 -
237 - writel(PCIE20_PLR_IATU_TYPE_CFG0, pcie->pp.dbi_base + PCIE20_PLR_IATU_CTRL1);
238 - writel(PCIE20_PLR_IATU_ENABLE, pcie->pp.dbi_base + PCIE20_PLR_IATU_CTRL2);
239 - writel(pp->cfg0_base, pcie->pp.dbi_base + PCIE20_PLR_IATU_LBAR);
240 - writel((pp->cfg0_base >> 32), pcie->pp.dbi_base + PCIE20_PLR_IATU_UBAR);
241 - writel((pp->cfg0_base + pp->cfg0_size - 1),
242 - pcie->pp.dbi_base + PCIE20_PLR_IATU_LAR);
243 - writel(busdev, pcie->pp.dbi_base + PCIE20_PLR_IATU_LTAR);
244 - writel(0, pcie->pp.dbi_base + PCIE20_PLR_IATU_UTAR);
245 -}
246 -
247 -static void qcom_pcie_prog_viewport_mem2_outbound(struct qcom_pcie *pcie)
248 -{
249 - struct pcie_port *pp = &pcie->pp;
250 -
251 - /*
252 - * program and enable address translation region 2 (device resource
253 - * address space); region type memory;
254 - * axi device bar address range to device bar address range
255 - */
256 - writel(PCIE20_PLR_IATU_REGION_OUTBOUND |
257 - PCIE20_PLR_IATU_REGION_INDEX(2),
258 - pcie->pp.dbi_base + PCIE20_PLR_IATU_VIEWPORT);
259 -
260 - writel(PCIE20_PLR_IATU_TYPE_MEM, pcie->pp.dbi_base + PCIE20_PLR_IATU_CTRL1);
261 - writel(PCIE20_PLR_IATU_ENABLE, pcie->pp.dbi_base + PCIE20_PLR_IATU_CTRL2);
262 - writel(pp->mem_base, pcie->pp.dbi_base + PCIE20_PLR_IATU_LBAR);
263 - writel((pp->mem_base >> 32), pcie->pp.dbi_base + PCIE20_PLR_IATU_UBAR);
264 - writel(pp->mem_base + pp->mem_size - 1,
265 - pcie->pp.dbi_base + PCIE20_PLR_IATU_LAR);
266 - writel(pp->mem_bus_addr, pcie->pp.dbi_base + PCIE20_PLR_IATU_LTAR);
267 - writel(upper_32_bits(pp->mem_bus_addr),
268 - pcie->pp.dbi_base + PCIE20_PLR_IATU_UTAR);
269 -
270 - /* 256B PCIE buffer setting */
271 - writel(0x1, pcie->pp.dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
272 - writel(0x1, pcie->pp.dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
273 -}
274 -
275 static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
276 {
277 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
278 @@ -377,19 +317,13 @@ static int qcom_pcie_init_v0(struct qcom
279 ret = reset_control_assert(res->ahb_reset);
280 if (ret) {
281 dev_err(dev, "cannot assert ahb reset\n");
282 - goto err_assert_reset;
283 - }
284 -
285 - ret = reset_control_deassert(res->ext_reset);
286 - if (ret) {
287 - dev_err(dev, "cannot deassert ext reset\n");
288 - goto err_assert_reset;
289 + goto err_assert_ahb;
290 }
291
292 ret = clk_prepare_enable(res->iface_clk);
293 if (ret) {
294 dev_err(dev, "cannot prepare/enable iface clock\n");
295 - goto err_assert_reset;
296 + goto err_assert_ahb;
297 }
298
299 ret = clk_prepare_enable(res->phy_clk);
300 @@ -404,53 +338,22 @@ static int qcom_pcie_init_v0(struct qcom
301 goto err_clk_core;
302 }
303
304 - ret = clk_prepare_enable(res->aux_clk);
305 - if (ret) {
306 - dev_err(dev, "cannot prepare/enable aux clock\n");
307 - goto err_clk_aux;
308 - }
309 -
310 - ret = clk_prepare_enable(res->ref_clk);
311 - if (ret) {
312 - dev_err(dev, "cannot prepare/enable ref clock\n");
313 - goto err_clk_ref;
314 - }
315 -
316 ret = reset_control_deassert(res->ahb_reset);
317 if (ret) {
318 dev_err(dev, "cannot deassert ahb reset\n");
319 goto err_deassert_ahb;
320 }
321 - udelay(1);
322
323 /* enable PCIe clocks and resets */
324 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
325 val &= ~BIT(0);
326 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
327
328 - /* Set Tx termination offset */
329 - val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
330 - val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
331 - val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(res->phy_tx0_term_offset);
332 - writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
333 -
334 - /* PARF programming */
335 - writel(PCS_DEEMPH_TX_DEEMPH_GEN1(0x18) |
336 - PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(0x18) |
337 - PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(0x22),
338 - pcie->parf + PCIE20_PARF_PCS_DEEMPH);
339 - writel(PCS_SWING_TX_SWING_FULL(0x78) |
340 - PCS_SWING_TX_SWING_LOW(0x78),
341 - pcie->parf + PCIE20_PARF_PCS_SWING);
342 - writel(PHY_RX0_EQ(0x4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
343 -
344 - /* Enable reference clock */
345 + /* enable external reference clock */
346 val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
347 - val &= ~REF_USE_PAD;
348 - val |= REF_SSP_EN;
349 + val |= BIT(16);
350 writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
351
352 - /* De-assert PHY, PCIe, POR and AXI resets */
353 ret = reset_control_deassert(res->phy_reset);
354 if (ret) {
355 dev_err(dev, "cannot deassert phy reset\n");
356 @@ -481,16 +384,12 @@ static int qcom_pcie_init_v0(struct qcom
357 return 0;
358
359 err_deassert_ahb:
360 - clk_disable_unprepare(res->ref_clk);
361 -err_clk_ref:
362 - clk_disable_unprepare(res->aux_clk);
363 -err_clk_aux:
364 clk_disable_unprepare(res->core_clk);
365 err_clk_core:
366 clk_disable_unprepare(res->phy_clk);
367 err_clk_phy:
368 clk_disable_unprepare(res->iface_clk);
369 -err_assert_reset:
370 +err_assert_ahb:
371 regulator_disable(res->vdda_phy);
372 err_vdda_phy:
373 regulator_disable(res->vdda_refclk);
374 @@ -500,12 +399,6 @@ err_refclk:
375 return ret;
376 }
377
378 -static void qcom_pcie_configure_v0(struct qcom_pcie *pcie)
379 -{
380 - qcom_pcie_prog_viewport_cfg0(pcie, MSM_PCIE_DEV_CFG_ADDR);
381 - qcom_pcie_prog_viewport_mem2_outbound(pcie);
382 -}
383 -
384 static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
385 {
386 struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
387 @@ -585,6 +478,429 @@ err_res:
388 return ret;
389 }
390
391 +static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
392 +{
393 + struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
394 + struct device *dev = pcie->pp.dev;
395 +
396 + res->aux_clk = devm_clk_get(dev, "aux");
397 + if (IS_ERR(res->aux_clk))
398 + return PTR_ERR(res->aux_clk);
399 +
400 + res->cfg_clk = devm_clk_get(dev, "cfg");
401 + if (IS_ERR(res->cfg_clk))
402 + return PTR_ERR(res->cfg_clk);
403 +
404 + res->master_clk = devm_clk_get(dev, "bus_master");
405 + if (IS_ERR(res->master_clk))
406 + return PTR_ERR(res->master_clk);
407 +
408 + res->slave_clk = devm_clk_get(dev, "bus_slave");
409 + if (IS_ERR(res->slave_clk))
410 + return PTR_ERR(res->slave_clk);
411 +
412 + res->pipe_clk = devm_clk_get(dev, "pipe");
413 + if (IS_ERR(res->pipe_clk))
414 + return PTR_ERR(res->pipe_clk);
415 +
416 + return 0;
417 +}
418 +
419 +static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
420 +{
421 + struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
422 + struct device *dev = pcie->pp.dev;
423 + u32 val;
424 + int ret;
425 +
426 + ret = clk_prepare_enable(res->aux_clk);
427 + if (ret) {
428 + dev_err(dev, "cannot prepare/enable aux clock\n");
429 + return ret;
430 + }
431 +
432 + ret = clk_prepare_enable(res->cfg_clk);
433 + if (ret) {
434 + dev_err(dev, "cannot prepare/enable cfg clock\n");
435 + goto err_cfg_clk;
436 + }
437 +
438 + ret = clk_prepare_enable(res->master_clk);
439 + if (ret) {
440 + dev_err(dev, "cannot prepare/enable master clock\n");
441 + goto err_master_clk;
442 + }
443 +
444 + ret = clk_prepare_enable(res->slave_clk);
445 + if (ret) {
446 + dev_err(dev, "cannot prepare/enable slave clock\n");
447 + goto err_slave_clk;
448 + }
449 +
450 + /* enable PCIe clocks and resets */
451 + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
452 + val &= ~BIT(0);
453 + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
454 +
455 + /* change DBI base address */
456 + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
457 +
458 + /* MAC PHY_POWERDOWN MUX DISABLE */
459 + val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
460 + val &= ~BIT(29);
461 + writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
462 +
463 + val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
464 + val |= BIT(4);
465 + writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
466 +
467 + val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
468 + val |= BIT(31);
469 + writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
470 +
471 + return 0;
472 +
473 +err_slave_clk:
474 + clk_disable_unprepare(res->master_clk);
475 +err_master_clk:
476 + clk_disable_unprepare(res->cfg_clk);
477 +err_cfg_clk:
478 + clk_disable_unprepare(res->aux_clk);
479 +
480 + return ret;
481 +}
482 +
483 +static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
484 +{
485 + struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
486 + struct device *dev = pcie->pp.dev;
487 + int ret;
488 +
489 + ret = clk_prepare_enable(res->pipe_clk);
490 + if (ret) {
491 + dev_err(dev, "cannot prepare/enable pipe clock\n");
492 + return ret;
493 + }
494 +
495 + return 0;
496 +}
497 +
498 +static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie)
499 +{
500 + struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
501 + struct device *dev = pcie->pp.dev;
502 +
503 + res->vdda = devm_regulator_get(dev, "vdda");
504 + if (IS_ERR(res->vdda))
505 + return PTR_ERR(res->vdda);
506 +
507 + res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
508 + if (IS_ERR(res->vdda_phy))
509 + return PTR_ERR(res->vdda_phy);
510 +
511 + res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
512 + if (IS_ERR(res->vdda_refclk))
513 + return PTR_ERR(res->vdda_refclk);
514 +
515 + res->ahb_clk = devm_clk_get(dev, "ahb");
516 + if (IS_ERR(res->ahb_clk))
517 + return PTR_ERR(res->ahb_clk);
518 +
519 + res->axi_m_clk = devm_clk_get(dev, "axi_m");
520 + if (IS_ERR(res->axi_m_clk))
521 + return PTR_ERR(res->axi_m_clk);
522 +
523 + res->axi_s_clk = devm_clk_get(dev, "axi_s");
524 + if (IS_ERR(res->axi_s_clk))
525 + return PTR_ERR(res->axi_s_clk);
526 +
527 + res->axi_m_reset = devm_reset_control_get(dev, "axi_m");
528 + if (IS_ERR(res->axi_m_reset))
529 + return PTR_ERR(res->axi_m_reset);
530 +
531 + res->axi_s_reset = devm_reset_control_get(dev, "axi_s");
532 + if (IS_ERR(res->axi_s_reset))
533 + return PTR_ERR(res->axi_s_reset);
534 +
535 + res->pipe_reset = devm_reset_control_get(dev, "pipe");
536 + if (IS_ERR(res->pipe_reset))
537 + return PTR_ERR(res->pipe_reset);
538 +
539 + res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid");
540 + if (IS_ERR(res->axi_m_vmid_reset))
541 + return PTR_ERR(res->axi_m_vmid_reset);
542 +
543 + res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu");
544 + if (IS_ERR(res->axi_s_xpu_reset))
545 + return PTR_ERR(res->axi_s_xpu_reset);
546 +
547 + res->parf_reset = devm_reset_control_get(dev, "parf");
548 + if (IS_ERR(res->parf_reset))
549 + return PTR_ERR(res->parf_reset);
550 +
551 + res->phy_reset = devm_reset_control_get(dev, "phy");
552 + if (IS_ERR(res->phy_reset))
553 + return PTR_ERR(res->phy_reset);
554 +
555 + res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky");
556 + if (IS_ERR(res->axi_m_sticky_reset))
557 + return PTR_ERR(res->axi_m_sticky_reset);
558 +
559 + res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky");
560 + if (IS_ERR(res->pipe_sticky_reset))
561 + return PTR_ERR(res->pipe_sticky_reset);
562 +
563 + res->pwr_reset = devm_reset_control_get(dev, "pwr");
564 + if (IS_ERR(res->pwr_reset))
565 + return PTR_ERR(res->pwr_reset);
566 +
567 + res->ahb_reset = devm_reset_control_get(dev, "ahb");
568 + if (IS_ERR(res->ahb_reset))
569 + return PTR_ERR(res->ahb_reset);
570 +
571 + res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb");
572 + if (IS_ERR(res->phy_ahb_reset))
573 + return PTR_ERR(res->phy_ahb_reset);
574 +
575 + return 0;
576 +}
577 +
578 +static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie)
579 +{
580 + struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
581 +
582 + reset_control_assert(res->axi_m_reset);
583 + reset_control_assert(res->axi_s_reset);
584 + reset_control_assert(res->pipe_reset);
585 + reset_control_assert(res->pipe_sticky_reset);
586 + reset_control_assert(res->phy_reset);
587 + reset_control_assert(res->phy_ahb_reset);
588 + reset_control_assert(res->axi_m_sticky_reset);
589 + reset_control_assert(res->pwr_reset);
590 + reset_control_assert(res->ahb_reset);
591 + clk_disable_unprepare(res->ahb_clk);
592 + clk_disable_unprepare(res->axi_m_clk);
593 + clk_disable_unprepare(res->axi_s_clk);
594 + regulator_disable(res->vdda);
595 + regulator_disable(res->vdda_phy);
596 + regulator_disable(res->vdda_refclk);
597 +}
598 +
599 +static int qcom_pcie_init_v3(struct qcom_pcie *pcie)
600 +{
601 + struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
602 + struct device *dev = pcie->pp.dev;
603 + u32 val;
604 + int ret;
605 +
606 + ret = reset_control_assert(res->axi_m_reset);
607 + if (ret) {
608 + dev_err(dev, "cannot assert axi master reset\n");
609 + return ret;
610 + }
611 +
612 + ret = reset_control_assert(res->axi_s_reset);
613 + if (ret) {
614 +  dev_err(dev, "cannot assert axi slave reset\n");
615 + return ret;
616 + }
617 +
618 + usleep_range(10000, 12000);
619 +
620 + ret = reset_control_assert(res->pipe_reset);
621 + if (ret) {
622 + dev_err(dev, "cannot assert pipe reset\n");
623 + return ret;
624 + }
625 +
626 + ret = reset_control_assert(res->pipe_sticky_reset);
627 + if (ret) {
628 + dev_err(dev, "cannot assert pipe sticky reset\n");
629 + return ret;
630 + }
631 +
632 + ret = reset_control_assert(res->phy_reset);
633 + if (ret) {
634 + dev_err(dev, "cannot assert phy reset\n");
635 + return ret;
636 + }
637 +
638 + ret = reset_control_assert(res->phy_ahb_reset);
639 + if (ret) {
640 + dev_err(dev, "cannot assert phy ahb reset\n");
641 + return ret;
642 + }
643 +
644 + usleep_range(10000, 12000);
645 +
646 + ret = reset_control_assert(res->axi_m_sticky_reset);
647 + if (ret) {
648 + dev_err(dev, "cannot assert axi master sticky reset\n");
649 + return ret;
650 + }
651 +
652 + ret = reset_control_assert(res->pwr_reset);
653 + if (ret) {
654 + dev_err(dev, "cannot assert power reset\n");
655 + return ret;
656 + }
657 +
658 + ret = reset_control_assert(res->ahb_reset);
659 + if (ret) {
660 + dev_err(dev, "cannot assert ahb reset\n");
661 + return ret;
662 + }
663 +
664 + usleep_range(10000, 12000);
665 +
666 + ret = reset_control_deassert(res->phy_ahb_reset);
667 + if (ret) {
668 + dev_err(dev, "cannot deassert phy ahb reset\n");
669 + return ret;
670 + }
671 +
672 + ret = reset_control_deassert(res->phy_reset);
673 + if (ret) {
674 + dev_err(dev, "cannot deassert phy reset\n");
675 + goto err_rst_phy;
676 + }
677 +
678 + ret = reset_control_deassert(res->pipe_reset);
679 + if (ret) {
680 + dev_err(dev, "cannot deassert pipe reset\n");
681 + goto err_rst_pipe;
682 + }
683 +
684 + ret = reset_control_deassert(res->pipe_sticky_reset);
685 + if (ret) {
686 + dev_err(dev, "cannot deassert pipe sticky reset\n");
687 + goto err_rst_pipe_sticky;
688 + }
689 +
690 + usleep_range(10000, 12000);
691 +
692 + ret = reset_control_deassert(res->axi_m_reset);
693 + if (ret) {
694 + dev_err(dev, "cannot deassert axi master reset\n");
695 + goto err_rst_axi_m;
696 + }
697 +
698 + ret = reset_control_deassert(res->axi_m_sticky_reset);
699 + if (ret) {
700 + dev_err(dev, "cannot deassert axi master sticky reset\n");
701 + goto err_rst_axi_m_sticky;
702 + }
703 +
704 + ret = reset_control_deassert(res->axi_s_reset);
705 + if (ret) {
706 + dev_err(dev, "cannot deassert axi slave reset\n");
707 + goto err_rst_axi_s;
708 + }
709 +
710 + ret = reset_control_deassert(res->pwr_reset);
711 + if (ret) {
712 + dev_err(dev, "cannot deassert power reset\n");
713 + goto err_rst_pwr;
714 + }
715 +
716 + ret = reset_control_deassert(res->ahb_reset);
717 + if (ret) {
718 + dev_err(dev, "cannot deassert ahb reset\n");
719 + goto err_rst_ahb;
720 + }
721 +
722 + usleep_range(10000, 12000);
723 +
724 + ret = regulator_enable(res->vdda);
725 + if (ret) {
726 + dev_err(dev, "cannot enable vdda regulator\n");
727 + goto err_vdda;
728 + }
729 +
730 + ret = regulator_enable(res->vdda_refclk);
731 + if (ret) {
732 + dev_err(dev, "cannot enable vdda_refclk regulator\n");
733 + goto err_refclk;
734 + }
735 +
736 + ret = regulator_enable(res->vdda_phy);
737 + if (ret) {
738 + dev_err(dev, "cannot enable vdda_phy regulator\n");
739 + goto err_vdda_phy;
740 + }
741 +
742 + ret = clk_prepare_enable(res->ahb_clk);
743 + if (ret) {
744 +  dev_err(dev, "cannot prepare/enable ahb clock\n");
745 + goto err_ahb;
746 + }
747 +
748 + ret = clk_prepare_enable(res->axi_m_clk);
749 + if (ret) {
750 +  dev_err(dev, "cannot prepare/enable axi master clock\n");
751 + goto err_clk_axi_m;
752 + }
753 +
754 + ret = clk_prepare_enable(res->axi_s_clk);
755 + if (ret) {
756 +  dev_err(dev, "cannot prepare/enable axi slave clock\n");
757 + goto err_clk_axi_s;
758 + }
759 +
760 + /* enable PCIe clocks and resets */
761 + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
762 +  val &= ~BIT(0);
763 + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
764 +
765 + /* change DBI base address */
766 + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
767 +
768 + /* MAC PHY_POWERDOWN MUX DISABLE */
769 + val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
770 + val &= ~BIT(29);
771 + writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
772 +
773 + val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
774 + val |= BIT(4);
775 + writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
776 +
777 + val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
778 + val |= BIT(31);
779 + writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
780 +
781 + return 0;
782 +
783 +err_clk_axi_s:
784 + clk_disable_unprepare(res->axi_m_clk);
785 +err_clk_axi_m:
786 + clk_disable_unprepare(res->ahb_clk);
787 +err_ahb:
788 + regulator_disable(res->vdda_phy);
789 +err_vdda_phy:
790 + regulator_disable(res->vdda_refclk);
791 +err_refclk:
792 + regulator_disable(res->vdda);
793 +err_vdda:
794 + reset_control_assert(res->ahb_reset);
795 +err_rst_ahb:
796 + reset_control_assert(res->pwr_reset);
797 +err_rst_pwr:
798 + reset_control_assert(res->axi_s_reset);
799 +err_rst_axi_s:
800 + reset_control_assert(res->axi_m_sticky_reset);
801 +err_rst_axi_m_sticky:
802 + reset_control_assert(res->axi_m_reset);
803 +err_rst_axi_m:
804 + reset_control_assert(res->pipe_sticky_reset);
805 +err_rst_pipe_sticky:
806 + reset_control_assert(res->pipe_reset);
807 +err_rst_pipe:
808 + reset_control_assert(res->phy_reset);
809 +err_rst_phy:
810 + reset_control_assert(res->phy_ahb_reset);
811 + return ret;
812 +}
813 +
814 static int qcom_pcie_link_up(struct pcie_port *pp)
815 {
816 struct qcom_pcie *pcie = to_qcom_pcie(pp);
817 @@ -593,6 +909,17 @@ static int qcom_pcie_link_up(struct pcie
818 return !!(val & PCI_EXP_LNKSTA_DLLLA);
819 }
820
821 +static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
822 +{
823 + struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
824 +
825 + clk_disable_unprepare(res->pipe_clk);
826 + clk_disable_unprepare(res->slave_clk);
827 + clk_disable_unprepare(res->master_clk);
828 + clk_disable_unprepare(res->cfg_clk);
829 + clk_disable_unprepare(res->aux_clk);
830 +}
831 +
832 static void qcom_pcie_host_init(struct pcie_port *pp)
833 {
834 struct qcom_pcie *pcie = to_qcom_pcie(pp);
835 @@ -608,6 +935,9 @@ static void qcom_pcie_host_init(struct p
836 if (ret)
837 goto err_deinit;
838
839 + if (pcie->ops->post_init)
840 + pcie->ops->post_init(pcie);
841 +
842 dw_pcie_setup_rc(pp);
843
844 if (IS_ENABLED(CONFIG_PCI_MSI))
845 @@ -619,9 +949,6 @@ static void qcom_pcie_host_init(struct p
846 if (ret)
847 goto err;
848
849 - if (pcie->ops->init)
850 - pcie->ops->init(pcie);
851 -
852 return;
853 err:
854 qcom_ep_reset_assert(pcie);
855 @@ -653,14 +980,30 @@ static struct pcie_host_ops qcom_pcie_dw
856 static const struct qcom_pcie_ops ops_v0 = {
857 .get_resources = qcom_pcie_get_resources_v0,
858 .init = qcom_pcie_init_v0,
859 - .configure = qcom_pcie_configure_v0,
860 .deinit = qcom_pcie_deinit_v0,
861 + .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
862 };
863
864 static const struct qcom_pcie_ops ops_v1 = {
865 .get_resources = qcom_pcie_get_resources_v1,
866 .init = qcom_pcie_init_v1,
867 .deinit = qcom_pcie_deinit_v1,
868 + .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
869 +};
870 +
871 +static const struct qcom_pcie_ops ops_v2 = {
872 + .get_resources = qcom_pcie_get_resources_v2,
873 + .init = qcom_pcie_init_v2,
874 + .post_init = qcom_pcie_post_init_v2,
875 + .deinit = qcom_pcie_deinit_v2,
876 + .ltssm_enable = qcom_pcie_v2_ltssm_enable,
877 +};
878 +
879 +static const struct qcom_pcie_ops ops_v3 = {
880 + .get_resources = qcom_pcie_get_resources_v3,
881 + .init = qcom_pcie_init_v3,
882 + .deinit = qcom_pcie_deinit_v3,
883 + .ltssm_enable = qcom_pcie_v2_ltssm_enable,
884 };
885
886 static int qcom_pcie_probe(struct platform_device *pdev)
887 @@ -740,6 +1083,8 @@ static const struct of_device_id qcom_pc
888 { .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
889 { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
890 { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
891 + { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
892 + { .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 },
893 { }
894 };
895