// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/units.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PARF registers */
#define PARF_SYS_CTRL				0x00
#define PARF_PM_CTRL				0x20
#define PARF_PCS_DEEMPH				0x34
#define PARF_PCS_SWING				0x38
#define PARF_PHY_CTRL				0x40
#define PARF_PHY_REFCLK				0x4c
#define PARF_CONFIG_BITS			0x50
#define PARF_DBI_BASE_ADDR			0x168
#define PARF_MHI_CLOCK_RESET_CTRL		0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
#define PARF_Q2A_FLUSH				0x1ac
#define PARF_LTSSM				0x1b0
#define PARF_SID_OFFSET				0x234
#define PARF_BDF_TRANSLATE_CFG			0x24c
#define PARF_SLV_ADDR_SPACE_SIZE		0x358
#define PARF_NO_SNOOP_OVERIDE			0x3d4
#define PARF_DEVICE_TYPE			0x1000
#define PARF_BDF_TO_SID_TABLE_N			0x2000
#define PARF_BDF_TO_SID_CFG			0x2c00

/* ELBI registers */
#define ELBI_SYS_CTRL				0x04

/* DBI registers */
#define AXI_MSTR_RESP_COMP_CTRL0		0x818
#define AXI_MSTR_RESP_COMP_CTRL1		0x81c

/* MHI registers */
#define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
#define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88

/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN	BIT(29)
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

/* PARF_PM_CTRL register fields */
#define REQ_NOT_ENTR_L1				BIT(5)

/* PARF_PCS_DEEMPH register fields */
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		FIELD_PREP(GENMASK(21, 16), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	FIELD_PREP(GENMASK(13, 8), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	FIELD_PREP(GENMASK(5, 0), x)

/* PARF_PCS_SWING register fields */
#define PCS_SWING_TX_SWING_FULL(x)		FIELD_PREP(GENMASK(14, 8), x)
#define PCS_SWING_TX_SWING_LOW(x)		FIELD_PREP(GENMASK(6, 0), x)

/* PARF_PHY_CTRL register fields */
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
#define PHY_TEST_PWR_DOWN			BIT(0)

/* PARF_PHY_REFCLK register fields */
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

/* PARF_CONFIG_BITS register fields */
#define PHY_RX0_EQ(x)				FIELD_PREP(GENMASK(26, 24), x)

/* PARF_SLV_ADDR_SPACE_SIZE register value */
#define SLV_ADDR_SPACE_SZ			0x10000000

/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN				BIT(0)
#define MSTR_AXI_CLK_EN				BIT(1)
#define BYPASS					BIT(4)

/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
#define EN					BIT(31)

/* PARF_LTSSM register fields */
#define LTSSM_EN				BIT(8)

/* PARF_NO_SNOOP_OVERIDE register fields */
#define WR_NO_SNOOP_OVERIDE_EN			BIT(1)
#define RD_NO_SNOOP_OVERIDE_EN			BIT(3)

/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC				0x4

/* PARF_BDF_TO_SID_CFG fields */
#define BDF_TO_SID_BYPASS			BIT(0)

/* ELBI_SYS_CTRL register fields */
#define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)

/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5

/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
#define CFG_BRIDGE_SB_INIT			BIT(0)

/* PCI_EXP_SLTCAP register fields */
#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
#define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
						PCI_EXP_SLTCAP_PCP | \
						PCI_EXP_SLTCAP_MRLSP | \
						PCI_EXP_SLTCAP_AIP | \
						PCI_EXP_SLTCAP_PIP | \
						PCI_EXP_SLTCAP_HPS | \
						PCI_EXP_SLTCAP_EIP | \
						PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
						PCIE_CAP_SLOT_POWER_LIMIT_SCALE)

#define PERST_DELAY_US				1000

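/*
 * CRC-8 polynomial used by qcom_pcie_config_sid_1_9_0() below to hash
 * BDFs: BIT(2) | BIT(1) | BIT(0) == 0x07, i.e. x^8 + x^2 + x + 1 (the
 * leading x^8 term is implicit).
 */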
#define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0))

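/*
 * Illustrative note: 'speed' is the Current Link Speed field from the
 * Link Status register, used to index the PCI core's pcie_link_speed[]
 * table; e.g. speed == 2 selects PCIE_SPEED_5_0GT, for which
 * PCIE_SPEED2MBS_ENC() returns the encoding-adjusted per-lane bandwidth
 * in Mbps, which Mbps_to_icc() then converts to interconnect units.
 */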
#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
		Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))

struct qcom_pcie_resources_1_0_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_1_0_MAX_RESETS		6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY		3
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
	int num_resets;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY		2
struct qcom_pcie_resources_2_3_2 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_3_MAX_RESETS		7
struct qcom_pcie_resources_2_3_3 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};

#define QCOM_PCIE_2_4_0_MAX_RESETS		12
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
	int num_resets;
};

#define QCOM_PCIE_2_7_0_MAX_SUPPLIES		2
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
	struct reset_control *rst;
};

struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data *clks;
	int num_clks;
	struct reset_control *rst;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*host_post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};
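
/*
 * qcom_pcie_host_init() below drives these callbacks in order: init,
 * PHY power-on, post_init, PERST# deassert, then config_sid, with
 * deinit used on teardown and failure paths; get_resources is called
 * once at probe time and host_post_init after bus enumeration.
 */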

/**
 * struct qcom_pcie_cfg - Per SoC config struct
 * @ops: qcom PCIe ops structure
 * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
 * snooping
 * @no_l0s: Disable the ASPM L0s state
 */
struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
	bool override_no_snoop;
	bool no_l0s;
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	void __iomem *mhi;
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	struct icc_path *icc_mem;
	struct icc_path *icc_cpu;
	const struct qcom_pcie_cfg *cfg;
	struct dentry *debugfs;
	bool suspended;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
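
/*
 * The 100 ms sleep in qcom_ep_reset_deassert() above matches the PCIe
 * CEM spec requirement (T_PVPERL) that PERST# remain asserted for at
 * least 100 ms after power becomes stable.
 */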

static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}
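
/*
 * Once the LTSSM is enabled, link training runs entirely in hardware;
 * the DWC core then polls for link-up through the .link_up() callback
 * (qcom_pcie_link_up() below).
 */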

static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	u16 offset;
	u32 val;

	if (!pcie->cfg->no_l0s)
		return;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}

static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
	val &= ~PCI_EXP_SLTCAP_HPC;
	writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}
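
/*
 * Clearing the Hot-Plug Capable bit above hides the slot's (unsupported)
 * hot-plug capability from the PCIe hotplug core, so it does not try to
 * claim these ports.
 */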

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + ELBI_SYS_CTRL);
	val |= ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->resets[0].id = "pci";
	res->resets[1].id = "axi";
	res->resets[2].id = "ahb";
	res->resets[3].id = "por";
	res->resets[4].id = "phy";
	res->resets[5].id = "ext";

	/* ext is optional on APQ8016 */
	res->num_resets = is_apq ? 5 : 6;
	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	reset_control_bulk_assert(res->num_resets, res->resets);

	writel(1, pcie->parf + PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* Reset the PCIe interface, as U-Boot can leave it in an undefined state */
	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		return ret;

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
		       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the max TLP size to 2K, instead of the default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_reset;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_assert_reset:
	reset_control_assert(res->core);

	return ret;
}

static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
	/* change DBI base address */
	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= EN;
		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PARF_LTSSM);
	val |= LTSSM_EN;
	writel(val, pcie->parf + PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->resets[0].id = "axi_m";
	res->resets[1].id = "axi_s";
	res->resets[2].id = "axi_m_sticky";
	res->resets[3].id = "pipe_sticky";
	res->resets[4].id = "pwr";
	res->resets[5].id = "ahb";
	res->resets[6].id = "pipe";
	res->resets[7].id = "axi_m_vmid";
	res->resets[8].id = "axi_s_xpu";
	res->resets[9].id = "parf";
	res->resets[10].id = "phy";
	res->resets[11].id = "phy_ahb";

	res->num_resets = is_ipq ? 12 : 6;

	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_bulk_assert(res->num_resets, res->resets);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		reset_control_bulk_assert(res->num_resets, res->resets);
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->rst[0].id = "axi_m";
	res->rst[1].id = "axi_s";
	res->rst[2].id = "pipe";
	res->rst[3].id = "axi_m_sticky";
	res->rst[4].id = "sticky";
	res->rst[5].id = "ahb";
	res->rst[6].id = "sleep";

	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(2000, 2500);

	ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	/*
	 * We don't have a way to see whether the reset has completed, so
	 * wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_resets;
	}

	return 0;

err_assert_resets:
	/*
	 * Not checking for failure here, as we will return the original
	 * failure in 'ret' anyway.
	 */
	reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);

	return ret;
}

static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);
	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	return 0;
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	/* Wait for reset to complete, required on SM8450 */
	usleep_range(1000, 1500);

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	/* Enable L1 and L1SS */
	val = readl(pcie->parf + PARF_PM_CTRL);
	val &= ~REQ_NOT_ENTR_L1;
	writel(val, pcie->parf + PARF_PM_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;

	if (pcie_cfg->override_no_snoop)
		writel(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN,
				pcie->parf + PARF_NO_SNOOP_OVERIDE);

	qcom_pcie_clear_aspm_l0s(pcie->pci);
	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
{
	/*
	 * Downstream devices need to be in D0 state before enabling PCI PM
	 * substates.
	 */
	pci_set_power_state_locked(pdev, PCI_D0);
	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);

	return 0;
}

static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
{
	struct dw_pcie_rp *pp = &pcie->pci->pp;

	pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;
	u32 val;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	/* Enable BDF to SID translation by disabling bypass mode (default) */
	val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
	val &= ~BDF_TO_SID_BYPASS;
	writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
				   size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* Registers need to be zeroed out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		__be16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for the next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If the NEXT field is NULL then update it with the next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}
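
/*
 * Worked example with hypothetical values: an iommu-map entry of
 * <0x100 &apps_smmu 0x1c01 0x1> maps BDF 01:00.0 to SID 0x1c01. With
 * smmu_sid_base taken from the first entry, the table slot addressed by
 * crc8(0x0100) receives 0x100 << 16 | (0x1c01 - smmu_sid_base) << 8,
 * and BDFs whose hashes collide are chained through the NEXT byte.
 */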

static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	return 0;
}

static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct device *dev = pcie->pci->dev;
	int ret;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		return ret;
	}

	/*
	 * The delay periods before and after reset deassert are working
	 * values taken from the downstream CodeAurora kernel.
	 */
	usleep_range(2000, 2500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		return ret;
	}

	usleep_range(2000, 2500);

	return clk_bulk_prepare_enable(res->num_clks, res->clks);
}

static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	writel(SLV_ADDR_SPACE_SZ,
		pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
		pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
		GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
		pci->dbi_base + GEN3_RELATED_OFF);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);

	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}

static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
	if (ret)
		goto err_deinit;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	qcom_ep_reset_assert(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}

static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_ep_reset_assert(pcie);
	phy_power_off(pcie->phy);
	pcie->cfg->ops->deinit(pcie);
}

static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	if (pcie->cfg->ops->host_post_init)
		pcie->cfg->ops->host_post_init(pcie);
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.init		= qcom_pcie_host_init,
	.deinit		= qcom_pcie_host_deinit,
	.post_init	= qcom_pcie_host_post_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.host_post_init = qcom_pcie_host_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_1_9_0,
};

/* Qcom IP rev.: 2.9.0	Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct qcom_pcie_cfg cfg_1_0_0 = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg cfg_1_9_0 = {
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg cfg_1_34_0 = {
	.ops = &ops_1_9_0,
	.override_no_snoop = true,
};

static const struct qcom_pcie_cfg cfg_2_1_0 = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg cfg_2_3_2 = {
	.ops = &ops_2_3_2,
};

static const struct qcom_pcie_cfg cfg_2_3_3 = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg cfg_2_4_0 = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg cfg_2_7_0 = {
	.ops = &ops_2_7_0,
};

static const struct qcom_pcie_cfg cfg_2_9_0 = {
	.ops = &ops_2_9_0,
};

static const struct qcom_pcie_cfg cfg_sc8280xp = {
	.ops = &ops_1_9_0,
	.no_l0s = true,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};

static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	int ret;

	pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
	if (IS_ERR(pcie->icc_mem))
		return PTR_ERR(pcie->icc_mem);

	pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
	if (IS_ERR(pcie->icc_cpu))
		return PTR_ERR(pcie->icc_cpu);
	/*
	 * Some Qualcomm platforms require interconnect bandwidth constraints
	 * to be set before enabling interconnect clocks.
	 *
	 * Set an initial peak bandwidth corresponding to single-lane Gen 1
	 * for the pcie-mem path.
	 */
	ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
	if (ret) {
		dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
			ret);
		return ret;
	}

	/*
	 * Since the CPU-PCIe path is only used for activities like register
	 * access of the host controller and endpoint Config/BAR space access,
	 * the HW team has recommended a minimal bandwidth of 1 KBps just to
	 * keep the path active.
	 */
	ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
	if (ret) {
		dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
			ret);
		icc_set_bw(pcie->icc_mem, 0, 0);
		return ret;
	}

	return 0;
}
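
/*
 * The "pcie-mem" and "cpu-pcie" handles used above correspond to DT
 * entries along these lines (illustrative only, not from a real DT):
 *
 *	interconnects = <&aggre_noc MASTER_PCIE_0 0 &mc_virt SLAVE_EBI1 0>,
 *			<&gem_noc MASTER_APPSS_PROC 0 &cnoc SLAVE_PCIE_0 0>;
 *	interconnect-names = "pcie-mem", "cpu-pcie";
 */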

static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
{
	u32 offset, status, width, speed;
	struct dw_pcie *pci = pcie->pci;
	unsigned long freq_kbps;
	struct dev_pm_opp *opp;
	int ret, freq_mbps;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	/* Only update constraints if link is up. */
	if (!(status & PCI_EXP_LNKSTA_DLLLA))
		return;

	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);

	if (pcie->icc_mem) {
		ret = icc_set_bw(pcie->icc_mem, 0,
				 width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
		if (ret) {
			dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
				ret);
		}
	} else {
		freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
		if (freq_mbps < 0)
			return;

		freq_kbps = freq_mbps * KILO;
		opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
						 true);
		if (!IS_ERR(opp)) {
			ret = dev_pm_opp_set_opp(pci->dev, opp);
			if (ret)
				dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
					freq_kbps * width, ret);
			dev_pm_opp_put(opp);
		}
	}
}
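
/*
 * Note that the OPP table here is keyed by aggregate link bandwidth
 * rather than a clock rate: the lookup value above is
 * pcie_dev_speed_mbps() * KILO * width, so e.g. a two-lane link selects
 * the OPP whose opp-hz equals twice the per-lane rate expressed in kbps.
 */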

static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
{
	struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);

	seq_printf(s, "L0s transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));

	seq_printf(s, "L1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));

	seq_printf(s, "L1.1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));

	seq_printf(s, "L1.2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));

	seq_printf(s, "L2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));

	return 0;
}

static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	char *name;

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name)
		return;

	pcie->debugfs = debugfs_create_dir(name, NULL);
	debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
				    qcom_pcie_link_transition_count);
}
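
/*
 * With debugfs mounted in the usual place, this exposes a per-controller
 * file such as /sys/kernel/debug/<node name>/link_transition_count,
 * where the directory name is the %pOFP-formatted OF node name (e.g.
 * something like "pcie@1c00000").
 */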

static int qcom_pcie_probe(struct platform_device *pdev)
{
	const struct qcom_pcie_cfg *pcie_cfg;
	unsigned long max_freq = ULONG_MAX;
	struct device *dev = &pdev->dev;
	struct dev_pm_opp *opp;
	struct qcom_pcie *pcie;
	struct dw_pcie_rp *pp;
	struct resource *res;
	struct dw_pcie *pci;
	int ret;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg || !pcie_cfg->ops) {
		dev_err(dev, "Invalid platform data\n");
		return -EINVAL;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	/* MHI region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
	if (res) {
		pcie->mhi = devm_ioremap_resource(dev, res);
		if (IS_ERR(pcie->mhi)) {
			ret = PTR_ERR(pcie->mhi);
			goto err_pm_runtime_put;
		}
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_err_probe(dev, ret, "Failed to add OPP table\n");
		goto err_pm_runtime_put;
	}

	/*
	 * Before the PCIe link is initialized, vote for the highest OPP in
	 * the OPP table, so that we are voting for the maximum voltage corner
	 * and the link can come up at the maximum supported speed. At the end
	 * of probe(), the OPP will be updated using qcom_pcie_icc_opp_update().
	 */
	if (!ret) {
		opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err_probe(pci->dev, ret,
				      "Unable to find max freq OPP\n");
			goto err_pm_runtime_put;
		} else {
			ret = dev_pm_opp_set_opp(dev, opp);
		}

		dev_pm_opp_put(opp);
		if (ret) {
			dev_err_probe(pci->dev, ret,
				      "Failed to set OPP for freq %lu\n",
				      max_freq);
			goto err_pm_runtime_put;
		}
	} else {
		/* Skip ICC init if OPP is supported, as it is handled by OPP */
		ret = qcom_pcie_icc_init(pcie);
		if (ret)
			goto err_pm_runtime_put;
	}

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	qcom_pcie_icc_opp_update(pcie);

	if (pcie->mhi)
		qcom_pcie_init_debugfs(pcie);

	return 0;

err_phy_exit:
	phy_exit(pcie->phy);
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static int qcom_pcie_suspend_noirq(struct device *dev)
{
	struct qcom_pcie *pcie = dev_get_drvdata(dev);
	int ret = 0;

	/*
	 * Set the minimum bandwidth required to keep the data path functional
	 * during suspend.
	 */
	if (pcie->icc_mem) {
		ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
		if (ret) {
			dev_err(dev,
				"Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
				ret);
			return ret;
		}
	}

	/*
	 * Turn OFF the resources only for controllers without active PCIe
	 * devices. For controllers with active devices, the resources are kept
	 * ON and the link is expected to be in L0/L1 (sub)states.
	 *
	 * Turning OFF the resources for controllers with active PCIe devices
	 * will trigger access violations during the end of the suspend cycle,
	 * as the kernel tries to access the PCIe devices' config space to mask
	 * MSIs.
	 *
	 * Also, it is not desirable to put the link into the L2/L3 state, as
	 * that implies the VDD supply will be removed and the devices may go
	 * into a powerdown state. This will affect the lifetime of storage
	 * devices like NVMe.
	 */
	if (!dw_pcie_link_up(pcie->pci)) {
		qcom_pcie_host_deinit(&pcie->pci->pp);
		pcie->suspended = true;
	}

	/*
	 * Only disable the CPU-PCIe interconnect path if the suspend is
	 * non-S2RAM, because on some platforms DBI access can happen very late
	 * during S2RAM and an inactive CPU-PCIe interconnect path may lead to
	 * a NoC error.
	 */
	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_disable(pcie->icc_cpu);
		if (ret)
			dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);

		if (!pcie->icc_mem)
			dev_pm_opp_set_opp(pcie->pci->dev, NULL);
	}

	return ret;
}

static int qcom_pcie_resume_noirq(struct device *dev)
{
	struct qcom_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_enable(pcie->icc_cpu);
		if (ret) {
			dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
			return ret;
		}
	}

	if (pcie->suspended) {
		ret = qcom_pcie_host_init(&pcie->pci->pp);
		if (ret)
			return ret;

		pcie->suspended = false;
	}

	qcom_pcie_icc_opp_update(pcie);

	return 0;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0 },
	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
	{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-x1e80100", .data = &cfg_1_9_0 },
	{ }
};

static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static const struct dev_pm_ops qcom_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
};

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
		.pm = &qcom_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
builtin_platform_driver(qcom_pcie_driver);