2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/pci/controller/dwc/pcie-qcom.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 
+#include "../../pci.h"
 #include "pcie-designware.h"
 
 #define PCIE20_PARF_SYS_CTRL 0x00
@@ -38,11 +39,6 @@
 #define AUX_PWR_DET BIT(4)
 #define L23_CLK_RMV_DIS BIT(2)
 #define L1_CLK_RMV_DIS BIT(1)
-
-#define PCIE20_COMMAND_STATUS 0x04
-#define CMD_BME_VAL 0x4
-#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
-#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
 
 #define PCIE20_PARF_PHY_CTRL 0x40
 #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
@@ -60,6 +56,7 @@
 #define PCIE20_PARF_LTSSM 0x1B0
 #define PCIE20_PARF_SID_OFFSET 0x234
 #define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
+#define PCIE20_PARF_DEVICE_TYPE 0x1000
 
 #define PCIE20_ELBI_SYS_CTRL 0x04
 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
@@ -70,10 +67,6 @@
 #define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
 #define CFG_BRIDGE_SB_INIT BIT(0)
 
-#define PCIE20_CAP 0x70
-#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC)
-#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11))
-#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
 #define PCIE_CAP_LINK1_VAL 0x2FD7F
 
 #define PCIE20_PARF_Q2A_FLUSH 0x1AC
@@ -98,11 +91,14 @@
 #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
 #define SLV_ADDR_SPACE_SZ 0x10000000
 
+#define PCIE20_LNK_CONTROL2_LINK_STATUS2 0xa0
+
+#define DEVICE_TYPE_RC 0x4
+
 #define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
+#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
 struct qcom_pcie_resources_2_1_0 {
-	struct clk *iface_clk;
-	struct clk *core_clk;
-	struct clk *phy_clk;
+	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
 	struct reset_control *pci_reset;
 	struct reset_control *axi_reset;
 	struct reset_control *ahb_reset;
@@ -131,10 +127,10 @@
 	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
 };
 
+#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
 struct qcom_pcie_resources_2_4_0 {
-	struct clk *aux_clk;
-	struct clk *master_clk;
-	struct clk *slave_clk;
+	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
+	int num_clks;
 	struct reset_control *axi_m_reset;
 	struct reset_control *axi_s_reset;
 	struct reset_control *pipe_reset;
@@ -158,12 +154,20 @@
 	struct reset_control *rst[7];
 };
 
+struct qcom_pcie_resources_2_7_0 {
+	struct clk_bulk_data clks[6];
+	struct regulator_bulk_data supplies[2];
+	struct reset_control *pci_reset;
+	struct clk *pipe_clk;
+};
+
 union qcom_pcie_resources {
 	struct qcom_pcie_resources_1_0_0 v1_0_0;
 	struct qcom_pcie_resources_2_1_0 v2_1_0;
 	struct qcom_pcie_resources_2_3_2 v2_3_2;
 	struct qcom_pcie_resources_2_3_3 v2_3_3;
 	struct qcom_pcie_resources_2_4_0 v2_4_0;
+	struct qcom_pcie_resources_2_7_0 v2_7_0;
 };
 
 struct qcom_pcie;
@@ -242,17 +246,21 @@
 	if (ret)
 		return ret;
 
-	res->iface_clk = devm_clk_get(dev, "iface");
-	if (IS_ERR(res->iface_clk))
-		return PTR_ERR(res->iface_clk);
+	res->clks[0].id = "iface";
+	res->clks[1].id = "core";
+	res->clks[2].id = "phy";
+	res->clks[3].id = "aux";
+	res->clks[4].id = "ref";
 
-	res->core_clk = devm_clk_get(dev, "core");
-	if (IS_ERR(res->core_clk))
-		return PTR_ERR(res->core_clk);
+	/* iface, core, phy are required */
+	ret = devm_clk_bulk_get(dev, 3, res->clks);
+	if (ret < 0)
+		return ret;
 
-	res->phy_clk = devm_clk_get(dev, "phy");
-	if (IS_ERR(res->phy_clk))
-		return PTR_ERR(res->phy_clk);
+	/* aux, ref are optional */
+	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
+	if (ret < 0)
+		return ret;
 
 	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
 	if (IS_ERR(res->pci_reset))
@@ -282,15 +290,16 @@
 {
 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
 
+	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
 	reset_control_assert(res->pci_reset);
 	reset_control_assert(res->axi_reset);
 	reset_control_assert(res->ahb_reset);
 	reset_control_assert(res->por_reset);
 	reset_control_assert(res->ext_reset);
-	reset_control_assert(res->pci_reset);
-	clk_disable_unprepare(res->iface_clk);
-	clk_disable_unprepare(res->core_clk);
-	clk_disable_unprepare(res->phy_clk);
+	reset_control_assert(res->phy_reset);
+
+	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
 }
 
@@ -303,34 +312,18 @@
 	u32 val;
 	int ret;
 
+	/* reset the PCIe interface as U-Boot can leave it in an undefined state */
+	reset_control_assert(res->pci_reset);
+	reset_control_assert(res->axi_reset);
+	reset_control_assert(res->ahb_reset);
+	reset_control_assert(res->por_reset);
+	reset_control_assert(res->ext_reset);
+	reset_control_assert(res->phy_reset);
+
 	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
 	if (ret < 0) {
 		dev_err(dev, "cannot enable regulators\n");
 		return ret;
-	}
-
-	ret = reset_control_assert(res->ahb_reset);
-	if (ret) {
-		dev_err(dev, "cannot assert ahb reset\n");
-		goto err_assert_ahb;
-	}
-
-	ret = clk_prepare_enable(res->iface_clk);
-	if (ret) {
-		dev_err(dev, "cannot prepare/enable iface clock\n");
-		goto err_assert_ahb;
-	}
-
-	ret = clk_prepare_enable(res->phy_clk);
-	if (ret) {
-		dev_err(dev, "cannot prepare/enable phy clock\n");
-		goto err_clk_phy;
-	}
-
-	ret = clk_prepare_enable(res->core_clk);
-	if (ret) {
-		dev_err(dev, "cannot prepare/enable core clock\n");
-		goto err_clk_core;
 	}
 
 	ret = reset_control_deassert(res->ahb_reset);
@@ -342,7 +335,31 @@
 	ret = reset_control_deassert(res->ext_reset);
 	if (ret) {
 		dev_err(dev, "cannot deassert ext reset\n");
-		goto err_deassert_ahb;
+		goto err_deassert_ext;
+	}
+
+	ret = reset_control_deassert(res->phy_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert phy reset\n");
+		goto err_deassert_phy;
+	}
+
+	ret = reset_control_deassert(res->pci_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert pci reset\n");
+		goto err_deassert_pci;
+	}
+
+	ret = reset_control_deassert(res->por_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert por reset\n");
+		goto err_deassert_por;
+	}
+
+	ret = reset_control_deassert(res->axi_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi reset\n");
+		goto err_deassert_axi;
 	}
 
 	/* enable PCIe clocks and resets */
@@ -350,7 +367,12 @@
 	val &= ~BIT(0);
 	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
 
-	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+	if (ret)
+		goto err_clks;
+
+	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
+	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
 		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
 		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
 		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
@@ -377,33 +399,8 @@
 	val |= PHY_REFCLK_SSP_EN;
 	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
 
-	ret = reset_control_deassert(res->phy_reset);
-	if (ret) {
-		dev_err(dev, "cannot deassert phy reset\n");
-		return ret;
-	}
-
-	ret = reset_control_deassert(res->pci_reset);
-	if (ret) {
-		dev_err(dev, "cannot deassert pci reset\n");
-		return ret;
-	}
-
-	ret = reset_control_deassert(res->por_reset);
-	if (ret) {
-		dev_err(dev, "cannot deassert por reset\n");
-		return ret;
-	}
-
-	ret = reset_control_deassert(res->axi_reset);
-	if (ret) {
-		dev_err(dev, "cannot deassert axi reset\n");
-		return ret;
-	}
-
 	/* wait for clock acquisition */
 	usleep_range(1000, 1500);
-
 
 	/* Set the Max TLP size to 2K, instead of using default of 4K */
 	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
@@ -413,13 +410,19 @@
 
 	return 0;
 
+err_clks:
+	reset_control_assert(res->axi_reset);
+err_deassert_axi:
+	reset_control_assert(res->por_reset);
+err_deassert_por:
+	reset_control_assert(res->pci_reset);
+err_deassert_pci:
+	reset_control_assert(res->phy_reset);
+err_deassert_phy:
+	reset_control_assert(res->ext_reset);
+err_deassert_ext:
+	reset_control_assert(res->ahb_reset);
 err_deassert_ahb:
-	clk_disable_unprepare(res->core_clk);
-err_clk_core:
-	clk_disable_unprepare(res->phy_clk);
-err_clk_phy:
-	clk_disable_unprepare(res->iface_clk);
-err_assert_ahb:
 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
 
 	return ret;
@@ -693,18 +696,20 @@
 	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
 	struct dw_pcie *pci = pcie->pci;
 	struct device *dev = pci->dev;
+	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
+	int ret;
 
-	res->aux_clk = devm_clk_get(dev, "aux");
-	if (IS_ERR(res->aux_clk))
-		return PTR_ERR(res->aux_clk);
+	res->clks[0].id = "aux";
+	res->clks[1].id = "master_bus";
+	res->clks[2].id = "slave_bus";
+	res->clks[3].id = "iface";
 
-	res->master_clk = devm_clk_get(dev, "master_bus");
-	if (IS_ERR(res->master_clk))
-		return PTR_ERR(res->master_clk);
+	/* qcom,pcie-ipq4019 is defined without "iface" */
+	res->num_clks = is_ipq ? 3 : 4;
 
-	res->slave_clk = devm_clk_get(dev, "slave_bus");
-	if (IS_ERR(res->slave_clk))
-		return PTR_ERR(res->slave_clk);
+	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
+	if (ret < 0)
+		return ret;
 
 	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
 	if (IS_ERR(res->axi_m_reset))
@@ -714,27 +719,33 @@
 	if (IS_ERR(res->axi_s_reset))
 		return PTR_ERR(res->axi_s_reset);
 
-	res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
-	if (IS_ERR(res->pipe_reset))
-		return PTR_ERR(res->pipe_reset);
+	if (is_ipq) {
+		/*
+		 * These resources relate to the PHY or are secure clocks, but
+		 * are controlled here for IPQ4019
+		 */
+		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
+		if (IS_ERR(res->pipe_reset))
+			return PTR_ERR(res->pipe_reset);
 
-	res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
-						"axi_m_vmid");
-	if (IS_ERR(res->axi_m_vmid_reset))
-		return PTR_ERR(res->axi_m_vmid_reset);
+		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
+							"axi_m_vmid");
+		if (IS_ERR(res->axi_m_vmid_reset))
+			return PTR_ERR(res->axi_m_vmid_reset);
 
-	res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
-						"axi_s_xpu");
-	if (IS_ERR(res->axi_s_xpu_reset))
-		return PTR_ERR(res->axi_s_xpu_reset);
+		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
+							"axi_s_xpu");
+		if (IS_ERR(res->axi_s_xpu_reset))
+			return PTR_ERR(res->axi_s_xpu_reset);
 
-	res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
-	if (IS_ERR(res->parf_reset))
-		return PTR_ERR(res->parf_reset);
+		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
+		if (IS_ERR(res->parf_reset))
+			return PTR_ERR(res->parf_reset);
 
-	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
-	if (IS_ERR(res->phy_reset))
-		return PTR_ERR(res->phy_reset);
+		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+		if (IS_ERR(res->phy_reset))
+			return PTR_ERR(res->phy_reset);
+	}
 
 	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
 						"axi_m_sticky");
@@ -754,9 +765,11 @@
 	if (IS_ERR(res->ahb_reset))
 		return PTR_ERR(res->ahb_reset);
 
-	res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
-	if (IS_ERR(res->phy_ahb_reset))
-		return PTR_ERR(res->phy_ahb_reset);
+	if (is_ipq) {
+		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
+		if (IS_ERR(res->phy_ahb_reset))
+			return PTR_ERR(res->phy_ahb_reset);
+	}
 
 	return 0;
 }
@@ -774,9 +787,7 @@
 	reset_control_assert(res->axi_m_sticky_reset);
 	reset_control_assert(res->pwr_reset);
 	reset_control_assert(res->ahb_reset);
-	clk_disable_unprepare(res->aux_clk);
-	clk_disable_unprepare(res->master_clk);
-	clk_disable_unprepare(res->slave_clk);
+	clk_bulk_disable_unprepare(res->num_clks, res->clks);
 }
 
 static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
@@ -905,23 +916,9 @@
 
 	usleep_range(10000, 12000);
 
-	ret = clk_prepare_enable(res->aux_clk);
-	if (ret) {
-		dev_err(dev, "cannot prepare/enable iface clock\n");
-		goto err_clk_aux;
-	}
-
-	ret = clk_prepare_enable(res->master_clk);
-	if (ret) {
-		dev_err(dev, "cannot prepare/enable core clock\n");
-		goto err_clk_axi_m;
-	}
-
-	ret = clk_prepare_enable(res->slave_clk);
-	if (ret) {
-		dev_err(dev, "cannot prepare/enable phy clock\n");
-		goto err_clk_axi_s;
-	}
+	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
+	if (ret)
+		goto err_clks;
 
 	/* enable PCIe clocks and resets */
 	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
@@ -946,11 +943,7 @@
 
 	return 0;
 
-err_clk_axi_s:
-	clk_disable_unprepare(res->master_clk);
-err_clk_axi_m:
-	clk_disable_unprepare(res->aux_clk);
-err_clk_aux:
+err_clks:
 	reset_control_assert(res->ahb_reset);
 err_rst_ahb:
 	reset_control_assert(res->pwr_reset);
@@ -1026,6 +1019,7 @@
 	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
 	struct dw_pcie *pci = pcie->pci;
 	struct device *dev = pci->dev;
+	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
 	int i, ret;
 	u32 val;
 
@@ -1099,16 +1093,16 @@
 	       pcie->parf + PCIE20_PARF_SYS_CTRL);
 	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
 
-	writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
+	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
 	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
-	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
+	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
 
-	val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
-	val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
-	writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+	val &= ~PCI_EXP_LNKCAP_ASPMS;
+	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
 
-	writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
-		PCIE20_DEVICE_CONTROL2_STATUS2);
+	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+		PCI_EXP_DEVCTL2);
 
 	return 0;
 
@@ -1131,9 +1125,132 @@
 	return ret;
 }
 
+static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+	if (IS_ERR(res->pci_reset))
+		return PTR_ERR(res->pci_reset);
+
+	res->supplies[0].supply = "vdda";
+	res->supplies[1].supply = "vddpe-3v3";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+				      res->supplies);
+	if (ret)
+		return ret;
+
+	res->clks[0].id = "aux";
+	res->clks[1].id = "cfg";
+	res->clks[2].id = "bus_master";
+	res->clks[3].id = "bus_slave";
+	res->clks[4].id = "slave_q2a";
+	res->clks[5].id = "tbu";
+
+	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+	if (ret < 0)
+		return ret;
+
+	res->pipe_clk = devm_clk_get(dev, "pipe");
+	return PTR_ERR_OR_ZERO(res->pipe_clk);
+}
+
+static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+	if (ret < 0) {
+		dev_err(dev, "cannot enable regulators\n");
+		return ret;
+	}
+
+	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+	if (ret < 0)
+		goto err_disable_regulators;
+
+	ret = reset_control_assert(res->pci_reset);
+	if (ret < 0) {
+		dev_err(dev, "cannot assert pci reset\n");
+		goto err_disable_clocks;
+	}
+
+	usleep_range(1000, 1500);
+
+	ret = reset_control_deassert(res->pci_reset);
+	if (ret < 0) {
+		dev_err(dev, "cannot deassert pci reset\n");
+		goto err_disable_clocks;
+	}
+
+	/* configure PCIe to RC mode */
+	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	/* MAC PHY_POWERDOWN MUX DISABLE */
+	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+	val &= ~BIT(29);
+	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+	val |= BIT(4);
+	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+		val |= BIT(31);
+		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+	}
+
+	return 0;
+err_disable_clocks:
+	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+err_disable_regulators:
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+	return ret;
+}
+
+static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+	return clk_prepare_enable(res->pipe_clk);
+}
+
+static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+	clk_disable_unprepare(res->pipe_clk);
+}
+
 static int qcom_pcie_link_up(struct dw_pcie *pci)
 {
-	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
 
 	return !!(val & PCI_EXP_LNKSTA_DLLLA);
 }
@@ -1161,9 +1278,7 @@
 	}
 
 	dw_pcie_setup_rc(pp);
-
-	if (IS_ENABLED(CONFIG_PCI_MSI))
-		dw_pcie_msi_init(pp);
+	dw_pcie_msi_init(pp);
 
 	qcom_ep_reset_deassert(pcie);
 
@@ -1184,25 +1299,8 @@
 	return ret;
 }
 
-static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
-				 u32 *val)
-{
-	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
-	/* the device class is not reported correctly from the register */
-	if (where == PCI_CLASS_REVISION && size == 4) {
-		*val = readl(pci->dbi_base + PCI_CLASS_REVISION);
-		*val &= 0xff;	/* keep revision id */
-		*val |= PCI_CLASS_BRIDGE_PCI << 16;
-		return PCIBIOS_SUCCESSFUL;
-	}
-
-	return dw_pcie_read(pci->dbi_base + where, size, val);
-}
-
 static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
 	.host_init = qcom_pcie_host_init,
-	.rd_own_conf = qcom_pcie_rd_own_conf,
 };
 
 /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
@@ -1247,6 +1345,16 @@
 	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
 };
 
+/* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_7_0 = {
+	.get_resources = qcom_pcie_get_resources_2_7_0,
+	.init = qcom_pcie_init_2_7_0,
+	.deinit = qcom_pcie_deinit_2_7_0,
+	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+	.post_init = qcom_pcie_post_init_2_7_0,
+	.post_deinit = qcom_pcie_post_deinit_2_7_0,
+};
+
 static const struct dw_pcie_ops dw_pcie_ops = {
 	.link_up = qcom_pcie_link_up,
 };
@@ -1270,10 +1378,8 @@
 
 	pm_runtime_enable(dev);
 	ret = pm_runtime_get_sync(dev);
-	if (ret < 0) {
-		pm_runtime_disable(dev);
-		return ret;
-	}
+	if (ret < 0)
+		goto err_pm_runtime_put;
 
 	pci->dev = dev;
 	pci->ops = &dw_pcie_ops;
@@ -1289,8 +1395,7 @@
 		goto err_pm_runtime_put;
 	}
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
-	pcie->parf = devm_ioremap_resource(dev, res);
+	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
 	if (IS_ERR(pcie->parf)) {
 		ret = PTR_ERR(pcie->parf);
 		goto err_pm_runtime_put;
@@ -1303,8 +1408,7 @@
 		goto err_pm_runtime_put;
 	}
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
-	pcie->elbi = devm_ioremap_resource(dev, res);
+	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
 	if (IS_ERR(pcie->elbi)) {
 		ret = PTR_ERR(pcie->elbi);
 		goto err_pm_runtime_put;
@@ -1331,22 +1435,21 @@
 	}
 
 	ret = phy_init(pcie->phy);
-	if (ret) {
-		pm_runtime_disable(&pdev->dev);
+	if (ret)
 		goto err_pm_runtime_put;
-	}
 
 	platform_set_drvdata(pdev, pcie);
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
 		dev_err(dev, "cannot initialize host\n");
-		pm_runtime_disable(&pdev->dev);
-		goto err_pm_runtime_put;
+		goto err_phy_exit;
 	}
 
 	return 0;
 
+err_phy_exit:
+	phy_exit(pcie->phy);
 err_pm_runtime_put:
 	pm_runtime_put(dev);
 	pm_runtime_disable(dev);
@@ -1357,13 +1460,28 @@
 static const struct of_device_id qcom_pcie_match[] = {
 	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
 	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
+	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &ops_2_1_0 },
 	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
 	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
 	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
 	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
+	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
+	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
 	{ }
 };
 
+static void qcom_fixup_class(struct pci_dev *dev)
+{
+	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
+
 static struct platform_driver qcom_pcie_driver = {
 	.probe = qcom_pcie_probe,
 	.driver = {